// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

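/*
 * Wrappers for the optional notifier operations: a notifier may implement
 * any subset of the .bound(), .unbind() and .complete() callbacks, so a
 * missing callback is treated as a no-op (or success).
 */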
static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n,
					  struct v4l2_subdev *subdev,
					  struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->bound)
		return 0;

	return n->ops->bound(n, subdev, asd);
}

static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n,
					    struct v4l2_subdev *subdev,
					    struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->unbind)
		return;

	n->ops->unbind(n, subdev, asd);
}

static int v4l2_async_notifier_call_complete(struct v4l2_async_notifier *n)
{
	if (!n->ops || !n->ops->complete)
		return 0;

	return n->ops->complete(n);
}

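/*
 * Matching helpers. Each match_*() function below compares one registered
 * sub-device (sd) with one async sub-device descriptor (asd) and returns
 * true on a match; v4l2_async_find_match() picks the helper according to
 * the descriptor's match_type.
 */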
static bool match_i2c(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);

	return client &&
		asd->match.i2c.adapter_id == client->adapter->nr &&
		asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}

static bool match_devname(struct v4l2_async_notifier *notifier,
			  struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	return !strcmp(asd->match.device_name, dev_name(sd->dev));
}

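/*
 * Fwnode matching accepts either a device fwnode or an endpoint fwnode on
 * both sides. Identical fwnodes match directly; otherwise an endpoint on
 * one side is matched against the device fwnode of the other side by
 * walking up to the endpoint's port parent.
 */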
static bool match_fwnode(struct v4l2_async_notifier *notifier,
			 struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	struct fwnode_handle *other_fwnode;
	struct fwnode_handle *dev_fwnode;
	bool asd_fwnode_is_ep;
	bool sd_fwnode_is_ep;
	struct device *dev;

	/*
	 * Both the subdev and the async subdev can provide either an endpoint
	 * fwnode or a device fwnode. Start with the simple case of direct
	 * fwnode matching.
	 */
	if (sd->fwnode == asd->match.fwnode)
		return true;

	/*
	 * Otherwise, check if the sd fwnode and the asd fwnode refer to an
	 * endpoint or a device. If they're of the same type, there's no match.
	 * Technically speaking this checks if the nodes refer to a connected
	 * endpoint, which is the simplest check that works for both OF and
	 * ACPI. This won't make a difference, as drivers should not try to
	 * match unconnected endpoints.
	 */
	sd_fwnode_is_ep = fwnode_graph_is_endpoint(sd->fwnode);
	asd_fwnode_is_ep = fwnode_graph_is_endpoint(asd->match.fwnode);

	if (sd_fwnode_is_ep == asd_fwnode_is_ep)
		return false;

	/*
	 * The sd and asd fwnodes are of different types. Get the device fwnode
	 * parent of the endpoint fwnode, and compare it with the other fwnode.
	 */
	if (sd_fwnode_is_ep) {
		dev_fwnode = fwnode_graph_get_port_parent(sd->fwnode);
		other_fwnode = asd->match.fwnode;
	} else {
		dev_fwnode = fwnode_graph_get_port_parent(asd->match.fwnode);
		other_fwnode = sd->fwnode;
	}

	fwnode_handle_put(dev_fwnode);

	if (dev_fwnode != other_fwnode)
		return false;

	/*
	 * We have a heterogeneous match. Retrieve the struct device of the side
	 * that matched on a device fwnode to print its driver name.
	 */
	if (sd_fwnode_is_ep)
		dev = notifier->v4l2_dev ? notifier->v4l2_dev->dev
		    : notifier->sd->dev;
	else
		dev = sd->dev;

	if (dev && dev->driver) {
		if (sd_fwnode_is_ep)
			dev_warn(dev, "Driver %s uses device fwnode, incorrect match may occur\n",
				 dev->driver->name);
		dev_notice(dev, "Consider updating driver %s to match on endpoints\n",
			   dev->driver->name);
	}

	return true;
}

static bool match_custom(struct v4l2_async_notifier *notifier,
			 struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	if (!asd->match.custom.match)
		/* Match always */
		return true;

	return asd->match.custom.match(sd->dev, asd);
}

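/*
 * Global state: sub-devices not yet claimed by any notifier sit on
 * subdev_list, registered notifiers sit on notifier_list, and list_lock
 * serialises all manipulation of the async lists.
 */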
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

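/*
 * Look for an async sub-device descriptor on the notifier's waiting list
 * that matches the given sub-device, using the match_*() helper selected by
 * the descriptor's match_type. Returns NULL if nothing matches.
 */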
static struct v4l2_async_subdev *
v4l2_async_find_match(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd);
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* match_type has been verified valid before */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_CUSTOM:
			match = match_custom;
			break;
		case V4L2_ASYNC_MATCH_DEVNAME:
			match = match_devname;
			break;
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_FWNODE:
			match = match_fwnode;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(notifier, sd, asd))
			return asd;
	}

	return NULL;
}

/* Compare two async sub-device descriptors for equivalence */
static bool asd_equal(struct v4l2_async_subdev *asd_x,
		      struct v4l2_async_subdev *asd_y)
{
	if (asd_x->match_type != asd_y->match_type)
		return false;

	switch (asd_x->match_type) {
	case V4L2_ASYNC_MATCH_DEVNAME:
		return strcmp(asd_x->match.device_name,
			      asd_y->match.device_name) == 0;
	case V4L2_ASYNC_MATCH_I2C:
		return asd_x->match.i2c.adapter_id ==
			asd_y->match.i2c.adapter_id &&
			asd_x->match.i2c.address ==
			asd_y->match.i2c.address;
	case V4L2_ASYNC_MATCH_FWNODE:
		return asd_x->match.fwnode == asd_y->match.fwnode;
	default:
		break;
	}

	return false;
}

/* Find the sub-device notifier registered by a sub-device driver. */
static struct v4l2_async_notifier *
v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *n;

	list_for_each_entry(n, &notifier_list, list)
		if (n->sd == sd)
			return n;

	return NULL;
}

/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *
v4l2_async_notifier_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
	while (notifier->parent)
		notifier = notifier->parent;

	return notifier->v4l2_dev;
}

/*
 * Return true if all child sub-device notifiers are complete, false otherwise.
 */
static bool
v4l2_async_notifier_can_complete(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd;

	if (!list_empty(&notifier->waiting))
		return false;

	list_for_each_entry(sd, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier &&
		    !v4l2_async_notifier_can_complete(subdev_notifier))
			return false;
	}

	return true;
}

/*
 * Complete the master notifier if possible. This is done when all async
 * sub-devices have been bound; v4l2_device is also available then.
 */
static int
v4l2_async_notifier_try_complete(struct v4l2_async_notifier *notifier)
{
	/* Quick check whether there are still more sub-devices here. */
	if (!list_empty(&notifier->waiting))
		return 0;

	/* Check the entire notifier tree; find the root notifier first. */
	while (notifier->parent)
		notifier = notifier->parent;

	/* This is root if it has v4l2_dev. */
	if (!notifier->v4l2_dev)
		return 0;

	/* Is everything ready? */
	if (!v4l2_async_notifier_can_complete(notifier))
		return 0;

	return v4l2_async_notifier_call_complete(notifier);
}

static int
v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier);

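/*
 * A sub-device and an async descriptor have matched: register the sub-device
 * with the v4l2_device, call the notifier's .bound() operation, move the
 * descriptor off the waiting list and the sub-device onto the notifier's
 * done list, then recurse into the sub-device's own notifier, if any.
 */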
static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
				   struct v4l2_device *v4l2_dev,
				   struct v4l2_subdev *sd,
				   struct v4l2_async_subdev *asd)
{
	struct v4l2_async_notifier *subdev_notifier;
	int ret;

	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret < 0)
		return ret;

	ret = v4l2_async_notifier_call_bound(notifier, sd, asd);
	if (ret < 0) {
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	/*
	 * See if the sub-device has a notifier. If it doesn't, or if it is
	 * already attached to a parent notifier, return here.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (!subdev_notifier || subdev_notifier->parent)
		return 0;

	/*
	 * Proceed with checking for the sub-device notifier's async
	 * sub-devices, and return the result. The error will be handled by the
	 * caller.
	 */
	subdev_notifier->parent = notifier;

	return v4l2_async_notifier_try_all_subdevs(subdev_notifier);
}

/* Test all async sub-devices in a notifier for a match. */
static int
v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_device *v4l2_dev =
		v4l2_async_notifier_find_v4l2_dev(notifier);
	struct v4l2_subdev *sd;

	if (!v4l2_dev)
		return 0;

again:
	list_for_each_entry(sd, &subdev_list, async_list) {
		struct v4l2_async_subdev *asd;
		int ret;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret < 0)
			return ret;

		/*
		 * v4l2_async_match_notify() may lead to registering a
		 * new notifier and thus changing the async subdevs
		 * list. In order to proceed safely from here, restart
		 * parsing the list from the beginning.
		 */
		goto again;
	}

	return 0;
}

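/*
 * Unregister a bound sub-device from its v4l2_device and reset its async
 * bookkeeping; callers are responsible for returning the matched descriptor
 * to a waiting list if needed.
 */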
static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/*
	 * Subdevice driver will reprobe and put the subdev back
	 * onto the list
	 */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
}

/* Unbind all sub-devices in the notifier tree. */
static void
v4l2_async_notifier_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier)
			v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);

		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
		v4l2_async_cleanup(sd);

		list_move(&sd->async_list, &subdev_list);
	}

	notifier->parent = NULL;
}

/* See if an async sub-device can be found in a notifier's lists. */
static bool
__v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
				       struct v4l2_async_subdev *asd)
{
	struct v4l2_async_subdev *asd_y;
	struct v4l2_subdev *sd;

	list_for_each_entry(asd_y, &notifier->waiting, list)
		if (asd_equal(asd, asd_y))
			return true;

	list_for_each_entry(sd, &notifier->done, async_list) {
		if (WARN_ON(!sd->asd))
			continue;

		if (asd_equal(asd, sd->asd))
			return true;
	}

	return false;
}

/*
 * Find out whether an async sub-device was set up already or
 * whether it exists in a given notifier before @this_index.
 * If @this_index < 0, search the notifier's entire @asd_list.
 */
static bool
v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
				     struct v4l2_async_subdev *asd,
				     int this_index)
{
	struct v4l2_async_subdev *asd_y;
	int j = 0;

	lockdep_assert_held(&list_lock);

	/* Check that an asd is not being added more than once. */
	list_for_each_entry(asd_y, &notifier->asd_list, asd_list) {
		if (this_index >= 0 && j++ >= this_index)
			break;
		if (asd_equal(asd, asd_y))
			return true;
	}

	/* Check that an asd does not exist in other notifiers. */
	list_for_each_entry(notifier, &notifier_list, list)
		if (__v4l2_async_notifier_has_async_subdev(notifier, asd))
			return true;

	return false;
}

static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
					 struct v4l2_async_subdev *asd,
					 int this_index)
{
	struct device *dev =
		notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;

	if (!asd)
		return -EINVAL;

	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_CUSTOM:
	case V4L2_ASYNC_MATCH_DEVNAME:
	case V4L2_ASYNC_MATCH_I2C:
	case V4L2_ASYNC_MATCH_FWNODE:
		if (v4l2_async_notifier_has_async_subdev(notifier, asd,
							 this_index)) {
			dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
			return -EEXIST;
		}
		break;
	default:
		dev_err(dev, "Invalid match type %u on %p\n",
			asd->match_type, asd);
		return -EINVAL;
	}

	return 0;
}

void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier)
{
	INIT_LIST_HEAD(&notifier->asd_list);
}
EXPORT_SYMBOL(v4l2_async_notifier_init);

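/*
 * Common registration path for both bridge (v4l2_dev based) and sub-device
 * notifiers: validate each descriptor, populate the waiting list, try to
 * bind any already-registered sub-devices and, if possible, complete the
 * notifier tree.
 */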
static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd;
	int ret, i = 0;

	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	mutex_lock(&list_lock);

	list_for_each_entry(asd, &notifier->asd_list, asd_list) {
		ret = v4l2_async_notifier_asd_valid(notifier, asd, i++);
		if (ret)
			goto err_unlock;

		list_add_tail(&asd->list, &notifier->waiting);
	}

	ret = v4l2_async_notifier_try_all_subdevs(notifier);
	if (ret < 0)
		goto err_unbind;

	ret = v4l2_async_notifier_try_complete(notifier);
	if (ret < 0)
		goto err_unbind;

	/* Keep even completed notifiers on the list */
	list_add(&notifier->list, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * On failure, unbind all sub-devices registered through this notifier.
	 */
	v4l2_async_notifier_unbind_all_subdevs(notifier);

err_unlock:
	mutex_unlock(&list_lock);

	return ret;
}

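/*
 * Register a notifier on behalf of a bridge driver. A rough usage sketch
 * only; "priv", "fwnode" and "my_notifier_ops" are illustrative names, not
 * part of this API:
 *
 *	v4l2_async_notifier_init(&priv->notifier);
 *	asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier, fwnode,
 *						    sizeof(*asd));
 *	if (IS_ERR(asd))
 *		return PTR_ERR(asd);
 *	priv->notifier.ops = &my_notifier_ops;
 *	ret = v4l2_async_notifier_register(&priv->v4l2_dev, &priv->notifier);
 *	if (ret)
 *		v4l2_async_notifier_cleanup(&priv->notifier);
 */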
int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
				 struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!v4l2_dev || notifier->sd))
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;

	ret = __v4l2_async_notifier_register(notifier);
	if (ret)
		notifier->v4l2_dev = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_notifier_register);

int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
					struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!sd || notifier->v4l2_dev))
		return -EINVAL;

	notifier->sd = sd;

	ret = __v4l2_async_notifier_register(notifier);
	if (ret)
		notifier->sd = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_subdev_notifier_register);

static void
__v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
		return;

	v4l2_async_notifier_unbind_all_subdevs(notifier);

	notifier->sd = NULL;
	notifier->v4l2_dev = NULL;

	list_del(&notifier->list);
}

void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_unregister(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_notifier_unregister);

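/*
 * Unregistering detaches the notifier from the framework, while cleanup
 * (below) additionally frees the async sub-device descriptors added to it;
 * drivers normally unregister first and then clean up.
 */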
static void __v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd, *tmp;

	if (!notifier || !notifier->asd_list.next)
		return;

	list_for_each_entry_safe(asd, tmp, &notifier->asd_list, asd_list) {
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_FWNODE:
			fwnode_handle_put(asd->match.fwnode);
			break;
		default:
			break;
		}

		list_del(&asd->asd_list);
		kfree(asd);
	}
}

void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_cleanup(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_cleanup);

int v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier,
				   struct v4l2_async_subdev *asd)
{
	int ret;

	mutex_lock(&list_lock);

	ret = v4l2_async_notifier_asd_valid(notifier, asd, -1);
	if (ret)
		goto unlock;

	list_add_tail(&asd->asd_list, &notifier->asd_list);

unlock:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_subdev);

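/*
 * Allocate an async sub-device descriptor (asd_struct_size allows embedding
 * it at the start of a larger, driver-specific structure), set it up for
 * fwnode matching and add it to the notifier. The descriptor and the fwnode
 * reference taken here are released by v4l2_async_notifier_cleanup().
 */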
struct v4l2_async_subdev *
v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
				      struct fwnode_handle *fwnode,
				      unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
	asd->match.fwnode = fwnode_handle_get(fwnode);

	ret = v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		fwnode_handle_put(fwnode);
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_fwnode_subdev);

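/*
 * Unlike the helper above, the caller supplies the descriptor here; only a
 * reference to the remote port parent of @endpoint is taken by this function
 * and later dropped by v4l2_async_notifier_cleanup().
 */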
int
v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif,
					     struct fwnode_handle *endpoint,
					     struct v4l2_async_subdev *asd)
{
	struct fwnode_handle *remote;
	int ret;

	remote = fwnode_graph_get_remote_port_parent(endpoint);
	if (!remote)
		return -ENOTCONN;

	asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
	asd->match.fwnode = remote;

	ret = v4l2_async_notifier_add_subdev(notif, asd);
	if (ret)
		fwnode_handle_put(remote);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_fwnode_remote_subdev);

struct v4l2_async_subdev *
v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
				   int adapter_id, unsigned short address,
				   unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_I2C;
	asd->match.i2c.adapter_id = adapter_id;
	asd->match.i2c.address = address;

	ret = v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_i2c_subdev);

struct v4l2_async_subdev *
v4l2_async_notifier_add_devname_subdev(struct v4l2_async_notifier *notifier,
				       const char *device_name,
				       unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_DEVNAME;
	asd->match.device_name = device_name;

	ret = v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_devname_subdev);

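/*
 * Sub-device driver side. A sensor driver's probe typically ends with a call
 * like the following sketch ("sensor" is an illustrative private struct, not
 * part of this API), and its remove path calls
 * v4l2_async_unregister_subdev():
 *
 *	ret = v4l2_async_register_subdev(&sensor->sd);
 *	if (ret)
 *		goto err_cleanup;
 */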
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *subdev_notifier;
	struct v4l2_async_notifier *notifier;
	int ret;

	/*
	 * No reference is taken here. The reference is held by the device
	 * (struct v4l2_subdev.dev), and the async sub-device does not exist
	 * independently of the device at any point in time.
	 */
	if (!sd->fwnode && sd->dev)
		sd->fwnode = dev_fwnode(sd->dev);

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_device *v4l2_dev =
			v4l2_async_notifier_find_v4l2_dev(notifier);
		struct v4l2_async_subdev *asd;

		if (!v4l2_dev)
			continue;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret)
			goto err_unbind;

		ret = v4l2_async_notifier_try_complete(notifier);
		if (ret)
			goto err_unbind;

		goto out_unlock;
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

out_unlock:
	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * Matching or completion failed. Unbind the sub-devices bound through
	 * registering this async sub-device.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (subdev_notifier)
		v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);

	if (sd->asd)
		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);

void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_unregister(sd->subdev_notifier);
	__v4l2_async_notifier_cleanup(sd->subdev_notifier);
	kfree(sd->subdev_notifier);
	sd->subdev_notifier = NULL;

	if (sd->asd) {
		struct v4l2_async_notifier *notifier = sd->notifier;

		list_add(&sd->asd->list, &notifier->waiting);

		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
	}

	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);