// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n,
					  struct v4l2_subdev *subdev,
					  struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->bound)
		return 0;

	return n->ops->bound(n, subdev, asd);
}

static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n,
					    struct v4l2_subdev *subdev,
					    struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->unbind)
		return;

	n->ops->unbind(n, subdev, asd);
}

static int v4l2_async_notifier_call_complete(struct v4l2_async_notifier *n)
{
	if (!n->ops || !n->ops->complete)
		return 0;

	return n->ops->complete(n);
}

static bool match_i2c(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);

	return client &&
		asd->match.i2c.adapter_id == client->adapter->nr &&
		asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}

static bool match_devname(struct v4l2_subdev *sd,
			  struct v4l2_async_subdev *asd)
{
	return !strcmp(asd->match.device_name, dev_name(sd->dev));
}

static bool match_fwnode(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	return sd->fwnode == asd->match.fwnode;
}

static bool match_custom(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	if (!asd->match.custom.match)
		/* Match always */
		return true;

	return asd->match.custom.match(sd->dev, asd);
}

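/*
 * Global state: subdev_list holds async sub-devices that have been
 * registered but not yet bound to a notifier, notifier_list holds all
 * registered notifiers (root and sub-device ones alike), and list_lock
 * serializes access to both as well as to the per-notifier waiting/done
 * lists.
 */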
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

static struct v4l2_async_subdev *
v4l2_async_find_match(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd);
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* match_type has been verified valid before */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_CUSTOM:
			match = match_custom;
			break;
		case V4L2_ASYNC_MATCH_DEVNAME:
			match = match_devname;
			break;
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_FWNODE:
			match = match_fwnode;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(sd, asd))
			return asd;
	}

	return NULL;
}

/* Compare two async sub-device descriptors for equivalence */
static bool asd_equal(struct v4l2_async_subdev *asd_x,
		      struct v4l2_async_subdev *asd_y)
{
	if (asd_x->match_type != asd_y->match_type)
		return false;

	switch (asd_x->match_type) {
	case V4L2_ASYNC_MATCH_DEVNAME:
		return strcmp(asd_x->match.device_name,
			      asd_y->match.device_name) == 0;
	case V4L2_ASYNC_MATCH_I2C:
		return asd_x->match.i2c.adapter_id ==
			asd_y->match.i2c.adapter_id &&
			asd_x->match.i2c.address ==
			asd_y->match.i2c.address;
	case V4L2_ASYNC_MATCH_FWNODE:
		return asd_x->match.fwnode == asd_y->match.fwnode;
	default:
		break;
	}

	return false;
}

/* Find the sub-device notifier registered by a sub-device driver. */
static struct v4l2_async_notifier *
v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *n;

	list_for_each_entry(n, &notifier_list, list)
		if (n->sd == sd)
			return n;

	return NULL;
}

/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *
v4l2_async_notifier_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
	while (notifier->parent)
		notifier = notifier->parent;

	return notifier->v4l2_dev;
}

/*
 * Return true if all child sub-device notifiers are complete, false otherwise.
 */
static bool
v4l2_async_notifier_can_complete(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd;

	if (!list_empty(&notifier->waiting))
		return false;

	list_for_each_entry(sd, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier &&
		    !v4l2_async_notifier_can_complete(subdev_notifier))
			return false;
	}

	return true;
}

/*
 * Complete the master notifier if possible. This is done when all async
 * sub-devices have been bound; v4l2_device is also available then.
 */
static int
v4l2_async_notifier_try_complete(struct v4l2_async_notifier *notifier)
{
	/* Quick check whether there are still more sub-devices here. */
	if (!list_empty(&notifier->waiting))
		return 0;

	/* Check the entire notifier tree; find the root notifier first. */
	while (notifier->parent)
		notifier = notifier->parent;

	/* This is root if it has v4l2_dev. */
	if (!notifier->v4l2_dev)
		return 0;

	/* Is everything ready? */
	if (!v4l2_async_notifier_can_complete(notifier))
		return 0;

	return v4l2_async_notifier_call_complete(notifier);
}
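
/*
 * Notifier trees, sketched for orientation: the root notifier is the one
 * registered with v4l2_async_notifier_register() and therefore carries the
 * v4l2_dev pointer; notifiers registered by sub-device drivers through
 * v4l2_async_subdev_notifier_register() hang below it via ->parent once
 * their sub-device has been bound. For example:
 *
 *	bridge notifier (->v4l2_dev set)
 *	 `-- CSI-2 receiver sub-device notifier (->parent = bridge notifier)
 *	      `-- sensor sub-device
 *
 * complete() is only called on the root, and only once every waiting list
 * in the whole tree is empty.
 */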

static int
v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier);

static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
				   struct v4l2_device *v4l2_dev,
				   struct v4l2_subdev *sd,
				   struct v4l2_async_subdev *asd)
{
	struct v4l2_async_notifier *subdev_notifier;
	int ret;

	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret < 0)
		return ret;

	ret = v4l2_async_notifier_call_bound(notifier, sd, asd);
	if (ret < 0) {
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	/*
	 * See if the sub-device has a notifier of its own that has not been
	 * attached to a parent yet. If not, return here.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (!subdev_notifier || subdev_notifier->parent)
		return 0;

	/*
	 * Proceed with checking for the sub-device notifier's async
	 * sub-devices, and return the result. The error will be handled by the
	 * caller.
	 */
	subdev_notifier->parent = notifier;

	return v4l2_async_notifier_try_all_subdevs(subdev_notifier);
}

/* Test all async sub-devices in a notifier for a match. */
static int
v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_device *v4l2_dev =
		v4l2_async_notifier_find_v4l2_dev(notifier);
	struct v4l2_subdev *sd;

	if (!v4l2_dev)
		return 0;

again:
	list_for_each_entry(sd, &subdev_list, async_list) {
		struct v4l2_async_subdev *asd;
		int ret;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret < 0)
			return ret;

		/*
		 * v4l2_async_match_notify() may lead to registering a
		 * new notifier and thus changing the async subdevs
		 * list. In order to proceed safely from here, restart
		 * parsing the list from the beginning.
		 */
		goto again;
	}

	return 0;
}

static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/*
	 * Subdevice driver will reprobe and put the subdev back
	 * onto the list
	 */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
}

/* Unbind all sub-devices in the notifier tree. */
static void
v4l2_async_notifier_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier)
			v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);

		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
		v4l2_async_cleanup(sd);

		list_move(&sd->async_list, &subdev_list);
	}

	notifier->parent = NULL;
}

/* See if an async sub-device can be found in a notifier's lists. */
static bool
__v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
				       struct v4l2_async_subdev *asd)
{
	struct v4l2_async_subdev *asd_y;
	struct v4l2_subdev *sd;

	list_for_each_entry(asd_y, &notifier->waiting, list)
		if (asd_equal(asd, asd_y))
			return true;

	list_for_each_entry(sd, &notifier->done, async_list) {
		if (WARN_ON(!sd->asd))
			continue;

		if (asd_equal(asd, sd->asd))
			return true;
	}

	return false;
}

/*
 * Find out whether an async sub-device was set up already or
 * whether it exists in a given notifier before @this_index.
 * If @this_index < 0, search the notifier's entire @asd_list.
 */
static bool
v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
				     struct v4l2_async_subdev *asd,
				     int this_index)
{
	struct v4l2_async_subdev *asd_y;
	int j = 0;

	lockdep_assert_held(&list_lock);

	/* Check that an asd is not being added more than once. */
	list_for_each_entry(asd_y, &notifier->asd_list, asd_list) {
		if (this_index >= 0 && j++ >= this_index)
			break;
		if (asd_equal(asd, asd_y))
			return true;
	}

	/* Check that an asd does not exist in other notifiers. */
	list_for_each_entry(notifier, &notifier_list, list)
		if (__v4l2_async_notifier_has_async_subdev(notifier, asd))
			return true;

	return false;
}

static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
					 struct v4l2_async_subdev *asd,
					 int this_index)
{
	struct device *dev =
		notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;

	if (!asd)
		return -EINVAL;

	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_CUSTOM:
	case V4L2_ASYNC_MATCH_DEVNAME:
	case V4L2_ASYNC_MATCH_I2C:
	case V4L2_ASYNC_MATCH_FWNODE:
		if (v4l2_async_notifier_has_async_subdev(notifier, asd,
							 this_index)) {
			dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
			return -EEXIST;
		}
		break;
	default:
		dev_err(dev, "Invalid match type %u on %p\n",
			asd->match_type, asd);
		return -EINVAL;
	}

	return 0;
}

void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier)
{
	INIT_LIST_HEAD(&notifier->asd_list);
}
EXPORT_SYMBOL(v4l2_async_notifier_init);
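
/*
 * Usage note (a sketch, not formal documentation): the notifier must be
 * initialized exactly once, before any async sub-devices are added to it
 * and before it is registered, typically early in the owning driver's
 * probe:
 *
 *	v4l2_async_notifier_init(&state->notifier);
 *
 * where "state" stands for a hypothetical driver-private structure
 * embedding the notifier.
 */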

static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd;
	int ret, i = 0;

	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	mutex_lock(&list_lock);

	list_for_each_entry(asd, &notifier->asd_list, asd_list) {
		ret = v4l2_async_notifier_asd_valid(notifier, asd, i++);
		if (ret)
			goto err_unlock;

		list_add_tail(&asd->list, &notifier->waiting);
	}

	ret = v4l2_async_notifier_try_all_subdevs(notifier);
	if (ret < 0)
		goto err_unbind;

	ret = v4l2_async_notifier_try_complete(notifier);
	if (ret < 0)
		goto err_unbind;

	/* Keep also completed notifiers on the list */
	list_add(&notifier->list, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * On failure, unbind all sub-devices registered through this notifier.
	 */
	v4l2_async_notifier_unbind_all_subdevs(notifier);

err_unlock:
	mutex_unlock(&list_lock);

	return ret;
}

int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
				 struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!v4l2_dev || notifier->sd))
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;

	ret = __v4l2_async_notifier_register(notifier);
	if (ret)
		notifier->v4l2_dev = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_notifier_register);
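
/*
 * Sketch of a typical bridge-driver flow (all foo_* names are made up for
 * the example and error handling is abbreviated): initialize the notifier,
 * describe the expected async sub-devices, install the notifier ops and
 * register against the bridge's struct v4l2_device.
 *
 *	static const struct v4l2_async_notifier_operations foo_async_ops = {
 *		.bound = foo_async_bound,
 *		.unbind = foo_async_unbind,
 *		.complete = foo_async_complete,
 *	};
 *
 *	static int foo_register_notifier(struct foo_dev *foo)
 *	{
 *		int ret;
 *
 *		v4l2_async_notifier_init(&foo->notifier);
 *
 *		ret = foo_parse_endpoints(foo);
 *		if (ret)
 *			return ret;
 *
 *		foo->notifier.ops = &foo_async_ops;
 *
 *		ret = v4l2_async_notifier_register(&foo->v4l2_dev,
 *						   &foo->notifier);
 *		if (ret)
 *			v4l2_async_notifier_cleanup(&foo->notifier);
 *
 *		return ret;
 *	}
 *
 * foo_parse_endpoints() stands for driver code filling the notifier's
 * asd_list with the v4l2_async_notifier_add_*_subdev() helpers below.
 */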

int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
					struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!sd || notifier->v4l2_dev))
		return -EINVAL;

	notifier->sd = sd;

	ret = __v4l2_async_notifier_register(notifier);
	if (ret)
		notifier->sd = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_subdev_notifier_register);
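
/*
 * Sketch of typical use (hypothetical names, not a reference
 * implementation): a sub-device driver that itself depends on further
 * async sub-devices - e.g. a CSI-2 receiver waiting for its sensors -
 * registers its own notifier against its struct v4l2_subdev instead of a
 * struct v4l2_device:
 *
 *	v4l2_async_notifier_init(&priv->notifier);
 *
 *	asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier,
 *						    remote_fwnode,
 *						    sizeof(*asd));
 *	if (IS_ERR(asd))
 *		goto err_cleanup;
 *
 *	priv->notifier.ops = &priv_async_ops;
 *
 *	ret = v4l2_async_subdev_notifier_register(&priv->sd,
 *						  &priv->notifier);
 *	if (ret)
 *		goto err_cleanup;
 *
 *	ret = v4l2_async_register_subdev(&priv->sd);
 *	if (ret)
 *		goto err_unregister;
 *
 * The notifier then becomes a child of the bridge's notifier once
 * priv->sd is bound (see v4l2_async_match_notify() above).
 */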

static void
__v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
		return;

	v4l2_async_notifier_unbind_all_subdevs(notifier);

	notifier->sd = NULL;
	notifier->v4l2_dev = NULL;

	list_del(&notifier->list);
}

void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_unregister(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_notifier_unregister);

static void __v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd, *tmp;

	if (!notifier)
		return;

	list_for_each_entry_safe(asd, tmp, &notifier->asd_list, asd_list) {
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_FWNODE:
			fwnode_handle_put(asd->match.fwnode);
			break;
		default:
			break;
		}

		list_del(&asd->asd_list);
		kfree(asd);
	}
}

void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_cleanup(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_cleanup);
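
/*
 * Teardown order, as a hedged usage note rather than formal documentation:
 * v4l2_async_notifier_unregister() only detaches and unbinds the notifier,
 * while v4l2_async_notifier_cleanup() frees the async sub-device
 * descriptors (and puts fwnode references) added via the
 * v4l2_async_notifier_add_*_subdev() helpers. A driver's remove path would
 * therefore typically do, with "state" being its private structure:
 *
 *	v4l2_async_notifier_unregister(&state->notifier);
 *	v4l2_async_notifier_cleanup(&state->notifier);
 */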

int v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier,
				   struct v4l2_async_subdev *asd)
{
	int ret;

	mutex_lock(&list_lock);

	ret = v4l2_async_notifier_asd_valid(notifier, asd, -1);
	if (ret)
		goto unlock;

	list_add_tail(&asd->asd_list, &notifier->asd_list);

unlock:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_subdev);

struct v4l2_async_subdev *
v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
				      struct fwnode_handle *fwnode,
				      unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
	asd->match.fwnode = fwnode;

	ret = v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_fwnode_subdev);
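
/*
 * Sketch of how a bridge driver might use this helper (all names other
 * than the kernel APIs are made up for the example, and error handling is
 * abbreviated): walk the endpoints of its own fwnode, resolve the remote
 * device and describe it as an async sub-device. Note that, as implemented
 * here, the fwnode handle stored in the asd is put by
 * v4l2_async_notifier_cleanup(), so on success the reference obtained for
 * "remote" below is handed over to the notifier; on failure the caller
 * still owns it.
 *
 *	struct fwnode_handle *ep, *remote;
 *	struct v4l2_async_subdev *asd;
 *
 *	ep = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
 *	if (!ep)
 *		return -ENODEV;
 *
 *	remote = fwnode_graph_get_remote_port_parent(ep);
 *	fwnode_handle_put(ep);
 *	if (!remote)
 *		return -ENODEV;
 *
 *	asd = v4l2_async_notifier_add_fwnode_subdev(notifier, remote,
 *						    sizeof(*asd));
 *	if (IS_ERR(asd)) {
 *		fwnode_handle_put(remote);
 *		return PTR_ERR(asd);
 *	}
 */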

struct v4l2_async_subdev *
v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
				   int adapter_id, unsigned short address,
				   unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_I2C;
	asd->match.i2c.adapter_id = adapter_id;
	asd->match.i2c.address = address;

	ret = v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_i2c_subdev);
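
/*
 * Example use (hypothetical values, for illustration only): a notifier
 * waiting for an I2C sub-device at address 0x36 on adapter 2 would add
 *
 *	asd = v4l2_async_notifier_add_i2c_subdev(notifier, 2, 0x36,
 *						 sizeof(*asd));
 *	if (IS_ERR(asd))
 *		return PTR_ERR(asd);
 *
 * and the match is later made against the bound i2c_client's adapter
 * number and address (see match_i2c() above).
 */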

struct v4l2_async_subdev *
v4l2_async_notifier_add_devname_subdev(struct v4l2_async_notifier *notifier,
				       const char *device_name,
				       unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_DEVNAME;
	asd->match.device_name = device_name;

	ret = v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_devname_subdev);
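
/*
 * Example use (again a hypothetical sketch): matching by device name is
 * mostly useful for platform devices whose name is known in advance, e.g.
 *
 *	asd = v4l2_async_notifier_add_devname_subdev(notifier, "foo-csi.0",
 *						     sizeof(*asd));
 *
 * The string is compared with dev_name() of the registering sub-device
 * (see match_devname() above); it is not copied, so it must outlive the
 * notifier.
 */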

int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *subdev_notifier;
	struct v4l2_async_notifier *notifier;
	int ret;

	/*
	 * No reference taken. The reference is held by the device
	 * (struct v4l2_subdev.dev), and async sub-device does not
	 * exist independently of the device at any point of time.
	 */
	if (!sd->fwnode && sd->dev)
		sd->fwnode = dev_fwnode(sd->dev);

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_device *v4l2_dev =
			v4l2_async_notifier_find_v4l2_dev(notifier);
		struct v4l2_async_subdev *asd;

		if (!v4l2_dev)
			continue;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret)
			goto err_unbind;

		ret = v4l2_async_notifier_try_complete(notifier);
		if (ret)
			goto err_unbind;

		goto out_unlock;
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

out_unlock:
	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * Matching or completion failed. Unbind the sub-devices bound through
	 * registering this async sub-device.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (subdev_notifier)
		v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);

	if (sd->asd)
		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);
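
/*
 * Sketch of a sub-device (e.g. sensor) driver's probe tail, with
 * hypothetical names; not a complete driver: after initializing the
 * subdev, registering it asynchronously is all that is needed for a
 * waiting notifier to pick it up, since sd->fwnode defaults to the
 * device's fwnode above.
 *
 *	v4l2_i2c_subdev_init(&sensor->sd, client, &sensor_subdev_ops);
 *
 *	ret = v4l2_async_register_subdev(&sensor->sd);
 *	if (ret)
 *		return ret;
 */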

void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_unregister(sd->subdev_notifier);
	__v4l2_async_notifier_cleanup(sd->subdev_notifier);
	kfree(sd->subdev_notifier);
	sd->subdev_notifier = NULL;

	if (sd->asd) {
		struct v4l2_async_notifier *notifier = sd->notifier;

		list_add(&sd->asd->list, &notifier->waiting);

		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
	}

	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);
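
/*
 * The matching remove path of the sketch above would simply call
 *
 *	v4l2_async_unregister_subdev(&sensor->sd);
 *
 * which unbinds the sub-device from its notifier (if any) and puts its
 * descriptor back on that notifier's waiting list for a possible re-probe.
 */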