// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

/* Call the notifier's optional ->bound() op; treat a missing op as success. */
static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n,
					  struct v4l2_subdev *subdev,
					  struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->bound)
		return 0;

	return n->ops->bound(n, subdev, asd);
}

/* Call the notifier's optional ->unbind() op; no-op if it isn't provided. */
static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n,
					    struct v4l2_subdev *subdev,
					    struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->unbind)
		return;

	n->ops->unbind(n, subdev, asd);
}

/* Call the notifier's optional ->complete() op; treat a missing op as success. */
static int v4l2_async_notifier_call_complete(struct v4l2_async_notifier *n)
{
	if (!n->ops || !n->ops->complete)
		return 0;

	return n->ops->complete(n);
}

/*
 * Match a subdev against an I2C async descriptor: the subdev's device must be
 * an I2C client on the expected adapter at the expected address. Always false
 * when I2C support is not built in.
 */
static bool match_i2c(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);

	return client &&
		asd->match.i2c.adapter_id == client->adapter->nr &&
		asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}

/* Match a subdev against a device-name async descriptor. */
static bool match_devname(struct v4l2_async_notifier *notifier,
			  struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	return !strcmp(asd->match.device_name, dev_name(sd->dev));
}

/*
 * Match a subdev against a fwnode async descriptor. Handles direct fwnode
 * equality, the subdev fwnode's secondary, and the "heterogeneous" case where
 * one side provides an endpoint fwnode and the other a device fwnode.
 */
static bool match_fwnode(struct v4l2_async_notifier *notifier,
			 struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	struct fwnode_handle *other_fwnode;
	struct fwnode_handle *dev_fwnode;
	bool asd_fwnode_is_ep;
	bool sd_fwnode_is_ep;
	struct device *dev;

	/*
	 * Both the subdev and the async subdev can provide either an endpoint
	 * fwnode or a device fwnode. Start with the simple case of direct
	 * fwnode matching.
	 */
	if (sd->fwnode == asd->match.fwnode)
		return true;

	/*
	 * Check the same situation for any possible secondary assigned to the
	 * subdev's fwnode
	 */
	if (!IS_ERR_OR_NULL(sd->fwnode->secondary) &&
	    sd->fwnode->secondary == asd->match.fwnode)
		return true;

	/*
	 * Otherwise, check if the sd fwnode and the asd fwnode refer to an
	 * endpoint or a device. If they're of the same type, there's no match.
	 * Technically speaking this checks if the nodes refer to a connected
	 * endpoint, which is the simplest check that works for both OF and
	 * ACPI. This won't make a difference, as drivers should not try to
	 * match unconnected endpoints.
	 */
	sd_fwnode_is_ep = fwnode_graph_is_endpoint(sd->fwnode);
	asd_fwnode_is_ep = fwnode_graph_is_endpoint(asd->match.fwnode);

	if (sd_fwnode_is_ep == asd_fwnode_is_ep)
		return false;

	/*
	 * The sd and asd fwnodes are of different types. Get the device fwnode
	 * parent of the endpoint fwnode, and compare it with the other fwnode.
	 */
	if (sd_fwnode_is_ep) {
		dev_fwnode = fwnode_graph_get_port_parent(sd->fwnode);
		other_fwnode = asd->match.fwnode;
	} else {
		dev_fwnode = fwnode_graph_get_port_parent(asd->match.fwnode);
		other_fwnode = sd->fwnode;
	}

	/*
	 * Only pointer equality is needed below, so the reference taken by
	 * fwnode_graph_get_port_parent() can be dropped right away.
	 */
	fwnode_handle_put(dev_fwnode);

	if (dev_fwnode != other_fwnode)
		return false;

	/*
	 * We have a heterogeneous match. Retrieve the struct device of the side
	 * that matched on a device fwnode to print its driver name.
	 */
	if (sd_fwnode_is_ep)
		dev = notifier->v4l2_dev ? notifier->v4l2_dev->dev
		    : notifier->sd->dev;
	else
		dev = sd->dev;

	if (dev && dev->driver) {
		if (sd_fwnode_is_ep)
			dev_warn(dev, "Driver %s uses device fwnode, incorrect match may occur\n",
				 dev->driver->name);
		dev_notice(dev, "Consider updating driver %s to match on endpoints\n",
			   dev->driver->name);
	}

	return true;
}

/* Global state, all protected by list_lock:
 * subdev_list  - registered subdevs not yet claimed by any notifier,
 * notifier_list - all registered notifiers.
 */
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

/*
 * Find the first async descriptor on the notifier's waiting list that matches
 * the given subdev, or NULL if none does.
 */
static struct v4l2_async_subdev *
v4l2_async_find_match(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd, struct v4l2_async_subdev *asd);
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* bus_type has been verified valid before */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_DEVNAME:
			match = match_devname;
			break;
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_FWNODE:
			match = match_fwnode;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(notifier, sd, asd))
			return asd;
	}

	return NULL;
}

/* Compare two async sub-device descriptors for equivalence */
static bool asd_equal(struct v4l2_async_subdev *asd_x,
		      struct v4l2_async_subdev *asd_y)
{
	if (asd_x->match_type != asd_y->match_type)
		return false;

	switch (asd_x->match_type) {
	case V4L2_ASYNC_MATCH_DEVNAME:
		return strcmp(asd_x->match.device_name,
			      asd_y->match.device_name) == 0;
	case V4L2_ASYNC_MATCH_I2C:
		return asd_x->match.i2c.adapter_id ==
			asd_y->match.i2c.adapter_id &&
			asd_x->match.i2c.address ==
			asd_y->match.i2c.address;
	case V4L2_ASYNC_MATCH_FWNODE:
		return asd_x->match.fwnode == asd_y->match.fwnode;
	default:
		break;
	}

	return false;
}

/* Find the sub-device notifier registered by a sub-device driver. */
static struct v4l2_async_notifier *
v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *n;

	list_for_each_entry(n, &notifier_list, list)
		if (n->sd == sd)
			return n;

	return NULL;
}

/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *
v4l2_async_notifier_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
	/* Only the root of a notifier tree carries the v4l2_device. */
	while (notifier->parent)
		notifier = notifier->parent;

	return notifier->v4l2_dev;
}

/*
 * Return true if all child sub-device notifiers are complete, false otherwise.
 */
static bool
v4l2_async_notifier_can_complete(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd;

	if (!list_empty(&notifier->waiting))
		return false;

	/* Recurse into the notifiers of already-bound subdevs, if any. */
	list_for_each_entry(sd, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier &&
		    !v4l2_async_notifier_can_complete(subdev_notifier))
			return false;
	}

	return true;
}

/*
 * Complete the master notifier if possible. This is done when all async
 * sub-devices have been bound; v4l2_device is also available then.
 */
static int
v4l2_async_notifier_try_complete(struct v4l2_async_notifier *notifier)
{
	/* Quick check whether there are still more sub-devices here. */
	if (!list_empty(&notifier->waiting))
		return 0;

	/* Check the entire notifier tree; find the root notifier first. */
	while (notifier->parent)
		notifier = notifier->parent;

	/* This is root if it has v4l2_dev. */
	if (!notifier->v4l2_dev)
		return 0;

	/* Is everything ready? */
	if (!v4l2_async_notifier_can_complete(notifier))
		return 0;

	return v4l2_async_notifier_call_complete(notifier);
}

static int
v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier);

/*
 * Bind a matched subdev to a notifier: register it with the v4l2_device, call
 * ->bound(), move it from the waiting/global lists to the notifier's done
 * list, and recurse into the subdev's own notifier if it has one.
 */
static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
				   struct v4l2_device *v4l2_dev,
				   struct v4l2_subdev *sd,
				   struct v4l2_async_subdev *asd)
{
	struct v4l2_async_notifier *subdev_notifier;
	int ret;

	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret < 0)
		return ret;

	ret = v4l2_async_notifier_call_bound(notifier, sd, asd);
	if (ret < 0) {
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	/*
	 * See if the sub-device has a notifier. If not, return here.
	 * A subdev notifier that already has a parent was handled before.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (!subdev_notifier || subdev_notifier->parent)
		return 0;

	/*
	 * Proceed with checking for the sub-device notifier's async
	 * sub-devices, and return the result. The error will be handled by the
	 * caller.
	 */
	subdev_notifier->parent = notifier;

	return v4l2_async_notifier_try_all_subdevs(subdev_notifier);
}

/* Test all async sub-devices in a notifier for a match. */
static int
v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_device *v4l2_dev =
		v4l2_async_notifier_find_v4l2_dev(notifier);
	struct v4l2_subdev *sd;

	if (!v4l2_dev)
		return 0;

again:
	list_for_each_entry(sd, &subdev_list, async_list) {
		struct v4l2_async_subdev *asd;
		int ret;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret < 0)
			return ret;

		/*
		 * v4l2_async_match_notify() may lead to registering a
		 * new notifier and thus changing the async subdevs
		 * list. In order to proceed safely from here, restart
		 * parsing the list from the beginning.
		 */
		goto again;
	}

	return 0;
}

/* Detach a subdev from its notifier and drop it from all async lists. */
static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/*
	 * Subdevice driver will reprobe and put the subdev back
	 * onto the list
	 */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
}

/* Unbind all sub-devices in the notifier tree. */
static void
v4l2_async_notifier_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		/* Depth-first: unbind the subdev's own children first. */
		if (subdev_notifier)
			v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);

		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
		v4l2_async_cleanup(sd);

		/* Return the subdev to the global pool of unbound subdevs. */
		list_move(&sd->async_list, &subdev_list);
	}

	notifier->parent = NULL;
}

/* See if an async sub-device can be found in a notifier's lists. */
static bool
__v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
				       struct v4l2_async_subdev *asd)
{
	struct v4l2_async_subdev *asd_y;
	struct v4l2_subdev *sd;

	list_for_each_entry(asd_y, &notifier->waiting, list)
		if (asd_equal(asd, asd_y))
			return true;

	list_for_each_entry(sd, &notifier->done, async_list) {
		if (WARN_ON(!sd->asd))
			continue;

		if (asd_equal(asd, sd->asd))
			return true;
	}

	return false;
}

/*
 * Find out whether an async sub-device was set up already or
 * whether it exists in a given notifier before @this_index.
 * If @this_index < 0, search the notifier's entire @asd_list.
 */
static bool
v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
				     struct v4l2_async_subdev *asd,
				     int this_index)
{
	struct v4l2_async_subdev *asd_y;
	int j = 0;

	lockdep_assert_held(&list_lock);

	/* Check that an asd is not being added more than once. */
	list_for_each_entry(asd_y, &notifier->asd_list, asd_list) {
		if (this_index >= 0 && j++ >= this_index)
			break;
		if (asd_equal(asd, asd_y))
			return true;
	}

	/* Check that an asd does not exist in other notifiers. */
	list_for_each_entry(notifier, &notifier_list, list)
		if (__v4l2_async_notifier_has_async_subdev(notifier, asd))
			return true;

	return false;
}

/*
 * Validate an async descriptor before it is added to a notifier: the match
 * type must be one of the supported kinds and the descriptor must not already
 * be present in this or any other registered notifier.
 */
static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
					 struct v4l2_async_subdev *asd,
					 int this_index)
{
	struct device *dev =
		notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;

	if (!asd)
		return -EINVAL;

	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_DEVNAME:
	case V4L2_ASYNC_MATCH_I2C:
	case V4L2_ASYNC_MATCH_FWNODE:
		if (v4l2_async_notifier_has_async_subdev(notifier, asd,
							 this_index)) {
			dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
			return -EEXIST;
		}
		break;
	default:
		dev_err(dev, "Invalid match type %u on %p\n",
			asd->match_type, asd);
		return -EINVAL;
	}

	return 0;
}

/* Initialise a notifier before descriptors can be added to it. */
void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier)
{
	INIT_LIST_HEAD(&notifier->asd_list);
}
EXPORT_SYMBOL(v4l2_async_notifier_init);

/*
 * Common registration path: validate and queue all descriptors, try to bind
 * any already-registered subdevs, attempt completion, and publish the
 * notifier on the global list. Unbinds everything again on failure.
 */
static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd;
	int ret, i = 0;

	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	mutex_lock(&list_lock);

	list_for_each_entry(asd, &notifier->asd_list, asd_list) {
		ret = v4l2_async_notifier_asd_valid(notifier, asd, i++);
		if (ret)
			goto err_unlock;

		list_add_tail(&asd->list, &notifier->waiting);
	}

	ret = v4l2_async_notifier_try_all_subdevs(notifier);
	if (ret < 0)
		goto err_unbind;

	ret = v4l2_async_notifier_try_complete(notifier);
	if (ret < 0)
		goto err_unbind;

	/* Keep also completed notifiers on the list */
	list_add(&notifier->list, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * On failure, unbind all sub-devices registered through this notifier.
	 */
	v4l2_async_notifier_unbind_all_subdevs(notifier);

err_unlock:
	mutex_unlock(&list_lock);

	return ret;
}

/* Register a root notifier, owned by a v4l2_device (bridge driver). */
int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
				 struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!v4l2_dev || notifier->sd))
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;

	ret = __v4l2_async_notifier_register(notifier);
	if (ret)
		notifier->v4l2_dev = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_notifier_register);

/* Register a notifier owned by a sub-device (for cascaded subdev trees). */
int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
					struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!sd || notifier->v4l2_dev))
		return -EINVAL;

	notifier->sd = sd;

	ret = __v4l2_async_notifier_register(notifier);
	if (ret)
		notifier->sd = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_subdev_notifier_register);

/* Unregister a notifier; caller must hold list_lock. Safe on an
 * unregistered notifier (owner pointers both NULL).
 */
static void
__v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
		return;

	v4l2_async_notifier_unbind_all_subdevs(notifier);

	notifier->sd = NULL;
	notifier->v4l2_dev = NULL;

	list_del(&notifier->list);
}

/* Public unregistration entry point; takes list_lock. */
void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_unregister(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_notifier_unregister);

/*
 * Free all async descriptors owned by the notifier. The asd_list.next check
 * guards against a notifier that was never initialised.
 */
static void __v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd, *tmp;

	if (!notifier || !notifier->asd_list.next)
		return;

	list_for_each_entry_safe(asd, tmp, &notifier->asd_list, asd_list) {
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_FWNODE:
			/* Drop the reference taken when the asd was added. */
			fwnode_handle_put(asd->match.fwnode);
			break;
		default:
			break;
		}

		list_del(&asd->asd_list);
		kfree(asd);
	}
}

/* Public cleanup entry point; takes list_lock. */
void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_cleanup(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_cleanup);

/*
 * Add a pre-allocated async descriptor to a notifier, rejecting duplicates
 * across all registered notifiers.
 */
int v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier,
				   struct v4l2_async_subdev *asd)
{
	int ret;

	mutex_lock(&list_lock);

	ret = v4l2_async_notifier_asd_valid(notifier, asd, -1);
	if (ret)
		goto unlock;

	list_add_tail(&asd->asd_list, &notifier->asd_list);

unlock:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_subdev);

/*
 * Allocate and add a fwnode-matched descriptor. Takes a reference on @fwnode;
 * the reference and the allocation are released in the cleanup path (or here
 * on failure). asd_struct_size allows callers to embed the asd in a larger
 * driver-private struct.
 */
struct v4l2_async_subdev *
v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
				      struct fwnode_handle *fwnode,
				      unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
	asd->match.fwnode = fwnode_handle_get(fwnode);

	ret = v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		fwnode_handle_put(fwnode);
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_fwnode_subdev);

/*
 * Fill in and add a caller-allocated descriptor matching the device at the
 * remote end of @endpoint. On success the descriptor owns a reference to the
 * remote fwnode; the caller keeps ownership of @asd itself.
 */
int
v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif,
					     struct fwnode_handle *endpoint,
					     struct v4l2_async_subdev *asd)
{
	struct fwnode_handle *remote;
	int ret;

	remote = fwnode_graph_get_remote_port_parent(endpoint);
	if (!remote)
		return -ENOTCONN;

	asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
	asd->match.fwnode = remote;

	ret = v4l2_async_notifier_add_subdev(notif, asd);
	if (ret)
		fwnode_handle_put(remote);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_fwnode_remote_subdev);

/* Allocate and add an I2C adapter/address-matched descriptor. */
struct v4l2_async_subdev *
v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
				   int adapter_id, unsigned short address,
				   unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_I2C;
	asd->match.i2c.adapter_id = adapter_id;
	asd->match.i2c.address = address;

	ret = v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_i2c_subdev);

/*
 * Allocate and add a device-name-matched descriptor. @device_name is stored
 * by pointer, not copied, so it must outlive the descriptor.
 */
struct v4l2_async_subdev *
v4l2_async_notifier_add_devname_subdev(struct v4l2_async_notifier *notifier,
				       const char *device_name,
				       unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_DEVNAME;
	asd->match.device_name = device_name;

	ret = v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_devname_subdev);

/*
 * Register a subdev with the async framework: try to match it against every
 * registered notifier, completing the matching notifier if possible, else
 * park it on the global list to wait for a future notifier registration.
 */
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *subdev_notifier;
	struct v4l2_async_notifier *notifier;
	int ret;

	/*
	 * No reference taken. The reference is held by the device
	 * (struct v4l2_subdev.dev), and async sub-device does not
	 * exist independently of the device at any point of time.
	 */
	if (!sd->fwnode && sd->dev)
		sd->fwnode = dev_fwnode(sd->dev);

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_device *v4l2_dev =
			v4l2_async_notifier_find_v4l2_dev(notifier);
		struct v4l2_async_subdev *asd;

		/* Skip subtrees whose root has no v4l2_device yet. */
		if (!v4l2_dev)
			continue;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret)
			goto err_unbind;

		ret = v4l2_async_notifier_try_complete(notifier);
		if (ret)
			goto err_unbind;

		goto out_unlock;
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

out_unlock:
	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * Complete failed. Unbind the sub-devices bound through registering
	 * this async sub-device.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (subdev_notifier)
		v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);

	if (sd->asd)
		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);

/*
 * Undo v4l2_async_register_subdev(): tear down the subdev's own notifier,
 * return its descriptor to the owning notifier's waiting list, and notify
 * that owner via ->unbind(). Safe to call on a never-registered subdev
 * (async_list.next is NULL then).
 */
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	if (!sd->async_list.next)
		return;

	mutex_lock(&list_lock);

	__v4l2_async_notifier_unregister(sd->subdev_notifier);
	__v4l2_async_notifier_cleanup(sd->subdev_notifier);
	kfree(sd->subdev_notifier);
	sd->subdev_notifier = NULL;

	if (sd->asd) {
		struct v4l2_async_notifier *notifier = sd->notifier;

		/* The owner may match this descriptor again later. */
		list_add(&sd->asd->list, &notifier->waiting);

		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
	}

	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);

/* Print one still-unmatched async descriptor for the debugfs dump. */
static void print_waiting_subdev(struct seq_file *s,
				 struct v4l2_async_subdev *asd)
{
	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_DEVNAME:
		seq_printf(s, " [devname] dev=%s\n", asd->match.device_name);
		break;
	case V4L2_ASYNC_MATCH_I2C:
		seq_printf(s, " [i2c] dev=%d-%04x\n", asd->match.i2c.adapter_id,
			   asd->match.i2c.address);
		break;
	case V4L2_ASYNC_MATCH_FWNODE: {
		struct fwnode_handle *devnode, *fwnode = asd->match.fwnode;

		/* For endpoint matches, report the owning device node. */
		devnode = fwnode_graph_is_endpoint(fwnode) ?
			  fwnode_graph_get_port_parent(fwnode) :
			  fwnode_handle_get(fwnode);

		seq_printf(s, " [fwnode] dev=%s, node=%pfw\n",
			   devnode->dev ? dev_name(devnode->dev) : "nil",
			   fwnode);

		fwnode_handle_put(devnode);
		break;
	}
	}
}

/* Best-effort human-readable name of a notifier's owner for debugfs. */
static const char *
v4l2_async_notifier_name(struct v4l2_async_notifier *notifier)
{
	if (notifier->v4l2_dev)
		return notifier->v4l2_dev->name;
	else if (notifier->sd)
		return notifier->sd->name;
	else
		return "nil";
}

/* debugfs show: dump every notifier's still-waiting async descriptors. */
static int pending_subdevs_show(struct seq_file *s, void *data)
{
	struct v4l2_async_notifier *notif;
	struct v4l2_async_subdev *asd;

	mutex_lock(&list_lock);

	list_for_each_entry(notif, &notifier_list, list) {
		seq_printf(s, "%s:\n", v4l2_async_notifier_name(notif));
		list_for_each_entry(asd, &notif->waiting, list)
			print_waiting_subdev(s, asd);
	}

	mutex_unlock(&list_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pending_subdevs);

/* Create the v4l2-async debugfs file under the given directory. */
void v4l2_async_debug_init(struct dentry *debugfs_dir)
{
	debugfs_create_file("pending_async_subdevices", 0444, debugfs_dir, NULL,
			    &pending_subdevs_fops);
}