/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>

static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n,
					  struct v4l2_subdev *subdev,
					  struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->bound)
		return 0;

	return n->ops->bound(n, subdev, asd);
}

static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n,
					    struct v4l2_subdev *subdev,
					    struct v4l2_async_subdev *asd)
{
	if (!n->ops || !n->ops->unbind)
		return;

	n->ops->unbind(n, subdev, asd);
}

static int v4l2_async_notifier_call_complete(struct v4l2_async_notifier *n)
{
	if (!n->ops || !n->ops->complete)
		return 0;

	return n->ops->complete(n);
}
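
/*
 * Illustrative sketch (not part of the original file): the callbacks invoked
 * through the wrappers above are supplied by the notifier's owner via
 * struct v4l2_async_notifier_operations, along these lines:
 *
 *	static const struct v4l2_async_notifier_operations my_async_ops = {
 *		.bound = my_async_bound,
 *		.unbind = my_async_unbind,
 *		.complete = my_async_complete,
 *	};
 *
 * "my_async_ops" and the three handlers are hypothetical names; the handlers
 * must match the prototypes used by the wrappers above. Any callback left
 * NULL is simply skipped, as the wrappers show.
 */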

static bool match_i2c(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(sd->dev);

	return client &&
		asd->match.i2c.adapter_id == client->adapter->nr &&
		asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}

static bool match_devname(struct v4l2_subdev *sd,
			  struct v4l2_async_subdev *asd)
{
	return !strcmp(asd->match.device_name, dev_name(sd->dev));
}

static bool match_fwnode(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	return sd->fwnode == asd->match.fwnode;
}

static bool match_custom(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
	if (!asd->match.custom.match)
		/* Match always */
		return true;

	return asd->match.custom.match(sd->dev, asd);
}

static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

static struct v4l2_async_subdev *
v4l2_async_find_match(struct v4l2_async_notifier *notifier,
		      struct v4l2_subdev *sd)
{
	bool (*match)(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd);
	struct v4l2_async_subdev *asd;

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* bus_type has been verified valid before */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_CUSTOM:
			match = match_custom;
			break;
		case V4L2_ASYNC_MATCH_DEVNAME:
			match = match_devname;
			break;
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_FWNODE:
			match = match_fwnode;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(sd, asd))
			return asd;
	}

	return NULL;
}

/* Compare two async sub-device descriptors for equivalence */
static bool asd_equal(struct v4l2_async_subdev *asd_x,
		      struct v4l2_async_subdev *asd_y)
{
	if (asd_x->match_type != asd_y->match_type)
		return false;

	switch (asd_x->match_type) {
	case V4L2_ASYNC_MATCH_DEVNAME:
		return strcmp(asd_x->match.device_name,
			      asd_y->match.device_name) == 0;
	case V4L2_ASYNC_MATCH_I2C:
		return asd_x->match.i2c.adapter_id ==
			asd_y->match.i2c.adapter_id &&
			asd_x->match.i2c.address ==
			asd_y->match.i2c.address;
	case V4L2_ASYNC_MATCH_FWNODE:
		return asd_x->match.fwnode == asd_y->match.fwnode;
	default:
		break;
	}

	return false;
}

/* Find the sub-device notifier registered by a sub-device driver. */
static struct v4l2_async_notifier *
v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *n;

	list_for_each_entry(n, &notifier_list, list)
		if (n->sd == sd)
			return n;

	return NULL;
}

/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *
v4l2_async_notifier_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
	while (notifier->parent)
		notifier = notifier->parent;

	return notifier->v4l2_dev;
}

/*
 * Return true if all child sub-device notifiers are complete, false otherwise.
 */
static bool
v4l2_async_notifier_can_complete(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd;

	if (!list_empty(&notifier->waiting))
		return false;

	list_for_each_entry(sd, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier &&
		    !v4l2_async_notifier_can_complete(subdev_notifier))
			return false;
	}

	return true;
}

/*
 * Complete the master notifier if possible. This is done when all async
 * sub-devices have been bound; v4l2_device is also available then.
 */
static int
v4l2_async_notifier_try_complete(struct v4l2_async_notifier *notifier)
{
	/* Quick check whether there are still more sub-devices here. */
	if (!list_empty(&notifier->waiting))
		return 0;

	/* Check the entire notifier tree; find the root notifier first. */
	while (notifier->parent)
		notifier = notifier->parent;

	/* This is root if it has v4l2_dev. */
	if (!notifier->v4l2_dev)
		return 0;

	/* Is everything ready? */
	if (!v4l2_async_notifier_can_complete(notifier))
		return 0;

	return v4l2_async_notifier_call_complete(notifier);
}

static int
v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier);

static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
				   struct v4l2_device *v4l2_dev,
				   struct v4l2_subdev *sd,
				   struct v4l2_async_subdev *asd)
{
	struct v4l2_async_notifier *subdev_notifier;
	int ret;

	ret = v4l2_device_register_subdev(v4l2_dev, sd);
	if (ret < 0)
		return ret;

	ret = v4l2_async_notifier_call_bound(notifier, sd, asd);
	if (ret < 0) {
		v4l2_device_unregister_subdev(sd);
		return ret;
	}

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	/*
	 * See if the sub-device has a notifier. If not, return here.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (!subdev_notifier || subdev_notifier->parent)
		return 0;

	/*
	 * Proceed with checking for the sub-device notifier's async
	 * sub-devices, and return the result. The error will be handled by the
	 * caller.
	 */
	subdev_notifier->parent = notifier;

	return v4l2_async_notifier_try_all_subdevs(subdev_notifier);
}

/* Test all async sub-devices in a notifier for a match. */
static int
v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_device *v4l2_dev =
		v4l2_async_notifier_find_v4l2_dev(notifier);
	struct v4l2_subdev *sd;

	if (!v4l2_dev)
		return 0;

again:
	list_for_each_entry(sd, &subdev_list, async_list) {
		struct v4l2_async_subdev *asd;
		int ret;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret < 0)
			return ret;

		/*
		 * v4l2_async_match_notify() may lead to registering a
		 * new notifier and thus changing the async subdevs
		 * list. In order to proceed safely from here, restart
		 * parsing the list from the beginning.
		 */
		goto again;
	}

	return 0;
}

static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/*
	 * Subdevice driver will reprobe and put the subdev back
	 * onto the list
	 */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
}

/* Unbind all sub-devices in the notifier tree. */
static void
v4l2_async_notifier_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier)
			v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);

		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
		v4l2_async_cleanup(sd);

		list_move(&sd->async_list, &subdev_list);
	}

	notifier->parent = NULL;
}

/* See if an async sub-device can be found in a notifier's lists. */
static bool
__v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
				       struct v4l2_async_subdev *asd)
{
	struct v4l2_async_subdev *asd_y;
	struct v4l2_subdev *sd;

	list_for_each_entry(asd_y, &notifier->waiting, list)
		if (asd_equal(asd, asd_y))
			return true;

	list_for_each_entry(sd, &notifier->done, async_list) {
		if (WARN_ON(!sd->asd))
			continue;

		if (asd_equal(asd, sd->asd))
			return true;
	}

	return false;
}

/*
 * Find out whether an async sub-device was set up already or
 * whether it exists in a given notifier before @this_index.
 * If @this_index < 0, search the notifier's entire @asd_list.
 */
static bool
v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
				     struct v4l2_async_subdev *asd,
				     int this_index)
{
	struct v4l2_async_subdev *asd_y;
	int j = 0;

	lockdep_assert_held(&list_lock);

	/* Check that an asd is not being added more than once. */
	list_for_each_entry(asd_y, &notifier->asd_list, asd_list) {
		if (this_index >= 0 && j++ >= this_index)
			break;
		if (asd_equal(asd, asd_y))
			return true;
	}

	/* Check that an asd does not exist in other notifiers. */
	list_for_each_entry(notifier, &notifier_list, list)
		if (__v4l2_async_notifier_has_async_subdev(notifier, asd))
			return true;

	return false;
}

static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
					 struct v4l2_async_subdev *asd,
					 int this_index)
{
	struct device *dev =
		notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;

	if (!asd)
		return -EINVAL;

	switch (asd->match_type) {
	case V4L2_ASYNC_MATCH_CUSTOM:
	case V4L2_ASYNC_MATCH_DEVNAME:
	case V4L2_ASYNC_MATCH_I2C:
	case V4L2_ASYNC_MATCH_FWNODE:
		if (v4l2_async_notifier_has_async_subdev(notifier, asd,
							 this_index)) {
			dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
			return -EEXIST;
		}
		break;
	default:
		dev_err(dev, "Invalid match type %u on %p\n",
			asd->match_type, asd);
		return -EINVAL;
	}

	return 0;
}

void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier)
{
	INIT_LIST_HEAD(&notifier->asd_list);
}
EXPORT_SYMBOL(v4l2_async_notifier_init);

static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd;
	int ret, i = 0;

	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	mutex_lock(&list_lock);

	list_for_each_entry(asd, &notifier->asd_list, asd_list) {
		ret = v4l2_async_notifier_asd_valid(notifier, asd, i++);
		if (ret)
			goto err_unlock;

		list_add_tail(&asd->list, &notifier->waiting);
	}

	ret = v4l2_async_notifier_try_all_subdevs(notifier);
	if (ret < 0)
		goto err_unbind;

	ret = v4l2_async_notifier_try_complete(notifier);
	if (ret < 0)
		goto err_unbind;

	/* Keep also completed notifiers on the list */
	list_add(&notifier->list, &notifier_list);

	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * On failure, unbind all sub-devices registered through this notifier.
	 */
	v4l2_async_notifier_unbind_all_subdevs(notifier);

err_unlock:
	mutex_unlock(&list_lock);

	return ret;
}

int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
				 struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!v4l2_dev || notifier->sd))
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;

	ret = __v4l2_async_notifier_register(notifier);
	if (ret)
		notifier->v4l2_dev = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_notifier_register);

int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
					struct v4l2_async_notifier *notifier)
{
	int ret;

	if (WARN_ON(!sd || notifier->v4l2_dev))
		return -EINVAL;

	notifier->sd = sd;

	ret = __v4l2_async_notifier_register(notifier);
	if (ret)
		notifier->sd = NULL;

	return ret;
}
EXPORT_SYMBOL(v4l2_async_subdev_notifier_register);

static void
__v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
		return;

	v4l2_async_notifier_unbind_all_subdevs(notifier);

	notifier->sd = NULL;
	notifier->v4l2_dev = NULL;

	list_del(&notifier->list);
}

void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_unregister(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_notifier_unregister);

static void __v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
	struct v4l2_async_subdev *asd, *tmp;

	if (!notifier)
		return;

	list_for_each_entry_safe(asd, tmp, &notifier->asd_list, asd_list) {
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_FWNODE:
			fwnode_handle_put(asd->match.fwnode);
			break;
		default:
			break;
		}

		list_del(&asd->asd_list);
		kfree(asd);
	}
}

void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_cleanup(notifier);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_cleanup);

int v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier,
				   struct v4l2_async_subdev *asd)
{
	int ret;

	mutex_lock(&list_lock);

	ret = v4l2_async_notifier_asd_valid(notifier, asd, -1);
	if (ret)
		goto unlock;

	list_add_tail(&asd->asd_list, &notifier->asd_list);

unlock:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_subdev);

struct v4l2_async_subdev *
v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
				      struct fwnode_handle *fwnode,
				      unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
	asd->match.fwnode = fwnode;

	ret = v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_fwnode_subdev);
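
/*
 * Note on reference ownership: on success the fwnode reference stored in
 * asd->match.fwnode above is owned by the notifier and released with
 * fwnode_handle_put() in __v4l2_async_notifier_cleanup(). Callers are thus
 * expected to pass a reference they hold, e.g. one obtained with
 * fwnode_handle_get(); if adding the asd fails, the caller still owns the
 * reference and must drop it itself.
 */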

struct v4l2_async_subdev *
v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
				   int adapter_id, unsigned short address,
				   unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_I2C;
	asd->match.i2c.adapter_id = adapter_id;
	asd->match.i2c.address = address;

	ret = v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_i2c_subdev);

struct v4l2_async_subdev *
v4l2_async_notifier_add_devname_subdev(struct v4l2_async_notifier *notifier,
				       const char *device_name,
				       unsigned int asd_struct_size)
{
	struct v4l2_async_subdev *asd;
	int ret;

	asd = kzalloc(asd_struct_size, GFP_KERNEL);
	if (!asd)
		return ERR_PTR(-ENOMEM);

	asd->match_type = V4L2_ASYNC_MATCH_DEVNAME;
	asd->match.device_name = device_name;

	ret = v4l2_async_notifier_add_subdev(notifier, asd);
	if (ret) {
		kfree(asd);
		return ERR_PTR(ret);
	}

	return asd;
}
EXPORT_SYMBOL_GPL(v4l2_async_notifier_add_devname_subdev);

int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *subdev_notifier;
	struct v4l2_async_notifier *notifier;
	int ret;

	/*
	 * No reference taken. The reference is held by the device
	 * (struct v4l2_subdev.dev), and async sub-device does not
	 * exist independently of the device at any point of time.
	 */
	if (!sd->fwnode && sd->dev)
		sd->fwnode = dev_fwnode(sd->dev);

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_device *v4l2_dev =
			v4l2_async_notifier_find_v4l2_dev(notifier);
		struct v4l2_async_subdev *asd;

		if (!v4l2_dev)
			continue;

		asd = v4l2_async_find_match(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
		if (ret)
			goto err_unbind;

		ret = v4l2_async_notifier_try_complete(notifier);
		if (ret)
			goto err_unbind;

		goto out_unlock;
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

out_unlock:
	mutex_unlock(&list_lock);

	return 0;

err_unbind:
	/*
	 * Complete failed. Unbind the sub-devices bound through registering
	 * this async sub-device.
	 */
	subdev_notifier = v4l2_async_find_subdev_notifier(sd);
	if (subdev_notifier)
		v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);

	if (sd->asd)
		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);

void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	mutex_lock(&list_lock);

	__v4l2_async_notifier_unregister(sd->subdev_notifier);
	__v4l2_async_notifier_cleanup(sd->subdev_notifier);
	kfree(sd->subdev_notifier);
	sd->subdev_notifier = NULL;

	if (sd->asd) {
		struct v4l2_async_notifier *notifier = sd->notifier;

		list_add(&sd->asd->list, &notifier->waiting);

		v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
	}

	v4l2_async_cleanup(sd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);
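
/*
 * Usage sketch (illustrative only; "priv", "my_async_ops" and the endpoint
 * fwnode "ep" are hypothetical names belonging to the caller, not this file).
 *
 * A bridge driver typically prepares a notifier and registers it against its
 * v4l2_device:
 *
 *	struct v4l2_async_subdev *asd;
 *	int ret;
 *
 *	v4l2_async_notifier_init(&priv->notifier);
 *
 *	asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier, ep,
 *						    sizeof(*asd));
 *	if (IS_ERR(asd)) {
 *		fwnode_handle_put(ep);
 *		return PTR_ERR(asd);
 *	}
 *
 *	priv->notifier.ops = &my_async_ops;
 *
 *	ret = v4l2_async_notifier_register(&priv->v4l2_dev, &priv->notifier);
 *	if (ret) {
 *		v4l2_async_notifier_cleanup(&priv->notifier);
 *		return ret;
 *	}
 *
 * The matching sensor (sub-device) driver only calls
 * v4l2_async_register_subdev(sd) once its v4l2_subdev has been initialised;
 * matching, bound() and the final complete() callback are then handled by
 * the code above.
 */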