// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2013, NVIDIA Corporation
 */

#include <linux/debugfs.h>
#include <linux/host1x.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/of_device.h>

#include "bus.h"
#include "dev.h"

static DEFINE_MUTEX(clients_lock);
static LIST_HEAD(clients);

static DEFINE_MUTEX(drivers_lock);
static LIST_HEAD(drivers);

static DEFINE_MUTEX(devices_lock);
static LIST_HEAD(devices);

struct host1x_subdev {
        struct host1x_client *client;
        struct device_node *np;
        struct list_head list;
};

/**
 * host1x_subdev_add() - add a new subdevice with an associated device node
 * @device: host1x device to add the subdevice to
 * @driver: host1x driver containing the subdevices
 * @np: device node
 */
static int host1x_subdev_add(struct host1x_device *device,
                             struct host1x_driver *driver,
                             struct device_node *np)
{
        struct host1x_subdev *subdev;
        struct device_node *child;
        int err;

        subdev = kzalloc(sizeof(*subdev), GFP_KERNEL);
        if (!subdev)
                return -ENOMEM;

        INIT_LIST_HEAD(&subdev->list);
        subdev->np = of_node_get(np);

        mutex_lock(&device->subdevs_lock);
        list_add_tail(&subdev->list, &device->subdevs);
        mutex_unlock(&device->subdevs_lock);

        /* recursively add children */
        for_each_child_of_node(np, child) {
                if (of_match_node(driver->subdevs, child) &&
                    of_device_is_available(child)) {
                        err = host1x_subdev_add(device, driver, child);
                        if (err < 0) {
                                /* XXX cleanup? */
                                of_node_put(child);
                                return err;
                        }
                }
        }

        return 0;
}

/**
 * host1x_subdev_del() - remove subdevice
 * @subdev: subdevice to remove
 */
static void host1x_subdev_del(struct host1x_subdev *subdev)
{
        list_del(&subdev->list);
        of_node_put(subdev->np);
        kfree(subdev);
}

/**
 * host1x_device_parse_dt() - scan device tree and add matching subdevices
 * @device: host1x logical device
 * @driver: host1x driver
 */
static int host1x_device_parse_dt(struct host1x_device *device,
                                  struct host1x_driver *driver)
{
        struct device_node *np;
        int err;

        for_each_child_of_node(device->dev.parent->of_node, np) {
                if (of_match_node(driver->subdevs, np) &&
                    of_device_is_available(np)) {
                        err = host1x_subdev_add(device, driver, np);
                        if (err < 0) {
                                of_node_put(np);
                                return err;
                        }
                }
        }

        return 0;
}

static void host1x_subdev_register(struct host1x_device *device,
                                   struct host1x_subdev *subdev,
                                   struct host1x_client *client)
{
        int err;

        /*
         * Move the subdevice to the list of active (registered) subdevices
         * and associate it with a client. At the same time, associate the
         * client with its parent device.
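         *
         * Once the last idle subdevice has been moved to the active list,
         * all of the device's subdevices have been matched to clients and
         * the composite device is added below.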
         */
        mutex_lock(&device->subdevs_lock);
        mutex_lock(&device->clients_lock);
        list_move_tail(&client->list, &device->clients);
        list_move_tail(&subdev->list, &device->active);
        client->host = &device->dev;
        subdev->client = client;
        mutex_unlock(&device->clients_lock);
        mutex_unlock(&device->subdevs_lock);

        if (list_empty(&device->subdevs)) {
                err = device_add(&device->dev);
                if (err < 0)
                        dev_err(&device->dev, "failed to add: %d\n", err);
                else
                        device->registered = true;
        }
}

static void __host1x_subdev_unregister(struct host1x_device *device,
                                       struct host1x_subdev *subdev)
{
        struct host1x_client *client = subdev->client;

        /*
         * If all subdevices have been activated, we're about to remove the
         * first active subdevice, so unload the driver first.
         */
        if (list_empty(&device->subdevs)) {
                if (device->registered) {
                        device->registered = false;
                        device_del(&device->dev);
                }
        }

        /*
         * Move the subdevice back to the list of idle subdevices and remove
         * it from list of clients.
         */
        mutex_lock(&device->clients_lock);
        subdev->client = NULL;
        client->host = NULL;
        list_move_tail(&subdev->list, &device->subdevs);
        /*
         * XXX: Perhaps don't do this here, but rather explicitly remove it
         * when the device is about to be deleted.
         *
         * This is somewhat complicated by the fact that this function is
         * used to remove the subdevice when a client is unregistered but
         * also when the composite device is about to be removed.
         */
        list_del_init(&client->list);
        mutex_unlock(&device->clients_lock);
}

static void host1x_subdev_unregister(struct host1x_device *device,
                                     struct host1x_subdev *subdev)
{
        mutex_lock(&device->subdevs_lock);
        __host1x_subdev_unregister(device, subdev);
        mutex_unlock(&device->subdevs_lock);
}

/**
 * host1x_device_init() - initialize a host1x logical device
 * @device: host1x logical device
 *
 * The driver for the host1x logical device can call this during execution of
 * its &host1x_driver.probe implementation to initialize each of its clients.
 * The client drivers access the subsystem specific driver data using the
 * &host1x_client.parent field and driver data associated with it (usually by
 * calling dev_get_drvdata()).
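 *
 * A minimal sketch of such a probe implementation (hypothetical subsystem
 * driver, not part of this file; the foo_* names are made up):
 *
 *   static int foo_probe(struct host1x_device *dev)
 *   {
 *           struct foo *foo;
 *
 *           foo = devm_kzalloc(&dev->dev, sizeof(*foo), GFP_KERNEL);
 *           if (!foo)
 *                   return -ENOMEM;
 *
 *           dev_set_drvdata(&dev->dev, foo);
 *
 *           return host1x_device_init(dev);
 *   }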
 */
int host1x_device_init(struct host1x_device *device)
{
        struct host1x_client *client;
        int err;

        mutex_lock(&device->clients_lock);

        list_for_each_entry(client, &device->clients, list) {
                if (client->ops && client->ops->early_init) {
                        err = client->ops->early_init(client);
                        if (err < 0) {
                                dev_err(&device->dev, "failed to early initialize %s: %d\n",
                                        dev_name(client->dev), err);
                                goto teardown_late;
                        }
                }
        }

        list_for_each_entry(client, &device->clients, list) {
                if (client->ops && client->ops->init) {
                        err = client->ops->init(client);
                        if (err < 0) {
                                dev_err(&device->dev,
                                        "failed to initialize %s: %d\n",
                                        dev_name(client->dev), err);
                                goto teardown;
                        }
                }
        }

        mutex_unlock(&device->clients_lock);

        return 0;

teardown:
        list_for_each_entry_continue_reverse(client, &device->clients, list)
                if (client->ops->exit)
                        client->ops->exit(client);

        /* reset client to end of list for late teardown */
        client = list_entry(&device->clients, struct host1x_client, list);

teardown_late:
        list_for_each_entry_continue_reverse(client, &device->clients, list)
                if (client->ops->late_exit)
                        client->ops->late_exit(client);

        mutex_unlock(&device->clients_lock);
        return err;
}
EXPORT_SYMBOL(host1x_device_init);

/**
 * host1x_device_exit() - uninitialize host1x logical device
 * @device: host1x logical device
 *
 * When the driver for a host1x logical device is unloaded, it can call this
 * function to tear down each of its clients. Typically this is done after a
 * subsystem-specific data structure is removed and the functionality can no
 * longer be used.
 */
int host1x_device_exit(struct host1x_device *device)
{
        struct host1x_client *client;
        int err;

        mutex_lock(&device->clients_lock);

        list_for_each_entry_reverse(client, &device->clients, list) {
                if (client->ops && client->ops->exit) {
                        err = client->ops->exit(client);
                        if (err < 0) {
                                dev_err(&device->dev,
                                        "failed to cleanup %s: %d\n",
                                        dev_name(client->dev), err);
                                mutex_unlock(&device->clients_lock);
                                return err;
                        }
                }
        }

        list_for_each_entry_reverse(client, &device->clients, list) {
                if (client->ops && client->ops->late_exit) {
                        err = client->ops->late_exit(client);
                        if (err < 0) {
                                dev_err(&device->dev, "failed to late cleanup %s: %d\n",
                                        dev_name(client->dev), err);
                                mutex_unlock(&device->clients_lock);
                                return err;
                        }
                }
        }

        mutex_unlock(&device->clients_lock);

        return 0;
}
EXPORT_SYMBOL(host1x_device_exit);
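
/*
 * Sketch of the corresponding &host1x_driver.remove implementation
 * (hypothetical, continuing the foo_probe() example above): the
 * subsystem-specific state is torn down first, then the clients.
 *
 *   static int foo_remove(struct host1x_device *dev)
 *   {
 *           struct foo *foo = dev_get_drvdata(&dev->dev);
 *
 *           foo_subsystem_cleanup(foo);  // hypothetical teardown helper
 *
 *           return host1x_device_exit(dev);
 *   }
 */
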
static int host1x_add_client(struct host1x *host1x,
                             struct host1x_client *client)
{
        struct host1x_device *device;
        struct host1x_subdev *subdev;

        mutex_lock(&host1x->devices_lock);

        list_for_each_entry(device, &host1x->devices, list) {
                list_for_each_entry(subdev, &device->subdevs, list) {
                        if (subdev->np == client->dev->of_node) {
                                host1x_subdev_register(device, subdev, client);
                                mutex_unlock(&host1x->devices_lock);
                                return 0;
                        }
                }
        }

        mutex_unlock(&host1x->devices_lock);
        return -ENODEV;
}

static int host1x_del_client(struct host1x *host1x,
                             struct host1x_client *client)
{
        struct host1x_device *device, *dt;
        struct host1x_subdev *subdev;

        mutex_lock(&host1x->devices_lock);

        list_for_each_entry_safe(device, dt, &host1x->devices, list) {
                list_for_each_entry(subdev, &device->active, list) {
                        if (subdev->client == client) {
                                host1x_subdev_unregister(device, subdev);
                                mutex_unlock(&host1x->devices_lock);
                                return 0;
                        }
                }
        }

        mutex_unlock(&host1x->devices_lock);
        return -ENODEV;
}

static int host1x_device_match(struct device *dev, struct device_driver *drv)
{
        return strcmp(dev_name(dev), drv->name) == 0;
}

static int host1x_device_uevent(struct device *dev,
                                struct kobj_uevent_env *env)
{
        struct device_node *np = dev->parent->of_node;
        unsigned int count = 0;
        struct property *p;
        const char *compat;

        /*
         * This duplicates most of of_device_uevent(), but the latter cannot
         * be called from modules and operates on dev->of_node, which is not
         * available in this case.
         *
         * Note that this is really only needed for backwards compatibility
         * with libdrm, which parses this information from sysfs and will
         * fail if it can't find the OF_FULLNAME, specifically.
         */
        add_uevent_var(env, "OF_NAME=%pOFn", np);
        add_uevent_var(env, "OF_FULLNAME=%pOF", np);

        of_property_for_each_string(np, "compatible", p, compat) {
                add_uevent_var(env, "OF_COMPATIBLE_%u=%s", count, compat);
                count++;
        }

        add_uevent_var(env, "OF_COMPATIBLE_N=%u", count);

        return 0;
}

static int host1x_dma_configure(struct device *dev)
{
        return of_dma_configure(dev, dev->of_node, true);
}

static const struct dev_pm_ops host1x_device_pm_ops = {
        .suspend = pm_generic_suspend,
        .resume = pm_generic_resume,
        .freeze = pm_generic_freeze,
        .thaw = pm_generic_thaw,
        .poweroff = pm_generic_poweroff,
        .restore = pm_generic_restore,
};

struct bus_type host1x_bus_type = {
        .name = "host1x",
        .match = host1x_device_match,
        .uevent = host1x_device_uevent,
        .dma_configure = host1x_dma_configure,
        .pm = &host1x_device_pm_ops,
};

static void __host1x_device_del(struct host1x_device *device)
{
        struct host1x_subdev *subdev, *sd;
        struct host1x_client *client, *cl;

        mutex_lock(&device->subdevs_lock);

        /* unregister subdevices */
        list_for_each_entry_safe(subdev, sd, &device->active, list) {
                /*
                 * host1x_subdev_unregister() will remove the client from
                 * any lists, so we'll need to manually add it back to the
                 * list of idle clients.
                 *
                 * XXX: Alternatively, perhaps don't remove the client from
                 * any lists in host1x_subdev_unregister() and instead do
                 * that explicitly from host1x_client_unregister()?
                 */
                client = subdev->client;

                __host1x_subdev_unregister(device, subdev);

                /* add the client to the list of idle clients */
                mutex_lock(&clients_lock);
                list_add_tail(&client->list, &clients);
                mutex_unlock(&clients_lock);
        }

        /* remove subdevices */
        list_for_each_entry_safe(subdev, sd, &device->subdevs, list)
                host1x_subdev_del(subdev);

        mutex_unlock(&device->subdevs_lock);

        /* move clients to idle list */
        mutex_lock(&clients_lock);
        mutex_lock(&device->clients_lock);

        list_for_each_entry_safe(client, cl, &device->clients, list)
                list_move_tail(&client->list, &clients);

        mutex_unlock(&device->clients_lock);
        mutex_unlock(&clients_lock);

        /* finally remove the device */
        list_del_init(&device->list);
}

static void host1x_device_release(struct device *dev)
{
        struct host1x_device *device = to_host1x_device(dev);

        __host1x_device_del(device);
        kfree(device);
}

static int host1x_device_add(struct host1x *host1x,
                             struct host1x_driver *driver)
{
        struct host1x_client *client, *tmp;
        struct host1x_subdev *subdev;
        struct host1x_device *device;
        int err;

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device)
                return -ENOMEM;

        device_initialize(&device->dev);

        mutex_init(&device->subdevs_lock);
        INIT_LIST_HEAD(&device->subdevs);
        INIT_LIST_HEAD(&device->active);
        mutex_init(&device->clients_lock);
        INIT_LIST_HEAD(&device->clients);
        INIT_LIST_HEAD(&device->list);
        device->driver = driver;

        device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
        device->dev.dma_mask = &device->dev.coherent_dma_mask;
        dev_set_name(&device->dev, "%s", driver->driver.name);
        device->dev.release = host1x_device_release;
        device->dev.bus = &host1x_bus_type;
        device->dev.parent = host1x->dev;

        of_dma_configure(&device->dev, host1x->dev->of_node, true);

        device->dev.dma_parms = &device->dma_parms;
        dma_set_max_seg_size(&device->dev, UINT_MAX);

        err = host1x_device_parse_dt(device, driver);
        if (err < 0) {
                kfree(device);
                return err;
        }

        list_add_tail(&device->list, &host1x->devices);

        mutex_lock(&clients_lock);

        list_for_each_entry_safe(client, tmp, &clients, list) {
                list_for_each_entry(subdev, &device->subdevs, list) {
                        if (subdev->np == client->dev->of_node) {
                                host1x_subdev_register(device, subdev, client);
                                break;
                        }
                }
        }

        mutex_unlock(&clients_lock);

        return 0;
}

/*
 * Removes a device by first unregistering any subdevices and then removing
 * itself from the list of devices.
 *
 * This function must be called with the host1x->devices_lock held.
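 *
 * The put_device() below drops the reference taken by device_initialize()
 * in host1x_device_add(); once the last reference is gone,
 * host1x_device_release() removes the device from the list and frees it.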
 */
static void host1x_device_del(struct host1x *host1x,
                              struct host1x_device *device)
{
        if (device->registered) {
                device->registered = false;
                device_del(&device->dev);
        }

        put_device(&device->dev);
}

static void host1x_attach_driver(struct host1x *host1x,
                                 struct host1x_driver *driver)
{
        struct host1x_device *device;
        int err;

        mutex_lock(&host1x->devices_lock);

        list_for_each_entry(device, &host1x->devices, list) {
                if (device->driver == driver) {
                        mutex_unlock(&host1x->devices_lock);
                        return;
                }
        }

        err = host1x_device_add(host1x, driver);
        if (err < 0)
                dev_err(host1x->dev, "failed to allocate device: %d\n", err);

        mutex_unlock(&host1x->devices_lock);
}

static void host1x_detach_driver(struct host1x *host1x,
                                 struct host1x_driver *driver)
{
        struct host1x_device *device, *tmp;

        mutex_lock(&host1x->devices_lock);

        list_for_each_entry_safe(device, tmp, &host1x->devices, list)
                if (device->driver == driver)
                        host1x_device_del(host1x, device);

        mutex_unlock(&host1x->devices_lock);
}

static int host1x_devices_show(struct seq_file *s, void *data)
{
        struct host1x *host1x = s->private;
        struct host1x_device *device;

        mutex_lock(&host1x->devices_lock);

        list_for_each_entry(device, &host1x->devices, list) {
                struct host1x_subdev *subdev;

                seq_printf(s, "%s\n", dev_name(&device->dev));

                mutex_lock(&device->subdevs_lock);

                list_for_each_entry(subdev, &device->active, list)
                        seq_printf(s, " %pOFf: %s\n", subdev->np,
                                   dev_name(subdev->client->dev));

                list_for_each_entry(subdev, &device->subdevs, list)
                        seq_printf(s, " %pOFf:\n", subdev->np);

                mutex_unlock(&device->subdevs_lock);
        }

        mutex_unlock(&host1x->devices_lock);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(host1x_devices);

/**
 * host1x_register() - register a host1x controller
 * @host1x: host1x controller
 *
 * The host1x controller driver uses this to register a host1x controller with
 * the infrastructure. Note that all Tegra SoC generations have only ever come
 * with a single host1x instance, so this function is somewhat academic.
 */
int host1x_register(struct host1x *host1x)
{
        struct host1x_driver *driver;

        mutex_lock(&devices_lock);
        list_add_tail(&host1x->list, &devices);
        mutex_unlock(&devices_lock);

        mutex_lock(&drivers_lock);

        list_for_each_entry(driver, &drivers, list)
                host1x_attach_driver(host1x, driver);

        mutex_unlock(&drivers_lock);

        debugfs_create_file("devices", S_IRUGO, host1x->debugfs, host1x,
                            &host1x_devices_fops);

        return 0;
}
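
/*
 * Usage sketch (hypothetical controller driver, for illustration only; the
 * bar_* names are made up): the controller registers itself once its
 * hardware has been brought up and unregisters it again before tearing the
 * hardware down.
 *
 *   static int bar_probe(struct platform_device *pdev)
 *   {
 *           struct host1x *host = bar_hardware_init(pdev);  // hypothetical setup
 *
 *           if (IS_ERR(host))
 *                   return PTR_ERR(host);
 *
 *           platform_set_drvdata(pdev, host);
 *
 *           return host1x_register(host);
 *   }
 *
 *   static int bar_remove(struct platform_device *pdev)
 *   {
 *           struct host1x *host = platform_get_drvdata(pdev);
 *
 *           host1x_unregister(host);
 *           bar_hardware_exit(host);  // hypothetical teardown
 *
 *           return 0;
 *   }
 */
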
/**
 * host1x_unregister() - unregister a host1x controller
 * @host1x: host1x controller
 *
 * The host1x controller driver uses this to remove a host1x controller from
 * the infrastructure.
 */
int host1x_unregister(struct host1x *host1x)
{
        struct host1x_driver *driver;

        mutex_lock(&drivers_lock);

        list_for_each_entry(driver, &drivers, list)
                host1x_detach_driver(host1x, driver);

        mutex_unlock(&drivers_lock);

        mutex_lock(&devices_lock);
        list_del_init(&host1x->list);
        mutex_unlock(&devices_lock);

        return 0;
}

static int host1x_device_probe(struct device *dev)
{
        struct host1x_driver *driver = to_host1x_driver(dev->driver);
        struct host1x_device *device = to_host1x_device(dev);

        if (driver->probe)
                return driver->probe(device);

        return 0;
}

static int host1x_device_remove(struct device *dev)
{
        struct host1x_driver *driver = to_host1x_driver(dev->driver);
        struct host1x_device *device = to_host1x_device(dev);

        if (driver->remove)
                return driver->remove(device);

        return 0;
}

static void host1x_device_shutdown(struct device *dev)
{
        struct host1x_driver *driver = to_host1x_driver(dev->driver);
        struct host1x_device *device = to_host1x_device(dev);

        if (driver->shutdown)
                driver->shutdown(device);
}

/**
 * host1x_driver_register_full() - register a host1x driver
 * @driver: host1x driver
 * @owner: owner module
 *
 * Drivers for host1x logical devices call this function to register a driver
 * with the infrastructure. Note that since these drive logical devices, the
 * registration of the driver actually triggers the logical device creation.
 * A logical device will be created for each host1x instance.
 */
int host1x_driver_register_full(struct host1x_driver *driver,
                                struct module *owner)
{
        struct host1x *host1x;

        INIT_LIST_HEAD(&driver->list);

        mutex_lock(&drivers_lock);
        list_add_tail(&driver->list, &drivers);
        mutex_unlock(&drivers_lock);

        mutex_lock(&devices_lock);

        list_for_each_entry(host1x, &devices, list)
                host1x_attach_driver(host1x, driver);

        mutex_unlock(&devices_lock);

        driver->driver.bus = &host1x_bus_type;
        driver->driver.owner = owner;
        driver->driver.probe = host1x_device_probe;
        driver->driver.remove = host1x_device_remove;
        driver->driver.shutdown = host1x_device_shutdown;

        return driver_register(&driver->driver);
}
EXPORT_SYMBOL(host1x_driver_register_full);

/**
 * host1x_driver_unregister() - unregister a host1x driver
 * @driver: host1x driver
 *
 * Unbinds the driver from each of the host1x logical devices that it is
 * bound to, effectively removing the subsystem devices that they represent.
 */
void host1x_driver_unregister(struct host1x_driver *driver)
{
        struct host1x *host1x;

        driver_unregister(&driver->driver);

        mutex_lock(&devices_lock);

        list_for_each_entry(host1x, &devices, list)
                host1x_detach_driver(host1x, driver);

        mutex_unlock(&devices_lock);

        mutex_lock(&drivers_lock);
        list_del_init(&driver->list);
        mutex_unlock(&drivers_lock);
}
EXPORT_SYMBOL(host1x_driver_unregister);
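
/*
 * Registration sketch (hypothetical subsystem driver, continuing the foo_*
 * example above; names and compatible strings are made up): the driver lists
 * the device tree nodes it aggregates in an OF match table and registers
 * itself with host1x_driver_register(), the <linux/host1x.h> wrapper that
 * passes THIS_MODULE to host1x_driver_register_full().
 *
 *   static const struct of_device_id foo_subdevs[] = {
 *           { .compatible = "vendor,foo-engine" },  // hypothetical compatible
 *           { }
 *   };
 *
 *   static struct host1x_driver foo_driver = {
 *           .driver = {
 *                   .name = "foo",
 *           },
 *           .probe = foo_probe,
 *           .remove = foo_remove,
 *           .subdevs = foo_subdevs,
 *   };
 *
 *   host1x_driver_register(&foo_driver);  // typically from module init
 */
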
/**
 * __host1x_client_register() - register a host1x client
 * @client: host1x client
 * @key: lock class key for the client-specific mutex
 *
 * Registers a host1x client with each host1x controller instance. Note that
 * each client will only match their parent host1x controller and will only be
 * associated with that instance. Once all clients have been registered with
 * their parent host1x controller, the infrastructure will set up the logical
 * device and call host1x_device_init(), which will in turn call each client's
 * &host1x_client_ops.init implementation.
 */
int __host1x_client_register(struct host1x_client *client,
                             struct lock_class_key *key)
{
        struct host1x *host1x;
        int err;

        INIT_LIST_HEAD(&client->list);
        __mutex_init(&client->lock, "host1x client lock", key);
        client->usecount = 0;

        mutex_lock(&devices_lock);

        list_for_each_entry(host1x, &devices, list) {
                err = host1x_add_client(host1x, client);
                if (!err) {
                        mutex_unlock(&devices_lock);
                        return 0;
                }
        }

        mutex_unlock(&devices_lock);

        mutex_lock(&clients_lock);
        list_add_tail(&client->list, &clients);
        mutex_unlock(&clients_lock);

        return 0;
}
EXPORT_SYMBOL(__host1x_client_register);

/**
 * host1x_client_unregister() - unregister a host1x client
 * @client: host1x client
 *
 * Removes a host1x client from its host1x controller instance. If a logical
 * device has already been initialized, it will be torn down.
 */
int host1x_client_unregister(struct host1x_client *client)
{
        struct host1x_client *c;
        struct host1x *host1x;
        int err;

        mutex_lock(&devices_lock);

        list_for_each_entry(host1x, &devices, list) {
                err = host1x_del_client(host1x, client);
                if (!err) {
                        mutex_unlock(&devices_lock);
                        return 0;
                }
        }

        mutex_unlock(&devices_lock);
        mutex_lock(&clients_lock);

        list_for_each_entry(c, &clients, list) {
                if (c == client) {
                        list_del_init(&c->list);
                        break;
                }
        }

        mutex_unlock(&clients_lock);

        return 0;
}
EXPORT_SYMBOL(host1x_client_unregister);

int host1x_client_suspend(struct host1x_client *client)
{
        int err = 0;

        mutex_lock(&client->lock);

        if (client->usecount == 1) {
                if (client->ops && client->ops->suspend) {
                        err = client->ops->suspend(client);
                        if (err < 0)
                                goto unlock;
                }
        }

        client->usecount--;
        dev_dbg(client->dev, "use count: %u\n", client->usecount);

        if (client->parent) {
                err = host1x_client_suspend(client->parent);
                if (err < 0)
                        goto resume;
        }

        goto unlock;

resume:
        if (client->usecount == 0)
                if (client->ops && client->ops->resume)
                        client->ops->resume(client);

        client->usecount++;
unlock:
        mutex_unlock(&client->lock);
        return err;
}
EXPORT_SYMBOL(host1x_client_suspend);

int host1x_client_resume(struct host1x_client *client)
{
        int err = 0;

        mutex_lock(&client->lock);

        if (client->parent) {
                err = host1x_client_resume(client->parent);
                if (err < 0)
                        goto unlock;
        }

        if (client->usecount == 0) {
                if (client->ops && client->ops->resume) {
                        err = client->ops->resume(client);
                        if (err < 0)
                                goto suspend;
                }
        }

        client->usecount++;
        dev_dbg(client->dev, "use count: %u\n", client->usecount);

        goto unlock;

suspend:
        if (client->parent)
                host1x_client_suspend(client->parent);
unlock:
        mutex_unlock(&client->lock);
        return err;
}
EXPORT_SYMBOL(host1x_client_resume);
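
/*
 * Client-side usage sketch (hypothetical engine driver, for illustration
 * only; the baz_* names are made up): an engine's platform driver fills in
 * a struct host1x_client and registers it through the host1x_client_register()
 * wrapper from <linux/host1x.h>, which supplies the lock class key expected by
 * __host1x_client_register(). At runtime, host1x_client_resume() and
 * host1x_client_suspend() are called in balanced pairs around hardware use,
 * which also keeps the parent client's use count in sync.
 *
 *   static int baz_probe(struct platform_device *pdev)
 *   {
 *           struct baz *baz;
 *
 *           baz = devm_kzalloc(&pdev->dev, sizeof(*baz), GFP_KERNEL);
 *           if (!baz)
 *                   return -ENOMEM;
 *
 *           baz->client.dev = &pdev->dev;
 *           baz->client.ops = &baz_client_ops;  // hypothetical host1x_client_ops
 *
 *           return host1x_client_register(&baz->client);
 *   }
 *
 *   // around each use of the engine:
 *   err = host1x_client_resume(&baz->client);
 *   if (err < 0)
 *           return err;
 *   // program the hardware, then drop the reference again:
 *   err = host1x_client_suspend(&baz->client);
 */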