// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2013, NVIDIA Corporation
 */

#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "bus.h"
#include "dev.h"

static DEFINE_MUTEX(clients_lock);
static LIST_HEAD(clients);

static DEFINE_MUTEX(drivers_lock);
static LIST_HEAD(drivers);

static DEFINE_MUTEX(devices_lock);
static LIST_HEAD(devices);

struct host1x_subdev {
	struct host1x_client *client;
	struct device_node *np;
	struct list_head list;
};

/**
 * host1x_subdev_add() - add a new subdevice with an associated device node
 * @device: host1x device to add the subdevice to
 * @driver: host1x driver containing the subdevices
 * @np: device node
 */
static int host1x_subdev_add(struct host1x_device *device,
			     struct host1x_driver *driver,
			     struct device_node *np)
{
	struct host1x_subdev *subdev;
	struct device_node *child;
	int err;

	subdev = kzalloc(sizeof(*subdev), GFP_KERNEL);
	if (!subdev)
		return -ENOMEM;

	INIT_LIST_HEAD(&subdev->list);
	subdev->np = of_node_get(np);

	mutex_lock(&device->subdevs_lock);
	list_add_tail(&subdev->list, &device->subdevs);
	mutex_unlock(&device->subdevs_lock);

	/* recursively add children */
	for_each_child_of_node(np, child) {
		if (of_match_node(driver->subdevs, child) &&
		    of_device_is_available(child)) {
			err = host1x_subdev_add(device, driver, child);
			if (err < 0) {
				/* XXX cleanup? */
				of_node_put(child);
				return err;
			}
		}
	}

	return 0;
}

/**
 * host1x_subdev_del() - remove subdevice
 * @subdev: subdevice to remove
 */
static void host1x_subdev_del(struct host1x_subdev *subdev)
{
	list_del(&subdev->list);
	of_node_put(subdev->np);
	kfree(subdev);
}

/**
 * host1x_device_parse_dt() - scan device tree and add matching subdevices
 * @device: host1x logical device
 * @driver: host1x driver
 */
static int host1x_device_parse_dt(struct host1x_device *device,
				  struct host1x_driver *driver)
{
	struct device_node *np;
	int err;

	for_each_child_of_node(device->dev.parent->of_node, np) {
		if (of_match_node(driver->subdevs, np) &&
		    of_device_is_available(np)) {
			err = host1x_subdev_add(device, driver, np);
			if (err < 0) {
				of_node_put(np);
				return err;
			}
		}
	}

	return 0;
}
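
/*
 * For illustration, host1x_device_parse_dt() walks the children of the
 * host1x controller's device tree node and turns every enabled child that
 * matches the driver's subdevice table into a subdevice. Given a (purely
 * hypothetical) fragment such as:
 *
 *	host1x {
 *		dc@54200000 {
 *			compatible = "nvidia,tegra20-dc";
 *			status = "okay";
 *		};
 *
 *		hdmi@54280000 {
 *			compatible = "nvidia,tegra20-hdmi";
 *			status = "disabled";
 *		};
 *	};
 *
 * a driver whose &host1x_driver.subdevs table matches both compatibles would
 * get a subdevice only for dc@54200000, because hdmi@54280000 is not
 * available (its status is not "okay").
 */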

static void host1x_subdev_register(struct host1x_device *device,
				   struct host1x_subdev *subdev,
				   struct host1x_client *client)
{
	int err;

	/*
	 * Move the subdevice to the list of active (registered) subdevices
	 * and associate it with a client. At the same time, associate the
	 * client with its parent device.
	 */
	mutex_lock(&device->subdevs_lock);
	mutex_lock(&device->clients_lock);
	list_move_tail(&client->list, &device->clients);
	list_move_tail(&subdev->list, &device->active);
	client->host = &device->dev;
	subdev->client = client;
	mutex_unlock(&device->clients_lock);
	mutex_unlock(&device->subdevs_lock);

	if (list_empty(&device->subdevs)) {
		err = device_add(&device->dev);
		if (err < 0)
			dev_err(&device->dev, "failed to add: %d\n", err);
		else
			device->registered = true;
	}
}

static void __host1x_subdev_unregister(struct host1x_device *device,
				       struct host1x_subdev *subdev)
{
	struct host1x_client *client = subdev->client;

	/*
	 * If all subdevices have been activated, we're about to remove the
	 * first active subdevice, so unload the driver first.
	 */
	if (list_empty(&device->subdevs)) {
		if (device->registered) {
			device->registered = false;
			device_del(&device->dev);
		}
	}

	/*
	 * Move the subdevice back to the list of idle subdevices and remove
	 * it from list of clients.
	 */
	mutex_lock(&device->clients_lock);
	subdev->client = NULL;
	client->host = NULL;
	list_move_tail(&subdev->list, &device->subdevs);
	/*
	 * XXX: Perhaps don't do this here, but rather explicitly remove it
	 * when the device is about to be deleted.
	 *
	 * This is somewhat complicated by the fact that this function is
	 * used to remove the subdevice when a client is unregistered but
	 * also when the composite device is about to be removed.
	 */
	list_del_init(&client->list);
	mutex_unlock(&device->clients_lock);
}

static void host1x_subdev_unregister(struct host1x_device *device,
				     struct host1x_subdev *subdev)
{
	mutex_lock(&device->subdevs_lock);
	__host1x_subdev_unregister(device, subdev);
	mutex_unlock(&device->subdevs_lock);
}

/**
 * host1x_device_init() - initialize a host1x logical device
 * @device: host1x logical device
 *
 * The driver for the host1x logical device can call this during execution of
 * its &host1x_driver.probe implementation to initialize each of its clients.
 * The client drivers access the subsystem specific driver data using the
 * &host1x_client.parent field and driver data associated with it (usually by
 * calling dev_get_drvdata()).
 */
int host1x_device_init(struct host1x_device *device)
{
	struct host1x_client *client;
	int err;

	mutex_lock(&device->clients_lock);

	list_for_each_entry(client, &device->clients, list) {
		if (client->ops && client->ops->early_init) {
			err = client->ops->early_init(client);
			if (err < 0) {
				dev_err(&device->dev, "failed to early initialize %s: %d\n",
					dev_name(client->dev), err);
				goto teardown_late;
			}
		}
	}

	list_for_each_entry(client, &device->clients, list) {
		if (client->ops && client->ops->init) {
			err = client->ops->init(client);
			if (err < 0) {
				dev_err(&device->dev,
					"failed to initialize %s: %d\n",
					dev_name(client->dev), err);
				goto teardown;
			}
		}
	}

	mutex_unlock(&device->clients_lock);

	return 0;

teardown:
	list_for_each_entry_continue_reverse(client, &device->clients, list)
		if (client->ops->exit)
			client->ops->exit(client);

	/* reset client to end of list for late teardown */
	client = list_entry(&device->clients, struct host1x_client, list);

teardown_late:
	list_for_each_entry_continue_reverse(client, &device->clients, list)
		if (client->ops->late_exit)
			client->ops->late_exit(client);

	mutex_unlock(&device->clients_lock);
	return err;
}
EXPORT_SYMBOL(host1x_device_init);
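
/*
 * Illustrative sketch (not part of this file): a subsystem driver would
 * typically call host1x_device_init() from its &host1x_driver.probe
 * implementation, after setting up its subsystem-specific driver data.
 * The "foo" names below are hypothetical.
 *
 *	static int foo_probe(struct host1x_device *dev)
 *	{
 *		struct foo *foo;
 *		int err;
 *
 *		foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *		if (!foo)
 *			return -ENOMEM;
 *
 *		dev_set_drvdata(&dev->dev, foo);
 *
 *		err = host1x_device_init(dev);
 *		if (err < 0) {
 *			kfree(foo);
 *			return err;
 *		}
 *
 *		return 0;
 *	}
 */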

/**
 * host1x_device_exit() - uninitialize host1x logical device
 * @device: host1x logical device
 *
 * When the driver for a host1x logical device is unloaded, it can call this
 * function to tear down each of its clients. Typically this is done after a
 * subsystem-specific data structure is removed and the functionality can no
 * longer be used.
 */
int host1x_device_exit(struct host1x_device *device)
{
	struct host1x_client *client;
	int err;

	mutex_lock(&device->clients_lock);

	list_for_each_entry_reverse(client, &device->clients, list) {
		if (client->ops && client->ops->exit) {
			err = client->ops->exit(client);
			if (err < 0) {
				dev_err(&device->dev,
					"failed to cleanup %s: %d\n",
					dev_name(client->dev), err);
				mutex_unlock(&device->clients_lock);
				return err;
			}
		}
	}

	list_for_each_entry_reverse(client, &device->clients, list) {
		if (client->ops && client->ops->late_exit) {
			err = client->ops->late_exit(client);
			if (err < 0) {
				dev_err(&device->dev, "failed to late cleanup %s: %d\n",
					dev_name(client->dev), err);
				mutex_unlock(&device->clients_lock);
				return err;
			}
		}
	}

	mutex_unlock(&device->clients_lock);

	return 0;
}
EXPORT_SYMBOL(host1x_device_exit);
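
/*
 * Illustrative sketch (hypothetical "foo" names again): the mirror image of
 * the probe example above, run from &host1x_driver.remove once the
 * subsystem-specific functionality has been removed.
 *
 *	static int foo_remove(struct host1x_device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(&dev->dev);
 *		int err;
 *
 *		foo_subsystem_unregister(foo);
 *
 *		err = host1x_device_exit(dev);
 *		if (err < 0)
 *			return err;
 *
 *		kfree(foo);
 *		return 0;
 *	}
 */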

static int host1x_add_client(struct host1x *host1x,
			     struct host1x_client *client)
{
	struct host1x_device *device;
	struct host1x_subdev *subdev;

	mutex_lock(&host1x->devices_lock);

	list_for_each_entry(device, &host1x->devices, list) {
		list_for_each_entry(subdev, &device->subdevs, list) {
			if (subdev->np == client->dev->of_node) {
				host1x_subdev_register(device, subdev, client);
				mutex_unlock(&host1x->devices_lock);
				return 0;
			}
		}
	}

	mutex_unlock(&host1x->devices_lock);
	return -ENODEV;
}

static int host1x_del_client(struct host1x *host1x,
			     struct host1x_client *client)
{
	struct host1x_device *device, *dt;
	struct host1x_subdev *subdev;

	mutex_lock(&host1x->devices_lock);

	list_for_each_entry_safe(device, dt, &host1x->devices, list) {
		list_for_each_entry(subdev, &device->active, list) {
			if (subdev->client == client) {
				host1x_subdev_unregister(device, subdev);
				mutex_unlock(&host1x->devices_lock);
				return 0;
			}
		}
	}

	mutex_unlock(&host1x->devices_lock);
	return -ENODEV;
}

static int host1x_device_match(struct device *dev, struct device_driver *drv)
{
	return strcmp(dev_name(dev), drv->name) == 0;
}

/*
 * Note that this is really only needed for backwards compatibility
 * with libdrm, which parses this information from sysfs and will
 * fail if it can't find the OF_FULLNAME, specifically.
 */
static int host1x_device_uevent(const struct device *dev,
				struct kobj_uevent_env *env)
{
	of_device_uevent(dev->parent, env);

	return 0;
}

static int host1x_dma_configure(struct device *dev)
{
	return of_dma_configure(dev, dev->of_node, true);
}

static const struct dev_pm_ops host1x_device_pm_ops = {
	.suspend = pm_generic_suspend,
	.resume = pm_generic_resume,
	.freeze = pm_generic_freeze,
	.thaw = pm_generic_thaw,
	.poweroff = pm_generic_poweroff,
	.restore = pm_generic_restore,
};

struct bus_type host1x_bus_type = {
	.name = "host1x",
	.match = host1x_device_match,
	.uevent = host1x_device_uevent,
	.dma_configure = host1x_dma_configure,
	.pm = &host1x_device_pm_ops,
};
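
/*
 * For illustration (values are hypothetical): because host1x_device_uevent()
 * forwards to of_device_uevent() on the parent, a uevent for a logical
 * device carries the parent controller's OF properties, roughly:
 *
 *	OF_NAME=host1x
 *	OF_FULLNAME=/host1x@50000000
 *	OF_COMPATIBLE_0=nvidia,tegra20-host1x
 *	OF_COMPATIBLE_N=1
 *
 * Matching on this bus is by name only: a logical device binds to the
 * host1x driver whose name equals the device name, since both are derived
 * from &host1x_driver.driver.name.
 */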

static void __host1x_device_del(struct host1x_device *device)
{
	struct host1x_subdev *subdev, *sd;
	struct host1x_client *client, *cl;

	mutex_lock(&device->subdevs_lock);

	/* unregister subdevices */
	list_for_each_entry_safe(subdev, sd, &device->active, list) {
		/*
		 * host1x_subdev_unregister() will remove the client from
		 * any lists, so we'll need to manually add it back to the
		 * list of idle clients.
		 *
		 * XXX: Alternatively, perhaps don't remove the client from
		 * any lists in host1x_subdev_unregister() and instead do
		 * that explicitly from host1x_unregister_client()?
		 */
		client = subdev->client;

		__host1x_subdev_unregister(device, subdev);

		/* add the client to the list of idle clients */
		mutex_lock(&clients_lock);
		list_add_tail(&client->list, &clients);
		mutex_unlock(&clients_lock);
	}

	/* remove subdevices */
	list_for_each_entry_safe(subdev, sd, &device->subdevs, list)
		host1x_subdev_del(subdev);

	mutex_unlock(&device->subdevs_lock);

	/* move clients to idle list */
	mutex_lock(&clients_lock);
	mutex_lock(&device->clients_lock);

	list_for_each_entry_safe(client, cl, &device->clients, list)
		list_move_tail(&client->list, &clients);

	mutex_unlock(&device->clients_lock);
	mutex_unlock(&clients_lock);

	/* finally remove the device */
	list_del_init(&device->list);
}

static void host1x_device_release(struct device *dev)
{
	struct host1x_device *device = to_host1x_device(dev);

	__host1x_device_del(device);
	kfree(device);
}

static int host1x_device_add(struct host1x *host1x,
			     struct host1x_driver *driver)
{
	struct host1x_client *client, *tmp;
	struct host1x_subdev *subdev;
	struct host1x_device *device;
	int err;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device_initialize(&device->dev);

	mutex_init(&device->subdevs_lock);
	INIT_LIST_HEAD(&device->subdevs);
	INIT_LIST_HEAD(&device->active);
	mutex_init(&device->clients_lock);
	INIT_LIST_HEAD(&device->clients);
	INIT_LIST_HEAD(&device->list);
	device->driver = driver;

	device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
	device->dev.dma_mask = &device->dev.coherent_dma_mask;
	dev_set_name(&device->dev, "%s", driver->driver.name);
	device->dev.release = host1x_device_release;
	device->dev.bus = &host1x_bus_type;
	device->dev.parent = host1x->dev;

	of_dma_configure(&device->dev, host1x->dev->of_node, true);

	device->dev.dma_parms = &device->dma_parms;
	dma_set_max_seg_size(&device->dev, UINT_MAX);

	err = host1x_device_parse_dt(device, driver);
	if (err < 0) {
		kfree(device);
		return err;
	}

	list_add_tail(&device->list, &host1x->devices);

	mutex_lock(&clients_lock);

	list_for_each_entry_safe(client, tmp, &clients, list) {
		list_for_each_entry(subdev, &device->subdevs, list) {
			if (subdev->np == client->dev->of_node) {
				host1x_subdev_register(device, subdev, client);
				break;
			}
		}
	}

	mutex_unlock(&clients_lock);

	return 0;
}

/*
 * Removes a device by first unregistering any subdevices and then removing
 * itself from the list of devices.
 *
 * This function must be called with the host1x->devices_lock held.
 */
static void host1x_device_del(struct host1x *host1x,
			      struct host1x_device *device)
{
	if (device->registered) {
		device->registered = false;
		device_del(&device->dev);
	}

	put_device(&device->dev);
}

static void host1x_attach_driver(struct host1x *host1x,
				 struct host1x_driver *driver)
{
	struct host1x_device *device;
	int err;

	mutex_lock(&host1x->devices_lock);

	list_for_each_entry(device, &host1x->devices, list) {
		if (device->driver == driver) {
			mutex_unlock(&host1x->devices_lock);
			return;
		}
	}

	err = host1x_device_add(host1x, driver);
	if (err < 0)
		dev_err(host1x->dev, "failed to allocate device: %d\n", err);

	mutex_unlock(&host1x->devices_lock);
}

static void host1x_detach_driver(struct host1x *host1x,
				 struct host1x_driver *driver)
{
	struct host1x_device *device, *tmp;

	mutex_lock(&host1x->devices_lock);

	list_for_each_entry_safe(device, tmp, &host1x->devices, list)
		if (device->driver == driver)
			host1x_device_del(host1x, device);

	mutex_unlock(&host1x->devices_lock);
}

static int host1x_devices_show(struct seq_file *s, void *data)
{
	struct host1x *host1x = s->private;
	struct host1x_device *device;

	mutex_lock(&host1x->devices_lock);

	list_for_each_entry(device, &host1x->devices, list) {
		struct host1x_subdev *subdev;

		seq_printf(s, "%s\n", dev_name(&device->dev));

		mutex_lock(&device->subdevs_lock);

		list_for_each_entry(subdev, &device->active, list)
			seq_printf(s, "  %pOFf: %s\n", subdev->np,
				   dev_name(subdev->client->dev));

		list_for_each_entry(subdev, &device->subdevs, list)
			seq_printf(s, "  %pOFf:\n", subdev->np);

		mutex_unlock(&device->subdevs_lock);
	}

	mutex_unlock(&host1x->devices_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(host1x_devices);

/**
 * host1x_register() - register a host1x controller
 * @host1x: host1x controller
 *
 * The host1x controller driver uses this to register a host1x controller with
 * the infrastructure. Note that all Tegra SoC generations have only ever come
 * with a single host1x instance, so this function is somewhat academic.
 */
int host1x_register(struct host1x *host1x)
{
	struct host1x_driver *driver;

	mutex_lock(&devices_lock);
	list_add_tail(&host1x->list, &devices);
	mutex_unlock(&devices_lock);

	mutex_lock(&drivers_lock);

	list_for_each_entry(driver, &drivers, list)
		host1x_attach_driver(host1x, driver);

	mutex_unlock(&drivers_lock);

	debugfs_create_file("devices", S_IRUGO, host1x->debugfs, host1x,
			    &host1x_devices_fops);

	return 0;
}
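
/*
 * For illustration, the "devices" debugfs file backed by
 * host1x_devices_show() lists each logical device followed by its active
 * (bound to a client) and idle subdevices. Hypothetical output:
 *
 *	drm
 *	  /host1x@50000000/dc@54200000: 54200000.dc
 *	  /host1x@50000000/hdmi@54280000:
 *
 * Here the display controller has a registered client while the HDMI
 * subdevice is still waiting for one.
 */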

/**
 * host1x_unregister() - unregister a host1x controller
 * @host1x: host1x controller
 *
 * The host1x controller driver uses this to remove a host1x controller from
 * the infrastructure.
 */
int host1x_unregister(struct host1x *host1x)
{
	struct host1x_driver *driver;

	mutex_lock(&drivers_lock);

	list_for_each_entry(driver, &drivers, list)
		host1x_detach_driver(host1x, driver);

	mutex_unlock(&drivers_lock);

	mutex_lock(&devices_lock);
	list_del_init(&host1x->list);
	mutex_unlock(&devices_lock);

	return 0;
}

static int host1x_device_probe(struct device *dev)
{
	struct host1x_driver *driver = to_host1x_driver(dev->driver);
	struct host1x_device *device = to_host1x_device(dev);

	if (driver->probe)
		return driver->probe(device);

	return 0;
}

static int host1x_device_remove(struct device *dev)
{
	struct host1x_driver *driver = to_host1x_driver(dev->driver);
	struct host1x_device *device = to_host1x_device(dev);

	if (driver->remove)
		return driver->remove(device);

	return 0;
}

static void host1x_device_shutdown(struct device *dev)
{
	struct host1x_driver *driver = to_host1x_driver(dev->driver);
	struct host1x_device *device = to_host1x_device(dev);

	if (driver->shutdown)
		driver->shutdown(device);
}

/**
 * host1x_driver_register_full() - register a host1x driver
 * @driver: host1x driver
 * @owner: owner module
 *
 * Drivers for host1x logical devices call this function to register a driver
 * with the infrastructure. Note that since these drive logical devices, the
 * registration of the driver actually triggers the logical device creation.
 * A logical device will be created for each host1x instance.
 */
int host1x_driver_register_full(struct host1x_driver *driver,
				struct module *owner)
{
	struct host1x *host1x;

	INIT_LIST_HEAD(&driver->list);

	mutex_lock(&drivers_lock);
	list_add_tail(&driver->list, &drivers);
	mutex_unlock(&drivers_lock);

	mutex_lock(&devices_lock);

	list_for_each_entry(host1x, &devices, list)
		host1x_attach_driver(host1x, driver);

	mutex_unlock(&devices_lock);

	driver->driver.bus = &host1x_bus_type;
	driver->driver.owner = owner;
	driver->driver.probe = host1x_device_probe;
	driver->driver.remove = host1x_device_remove;
	driver->driver.shutdown = host1x_device_shutdown;

	return driver_register(&driver->driver);
}
EXPORT_SYMBOL(host1x_driver_register_full);
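
/*
 * Illustrative sketch of a host1x driver registration ("foo" names are
 * hypothetical). The subdevs table selects which children of the host1x
 * node become subdevices of the logical device, and host1x_driver_register()
 * wraps host1x_driver_register_full() with THIS_MODULE as the owner.
 *
 *	static const struct of_device_id foo_subdevs[] = {
 *		{ .compatible = "vendor,foo-dc" },
 *		{ }
 *	};
 *
 *	static struct host1x_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *		},
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *		.subdevs = foo_subdevs,
 *	};
 *
 *	err = host1x_driver_register(&foo_driver);
 */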

/**
 * host1x_driver_unregister() - unregister a host1x driver
 * @driver: host1x driver
 *
 * Unbinds the driver from each of the host1x logical devices that it is
 * bound to, effectively removing the subsystem devices that they represent.
 */
void host1x_driver_unregister(struct host1x_driver *driver)
{
	struct host1x *host1x;

	driver_unregister(&driver->driver);

	mutex_lock(&devices_lock);

	list_for_each_entry(host1x, &devices, list)
		host1x_detach_driver(host1x, driver);

	mutex_unlock(&devices_lock);

	mutex_lock(&drivers_lock);
	list_del_init(&driver->list);
	mutex_unlock(&drivers_lock);
}
EXPORT_SYMBOL(host1x_driver_unregister);

/**
 * __host1x_client_init() - initialize a host1x client
 * @client: host1x client
 * @key: lock class key for the client-specific mutex
 */
void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key)
{
	host1x_bo_cache_init(&client->cache);
	INIT_LIST_HEAD(&client->list);
	__mutex_init(&client->lock, "host1x client lock", key);
	client->usecount = 0;
}
EXPORT_SYMBOL(__host1x_client_init);

/**
 * host1x_client_exit() - uninitialize a host1x client
 * @client: host1x client
 */
void host1x_client_exit(struct host1x_client *client)
{
	mutex_destroy(&client->lock);
}
EXPORT_SYMBOL(host1x_client_exit);

/**
 * __host1x_client_register() - register a host1x client
 * @client: host1x client
 *
 * Registers a host1x client with each host1x controller instance. Note that
 * each client will only match their parent host1x controller and will only be
 * associated with that instance. Once all clients have been registered with
 * their parent host1x controller, the infrastructure will set up the logical
 * device and call host1x_device_init(), which will in turn call each client's
 * &host1x_client_ops.init implementation.
 */
int __host1x_client_register(struct host1x_client *client)
{
	struct host1x *host1x;
	int err;

	mutex_lock(&devices_lock);

	list_for_each_entry(host1x, &devices, list) {
		err = host1x_add_client(host1x, client);
		if (!err) {
			mutex_unlock(&devices_lock);
			return 0;
		}
	}

	mutex_unlock(&devices_lock);

	mutex_lock(&clients_lock);
	list_add_tail(&client->list, &clients);
	mutex_unlock(&clients_lock);

	return 0;
}
EXPORT_SYMBOL(__host1x_client_register);
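
/*
 * Illustrative sketch of client registration from a subdevice driver's
 * platform probe ("foo" names are hypothetical). host1x_client_register()
 * is the usual entry point; it initializes the client via
 * __host1x_client_init() and then calls __host1x_client_register().
 *
 *	static int foo_dc_probe(struct platform_device *pdev)
 *	{
 *		struct foo_dc *dc;
 *
 *		dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
 *		if (!dc)
 *			return -ENOMEM;
 *
 *		dc->client.dev = &pdev->dev;
 *		dc->client.ops = &foo_dc_client_ops;
 *
 *		return host1x_client_register(&dc->client);
 *	}
 */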

/**
 * host1x_client_unregister() - unregister a host1x client
 * @client: host1x client
 *
 * Removes a host1x client from its host1x controller instance. If a logical
 * device has already been initialized, it will be torn down.
 */
void host1x_client_unregister(struct host1x_client *client)
{
	struct host1x_client *c;
	struct host1x *host1x;
	int err;

	mutex_lock(&devices_lock);

	list_for_each_entry(host1x, &devices, list) {
		err = host1x_del_client(host1x, client);
		if (!err) {
			mutex_unlock(&devices_lock);
			return;
		}
	}

	mutex_unlock(&devices_lock);
	mutex_lock(&clients_lock);

	list_for_each_entry(c, &clients, list) {
		if (c == client) {
			list_del_init(&c->list);
			break;
		}
	}

	mutex_unlock(&clients_lock);

	host1x_bo_cache_destroy(&client->cache);
}
EXPORT_SYMBOL(host1x_client_unregister);

/**
 * host1x_client_suspend() - drop a runtime reference on a host1x client
 * @client: host1x client
 *
 * Decrements the client's use count and, when the last user goes away,
 * suspends the client via &host1x_client_ops.suspend before dropping a
 * reference on the parent client.
 */
int host1x_client_suspend(struct host1x_client *client)
{
	int err = 0;

	mutex_lock(&client->lock);

	if (client->usecount == 1) {
		if (client->ops && client->ops->suspend) {
			err = client->ops->suspend(client);
			if (err < 0)
				goto unlock;
		}
	}

	client->usecount--;
	dev_dbg(client->dev, "use count: %u\n", client->usecount);

	if (client->parent) {
		err = host1x_client_suspend(client->parent);
		if (err < 0)
			goto resume;
	}

	goto unlock;

resume:
	if (client->usecount == 0)
		if (client->ops && client->ops->resume)
			client->ops->resume(client);

	client->usecount++;
unlock:
	mutex_unlock(&client->lock);
	return err;
}
EXPORT_SYMBOL(host1x_client_suspend);
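
/*
 * Illustrative usage of the client suspend/resume helpers (hypothetical
 * context): a client driver brackets hardware access with a resume/suspend
 * pair, and the use counting ensures the client (and, transitively, its
 * parent) is only physically resumed on first use and suspended on last.
 *
 *	err = host1x_client_resume(client);
 *	if (err < 0)
 *		return err;
 *
 *	... program the hardware ...
 *
 *	host1x_client_suspend(client);
 */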

/**
 * host1x_client_resume() - take a runtime reference on a host1x client
 * @client: host1x client
 *
 * Increments the client's use count, resuming the parent client first and
 * then the client itself via &host1x_client_ops.resume on first use.
 */
int host1x_client_resume(struct host1x_client *client)
{
	int err = 0;

	mutex_lock(&client->lock);

	if (client->parent) {
		err = host1x_client_resume(client->parent);
		if (err < 0)
			goto unlock;
	}

	if (client->usecount == 0) {
		if (client->ops && client->ops->resume) {
			err = client->ops->resume(client);
			if (err < 0)
				goto suspend;
		}
	}

	client->usecount++;
	dev_dbg(client->dev, "use count: %u\n", client->usecount);

	goto unlock;

suspend:
	if (client->parent)
		host1x_client_suspend(client->parent);
unlock:
	mutex_unlock(&client->lock);
	return err;
}
EXPORT_SYMBOL(host1x_client_resume);

/**
 * host1x_bo_pin() - map a buffer object for DMA by a device
 * @dev: device that will access the buffer
 * @bo: buffer object to map
 * @dir: DMA direction of the access
 * @cache: optional mapping cache, or NULL
 *
 * Pins the buffer for DMA and returns the resulting mapping. If a cache is
 * given, an existing mapping for the same buffer and direction is reused
 * and reference counted instead of creating a new one.
 */
struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
					enum dma_data_direction dir,
					struct host1x_bo_cache *cache)
{
	struct host1x_bo_mapping *mapping;

	if (cache) {
		mutex_lock(&cache->lock);

		list_for_each_entry(mapping, &cache->mappings, entry) {
			if (mapping->bo == bo && mapping->direction == dir) {
				kref_get(&mapping->ref);
				goto unlock;
			}
		}
	}

	mapping = bo->ops->pin(dev, bo, dir);
	if (IS_ERR(mapping))
		goto unlock;

	spin_lock(&mapping->bo->lock);
	list_add_tail(&mapping->list, &bo->mappings);
	spin_unlock(&mapping->bo->lock);

	if (cache) {
		INIT_LIST_HEAD(&mapping->entry);
		mapping->cache = cache;

		list_add_tail(&mapping->entry, &cache->mappings);

		/* bump reference count to track the copy in the cache */
		kref_get(&mapping->ref);
	}

unlock:
	if (cache)
		mutex_unlock(&cache->lock);

	return mapping;
}
EXPORT_SYMBOL(host1x_bo_pin);

static void __host1x_bo_unpin(struct kref *ref)
{
	struct host1x_bo_mapping *mapping = to_host1x_bo_mapping(ref);

	/*
	 * When the last reference of the mapping goes away, make sure to
	 * remove the mapping from the cache.
	 */
	if (mapping->cache)
		list_del(&mapping->entry);

	spin_lock(&mapping->bo->lock);
	list_del(&mapping->list);
	spin_unlock(&mapping->bo->lock);

	mapping->bo->ops->unpin(mapping);
}

/**
 * host1x_bo_unpin() - drop a reference to a buffer object mapping
 * @mapping: mapping returned by host1x_bo_pin()
 *
 * When the last reference goes away, the mapping is removed from its cache
 * (if any) and the buffer is unpinned.
 */
void host1x_bo_unpin(struct host1x_bo_mapping *mapping)
{
	struct host1x_bo_cache *cache = mapping->cache;

	if (cache)
		mutex_lock(&cache->lock);

	kref_put(&mapping->ref, __host1x_bo_unpin);

	if (cache)
		mutex_unlock(&cache->lock);
}
EXPORT_SYMBOL(host1x_bo_unpin);
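
/*
 * Illustrative usage of the pinning helpers (hypothetical context): map a
 * buffer for device reads through a client's mapping cache, use the DMA
 * address, then balance the pin with an unpin.
 *
 *	struct host1x_bo_mapping *map;
 *
 *	map = host1x_bo_pin(dev, bo, DMA_TO_DEVICE, &client->cache);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 *	... hand map->phys to the hardware ...
 *
 *	host1x_bo_unpin(map);
 */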