/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2013, NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/host1x.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "bus.h"
#include "dev.h"

static DEFINE_MUTEX(clients_lock);
static LIST_HEAD(clients);

static DEFINE_MUTEX(drivers_lock);
static LIST_HEAD(drivers);

static DEFINE_MUTEX(devices_lock);
static LIST_HEAD(devices);

struct host1x_subdev {
	struct host1x_client *client;
	struct device_node *np;
	struct list_head list;
};

/**
 * host1x_subdev_add() - add a new subdevice with an associated device node
 * @device: host1x device to add the subdevice to
 * @np: device node
 */
static int host1x_subdev_add(struct host1x_device *device,
			     struct device_node *np)
{
	struct host1x_subdev *subdev;

	subdev = kzalloc(sizeof(*subdev), GFP_KERNEL);
	if (!subdev)
		return -ENOMEM;

	INIT_LIST_HEAD(&subdev->list);
	subdev->np = of_node_get(np);

	mutex_lock(&device->subdevs_lock);
	list_add_tail(&subdev->list, &device->subdevs);
	mutex_unlock(&device->subdevs_lock);

	return 0;
}

/**
 * host1x_subdev_del() - remove subdevice
 * @subdev: subdevice to remove
 */
static void host1x_subdev_del(struct host1x_subdev *subdev)
{
	list_del(&subdev->list);
	of_node_put(subdev->np);
	kfree(subdev);
}

/**
 * host1x_device_parse_dt() - scan device tree and add matching subdevices
 * @device: host1x logical device
 */
static int host1x_device_parse_dt(struct host1x_device *device)
{
	struct device_node *np;
	int err;

	for_each_child_of_node(device->dev.parent->of_node, np) {
		if (of_match_node(device->driver->subdevs, np) &&
		    of_device_is_available(np)) {
			err = host1x_subdev_add(device, np);
			if (err < 0) {
				/*
				 * Drop the reference held by the iterator
				 * when exiting the loop early.
				 */
				of_node_put(np);
				return err;
			}
		}
	}

	return 0;
}

static void host1x_subdev_register(struct host1x_device *device,
				   struct host1x_subdev *subdev,
				   struct host1x_client *client)
{
	int err;

	/*
	 * Move the subdevice to the list of active (registered) subdevices
	 * and associate it with a client. At the same time, associate the
	 * client with its parent device.
	 */
	mutex_lock(&device->subdevs_lock);
	mutex_lock(&device->clients_lock);
	list_move_tail(&client->list, &device->clients);
	list_move_tail(&subdev->list, &device->active);
	client->parent = &device->dev;
	subdev->client = client;
	mutex_unlock(&device->clients_lock);
	mutex_unlock(&device->subdevs_lock);

	/*
	 * When all subdevices have been registered, the composite device is
	 * ready to be probed.
	 */
	if (list_empty(&device->subdevs)) {
		err = device->driver->probe(device);
		if (err < 0)
			dev_err(&device->dev, "probe failed: %d\n", err);
	}
}

static void __host1x_subdev_unregister(struct host1x_device *device,
				       struct host1x_subdev *subdev)
{
	struct host1x_client *client = subdev->client;
	int err;

	/*
	 * If all subdevices have been activated, we're about to remove the
	 * first active subdevice, so unload the driver first.
	 */
	if (list_empty(&device->subdevs)) {
		err = device->driver->remove(device);
		if (err < 0)
			dev_err(&device->dev, "remove failed: %d\n", err);
	}

	/*
	 * Move the subdevice back to the list of idle subdevices and remove
	 * it from list of clients.
	 */
	mutex_lock(&device->clients_lock);
	subdev->client = NULL;
	client->parent = NULL;
	list_move_tail(&subdev->list, &device->subdevs);
	/*
	 * XXX: Perhaps don't do this here, but rather explicitly remove it
	 * when the device is about to be deleted.
	 *
	 * This is somewhat complicated by the fact that this function is
	 * used to remove the subdevice when a client is unregistered but
	 * also when the composite device is about to be removed.
	 */
	list_del_init(&client->list);
	mutex_unlock(&device->clients_lock);
}

static void host1x_subdev_unregister(struct host1x_device *device,
				     struct host1x_subdev *subdev)
{
	mutex_lock(&device->subdevs_lock);
	__host1x_subdev_unregister(device, subdev);
	mutex_unlock(&device->subdevs_lock);
}

/* Initialize all clients of a composite device, in registration order. */
int host1x_device_init(struct host1x_device *device)
{
	struct host1x_client *client;
	int err;

	mutex_lock(&device->clients_lock);

	list_for_each_entry(client, &device->clients, list) {
		if (client->ops && client->ops->init) {
			err = client->ops->init(client);
			if (err < 0) {
				dev_err(&device->dev,
					"failed to initialize %s: %d\n",
					dev_name(client->dev), err);
				mutex_unlock(&device->clients_lock);
				return err;
			}
		}
	}

	mutex_unlock(&device->clients_lock);

	return 0;
}

/* Tear down all clients of a composite device, in reverse order. */
int host1x_device_exit(struct host1x_device *device)
{
	struct host1x_client *client;
	int err;

	mutex_lock(&device->clients_lock);

	list_for_each_entry_reverse(client, &device->clients, list) {
		if (client->ops && client->ops->exit) {
			err = client->ops->exit(client);
			if (err < 0) {
				dev_err(&device->dev,
					"failed to cleanup %s: %d\n",
					dev_name(client->dev), err);
				mutex_unlock(&device->clients_lock);
				return err;
			}
		}
	}

	mutex_unlock(&device->clients_lock);

	return 0;
}

/*
 * Find the subdevice whose device node matches the client and register the
 * client with it.
 */
static int host1x_register_client(struct host1x *host1x,
				  struct host1x_client *client)
{
	struct host1x_device *device;
	struct host1x_subdev *subdev;

	mutex_lock(&host1x->devices_lock);

	list_for_each_entry(device, &host1x->devices, list) {
		list_for_each_entry(subdev, &device->subdevs, list) {
			if (subdev->np == client->dev->of_node) {
				host1x_subdev_register(device, subdev, client);
				mutex_unlock(&host1x->devices_lock);
				return 0;
			}
		}
	}

	mutex_unlock(&host1x->devices_lock);
	return -ENODEV;
}

/*
 * Find the active subdevice associated with the client and unregister the
 * client from it.
 */
static int host1x_unregister_client(struct host1x *host1x,
				    struct host1x_client *client)
{
	struct host1x_device *device, *dt;
	struct host1x_subdev *subdev;

	mutex_lock(&host1x->devices_lock);

	list_for_each_entry_safe(device, dt, &host1x->devices, list) {
		list_for_each_entry(subdev, &device->active, list) {
			if (subdev->client == client) {
				host1x_subdev_unregister(device, subdev);
				mutex_unlock(&host1x->devices_lock);
				return 0;
			}
		}
	}

	mutex_unlock(&host1x->devices_lock);
	return -ENODEV;
}

static struct bus_type host1x_bus_type = {
	.name = "host1x",
};

int host1x_bus_init(void)
{
	return bus_register(&host1x_bus_type);
}

void host1x_bus_exit(void)
{
	bus_unregister(&host1x_bus_type);
}

static void host1x_device_release(struct device *dev)
{
	struct host1x_device *device = to_host1x_device(dev);

	kfree(device);
}

/*
 * Create the composite device for a driver, parse the device tree for
 * matching subdevices and register any clients that have already shown up.
 */
static int host1x_device_add(struct host1x *host1x,
			     struct host1x_driver *driver)
{
	struct host1x_client *client, *tmp;
	struct host1x_subdev *subdev;
	struct host1x_device *device;
	int err;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	mutex_init(&device->subdevs_lock);
	INIT_LIST_HEAD(&device->subdevs);
	INIT_LIST_HEAD(&device->active);
	mutex_init(&device->clients_lock);
	INIT_LIST_HEAD(&device->clients);
	INIT_LIST_HEAD(&device->list);
	device->driver = driver;

	device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
	device->dev.dma_mask = &device->dev.coherent_dma_mask;
	device->dev.release = host1x_device_release;
	dev_set_name(&device->dev, "%s", driver->name);
	device->dev.bus = &host1x_bus_type;
	device->dev.parent = host1x->dev;

	err = device_register(&device->dev);
	if (err < 0) {
		/*
		 * A failed device_register() must be balanced with
		 * put_device(), which frees the device through
		 * host1x_device_release(); never free it directly.
		 */
		put_device(&device->dev);
		return err;
	}

	err = host1x_device_parse_dt(device);
	if (err < 0) {
		device_unregister(&device->dev);
		return err;
	}

	mutex_lock(&host1x->devices_lock);
	list_add_tail(&device->list, &host1x->devices);
	mutex_unlock(&host1x->devices_lock);

	mutex_lock(&clients_lock);

	list_for_each_entry_safe(client, tmp, &clients, list) {
		list_for_each_entry(subdev, &device->subdevs, list) {
			if (subdev->np == client->dev->of_node) {
				host1x_subdev_register(device, subdev, client);
				break;
			}
		}
	}

	mutex_unlock(&clients_lock);

	return 0;
}

/*
 * Removes a device by first unregistering any subdevices and then removing
 * itself from the list of devices.
 *
 * This function must be called with the host1x->devices_lock held.
 */
static void host1x_device_del(struct host1x *host1x,
			      struct host1x_device *device)
{
	struct host1x_subdev *subdev, *sd;
	struct host1x_client *client, *cl;

	mutex_lock(&device->subdevs_lock);

	/* unregister subdevices */
	list_for_each_entry_safe(subdev, sd, &device->active, list) {
		/*
		 * host1x_subdev_unregister() will remove the client from
		 * any lists, so we'll need to manually add it back to the
		 * list of idle clients.
		 *
		 * XXX: Alternatively, perhaps don't remove the client from
		 * any lists in host1x_subdev_unregister() and instead do
		 * that explicitly from host1x_unregister_client()?
		 */
		client = subdev->client;

		__host1x_subdev_unregister(device, subdev);

		/* add the client to the list of idle clients */
		mutex_lock(&clients_lock);
		list_add_tail(&client->list, &clients);
		mutex_unlock(&clients_lock);
	}

	/* remove subdevices */
	list_for_each_entry_safe(subdev, sd, &device->subdevs, list)
		host1x_subdev_del(subdev);

	mutex_unlock(&device->subdevs_lock);

	/* move clients to idle list */
	mutex_lock(&clients_lock);
	mutex_lock(&device->clients_lock);

	list_for_each_entry_safe(client, cl, &device->clients, list)
		list_move_tail(&client->list, &clients);

	mutex_unlock(&device->clients_lock);
	mutex_unlock(&clients_lock);

	/* finally remove the device */
	list_del_init(&device->list);
	device_unregister(&device->dev);
}

/* Create a composite device for the driver unless one already exists. */
static void host1x_attach_driver(struct host1x *host1x,
				 struct host1x_driver *driver)
{
	struct host1x_device *device;
	int err;

	mutex_lock(&host1x->devices_lock);

	list_for_each_entry(device, &host1x->devices, list) {
		if (device->driver == driver) {
			mutex_unlock(&host1x->devices_lock);
			return;
		}
	}

	mutex_unlock(&host1x->devices_lock);

	err = host1x_device_add(host1x, driver);
	if (err < 0)
		dev_err(host1x->dev, "failed to allocate device: %d\n", err);
}

/* Remove any composite devices created for the driver on this host1x. */
static void host1x_detach_driver(struct host1x *host1x,
				 struct host1x_driver *driver)
{
	struct host1x_device *device, *tmp;

	mutex_lock(&host1x->devices_lock);

	list_for_each_entry_safe(device, tmp, &host1x->devices, list)
		if (device->driver == driver)
			host1x_device_del(host1x, device);

	mutex_unlock(&host1x->devices_lock);
}

/*
 * Announce a new host1x controller to the bus and attach all currently
 * registered drivers to it.
 */
int host1x_register(struct host1x *host1x)
{
	struct host1x_driver *driver;

	mutex_lock(&devices_lock);
	list_add_tail(&host1x->list, &devices);
	mutex_unlock(&devices_lock);

	mutex_lock(&drivers_lock);

	list_for_each_entry(driver, &drivers, list)
		host1x_attach_driver(host1x, driver);

	mutex_unlock(&drivers_lock);

	return 0;
}

/*
 * Detach all drivers from a host1x controller and remove it from the bus.
 */
int host1x_unregister(struct host1x *host1x)
{
	struct host1x_driver *driver;

	mutex_lock(&drivers_lock);

	list_for_each_entry(driver, &drivers, list)
		host1x_detach_driver(host1x, driver);

	mutex_unlock(&drivers_lock);

	mutex_lock(&devices_lock);
	list_del_init(&host1x->list);
	mutex_unlock(&devices_lock);

	return 0;
}

/*
 * Register a driver with the host1x bus and attach it to all known host1x
 * controllers.
 */
int host1x_driver_register(struct host1x_driver *driver)
{
	struct host1x *host1x;

	INIT_LIST_HEAD(&driver->list);

	mutex_lock(&drivers_lock);
	list_add_tail(&driver->list, &drivers);
	mutex_unlock(&drivers_lock);

	mutex_lock(&devices_lock);

	list_for_each_entry(host1x, &devices, list)
		host1x_attach_driver(host1x, driver);

	mutex_unlock(&devices_lock);

	return 0;
}
EXPORT_SYMBOL(host1x_driver_register);

void host1x_driver_unregister(struct host1x_driver *driver)
{
	mutex_lock(&drivers_lock);
	list_del_init(&driver->list);
	mutex_unlock(&drivers_lock);
}
EXPORT_SYMBOL(host1x_driver_unregister);

/*
 * Register a client with the host1x bus. If no matching subdevice exists
 * yet, the client is kept on the idle list until a driver claims it.
 */
int host1x_client_register(struct host1x_client *client)
{
	struct host1x *host1x;
	int err;

	mutex_lock(&devices_lock);

	list_for_each_entry(host1x, &devices, list) {
		err = host1x_register_client(host1x, client);
		if (!err) {
			mutex_unlock(&devices_lock);
			return 0;
		}
	}

	mutex_unlock(&devices_lock);

	mutex_lock(&clients_lock);
	list_add_tail(&client->list, &clients);
	mutex_unlock(&clients_lock);

	return 0;
}
EXPORT_SYMBOL(host1x_client_register);

int host1x_client_unregister(struct host1x_client *client)
{
	struct host1x_client *c;
	struct host1x *host1x;
	int err;

	mutex_lock(&devices_lock);

	list_for_each_entry(host1x, &devices, list) {
		err = host1x_unregister_client(host1x, client);
		if (!err) {
			mutex_unlock(&devices_lock);
			return 0;
		}
	}

	mutex_unlock(&devices_lock);
	mutex_lock(&clients_lock);

	list_for_each_entry(c, &clients, list) {
		if (c == client) {
			list_del_init(&c->list);
			break;
		}
	}

	mutex_unlock(&clients_lock);

	return 0;
}
EXPORT_SYMBOL(host1x_client_unregister);
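
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * subsystem driver built on top of this bus registers a host1x_driver that
 * lists the device tree nodes it binds to, and each of those nodes'
 * platform drivers registers a host1x_client from its probe routine. The
 * "foo" identifiers and the compatible string below are hypothetical; only
 * the host1x_driver and host1x_client fields already used in this file are
 * assumed.
 *
 *	static const struct of_device_id foo_subdevs[] = {
 *		{ .compatible = "vendor,foo-subdevice" },
 *		{ }
 *	};
 *
 *	static struct host1x_driver foo_driver = {
 *		.name = "foo",
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *		.subdevs = foo_subdevs,
 *	};
 *
 *	host1x_driver_register(&foo_driver);
 *
 * In a subdevice's platform driver probe, after filling in client->dev and
 * client->ops (whose ->init and ->exit hooks are invoked by
 * host1x_device_init() and host1x_device_exit() above):
 *
 *	err = host1x_client_register(&foo->client);
 *	if (err < 0)
 *		return err;
 *
 * Once every node matched by foo_subdevs has registered a client,
 * host1x_subdev_register() invokes foo_probe() on the composite device.
 */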