1 /* 2 * SPI init/core code 3 * 4 * Copyright (C) 2005 David Brownell 5 * Copyright (C) 2008 Secret Lab Technologies Ltd. 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by 9 * the Free Software Foundation; either version 2 of the License, or 10 * (at your option) any later version. 11 * 12 * This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details. 16 */ 17 18 #include <linux/kernel.h> 19 #include <linux/device.h> 20 #include <linux/init.h> 21 #include <linux/cache.h> 22 #include <linux/dma-mapping.h> 23 #include <linux/dmaengine.h> 24 #include <linux/mutex.h> 25 #include <linux/of_device.h> 26 #include <linux/of_irq.h> 27 #include <linux/clk/clk-conf.h> 28 #include <linux/slab.h> 29 #include <linux/mod_devicetable.h> 30 #include <linux/spi/spi.h> 31 #include <linux/of_gpio.h> 32 #include <linux/pm_runtime.h> 33 #include <linux/pm_domain.h> 34 #include <linux/property.h> 35 #include <linux/export.h> 36 #include <linux/sched/rt.h> 37 #include <uapi/linux/sched/types.h> 38 #include <linux/delay.h> 39 #include <linux/kthread.h> 40 #include <linux/ioport.h> 41 #include <linux/acpi.h> 42 #include <linux/highmem.h> 43 #include <linux/idr.h> 44 #include <linux/platform_data/x86/apple.h> 45 46 #define CREATE_TRACE_POINTS 47 #include <trace/events/spi.h> 48 #define SPI_DYN_FIRST_BUS_NUM 0 49 50 static DEFINE_IDR(spi_master_idr); 51 52 static void spidev_release(struct device *dev) 53 { 54 struct spi_device *spi = to_spi_device(dev); 55 56 /* spi controllers may cleanup for released devices */ 57 if (spi->controller->cleanup) 58 spi->controller->cleanup(spi); 59 60 spi_controller_put(spi->controller); 61 kfree(spi); 62 } 63 64 static ssize_t 65 modalias_show(struct device *dev, struct device_attribute *a, char *buf) 66 { 67 const struct spi_device *spi = to_spi_device(dev); 68 int len; 69 70 len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); 71 if (len != -ENODEV) 72 return len; 73 74 return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias); 75 } 76 static DEVICE_ATTR_RO(modalias); 77 78 #define SPI_STATISTICS_ATTRS(field, file) \ 79 static ssize_t spi_controller_##field##_show(struct device *dev, \ 80 struct device_attribute *attr, \ 81 char *buf) \ 82 { \ 83 struct spi_controller *ctlr = container_of(dev, \ 84 struct spi_controller, dev); \ 85 return spi_statistics_##field##_show(&ctlr->statistics, buf); \ 86 } \ 87 static struct device_attribute dev_attr_spi_controller_##field = { \ 88 .attr = { .name = file, .mode = 0444 }, \ 89 .show = spi_controller_##field##_show, \ 90 }; \ 91 static ssize_t spi_device_##field##_show(struct device *dev, \ 92 struct device_attribute *attr, \ 93 char *buf) \ 94 { \ 95 struct spi_device *spi = to_spi_device(dev); \ 96 return spi_statistics_##field##_show(&spi->statistics, buf); \ 97 } \ 98 static struct device_attribute dev_attr_spi_device_##field = { \ 99 .attr = { .name = file, .mode = 0444 }, \ 100 .show = spi_device_##field##_show, \ 101 } 102 103 #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \ 104 static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \ 105 char *buf) \ 106 { \ 107 unsigned long flags; \ 108 ssize_t len; \ 109 spin_lock_irqsave(&stat->lock, flags); \ 110 len = sprintf(buf, format_string, stat->field); \ 111 
spin_unlock_irqrestore(&stat->lock, flags); \ 112 return len; \ 113 } \ 114 SPI_STATISTICS_ATTRS(name, file) 115 116 #define SPI_STATISTICS_SHOW(field, format_string) \ 117 SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \ 118 field, format_string) 119 120 SPI_STATISTICS_SHOW(messages, "%lu"); 121 SPI_STATISTICS_SHOW(transfers, "%lu"); 122 SPI_STATISTICS_SHOW(errors, "%lu"); 123 SPI_STATISTICS_SHOW(timedout, "%lu"); 124 125 SPI_STATISTICS_SHOW(spi_sync, "%lu"); 126 SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu"); 127 SPI_STATISTICS_SHOW(spi_async, "%lu"); 128 129 SPI_STATISTICS_SHOW(bytes, "%llu"); 130 SPI_STATISTICS_SHOW(bytes_rx, "%llu"); 131 SPI_STATISTICS_SHOW(bytes_tx, "%llu"); 132 133 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \ 134 SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \ 135 "transfer_bytes_histo_" number, \ 136 transfer_bytes_histo[index], "%lu") 137 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1"); 138 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3"); 139 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7"); 140 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15"); 141 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31"); 142 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63"); 143 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127"); 144 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255"); 145 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511"); 146 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023"); 147 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047"); 148 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095"); 149 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191"); 150 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383"); 151 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767"); 152 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535"); 153 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+"); 154 155 SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu"); 156 157 static struct attribute *spi_dev_attrs[] = { 158 &dev_attr_modalias.attr, 159 NULL, 160 }; 161 162 static const struct attribute_group spi_dev_group = { 163 .attrs = spi_dev_attrs, 164 }; 165 166 static struct attribute *spi_device_statistics_attrs[] = { 167 &dev_attr_spi_device_messages.attr, 168 &dev_attr_spi_device_transfers.attr, 169 &dev_attr_spi_device_errors.attr, 170 &dev_attr_spi_device_timedout.attr, 171 &dev_attr_spi_device_spi_sync.attr, 172 &dev_attr_spi_device_spi_sync_immediate.attr, 173 &dev_attr_spi_device_spi_async.attr, 174 &dev_attr_spi_device_bytes.attr, 175 &dev_attr_spi_device_bytes_rx.attr, 176 &dev_attr_spi_device_bytes_tx.attr, 177 &dev_attr_spi_device_transfer_bytes_histo0.attr, 178 &dev_attr_spi_device_transfer_bytes_histo1.attr, 179 &dev_attr_spi_device_transfer_bytes_histo2.attr, 180 &dev_attr_spi_device_transfer_bytes_histo3.attr, 181 &dev_attr_spi_device_transfer_bytes_histo4.attr, 182 &dev_attr_spi_device_transfer_bytes_histo5.attr, 183 &dev_attr_spi_device_transfer_bytes_histo6.attr, 184 &dev_attr_spi_device_transfer_bytes_histo7.attr, 185 &dev_attr_spi_device_transfer_bytes_histo8.attr, 186 &dev_attr_spi_device_transfer_bytes_histo9.attr, 187 &dev_attr_spi_device_transfer_bytes_histo10.attr, 188 &dev_attr_spi_device_transfer_bytes_histo11.attr, 189 &dev_attr_spi_device_transfer_bytes_histo12.attr, 190 &dev_attr_spi_device_transfer_bytes_histo13.attr, 191 &dev_attr_spi_device_transfer_bytes_histo14.attr, 192 &dev_attr_spi_device_transfer_bytes_histo15.attr, 193 &dev_attr_spi_device_transfer_bytes_histo16.attr, 194 &dev_attr_spi_device_transfers_split_maxsize.attr, 195 NULL, 196 }; 
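/*
 * Note: these per-device statistics are exposed in a "statistics" sysfs
 * group on every SPI device, mirroring the per-controller group defined
 * below.  For example (actual paths depend on the bus number and chip
 * select probed on a given board):
 *
 *	/sys/class/spi_master/spi0/statistics/transfers
 *	/sys/bus/spi/devices/spi0.0/statistics/bytes_tx
 */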
197 198 static const struct attribute_group spi_device_statistics_group = { 199 .name = "statistics", 200 .attrs = spi_device_statistics_attrs, 201 }; 202 203 static const struct attribute_group *spi_dev_groups[] = { 204 &spi_dev_group, 205 &spi_device_statistics_group, 206 NULL, 207 }; 208 209 static struct attribute *spi_controller_statistics_attrs[] = { 210 &dev_attr_spi_controller_messages.attr, 211 &dev_attr_spi_controller_transfers.attr, 212 &dev_attr_spi_controller_errors.attr, 213 &dev_attr_spi_controller_timedout.attr, 214 &dev_attr_spi_controller_spi_sync.attr, 215 &dev_attr_spi_controller_spi_sync_immediate.attr, 216 &dev_attr_spi_controller_spi_async.attr, 217 &dev_attr_spi_controller_bytes.attr, 218 &dev_attr_spi_controller_bytes_rx.attr, 219 &dev_attr_spi_controller_bytes_tx.attr, 220 &dev_attr_spi_controller_transfer_bytes_histo0.attr, 221 &dev_attr_spi_controller_transfer_bytes_histo1.attr, 222 &dev_attr_spi_controller_transfer_bytes_histo2.attr, 223 &dev_attr_spi_controller_transfer_bytes_histo3.attr, 224 &dev_attr_spi_controller_transfer_bytes_histo4.attr, 225 &dev_attr_spi_controller_transfer_bytes_histo5.attr, 226 &dev_attr_spi_controller_transfer_bytes_histo6.attr, 227 &dev_attr_spi_controller_transfer_bytes_histo7.attr, 228 &dev_attr_spi_controller_transfer_bytes_histo8.attr, 229 &dev_attr_spi_controller_transfer_bytes_histo9.attr, 230 &dev_attr_spi_controller_transfer_bytes_histo10.attr, 231 &dev_attr_spi_controller_transfer_bytes_histo11.attr, 232 &dev_attr_spi_controller_transfer_bytes_histo12.attr, 233 &dev_attr_spi_controller_transfer_bytes_histo13.attr, 234 &dev_attr_spi_controller_transfer_bytes_histo14.attr, 235 &dev_attr_spi_controller_transfer_bytes_histo15.attr, 236 &dev_attr_spi_controller_transfer_bytes_histo16.attr, 237 &dev_attr_spi_controller_transfers_split_maxsize.attr, 238 NULL, 239 }; 240 241 static const struct attribute_group spi_controller_statistics_group = { 242 .name = "statistics", 243 .attrs = spi_controller_statistics_attrs, 244 }; 245 246 static const struct attribute_group *spi_master_groups[] = { 247 &spi_controller_statistics_group, 248 NULL, 249 }; 250 251 void spi_statistics_add_transfer_stats(struct spi_statistics *stats, 252 struct spi_transfer *xfer, 253 struct spi_controller *ctlr) 254 { 255 unsigned long flags; 256 int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1; 257 258 if (l2len < 0) 259 l2len = 0; 260 261 spin_lock_irqsave(&stats->lock, flags); 262 263 stats->transfers++; 264 stats->transfer_bytes_histo[l2len]++; 265 266 stats->bytes += xfer->len; 267 if ((xfer->tx_buf) && 268 (xfer->tx_buf != ctlr->dummy_tx)) 269 stats->bytes_tx += xfer->len; 270 if ((xfer->rx_buf) && 271 (xfer->rx_buf != ctlr->dummy_rx)) 272 stats->bytes_rx += xfer->len; 273 274 spin_unlock_irqrestore(&stats->lock, flags); 275 } 276 EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats); 277 278 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work, 279 * and the sysfs version makes coldplug work too. 
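 *
 * For illustration only (the "foo" names below are invented, not taken
 * from a real driver), a client driver typically provides an id_table so
 * that both forms of hotplug can resolve the reported modalias:
 *
 *	static const struct spi_device_id foo_spi_ids[] = {
 *		{ "foo-sensor", 0 },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_spi_ids);
 *
 *	static struct spi_driver foo_spi_driver = {
 *		.driver		= { .name = "foo-sensor" },
 *		.id_table	= foo_spi_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *	module_spi_driver(foo_spi_driver);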
280 */ 281 282 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, 283 const struct spi_device *sdev) 284 { 285 while (id->name[0]) { 286 if (!strcmp(sdev->modalias, id->name)) 287 return id; 288 id++; 289 } 290 return NULL; 291 } 292 293 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev) 294 { 295 const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver); 296 297 return spi_match_id(sdrv->id_table, sdev); 298 } 299 EXPORT_SYMBOL_GPL(spi_get_device_id); 300 301 static int spi_match_device(struct device *dev, struct device_driver *drv) 302 { 303 const struct spi_device *spi = to_spi_device(dev); 304 const struct spi_driver *sdrv = to_spi_driver(drv); 305 306 /* Attempt an OF style match */ 307 if (of_driver_match_device(dev, drv)) 308 return 1; 309 310 /* Then try ACPI */ 311 if (acpi_driver_match_device(dev, drv)) 312 return 1; 313 314 if (sdrv->id_table) 315 return !!spi_match_id(sdrv->id_table, spi); 316 317 return strcmp(spi->modalias, drv->name) == 0; 318 } 319 320 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env) 321 { 322 const struct spi_device *spi = to_spi_device(dev); 323 int rc; 324 325 rc = acpi_device_uevent_modalias(dev, env); 326 if (rc != -ENODEV) 327 return rc; 328 329 return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias); 330 } 331 332 struct bus_type spi_bus_type = { 333 .name = "spi", 334 .dev_groups = spi_dev_groups, 335 .match = spi_match_device, 336 .uevent = spi_uevent, 337 }; 338 EXPORT_SYMBOL_GPL(spi_bus_type); 339 340 341 static int spi_drv_probe(struct device *dev) 342 { 343 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 344 struct spi_device *spi = to_spi_device(dev); 345 int ret; 346 347 ret = of_clk_set_defaults(dev->of_node, false); 348 if (ret) 349 return ret; 350 351 if (dev->of_node) { 352 spi->irq = of_irq_get(dev->of_node, 0); 353 if (spi->irq == -EPROBE_DEFER) 354 return -EPROBE_DEFER; 355 if (spi->irq < 0) 356 spi->irq = 0; 357 } 358 359 ret = dev_pm_domain_attach(dev, true); 360 if (ret != -EPROBE_DEFER) { 361 ret = sdrv->probe(spi); 362 if (ret) 363 dev_pm_domain_detach(dev, true); 364 } 365 366 return ret; 367 } 368 369 static int spi_drv_remove(struct device *dev) 370 { 371 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 372 int ret; 373 374 ret = sdrv->remove(to_spi_device(dev)); 375 dev_pm_domain_detach(dev, true); 376 377 return ret; 378 } 379 380 static void spi_drv_shutdown(struct device *dev) 381 { 382 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 383 384 sdrv->shutdown(to_spi_device(dev)); 385 } 386 387 /** 388 * __spi_register_driver - register a SPI driver 389 * @owner: owner module of the driver to register 390 * @sdrv: the driver to register 391 * Context: can sleep 392 * 393 * Return: zero on success, else a negative error code. 394 */ 395 int __spi_register_driver(struct module *owner, struct spi_driver *sdrv) 396 { 397 sdrv->driver.owner = owner; 398 sdrv->driver.bus = &spi_bus_type; 399 if (sdrv->probe) 400 sdrv->driver.probe = spi_drv_probe; 401 if (sdrv->remove) 402 sdrv->driver.remove = spi_drv_remove; 403 if (sdrv->shutdown) 404 sdrv->driver.shutdown = spi_drv_shutdown; 405 return driver_register(&sdrv->driver); 406 } 407 EXPORT_SYMBOL_GPL(__spi_register_driver); 408 409 /*-------------------------------------------------------------------------*/ 410 411 /* SPI devices should normally not be created by SPI device drivers; that 412 * would make them board-specific. 
Similarly with SPI controller drivers.
 * Device registration normally goes in code like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process; it is also used to
 * protect objects of type struct idr.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately. This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible for calling spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller. If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device. Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration. Lock against concurrent add() calls.
528 */ 529 mutex_lock(&spi_add_lock); 530 531 status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check); 532 if (status) { 533 dev_err(dev, "chipselect %d already in use\n", 534 spi->chip_select); 535 goto done; 536 } 537 538 if (ctlr->cs_gpios) 539 spi->cs_gpio = ctlr->cs_gpios[spi->chip_select]; 540 541 /* Drivers may modify this initial i/o setup, but will 542 * normally rely on the device being setup. Devices 543 * using SPI_CS_HIGH can't coexist well otherwise... 544 */ 545 status = spi_setup(spi); 546 if (status < 0) { 547 dev_err(dev, "can't setup %s, status %d\n", 548 dev_name(&spi->dev), status); 549 goto done; 550 } 551 552 /* Device may be bound to an active driver when this returns */ 553 status = device_add(&spi->dev); 554 if (status < 0) 555 dev_err(dev, "can't add %s, status %d\n", 556 dev_name(&spi->dev), status); 557 else 558 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev)); 559 560 done: 561 mutex_unlock(&spi_add_lock); 562 return status; 563 } 564 EXPORT_SYMBOL_GPL(spi_add_device); 565 566 /** 567 * spi_new_device - instantiate one new SPI device 568 * @ctlr: Controller to which device is connected 569 * @chip: Describes the SPI device 570 * Context: can sleep 571 * 572 * On typical mainboards, this is purely internal; and it's not needed 573 * after board init creates the hard-wired devices. Some development 574 * platforms may not be able to use spi_register_board_info though, and 575 * this is exported so that for example a USB or parport based adapter 576 * driver could add devices (which it would learn about out-of-band). 577 * 578 * Return: the new device, or NULL. 579 */ 580 struct spi_device *spi_new_device(struct spi_controller *ctlr, 581 struct spi_board_info *chip) 582 { 583 struct spi_device *proxy; 584 int status; 585 586 /* NOTE: caller did any chip->bus_num checks necessary. 587 * 588 * Also, unless we change the return value convention to use 589 * error-or-pointer (not NULL-or-pointer), troubleshootability 590 * suggests syslogged diagnostics are best here (ugh). 591 */ 592 593 proxy = spi_alloc_device(ctlr); 594 if (!proxy) 595 return NULL; 596 597 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); 598 599 proxy->chip_select = chip->chip_select; 600 proxy->max_speed_hz = chip->max_speed_hz; 601 proxy->mode = chip->mode; 602 proxy->irq = chip->irq; 603 strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias)); 604 proxy->dev.platform_data = (void *) chip->platform_data; 605 proxy->controller_data = chip->controller_data; 606 proxy->controller_state = NULL; 607 608 if (chip->properties) { 609 status = device_add_properties(&proxy->dev, chip->properties); 610 if (status) { 611 dev_err(&ctlr->dev, 612 "failed to add properties to '%s': %d\n", 613 chip->modalias, status); 614 goto err_dev_put; 615 } 616 } 617 618 status = spi_add_device(proxy); 619 if (status < 0) 620 goto err_remove_props; 621 622 return proxy; 623 624 err_remove_props: 625 if (chip->properties) 626 device_remove_properties(&proxy->dev); 627 err_dev_put: 628 spi_dev_put(proxy); 629 return NULL; 630 } 631 EXPORT_SYMBOL_GPL(spi_new_device); 632 633 /** 634 * spi_unregister_device - unregister a single SPI device 635 * @spi: spi_device to unregister 636 * 637 * Start making the passed SPI device vanish. Normally this would be handled 638 * by spi_unregister_controller(). 
639 */ 640 void spi_unregister_device(struct spi_device *spi) 641 { 642 if (!spi) 643 return; 644 645 if (spi->dev.of_node) { 646 of_node_clear_flag(spi->dev.of_node, OF_POPULATED); 647 of_node_put(spi->dev.of_node); 648 } 649 if (ACPI_COMPANION(&spi->dev)) 650 acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev)); 651 device_unregister(&spi->dev); 652 } 653 EXPORT_SYMBOL_GPL(spi_unregister_device); 654 655 static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr, 656 struct spi_board_info *bi) 657 { 658 struct spi_device *dev; 659 660 if (ctlr->bus_num != bi->bus_num) 661 return; 662 663 dev = spi_new_device(ctlr, bi); 664 if (!dev) 665 dev_err(ctlr->dev.parent, "can't create new device for %s\n", 666 bi->modalias); 667 } 668 669 /** 670 * spi_register_board_info - register SPI devices for a given board 671 * @info: array of chip descriptors 672 * @n: how many descriptors are provided 673 * Context: can sleep 674 * 675 * Board-specific early init code calls this (probably during arch_initcall) 676 * with segments of the SPI device table. Any device nodes are created later, 677 * after the relevant parent SPI controller (bus_num) is defined. We keep 678 * this table of devices forever, so that reloading a controller driver will 679 * not make Linux forget about these hard-wired devices. 680 * 681 * Other code can also call this, e.g. a particular add-on board might provide 682 * SPI devices through its expansion connector, so code initializing that board 683 * would naturally declare its SPI devices. 684 * 685 * The board info passed can safely be __initdata ... but be careful of 686 * any embedded pointers (platform_data, etc), they're copied as-is. 687 * Device properties are deep-copied though. 688 * 689 * Return: zero on success, else a negative error code. 
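 *
 * For illustration only (the "foo" names and values below are invented),
 * board setup code typically looks something like this:
 *
 *	static struct spi_board_info foo_board_spi_devs[] __initdata = {
 *		{
 *			.modalias	= "foo-sensor",
 *			.max_speed_hz	= 1000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	static int __init foo_board_init(void)
 *	{
 *		return spi_register_board_info(foo_board_spi_devs,
 *					       ARRAY_SIZE(foo_board_spi_devs));
 *	}
 *	arch_initcall(foo_board_init);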
690 */ 691 int spi_register_board_info(struct spi_board_info const *info, unsigned n) 692 { 693 struct boardinfo *bi; 694 int i; 695 696 if (!n) 697 return 0; 698 699 bi = kcalloc(n, sizeof(*bi), GFP_KERNEL); 700 if (!bi) 701 return -ENOMEM; 702 703 for (i = 0; i < n; i++, bi++, info++) { 704 struct spi_controller *ctlr; 705 706 memcpy(&bi->board_info, info, sizeof(*info)); 707 if (info->properties) { 708 bi->board_info.properties = 709 property_entries_dup(info->properties); 710 if (IS_ERR(bi->board_info.properties)) 711 return PTR_ERR(bi->board_info.properties); 712 } 713 714 mutex_lock(&board_lock); 715 list_add_tail(&bi->list, &board_list); 716 list_for_each_entry(ctlr, &spi_controller_list, list) 717 spi_match_controller_to_boardinfo(ctlr, 718 &bi->board_info); 719 mutex_unlock(&board_lock); 720 } 721 722 return 0; 723 } 724 725 /*-------------------------------------------------------------------------*/ 726 727 static void spi_set_cs(struct spi_device *spi, bool enable) 728 { 729 if (spi->mode & SPI_CS_HIGH) 730 enable = !enable; 731 732 if (gpio_is_valid(spi->cs_gpio)) { 733 gpio_set_value(spi->cs_gpio, !enable); 734 /* Some SPI masters need both GPIO CS & slave_select */ 735 if ((spi->controller->flags & SPI_MASTER_GPIO_SS) && 736 spi->controller->set_cs) 737 spi->controller->set_cs(spi, !enable); 738 } else if (spi->controller->set_cs) { 739 spi->controller->set_cs(spi, !enable); 740 } 741 } 742 743 #ifdef CONFIG_HAS_DMA 744 static int spi_map_buf(struct spi_controller *ctlr, struct device *dev, 745 struct sg_table *sgt, void *buf, size_t len, 746 enum dma_data_direction dir) 747 { 748 const bool vmalloced_buf = is_vmalloc_addr(buf); 749 unsigned int max_seg_size = dma_get_max_seg_size(dev); 750 #ifdef CONFIG_HIGHMEM 751 const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE && 752 (unsigned long)buf < (PKMAP_BASE + 753 (LAST_PKMAP * PAGE_SIZE))); 754 #else 755 const bool kmap_buf = false; 756 #endif 757 int desc_len; 758 int sgs; 759 struct page *vm_page; 760 struct scatterlist *sg; 761 void *sg_buf; 762 size_t min; 763 int i, ret; 764 765 if (vmalloced_buf || kmap_buf) { 766 desc_len = min_t(int, max_seg_size, PAGE_SIZE); 767 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); 768 } else if (virt_addr_valid(buf)) { 769 desc_len = min_t(int, max_seg_size, ctlr->max_dma_len); 770 sgs = DIV_ROUND_UP(len, desc_len); 771 } else { 772 return -EINVAL; 773 } 774 775 ret = sg_alloc_table(sgt, sgs, GFP_KERNEL); 776 if (ret != 0) 777 return ret; 778 779 sg = &sgt->sgl[0]; 780 for (i = 0; i < sgs; i++) { 781 782 if (vmalloced_buf || kmap_buf) { 783 min = min_t(size_t, 784 len, desc_len - offset_in_page(buf)); 785 if (vmalloced_buf) 786 vm_page = vmalloc_to_page(buf); 787 else 788 vm_page = kmap_to_page(buf); 789 if (!vm_page) { 790 sg_free_table(sgt); 791 return -ENOMEM; 792 } 793 sg_set_page(sg, vm_page, 794 min, offset_in_page(buf)); 795 } else { 796 min = min_t(size_t, len, desc_len); 797 sg_buf = buf; 798 sg_set_buf(sg, sg_buf, min); 799 } 800 801 buf += min; 802 len -= min; 803 sg = sg_next(sg); 804 } 805 806 ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir); 807 if (!ret) 808 ret = -ENOMEM; 809 if (ret < 0) { 810 sg_free_table(sgt); 811 return ret; 812 } 813 814 sgt->nents = ret; 815 816 return 0; 817 } 818 819 static void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev, 820 struct sg_table *sgt, enum dma_data_direction dir) 821 { 822 if (sgt->orig_nents) { 823 dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir); 824 sg_free_table(sgt); 825 } 826 } 827 828 static int 
__spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) 829 { 830 struct device *tx_dev, *rx_dev; 831 struct spi_transfer *xfer; 832 int ret; 833 834 if (!ctlr->can_dma) 835 return 0; 836 837 if (ctlr->dma_tx) 838 tx_dev = ctlr->dma_tx->device->dev; 839 else 840 tx_dev = ctlr->dev.parent; 841 842 if (ctlr->dma_rx) 843 rx_dev = ctlr->dma_rx->device->dev; 844 else 845 rx_dev = ctlr->dev.parent; 846 847 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 848 if (!ctlr->can_dma(ctlr, msg->spi, xfer)) 849 continue; 850 851 if (xfer->tx_buf != NULL) { 852 ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg, 853 (void *)xfer->tx_buf, xfer->len, 854 DMA_TO_DEVICE); 855 if (ret != 0) 856 return ret; 857 } 858 859 if (xfer->rx_buf != NULL) { 860 ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg, 861 xfer->rx_buf, xfer->len, 862 DMA_FROM_DEVICE); 863 if (ret != 0) { 864 spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, 865 DMA_TO_DEVICE); 866 return ret; 867 } 868 } 869 } 870 871 ctlr->cur_msg_mapped = true; 872 873 return 0; 874 } 875 876 static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg) 877 { 878 struct spi_transfer *xfer; 879 struct device *tx_dev, *rx_dev; 880 881 if (!ctlr->cur_msg_mapped || !ctlr->can_dma) 882 return 0; 883 884 if (ctlr->dma_tx) 885 tx_dev = ctlr->dma_tx->device->dev; 886 else 887 tx_dev = ctlr->dev.parent; 888 889 if (ctlr->dma_rx) 890 rx_dev = ctlr->dma_rx->device->dev; 891 else 892 rx_dev = ctlr->dev.parent; 893 894 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 895 if (!ctlr->can_dma(ctlr, msg->spi, xfer)) 896 continue; 897 898 spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); 899 spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); 900 } 901 902 return 0; 903 } 904 #else /* !CONFIG_HAS_DMA */ 905 static inline int spi_map_buf(struct spi_controller *ctlr, struct device *dev, 906 struct sg_table *sgt, void *buf, size_t len, 907 enum dma_data_direction dir) 908 { 909 return -EINVAL; 910 } 911 912 static inline void spi_unmap_buf(struct spi_controller *ctlr, 913 struct device *dev, struct sg_table *sgt, 914 enum dma_data_direction dir) 915 { 916 } 917 918 static inline int __spi_map_msg(struct spi_controller *ctlr, 919 struct spi_message *msg) 920 { 921 return 0; 922 } 923 924 static inline int __spi_unmap_msg(struct spi_controller *ctlr, 925 struct spi_message *msg) 926 { 927 return 0; 928 } 929 #endif /* !CONFIG_HAS_DMA */ 930 931 static inline int spi_unmap_msg(struct spi_controller *ctlr, 932 struct spi_message *msg) 933 { 934 struct spi_transfer *xfer; 935 936 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 937 /* 938 * Restore the original value of tx_buf or rx_buf if they are 939 * NULL. 
940 */ 941 if (xfer->tx_buf == ctlr->dummy_tx) 942 xfer->tx_buf = NULL; 943 if (xfer->rx_buf == ctlr->dummy_rx) 944 xfer->rx_buf = NULL; 945 } 946 947 return __spi_unmap_msg(ctlr, msg); 948 } 949 950 static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) 951 { 952 struct spi_transfer *xfer; 953 void *tmp; 954 unsigned int max_tx, max_rx; 955 956 if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) { 957 max_tx = 0; 958 max_rx = 0; 959 960 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 961 if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) && 962 !xfer->tx_buf) 963 max_tx = max(xfer->len, max_tx); 964 if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) && 965 !xfer->rx_buf) 966 max_rx = max(xfer->len, max_rx); 967 } 968 969 if (max_tx) { 970 tmp = krealloc(ctlr->dummy_tx, max_tx, 971 GFP_KERNEL | GFP_DMA); 972 if (!tmp) 973 return -ENOMEM; 974 ctlr->dummy_tx = tmp; 975 memset(tmp, 0, max_tx); 976 } 977 978 if (max_rx) { 979 tmp = krealloc(ctlr->dummy_rx, max_rx, 980 GFP_KERNEL | GFP_DMA); 981 if (!tmp) 982 return -ENOMEM; 983 ctlr->dummy_rx = tmp; 984 } 985 986 if (max_tx || max_rx) { 987 list_for_each_entry(xfer, &msg->transfers, 988 transfer_list) { 989 if (!xfer->tx_buf) 990 xfer->tx_buf = ctlr->dummy_tx; 991 if (!xfer->rx_buf) 992 xfer->rx_buf = ctlr->dummy_rx; 993 } 994 } 995 } 996 997 return __spi_map_msg(ctlr, msg); 998 } 999 1000 /* 1001 * spi_transfer_one_message - Default implementation of transfer_one_message() 1002 * 1003 * This is a standard implementation of transfer_one_message() for 1004 * drivers which implement a transfer_one() operation. It provides 1005 * standard handling of delays and chip select management. 1006 */ 1007 static int spi_transfer_one_message(struct spi_controller *ctlr, 1008 struct spi_message *msg) 1009 { 1010 struct spi_transfer *xfer; 1011 bool keep_cs = false; 1012 int ret = 0; 1013 unsigned long long ms = 1; 1014 struct spi_statistics *statm = &ctlr->statistics; 1015 struct spi_statistics *stats = &msg->spi->statistics; 1016 1017 spi_set_cs(msg->spi, true); 1018 1019 SPI_STATISTICS_INCREMENT_FIELD(statm, messages); 1020 SPI_STATISTICS_INCREMENT_FIELD(stats, messages); 1021 1022 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 1023 trace_spi_transfer_start(msg, xfer); 1024 1025 spi_statistics_add_transfer_stats(statm, xfer, ctlr); 1026 spi_statistics_add_transfer_stats(stats, xfer, ctlr); 1027 1028 if (xfer->tx_buf || xfer->rx_buf) { 1029 reinit_completion(&ctlr->xfer_completion); 1030 1031 ret = ctlr->transfer_one(ctlr, msg->spi, xfer); 1032 if (ret < 0) { 1033 SPI_STATISTICS_INCREMENT_FIELD(statm, 1034 errors); 1035 SPI_STATISTICS_INCREMENT_FIELD(stats, 1036 errors); 1037 dev_err(&msg->spi->dev, 1038 "SPI transfer failed: %d\n", ret); 1039 goto out; 1040 } 1041 1042 if (ret > 0) { 1043 ret = 0; 1044 ms = 8LL * 1000LL * xfer->len; 1045 do_div(ms, xfer->speed_hz); 1046 ms += ms + 200; /* some tolerance */ 1047 1048 if (ms > UINT_MAX) 1049 ms = UINT_MAX; 1050 1051 ms = wait_for_completion_timeout(&ctlr->xfer_completion, 1052 msecs_to_jiffies(ms)); 1053 } 1054 1055 if (ms == 0) { 1056 SPI_STATISTICS_INCREMENT_FIELD(statm, 1057 timedout); 1058 SPI_STATISTICS_INCREMENT_FIELD(stats, 1059 timedout); 1060 dev_err(&msg->spi->dev, 1061 "SPI transfer timed out\n"); 1062 msg->status = -ETIMEDOUT; 1063 } 1064 } else { 1065 if (xfer->len) 1066 dev_err(&msg->spi->dev, 1067 "Bufferless transfer has length %u\n", 1068 xfer->len); 1069 } 1070 1071 trace_spi_transfer_stop(msg, xfer); 1072 1073 if (msg->status != 
-EINPROGRESS) 1074 goto out; 1075 1076 if (xfer->delay_usecs) { 1077 u16 us = xfer->delay_usecs; 1078 1079 if (us <= 10) 1080 udelay(us); 1081 else 1082 usleep_range(us, us + DIV_ROUND_UP(us, 10)); 1083 } 1084 1085 if (xfer->cs_change) { 1086 if (list_is_last(&xfer->transfer_list, 1087 &msg->transfers)) { 1088 keep_cs = true; 1089 } else { 1090 spi_set_cs(msg->spi, false); 1091 udelay(10); 1092 spi_set_cs(msg->spi, true); 1093 } 1094 } 1095 1096 msg->actual_length += xfer->len; 1097 } 1098 1099 out: 1100 if (ret != 0 || !keep_cs) 1101 spi_set_cs(msg->spi, false); 1102 1103 if (msg->status == -EINPROGRESS) 1104 msg->status = ret; 1105 1106 if (msg->status && ctlr->handle_err) 1107 ctlr->handle_err(ctlr, msg); 1108 1109 spi_res_release(ctlr, msg); 1110 1111 spi_finalize_current_message(ctlr); 1112 1113 return ret; 1114 } 1115 1116 /** 1117 * spi_finalize_current_transfer - report completion of a transfer 1118 * @ctlr: the controller reporting completion 1119 * 1120 * Called by SPI drivers using the core transfer_one_message() 1121 * implementation to notify it that the current interrupt driven 1122 * transfer has finished and the next one may be scheduled. 1123 */ 1124 void spi_finalize_current_transfer(struct spi_controller *ctlr) 1125 { 1126 complete(&ctlr->xfer_completion); 1127 } 1128 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); 1129 1130 /** 1131 * __spi_pump_messages - function which processes spi message queue 1132 * @ctlr: controller to process queue for 1133 * @in_kthread: true if we are in the context of the message pump thread 1134 * 1135 * This function checks if there is any spi message in the queue that 1136 * needs processing and if so call out to the driver to initialize hardware 1137 * and transfer each message. 1138 * 1139 * Note that it is called both from the kthread itself and also from 1140 * inside spi_sync(); the queue extraction handling at the top of the 1141 * function should deal with this safely. 
1142 */ 1143 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) 1144 { 1145 unsigned long flags; 1146 bool was_busy = false; 1147 int ret; 1148 1149 /* Lock queue */ 1150 spin_lock_irqsave(&ctlr->queue_lock, flags); 1151 1152 /* Make sure we are not already running a message */ 1153 if (ctlr->cur_msg) { 1154 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1155 return; 1156 } 1157 1158 /* If another context is idling the device then defer */ 1159 if (ctlr->idling) { 1160 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); 1161 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1162 return; 1163 } 1164 1165 /* Check if the queue is idle */ 1166 if (list_empty(&ctlr->queue) || !ctlr->running) { 1167 if (!ctlr->busy) { 1168 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1169 return; 1170 } 1171 1172 /* Only do teardown in the thread */ 1173 if (!in_kthread) { 1174 kthread_queue_work(&ctlr->kworker, 1175 &ctlr->pump_messages); 1176 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1177 return; 1178 } 1179 1180 ctlr->busy = false; 1181 ctlr->idling = true; 1182 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1183 1184 kfree(ctlr->dummy_rx); 1185 ctlr->dummy_rx = NULL; 1186 kfree(ctlr->dummy_tx); 1187 ctlr->dummy_tx = NULL; 1188 if (ctlr->unprepare_transfer_hardware && 1189 ctlr->unprepare_transfer_hardware(ctlr)) 1190 dev_err(&ctlr->dev, 1191 "failed to unprepare transfer hardware\n"); 1192 if (ctlr->auto_runtime_pm) { 1193 pm_runtime_mark_last_busy(ctlr->dev.parent); 1194 pm_runtime_put_autosuspend(ctlr->dev.parent); 1195 } 1196 trace_spi_controller_idle(ctlr); 1197 1198 spin_lock_irqsave(&ctlr->queue_lock, flags); 1199 ctlr->idling = false; 1200 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1201 return; 1202 } 1203 1204 /* Extract head of queue */ 1205 ctlr->cur_msg = 1206 list_first_entry(&ctlr->queue, struct spi_message, queue); 1207 1208 list_del_init(&ctlr->cur_msg->queue); 1209 if (ctlr->busy) 1210 was_busy = true; 1211 else 1212 ctlr->busy = true; 1213 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1214 1215 mutex_lock(&ctlr->io_mutex); 1216 1217 if (!was_busy && ctlr->auto_runtime_pm) { 1218 ret = pm_runtime_get_sync(ctlr->dev.parent); 1219 if (ret < 0) { 1220 dev_err(&ctlr->dev, "Failed to power device: %d\n", 1221 ret); 1222 mutex_unlock(&ctlr->io_mutex); 1223 return; 1224 } 1225 } 1226 1227 if (!was_busy) 1228 trace_spi_controller_busy(ctlr); 1229 1230 if (!was_busy && ctlr->prepare_transfer_hardware) { 1231 ret = ctlr->prepare_transfer_hardware(ctlr); 1232 if (ret) { 1233 dev_err(&ctlr->dev, 1234 "failed to prepare transfer hardware\n"); 1235 1236 if (ctlr->auto_runtime_pm) 1237 pm_runtime_put(ctlr->dev.parent); 1238 mutex_unlock(&ctlr->io_mutex); 1239 return; 1240 } 1241 } 1242 1243 trace_spi_message_start(ctlr->cur_msg); 1244 1245 if (ctlr->prepare_message) { 1246 ret = ctlr->prepare_message(ctlr, ctlr->cur_msg); 1247 if (ret) { 1248 dev_err(&ctlr->dev, "failed to prepare message: %d\n", 1249 ret); 1250 ctlr->cur_msg->status = ret; 1251 spi_finalize_current_message(ctlr); 1252 goto out; 1253 } 1254 ctlr->cur_msg_prepared = true; 1255 } 1256 1257 ret = spi_map_msg(ctlr, ctlr->cur_msg); 1258 if (ret) { 1259 ctlr->cur_msg->status = ret; 1260 spi_finalize_current_message(ctlr); 1261 goto out; 1262 } 1263 1264 ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg); 1265 if (ret) { 1266 dev_err(&ctlr->dev, 1267 "failed to transfer one message from queue\n"); 1268 goto out; 1269 } 1270 1271 out: 1272 mutex_unlock(&ctlr->io_mutex); 1273 1274 /* 
Prod the scheduler in case transfer_one() was busy waiting */ 1275 if (!ret) 1276 cond_resched(); 1277 } 1278 1279 /** 1280 * spi_pump_messages - kthread work function which processes spi message queue 1281 * @work: pointer to kthread work struct contained in the controller struct 1282 */ 1283 static void spi_pump_messages(struct kthread_work *work) 1284 { 1285 struct spi_controller *ctlr = 1286 container_of(work, struct spi_controller, pump_messages); 1287 1288 __spi_pump_messages(ctlr, true); 1289 } 1290 1291 static int spi_init_queue(struct spi_controller *ctlr) 1292 { 1293 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 1294 1295 ctlr->running = false; 1296 ctlr->busy = false; 1297 1298 kthread_init_worker(&ctlr->kworker); 1299 ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker, 1300 "%s", dev_name(&ctlr->dev)); 1301 if (IS_ERR(ctlr->kworker_task)) { 1302 dev_err(&ctlr->dev, "failed to create message pump task\n"); 1303 return PTR_ERR(ctlr->kworker_task); 1304 } 1305 kthread_init_work(&ctlr->pump_messages, spi_pump_messages); 1306 1307 /* 1308 * Controller config will indicate if this controller should run the 1309 * message pump with high (realtime) priority to reduce the transfer 1310 * latency on the bus by minimising the delay between a transfer 1311 * request and the scheduling of the message pump thread. Without this 1312 * setting the message pump thread will remain at default priority. 1313 */ 1314 if (ctlr->rt) { 1315 dev_info(&ctlr->dev, 1316 "will run message pump with realtime priority\n"); 1317 sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, ¶m); 1318 } 1319 1320 return 0; 1321 } 1322 1323 /** 1324 * spi_get_next_queued_message() - called by driver to check for queued 1325 * messages 1326 * @ctlr: the controller to check for queued messages 1327 * 1328 * If there are more messages in the queue, the next message is returned from 1329 * this call. 1330 * 1331 * Return: the next message in the queue, else NULL if the queue is empty. 1332 */ 1333 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr) 1334 { 1335 struct spi_message *next; 1336 unsigned long flags; 1337 1338 /* get a pointer to the next message, if any */ 1339 spin_lock_irqsave(&ctlr->queue_lock, flags); 1340 next = list_first_entry_or_null(&ctlr->queue, struct spi_message, 1341 queue); 1342 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1343 1344 return next; 1345 } 1346 EXPORT_SYMBOL_GPL(spi_get_next_queued_message); 1347 1348 /** 1349 * spi_finalize_current_message() - the current message is complete 1350 * @ctlr: the controller to return the message to 1351 * 1352 * Called by the driver to notify the core that the message in the front of the 1353 * queue is complete and can be removed from the queue. 
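 *
 * As a rough sketch only (not taken from a specific driver), a controller
 * driver providing its own transfer_one_message() would finish a message
 * along these lines once its last transfer has completed:
 *
 *	msg->status = ret;
 *	spi_finalize_current_message(ctlr);
 *	return ret;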
1354 */ 1355 void spi_finalize_current_message(struct spi_controller *ctlr) 1356 { 1357 struct spi_message *mesg; 1358 unsigned long flags; 1359 int ret; 1360 1361 spin_lock_irqsave(&ctlr->queue_lock, flags); 1362 mesg = ctlr->cur_msg; 1363 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1364 1365 spi_unmap_msg(ctlr, mesg); 1366 1367 if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { 1368 ret = ctlr->unprepare_message(ctlr, mesg); 1369 if (ret) { 1370 dev_err(&ctlr->dev, "failed to unprepare message: %d\n", 1371 ret); 1372 } 1373 } 1374 1375 spin_lock_irqsave(&ctlr->queue_lock, flags); 1376 ctlr->cur_msg = NULL; 1377 ctlr->cur_msg_prepared = false; 1378 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); 1379 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1380 1381 trace_spi_message_done(mesg); 1382 1383 mesg->state = NULL; 1384 if (mesg->complete) 1385 mesg->complete(mesg->context); 1386 } 1387 EXPORT_SYMBOL_GPL(spi_finalize_current_message); 1388 1389 static int spi_start_queue(struct spi_controller *ctlr) 1390 { 1391 unsigned long flags; 1392 1393 spin_lock_irqsave(&ctlr->queue_lock, flags); 1394 1395 if (ctlr->running || ctlr->busy) { 1396 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1397 return -EBUSY; 1398 } 1399 1400 ctlr->running = true; 1401 ctlr->cur_msg = NULL; 1402 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1403 1404 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); 1405 1406 return 0; 1407 } 1408 1409 static int spi_stop_queue(struct spi_controller *ctlr) 1410 { 1411 unsigned long flags; 1412 unsigned limit = 500; 1413 int ret = 0; 1414 1415 spin_lock_irqsave(&ctlr->queue_lock, flags); 1416 1417 /* 1418 * This is a bit lame, but is optimized for the common execution path. 1419 * A wait_queue on the ctlr->busy could be used, but then the common 1420 * execution path (pump_messages) would be required to call wake_up or 1421 * friends on every SPI message. Do this instead. 1422 */ 1423 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) { 1424 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1425 usleep_range(10000, 11000); 1426 spin_lock_irqsave(&ctlr->queue_lock, flags); 1427 } 1428 1429 if (!list_empty(&ctlr->queue) || ctlr->busy) 1430 ret = -EBUSY; 1431 else 1432 ctlr->running = false; 1433 1434 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1435 1436 if (ret) { 1437 dev_warn(&ctlr->dev, "could not stop message queue\n"); 1438 return ret; 1439 } 1440 return ret; 1441 } 1442 1443 static int spi_destroy_queue(struct spi_controller *ctlr) 1444 { 1445 int ret; 1446 1447 ret = spi_stop_queue(ctlr); 1448 1449 /* 1450 * kthread_flush_worker will block until all work is done. 1451 * If the reason that stop_queue timed out is that the work will never 1452 * finish, then it does no good to call flush/stop thread, so 1453 * return anyway. 
 */
	if (ret) {
		dev_err(&ctlr->dev, "problem destroying queue\n");
		return ret;
	}

	kthread_flush_worker(&ctlr->kworker);
	kthread_stop(ctlr->kworker_task);

	return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (!ctlr->running) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &ctlr->queue);
	if (!ctlr->busy && need_pump)
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message to be queued to the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
	int ret;

	ctlr->transfer = spi_queued_transfer;
	if (!ctlr->transfer_one_message)
		ctlr->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	ctlr->queued = true;
	ret = spi_start_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(ctlr);
err_init_queue:
	return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
			   struct device_node *nc)
{
	u32 value;
	int rc;

	/* Mode (clock phase/polarity/etc.)
*/ 1541 if (of_property_read_bool(nc, "spi-cpha")) 1542 spi->mode |= SPI_CPHA; 1543 if (of_property_read_bool(nc, "spi-cpol")) 1544 spi->mode |= SPI_CPOL; 1545 if (of_property_read_bool(nc, "spi-cs-high")) 1546 spi->mode |= SPI_CS_HIGH; 1547 if (of_property_read_bool(nc, "spi-3wire")) 1548 spi->mode |= SPI_3WIRE; 1549 if (of_property_read_bool(nc, "spi-lsb-first")) 1550 spi->mode |= SPI_LSB_FIRST; 1551 1552 /* Device DUAL/QUAD mode */ 1553 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 1554 switch (value) { 1555 case 1: 1556 break; 1557 case 2: 1558 spi->mode |= SPI_TX_DUAL; 1559 break; 1560 case 4: 1561 spi->mode |= SPI_TX_QUAD; 1562 break; 1563 default: 1564 dev_warn(&ctlr->dev, 1565 "spi-tx-bus-width %d not supported\n", 1566 value); 1567 break; 1568 } 1569 } 1570 1571 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 1572 switch (value) { 1573 case 1: 1574 break; 1575 case 2: 1576 spi->mode |= SPI_RX_DUAL; 1577 break; 1578 case 4: 1579 spi->mode |= SPI_RX_QUAD; 1580 break; 1581 default: 1582 dev_warn(&ctlr->dev, 1583 "spi-rx-bus-width %d not supported\n", 1584 value); 1585 break; 1586 } 1587 } 1588 1589 if (spi_controller_is_slave(ctlr)) { 1590 if (strcmp(nc->name, "slave")) { 1591 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n", 1592 nc); 1593 return -EINVAL; 1594 } 1595 return 0; 1596 } 1597 1598 /* Device address */ 1599 rc = of_property_read_u32(nc, "reg", &value); 1600 if (rc) { 1601 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n", 1602 nc, rc); 1603 return rc; 1604 } 1605 spi->chip_select = value; 1606 1607 /* Device speed */ 1608 rc = of_property_read_u32(nc, "spi-max-frequency", &value); 1609 if (rc) { 1610 dev_err(&ctlr->dev, 1611 "%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc); 1612 return rc; 1613 } 1614 spi->max_speed_hz = value; 1615 1616 return 0; 1617 } 1618 1619 static struct spi_device * 1620 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc) 1621 { 1622 struct spi_device *spi; 1623 int rc; 1624 1625 /* Alloc an spi_device */ 1626 spi = spi_alloc_device(ctlr); 1627 if (!spi) { 1628 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc); 1629 rc = -ENOMEM; 1630 goto err_out; 1631 } 1632 1633 /* Select device driver */ 1634 rc = of_modalias_node(nc, spi->modalias, 1635 sizeof(spi->modalias)); 1636 if (rc < 0) { 1637 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc); 1638 goto err_out; 1639 } 1640 1641 rc = of_spi_parse_dt(ctlr, spi, nc); 1642 if (rc) 1643 goto err_out; 1644 1645 /* Store a pointer to the node in the device structure */ 1646 of_node_get(nc); 1647 spi->dev.of_node = nc; 1648 1649 /* Register the new device */ 1650 rc = spi_add_device(spi); 1651 if (rc) { 1652 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc); 1653 goto err_of_node_put; 1654 } 1655 1656 return spi; 1657 1658 err_of_node_put: 1659 of_node_put(nc); 1660 err_out: 1661 spi_dev_put(spi); 1662 return ERR_PTR(rc); 1663 } 1664 1665 /** 1666 * of_register_spi_devices() - Register child devices onto the SPI bus 1667 * @ctlr: Pointer to spi_controller device 1668 * 1669 * Registers an spi_device for each child node of controller node which 1670 * represents a valid SPI slave. 
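 *
 * For illustration only (the compatible string and values are invented),
 * such a child node in the device tree might look like:
 *
 *	&spi0 {
 *		foo-sensor@1 {
 *			compatible = "acme,foo-sensor";
 *			reg = <1>;
 *			spi-max-frequency = <1000000>;
 *			spi-cpol;
 *			spi-cpha;
 *		};
 *	};
 *
 * where "reg" supplies the chip select and "spi-max-frequency" the default
 * transfer speed, as parsed by of_spi_parse_dt() above.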
1671 */ 1672 static void of_register_spi_devices(struct spi_controller *ctlr) 1673 { 1674 struct spi_device *spi; 1675 struct device_node *nc; 1676 1677 if (!ctlr->dev.of_node) 1678 return; 1679 1680 for_each_available_child_of_node(ctlr->dev.of_node, nc) { 1681 if (of_node_test_and_set_flag(nc, OF_POPULATED)) 1682 continue; 1683 spi = of_register_spi_device(ctlr, nc); 1684 if (IS_ERR(spi)) { 1685 dev_warn(&ctlr->dev, 1686 "Failed to create SPI device for %pOF\n", nc); 1687 of_node_clear_flag(nc, OF_POPULATED); 1688 } 1689 } 1690 } 1691 #else 1692 static void of_register_spi_devices(struct spi_controller *ctlr) { } 1693 #endif 1694 1695 #ifdef CONFIG_ACPI 1696 static void acpi_spi_parse_apple_properties(struct spi_device *spi) 1697 { 1698 struct acpi_device *dev = ACPI_COMPANION(&spi->dev); 1699 const union acpi_object *obj; 1700 1701 if (!x86_apple_machine) 1702 return; 1703 1704 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj) 1705 && obj->buffer.length >= 4) 1706 spi->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer; 1707 1708 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj) 1709 && obj->buffer.length == 8) 1710 spi->bits_per_word = *(u64 *)obj->buffer.pointer; 1711 1712 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj) 1713 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer) 1714 spi->mode |= SPI_LSB_FIRST; 1715 1716 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj) 1717 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 1718 spi->mode |= SPI_CPOL; 1719 1720 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj) 1721 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 1722 spi->mode |= SPI_CPHA; 1723 } 1724 1725 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 1726 { 1727 struct spi_device *spi = data; 1728 struct spi_controller *ctlr = spi->controller; 1729 1730 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 1731 struct acpi_resource_spi_serialbus *sb; 1732 1733 sb = &ares->data.spi_serial_bus; 1734 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 1735 /* 1736 * ACPI DeviceSelection numbering is handled by the 1737 * host controller driver in Windows and can vary 1738 * from driver to driver. In Linux we always expect 1739 * 0 .. max - 1 so we need to ask the driver to 1740 * translate between the two schemes. 
1741 */ 1742 if (ctlr->fw_translate_cs) { 1743 int cs = ctlr->fw_translate_cs(ctlr, 1744 sb->device_selection); 1745 if (cs < 0) 1746 return cs; 1747 spi->chip_select = cs; 1748 } else { 1749 spi->chip_select = sb->device_selection; 1750 } 1751 1752 spi->max_speed_hz = sb->connection_speed; 1753 1754 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) 1755 spi->mode |= SPI_CPHA; 1756 if (sb->clock_polarity == ACPI_SPI_START_HIGH) 1757 spi->mode |= SPI_CPOL; 1758 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) 1759 spi->mode |= SPI_CS_HIGH; 1760 } 1761 } else if (spi->irq < 0) { 1762 struct resource r; 1763 1764 if (acpi_dev_resource_interrupt(ares, 0, &r)) 1765 spi->irq = r.start; 1766 } 1767 1768 /* Always tell the ACPI core to skip this resource */ 1769 return 1; 1770 } 1771 1772 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, 1773 struct acpi_device *adev) 1774 { 1775 struct list_head resource_list; 1776 struct spi_device *spi; 1777 int ret; 1778 1779 if (acpi_bus_get_status(adev) || !adev->status.present || 1780 acpi_device_enumerated(adev)) 1781 return AE_OK; 1782 1783 spi = spi_alloc_device(ctlr); 1784 if (!spi) { 1785 dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n", 1786 dev_name(&adev->dev)); 1787 return AE_NO_MEMORY; 1788 } 1789 1790 ACPI_COMPANION_SET(&spi->dev, adev); 1791 spi->irq = -1; 1792 1793 INIT_LIST_HEAD(&resource_list); 1794 ret = acpi_dev_get_resources(adev, &resource_list, 1795 acpi_spi_add_resource, spi); 1796 acpi_dev_free_resource_list(&resource_list); 1797 1798 acpi_spi_parse_apple_properties(spi); 1799 1800 if (ret < 0 || !spi->max_speed_hz) { 1801 spi_dev_put(spi); 1802 return AE_OK; 1803 } 1804 1805 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, 1806 sizeof(spi->modalias)); 1807 1808 if (spi->irq < 0) 1809 spi->irq = acpi_dev_gpio_irq_get(adev, 0); 1810 1811 acpi_device_set_enumerated(adev); 1812 1813 adev->power.flags.ignore_parent = true; 1814 if (spi_add_device(spi)) { 1815 adev->power.flags.ignore_parent = false; 1816 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n", 1817 dev_name(&adev->dev)); 1818 spi_dev_put(spi); 1819 } 1820 1821 return AE_OK; 1822 } 1823 1824 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, 1825 void *data, void **return_value) 1826 { 1827 struct spi_controller *ctlr = data; 1828 struct acpi_device *adev; 1829 1830 if (acpi_bus_get_device(handle, &adev)) 1831 return AE_OK; 1832 1833 return acpi_register_spi_device(ctlr, adev); 1834 } 1835 1836 static void acpi_register_spi_devices(struct spi_controller *ctlr) 1837 { 1838 acpi_status status; 1839 acpi_handle handle; 1840 1841 handle = ACPI_HANDLE(ctlr->dev.parent); 1842 if (!handle) 1843 return; 1844 1845 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, 1846 acpi_spi_add_device, NULL, ctlr, NULL); 1847 if (ACPI_FAILURE(status)) 1848 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n"); 1849 } 1850 #else 1851 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {} 1852 #endif /* CONFIG_ACPI */ 1853 1854 static void spi_controller_release(struct device *dev) 1855 { 1856 struct spi_controller *ctlr; 1857 1858 ctlr = container_of(dev, struct spi_controller, dev); 1859 kfree(ctlr); 1860 } 1861 1862 static struct class spi_master_class = { 1863 .name = "spi_master", 1864 .owner = THIS_MODULE, 1865 .dev_release = spi_controller_release, 1866 .dev_groups = spi_master_groups, 1867 }; 1868 1869 #ifdef CONFIG_SPI_SLAVE 1870 /** 1871 * spi_slave_abort - abort the ongoing transfer 
request on an SPI slave 1872 * controller 1873 * @spi: device used for the current transfer 1874 */ 1875 int spi_slave_abort(struct spi_device *spi) 1876 { 1877 struct spi_controller *ctlr = spi->controller; 1878 1879 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort) 1880 return ctlr->slave_abort(ctlr); 1881 1882 return -ENOTSUPP; 1883 } 1884 EXPORT_SYMBOL_GPL(spi_slave_abort); 1885 1886 static int match_true(struct device *dev, void *data) 1887 { 1888 return 1; 1889 } 1890 1891 static ssize_t spi_slave_show(struct device *dev, 1892 struct device_attribute *attr, char *buf) 1893 { 1894 struct spi_controller *ctlr = container_of(dev, struct spi_controller, 1895 dev); 1896 struct device *child; 1897 1898 child = device_find_child(&ctlr->dev, NULL, match_true); 1899 return sprintf(buf, "%s\n", 1900 child ? to_spi_device(child)->modalias : NULL); 1901 } 1902 1903 static ssize_t spi_slave_store(struct device *dev, 1904 struct device_attribute *attr, const char *buf, 1905 size_t count) 1906 { 1907 struct spi_controller *ctlr = container_of(dev, struct spi_controller, 1908 dev); 1909 struct spi_device *spi; 1910 struct device *child; 1911 char name[32]; 1912 int rc; 1913 1914 rc = sscanf(buf, "%31s", name); 1915 if (rc != 1 || !name[0]) 1916 return -EINVAL; 1917 1918 child = device_find_child(&ctlr->dev, NULL, match_true); 1919 if (child) { 1920 /* Remove registered slave */ 1921 device_unregister(child); 1922 put_device(child); 1923 } 1924 1925 if (strcmp(name, "(null)")) { 1926 /* Register new slave */ 1927 spi = spi_alloc_device(ctlr); 1928 if (!spi) 1929 return -ENOMEM; 1930 1931 strlcpy(spi->modalias, name, sizeof(spi->modalias)); 1932 1933 rc = spi_add_device(spi); 1934 if (rc) { 1935 spi_dev_put(spi); 1936 return rc; 1937 } 1938 } 1939 1940 return count; 1941 } 1942 1943 static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store); 1944 1945 static struct attribute *spi_slave_attrs[] = { 1946 &dev_attr_slave.attr, 1947 NULL, 1948 }; 1949 1950 static const struct attribute_group spi_slave_group = { 1951 .attrs = spi_slave_attrs, 1952 }; 1953 1954 static const struct attribute_group *spi_slave_groups[] = { 1955 &spi_controller_statistics_group, 1956 &spi_slave_group, 1957 NULL, 1958 }; 1959 1960 static struct class spi_slave_class = { 1961 .name = "spi_slave", 1962 .owner = THIS_MODULE, 1963 .dev_release = spi_controller_release, 1964 .dev_groups = spi_slave_groups, 1965 }; 1966 #else 1967 extern struct class spi_slave_class; /* dummy */ 1968 #endif 1969 1970 /** 1971 * __spi_alloc_controller - allocate an SPI master or slave controller 1972 * @dev: the controller, possibly using the platform_bus 1973 * @size: how much zeroed driver-private data to allocate; the pointer to this 1974 * memory is in the driver_data field of the returned device, 1975 * accessible with spi_controller_get_devdata(). 1976 * @slave: flag indicating whether to allocate an SPI master (false) or SPI 1977 * slave (true) controller 1978 * Context: can sleep 1979 * 1980 * This call is used only by SPI controller drivers, which are the 1981 * only ones directly touching chip registers. It's how they allocate 1982 * an spi_controller structure, prior to calling spi_register_controller(). 1983 * 1984 * This must be called from context that can sleep. 1985 * 1986 * The caller is responsible for assigning the bus number and initializing the 1987 * controller's methods before calling spi_register_controller(); and (after 1988 * errors adding the device) calling spi_controller_put() to prevent a memory 1989 * leak. 
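 *
 * For illustration only (struct foo_priv stands in for whatever private
 * state a driver keeps), a master-mode driver usually allocates through
 * the spi_alloc_master() wrapper:
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);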
1990 * 1991 * Return: the SPI controller structure on success, else NULL. 1992 */ 1993 struct spi_controller *__spi_alloc_controller(struct device *dev, 1994 unsigned int size, bool slave) 1995 { 1996 struct spi_controller *ctlr; 1997 1998 if (!dev) 1999 return NULL; 2000 2001 ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL); 2002 if (!ctlr) 2003 return NULL; 2004 2005 device_initialize(&ctlr->dev); 2006 ctlr->bus_num = -1; 2007 ctlr->num_chipselect = 1; 2008 ctlr->slave = slave; 2009 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave) 2010 ctlr->dev.class = &spi_slave_class; 2011 else 2012 ctlr->dev.class = &spi_master_class; 2013 ctlr->dev.parent = dev; 2014 pm_suspend_ignore_children(&ctlr->dev, true); 2015 spi_controller_set_devdata(ctlr, &ctlr[1]); 2016 2017 return ctlr; 2018 } 2019 EXPORT_SYMBOL_GPL(__spi_alloc_controller); 2020 2021 #ifdef CONFIG_OF 2022 static int of_spi_register_master(struct spi_controller *ctlr) 2023 { 2024 int nb, i, *cs; 2025 struct device_node *np = ctlr->dev.of_node; 2026 2027 if (!np) 2028 return 0; 2029 2030 nb = of_gpio_named_count(np, "cs-gpios"); 2031 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); 2032 2033 /* Return error only for an incorrectly formed cs-gpios property */ 2034 if (nb == 0 || nb == -ENOENT) 2035 return 0; 2036 else if (nb < 0) 2037 return nb; 2038 2039 cs = devm_kzalloc(&ctlr->dev, sizeof(int) * ctlr->num_chipselect, 2040 GFP_KERNEL); 2041 ctlr->cs_gpios = cs; 2042 2043 if (!ctlr->cs_gpios) 2044 return -ENOMEM; 2045 2046 for (i = 0; i < ctlr->num_chipselect; i++) 2047 cs[i] = -ENOENT; 2048 2049 for (i = 0; i < nb; i++) 2050 cs[i] = of_get_named_gpio(np, "cs-gpios", i); 2051 2052 return 0; 2053 } 2054 #else 2055 static int of_spi_register_master(struct spi_controller *ctlr) 2056 { 2057 return 0; 2058 } 2059 #endif 2060 2061 /** 2062 * spi_register_controller - register SPI master or slave controller 2063 * @ctlr: initialized master, originally from spi_alloc_master() or 2064 * spi_alloc_slave() 2065 * Context: can sleep 2066 * 2067 * SPI controllers connect to their drivers using some non-SPI bus, 2068 * such as the platform bus. The final stage of probe() in that code 2069 * includes calling spi_register_controller() to hook up to this SPI bus glue. 2070 * 2071 * SPI controllers use board specific (often SOC specific) bus numbers, 2072 * and board-specific addressing for SPI devices combines those numbers 2073 * with chip select numbers. Since SPI does not directly support dynamic 2074 * device identification, boards need configuration tables telling which 2075 * chip is at which address. 2076 * 2077 * This must be called from context that can sleep. It returns zero on 2078 * success, else a negative error code (dropping the controller's refcount). 2079 * After a successful return, the caller is responsible for calling 2080 * spi_unregister_controller(). 2081 * 2082 * Return: zero on success, else a negative error code. 
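 *
 * As an illustration only, a minimal probe() for a hypothetical controller
 * driver might look roughly like the sketch below; foo_probe(),
 * foo_transfer_one() and struct foo_priv are made-up names and are not
 * defined anywhere in this file:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr;
 *		int ret;
 *
 *		ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *		if (!ctlr)
 *			return -ENOMEM;
 *
 *		ctlr->num_chipselect = 4;
 *		ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *		ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
 *		ctlr->transfer_one = foo_transfer_one;
 *		ctlr->dev.of_node = pdev->dev.of_node;
 *
 *		ret = spi_register_controller(ctlr);
 *		if (ret)
 *			spi_controller_put(ctlr);
 *		return ret;
 *	}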
2083 */ 2084 int spi_register_controller(struct spi_controller *ctlr) 2085 { 2086 struct device *dev = ctlr->dev.parent; 2087 struct boardinfo *bi; 2088 int status = -ENODEV; 2089 int id; 2090 2091 if (!dev) 2092 return -ENODEV; 2093 2094 if (!spi_controller_is_slave(ctlr)) { 2095 status = of_spi_register_master(ctlr); 2096 if (status) 2097 return status; 2098 } 2099 2100 /* even if it's just one always-selected device, there must 2101 * be at least one chipselect 2102 */ 2103 if (ctlr->num_chipselect == 0) 2104 return -EINVAL; 2105 /* allocate dynamic bus number using Linux idr */ 2106 if ((ctlr->bus_num < 0) && ctlr->dev.of_node) { 2107 id = of_alias_get_id(ctlr->dev.of_node, "spi"); 2108 if (id >= 0) { 2109 ctlr->bus_num = id; 2110 mutex_lock(&board_lock); 2111 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, 2112 ctlr->bus_num + 1, GFP_KERNEL); 2113 mutex_unlock(&board_lock); 2114 if (WARN(id < 0, "couldn't get idr")) 2115 return id == -ENOSPC ? -EBUSY : id; 2116 } 2117 } 2118 if (ctlr->bus_num < 0) { 2119 mutex_lock(&board_lock); 2120 id = idr_alloc(&spi_master_idr, ctlr, SPI_DYN_FIRST_BUS_NUM, 0, 2121 GFP_KERNEL); 2122 mutex_unlock(&board_lock); 2123 if (WARN(id < 0, "couldn't get idr")) 2124 return id; 2125 ctlr->bus_num = id; 2126 } 2127 INIT_LIST_HEAD(&ctlr->queue); 2128 spin_lock_init(&ctlr->queue_lock); 2129 spin_lock_init(&ctlr->bus_lock_spinlock); 2130 mutex_init(&ctlr->bus_lock_mutex); 2131 mutex_init(&ctlr->io_mutex); 2132 ctlr->bus_lock_flag = 0; 2133 init_completion(&ctlr->xfer_completion); 2134 if (!ctlr->max_dma_len) 2135 ctlr->max_dma_len = INT_MAX; 2136 2137 /* register the device, then userspace will see it. 2138 * registration fails if the bus ID is in use. 2139 */ 2140 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num); 2141 status = device_add(&ctlr->dev); 2142 if (status < 0) { 2143 /* free bus id */ 2144 mutex_lock(&board_lock); 2145 idr_remove(&spi_master_idr, ctlr->bus_num); 2146 mutex_unlock(&board_lock); 2147 goto done; 2148 } 2149 dev_dbg(dev, "registered %s %s\n", 2150 spi_controller_is_slave(ctlr) ? 
"slave" : "master", 2151 dev_name(&ctlr->dev)); 2152 2153 /* If we're using a queued driver, start the queue */ 2154 if (ctlr->transfer) 2155 dev_info(dev, "controller is unqueued, this is deprecated\n"); 2156 else { 2157 status = spi_controller_initialize_queue(ctlr); 2158 if (status) { 2159 device_del(&ctlr->dev); 2160 /* free bus id */ 2161 mutex_lock(&board_lock); 2162 idr_remove(&spi_master_idr, ctlr->bus_num); 2163 mutex_unlock(&board_lock); 2164 goto done; 2165 } 2166 } 2167 /* add statistics */ 2168 spin_lock_init(&ctlr->statistics.lock); 2169 2170 mutex_lock(&board_lock); 2171 list_add_tail(&ctlr->list, &spi_controller_list); 2172 list_for_each_entry(bi, &board_list, list) 2173 spi_match_controller_to_boardinfo(ctlr, &bi->board_info); 2174 mutex_unlock(&board_lock); 2175 2176 /* Register devices from the device tree and ACPI */ 2177 of_register_spi_devices(ctlr); 2178 acpi_register_spi_devices(ctlr); 2179 done: 2180 return status; 2181 } 2182 EXPORT_SYMBOL_GPL(spi_register_controller); 2183 2184 static void devm_spi_unregister(struct device *dev, void *res) 2185 { 2186 spi_unregister_controller(*(struct spi_controller **)res); 2187 } 2188 2189 /** 2190 * devm_spi_register_controller - register managed SPI master or slave 2191 * controller 2192 * @dev: device managing SPI controller 2193 * @ctlr: initialized controller, originally from spi_alloc_master() or 2194 * spi_alloc_slave() 2195 * Context: can sleep 2196 * 2197 * Register a SPI device as with spi_register_controller() which will 2198 * automatically be unregister 2199 * 2200 * Return: zero on success, else a negative error code. 2201 */ 2202 int devm_spi_register_controller(struct device *dev, 2203 struct spi_controller *ctlr) 2204 { 2205 struct spi_controller **ptr; 2206 int ret; 2207 2208 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); 2209 if (!ptr) 2210 return -ENOMEM; 2211 2212 ret = spi_register_controller(ctlr); 2213 if (!ret) { 2214 *ptr = ctlr; 2215 devres_add(dev, ptr); 2216 } else { 2217 devres_free(ptr); 2218 } 2219 2220 return ret; 2221 } 2222 EXPORT_SYMBOL_GPL(devm_spi_register_controller); 2223 2224 static int __unregister(struct device *dev, void *null) 2225 { 2226 spi_unregister_device(to_spi_device(dev)); 2227 return 0; 2228 } 2229 2230 /** 2231 * spi_unregister_controller - unregister SPI master or slave controller 2232 * @ctlr: the controller being unregistered 2233 * Context: can sleep 2234 * 2235 * This call is used only by SPI controller drivers, which are the 2236 * only ones directly touching chip registers. 2237 * 2238 * This must be called from context that can sleep. 
2239 */ 2240 void spi_unregister_controller(struct spi_controller *ctlr) 2241 { 2242 struct spi_controller *found; 2243 int dummy; 2244 2245 /* First make sure that this controller was ever added */ 2246 mutex_lock(&board_lock); 2247 found = idr_find(&spi_master_idr, ctlr->bus_num); 2248 mutex_unlock(&board_lock); 2249 if (found != ctlr) { 2250 dev_dbg(&ctlr->dev, 2251 "attempting to delete unregistered controller [%s]\n", 2252 dev_name(&ctlr->dev)); 2253 return; 2254 } 2255 if (ctlr->queued) { 2256 if (spi_destroy_queue(ctlr)) 2257 dev_err(&ctlr->dev, "queue remove failed\n"); 2258 } 2259 mutex_lock(&board_lock); 2260 list_del(&ctlr->list); 2261 mutex_unlock(&board_lock); 2262 2263 dummy = device_for_each_child(&ctlr->dev, NULL, __unregister); 2264 device_unregister(&ctlr->dev); 2265 /* free bus id */ 2266 mutex_lock(&board_lock); 2267 idr_remove(&spi_master_idr, ctlr->bus_num); 2268 mutex_unlock(&board_lock); 2269 } 2270 EXPORT_SYMBOL_GPL(spi_unregister_controller); 2271 2272 int spi_controller_suspend(struct spi_controller *ctlr) 2273 { 2274 int ret; 2275 2276 /* Basically no-ops for non-queued controllers */ 2277 if (!ctlr->queued) 2278 return 0; 2279 2280 ret = spi_stop_queue(ctlr); 2281 if (ret) 2282 dev_err(&ctlr->dev, "queue stop failed\n"); 2283 2284 return ret; 2285 } 2286 EXPORT_SYMBOL_GPL(spi_controller_suspend); 2287 2288 int spi_controller_resume(struct spi_controller *ctlr) 2289 { 2290 int ret; 2291 2292 if (!ctlr->queued) 2293 return 0; 2294 2295 ret = spi_start_queue(ctlr); 2296 if (ret) 2297 dev_err(&ctlr->dev, "queue restart failed\n"); 2298 2299 return ret; 2300 } 2301 EXPORT_SYMBOL_GPL(spi_controller_resume); 2302 2303 static int __spi_controller_match(struct device *dev, const void *data) 2304 { 2305 struct spi_controller *ctlr; 2306 const u16 *bus_num = data; 2307 2308 ctlr = container_of(dev, struct spi_controller, dev); 2309 return ctlr->bus_num == *bus_num; 2310 } 2311 2312 /** 2313 * spi_busnum_to_master - look up master associated with bus_num 2314 * @bus_num: the master's bus number 2315 * Context: can sleep 2316 * 2317 * This call may be used with devices that are registered after 2318 * arch init time. It returns a refcounted pointer to the relevant 2319 * spi_controller (which the caller must release), or NULL if there is 2320 * no such master registered. 2321 * 2322 * Return: the SPI master structure on success, else NULL. 2323 */ 2324 struct spi_controller *spi_busnum_to_master(u16 bus_num) 2325 { 2326 struct device *dev; 2327 struct spi_controller *ctlr = NULL; 2328 2329 dev = class_find_device(&spi_master_class, NULL, &bus_num, 2330 __spi_controller_match); 2331 if (dev) 2332 ctlr = container_of(dev, struct spi_controller, dev); 2333 /* reference got in class_find_device */ 2334 return ctlr; 2335 } 2336 EXPORT_SYMBOL_GPL(spi_busnum_to_master); 2337 2338 /*-------------------------------------------------------------------------*/ 2339 2340 /* Core methods for SPI resource management */ 2341 2342 /** 2343 * spi_res_alloc - allocate a spi resource that is life-cycle managed 2344 * during the processing of a spi_message while using 2345 * spi_transfer_one 2346 * @spi: the spi device for which we allocate memory 2347 * @release: the release code to execute for this resource 2348 * @size: size to alloc and return 2349 * @gfp: GFP allocation flags 2350 * 2351 * Return: the pointer to the allocated data 2352 * 2353 * This may get enhanced in the future to allocate from a memory pool 2354 * of the @spi_device or @spi_controller to avoid repeated allocations. 
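 *
 * A rough sketch of the intended call pattern, for illustration only;
 * foo_release() and struct foo_state are made-up names, and
 * spi_replace_transfers() below is a real in-tree user:
 *
 *	static void foo_release(struct spi_controller *ctlr,
 *				struct spi_message *msg, void *res)
 *	{
 *		... undo whatever the resource stands for ...
 *	}
 *
 *	ptr = spi_res_alloc(msg->spi, foo_release,
 *			    sizeof(struct foo_state), GFP_KERNEL);
 *	if (!ptr)
 *		return -ENOMEM;
 *	spi_res_add(msg, ptr);
 *	... from here on, foo_release() is called and the memory freed by
 *	    spi_res_release() when the message is finalized ...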
2355 */ 2356 void *spi_res_alloc(struct spi_device *spi, 2357 spi_res_release_t release, 2358 size_t size, gfp_t gfp) 2359 { 2360 struct spi_res *sres; 2361 2362 sres = kzalloc(sizeof(*sres) + size, gfp); 2363 if (!sres) 2364 return NULL; 2365 2366 INIT_LIST_HEAD(&sres->entry); 2367 sres->release = release; 2368 2369 return sres->data; 2370 } 2371 EXPORT_SYMBOL_GPL(spi_res_alloc); 2372 2373 /** 2374 * spi_res_free - free an spi resource 2375 * @res: pointer to the custom data of a resource 2376 * 2377 */ 2378 void spi_res_free(void *res) 2379 { 2380 struct spi_res *sres = container_of(res, struct spi_res, data); 2381 2382 if (!res) 2383 return; 2384 2385 WARN_ON(!list_empty(&sres->entry)); 2386 kfree(sres); 2387 } 2388 EXPORT_SYMBOL_GPL(spi_res_free); 2389 2390 /** 2391 * spi_res_add - add a spi_res to the spi_message 2392 * @message: the spi message 2393 * @res: the spi_resource 2394 */ 2395 void spi_res_add(struct spi_message *message, void *res) 2396 { 2397 struct spi_res *sres = container_of(res, struct spi_res, data); 2398 2399 WARN_ON(!list_empty(&sres->entry)); 2400 list_add_tail(&sres->entry, &message->resources); 2401 } 2402 EXPORT_SYMBOL_GPL(spi_res_add); 2403 2404 /** 2405 * spi_res_release - release all spi resources for this message 2406 * @ctlr: the @spi_controller 2407 * @message: the @spi_message 2408 */ 2409 void spi_res_release(struct spi_controller *ctlr, struct spi_message *message) 2410 { 2411 struct spi_res *res; 2412 2413 while (!list_empty(&message->resources)) { 2414 res = list_last_entry(&message->resources, 2415 struct spi_res, entry); 2416 2417 if (res->release) 2418 res->release(ctlr, message, res->data); 2419 2420 list_del(&res->entry); 2421 2422 kfree(res); 2423 } 2424 } 2425 EXPORT_SYMBOL_GPL(spi_res_release); 2426 2427 /*-------------------------------------------------------------------------*/ 2428 2429 /* Core methods for spi_message alterations */ 2430 2431 static void __spi_replace_transfers_release(struct spi_controller *ctlr, 2432 struct spi_message *msg, 2433 void *res) 2434 { 2435 struct spi_replaced_transfers *rxfer = res; 2436 size_t i; 2437 2438 /* call extra callback if requested */ 2439 if (rxfer->release) 2440 rxfer->release(ctlr, msg, res); 2441 2442 /* insert replaced transfers back into the message */ 2443 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); 2444 2445 /* remove the formerly inserted entries */ 2446 for (i = 0; i < rxfer->inserted; i++) 2447 list_del(&rxfer->inserted_transfers[i].transfer_list); 2448 } 2449 2450 /** 2451 * spi_replace_transfers - replace transfers with several transfers 2452 * and register change with spi_message.resources 2453 * @msg: the spi_message we work upon 2454 * @xfer_first: the first spi_transfer we want to replace 2455 * @remove: number of transfers to remove 2456 * @insert: the number of transfers we want to insert instead 2457 * @release: extra release code necessary in some circumstances 2458 * @extradatasize: extra data to allocate (with alignment guarantees 2459 * of struct @spi_transfer) 2460 * @gfp: gfp flags 2461 * 2462 * Returns: pointer to @spi_replaced_transfers, 2463 * PTR_ERR(...) in case of errors. 
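 *
 * A compressed sketch of the call pattern, for illustration only (error
 * handling and the buffer fix-ups are elided; __spi_split_transfer_maxsize()
 * below is the real in-tree user):
 *
 *	rxfer = spi_replace_transfers(msg, xfer, 1, 2, NULL, 0, GFP_KERNEL);
 *	if (IS_ERR(rxfer))
 *		return PTR_ERR(rxfer);
 *	... rxfer->inserted_transfers[0] and [1] start out as copies of
 *	    *xfer; adjust len, tx_buf and rx_buf of each copy as needed ...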
2464 */ 2465 struct spi_replaced_transfers *spi_replace_transfers( 2466 struct spi_message *msg, 2467 struct spi_transfer *xfer_first, 2468 size_t remove, 2469 size_t insert, 2470 spi_replaced_release_t release, 2471 size_t extradatasize, 2472 gfp_t gfp) 2473 { 2474 struct spi_replaced_transfers *rxfer; 2475 struct spi_transfer *xfer; 2476 size_t i; 2477 2478 /* allocate the structure using spi_res */ 2479 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, 2480 insert * sizeof(struct spi_transfer) 2481 + sizeof(struct spi_replaced_transfers) 2482 + extradatasize, 2483 gfp); 2484 if (!rxfer) 2485 return ERR_PTR(-ENOMEM); 2486 2487 /* the release code to invoke before running the generic release */ 2488 rxfer->release = release; 2489 2490 /* assign extradata */ 2491 if (extradatasize) 2492 rxfer->extradata = 2493 &rxfer->inserted_transfers[insert]; 2494 2495 /* init the replaced_transfers list */ 2496 INIT_LIST_HEAD(&rxfer->replaced_transfers); 2497 2498 /* assign the list_entry after which we should reinsert 2499 * the @replaced_transfers - it may be spi_message.messages! 2500 */ 2501 rxfer->replaced_after = xfer_first->transfer_list.prev; 2502 2503 /* remove the requested number of transfers */ 2504 for (i = 0; i < remove; i++) { 2505 /* if the entry after replaced_after it is msg->transfers 2506 * then we have been requested to remove more transfers 2507 * than are in the list 2508 */ 2509 if (rxfer->replaced_after->next == &msg->transfers) { 2510 dev_err(&msg->spi->dev, 2511 "requested to remove more spi_transfers than are available\n"); 2512 /* insert replaced transfers back into the message */ 2513 list_splice(&rxfer->replaced_transfers, 2514 rxfer->replaced_after); 2515 2516 /* free the spi_replace_transfer structure */ 2517 spi_res_free(rxfer); 2518 2519 /* and return with an error */ 2520 return ERR_PTR(-EINVAL); 2521 } 2522 2523 /* remove the entry after replaced_after from list of 2524 * transfers and add it to list of replaced_transfers 2525 */ 2526 list_move_tail(rxfer->replaced_after->next, 2527 &rxfer->replaced_transfers); 2528 } 2529 2530 /* create copy of the given xfer with identical settings 2531 * based on the first transfer to get removed 2532 */ 2533 for (i = 0; i < insert; i++) { 2534 /* we need to run in reverse order */ 2535 xfer = &rxfer->inserted_transfers[insert - 1 - i]; 2536 2537 /* copy all spi_transfer data */ 2538 memcpy(xfer, xfer_first, sizeof(*xfer)); 2539 2540 /* add to list */ 2541 list_add(&xfer->transfer_list, rxfer->replaced_after); 2542 2543 /* clear cs_change and delay_usecs for all but the last */ 2544 if (i) { 2545 xfer->cs_change = false; 2546 xfer->delay_usecs = 0; 2547 } 2548 } 2549 2550 /* set up inserted */ 2551 rxfer->inserted = insert; 2552 2553 /* and register it with spi_res/spi_message */ 2554 spi_res_add(msg, rxfer); 2555 2556 return rxfer; 2557 } 2558 EXPORT_SYMBOL_GPL(spi_replace_transfers); 2559 2560 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, 2561 struct spi_message *msg, 2562 struct spi_transfer **xferp, 2563 size_t maxsize, 2564 gfp_t gfp) 2565 { 2566 struct spi_transfer *xfer = *xferp, *xfers; 2567 struct spi_replaced_transfers *srt; 2568 size_t offset; 2569 size_t count, i; 2570 2571 /* warn once about this fact that we are splitting a transfer */ 2572 dev_warn_once(&msg->spi->dev, 2573 "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n", 2574 xfer->len, maxsize); 2575 2576 /* calculate how many we have to replace */ 2577 count = DIV_ROUND_UP(xfer->len, 
maxsize); 2578 2579 /* create replacement */ 2580 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp); 2581 if (IS_ERR(srt)) 2582 return PTR_ERR(srt); 2583 xfers = srt->inserted_transfers; 2584 2585 /* now handle each of those newly inserted spi_transfers 2586 * note that the replacement spi_transfers all are preset 2587 * to the same values as *xferp, so tx_buf, rx_buf and len 2588 * are all identical (as well as most others) 2589 * so we just have to fix up len and the pointers. 2590 * 2591 * this also includes support for the deprecated 2592 * spi_message.is_dma_mapped interface 2593 */ 2594 2595 /* the first transfer just needs the length modified, so we 2596 * run it outside the loop 2597 */ 2598 xfers[0].len = min_t(size_t, maxsize, xfer[0].len); 2599 2600 /* all the others need rx_buf/tx_buf also set */ 2601 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) { 2602 /* update rx_buf, tx_buf and dma */ 2603 if (xfers[i].rx_buf) 2604 xfers[i].rx_buf += offset; 2605 if (xfers[i].rx_dma) 2606 xfers[i].rx_dma += offset; 2607 if (xfers[i].tx_buf) 2608 xfers[i].tx_buf += offset; 2609 if (xfers[i].tx_dma) 2610 xfers[i].tx_dma += offset; 2611 2612 /* update length */ 2613 xfers[i].len = min(maxsize, xfers[i].len - offset); 2614 } 2615 2616 /* we set up xferp to the last entry we have inserted, 2617 * so that we skip those already split transfers 2618 */ 2619 *xferp = &xfers[count - 1]; 2620 2621 /* increment statistics counters */ 2622 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, 2623 transfers_split_maxsize); 2624 SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics, 2625 transfers_split_maxsize); 2626 2627 return 0; 2628 } 2629 2630 /** 2631 * spi_split_transfers_maxsize - split spi transfers into multiple transfers 2632 * when an individual transfer exceeds a 2633 * certain size 2634 * @ctlr: the @spi_controller for this transfer 2635 * @msg: the @spi_message to transform 2636 * @maxsize: the maximum length an individual transfer may have before it is split 2637 * @gfp: GFP allocation flags 2638 * 2639 * Return: status of transformation 2640 */ 2641 int spi_split_transfers_maxsize(struct spi_controller *ctlr, 2642 struct spi_message *msg, 2643 size_t maxsize, 2644 gfp_t gfp) 2645 { 2646 struct spi_transfer *xfer; 2647 int ret; 2648 2649 /* iterate over the transfer_list, 2650 * but note that xfer is advanced to the last transfer inserted 2651 * to avoid checking sizes again unnecessarily (also xfer may 2652 * potentially belong to a different list by the time the 2653 * replacement has happened) 2654 */ 2655 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 2656 if (xfer->len > maxsize) { 2657 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, 2658 maxsize, gfp); 2659 if (ret) 2660 return ret; 2661 } 2662 } 2663 2664 return 0; 2665 } 2666 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize); 2667 2668 /*-------------------------------------------------------------------------*/ 2669 2670 /* Core methods for SPI controller protocol drivers. Some of the 2671 * other core methods are currently defined as inline functions.
2672 */ 2673 2674 static int __spi_validate_bits_per_word(struct spi_controller *ctlr, 2675 u8 bits_per_word) 2676 { 2677 if (ctlr->bits_per_word_mask) { 2678 /* Only 32 bits fit in the mask */ 2679 if (bits_per_word > 32) 2680 return -EINVAL; 2681 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word))) 2682 return -EINVAL; 2683 } 2684 2685 return 0; 2686 } 2687 2688 /** 2689 * spi_setup - setup SPI mode and clock rate 2690 * @spi: the device whose settings are being modified 2691 * Context: can sleep, and no requests are queued to the device 2692 * 2693 * SPI protocol drivers may need to update the transfer mode if the 2694 * device doesn't work with its default. They may likewise need 2695 * to update clock rates or word sizes from initial values. This function 2696 * changes those settings, and must be called from a context that can sleep. 2697 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take 2698 * effect the next time the device is selected and data is transferred to 2699 * or from it. When this function returns, the spi device is deselected. 2700 * 2701 * Note that this call will fail if the protocol driver specifies an option 2702 * that the underlying controller or its driver does not support. For 2703 * example, not all hardware supports wire transfers using nine bit words, 2704 * LSB-first wire encoding, or active-high chipselects. 2705 * 2706 * Return: zero on success, else a negative error code. 2707 */ 2708 int spi_setup(struct spi_device *spi) 2709 { 2710 unsigned bad_bits, ugly_bits; 2711 int status; 2712 2713 /* check mode to prevent that DUAL and QUAD set at the same time 2714 */ 2715 if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) || 2716 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) { 2717 dev_err(&spi->dev, 2718 "setup: can not select dual and quad at the same time\n"); 2719 return -EINVAL; 2720 } 2721 /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden 2722 */ 2723 if ((spi->mode & SPI_3WIRE) && (spi->mode & 2724 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD))) 2725 return -EINVAL; 2726 /* help drivers fail *cleanly* when they need options 2727 * that aren't supported with their current controller 2728 */ 2729 bad_bits = spi->mode & ~spi->controller->mode_bits; 2730 ugly_bits = bad_bits & 2731 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD); 2732 if (ugly_bits) { 2733 dev_warn(&spi->dev, 2734 "setup: ignoring unsupported mode bits %x\n", 2735 ugly_bits); 2736 spi->mode &= ~ugly_bits; 2737 bad_bits &= ~ugly_bits; 2738 } 2739 if (bad_bits) { 2740 dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 2741 bad_bits); 2742 return -EINVAL; 2743 } 2744 2745 if (!spi->bits_per_word) 2746 spi->bits_per_word = 8; 2747 2748 status = __spi_validate_bits_per_word(spi->controller, 2749 spi->bits_per_word); 2750 if (status) 2751 return status; 2752 2753 if (!spi->max_speed_hz) 2754 spi->max_speed_hz = spi->controller->max_speed_hz; 2755 2756 if (spi->controller->setup) 2757 status = spi->controller->setup(spi); 2758 2759 spi_set_cs(spi, false); 2760 2761 dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n", 2762 (int) (spi->mode & (SPI_CPOL | SPI_CPHA)), 2763 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 2764 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 2765 (spi->mode & SPI_3WIRE) ? "3wire, " : "", 2766 (spi->mode & SPI_LOOP) ? 
"loopback, " : "", 2767 spi->bits_per_word, spi->max_speed_hz, 2768 status); 2769 2770 return status; 2771 } 2772 EXPORT_SYMBOL_GPL(spi_setup); 2773 2774 static int __spi_validate(struct spi_device *spi, struct spi_message *message) 2775 { 2776 struct spi_controller *ctlr = spi->controller; 2777 struct spi_transfer *xfer; 2778 int w_size; 2779 2780 if (list_empty(&message->transfers)) 2781 return -EINVAL; 2782 2783 /* Half-duplex links include original MicroWire, and ones with 2784 * only one data pin like SPI_3WIRE (switches direction) or where 2785 * either MOSI or MISO is missing. They can also be caused by 2786 * software limitations. 2787 */ 2788 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) || 2789 (spi->mode & SPI_3WIRE)) { 2790 unsigned flags = ctlr->flags; 2791 2792 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2793 if (xfer->rx_buf && xfer->tx_buf) 2794 return -EINVAL; 2795 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf) 2796 return -EINVAL; 2797 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf) 2798 return -EINVAL; 2799 } 2800 } 2801 2802 /** 2803 * Set transfer bits_per_word and max speed as spi device default if 2804 * it is not set for this transfer. 2805 * Set transfer tx_nbits and rx_nbits as single transfer default 2806 * (SPI_NBITS_SINGLE) if it is not set for this transfer. 2807 */ 2808 message->frame_length = 0; 2809 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2810 message->frame_length += xfer->len; 2811 if (!xfer->bits_per_word) 2812 xfer->bits_per_word = spi->bits_per_word; 2813 2814 if (!xfer->speed_hz) 2815 xfer->speed_hz = spi->max_speed_hz; 2816 if (!xfer->speed_hz) 2817 xfer->speed_hz = ctlr->max_speed_hz; 2818 2819 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz) 2820 xfer->speed_hz = ctlr->max_speed_hz; 2821 2822 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word)) 2823 return -EINVAL; 2824 2825 /* 2826 * SPI transfer length should be multiple of SPI word size 2827 * where SPI word size should be power-of-two multiple 2828 */ 2829 if (xfer->bits_per_word <= 8) 2830 w_size = 1; 2831 else if (xfer->bits_per_word <= 16) 2832 w_size = 2; 2833 else 2834 w_size = 4; 2835 2836 /* No partial transfers accepted */ 2837 if (xfer->len % w_size) 2838 return -EINVAL; 2839 2840 if (xfer->speed_hz && ctlr->min_speed_hz && 2841 xfer->speed_hz < ctlr->min_speed_hz) 2842 return -EINVAL; 2843 2844 if (xfer->tx_buf && !xfer->tx_nbits) 2845 xfer->tx_nbits = SPI_NBITS_SINGLE; 2846 if (xfer->rx_buf && !xfer->rx_nbits) 2847 xfer->rx_nbits = SPI_NBITS_SINGLE; 2848 /* check transfer tx/rx_nbits: 2849 * 1. check the value matches one of single, dual and quad 2850 * 2. 
check tx/rx_nbits match the mode in spi_device 2851 */ 2852 if (xfer->tx_buf) { 2853 if (xfer->tx_nbits != SPI_NBITS_SINGLE && 2854 xfer->tx_nbits != SPI_NBITS_DUAL && 2855 xfer->tx_nbits != SPI_NBITS_QUAD) 2856 return -EINVAL; 2857 if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 2858 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 2859 return -EINVAL; 2860 if ((xfer->tx_nbits == SPI_NBITS_QUAD) && 2861 !(spi->mode & SPI_TX_QUAD)) 2862 return -EINVAL; 2863 } 2864 /* check transfer rx_nbits */ 2865 if (xfer->rx_buf) { 2866 if (xfer->rx_nbits != SPI_NBITS_SINGLE && 2867 xfer->rx_nbits != SPI_NBITS_DUAL && 2868 xfer->rx_nbits != SPI_NBITS_QUAD) 2869 return -EINVAL; 2870 if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 2871 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 2872 return -EINVAL; 2873 if ((xfer->rx_nbits == SPI_NBITS_QUAD) && 2874 !(spi->mode & SPI_RX_QUAD)) 2875 return -EINVAL; 2876 } 2877 } 2878 2879 message->status = -EINPROGRESS; 2880 2881 return 0; 2882 } 2883 2884 static int __spi_async(struct spi_device *spi, struct spi_message *message) 2885 { 2886 struct spi_controller *ctlr = spi->controller; 2887 2888 message->spi = spi; 2889 2890 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async); 2891 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); 2892 2893 trace_spi_message_submit(message); 2894 2895 return ctlr->transfer(spi, message); 2896 } 2897 2898 /** 2899 * spi_async - asynchronous SPI transfer 2900 * @spi: device with which data will be exchanged 2901 * @message: describes the data transfers, including completion callback 2902 * Context: any (irqs may be blocked, etc) 2903 * 2904 * This call may be used in_irq and other contexts which can't sleep, 2905 * as well as from task contexts which can sleep. 2906 * 2907 * The completion callback is invoked in a context which can't sleep. 2908 * Before that invocation, the value of message->status is undefined. 2909 * When the callback is issued, message->status holds either zero (to 2910 * indicate complete success) or a negative error code. After that 2911 * callback returns, the driver which issued the transfer request may 2912 * deallocate the associated memory; it's no longer in use by any SPI 2913 * core or controller driver code. 2914 * 2915 * Note that although all messages to a spi_device are handled in 2916 * FIFO order, messages may go to different devices in other orders. 2917 * Some device might be higher priority, or have various "hard" access 2918 * time requirements, for example. 2919 * 2920 * On detection of any fault during the transfer, processing of 2921 * the entire message is aborted, and the device is deselected. 2922 * Until returning from the associated message completion callback, 2923 * no other spi_message queued to that device will be processed. 2924 * (This rule applies equally to all the synchronous transfer calls, 2925 * which are wrappers around this core asynchronous primitive.) 2926 * 2927 * Return: zero on success, else a negative error code. 
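 *
 * A minimal submission sketch, for illustration only; foo_complete(),
 * foo_msg, foo_xfer and foo_done are made-up names:
 *
 *	static void foo_complete(void *context)
 *	{
 *		complete(context);
 *	}
 *
 *	spi_message_init(&foo_msg);
 *	spi_message_add_tail(&foo_xfer, &foo_msg);
 *	foo_msg.complete = foo_complete;
 *	foo_msg.context = &foo_done;
 *	ret = spi_async(spi, &foo_msg);
 *	... on success, neither foo_msg nor its buffers may be touched
 *	    until foo_complete() has run and foo_msg.status is set ...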
2928 */ 2929 int spi_async(struct spi_device *spi, struct spi_message *message) 2930 { 2931 struct spi_controller *ctlr = spi->controller; 2932 int ret; 2933 unsigned long flags; 2934 2935 ret = __spi_validate(spi, message); 2936 if (ret != 0) 2937 return ret; 2938 2939 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 2940 2941 if (ctlr->bus_lock_flag) 2942 ret = -EBUSY; 2943 else 2944 ret = __spi_async(spi, message); 2945 2946 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 2947 2948 return ret; 2949 } 2950 EXPORT_SYMBOL_GPL(spi_async); 2951 2952 /** 2953 * spi_async_locked - version of spi_async with exclusive bus usage 2954 * @spi: device with which data will be exchanged 2955 * @message: describes the data transfers, including completion callback 2956 * Context: any (irqs may be blocked, etc) 2957 * 2958 * This call may be used in_irq and other contexts which can't sleep, 2959 * as well as from task contexts which can sleep. 2960 * 2961 * The completion callback is invoked in a context which can't sleep. 2962 * Before that invocation, the value of message->status is undefined. 2963 * When the callback is issued, message->status holds either zero (to 2964 * indicate complete success) or a negative error code. After that 2965 * callback returns, the driver which issued the transfer request may 2966 * deallocate the associated memory; it's no longer in use by any SPI 2967 * core or controller driver code. 2968 * 2969 * Note that although all messages to a spi_device are handled in 2970 * FIFO order, messages may go to different devices in other orders. 2971 * Some device might be higher priority, or have various "hard" access 2972 * time requirements, for example. 2973 * 2974 * On detection of any fault during the transfer, processing of 2975 * the entire message is aborted, and the device is deselected. 2976 * Until returning from the associated message completion callback, 2977 * no other spi_message queued to that device will be processed. 2978 * (This rule applies equally to all the synchronous transfer calls, 2979 * which are wrappers around this core asynchronous primitive.) 2980 * 2981 * Return: zero on success, else a negative error code. 
2982 */ 2983 int spi_async_locked(struct spi_device *spi, struct spi_message *message) 2984 { 2985 struct spi_controller *ctlr = spi->controller; 2986 int ret; 2987 unsigned long flags; 2988 2989 ret = __spi_validate(spi, message); 2990 if (ret != 0) 2991 return ret; 2992 2993 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 2994 2995 ret = __spi_async(spi, message); 2996 2997 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 2998 2999 return ret; 3000 3001 } 3002 EXPORT_SYMBOL_GPL(spi_async_locked); 3003 3004 3005 int spi_flash_read(struct spi_device *spi, 3006 struct spi_flash_read_message *msg) 3007 3008 { 3009 struct spi_controller *master = spi->controller; 3010 struct device *rx_dev = NULL; 3011 int ret; 3012 3013 if ((msg->opcode_nbits == SPI_NBITS_DUAL || 3014 msg->addr_nbits == SPI_NBITS_DUAL) && 3015 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 3016 return -EINVAL; 3017 if ((msg->opcode_nbits == SPI_NBITS_QUAD || 3018 msg->addr_nbits == SPI_NBITS_QUAD) && 3019 !(spi->mode & SPI_TX_QUAD)) 3020 return -EINVAL; 3021 if (msg->data_nbits == SPI_NBITS_DUAL && 3022 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 3023 return -EINVAL; 3024 if (msg->data_nbits == SPI_NBITS_QUAD && 3025 !(spi->mode & SPI_RX_QUAD)) 3026 return -EINVAL; 3027 3028 if (master->auto_runtime_pm) { 3029 ret = pm_runtime_get_sync(master->dev.parent); 3030 if (ret < 0) { 3031 dev_err(&master->dev, "Failed to power device: %d\n", 3032 ret); 3033 return ret; 3034 } 3035 } 3036 3037 mutex_lock(&master->bus_lock_mutex); 3038 mutex_lock(&master->io_mutex); 3039 if (master->dma_rx && master->spi_flash_can_dma(spi, msg)) { 3040 rx_dev = master->dma_rx->device->dev; 3041 ret = spi_map_buf(master, rx_dev, &msg->rx_sg, 3042 msg->buf, msg->len, 3043 DMA_FROM_DEVICE); 3044 if (!ret) 3045 msg->cur_msg_mapped = true; 3046 } 3047 ret = master->spi_flash_read(spi, msg); 3048 if (msg->cur_msg_mapped) 3049 spi_unmap_buf(master, rx_dev, &msg->rx_sg, 3050 DMA_FROM_DEVICE); 3051 mutex_unlock(&master->io_mutex); 3052 mutex_unlock(&master->bus_lock_mutex); 3053 3054 if (master->auto_runtime_pm) 3055 pm_runtime_put(master->dev.parent); 3056 3057 return ret; 3058 } 3059 EXPORT_SYMBOL_GPL(spi_flash_read); 3060 3061 /*-------------------------------------------------------------------------*/ 3062 3063 /* Utility methods for SPI protocol drivers, layered on 3064 * top of the core. Some other utility methods are defined as 3065 * inline functions. 3066 */ 3067 3068 static void spi_complete(void *arg) 3069 { 3070 complete(arg); 3071 } 3072 3073 static int __spi_sync(struct spi_device *spi, struct spi_message *message) 3074 { 3075 DECLARE_COMPLETION_ONSTACK(done); 3076 int status; 3077 struct spi_controller *ctlr = spi->controller; 3078 unsigned long flags; 3079 3080 status = __spi_validate(spi, message); 3081 if (status != 0) 3082 return status; 3083 3084 message->complete = spi_complete; 3085 message->context = &done; 3086 message->spi = spi; 3087 3088 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync); 3089 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync); 3090 3091 /* If we're not using the legacy transfer method then we will 3092 * try to transfer in the calling context so special case. 3093 * This code would be less tricky if we could remove the 3094 * support for driver implemented message queues. 
3095 */ 3096 if (ctlr->transfer == spi_queued_transfer) { 3097 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 3098 3099 trace_spi_message_submit(message); 3100 3101 status = __spi_queued_transfer(spi, message, false); 3102 3103 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3104 } else { 3105 status = spi_async_locked(spi, message); 3106 } 3107 3108 if (status == 0) { 3109 /* Push out the messages in the calling context if we 3110 * can. 3111 */ 3112 if (ctlr->transfer == spi_queued_transfer) { 3113 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, 3114 spi_sync_immediate); 3115 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, 3116 spi_sync_immediate); 3117 __spi_pump_messages(ctlr, false); 3118 } 3119 3120 wait_for_completion(&done); 3121 status = message->status; 3122 } 3123 message->context = NULL; 3124 return status; 3125 } 3126 3127 /** 3128 * spi_sync - blocking/synchronous SPI data transfers 3129 * @spi: device with which data will be exchanged 3130 * @message: describes the data transfers 3131 * Context: can sleep 3132 * 3133 * This call may only be used from a context that may sleep. The sleep 3134 * is non-interruptible, and has no timeout. Low-overhead controller 3135 * drivers may DMA directly into and out of the message buffers. 3136 * 3137 * Note that the SPI device's chip select is active during the message, 3138 * and then is normally disabled between messages. Drivers for some 3139 * frequently-used devices may want to minimize costs of selecting a chip, 3140 * by leaving it selected in anticipation that the next message will go 3141 * to the same chip. (That may increase power usage.) 3142 * 3143 * Also, the caller is guaranteeing that the memory associated with the 3144 * message will not be freed before this call returns. 3145 * 3146 * Return: zero on success, else a negative error code. 3147 */ 3148 int spi_sync(struct spi_device *spi, struct spi_message *message) 3149 { 3150 int ret; 3151 3152 mutex_lock(&spi->controller->bus_lock_mutex); 3153 ret = __spi_sync(spi, message); 3154 mutex_unlock(&spi->controller->bus_lock_mutex); 3155 3156 return ret; 3157 } 3158 EXPORT_SYMBOL_GPL(spi_sync); 3159 3160 /** 3161 * spi_sync_locked - version of spi_sync with exclusive bus usage 3162 * @spi: device with which data will be exchanged 3163 * @message: describes the data transfers 3164 * Context: can sleep 3165 * 3166 * This call may only be used from a context that may sleep. The sleep 3167 * is non-interruptible, and has no timeout. Low-overhead controller 3168 * drivers may DMA directly into and out of the message buffers. 3169 * 3170 * This call should be used by drivers that require exclusive access to the 3171 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must 3172 * be released by a spi_bus_unlock call when the exclusive access is over. 3173 * 3174 * Return: zero on success, else a negative error code. 3175 */ 3176 int spi_sync_locked(struct spi_device *spi, struct spi_message *message) 3177 { 3178 return __spi_sync(spi, message); 3179 } 3180 EXPORT_SYMBOL_GPL(spi_sync_locked); 3181 3182 /** 3183 * spi_bus_lock - obtain a lock for exclusive SPI bus usage 3184 * @ctlr: SPI bus master that should be locked for exclusive bus access 3185 * Context: can sleep 3186 * 3187 * This call may only be used from a context that may sleep. The sleep 3188 * is non-interruptible, and has no timeout. 3189 * 3190 * This call should be used by drivers that require exclusive access to the 3191 * SPI bus. 
The SPI bus must be released by a spi_bus_unlock call when the 3192 * exclusive access is over. Data transfer must be done by spi_sync_locked 3193 * and spi_async_locked calls when the SPI bus lock is held. 3194 * 3195 * Return: always zero. 3196 */ 3197 int spi_bus_lock(struct spi_controller *ctlr) 3198 { 3199 unsigned long flags; 3200 3201 mutex_lock(&ctlr->bus_lock_mutex); 3202 3203 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 3204 ctlr->bus_lock_flag = 1; 3205 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3206 3207 /* mutex remains locked until spi_bus_unlock is called */ 3208 3209 return 0; 3210 } 3211 EXPORT_SYMBOL_GPL(spi_bus_lock); 3212 3213 /** 3214 * spi_bus_unlock - release the lock for exclusive SPI bus usage 3215 * @ctlr: SPI bus master that was locked for exclusive bus access 3216 * Context: can sleep 3217 * 3218 * This call may only be used from a context that may sleep. The sleep 3219 * is non-interruptible, and has no timeout. 3220 * 3221 * This call releases an SPI bus lock previously obtained by an spi_bus_lock 3222 * call. 3223 * 3224 * Return: always zero. 3225 */ 3226 int spi_bus_unlock(struct spi_controller *ctlr) 3227 { 3228 ctlr->bus_lock_flag = 0; 3229 3230 mutex_unlock(&ctlr->bus_lock_mutex); 3231 3232 return 0; 3233 } 3234 EXPORT_SYMBOL_GPL(spi_bus_unlock); 3235 3236 /* portable code must never pass more than 32 bytes */ 3237 #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) 3238 3239 static u8 *buf; 3240 3241 /** 3242 * spi_write_then_read - SPI synchronous write followed by read 3243 * @spi: device with which data will be exchanged 3244 * @txbuf: data to be written (need not be dma-safe) 3245 * @n_tx: size of txbuf, in bytes 3246 * @rxbuf: buffer into which data will be read (need not be dma-safe) 3247 * @n_rx: size of rxbuf, in bytes 3248 * Context: can sleep 3249 * 3250 * This performs a half duplex MicroWire style transaction with the 3251 * device, sending txbuf and then reading rxbuf. The return value 3252 * is zero for success, else a negative errno status code. 3253 * This call may only be used from a context that may sleep. 3254 * 3255 * Parameters to this routine are always copied using a small buffer; 3256 * portable code should never use this for more than 32 bytes. 3257 * Performance-sensitive or bulk transfer code should instead use 3258 * spi_{async,sync}() calls with dma-safe buffers. 3259 * 3260 * Return: zero on success, else a negative error code. 3261 */ 3262 int spi_write_then_read(struct spi_device *spi, 3263 const void *txbuf, unsigned n_tx, 3264 void *rxbuf, unsigned n_rx) 3265 { 3266 static DEFINE_MUTEX(lock); 3267 3268 int status; 3269 struct spi_message message; 3270 struct spi_transfer x[2]; 3271 u8 *local_buf; 3272 3273 /* Use preallocated DMA-safe buffer if we can. We can't avoid 3274 * copying here, (as a pure convenience thing), but we can 3275 * keep heap costs out of the hot path unless someone else is 3276 * using the pre-allocated buffer or the transfer is too large. 
3277 */ 3278 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { 3279 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), 3280 GFP_KERNEL | GFP_DMA); 3281 if (!local_buf) 3282 return -ENOMEM; 3283 } else { 3284 local_buf = buf; 3285 } 3286 3287 spi_message_init(&message); 3288 memset(x, 0, sizeof(x)); 3289 if (n_tx) { 3290 x[0].len = n_tx; 3291 spi_message_add_tail(&x[0], &message); 3292 } 3293 if (n_rx) { 3294 x[1].len = n_rx; 3295 spi_message_add_tail(&x[1], &message); 3296 } 3297 3298 memcpy(local_buf, txbuf, n_tx); 3299 x[0].tx_buf = local_buf; 3300 x[1].rx_buf = local_buf + n_tx; 3301 3302 /* do the i/o */ 3303 status = spi_sync(spi, &message); 3304 if (status == 0) 3305 memcpy(rxbuf, x[1].rx_buf, n_rx); 3306 3307 if (x[0].tx_buf == buf) 3308 mutex_unlock(&lock); 3309 else 3310 kfree(local_buf); 3311 3312 return status; 3313 } 3314 EXPORT_SYMBOL_GPL(spi_write_then_read); 3315 3316 /*-------------------------------------------------------------------------*/ 3317 3318 #if IS_ENABLED(CONFIG_OF_DYNAMIC) 3319 static int __spi_of_device_match(struct device *dev, void *data) 3320 { 3321 return dev->of_node == data; 3322 } 3323 3324 /* must call put_device() when done with returned spi_device device */ 3325 static struct spi_device *of_find_spi_device_by_node(struct device_node *node) 3326 { 3327 struct device *dev = bus_find_device(&spi_bus_type, NULL, node, 3328 __spi_of_device_match); 3329 return dev ? to_spi_device(dev) : NULL; 3330 } 3331 3332 static int __spi_of_controller_match(struct device *dev, const void *data) 3333 { 3334 return dev->of_node == data; 3335 } 3336 3337 /* the spi controllers are not using spi_bus, so we find it with another way */ 3338 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node) 3339 { 3340 struct device *dev; 3341 3342 dev = class_find_device(&spi_master_class, NULL, node, 3343 __spi_of_controller_match); 3344 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) 3345 dev = class_find_device(&spi_slave_class, NULL, node, 3346 __spi_of_controller_match); 3347 if (!dev) 3348 return NULL; 3349 3350 /* reference got in class_find_device */ 3351 return container_of(dev, struct spi_controller, dev); 3352 } 3353 3354 static int of_spi_notify(struct notifier_block *nb, unsigned long action, 3355 void *arg) 3356 { 3357 struct of_reconfig_data *rd = arg; 3358 struct spi_controller *ctlr; 3359 struct spi_device *spi; 3360 3361 switch (of_reconfig_get_state_change(action, arg)) { 3362 case OF_RECONFIG_CHANGE_ADD: 3363 ctlr = of_find_spi_controller_by_node(rd->dn->parent); 3364 if (ctlr == NULL) 3365 return NOTIFY_OK; /* not for us */ 3366 3367 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { 3368 put_device(&ctlr->dev); 3369 return NOTIFY_OK; 3370 } 3371 3372 spi = of_register_spi_device(ctlr, rd->dn); 3373 put_device(&ctlr->dev); 3374 3375 if (IS_ERR(spi)) { 3376 pr_err("%s: failed to create for '%pOF'\n", 3377 __func__, rd->dn); 3378 of_node_clear_flag(rd->dn, OF_POPULATED); 3379 return notifier_from_errno(PTR_ERR(spi)); 3380 } 3381 break; 3382 3383 case OF_RECONFIG_CHANGE_REMOVE: 3384 /* already depopulated? */ 3385 if (!of_node_check_flag(rd->dn, OF_POPULATED)) 3386 return NOTIFY_OK; 3387 3388 /* find our device by node */ 3389 spi = of_find_spi_device_by_node(rd->dn); 3390 if (spi == NULL) 3391 return NOTIFY_OK; /* no? 
not meant for us */ 3392 3393 /* unregister takes one ref away */ 3394 spi_unregister_device(spi); 3395 3396 /* and put the reference of the find */ 3397 put_device(&spi->dev); 3398 break; 3399 } 3400 3401 return NOTIFY_OK; 3402 } 3403 3404 static struct notifier_block spi_of_notifier = { 3405 .notifier_call = of_spi_notify, 3406 }; 3407 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 3408 extern struct notifier_block spi_of_notifier; 3409 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 3410 3411 #if IS_ENABLED(CONFIG_ACPI) 3412 static int spi_acpi_controller_match(struct device *dev, const void *data) 3413 { 3414 return ACPI_COMPANION(dev->parent) == data; 3415 } 3416 3417 static int spi_acpi_device_match(struct device *dev, void *data) 3418 { 3419 return ACPI_COMPANION(dev) == data; 3420 } 3421 3422 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev) 3423 { 3424 struct device *dev; 3425 3426 dev = class_find_device(&spi_master_class, NULL, adev, 3427 spi_acpi_controller_match); 3428 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) 3429 dev = class_find_device(&spi_slave_class, NULL, adev, 3430 spi_acpi_controller_match); 3431 if (!dev) 3432 return NULL; 3433 3434 return container_of(dev, struct spi_controller, dev); 3435 } 3436 3437 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) 3438 { 3439 struct device *dev; 3440 3441 dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match); 3442 3443 return dev ? to_spi_device(dev) : NULL; 3444 } 3445 3446 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value, 3447 void *arg) 3448 { 3449 struct acpi_device *adev = arg; 3450 struct spi_controller *ctlr; 3451 struct spi_device *spi; 3452 3453 switch (value) { 3454 case ACPI_RECONFIG_DEVICE_ADD: 3455 ctlr = acpi_spi_find_controller_by_adev(adev->parent); 3456 if (!ctlr) 3457 break; 3458 3459 acpi_register_spi_device(ctlr, adev); 3460 put_device(&ctlr->dev); 3461 break; 3462 case ACPI_RECONFIG_DEVICE_REMOVE: 3463 if (!acpi_device_enumerated(adev)) 3464 break; 3465 3466 spi = acpi_spi_find_device_by_adev(adev); 3467 if (!spi) 3468 break; 3469 3470 spi_unregister_device(spi); 3471 put_device(&spi->dev); 3472 break; 3473 } 3474 3475 return NOTIFY_OK; 3476 } 3477 3478 static struct notifier_block spi_acpi_notifier = { 3479 .notifier_call = acpi_spi_notify, 3480 }; 3481 #else 3482 extern struct notifier_block spi_acpi_notifier; 3483 #endif 3484 3485 static int __init spi_init(void) 3486 { 3487 int status; 3488 3489 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 3490 if (!buf) { 3491 status = -ENOMEM; 3492 goto err0; 3493 } 3494 3495 status = bus_register(&spi_bus_type); 3496 if (status < 0) 3497 goto err1; 3498 3499 status = class_register(&spi_master_class); 3500 if (status < 0) 3501 goto err2; 3502 3503 if (IS_ENABLED(CONFIG_SPI_SLAVE)) { 3504 status = class_register(&spi_slave_class); 3505 if (status < 0) 3506 goto err3; 3507 } 3508 3509 if (IS_ENABLED(CONFIG_OF_DYNAMIC)) 3510 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); 3511 if (IS_ENABLED(CONFIG_ACPI)) 3512 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier)); 3513 3514 return 0; 3515 3516 err3: 3517 class_unregister(&spi_master_class); 3518 err2: 3519 bus_unregister(&spi_bus_type); 3520 err1: 3521 kfree(buf); 3522 buf = NULL; 3523 err0: 3524 return status; 3525 } 3526 3527 /* board_info is normally registered in arch_initcall(), 3528 * but even essential drivers wait till later 3529 * 3530 * REVISIT only boardinfo really needs static linking. 
the rest (device and 3531 * driver registration) _could_ be dynamically linked (modular) ... costs 3532 * include needing to have boardinfo data structures be much more public. 3533 */ 3534 postcore_initcall(spi_init); 3535 3536
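
/*
 * For reference, a compressed protocol-driver sketch of the synchronous
 * helpers above, for illustration only; the foo_* names and the register
 * layout are assumptions, not defined in this file:
 *
 *	spi->mode = SPI_MODE_0;
 *	spi->bits_per_word = 8;
 *	spi->max_speed_hz = 1000000;
 *	ret = spi_setup(spi);
 *	if (ret)
 *		return ret;
 *
 *	cmd[0] = FOO_REG_ID;
 *	ret = spi_write_then_read(spi, cmd, 1, id, 2);
 *
 *	... for bulk or DMA-safe transfers, build a struct spi_message with
 *	    spi_message_init() / spi_message_add_tail() and call spi_sync();
 *	    hold the bus with spi_bus_lock()/spi_bus_unlock() and use
 *	    spi_sync_locked() when a sequence of messages must not be
 *	    interleaved with traffic to other devices on the same bus ...
 */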