1 // SPDX-License-Identifier: GPL-2.0-or-later 2 // SPI init/core code 3 // 4 // Copyright (C) 2005 David Brownell 5 // Copyright (C) 2008 Secret Lab Technologies Ltd. 6 7 #include <linux/kernel.h> 8 #include <linux/device.h> 9 #include <linux/init.h> 10 #include <linux/cache.h> 11 #include <linux/dma-mapping.h> 12 #include <linux/dmaengine.h> 13 #include <linux/mutex.h> 14 #include <linux/of_device.h> 15 #include <linux/of_irq.h> 16 #include <linux/clk/clk-conf.h> 17 #include <linux/slab.h> 18 #include <linux/mod_devicetable.h> 19 #include <linux/spi/spi.h> 20 #include <linux/spi/spi-mem.h> 21 #include <linux/of_gpio.h> 22 #include <linux/gpio/consumer.h> 23 #include <linux/pm_runtime.h> 24 #include <linux/pm_domain.h> 25 #include <linux/property.h> 26 #include <linux/export.h> 27 #include <linux/sched/rt.h> 28 #include <uapi/linux/sched/types.h> 29 #include <linux/delay.h> 30 #include <linux/kthread.h> 31 #include <linux/ioport.h> 32 #include <linux/acpi.h> 33 #include <linux/highmem.h> 34 #include <linux/idr.h> 35 #include <linux/platform_data/x86/apple.h> 36 37 #define CREATE_TRACE_POINTS 38 #include <trace/events/spi.h> 39 EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start); 40 EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop); 41 42 #include "internals.h" 43 44 static DEFINE_IDR(spi_master_idr); 45 46 static void spidev_release(struct device *dev) 47 { 48 struct spi_device *spi = to_spi_device(dev); 49 50 /* spi controllers may cleanup for released devices */ 51 if (spi->controller->cleanup) 52 spi->controller->cleanup(spi); 53 54 spi_controller_put(spi->controller); 55 kfree(spi->driver_override); 56 kfree(spi); 57 } 58 59 static ssize_t 60 modalias_show(struct device *dev, struct device_attribute *a, char *buf) 61 { 62 const struct spi_device *spi = to_spi_device(dev); 63 int len; 64 65 len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); 66 if (len != -ENODEV) 67 return len; 68 69 return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias); 70 } 71 static DEVICE_ATTR_RO(modalias); 72 73 static ssize_t driver_override_store(struct device *dev, 74 struct device_attribute *a, 75 const char *buf, size_t count) 76 { 77 struct spi_device *spi = to_spi_device(dev); 78 const char *end = memchr(buf, '\n', count); 79 const size_t len = end ? end - buf : count; 80 const char *driver_override, *old; 81 82 /* We need to keep extra room for a newline when displaying value */ 83 if (len >= (PAGE_SIZE - 1)) 84 return -EINVAL; 85 86 driver_override = kstrndup(buf, len, GFP_KERNEL); 87 if (!driver_override) 88 return -ENOMEM; 89 90 device_lock(dev); 91 old = spi->driver_override; 92 if (len) { 93 spi->driver_override = driver_override; 94 } else { 95 /* Empty string, disable driver override */ 96 spi->driver_override = NULL; 97 kfree(driver_override); 98 } 99 device_unlock(dev); 100 kfree(old); 101 102 return count; 103 } 104 105 static ssize_t driver_override_show(struct device *dev, 106 struct device_attribute *a, char *buf) 107 { 108 const struct spi_device *spi = to_spi_device(dev); 109 ssize_t len; 110 111 device_lock(dev); 112 len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : ""); 113 device_unlock(dev); 114 return len; 115 } 116 static DEVICE_ATTR_RW(driver_override); 117 118 #define SPI_STATISTICS_ATTRS(field, file) \ 119 static ssize_t spi_controller_##field##_show(struct device *dev, \ 120 struct device_attribute *attr, \ 121 char *buf) \ 122 { \ 123 struct spi_controller *ctlr = container_of(dev, \ 124 struct spi_controller, dev); \ 125 return spi_statistics_##field##_show(&ctlr->statistics, buf); \ 126 } \ 127 static struct device_attribute dev_attr_spi_controller_##field = { \ 128 .attr = { .name = file, .mode = 0444 }, \ 129 .show = spi_controller_##field##_show, \ 130 }; \ 131 static ssize_t spi_device_##field##_show(struct device *dev, \ 132 struct device_attribute *attr, \ 133 char *buf) \ 134 { \ 135 struct spi_device *spi = to_spi_device(dev); \ 136 return spi_statistics_##field##_show(&spi->statistics, buf); \ 137 } \ 138 static struct device_attribute dev_attr_spi_device_##field = { \ 139 .attr = { .name = file, .mode = 0444 }, \ 140 .show = spi_device_##field##_show, \ 141 } 142 143 #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \ 144 static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \ 145 char *buf) \ 146 { \ 147 unsigned long flags; \ 148 ssize_t len; \ 149 spin_lock_irqsave(&stat->lock, flags); \ 150 len = sprintf(buf, format_string, stat->field); \ 151 spin_unlock_irqrestore(&stat->lock, flags); \ 152 return len; \ 153 } \ 154 SPI_STATISTICS_ATTRS(name, file) 155 156 #define SPI_STATISTICS_SHOW(field, format_string) \ 157 SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \ 158 field, format_string) 159 160 SPI_STATISTICS_SHOW(messages, "%lu"); 161 SPI_STATISTICS_SHOW(transfers, "%lu"); 162 SPI_STATISTICS_SHOW(errors, "%lu"); 163 SPI_STATISTICS_SHOW(timedout, "%lu"); 164 165 SPI_STATISTICS_SHOW(spi_sync, "%lu"); 166 SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu"); 167 SPI_STATISTICS_SHOW(spi_async, "%lu"); 168 169 SPI_STATISTICS_SHOW(bytes, "%llu"); 170 SPI_STATISTICS_SHOW(bytes_rx, "%llu"); 171 SPI_STATISTICS_SHOW(bytes_tx, "%llu"); 172 173 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \ 174 SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \ 175 "transfer_bytes_histo_" number, \ 176 transfer_bytes_histo[index], "%lu") 177 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1"); 178 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3"); 179 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7"); 180 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15"); 181 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31"); 182 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63"); 183 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127"); 184 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255"); 185 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511"); 186 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023"); 187 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047"); 188 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095"); 189 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191"); 190 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383"); 191 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767"); 192 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535"); 193 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+"); 194 195 SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu"); 196 197 static struct attribute *spi_dev_attrs[] = { 198 &dev_attr_modalias.attr, 199 &dev_attr_driver_override.attr, 200 NULL, 201 }; 202 203 static const struct attribute_group spi_dev_group = { 204 .attrs = spi_dev_attrs, 205 }; 206 207 static struct attribute *spi_device_statistics_attrs[] = { 208 &dev_attr_spi_device_messages.attr, 209 &dev_attr_spi_device_transfers.attr, 210 &dev_attr_spi_device_errors.attr, 211 &dev_attr_spi_device_timedout.attr, 212 &dev_attr_spi_device_spi_sync.attr, 213 &dev_attr_spi_device_spi_sync_immediate.attr, 214 &dev_attr_spi_device_spi_async.attr, 215 &dev_attr_spi_device_bytes.attr, 216 &dev_attr_spi_device_bytes_rx.attr, 217 &dev_attr_spi_device_bytes_tx.attr, 218 &dev_attr_spi_device_transfer_bytes_histo0.attr, 219 &dev_attr_spi_device_transfer_bytes_histo1.attr, 220 &dev_attr_spi_device_transfer_bytes_histo2.attr, 221 &dev_attr_spi_device_transfer_bytes_histo3.attr, 222 &dev_attr_spi_device_transfer_bytes_histo4.attr, 223 &dev_attr_spi_device_transfer_bytes_histo5.attr, 224 &dev_attr_spi_device_transfer_bytes_histo6.attr, 225 &dev_attr_spi_device_transfer_bytes_histo7.attr, 226 &dev_attr_spi_device_transfer_bytes_histo8.attr, 227 &dev_attr_spi_device_transfer_bytes_histo9.attr, 228 &dev_attr_spi_device_transfer_bytes_histo10.attr, 229 &dev_attr_spi_device_transfer_bytes_histo11.attr, 230 &dev_attr_spi_device_transfer_bytes_histo12.attr, 231 &dev_attr_spi_device_transfer_bytes_histo13.attr, 232 &dev_attr_spi_device_transfer_bytes_histo14.attr, 233 &dev_attr_spi_device_transfer_bytes_histo15.attr, 234 &dev_attr_spi_device_transfer_bytes_histo16.attr, 235 &dev_attr_spi_device_transfers_split_maxsize.attr, 236 NULL, 237 }; 238 239 static const struct attribute_group spi_device_statistics_group = { 240 .name = "statistics", 241 .attrs = spi_device_statistics_attrs, 242 }; 243 244 static const struct attribute_group *spi_dev_groups[] = { 245 &spi_dev_group, 246 &spi_device_statistics_group, 247 NULL, 248 }; 249 250 static struct attribute *spi_controller_statistics_attrs[] = { 251 &dev_attr_spi_controller_messages.attr, 252 &dev_attr_spi_controller_transfers.attr, 253 &dev_attr_spi_controller_errors.attr, 254 &dev_attr_spi_controller_timedout.attr, 255 &dev_attr_spi_controller_spi_sync.attr, 256 &dev_attr_spi_controller_spi_sync_immediate.attr, 257 &dev_attr_spi_controller_spi_async.attr, 258 &dev_attr_spi_controller_bytes.attr, 259 &dev_attr_spi_controller_bytes_rx.attr, 260 &dev_attr_spi_controller_bytes_tx.attr, 261 &dev_attr_spi_controller_transfer_bytes_histo0.attr, 262 &dev_attr_spi_controller_transfer_bytes_histo1.attr, 263 &dev_attr_spi_controller_transfer_bytes_histo2.attr, 264 &dev_attr_spi_controller_transfer_bytes_histo3.attr, 265 &dev_attr_spi_controller_transfer_bytes_histo4.attr, 266 &dev_attr_spi_controller_transfer_bytes_histo5.attr, 267 &dev_attr_spi_controller_transfer_bytes_histo6.attr, 268 &dev_attr_spi_controller_transfer_bytes_histo7.attr, 269 &dev_attr_spi_controller_transfer_bytes_histo8.attr, 270 &dev_attr_spi_controller_transfer_bytes_histo9.attr, 271 &dev_attr_spi_controller_transfer_bytes_histo10.attr, 272 &dev_attr_spi_controller_transfer_bytes_histo11.attr, 273 &dev_attr_spi_controller_transfer_bytes_histo12.attr, 274 &dev_attr_spi_controller_transfer_bytes_histo13.attr, 275 &dev_attr_spi_controller_transfer_bytes_histo14.attr, 276 &dev_attr_spi_controller_transfer_bytes_histo15.attr, 277 &dev_attr_spi_controller_transfer_bytes_histo16.attr, 278 &dev_attr_spi_controller_transfers_split_maxsize.attr, 279 NULL, 280 }; 281 282 static const struct attribute_group spi_controller_statistics_group = { 283 .name = "statistics", 284 .attrs = spi_controller_statistics_attrs, 285 }; 286 287 static const struct attribute_group *spi_master_groups[] = { 288 &spi_controller_statistics_group, 289 NULL, 290 }; 291 292 void spi_statistics_add_transfer_stats(struct spi_statistics *stats, 293 struct spi_transfer *xfer, 294 struct spi_controller *ctlr) 295 { 296 unsigned long flags; 297 int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1; 298 299 if (l2len < 0) 300 l2len = 0; 301 302 spin_lock_irqsave(&stats->lock, flags); 303 304 stats->transfers++; 305 stats->transfer_bytes_histo[l2len]++; 306 307 stats->bytes += xfer->len; 308 if ((xfer->tx_buf) && 309 (xfer->tx_buf != ctlr->dummy_tx)) 310 stats->bytes_tx += xfer->len; 311 if ((xfer->rx_buf) && 312 (xfer->rx_buf != ctlr->dummy_rx)) 313 stats->bytes_rx += xfer->len; 314 315 spin_unlock_irqrestore(&stats->lock, flags); 316 } 317 EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats); 318 319 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work, 320 * and the sysfs version makes coldplug work too. 321 */ 322 323 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, 324 const struct spi_device *sdev) 325 { 326 while (id->name[0]) { 327 if (!strcmp(sdev->modalias, id->name)) 328 return id; 329 id++; 330 } 331 return NULL; 332 } 333 334 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev) 335 { 336 const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver); 337 338 return spi_match_id(sdrv->id_table, sdev); 339 } 340 EXPORT_SYMBOL_GPL(spi_get_device_id); 341 342 static int spi_match_device(struct device *dev, struct device_driver *drv) 343 { 344 const struct spi_device *spi = to_spi_device(dev); 345 const struct spi_driver *sdrv = to_spi_driver(drv); 346 347 /* Check override first, and if set, only use the named driver */ 348 if (spi->driver_override) 349 return strcmp(spi->driver_override, drv->name) == 0; 350 351 /* Attempt an OF style match */ 352 if (of_driver_match_device(dev, drv)) 353 return 1; 354 355 /* Then try ACPI */ 356 if (acpi_driver_match_device(dev, drv)) 357 return 1; 358 359 if (sdrv->id_table) 360 return !!spi_match_id(sdrv->id_table, spi); 361 362 return strcmp(spi->modalias, drv->name) == 0; 363 } 364 365 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env) 366 { 367 const struct spi_device *spi = to_spi_device(dev); 368 int rc; 369 370 rc = acpi_device_uevent_modalias(dev, env); 371 if (rc != -ENODEV) 372 return rc; 373 374 return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias); 375 } 376 377 struct bus_type spi_bus_type = { 378 .name = "spi", 379 .dev_groups = spi_dev_groups, 380 .match = spi_match_device, 381 .uevent = spi_uevent, 382 }; 383 EXPORT_SYMBOL_GPL(spi_bus_type); 384 385 386 static int spi_drv_probe(struct device *dev) 387 { 388 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 389 struct spi_device *spi = to_spi_device(dev); 390 int ret; 391 392 ret = of_clk_set_defaults(dev->of_node, false); 393 if (ret) 394 return ret; 395 396 if (dev->of_node) { 397 spi->irq = of_irq_get(dev->of_node, 0); 398 if (spi->irq == -EPROBE_DEFER) 399 return -EPROBE_DEFER; 400 if (spi->irq < 0) 401 spi->irq = 0; 402 } 403 404 ret = dev_pm_domain_attach(dev, true); 405 if (ret) 406 return ret; 407 408 ret = sdrv->probe(spi); 409 if (ret) 410 dev_pm_domain_detach(dev, true); 411 412 return ret; 413 } 414 415 static int spi_drv_remove(struct device *dev) 416 { 417 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 418 int ret; 419 420 ret = sdrv->remove(to_spi_device(dev)); 421 dev_pm_domain_detach(dev, true); 422 423 return ret; 424 } 425 426 static void spi_drv_shutdown(struct device *dev) 427 { 428 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 429 430 sdrv->shutdown(to_spi_device(dev)); 431 } 432 433 /** 434 * __spi_register_driver - register a SPI driver 435 * @owner: owner module of the driver to register 436 * @sdrv: the driver to register 437 * Context: can sleep 438 * 439 * Return: zero on success, else a negative error code. 440 */ 441 int __spi_register_driver(struct module *owner, struct spi_driver *sdrv) 442 { 443 sdrv->driver.owner = owner; 444 sdrv->driver.bus = &spi_bus_type; 445 if (sdrv->probe) 446 sdrv->driver.probe = spi_drv_probe; 447 if (sdrv->remove) 448 sdrv->driver.remove = spi_drv_remove; 449 if (sdrv->shutdown) 450 sdrv->driver.shutdown = spi_drv_shutdown; 451 return driver_register(&sdrv->driver); 452 } 453 EXPORT_SYMBOL_GPL(__spi_register_driver); 454 455 /*-------------------------------------------------------------------------*/ 456 457 /* SPI devices should normally not be created by SPI device drivers; that 458 * would make them board-specific. Similarly with SPI controller drivers. 459 * Device registration normally goes into like arch/.../mach.../board-YYY.c 460 * with other readonly (flashable) information about mainboard devices. 461 */ 462 463 struct boardinfo { 464 struct list_head list; 465 struct spi_board_info board_info; 466 }; 467 468 static LIST_HEAD(board_list); 469 static LIST_HEAD(spi_controller_list); 470 471 /* 472 * Used to protect add/del operation for board_info list and 473 * spi_controller list, and their matching process 474 * also used to protect object of type struct idr 475 */ 476 static DEFINE_MUTEX(board_lock); 477 478 /** 479 * spi_alloc_device - Allocate a new SPI device 480 * @ctlr: Controller to which device is connected 481 * Context: can sleep 482 * 483 * Allows a driver to allocate and initialize a spi_device without 484 * registering it immediately. This allows a driver to directly 485 * fill the spi_device with device parameters before calling 486 * spi_add_device() on it. 487 * 488 * Caller is responsible to call spi_add_device() on the returned 489 * spi_device structure to add it to the SPI controller. If the caller 490 * needs to discard the spi_device without adding it, then it should 491 * call spi_dev_put() on it. 492 * 493 * Return: a pointer to the new device, or NULL. 494 */ 495 struct spi_device *spi_alloc_device(struct spi_controller *ctlr) 496 { 497 struct spi_device *spi; 498 499 if (!spi_controller_get(ctlr)) 500 return NULL; 501 502 spi = kzalloc(sizeof(*spi), GFP_KERNEL); 503 if (!spi) { 504 spi_controller_put(ctlr); 505 return NULL; 506 } 507 508 spi->master = spi->controller = ctlr; 509 spi->dev.parent = &ctlr->dev; 510 spi->dev.bus = &spi_bus_type; 511 spi->dev.release = spidev_release; 512 spi->cs_gpio = -ENOENT; 513 spi->mode = ctlr->buswidth_override_bits; 514 515 spin_lock_init(&spi->statistics.lock); 516 517 device_initialize(&spi->dev); 518 return spi; 519 } 520 EXPORT_SYMBOL_GPL(spi_alloc_device); 521 522 static void spi_dev_set_name(struct spi_device *spi) 523 { 524 struct acpi_device *adev = ACPI_COMPANION(&spi->dev); 525 526 if (adev) { 527 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev)); 528 return; 529 } 530 531 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev), 532 spi->chip_select); 533 } 534 535 static int spi_dev_check(struct device *dev, void *data) 536 { 537 struct spi_device *spi = to_spi_device(dev); 538 struct spi_device *new_spi = data; 539 540 if (spi->controller == new_spi->controller && 541 spi->chip_select == new_spi->chip_select) 542 return -EBUSY; 543 return 0; 544 } 545 546 /** 547 * spi_add_device - Add spi_device allocated with spi_alloc_device 548 * @spi: spi_device to register 549 * 550 * Companion function to spi_alloc_device. Devices allocated with 551 * spi_alloc_device can be added onto the spi bus with this function. 552 * 553 * Return: 0 on success; negative errno on failure 554 */ 555 int spi_add_device(struct spi_device *spi) 556 { 557 static DEFINE_MUTEX(spi_add_lock); 558 struct spi_controller *ctlr = spi->controller; 559 struct device *dev = ctlr->dev.parent; 560 int status; 561 562 /* Chipselects are numbered 0..max; validate. */ 563 if (spi->chip_select >= ctlr->num_chipselect) { 564 dev_err(dev, "cs%d >= max %d\n", spi->chip_select, 565 ctlr->num_chipselect); 566 return -EINVAL; 567 } 568 569 /* Set the bus ID string */ 570 spi_dev_set_name(spi); 571 572 /* We need to make sure there's no other device with this 573 * chipselect **BEFORE** we call setup(), else we'll trash 574 * its configuration. Lock against concurrent add() calls. 575 */ 576 mutex_lock(&spi_add_lock); 577 578 status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check); 579 if (status) { 580 dev_err(dev, "chipselect %d already in use\n", 581 spi->chip_select); 582 goto done; 583 } 584 585 /* Descriptors take precedence */ 586 if (ctlr->cs_gpiods) 587 spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select]; 588 else if (ctlr->cs_gpios) 589 spi->cs_gpio = ctlr->cs_gpios[spi->chip_select]; 590 591 /* Drivers may modify this initial i/o setup, but will 592 * normally rely on the device being setup. Devices 593 * using SPI_CS_HIGH can't coexist well otherwise... 594 */ 595 status = spi_setup(spi); 596 if (status < 0) { 597 dev_err(dev, "can't setup %s, status %d\n", 598 dev_name(&spi->dev), status); 599 goto done; 600 } 601 602 /* Device may be bound to an active driver when this returns */ 603 status = device_add(&spi->dev); 604 if (status < 0) 605 dev_err(dev, "can't add %s, status %d\n", 606 dev_name(&spi->dev), status); 607 else 608 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev)); 609 610 done: 611 mutex_unlock(&spi_add_lock); 612 return status; 613 } 614 EXPORT_SYMBOL_GPL(spi_add_device); 615 616 /** 617 * spi_new_device - instantiate one new SPI device 618 * @ctlr: Controller to which device is connected 619 * @chip: Describes the SPI device 620 * Context: can sleep 621 * 622 * On typical mainboards, this is purely internal; and it's not needed 623 * after board init creates the hard-wired devices. Some development 624 * platforms may not be able to use spi_register_board_info though, and 625 * this is exported so that for example a USB or parport based adapter 626 * driver could add devices (which it would learn about out-of-band). 627 * 628 * Return: the new device, or NULL. 629 */ 630 struct spi_device *spi_new_device(struct spi_controller *ctlr, 631 struct spi_board_info *chip) 632 { 633 struct spi_device *proxy; 634 int status; 635 636 /* NOTE: caller did any chip->bus_num checks necessary. 637 * 638 * Also, unless we change the return value convention to use 639 * error-or-pointer (not NULL-or-pointer), troubleshootability 640 * suggests syslogged diagnostics are best here (ugh). 641 */ 642 643 proxy = spi_alloc_device(ctlr); 644 if (!proxy) 645 return NULL; 646 647 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); 648 649 proxy->chip_select = chip->chip_select; 650 proxy->max_speed_hz = chip->max_speed_hz; 651 proxy->mode = chip->mode; 652 proxy->irq = chip->irq; 653 strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias)); 654 proxy->dev.platform_data = (void *) chip->platform_data; 655 proxy->controller_data = chip->controller_data; 656 proxy->controller_state = NULL; 657 658 if (chip->properties) { 659 status = device_add_properties(&proxy->dev, chip->properties); 660 if (status) { 661 dev_err(&ctlr->dev, 662 "failed to add properties to '%s': %d\n", 663 chip->modalias, status); 664 goto err_dev_put; 665 } 666 } 667 668 status = spi_add_device(proxy); 669 if (status < 0) 670 goto err_remove_props; 671 672 return proxy; 673 674 err_remove_props: 675 if (chip->properties) 676 device_remove_properties(&proxy->dev); 677 err_dev_put: 678 spi_dev_put(proxy); 679 return NULL; 680 } 681 EXPORT_SYMBOL_GPL(spi_new_device); 682 683 /** 684 * spi_unregister_device - unregister a single SPI device 685 * @spi: spi_device to unregister 686 * 687 * Start making the passed SPI device vanish. Normally this would be handled 688 * by spi_unregister_controller(). 689 */ 690 void spi_unregister_device(struct spi_device *spi) 691 { 692 if (!spi) 693 return; 694 695 if (spi->dev.of_node) { 696 of_node_clear_flag(spi->dev.of_node, OF_POPULATED); 697 of_node_put(spi->dev.of_node); 698 } 699 if (ACPI_COMPANION(&spi->dev)) 700 acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev)); 701 device_unregister(&spi->dev); 702 } 703 EXPORT_SYMBOL_GPL(spi_unregister_device); 704 705 static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr, 706 struct spi_board_info *bi) 707 { 708 struct spi_device *dev; 709 710 if (ctlr->bus_num != bi->bus_num) 711 return; 712 713 dev = spi_new_device(ctlr, bi); 714 if (!dev) 715 dev_err(ctlr->dev.parent, "can't create new device for %s\n", 716 bi->modalias); 717 } 718 719 /** 720 * spi_register_board_info - register SPI devices for a given board 721 * @info: array of chip descriptors 722 * @n: how many descriptors are provided 723 * Context: can sleep 724 * 725 * Board-specific early init code calls this (probably during arch_initcall) 726 * with segments of the SPI device table. Any device nodes are created later, 727 * after the relevant parent SPI controller (bus_num) is defined. We keep 728 * this table of devices forever, so that reloading a controller driver will 729 * not make Linux forget about these hard-wired devices. 730 * 731 * Other code can also call this, e.g. a particular add-on board might provide 732 * SPI devices through its expansion connector, so code initializing that board 733 * would naturally declare its SPI devices. 734 * 735 * The board info passed can safely be __initdata ... but be careful of 736 * any embedded pointers (platform_data, etc), they're copied as-is. 737 * Device properties are deep-copied though. 738 * 739 * Return: zero on success, else a negative error code. 740 */ 741 int spi_register_board_info(struct spi_board_info const *info, unsigned n) 742 { 743 struct boardinfo *bi; 744 int i; 745 746 if (!n) 747 return 0; 748 749 bi = kcalloc(n, sizeof(*bi), GFP_KERNEL); 750 if (!bi) 751 return -ENOMEM; 752 753 for (i = 0; i < n; i++, bi++, info++) { 754 struct spi_controller *ctlr; 755 756 memcpy(&bi->board_info, info, sizeof(*info)); 757 if (info->properties) { 758 bi->board_info.properties = 759 property_entries_dup(info->properties); 760 if (IS_ERR(bi->board_info.properties)) 761 return PTR_ERR(bi->board_info.properties); 762 } 763 764 mutex_lock(&board_lock); 765 list_add_tail(&bi->list, &board_list); 766 list_for_each_entry(ctlr, &spi_controller_list, list) 767 spi_match_controller_to_boardinfo(ctlr, 768 &bi->board_info); 769 mutex_unlock(&board_lock); 770 } 771 772 return 0; 773 } 774 775 /*-------------------------------------------------------------------------*/ 776 777 static void spi_set_cs(struct spi_device *spi, bool enable) 778 { 779 bool enable1 = enable; 780 781 if (!spi->controller->set_cs_timing) { 782 if (enable1) 783 spi_delay_exec(&spi->controller->cs_setup, NULL); 784 else 785 spi_delay_exec(&spi->controller->cs_hold, NULL); 786 } 787 788 if (spi->mode & SPI_CS_HIGH) 789 enable = !enable; 790 791 if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) { 792 /* 793 * Honour the SPI_NO_CS flag and invert the enable line, as 794 * active low is default for SPI. Execution paths that handle 795 * polarity inversion in gpiolib (such as device tree) will 796 * enforce active high using the SPI_CS_HIGH resulting in a 797 * double inversion through the code above. 798 */ 799 if (!(spi->mode & SPI_NO_CS)) { 800 if (spi->cs_gpiod) 801 gpiod_set_value_cansleep(spi->cs_gpiod, 802 !enable); 803 else 804 gpio_set_value_cansleep(spi->cs_gpio, !enable); 805 } 806 /* Some SPI masters need both GPIO CS & slave_select */ 807 if ((spi->controller->flags & SPI_MASTER_GPIO_SS) && 808 spi->controller->set_cs) 809 spi->controller->set_cs(spi, !enable); 810 } else if (spi->controller->set_cs) { 811 spi->controller->set_cs(spi, !enable); 812 } 813 814 if (!spi->controller->set_cs_timing) { 815 if (!enable1) 816 spi_delay_exec(&spi->controller->cs_inactive, NULL); 817 } 818 } 819 820 #ifdef CONFIG_HAS_DMA 821 int spi_map_buf(struct spi_controller *ctlr, struct device *dev, 822 struct sg_table *sgt, void *buf, size_t len, 823 enum dma_data_direction dir) 824 { 825 const bool vmalloced_buf = is_vmalloc_addr(buf); 826 unsigned int max_seg_size = dma_get_max_seg_size(dev); 827 #ifdef CONFIG_HIGHMEM 828 const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE && 829 (unsigned long)buf < (PKMAP_BASE + 830 (LAST_PKMAP * PAGE_SIZE))); 831 #else 832 const bool kmap_buf = false; 833 #endif 834 int desc_len; 835 int sgs; 836 struct page *vm_page; 837 struct scatterlist *sg; 838 void *sg_buf; 839 size_t min; 840 int i, ret; 841 842 if (vmalloced_buf || kmap_buf) { 843 desc_len = min_t(int, max_seg_size, PAGE_SIZE); 844 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); 845 } else if (virt_addr_valid(buf)) { 846 desc_len = min_t(int, max_seg_size, ctlr->max_dma_len); 847 sgs = DIV_ROUND_UP(len, desc_len); 848 } else { 849 return -EINVAL; 850 } 851 852 ret = sg_alloc_table(sgt, sgs, GFP_KERNEL); 853 if (ret != 0) 854 return ret; 855 856 sg = &sgt->sgl[0]; 857 for (i = 0; i < sgs; i++) { 858 859 if (vmalloced_buf || kmap_buf) { 860 /* 861 * Next scatterlist entry size is the minimum between 862 * the desc_len and the remaining buffer length that 863 * fits in a page. 864 */ 865 min = min_t(size_t, desc_len, 866 min_t(size_t, len, 867 PAGE_SIZE - offset_in_page(buf))); 868 if (vmalloced_buf) 869 vm_page = vmalloc_to_page(buf); 870 else 871 vm_page = kmap_to_page(buf); 872 if (!vm_page) { 873 sg_free_table(sgt); 874 return -ENOMEM; 875 } 876 sg_set_page(sg, vm_page, 877 min, offset_in_page(buf)); 878 } else { 879 min = min_t(size_t, len, desc_len); 880 sg_buf = buf; 881 sg_set_buf(sg, sg_buf, min); 882 } 883 884 buf += min; 885 len -= min; 886 sg = sg_next(sg); 887 } 888 889 ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir); 890 if (!ret) 891 ret = -ENOMEM; 892 if (ret < 0) { 893 sg_free_table(sgt); 894 return ret; 895 } 896 897 sgt->nents = ret; 898 899 return 0; 900 } 901 902 void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev, 903 struct sg_table *sgt, enum dma_data_direction dir) 904 { 905 if (sgt->orig_nents) { 906 dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir); 907 sg_free_table(sgt); 908 } 909 } 910 911 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) 912 { 913 struct device *tx_dev, *rx_dev; 914 struct spi_transfer *xfer; 915 int ret; 916 917 if (!ctlr->can_dma) 918 return 0; 919 920 if (ctlr->dma_tx) 921 tx_dev = ctlr->dma_tx->device->dev; 922 else 923 tx_dev = ctlr->dev.parent; 924 925 if (ctlr->dma_rx) 926 rx_dev = ctlr->dma_rx->device->dev; 927 else 928 rx_dev = ctlr->dev.parent; 929 930 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 931 if (!ctlr->can_dma(ctlr, msg->spi, xfer)) 932 continue; 933 934 if (xfer->tx_buf != NULL) { 935 ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg, 936 (void *)xfer->tx_buf, xfer->len, 937 DMA_TO_DEVICE); 938 if (ret != 0) 939 return ret; 940 } 941 942 if (xfer->rx_buf != NULL) { 943 ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg, 944 xfer->rx_buf, xfer->len, 945 DMA_FROM_DEVICE); 946 if (ret != 0) { 947 spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, 948 DMA_TO_DEVICE); 949 return ret; 950 } 951 } 952 } 953 954 ctlr->cur_msg_mapped = true; 955 956 return 0; 957 } 958 959 static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg) 960 { 961 struct spi_transfer *xfer; 962 struct device *tx_dev, *rx_dev; 963 964 if (!ctlr->cur_msg_mapped || !ctlr->can_dma) 965 return 0; 966 967 if (ctlr->dma_tx) 968 tx_dev = ctlr->dma_tx->device->dev; 969 else 970 tx_dev = ctlr->dev.parent; 971 972 if (ctlr->dma_rx) 973 rx_dev = ctlr->dma_rx->device->dev; 974 else 975 rx_dev = ctlr->dev.parent; 976 977 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 978 if (!ctlr->can_dma(ctlr, msg->spi, xfer)) 979 continue; 980 981 spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); 982 spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); 983 } 984 985 return 0; 986 } 987 #else /* !CONFIG_HAS_DMA */ 988 static inline int __spi_map_msg(struct spi_controller *ctlr, 989 struct spi_message *msg) 990 { 991 return 0; 992 } 993 994 static inline int __spi_unmap_msg(struct spi_controller *ctlr, 995 struct spi_message *msg) 996 { 997 return 0; 998 } 999 #endif /* !CONFIG_HAS_DMA */ 1000 1001 static inline int spi_unmap_msg(struct spi_controller *ctlr, 1002 struct spi_message *msg) 1003 { 1004 struct spi_transfer *xfer; 1005 1006 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 1007 /* 1008 * Restore the original value of tx_buf or rx_buf if they are 1009 * NULL. 1010 */ 1011 if (xfer->tx_buf == ctlr->dummy_tx) 1012 xfer->tx_buf = NULL; 1013 if (xfer->rx_buf == ctlr->dummy_rx) 1014 xfer->rx_buf = NULL; 1015 } 1016 1017 return __spi_unmap_msg(ctlr, msg); 1018 } 1019 1020 static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) 1021 { 1022 struct spi_transfer *xfer; 1023 void *tmp; 1024 unsigned int max_tx, max_rx; 1025 1026 if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) 1027 && !(msg->spi->mode & SPI_3WIRE)) { 1028 max_tx = 0; 1029 max_rx = 0; 1030 1031 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 1032 if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) && 1033 !xfer->tx_buf) 1034 max_tx = max(xfer->len, max_tx); 1035 if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) && 1036 !xfer->rx_buf) 1037 max_rx = max(xfer->len, max_rx); 1038 } 1039 1040 if (max_tx) { 1041 tmp = krealloc(ctlr->dummy_tx, max_tx, 1042 GFP_KERNEL | GFP_DMA); 1043 if (!tmp) 1044 return -ENOMEM; 1045 ctlr->dummy_tx = tmp; 1046 memset(tmp, 0, max_tx); 1047 } 1048 1049 if (max_rx) { 1050 tmp = krealloc(ctlr->dummy_rx, max_rx, 1051 GFP_KERNEL | GFP_DMA); 1052 if (!tmp) 1053 return -ENOMEM; 1054 ctlr->dummy_rx = tmp; 1055 } 1056 1057 if (max_tx || max_rx) { 1058 list_for_each_entry(xfer, &msg->transfers, 1059 transfer_list) { 1060 if (!xfer->len) 1061 continue; 1062 if (!xfer->tx_buf) 1063 xfer->tx_buf = ctlr->dummy_tx; 1064 if (!xfer->rx_buf) 1065 xfer->rx_buf = ctlr->dummy_rx; 1066 } 1067 } 1068 } 1069 1070 return __spi_map_msg(ctlr, msg); 1071 } 1072 1073 static int spi_transfer_wait(struct spi_controller *ctlr, 1074 struct spi_message *msg, 1075 struct spi_transfer *xfer) 1076 { 1077 struct spi_statistics *statm = &ctlr->statistics; 1078 struct spi_statistics *stats = &msg->spi->statistics; 1079 unsigned long long ms; 1080 1081 if (spi_controller_is_slave(ctlr)) { 1082 if (wait_for_completion_interruptible(&ctlr->xfer_completion)) { 1083 dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n"); 1084 return -EINTR; 1085 } 1086 } else { 1087 ms = 8LL * 1000LL * xfer->len; 1088 do_div(ms, xfer->speed_hz); 1089 ms += ms + 200; /* some tolerance */ 1090 1091 if (ms > UINT_MAX) 1092 ms = UINT_MAX; 1093 1094 ms = wait_for_completion_timeout(&ctlr->xfer_completion, 1095 msecs_to_jiffies(ms)); 1096 1097 if (ms == 0) { 1098 SPI_STATISTICS_INCREMENT_FIELD(statm, timedout); 1099 SPI_STATISTICS_INCREMENT_FIELD(stats, timedout); 1100 dev_err(&msg->spi->dev, 1101 "SPI transfer timed out\n"); 1102 return -ETIMEDOUT; 1103 } 1104 } 1105 1106 return 0; 1107 } 1108 1109 static void _spi_transfer_delay_ns(u32 ns) 1110 { 1111 if (!ns) 1112 return; 1113 if (ns <= 1000) { 1114 ndelay(ns); 1115 } else { 1116 u32 us = DIV_ROUND_UP(ns, 1000); 1117 1118 if (us <= 10) 1119 udelay(us); 1120 else 1121 usleep_range(us, us + DIV_ROUND_UP(us, 10)); 1122 } 1123 } 1124 1125 int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer) 1126 { 1127 u32 delay = _delay->value; 1128 u32 unit = _delay->unit; 1129 u32 hz; 1130 1131 if (!delay) 1132 return 0; 1133 1134 switch (unit) { 1135 case SPI_DELAY_UNIT_USECS: 1136 delay *= 1000; 1137 break; 1138 case SPI_DELAY_UNIT_NSECS: /* nothing to do here */ 1139 break; 1140 case SPI_DELAY_UNIT_SCK: 1141 /* clock cycles need to be obtained from spi_transfer */ 1142 if (!xfer) 1143 return -EINVAL; 1144 /* if there is no effective speed know, then approximate 1145 * by underestimating with half the requested hz 1146 */ 1147 hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2; 1148 if (!hz) 1149 return -EINVAL; 1150 delay *= DIV_ROUND_UP(1000000000, hz); 1151 break; 1152 default: 1153 return -EINVAL; 1154 } 1155 1156 return delay; 1157 } 1158 EXPORT_SYMBOL_GPL(spi_delay_to_ns); 1159 1160 int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer) 1161 { 1162 int delay; 1163 1164 might_sleep(); 1165 1166 if (!_delay) 1167 return -EINVAL; 1168 1169 delay = spi_delay_to_ns(_delay, xfer); 1170 if (delay < 0) 1171 return delay; 1172 1173 _spi_transfer_delay_ns(delay); 1174 1175 return 0; 1176 } 1177 EXPORT_SYMBOL_GPL(spi_delay_exec); 1178 1179 static void _spi_transfer_cs_change_delay(struct spi_message *msg, 1180 struct spi_transfer *xfer) 1181 { 1182 u32 delay = xfer->cs_change_delay.value; 1183 u32 unit = xfer->cs_change_delay.unit; 1184 int ret; 1185 1186 /* return early on "fast" mode - for everything but USECS */ 1187 if (!delay) { 1188 if (unit == SPI_DELAY_UNIT_USECS) 1189 _spi_transfer_delay_ns(10000); 1190 return; 1191 } 1192 1193 ret = spi_delay_exec(&xfer->cs_change_delay, xfer); 1194 if (ret) { 1195 dev_err_once(&msg->spi->dev, 1196 "Use of unsupported delay unit %i, using default of 10us\n", 1197 unit); 1198 _spi_transfer_delay_ns(10000); 1199 } 1200 } 1201 1202 /* 1203 * spi_transfer_one_message - Default implementation of transfer_one_message() 1204 * 1205 * This is a standard implementation of transfer_one_message() for 1206 * drivers which implement a transfer_one() operation. It provides 1207 * standard handling of delays and chip select management. 1208 */ 1209 static int spi_transfer_one_message(struct spi_controller *ctlr, 1210 struct spi_message *msg) 1211 { 1212 struct spi_transfer *xfer; 1213 bool keep_cs = false; 1214 int ret = 0; 1215 struct spi_statistics *statm = &ctlr->statistics; 1216 struct spi_statistics *stats = &msg->spi->statistics; 1217 1218 spi_set_cs(msg->spi, true); 1219 1220 SPI_STATISTICS_INCREMENT_FIELD(statm, messages); 1221 SPI_STATISTICS_INCREMENT_FIELD(stats, messages); 1222 1223 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 1224 trace_spi_transfer_start(msg, xfer); 1225 1226 spi_statistics_add_transfer_stats(statm, xfer, ctlr); 1227 spi_statistics_add_transfer_stats(stats, xfer, ctlr); 1228 1229 if (!ctlr->ptp_sts_supported) { 1230 xfer->ptp_sts_word_pre = 0; 1231 ptp_read_system_prets(xfer->ptp_sts); 1232 } 1233 1234 if (xfer->tx_buf || xfer->rx_buf) { 1235 reinit_completion(&ctlr->xfer_completion); 1236 1237 ret = ctlr->transfer_one(ctlr, msg->spi, xfer); 1238 if (ret < 0) { 1239 SPI_STATISTICS_INCREMENT_FIELD(statm, 1240 errors); 1241 SPI_STATISTICS_INCREMENT_FIELD(stats, 1242 errors); 1243 dev_err(&msg->spi->dev, 1244 "SPI transfer failed: %d\n", ret); 1245 goto out; 1246 } 1247 1248 if (ret > 0) { 1249 ret = spi_transfer_wait(ctlr, msg, xfer); 1250 if (ret < 0) 1251 msg->status = ret; 1252 } 1253 } else { 1254 if (xfer->len) 1255 dev_err(&msg->spi->dev, 1256 "Bufferless transfer has length %u\n", 1257 xfer->len); 1258 } 1259 1260 if (!ctlr->ptp_sts_supported) { 1261 ptp_read_system_postts(xfer->ptp_sts); 1262 xfer->ptp_sts_word_post = xfer->len; 1263 } 1264 1265 trace_spi_transfer_stop(msg, xfer); 1266 1267 if (msg->status != -EINPROGRESS) 1268 goto out; 1269 1270 spi_transfer_delay_exec(xfer); 1271 1272 if (xfer->cs_change) { 1273 if (list_is_last(&xfer->transfer_list, 1274 &msg->transfers)) { 1275 keep_cs = true; 1276 } else { 1277 spi_set_cs(msg->spi, false); 1278 _spi_transfer_cs_change_delay(msg, xfer); 1279 spi_set_cs(msg->spi, true); 1280 } 1281 } 1282 1283 msg->actual_length += xfer->len; 1284 } 1285 1286 out: 1287 if (ret != 0 || !keep_cs) 1288 spi_set_cs(msg->spi, false); 1289 1290 if (msg->status == -EINPROGRESS) 1291 msg->status = ret; 1292 1293 if (msg->status && ctlr->handle_err) 1294 ctlr->handle_err(ctlr, msg); 1295 1296 spi_res_release(ctlr, msg); 1297 1298 spi_finalize_current_message(ctlr); 1299 1300 return ret; 1301 } 1302 1303 /** 1304 * spi_finalize_current_transfer - report completion of a transfer 1305 * @ctlr: the controller reporting completion 1306 * 1307 * Called by SPI drivers using the core transfer_one_message() 1308 * implementation to notify it that the current interrupt driven 1309 * transfer has finished and the next one may be scheduled. 1310 */ 1311 void spi_finalize_current_transfer(struct spi_controller *ctlr) 1312 { 1313 complete(&ctlr->xfer_completion); 1314 } 1315 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); 1316 1317 /** 1318 * __spi_pump_messages - function which processes spi message queue 1319 * @ctlr: controller to process queue for 1320 * @in_kthread: true if we are in the context of the message pump thread 1321 * 1322 * This function checks if there is any spi message in the queue that 1323 * needs processing and if so call out to the driver to initialize hardware 1324 * and transfer each message. 1325 * 1326 * Note that it is called both from the kthread itself and also from 1327 * inside spi_sync(); the queue extraction handling at the top of the 1328 * function should deal with this safely. 1329 */ 1330 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) 1331 { 1332 struct spi_transfer *xfer; 1333 struct spi_message *msg; 1334 bool was_busy = false; 1335 unsigned long flags; 1336 int ret; 1337 1338 /* Lock queue */ 1339 spin_lock_irqsave(&ctlr->queue_lock, flags); 1340 1341 /* Make sure we are not already running a message */ 1342 if (ctlr->cur_msg) { 1343 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1344 return; 1345 } 1346 1347 /* If another context is idling the device then defer */ 1348 if (ctlr->idling) { 1349 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); 1350 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1351 return; 1352 } 1353 1354 /* Check if the queue is idle */ 1355 if (list_empty(&ctlr->queue) || !ctlr->running) { 1356 if (!ctlr->busy) { 1357 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1358 return; 1359 } 1360 1361 /* Only do teardown in the thread */ 1362 if (!in_kthread) { 1363 kthread_queue_work(&ctlr->kworker, 1364 &ctlr->pump_messages); 1365 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1366 return; 1367 } 1368 1369 ctlr->busy = false; 1370 ctlr->idling = true; 1371 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1372 1373 kfree(ctlr->dummy_rx); 1374 ctlr->dummy_rx = NULL; 1375 kfree(ctlr->dummy_tx); 1376 ctlr->dummy_tx = NULL; 1377 if (ctlr->unprepare_transfer_hardware && 1378 ctlr->unprepare_transfer_hardware(ctlr)) 1379 dev_err(&ctlr->dev, 1380 "failed to unprepare transfer hardware\n"); 1381 if (ctlr->auto_runtime_pm) { 1382 pm_runtime_mark_last_busy(ctlr->dev.parent); 1383 pm_runtime_put_autosuspend(ctlr->dev.parent); 1384 } 1385 trace_spi_controller_idle(ctlr); 1386 1387 spin_lock_irqsave(&ctlr->queue_lock, flags); 1388 ctlr->idling = false; 1389 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1390 return; 1391 } 1392 1393 /* Extract head of queue */ 1394 msg = list_first_entry(&ctlr->queue, struct spi_message, queue); 1395 ctlr->cur_msg = msg; 1396 1397 list_del_init(&msg->queue); 1398 if (ctlr->busy) 1399 was_busy = true; 1400 else 1401 ctlr->busy = true; 1402 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1403 1404 mutex_lock(&ctlr->io_mutex); 1405 1406 if (!was_busy && ctlr->auto_runtime_pm) { 1407 ret = pm_runtime_get_sync(ctlr->dev.parent); 1408 if (ret < 0) { 1409 pm_runtime_put_noidle(ctlr->dev.parent); 1410 dev_err(&ctlr->dev, "Failed to power device: %d\n", 1411 ret); 1412 mutex_unlock(&ctlr->io_mutex); 1413 return; 1414 } 1415 } 1416 1417 if (!was_busy) 1418 trace_spi_controller_busy(ctlr); 1419 1420 if (!was_busy && ctlr->prepare_transfer_hardware) { 1421 ret = ctlr->prepare_transfer_hardware(ctlr); 1422 if (ret) { 1423 dev_err(&ctlr->dev, 1424 "failed to prepare transfer hardware: %d\n", 1425 ret); 1426 1427 if (ctlr->auto_runtime_pm) 1428 pm_runtime_put(ctlr->dev.parent); 1429 1430 msg->status = ret; 1431 spi_finalize_current_message(ctlr); 1432 1433 mutex_unlock(&ctlr->io_mutex); 1434 return; 1435 } 1436 } 1437 1438 trace_spi_message_start(msg); 1439 1440 if (ctlr->prepare_message) { 1441 ret = ctlr->prepare_message(ctlr, msg); 1442 if (ret) { 1443 dev_err(&ctlr->dev, "failed to prepare message: %d\n", 1444 ret); 1445 msg->status = ret; 1446 spi_finalize_current_message(ctlr); 1447 goto out; 1448 } 1449 ctlr->cur_msg_prepared = true; 1450 } 1451 1452 ret = spi_map_msg(ctlr, msg); 1453 if (ret) { 1454 msg->status = ret; 1455 spi_finalize_current_message(ctlr); 1456 goto out; 1457 } 1458 1459 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) { 1460 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 1461 xfer->ptp_sts_word_pre = 0; 1462 ptp_read_system_prets(xfer->ptp_sts); 1463 } 1464 } 1465 1466 ret = ctlr->transfer_one_message(ctlr, msg); 1467 if (ret) { 1468 dev_err(&ctlr->dev, 1469 "failed to transfer one message from queue\n"); 1470 goto out; 1471 } 1472 1473 out: 1474 mutex_unlock(&ctlr->io_mutex); 1475 1476 /* Prod the scheduler in case transfer_one() was busy waiting */ 1477 if (!ret) 1478 cond_resched(); 1479 } 1480 1481 /** 1482 * spi_pump_messages - kthread work function which processes spi message queue 1483 * @work: pointer to kthread work struct contained in the controller struct 1484 */ 1485 static void spi_pump_messages(struct kthread_work *work) 1486 { 1487 struct spi_controller *ctlr = 1488 container_of(work, struct spi_controller, pump_messages); 1489 1490 __spi_pump_messages(ctlr, true); 1491 } 1492 1493 /** 1494 * spi_take_timestamp_pre - helper for drivers to collect the beginning of the 1495 * TX timestamp for the requested byte from the SPI 1496 * transfer. The frequency with which this function 1497 * must be called (once per word, once for the whole 1498 * transfer, once per batch of words etc) is arbitrary 1499 * as long as the @tx buffer offset is greater than or 1500 * equal to the requested byte at the time of the 1501 * call. The timestamp is only taken once, at the 1502 * first such call. It is assumed that the driver 1503 * advances its @tx buffer pointer monotonically. 1504 * @ctlr: Pointer to the spi_controller structure of the driver 1505 * @xfer: Pointer to the transfer being timestamped 1506 * @progress: How many words (not bytes) have been transferred so far 1507 * @irqs_off: If true, will disable IRQs and preemption for the duration of the 1508 * transfer, for less jitter in time measurement. Only compatible 1509 * with PIO drivers. If true, must follow up with 1510 * spi_take_timestamp_post or otherwise system will crash. 1511 * WARNING: for fully predictable results, the CPU frequency must 1512 * also be under control (governor). 1513 */ 1514 void spi_take_timestamp_pre(struct spi_controller *ctlr, 1515 struct spi_transfer *xfer, 1516 size_t progress, bool irqs_off) 1517 { 1518 if (!xfer->ptp_sts) 1519 return; 1520 1521 if (xfer->timestamped) 1522 return; 1523 1524 if (progress > xfer->ptp_sts_word_pre) 1525 return; 1526 1527 /* Capture the resolution of the timestamp */ 1528 xfer->ptp_sts_word_pre = progress; 1529 1530 if (irqs_off) { 1531 local_irq_save(ctlr->irq_flags); 1532 preempt_disable(); 1533 } 1534 1535 ptp_read_system_prets(xfer->ptp_sts); 1536 } 1537 EXPORT_SYMBOL_GPL(spi_take_timestamp_pre); 1538 1539 /** 1540 * spi_take_timestamp_post - helper for drivers to collect the end of the 1541 * TX timestamp for the requested byte from the SPI 1542 * transfer. Can be called with an arbitrary 1543 * frequency: only the first call where @tx exceeds 1544 * or is equal to the requested word will be 1545 * timestamped. 1546 * @ctlr: Pointer to the spi_controller structure of the driver 1547 * @xfer: Pointer to the transfer being timestamped 1548 * @progress: How many words (not bytes) have been transferred so far 1549 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU. 1550 */ 1551 void spi_take_timestamp_post(struct spi_controller *ctlr, 1552 struct spi_transfer *xfer, 1553 size_t progress, bool irqs_off) 1554 { 1555 if (!xfer->ptp_sts) 1556 return; 1557 1558 if (xfer->timestamped) 1559 return; 1560 1561 if (progress < xfer->ptp_sts_word_post) 1562 return; 1563 1564 ptp_read_system_postts(xfer->ptp_sts); 1565 1566 if (irqs_off) { 1567 local_irq_restore(ctlr->irq_flags); 1568 preempt_enable(); 1569 } 1570 1571 /* Capture the resolution of the timestamp */ 1572 xfer->ptp_sts_word_post = progress; 1573 1574 xfer->timestamped = true; 1575 } 1576 EXPORT_SYMBOL_GPL(spi_take_timestamp_post); 1577 1578 /** 1579 * spi_set_thread_rt - set the controller to pump at realtime priority 1580 * @ctlr: controller to boost priority of 1581 * 1582 * This can be called because the controller requested realtime priority 1583 * (by setting the ->rt value before calling spi_register_controller()) or 1584 * because a device on the bus said that its transfers needed realtime 1585 * priority. 1586 * 1587 * NOTE: at the moment if any device on a bus says it needs realtime then 1588 * the thread will be at realtime priority for all transfers on that 1589 * controller. If this eventually becomes a problem we may see if we can 1590 * find a way to boost the priority only temporarily during relevant 1591 * transfers. 1592 */ 1593 static void spi_set_thread_rt(struct spi_controller *ctlr) 1594 { 1595 struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 }; 1596 1597 dev_info(&ctlr->dev, 1598 "will run message pump with realtime priority\n"); 1599 sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, ¶m); 1600 } 1601 1602 static int spi_init_queue(struct spi_controller *ctlr) 1603 { 1604 ctlr->running = false; 1605 ctlr->busy = false; 1606 1607 kthread_init_worker(&ctlr->kworker); 1608 ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker, 1609 "%s", dev_name(&ctlr->dev)); 1610 if (IS_ERR(ctlr->kworker_task)) { 1611 dev_err(&ctlr->dev, "failed to create message pump task\n"); 1612 return PTR_ERR(ctlr->kworker_task); 1613 } 1614 kthread_init_work(&ctlr->pump_messages, spi_pump_messages); 1615 1616 /* 1617 * Controller config will indicate if this controller should run the 1618 * message pump with high (realtime) priority to reduce the transfer 1619 * latency on the bus by minimising the delay between a transfer 1620 * request and the scheduling of the message pump thread. Without this 1621 * setting the message pump thread will remain at default priority. 1622 */ 1623 if (ctlr->rt) 1624 spi_set_thread_rt(ctlr); 1625 1626 return 0; 1627 } 1628 1629 /** 1630 * spi_get_next_queued_message() - called by driver to check for queued 1631 * messages 1632 * @ctlr: the controller to check for queued messages 1633 * 1634 * If there are more messages in the queue, the next message is returned from 1635 * this call. 1636 * 1637 * Return: the next message in the queue, else NULL if the queue is empty. 1638 */ 1639 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr) 1640 { 1641 struct spi_message *next; 1642 unsigned long flags; 1643 1644 /* get a pointer to the next message, if any */ 1645 spin_lock_irqsave(&ctlr->queue_lock, flags); 1646 next = list_first_entry_or_null(&ctlr->queue, struct spi_message, 1647 queue); 1648 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1649 1650 return next; 1651 } 1652 EXPORT_SYMBOL_GPL(spi_get_next_queued_message); 1653 1654 /** 1655 * spi_finalize_current_message() - the current message is complete 1656 * @ctlr: the controller to return the message to 1657 * 1658 * Called by the driver to notify the core that the message in the front of the 1659 * queue is complete and can be removed from the queue. 1660 */ 1661 void spi_finalize_current_message(struct spi_controller *ctlr) 1662 { 1663 struct spi_transfer *xfer; 1664 struct spi_message *mesg; 1665 unsigned long flags; 1666 int ret; 1667 1668 spin_lock_irqsave(&ctlr->queue_lock, flags); 1669 mesg = ctlr->cur_msg; 1670 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1671 1672 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) { 1673 list_for_each_entry(xfer, &mesg->transfers, transfer_list) { 1674 ptp_read_system_postts(xfer->ptp_sts); 1675 xfer->ptp_sts_word_post = xfer->len; 1676 } 1677 } 1678 1679 if (unlikely(ctlr->ptp_sts_supported)) 1680 list_for_each_entry(xfer, &mesg->transfers, transfer_list) 1681 WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped); 1682 1683 spi_unmap_msg(ctlr, mesg); 1684 1685 if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { 1686 ret = ctlr->unprepare_message(ctlr, mesg); 1687 if (ret) { 1688 dev_err(&ctlr->dev, "failed to unprepare message: %d\n", 1689 ret); 1690 } 1691 } 1692 1693 spin_lock_irqsave(&ctlr->queue_lock, flags); 1694 ctlr->cur_msg = NULL; 1695 ctlr->cur_msg_prepared = false; 1696 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); 1697 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1698 1699 trace_spi_message_done(mesg); 1700 1701 mesg->state = NULL; 1702 if (mesg->complete) 1703 mesg->complete(mesg->context); 1704 } 1705 EXPORT_SYMBOL_GPL(spi_finalize_current_message); 1706 1707 static int spi_start_queue(struct spi_controller *ctlr) 1708 { 1709 unsigned long flags; 1710 1711 spin_lock_irqsave(&ctlr->queue_lock, flags); 1712 1713 if (ctlr->running || ctlr->busy) { 1714 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1715 return -EBUSY; 1716 } 1717 1718 ctlr->running = true; 1719 ctlr->cur_msg = NULL; 1720 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1721 1722 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); 1723 1724 return 0; 1725 } 1726 1727 static int spi_stop_queue(struct spi_controller *ctlr) 1728 { 1729 unsigned long flags; 1730 unsigned limit = 500; 1731 int ret = 0; 1732 1733 spin_lock_irqsave(&ctlr->queue_lock, flags); 1734 1735 /* 1736 * This is a bit lame, but is optimized for the common execution path. 1737 * A wait_queue on the ctlr->busy could be used, but then the common 1738 * execution path (pump_messages) would be required to call wake_up or 1739 * friends on every SPI message. Do this instead. 1740 */ 1741 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) { 1742 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1743 usleep_range(10000, 11000); 1744 spin_lock_irqsave(&ctlr->queue_lock, flags); 1745 } 1746 1747 if (!list_empty(&ctlr->queue) || ctlr->busy) 1748 ret = -EBUSY; 1749 else 1750 ctlr->running = false; 1751 1752 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1753 1754 if (ret) { 1755 dev_warn(&ctlr->dev, "could not stop message queue\n"); 1756 return ret; 1757 } 1758 return ret; 1759 } 1760 1761 static int spi_destroy_queue(struct spi_controller *ctlr) 1762 { 1763 int ret; 1764 1765 ret = spi_stop_queue(ctlr); 1766 1767 /* 1768 * kthread_flush_worker will block until all work is done. 1769 * If the reason that stop_queue timed out is that the work will never 1770 * finish, then it does no good to call flush/stop thread, so 1771 * return anyway. 1772 */ 1773 if (ret) { 1774 dev_err(&ctlr->dev, "problem destroying queue\n"); 1775 return ret; 1776 } 1777 1778 kthread_flush_worker(&ctlr->kworker); 1779 kthread_stop(ctlr->kworker_task); 1780 1781 return 0; 1782 } 1783 1784 static int __spi_queued_transfer(struct spi_device *spi, 1785 struct spi_message *msg, 1786 bool need_pump) 1787 { 1788 struct spi_controller *ctlr = spi->controller; 1789 unsigned long flags; 1790 1791 spin_lock_irqsave(&ctlr->queue_lock, flags); 1792 1793 if (!ctlr->running) { 1794 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1795 return -ESHUTDOWN; 1796 } 1797 msg->actual_length = 0; 1798 msg->status = -EINPROGRESS; 1799 1800 list_add_tail(&msg->queue, &ctlr->queue); 1801 if (!ctlr->busy && need_pump) 1802 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); 1803 1804 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1805 return 0; 1806 } 1807 1808 /** 1809 * spi_queued_transfer - transfer function for queued transfers 1810 * @spi: spi device which is requesting transfer 1811 * @msg: spi message which is to handled is queued to driver queue 1812 * 1813 * Return: zero on success, else a negative error code. 1814 */ 1815 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) 1816 { 1817 return __spi_queued_transfer(spi, msg, true); 1818 } 1819 1820 static int spi_controller_initialize_queue(struct spi_controller *ctlr) 1821 { 1822 int ret; 1823 1824 ctlr->transfer = spi_queued_transfer; 1825 if (!ctlr->transfer_one_message) 1826 ctlr->transfer_one_message = spi_transfer_one_message; 1827 1828 /* Initialize and start queue */ 1829 ret = spi_init_queue(ctlr); 1830 if (ret) { 1831 dev_err(&ctlr->dev, "problem initializing queue\n"); 1832 goto err_init_queue; 1833 } 1834 ctlr->queued = true; 1835 ret = spi_start_queue(ctlr); 1836 if (ret) { 1837 dev_err(&ctlr->dev, "problem starting queue\n"); 1838 goto err_start_queue; 1839 } 1840 1841 return 0; 1842 1843 err_start_queue: 1844 spi_destroy_queue(ctlr); 1845 err_init_queue: 1846 return ret; 1847 } 1848 1849 /** 1850 * spi_flush_queue - Send all pending messages in the queue from the callers' 1851 * context 1852 * @ctlr: controller to process queue for 1853 * 1854 * This should be used when one wants to ensure all pending messages have been 1855 * sent before doing something. Is used by the spi-mem code to make sure SPI 1856 * memory operations do not preempt regular SPI transfers that have been queued 1857 * before the spi-mem operation. 1858 */ 1859 void spi_flush_queue(struct spi_controller *ctlr) 1860 { 1861 if (ctlr->transfer == spi_queued_transfer) 1862 __spi_pump_messages(ctlr, false); 1863 } 1864 1865 /*-------------------------------------------------------------------------*/ 1866 1867 #if defined(CONFIG_OF) 1868 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, 1869 struct device_node *nc) 1870 { 1871 u32 value; 1872 int rc; 1873 1874 /* Mode (clock phase/polarity/etc.) */ 1875 if (of_property_read_bool(nc, "spi-cpha")) 1876 spi->mode |= SPI_CPHA; 1877 if (of_property_read_bool(nc, "spi-cpol")) 1878 spi->mode |= SPI_CPOL; 1879 if (of_property_read_bool(nc, "spi-3wire")) 1880 spi->mode |= SPI_3WIRE; 1881 if (of_property_read_bool(nc, "spi-lsb-first")) 1882 spi->mode |= SPI_LSB_FIRST; 1883 if (of_property_read_bool(nc, "spi-cs-high")) 1884 spi->mode |= SPI_CS_HIGH; 1885 1886 /* Device DUAL/QUAD mode */ 1887 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 1888 switch (value) { 1889 case 1: 1890 break; 1891 case 2: 1892 spi->mode |= SPI_TX_DUAL; 1893 break; 1894 case 4: 1895 spi->mode |= SPI_TX_QUAD; 1896 break; 1897 case 8: 1898 spi->mode |= SPI_TX_OCTAL; 1899 break; 1900 default: 1901 dev_warn(&ctlr->dev, 1902 "spi-tx-bus-width %d not supported\n", 1903 value); 1904 break; 1905 } 1906 } 1907 1908 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 1909 switch (value) { 1910 case 1: 1911 break; 1912 case 2: 1913 spi->mode |= SPI_RX_DUAL; 1914 break; 1915 case 4: 1916 spi->mode |= SPI_RX_QUAD; 1917 break; 1918 case 8: 1919 spi->mode |= SPI_RX_OCTAL; 1920 break; 1921 default: 1922 dev_warn(&ctlr->dev, 1923 "spi-rx-bus-width %d not supported\n", 1924 value); 1925 break; 1926 } 1927 } 1928 1929 if (spi_controller_is_slave(ctlr)) { 1930 if (!of_node_name_eq(nc, "slave")) { 1931 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n", 1932 nc); 1933 return -EINVAL; 1934 } 1935 return 0; 1936 } 1937 1938 /* Device address */ 1939 rc = of_property_read_u32(nc, "reg", &value); 1940 if (rc) { 1941 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n", 1942 nc, rc); 1943 return rc; 1944 } 1945 spi->chip_select = value; 1946 1947 /* 1948 * For descriptors associated with the device, polarity inversion is 1949 * handled in the gpiolib, so all gpio chip selects are "active high" 1950 * in the logical sense, the gpiolib will invert the line if need be. 1951 */ 1952 if ((ctlr->use_gpio_descriptors) && ctlr->cs_gpiods && 1953 ctlr->cs_gpiods[spi->chip_select]) 1954 spi->mode |= SPI_CS_HIGH; 1955 1956 /* Device speed */ 1957 if (!of_property_read_u32(nc, "spi-max-frequency", &value)) 1958 spi->max_speed_hz = value; 1959 1960 return 0; 1961 } 1962 1963 static struct spi_device * 1964 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc) 1965 { 1966 struct spi_device *spi; 1967 int rc; 1968 1969 /* Alloc an spi_device */ 1970 spi = spi_alloc_device(ctlr); 1971 if (!spi) { 1972 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc); 1973 rc = -ENOMEM; 1974 goto err_out; 1975 } 1976 1977 /* Select device driver */ 1978 rc = of_modalias_node(nc, spi->modalias, 1979 sizeof(spi->modalias)); 1980 if (rc < 0) { 1981 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc); 1982 goto err_out; 1983 } 1984 1985 rc = of_spi_parse_dt(ctlr, spi, nc); 1986 if (rc) 1987 goto err_out; 1988 1989 /* Store a pointer to the node in the device structure */ 1990 of_node_get(nc); 1991 spi->dev.of_node = nc; 1992 1993 /* Register the new device */ 1994 rc = spi_add_device(spi); 1995 if (rc) { 1996 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc); 1997 goto err_of_node_put; 1998 } 1999 2000 return spi; 2001 2002 err_of_node_put: 2003 of_node_put(nc); 2004 err_out: 2005 spi_dev_put(spi); 2006 return ERR_PTR(rc); 2007 } 2008 2009 /** 2010 * of_register_spi_devices() - Register child devices onto the SPI bus 2011 * @ctlr: Pointer to spi_controller device 2012 * 2013 * Registers an spi_device for each child node of controller node which 2014 * represents a valid SPI slave. 2015 */ 2016 static void of_register_spi_devices(struct spi_controller *ctlr) 2017 { 2018 struct spi_device *spi; 2019 struct device_node *nc; 2020 2021 if (!ctlr->dev.of_node) 2022 return; 2023 2024 for_each_available_child_of_node(ctlr->dev.of_node, nc) { 2025 if (of_node_test_and_set_flag(nc, OF_POPULATED)) 2026 continue; 2027 spi = of_register_spi_device(ctlr, nc); 2028 if (IS_ERR(spi)) { 2029 dev_warn(&ctlr->dev, 2030 "Failed to create SPI device for %pOF\n", nc); 2031 of_node_clear_flag(nc, OF_POPULATED); 2032 } 2033 } 2034 } 2035 #else 2036 static void of_register_spi_devices(struct spi_controller *ctlr) { } 2037 #endif 2038 2039 #ifdef CONFIG_ACPI 2040 struct acpi_spi_lookup { 2041 struct spi_controller *ctlr; 2042 u32 max_speed_hz; 2043 u32 mode; 2044 int irq; 2045 u8 bits_per_word; 2046 u8 chip_select; 2047 }; 2048 2049 static void acpi_spi_parse_apple_properties(struct acpi_device *dev, 2050 struct acpi_spi_lookup *lookup) 2051 { 2052 const union acpi_object *obj; 2053 2054 if (!x86_apple_machine) 2055 return; 2056 2057 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj) 2058 && obj->buffer.length >= 4) 2059 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer; 2060 2061 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj) 2062 && obj->buffer.length == 8) 2063 lookup->bits_per_word = *(u64 *)obj->buffer.pointer; 2064 2065 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj) 2066 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer) 2067 lookup->mode |= SPI_LSB_FIRST; 2068 2069 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj) 2070 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 2071 lookup->mode |= SPI_CPOL; 2072 2073 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj) 2074 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 2075 lookup->mode |= SPI_CPHA; 2076 } 2077 2078 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 2079 { 2080 struct acpi_spi_lookup *lookup = data; 2081 struct spi_controller *ctlr = lookup->ctlr; 2082 2083 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 2084 struct acpi_resource_spi_serialbus *sb; 2085 acpi_handle parent_handle; 2086 acpi_status status; 2087 2088 sb = &ares->data.spi_serial_bus; 2089 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 2090 2091 status = acpi_get_handle(NULL, 2092 sb->resource_source.string_ptr, 2093 &parent_handle); 2094 2095 if (ACPI_FAILURE(status) || 2096 ACPI_HANDLE(ctlr->dev.parent) != parent_handle) 2097 return -ENODEV; 2098 2099 /* 2100 * ACPI DeviceSelection numbering is handled by the 2101 * host controller driver in Windows and can vary 2102 * from driver to driver. In Linux we always expect 2103 * 0 .. max - 1 so we need to ask the driver to 2104 * translate between the two schemes. 2105 */ 2106 if (ctlr->fw_translate_cs) { 2107 int cs = ctlr->fw_translate_cs(ctlr, 2108 sb->device_selection); 2109 if (cs < 0) 2110 return cs; 2111 lookup->chip_select = cs; 2112 } else { 2113 lookup->chip_select = sb->device_selection; 2114 } 2115 2116 lookup->max_speed_hz = sb->connection_speed; 2117 lookup->bits_per_word = sb->data_bit_length; 2118 2119 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) 2120 lookup->mode |= SPI_CPHA; 2121 if (sb->clock_polarity == ACPI_SPI_START_HIGH) 2122 lookup->mode |= SPI_CPOL; 2123 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) 2124 lookup->mode |= SPI_CS_HIGH; 2125 } 2126 } else if (lookup->irq < 0) { 2127 struct resource r; 2128 2129 if (acpi_dev_resource_interrupt(ares, 0, &r)) 2130 lookup->irq = r.start; 2131 } 2132 2133 /* Always tell the ACPI core to skip this resource */ 2134 return 1; 2135 } 2136 2137 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, 2138 struct acpi_device *adev) 2139 { 2140 acpi_handle parent_handle = NULL; 2141 struct list_head resource_list; 2142 struct acpi_spi_lookup lookup = {}; 2143 struct spi_device *spi; 2144 int ret; 2145 2146 if (acpi_bus_get_status(adev) || !adev->status.present || 2147 acpi_device_enumerated(adev)) 2148 return AE_OK; 2149 2150 lookup.ctlr = ctlr; 2151 lookup.irq = -1; 2152 2153 INIT_LIST_HEAD(&resource_list); 2154 ret = acpi_dev_get_resources(adev, &resource_list, 2155 acpi_spi_add_resource, &lookup); 2156 acpi_dev_free_resource_list(&resource_list); 2157 2158 if (ret < 0) 2159 /* found SPI in _CRS but it points to another controller */ 2160 return AE_OK; 2161 2162 if (!lookup.max_speed_hz && 2163 !ACPI_FAILURE(acpi_get_parent(adev->handle, &parent_handle)) && 2164 ACPI_HANDLE(ctlr->dev.parent) == parent_handle) { 2165 /* Apple does not use _CRS but nested devices for SPI slaves */ 2166 acpi_spi_parse_apple_properties(adev, &lookup); 2167 } 2168 2169 if (!lookup.max_speed_hz) 2170 return AE_OK; 2171 2172 spi = spi_alloc_device(ctlr); 2173 if (!spi) { 2174 dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n", 2175 dev_name(&adev->dev)); 2176 return AE_NO_MEMORY; 2177 } 2178 2179 2180 ACPI_COMPANION_SET(&spi->dev, adev); 2181 spi->max_speed_hz = lookup.max_speed_hz; 2182 spi->mode |= lookup.mode; 2183 spi->irq = lookup.irq; 2184 spi->bits_per_word = lookup.bits_per_word; 2185 spi->chip_select = lookup.chip_select; 2186 2187 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, 2188 sizeof(spi->modalias)); 2189 2190 if (spi->irq < 0) 2191 spi->irq = acpi_dev_gpio_irq_get(adev, 0); 2192 2193 acpi_device_set_enumerated(adev); 2194 2195 adev->power.flags.ignore_parent = true; 2196 if (spi_add_device(spi)) { 2197 adev->power.flags.ignore_parent = false; 2198 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n", 2199 dev_name(&adev->dev)); 2200 spi_dev_put(spi); 2201 } 2202 2203 return AE_OK; 2204 } 2205 2206 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, 2207 void *data, void **return_value) 2208 { 2209 struct spi_controller *ctlr = data; 2210 struct acpi_device *adev; 2211 2212 if (acpi_bus_get_device(handle, &adev)) 2213 return AE_OK; 2214 2215 return acpi_register_spi_device(ctlr, adev); 2216 } 2217 2218 #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32 2219 2220 static void acpi_register_spi_devices(struct spi_controller *ctlr) 2221 { 2222 acpi_status status; 2223 acpi_handle handle; 2224 2225 handle = ACPI_HANDLE(ctlr->dev.parent); 2226 if (!handle) 2227 return; 2228 2229 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 2230 SPI_ACPI_ENUMERATE_MAX_DEPTH, 2231 acpi_spi_add_device, NULL, ctlr, NULL); 2232 if (ACPI_FAILURE(status)) 2233 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n"); 2234 } 2235 #else 2236 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {} 2237 #endif /* CONFIG_ACPI */ 2238 2239 static void spi_controller_release(struct device *dev) 2240 { 2241 struct spi_controller *ctlr; 2242 2243 ctlr = container_of(dev, struct spi_controller, dev); 2244 kfree(ctlr); 2245 } 2246 2247 static struct class spi_master_class = { 2248 .name = "spi_master", 2249 .owner = THIS_MODULE, 2250 .dev_release = spi_controller_release, 2251 .dev_groups = spi_master_groups, 2252 }; 2253 2254 #ifdef CONFIG_SPI_SLAVE 2255 /** 2256 * spi_slave_abort - abort the ongoing transfer request on an SPI slave 2257 * controller 2258 * @spi: device used for the current transfer 2259 */ 2260 int spi_slave_abort(struct spi_device *spi) 2261 { 2262 struct spi_controller *ctlr = spi->controller; 2263 2264 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort) 2265 return ctlr->slave_abort(ctlr); 2266 2267 return -ENOTSUPP; 2268 } 2269 EXPORT_SYMBOL_GPL(spi_slave_abort); 2270 2271 static int match_true(struct device *dev, void *data) 2272 { 2273 return 1; 2274 } 2275 2276 static ssize_t slave_show(struct device *dev, struct device_attribute *attr, 2277 char *buf) 2278 { 2279 struct spi_controller *ctlr = container_of(dev, struct spi_controller, 2280 dev); 2281 struct device *child; 2282 2283 child = device_find_child(&ctlr->dev, NULL, match_true); 2284 return sprintf(buf, "%s\n", 2285 child ? to_spi_device(child)->modalias : NULL); 2286 } 2287 2288 static ssize_t slave_store(struct device *dev, struct device_attribute *attr, 2289 const char *buf, size_t count) 2290 { 2291 struct spi_controller *ctlr = container_of(dev, struct spi_controller, 2292 dev); 2293 struct spi_device *spi; 2294 struct device *child; 2295 char name[32]; 2296 int rc; 2297 2298 rc = sscanf(buf, "%31s", name); 2299 if (rc != 1 || !name[0]) 2300 return -EINVAL; 2301 2302 child = device_find_child(&ctlr->dev, NULL, match_true); 2303 if (child) { 2304 /* Remove registered slave */ 2305 device_unregister(child); 2306 put_device(child); 2307 } 2308 2309 if (strcmp(name, "(null)")) { 2310 /* Register new slave */ 2311 spi = spi_alloc_device(ctlr); 2312 if (!spi) 2313 return -ENOMEM; 2314 2315 strlcpy(spi->modalias, name, sizeof(spi->modalias)); 2316 2317 rc = spi_add_device(spi); 2318 if (rc) { 2319 spi_dev_put(spi); 2320 return rc; 2321 } 2322 } 2323 2324 return count; 2325 } 2326 2327 static DEVICE_ATTR_RW(slave); 2328 2329 static struct attribute *spi_slave_attrs[] = { 2330 &dev_attr_slave.attr, 2331 NULL, 2332 }; 2333 2334 static const struct attribute_group spi_slave_group = { 2335 .attrs = spi_slave_attrs, 2336 }; 2337 2338 static const struct attribute_group *spi_slave_groups[] = { 2339 &spi_controller_statistics_group, 2340 &spi_slave_group, 2341 NULL, 2342 }; 2343 2344 static struct class spi_slave_class = { 2345 .name = "spi_slave", 2346 .owner = THIS_MODULE, 2347 .dev_release = spi_controller_release, 2348 .dev_groups = spi_slave_groups, 2349 }; 2350 #else 2351 extern struct class spi_slave_class; /* dummy */ 2352 #endif 2353 2354 /** 2355 * __spi_alloc_controller - allocate an SPI master or slave controller 2356 * @dev: the controller, possibly using the platform_bus 2357 * @size: how much zeroed driver-private data to allocate; the pointer to this 2358 * memory is in the driver_data field of the returned device, accessible 2359 * with spi_controller_get_devdata(); the memory is cacheline aligned; 2360 * drivers granting DMA access to portions of their private data need to 2361 * round up @size using ALIGN(size, dma_get_cache_alignment()). 2362 * @slave: flag indicating whether to allocate an SPI master (false) or SPI 2363 * slave (true) controller 2364 * Context: can sleep 2365 * 2366 * This call is used only by SPI controller drivers, which are the 2367 * only ones directly touching chip registers. It's how they allocate 2368 * an spi_controller structure, prior to calling spi_register_controller(). 2369 * 2370 * This must be called from context that can sleep. 2371 * 2372 * The caller is responsible for assigning the bus number and initializing the 2373 * controller's methods before calling spi_register_controller(); and (after 2374 * errors adding the device) calling spi_controller_put() to prevent a memory 2375 * leak. 2376 * 2377 * Return: the SPI controller structure on success, else NULL. 2378 */ 2379 struct spi_controller *__spi_alloc_controller(struct device *dev, 2380 unsigned int size, bool slave) 2381 { 2382 struct spi_controller *ctlr; 2383 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment()); 2384 2385 if (!dev) 2386 return NULL; 2387 2388 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL); 2389 if (!ctlr) 2390 return NULL; 2391 2392 device_initialize(&ctlr->dev); 2393 ctlr->bus_num = -1; 2394 ctlr->num_chipselect = 1; 2395 ctlr->slave = slave; 2396 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave) 2397 ctlr->dev.class = &spi_slave_class; 2398 else 2399 ctlr->dev.class = &spi_master_class; 2400 ctlr->dev.parent = dev; 2401 pm_suspend_ignore_children(&ctlr->dev, true); 2402 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size); 2403 2404 return ctlr; 2405 } 2406 EXPORT_SYMBOL_GPL(__spi_alloc_controller); 2407 2408 #ifdef CONFIG_OF 2409 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr) 2410 { 2411 int nb, i, *cs; 2412 struct device_node *np = ctlr->dev.of_node; 2413 2414 if (!np) 2415 return 0; 2416 2417 nb = of_gpio_named_count(np, "cs-gpios"); 2418 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); 2419 2420 /* Return error only for an incorrectly formed cs-gpios property */ 2421 if (nb == 0 || nb == -ENOENT) 2422 return 0; 2423 else if (nb < 0) 2424 return nb; 2425 2426 cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int), 2427 GFP_KERNEL); 2428 ctlr->cs_gpios = cs; 2429 2430 if (!ctlr->cs_gpios) 2431 return -ENOMEM; 2432 2433 for (i = 0; i < ctlr->num_chipselect; i++) 2434 cs[i] = -ENOENT; 2435 2436 for (i = 0; i < nb; i++) 2437 cs[i] = of_get_named_gpio(np, "cs-gpios", i); 2438 2439 return 0; 2440 } 2441 #else 2442 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr) 2443 { 2444 return 0; 2445 } 2446 #endif 2447 2448 /** 2449 * spi_get_gpio_descs() - grab chip select GPIOs for the master 2450 * @ctlr: The SPI master to grab GPIO descriptors for 2451 */ 2452 static int spi_get_gpio_descs(struct spi_controller *ctlr) 2453 { 2454 int nb, i; 2455 struct gpio_desc **cs; 2456 struct device *dev = &ctlr->dev; 2457 unsigned long native_cs_mask = 0; 2458 unsigned int num_cs_gpios = 0; 2459 2460 nb = gpiod_count(dev, "cs"); 2461 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); 2462 2463 /* No GPIOs at all is fine, else return the error */ 2464 if (nb == 0 || nb == -ENOENT) 2465 return 0; 2466 else if (nb < 0) 2467 return nb; 2468 2469 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs), 2470 GFP_KERNEL); 2471 if (!cs) 2472 return -ENOMEM; 2473 ctlr->cs_gpiods = cs; 2474 2475 for (i = 0; i < nb; i++) { 2476 /* 2477 * Most chipselects are active low, the inverted 2478 * semantics are handled by special quirks in gpiolib, 2479 * so initializing them GPIOD_OUT_LOW here means 2480 * "unasserted", in most cases this will drive the physical 2481 * line high. 2482 */ 2483 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i, 2484 GPIOD_OUT_LOW); 2485 if (IS_ERR(cs[i])) 2486 return PTR_ERR(cs[i]); 2487 2488 if (cs[i]) { 2489 /* 2490 * If we find a CS GPIO, name it after the device and 2491 * chip select line. 2492 */ 2493 char *gpioname; 2494 2495 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d", 2496 dev_name(dev), i); 2497 if (!gpioname) 2498 return -ENOMEM; 2499 gpiod_set_consumer_name(cs[i], gpioname); 2500 num_cs_gpios++; 2501 continue; 2502 } 2503 2504 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) { 2505 dev_err(dev, "Invalid native chip select %d\n", i); 2506 return -EINVAL; 2507 } 2508 native_cs_mask |= BIT(i); 2509 } 2510 2511 ctlr->unused_native_cs = ffz(native_cs_mask); 2512 if (num_cs_gpios && ctlr->max_native_cs && 2513 ctlr->unused_native_cs >= ctlr->max_native_cs) { 2514 dev_err(dev, "No unused native chip select available\n"); 2515 return -EINVAL; 2516 } 2517 2518 return 0; 2519 } 2520 2521 static int spi_controller_check_ops(struct spi_controller *ctlr) 2522 { 2523 /* 2524 * The controller may implement only the high-level SPI-memory like 2525 * operations if it does not support regular SPI transfers, and this is 2526 * valid use case. 2527 * If ->mem_ops is NULL, we request that at least one of the 2528 * ->transfer_xxx() method be implemented. 2529 */ 2530 if (ctlr->mem_ops) { 2531 if (!ctlr->mem_ops->exec_op) 2532 return -EINVAL; 2533 } else if (!ctlr->transfer && !ctlr->transfer_one && 2534 !ctlr->transfer_one_message) { 2535 return -EINVAL; 2536 } 2537 2538 return 0; 2539 } 2540 2541 /** 2542 * spi_register_controller - register SPI master or slave controller 2543 * @ctlr: initialized master, originally from spi_alloc_master() or 2544 * spi_alloc_slave() 2545 * Context: can sleep 2546 * 2547 * SPI controllers connect to their drivers using some non-SPI bus, 2548 * such as the platform bus. The final stage of probe() in that code 2549 * includes calling spi_register_controller() to hook up to this SPI bus glue. 2550 * 2551 * SPI controllers use board specific (often SOC specific) bus numbers, 2552 * and board-specific addressing for SPI devices combines those numbers 2553 * with chip select numbers. Since SPI does not directly support dynamic 2554 * device identification, boards need configuration tables telling which 2555 * chip is at which address. 2556 * 2557 * This must be called from context that can sleep. It returns zero on 2558 * success, else a negative error code (dropping the controller's refcount). 2559 * After a successful return, the caller is responsible for calling 2560 * spi_unregister_controller(). 2561 * 2562 * Return: zero on success, else a negative error code. 2563 */ 2564 int spi_register_controller(struct spi_controller *ctlr) 2565 { 2566 struct device *dev = ctlr->dev.parent; 2567 struct boardinfo *bi; 2568 int status; 2569 int id, first_dynamic; 2570 2571 if (!dev) 2572 return -ENODEV; 2573 2574 /* 2575 * Make sure all necessary hooks are implemented before registering 2576 * the SPI controller. 2577 */ 2578 status = spi_controller_check_ops(ctlr); 2579 if (status) 2580 return status; 2581 2582 if (ctlr->bus_num >= 0) { 2583 /* devices with a fixed bus num must check-in with the num */ 2584 mutex_lock(&board_lock); 2585 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, 2586 ctlr->bus_num + 1, GFP_KERNEL); 2587 mutex_unlock(&board_lock); 2588 if (WARN(id < 0, "couldn't get idr")) 2589 return id == -ENOSPC ? -EBUSY : id; 2590 ctlr->bus_num = id; 2591 } else if (ctlr->dev.of_node) { 2592 /* allocate dynamic bus number using Linux idr */ 2593 id = of_alias_get_id(ctlr->dev.of_node, "spi"); 2594 if (id >= 0) { 2595 ctlr->bus_num = id; 2596 mutex_lock(&board_lock); 2597 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, 2598 ctlr->bus_num + 1, GFP_KERNEL); 2599 mutex_unlock(&board_lock); 2600 if (WARN(id < 0, "couldn't get idr")) 2601 return id == -ENOSPC ? -EBUSY : id; 2602 } 2603 } 2604 if (ctlr->bus_num < 0) { 2605 first_dynamic = of_alias_get_highest_id("spi"); 2606 if (first_dynamic < 0) 2607 first_dynamic = 0; 2608 else 2609 first_dynamic++; 2610 2611 mutex_lock(&board_lock); 2612 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic, 2613 0, GFP_KERNEL); 2614 mutex_unlock(&board_lock); 2615 if (WARN(id < 0, "couldn't get idr")) 2616 return id; 2617 ctlr->bus_num = id; 2618 } 2619 INIT_LIST_HEAD(&ctlr->queue); 2620 spin_lock_init(&ctlr->queue_lock); 2621 spin_lock_init(&ctlr->bus_lock_spinlock); 2622 mutex_init(&ctlr->bus_lock_mutex); 2623 mutex_init(&ctlr->io_mutex); 2624 ctlr->bus_lock_flag = 0; 2625 init_completion(&ctlr->xfer_completion); 2626 if (!ctlr->max_dma_len) 2627 ctlr->max_dma_len = INT_MAX; 2628 2629 /* register the device, then userspace will see it. 2630 * registration fails if the bus ID is in use. 2631 */ 2632 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num); 2633 2634 if (!spi_controller_is_slave(ctlr)) { 2635 if (ctlr->use_gpio_descriptors) { 2636 status = spi_get_gpio_descs(ctlr); 2637 if (status) 2638 goto free_bus_id; 2639 /* 2640 * A controller using GPIO descriptors always 2641 * supports SPI_CS_HIGH if need be. 2642 */ 2643 ctlr->mode_bits |= SPI_CS_HIGH; 2644 } else { 2645 /* Legacy code path for GPIOs from DT */ 2646 status = of_spi_get_gpio_numbers(ctlr); 2647 if (status) 2648 goto free_bus_id; 2649 } 2650 } 2651 2652 /* 2653 * Even if it's just one always-selected device, there must 2654 * be at least one chipselect. 2655 */ 2656 if (!ctlr->num_chipselect) { 2657 status = -EINVAL; 2658 goto free_bus_id; 2659 } 2660 2661 status = device_add(&ctlr->dev); 2662 if (status < 0) 2663 goto free_bus_id; 2664 dev_dbg(dev, "registered %s %s\n", 2665 spi_controller_is_slave(ctlr) ? "slave" : "master", 2666 dev_name(&ctlr->dev)); 2667 2668 /* 2669 * If we're using a queued driver, start the queue. Note that we don't 2670 * need the queueing logic if the driver is only supporting high-level 2671 * memory operations. 2672 */ 2673 if (ctlr->transfer) { 2674 dev_info(dev, "controller is unqueued, this is deprecated\n"); 2675 } else if (ctlr->transfer_one || ctlr->transfer_one_message) { 2676 status = spi_controller_initialize_queue(ctlr); 2677 if (status) { 2678 device_del(&ctlr->dev); 2679 goto free_bus_id; 2680 } 2681 } 2682 /* add statistics */ 2683 spin_lock_init(&ctlr->statistics.lock); 2684 2685 mutex_lock(&board_lock); 2686 list_add_tail(&ctlr->list, &spi_controller_list); 2687 list_for_each_entry(bi, &board_list, list) 2688 spi_match_controller_to_boardinfo(ctlr, &bi->board_info); 2689 mutex_unlock(&board_lock); 2690 2691 /* Register devices from the device tree and ACPI */ 2692 of_register_spi_devices(ctlr); 2693 acpi_register_spi_devices(ctlr); 2694 return status; 2695 2696 free_bus_id: 2697 mutex_lock(&board_lock); 2698 idr_remove(&spi_master_idr, ctlr->bus_num); 2699 mutex_unlock(&board_lock); 2700 return status; 2701 } 2702 EXPORT_SYMBOL_GPL(spi_register_controller); 2703 2704 static void devm_spi_unregister(struct device *dev, void *res) 2705 { 2706 spi_unregister_controller(*(struct spi_controller **)res); 2707 } 2708 2709 /** 2710 * devm_spi_register_controller - register managed SPI master or slave 2711 * controller 2712 * @dev: device managing SPI controller 2713 * @ctlr: initialized controller, originally from spi_alloc_master() or 2714 * spi_alloc_slave() 2715 * Context: can sleep 2716 * 2717 * Register a SPI device as with spi_register_controller() which will 2718 * automatically be unregistered and freed. 2719 * 2720 * Return: zero on success, else a negative error code. 2721 */ 2722 int devm_spi_register_controller(struct device *dev, 2723 struct spi_controller *ctlr) 2724 { 2725 struct spi_controller **ptr; 2726 int ret; 2727 2728 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); 2729 if (!ptr) 2730 return -ENOMEM; 2731 2732 ret = spi_register_controller(ctlr); 2733 if (!ret) { 2734 *ptr = ctlr; 2735 devres_add(dev, ptr); 2736 } else { 2737 devres_free(ptr); 2738 } 2739 2740 return ret; 2741 } 2742 EXPORT_SYMBOL_GPL(devm_spi_register_controller); 2743 2744 static int __unregister(struct device *dev, void *null) 2745 { 2746 spi_unregister_device(to_spi_device(dev)); 2747 return 0; 2748 } 2749 2750 /** 2751 * spi_unregister_controller - unregister SPI master or slave controller 2752 * @ctlr: the controller being unregistered 2753 * Context: can sleep 2754 * 2755 * This call is used only by SPI controller drivers, which are the 2756 * only ones directly touching chip registers. 2757 * 2758 * This must be called from context that can sleep. 2759 * 2760 * Note that this function also drops a reference to the controller. 2761 */ 2762 void spi_unregister_controller(struct spi_controller *ctlr) 2763 { 2764 struct spi_controller *found; 2765 int id = ctlr->bus_num; 2766 2767 device_for_each_child(&ctlr->dev, NULL, __unregister); 2768 2769 /* First make sure that this controller was ever added */ 2770 mutex_lock(&board_lock); 2771 found = idr_find(&spi_master_idr, id); 2772 mutex_unlock(&board_lock); 2773 if (ctlr->queued) { 2774 if (spi_destroy_queue(ctlr)) 2775 dev_err(&ctlr->dev, "queue remove failed\n"); 2776 } 2777 mutex_lock(&board_lock); 2778 list_del(&ctlr->list); 2779 mutex_unlock(&board_lock); 2780 2781 device_unregister(&ctlr->dev); 2782 /* free bus id */ 2783 mutex_lock(&board_lock); 2784 if (found == ctlr) 2785 idr_remove(&spi_master_idr, id); 2786 mutex_unlock(&board_lock); 2787 } 2788 EXPORT_SYMBOL_GPL(spi_unregister_controller); 2789 2790 int spi_controller_suspend(struct spi_controller *ctlr) 2791 { 2792 int ret; 2793 2794 /* Basically no-ops for non-queued controllers */ 2795 if (!ctlr->queued) 2796 return 0; 2797 2798 ret = spi_stop_queue(ctlr); 2799 if (ret) 2800 dev_err(&ctlr->dev, "queue stop failed\n"); 2801 2802 return ret; 2803 } 2804 EXPORT_SYMBOL_GPL(spi_controller_suspend); 2805 2806 int spi_controller_resume(struct spi_controller *ctlr) 2807 { 2808 int ret; 2809 2810 if (!ctlr->queued) 2811 return 0; 2812 2813 ret = spi_start_queue(ctlr); 2814 if (ret) 2815 dev_err(&ctlr->dev, "queue restart failed\n"); 2816 2817 return ret; 2818 } 2819 EXPORT_SYMBOL_GPL(spi_controller_resume); 2820 2821 static int __spi_controller_match(struct device *dev, const void *data) 2822 { 2823 struct spi_controller *ctlr; 2824 const u16 *bus_num = data; 2825 2826 ctlr = container_of(dev, struct spi_controller, dev); 2827 return ctlr->bus_num == *bus_num; 2828 } 2829 2830 /** 2831 * spi_busnum_to_master - look up master associated with bus_num 2832 * @bus_num: the master's bus number 2833 * Context: can sleep 2834 * 2835 * This call may be used with devices that are registered after 2836 * arch init time. It returns a refcounted pointer to the relevant 2837 * spi_controller (which the caller must release), or NULL if there is 2838 * no such master registered. 2839 * 2840 * Return: the SPI master structure on success, else NULL. 2841 */ 2842 struct spi_controller *spi_busnum_to_master(u16 bus_num) 2843 { 2844 struct device *dev; 2845 struct spi_controller *ctlr = NULL; 2846 2847 dev = class_find_device(&spi_master_class, NULL, &bus_num, 2848 __spi_controller_match); 2849 if (dev) 2850 ctlr = container_of(dev, struct spi_controller, dev); 2851 /* reference got in class_find_device */ 2852 return ctlr; 2853 } 2854 EXPORT_SYMBOL_GPL(spi_busnum_to_master); 2855 2856 /*-------------------------------------------------------------------------*/ 2857 2858 /* Core methods for SPI resource management */ 2859 2860 /** 2861 * spi_res_alloc - allocate a spi resource that is life-cycle managed 2862 * during the processing of a spi_message while using 2863 * spi_transfer_one 2864 * @spi: the spi device for which we allocate memory 2865 * @release: the release code to execute for this resource 2866 * @size: size to alloc and return 2867 * @gfp: GFP allocation flags 2868 * 2869 * Return: the pointer to the allocated data 2870 * 2871 * This may get enhanced in the future to allocate from a memory pool 2872 * of the @spi_device or @spi_controller to avoid repeated allocations. 2873 */ 2874 void *spi_res_alloc(struct spi_device *spi, 2875 spi_res_release_t release, 2876 size_t size, gfp_t gfp) 2877 { 2878 struct spi_res *sres; 2879 2880 sres = kzalloc(sizeof(*sres) + size, gfp); 2881 if (!sres) 2882 return NULL; 2883 2884 INIT_LIST_HEAD(&sres->entry); 2885 sres->release = release; 2886 2887 return sres->data; 2888 } 2889 EXPORT_SYMBOL_GPL(spi_res_alloc); 2890 2891 /** 2892 * spi_res_free - free an spi resource 2893 * @res: pointer to the custom data of a resource 2894 * 2895 */ 2896 void spi_res_free(void *res) 2897 { 2898 struct spi_res *sres = container_of(res, struct spi_res, data); 2899 2900 if (!res) 2901 return; 2902 2903 WARN_ON(!list_empty(&sres->entry)); 2904 kfree(sres); 2905 } 2906 EXPORT_SYMBOL_GPL(spi_res_free); 2907 2908 /** 2909 * spi_res_add - add a spi_res to the spi_message 2910 * @message: the spi message 2911 * @res: the spi_resource 2912 */ 2913 void spi_res_add(struct spi_message *message, void *res) 2914 { 2915 struct spi_res *sres = container_of(res, struct spi_res, data); 2916 2917 WARN_ON(!list_empty(&sres->entry)); 2918 list_add_tail(&sres->entry, &message->resources); 2919 } 2920 EXPORT_SYMBOL_GPL(spi_res_add); 2921 2922 /** 2923 * spi_res_release - release all spi resources for this message 2924 * @ctlr: the @spi_controller 2925 * @message: the @spi_message 2926 */ 2927 void spi_res_release(struct spi_controller *ctlr, struct spi_message *message) 2928 { 2929 struct spi_res *res, *tmp; 2930 2931 list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) { 2932 if (res->release) 2933 res->release(ctlr, message, res->data); 2934 2935 list_del(&res->entry); 2936 2937 kfree(res); 2938 } 2939 } 2940 EXPORT_SYMBOL_GPL(spi_res_release); 2941 2942 /*-------------------------------------------------------------------------*/ 2943 2944 /* Core methods for spi_message alterations */ 2945 2946 static void __spi_replace_transfers_release(struct spi_controller *ctlr, 2947 struct spi_message *msg, 2948 void *res) 2949 { 2950 struct spi_replaced_transfers *rxfer = res; 2951 size_t i; 2952 2953 /* call extra callback if requested */ 2954 if (rxfer->release) 2955 rxfer->release(ctlr, msg, res); 2956 2957 /* insert replaced transfers back into the message */ 2958 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); 2959 2960 /* remove the formerly inserted entries */ 2961 for (i = 0; i < rxfer->inserted; i++) 2962 list_del(&rxfer->inserted_transfers[i].transfer_list); 2963 } 2964 2965 /** 2966 * spi_replace_transfers - replace transfers with several transfers 2967 * and register change with spi_message.resources 2968 * @msg: the spi_message we work upon 2969 * @xfer_first: the first spi_transfer we want to replace 2970 * @remove: number of transfers to remove 2971 * @insert: the number of transfers we want to insert instead 2972 * @release: extra release code necessary in some circumstances 2973 * @extradatasize: extra data to allocate (with alignment guarantees 2974 * of struct @spi_transfer) 2975 * @gfp: gfp flags 2976 * 2977 * Returns: pointer to @spi_replaced_transfers, 2978 * PTR_ERR(...) in case of errors. 2979 */ 2980 struct spi_replaced_transfers *spi_replace_transfers( 2981 struct spi_message *msg, 2982 struct spi_transfer *xfer_first, 2983 size_t remove, 2984 size_t insert, 2985 spi_replaced_release_t release, 2986 size_t extradatasize, 2987 gfp_t gfp) 2988 { 2989 struct spi_replaced_transfers *rxfer; 2990 struct spi_transfer *xfer; 2991 size_t i; 2992 2993 /* allocate the structure using spi_res */ 2994 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, 2995 struct_size(rxfer, inserted_transfers, insert) 2996 + extradatasize, 2997 gfp); 2998 if (!rxfer) 2999 return ERR_PTR(-ENOMEM); 3000 3001 /* the release code to invoke before running the generic release */ 3002 rxfer->release = release; 3003 3004 /* assign extradata */ 3005 if (extradatasize) 3006 rxfer->extradata = 3007 &rxfer->inserted_transfers[insert]; 3008 3009 /* init the replaced_transfers list */ 3010 INIT_LIST_HEAD(&rxfer->replaced_transfers); 3011 3012 /* assign the list_entry after which we should reinsert 3013 * the @replaced_transfers - it may be spi_message.messages! 3014 */ 3015 rxfer->replaced_after = xfer_first->transfer_list.prev; 3016 3017 /* remove the requested number of transfers */ 3018 for (i = 0; i < remove; i++) { 3019 /* if the entry after replaced_after it is msg->transfers 3020 * then we have been requested to remove more transfers 3021 * than are in the list 3022 */ 3023 if (rxfer->replaced_after->next == &msg->transfers) { 3024 dev_err(&msg->spi->dev, 3025 "requested to remove more spi_transfers than are available\n"); 3026 /* insert replaced transfers back into the message */ 3027 list_splice(&rxfer->replaced_transfers, 3028 rxfer->replaced_after); 3029 3030 /* free the spi_replace_transfer structure */ 3031 spi_res_free(rxfer); 3032 3033 /* and return with an error */ 3034 return ERR_PTR(-EINVAL); 3035 } 3036 3037 /* remove the entry after replaced_after from list of 3038 * transfers and add it to list of replaced_transfers 3039 */ 3040 list_move_tail(rxfer->replaced_after->next, 3041 &rxfer->replaced_transfers); 3042 } 3043 3044 /* create copy of the given xfer with identical settings 3045 * based on the first transfer to get removed 3046 */ 3047 for (i = 0; i < insert; i++) { 3048 /* we need to run in reverse order */ 3049 xfer = &rxfer->inserted_transfers[insert - 1 - i]; 3050 3051 /* copy all spi_transfer data */ 3052 memcpy(xfer, xfer_first, sizeof(*xfer)); 3053 3054 /* add to list */ 3055 list_add(&xfer->transfer_list, rxfer->replaced_after); 3056 3057 /* clear cs_change and delay for all but the last */ 3058 if (i) { 3059 xfer->cs_change = false; 3060 xfer->delay_usecs = 0; 3061 xfer->delay.value = 0; 3062 } 3063 } 3064 3065 /* set up inserted */ 3066 rxfer->inserted = insert; 3067 3068 /* and register it with spi_res/spi_message */ 3069 spi_res_add(msg, rxfer); 3070 3071 return rxfer; 3072 } 3073 EXPORT_SYMBOL_GPL(spi_replace_transfers); 3074 3075 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, 3076 struct spi_message *msg, 3077 struct spi_transfer **xferp, 3078 size_t maxsize, 3079 gfp_t gfp) 3080 { 3081 struct spi_transfer *xfer = *xferp, *xfers; 3082 struct spi_replaced_transfers *srt; 3083 size_t offset; 3084 size_t count, i; 3085 3086 /* calculate how many we have to replace */ 3087 count = DIV_ROUND_UP(xfer->len, maxsize); 3088 3089 /* create replacement */ 3090 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp); 3091 if (IS_ERR(srt)) 3092 return PTR_ERR(srt); 3093 xfers = srt->inserted_transfers; 3094 3095 /* now handle each of those newly inserted spi_transfers 3096 * note that the replacements spi_transfers all are preset 3097 * to the same values as *xferp, so tx_buf, rx_buf and len 3098 * are all identical (as well as most others) 3099 * so we just have to fix up len and the pointers. 3100 * 3101 * this also includes support for the depreciated 3102 * spi_message.is_dma_mapped interface 3103 */ 3104 3105 /* the first transfer just needs the length modified, so we 3106 * run it outside the loop 3107 */ 3108 xfers[0].len = min_t(size_t, maxsize, xfer[0].len); 3109 3110 /* all the others need rx_buf/tx_buf also set */ 3111 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) { 3112 /* update rx_buf, tx_buf and dma */ 3113 if (xfers[i].rx_buf) 3114 xfers[i].rx_buf += offset; 3115 if (xfers[i].rx_dma) 3116 xfers[i].rx_dma += offset; 3117 if (xfers[i].tx_buf) 3118 xfers[i].tx_buf += offset; 3119 if (xfers[i].tx_dma) 3120 xfers[i].tx_dma += offset; 3121 3122 /* update length */ 3123 xfers[i].len = min(maxsize, xfers[i].len - offset); 3124 } 3125 3126 /* we set up xferp to the last entry we have inserted, 3127 * so that we skip those already split transfers 3128 */ 3129 *xferp = &xfers[count - 1]; 3130 3131 /* increment statistics counters */ 3132 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, 3133 transfers_split_maxsize); 3134 SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics, 3135 transfers_split_maxsize); 3136 3137 return 0; 3138 } 3139 3140 /** 3141 * spi_split_tranfers_maxsize - split spi transfers into multiple transfers 3142 * when an individual transfer exceeds a 3143 * certain size 3144 * @ctlr: the @spi_controller for this transfer 3145 * @msg: the @spi_message to transform 3146 * @maxsize: the maximum when to apply this 3147 * @gfp: GFP allocation flags 3148 * 3149 * Return: status of transformation 3150 */ 3151 int spi_split_transfers_maxsize(struct spi_controller *ctlr, 3152 struct spi_message *msg, 3153 size_t maxsize, 3154 gfp_t gfp) 3155 { 3156 struct spi_transfer *xfer; 3157 int ret; 3158 3159 /* iterate over the transfer_list, 3160 * but note that xfer is advanced to the last transfer inserted 3161 * to avoid checking sizes again unnecessarily (also xfer does 3162 * potentiall belong to a different list by the time the 3163 * replacement has happened 3164 */ 3165 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 3166 if (xfer->len > maxsize) { 3167 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, 3168 maxsize, gfp); 3169 if (ret) 3170 return ret; 3171 } 3172 } 3173 3174 return 0; 3175 } 3176 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize); 3177 3178 /*-------------------------------------------------------------------------*/ 3179 3180 /* Core methods for SPI controller protocol drivers. Some of the 3181 * other core methods are currently defined as inline functions. 3182 */ 3183 3184 static int __spi_validate_bits_per_word(struct spi_controller *ctlr, 3185 u8 bits_per_word) 3186 { 3187 if (ctlr->bits_per_word_mask) { 3188 /* Only 32 bits fit in the mask */ 3189 if (bits_per_word > 32) 3190 return -EINVAL; 3191 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word))) 3192 return -EINVAL; 3193 } 3194 3195 return 0; 3196 } 3197 3198 /** 3199 * spi_setup - setup SPI mode and clock rate 3200 * @spi: the device whose settings are being modified 3201 * Context: can sleep, and no requests are queued to the device 3202 * 3203 * SPI protocol drivers may need to update the transfer mode if the 3204 * device doesn't work with its default. They may likewise need 3205 * to update clock rates or word sizes from initial values. This function 3206 * changes those settings, and must be called from a context that can sleep. 3207 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take 3208 * effect the next time the device is selected and data is transferred to 3209 * or from it. When this function returns, the spi device is deselected. 3210 * 3211 * Note that this call will fail if the protocol driver specifies an option 3212 * that the underlying controller or its driver does not support. For 3213 * example, not all hardware supports wire transfers using nine bit words, 3214 * LSB-first wire encoding, or active-high chipselects. 3215 * 3216 * Return: zero on success, else a negative error code. 3217 */ 3218 int spi_setup(struct spi_device *spi) 3219 { 3220 unsigned bad_bits, ugly_bits; 3221 int status; 3222 3223 /* check mode to prevent that DUAL and QUAD set at the same time 3224 */ 3225 if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) || 3226 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) { 3227 dev_err(&spi->dev, 3228 "setup: can not select dual and quad at the same time\n"); 3229 return -EINVAL; 3230 } 3231 /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden 3232 */ 3233 if ((spi->mode & SPI_3WIRE) && (spi->mode & 3234 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | 3235 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))) 3236 return -EINVAL; 3237 /* help drivers fail *cleanly* when they need options 3238 * that aren't supported with their current controller 3239 * SPI_CS_WORD has a fallback software implementation, 3240 * so it is ignored here. 3241 */ 3242 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD); 3243 /* nothing prevents from working with active-high CS in case if it 3244 * is driven by GPIO. 3245 */ 3246 if (gpio_is_valid(spi->cs_gpio)) 3247 bad_bits &= ~SPI_CS_HIGH; 3248 ugly_bits = bad_bits & 3249 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | 3250 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL); 3251 if (ugly_bits) { 3252 dev_warn(&spi->dev, 3253 "setup: ignoring unsupported mode bits %x\n", 3254 ugly_bits); 3255 spi->mode &= ~ugly_bits; 3256 bad_bits &= ~ugly_bits; 3257 } 3258 if (bad_bits) { 3259 dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 3260 bad_bits); 3261 return -EINVAL; 3262 } 3263 3264 if (!spi->bits_per_word) 3265 spi->bits_per_word = 8; 3266 3267 status = __spi_validate_bits_per_word(spi->controller, 3268 spi->bits_per_word); 3269 if (status) 3270 return status; 3271 3272 if (!spi->max_speed_hz) 3273 spi->max_speed_hz = spi->controller->max_speed_hz; 3274 3275 if (spi->controller->setup) 3276 status = spi->controller->setup(spi); 3277 3278 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) { 3279 status = pm_runtime_get_sync(spi->controller->dev.parent); 3280 if (status < 0) { 3281 pm_runtime_put_noidle(spi->controller->dev.parent); 3282 dev_err(&spi->controller->dev, "Failed to power device: %d\n", 3283 status); 3284 return status; 3285 } 3286 3287 /* 3288 * We do not want to return positive value from pm_runtime_get, 3289 * there are many instances of devices calling spi_setup() and 3290 * checking for a non-zero return value instead of a negative 3291 * return value. 3292 */ 3293 status = 0; 3294 3295 spi_set_cs(spi, false); 3296 pm_runtime_mark_last_busy(spi->controller->dev.parent); 3297 pm_runtime_put_autosuspend(spi->controller->dev.parent); 3298 } else { 3299 spi_set_cs(spi, false); 3300 } 3301 3302 if (spi->rt && !spi->controller->rt) { 3303 spi->controller->rt = true; 3304 spi_set_thread_rt(spi->controller); 3305 } 3306 3307 dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n", 3308 (int) (spi->mode & (SPI_CPOL | SPI_CPHA)), 3309 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 3310 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 3311 (spi->mode & SPI_3WIRE) ? "3wire, " : "", 3312 (spi->mode & SPI_LOOP) ? "loopback, " : "", 3313 spi->bits_per_word, spi->max_speed_hz, 3314 status); 3315 3316 return status; 3317 } 3318 EXPORT_SYMBOL_GPL(spi_setup); 3319 3320 /** 3321 * spi_set_cs_timing - configure CS setup, hold, and inactive delays 3322 * @spi: the device that requires specific CS timing configuration 3323 * @setup: CS setup time specified via @spi_delay 3324 * @hold: CS hold time specified via @spi_delay 3325 * @inactive: CS inactive delay between transfers specified via @spi_delay 3326 * 3327 * Return: zero on success, else a negative error code. 3328 */ 3329 int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup, 3330 struct spi_delay *hold, struct spi_delay *inactive) 3331 { 3332 size_t len; 3333 3334 if (spi->controller->set_cs_timing) 3335 return spi->controller->set_cs_timing(spi, setup, hold, 3336 inactive); 3337 3338 if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) || 3339 (hold && hold->unit == SPI_DELAY_UNIT_SCK) || 3340 (inactive && inactive->unit == SPI_DELAY_UNIT_SCK)) { 3341 dev_err(&spi->dev, 3342 "Clock-cycle delays for CS not supported in SW mode\n"); 3343 return -ENOTSUPP; 3344 } 3345 3346 len = sizeof(struct spi_delay); 3347 3348 /* copy delays to controller */ 3349 if (setup) 3350 memcpy(&spi->controller->cs_setup, setup, len); 3351 else 3352 memset(&spi->controller->cs_setup, 0, len); 3353 3354 if (hold) 3355 memcpy(&spi->controller->cs_hold, hold, len); 3356 else 3357 memset(&spi->controller->cs_hold, 0, len); 3358 3359 if (inactive) 3360 memcpy(&spi->controller->cs_inactive, inactive, len); 3361 else 3362 memset(&spi->controller->cs_inactive, 0, len); 3363 3364 return 0; 3365 } 3366 EXPORT_SYMBOL_GPL(spi_set_cs_timing); 3367 3368 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer, 3369 struct spi_device *spi) 3370 { 3371 int delay1, delay2; 3372 3373 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer); 3374 if (delay1 < 0) 3375 return delay1; 3376 3377 delay2 = spi_delay_to_ns(&spi->word_delay, xfer); 3378 if (delay2 < 0) 3379 return delay2; 3380 3381 if (delay1 < delay2) 3382 memcpy(&xfer->word_delay, &spi->word_delay, 3383 sizeof(xfer->word_delay)); 3384 3385 return 0; 3386 } 3387 3388 static int __spi_validate(struct spi_device *spi, struct spi_message *message) 3389 { 3390 struct spi_controller *ctlr = spi->controller; 3391 struct spi_transfer *xfer; 3392 int w_size; 3393 3394 if (list_empty(&message->transfers)) 3395 return -EINVAL; 3396 3397 /* If an SPI controller does not support toggling the CS line on each 3398 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO 3399 * for the CS line, we can emulate the CS-per-word hardware function by 3400 * splitting transfers into one-word transfers and ensuring that 3401 * cs_change is set for each transfer. 3402 */ 3403 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) || 3404 spi->cs_gpiod || 3405 gpio_is_valid(spi->cs_gpio))) { 3406 size_t maxsize; 3407 int ret; 3408 3409 maxsize = (spi->bits_per_word + 7) / 8; 3410 3411 /* spi_split_transfers_maxsize() requires message->spi */ 3412 message->spi = spi; 3413 3414 ret = spi_split_transfers_maxsize(ctlr, message, maxsize, 3415 GFP_KERNEL); 3416 if (ret) 3417 return ret; 3418 3419 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3420 /* don't change cs_change on the last entry in the list */ 3421 if (list_is_last(&xfer->transfer_list, &message->transfers)) 3422 break; 3423 xfer->cs_change = 1; 3424 } 3425 } 3426 3427 /* Half-duplex links include original MicroWire, and ones with 3428 * only one data pin like SPI_3WIRE (switches direction) or where 3429 * either MOSI or MISO is missing. They can also be caused by 3430 * software limitations. 3431 */ 3432 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) || 3433 (spi->mode & SPI_3WIRE)) { 3434 unsigned flags = ctlr->flags; 3435 3436 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3437 if (xfer->rx_buf && xfer->tx_buf) 3438 return -EINVAL; 3439 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf) 3440 return -EINVAL; 3441 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf) 3442 return -EINVAL; 3443 } 3444 } 3445 3446 /** 3447 * Set transfer bits_per_word and max speed as spi device default if 3448 * it is not set for this transfer. 3449 * Set transfer tx_nbits and rx_nbits as single transfer default 3450 * (SPI_NBITS_SINGLE) if it is not set for this transfer. 3451 * Ensure transfer word_delay is at least as long as that required by 3452 * device itself. 3453 */ 3454 message->frame_length = 0; 3455 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3456 xfer->effective_speed_hz = 0; 3457 message->frame_length += xfer->len; 3458 if (!xfer->bits_per_word) 3459 xfer->bits_per_word = spi->bits_per_word; 3460 3461 if (!xfer->speed_hz) 3462 xfer->speed_hz = spi->max_speed_hz; 3463 3464 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz) 3465 xfer->speed_hz = ctlr->max_speed_hz; 3466 3467 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word)) 3468 return -EINVAL; 3469 3470 /* 3471 * SPI transfer length should be multiple of SPI word size 3472 * where SPI word size should be power-of-two multiple 3473 */ 3474 if (xfer->bits_per_word <= 8) 3475 w_size = 1; 3476 else if (xfer->bits_per_word <= 16) 3477 w_size = 2; 3478 else 3479 w_size = 4; 3480 3481 /* No partial transfers accepted */ 3482 if (xfer->len % w_size) 3483 return -EINVAL; 3484 3485 if (xfer->speed_hz && ctlr->min_speed_hz && 3486 xfer->speed_hz < ctlr->min_speed_hz) 3487 return -EINVAL; 3488 3489 if (xfer->tx_buf && !xfer->tx_nbits) 3490 xfer->tx_nbits = SPI_NBITS_SINGLE; 3491 if (xfer->rx_buf && !xfer->rx_nbits) 3492 xfer->rx_nbits = SPI_NBITS_SINGLE; 3493 /* check transfer tx/rx_nbits: 3494 * 1. check the value matches one of single, dual and quad 3495 * 2. check tx/rx_nbits match the mode in spi_device 3496 */ 3497 if (xfer->tx_buf) { 3498 if (xfer->tx_nbits != SPI_NBITS_SINGLE && 3499 xfer->tx_nbits != SPI_NBITS_DUAL && 3500 xfer->tx_nbits != SPI_NBITS_QUAD) 3501 return -EINVAL; 3502 if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 3503 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 3504 return -EINVAL; 3505 if ((xfer->tx_nbits == SPI_NBITS_QUAD) && 3506 !(spi->mode & SPI_TX_QUAD)) 3507 return -EINVAL; 3508 } 3509 /* check transfer rx_nbits */ 3510 if (xfer->rx_buf) { 3511 if (xfer->rx_nbits != SPI_NBITS_SINGLE && 3512 xfer->rx_nbits != SPI_NBITS_DUAL && 3513 xfer->rx_nbits != SPI_NBITS_QUAD) 3514 return -EINVAL; 3515 if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 3516 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 3517 return -EINVAL; 3518 if ((xfer->rx_nbits == SPI_NBITS_QUAD) && 3519 !(spi->mode & SPI_RX_QUAD)) 3520 return -EINVAL; 3521 } 3522 3523 if (_spi_xfer_word_delay_update(xfer, spi)) 3524 return -EINVAL; 3525 } 3526 3527 message->status = -EINPROGRESS; 3528 3529 return 0; 3530 } 3531 3532 static int __spi_async(struct spi_device *spi, struct spi_message *message) 3533 { 3534 struct spi_controller *ctlr = spi->controller; 3535 struct spi_transfer *xfer; 3536 3537 /* 3538 * Some controllers do not support doing regular SPI transfers. Return 3539 * ENOTSUPP when this is the case. 3540 */ 3541 if (!ctlr->transfer) 3542 return -ENOTSUPP; 3543 3544 message->spi = spi; 3545 3546 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async); 3547 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); 3548 3549 trace_spi_message_submit(message); 3550 3551 if (!ctlr->ptp_sts_supported) { 3552 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3553 xfer->ptp_sts_word_pre = 0; 3554 ptp_read_system_prets(xfer->ptp_sts); 3555 } 3556 } 3557 3558 return ctlr->transfer(spi, message); 3559 } 3560 3561 /** 3562 * spi_async - asynchronous SPI transfer 3563 * @spi: device with which data will be exchanged 3564 * @message: describes the data transfers, including completion callback 3565 * Context: any (irqs may be blocked, etc) 3566 * 3567 * This call may be used in_irq and other contexts which can't sleep, 3568 * as well as from task contexts which can sleep. 3569 * 3570 * The completion callback is invoked in a context which can't sleep. 3571 * Before that invocation, the value of message->status is undefined. 3572 * When the callback is issued, message->status holds either zero (to 3573 * indicate complete success) or a negative error code. After that 3574 * callback returns, the driver which issued the transfer request may 3575 * deallocate the associated memory; it's no longer in use by any SPI 3576 * core or controller driver code. 3577 * 3578 * Note that although all messages to a spi_device are handled in 3579 * FIFO order, messages may go to different devices in other orders. 3580 * Some device might be higher priority, or have various "hard" access 3581 * time requirements, for example. 3582 * 3583 * On detection of any fault during the transfer, processing of 3584 * the entire message is aborted, and the device is deselected. 3585 * Until returning from the associated message completion callback, 3586 * no other spi_message queued to that device will be processed. 3587 * (This rule applies equally to all the synchronous transfer calls, 3588 * which are wrappers around this core asynchronous primitive.) 3589 * 3590 * Return: zero on success, else a negative error code. 3591 */ 3592 int spi_async(struct spi_device *spi, struct spi_message *message) 3593 { 3594 struct spi_controller *ctlr = spi->controller; 3595 int ret; 3596 unsigned long flags; 3597 3598 ret = __spi_validate(spi, message); 3599 if (ret != 0) 3600 return ret; 3601 3602 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 3603 3604 if (ctlr->bus_lock_flag) 3605 ret = -EBUSY; 3606 else 3607 ret = __spi_async(spi, message); 3608 3609 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3610 3611 return ret; 3612 } 3613 EXPORT_SYMBOL_GPL(spi_async); 3614 3615 /** 3616 * spi_async_locked - version of spi_async with exclusive bus usage 3617 * @spi: device with which data will be exchanged 3618 * @message: describes the data transfers, including completion callback 3619 * Context: any (irqs may be blocked, etc) 3620 * 3621 * This call may be used in_irq and other contexts which can't sleep, 3622 * as well as from task contexts which can sleep. 3623 * 3624 * The completion callback is invoked in a context which can't sleep. 3625 * Before that invocation, the value of message->status is undefined. 3626 * When the callback is issued, message->status holds either zero (to 3627 * indicate complete success) or a negative error code. After that 3628 * callback returns, the driver which issued the transfer request may 3629 * deallocate the associated memory; it's no longer in use by any SPI 3630 * core or controller driver code. 3631 * 3632 * Note that although all messages to a spi_device are handled in 3633 * FIFO order, messages may go to different devices in other orders. 3634 * Some device might be higher priority, or have various "hard" access 3635 * time requirements, for example. 3636 * 3637 * On detection of any fault during the transfer, processing of 3638 * the entire message is aborted, and the device is deselected. 3639 * Until returning from the associated message completion callback, 3640 * no other spi_message queued to that device will be processed. 3641 * (This rule applies equally to all the synchronous transfer calls, 3642 * which are wrappers around this core asynchronous primitive.) 3643 * 3644 * Return: zero on success, else a negative error code. 3645 */ 3646 int spi_async_locked(struct spi_device *spi, struct spi_message *message) 3647 { 3648 struct spi_controller *ctlr = spi->controller; 3649 int ret; 3650 unsigned long flags; 3651 3652 ret = __spi_validate(spi, message); 3653 if (ret != 0) 3654 return ret; 3655 3656 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 3657 3658 ret = __spi_async(spi, message); 3659 3660 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3661 3662 return ret; 3663 3664 } 3665 EXPORT_SYMBOL_GPL(spi_async_locked); 3666 3667 /*-------------------------------------------------------------------------*/ 3668 3669 /* Utility methods for SPI protocol drivers, layered on 3670 * top of the core. Some other utility methods are defined as 3671 * inline functions. 3672 */ 3673 3674 static void spi_complete(void *arg) 3675 { 3676 complete(arg); 3677 } 3678 3679 static int __spi_sync(struct spi_device *spi, struct spi_message *message) 3680 { 3681 DECLARE_COMPLETION_ONSTACK(done); 3682 int status; 3683 struct spi_controller *ctlr = spi->controller; 3684 unsigned long flags; 3685 3686 status = __spi_validate(spi, message); 3687 if (status != 0) 3688 return status; 3689 3690 message->complete = spi_complete; 3691 message->context = &done; 3692 message->spi = spi; 3693 3694 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync); 3695 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync); 3696 3697 /* If we're not using the legacy transfer method then we will 3698 * try to transfer in the calling context so special case. 3699 * This code would be less tricky if we could remove the 3700 * support for driver implemented message queues. 3701 */ 3702 if (ctlr->transfer == spi_queued_transfer) { 3703 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 3704 3705 trace_spi_message_submit(message); 3706 3707 status = __spi_queued_transfer(spi, message, false); 3708 3709 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3710 } else { 3711 status = spi_async_locked(spi, message); 3712 } 3713 3714 if (status == 0) { 3715 /* Push out the messages in the calling context if we 3716 * can. 3717 */ 3718 if (ctlr->transfer == spi_queued_transfer) { 3719 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, 3720 spi_sync_immediate); 3721 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, 3722 spi_sync_immediate); 3723 __spi_pump_messages(ctlr, false); 3724 } 3725 3726 wait_for_completion(&done); 3727 status = message->status; 3728 } 3729 message->context = NULL; 3730 return status; 3731 } 3732 3733 /** 3734 * spi_sync - blocking/synchronous SPI data transfers 3735 * @spi: device with which data will be exchanged 3736 * @message: describes the data transfers 3737 * Context: can sleep 3738 * 3739 * This call may only be used from a context that may sleep. The sleep 3740 * is non-interruptible, and has no timeout. Low-overhead controller 3741 * drivers may DMA directly into and out of the message buffers. 3742 * 3743 * Note that the SPI device's chip select is active during the message, 3744 * and then is normally disabled between messages. Drivers for some 3745 * frequently-used devices may want to minimize costs of selecting a chip, 3746 * by leaving it selected in anticipation that the next message will go 3747 * to the same chip. (That may increase power usage.) 3748 * 3749 * Also, the caller is guaranteeing that the memory associated with the 3750 * message will not be freed before this call returns. 3751 * 3752 * Return: zero on success, else a negative error code. 3753 */ 3754 int spi_sync(struct spi_device *spi, struct spi_message *message) 3755 { 3756 int ret; 3757 3758 mutex_lock(&spi->controller->bus_lock_mutex); 3759 ret = __spi_sync(spi, message); 3760 mutex_unlock(&spi->controller->bus_lock_mutex); 3761 3762 return ret; 3763 } 3764 EXPORT_SYMBOL_GPL(spi_sync); 3765 3766 /** 3767 * spi_sync_locked - version of spi_sync with exclusive bus usage 3768 * @spi: device with which data will be exchanged 3769 * @message: describes the data transfers 3770 * Context: can sleep 3771 * 3772 * This call may only be used from a context that may sleep. The sleep 3773 * is non-interruptible, and has no timeout. Low-overhead controller 3774 * drivers may DMA directly into and out of the message buffers. 3775 * 3776 * This call should be used by drivers that require exclusive access to the 3777 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must 3778 * be released by a spi_bus_unlock call when the exclusive access is over. 3779 * 3780 * Return: zero on success, else a negative error code. 3781 */ 3782 int spi_sync_locked(struct spi_device *spi, struct spi_message *message) 3783 { 3784 return __spi_sync(spi, message); 3785 } 3786 EXPORT_SYMBOL_GPL(spi_sync_locked); 3787 3788 /** 3789 * spi_bus_lock - obtain a lock for exclusive SPI bus usage 3790 * @ctlr: SPI bus master that should be locked for exclusive bus access 3791 * Context: can sleep 3792 * 3793 * This call may only be used from a context that may sleep. The sleep 3794 * is non-interruptible, and has no timeout. 3795 * 3796 * This call should be used by drivers that require exclusive access to the 3797 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the 3798 * exclusive access is over. Data transfer must be done by spi_sync_locked 3799 * and spi_async_locked calls when the SPI bus lock is held. 3800 * 3801 * Return: always zero. 3802 */ 3803 int spi_bus_lock(struct spi_controller *ctlr) 3804 { 3805 unsigned long flags; 3806 3807 mutex_lock(&ctlr->bus_lock_mutex); 3808 3809 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 3810 ctlr->bus_lock_flag = 1; 3811 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3812 3813 /* mutex remains locked until spi_bus_unlock is called */ 3814 3815 return 0; 3816 } 3817 EXPORT_SYMBOL_GPL(spi_bus_lock); 3818 3819 /** 3820 * spi_bus_unlock - release the lock for exclusive SPI bus usage 3821 * @ctlr: SPI bus master that was locked for exclusive bus access 3822 * Context: can sleep 3823 * 3824 * This call may only be used from a context that may sleep. The sleep 3825 * is non-interruptible, and has no timeout. 3826 * 3827 * This call releases an SPI bus lock previously obtained by an spi_bus_lock 3828 * call. 3829 * 3830 * Return: always zero. 3831 */ 3832 int spi_bus_unlock(struct spi_controller *ctlr) 3833 { 3834 ctlr->bus_lock_flag = 0; 3835 3836 mutex_unlock(&ctlr->bus_lock_mutex); 3837 3838 return 0; 3839 } 3840 EXPORT_SYMBOL_GPL(spi_bus_unlock); 3841 3842 /* portable code must never pass more than 32 bytes */ 3843 #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) 3844 3845 static u8 *buf; 3846 3847 /** 3848 * spi_write_then_read - SPI synchronous write followed by read 3849 * @spi: device with which data will be exchanged 3850 * @txbuf: data to be written (need not be dma-safe) 3851 * @n_tx: size of txbuf, in bytes 3852 * @rxbuf: buffer into which data will be read (need not be dma-safe) 3853 * @n_rx: size of rxbuf, in bytes 3854 * Context: can sleep 3855 * 3856 * This performs a half duplex MicroWire style transaction with the 3857 * device, sending txbuf and then reading rxbuf. The return value 3858 * is zero for success, else a negative errno status code. 3859 * This call may only be used from a context that may sleep. 3860 * 3861 * Parameters to this routine are always copied using a small buffer. 3862 * Performance-sensitive or bulk transfer code should instead use 3863 * spi_{async,sync}() calls with dma-safe buffers. 3864 * 3865 * Return: zero on success, else a negative error code. 3866 */ 3867 int spi_write_then_read(struct spi_device *spi, 3868 const void *txbuf, unsigned n_tx, 3869 void *rxbuf, unsigned n_rx) 3870 { 3871 static DEFINE_MUTEX(lock); 3872 3873 int status; 3874 struct spi_message message; 3875 struct spi_transfer x[2]; 3876 u8 *local_buf; 3877 3878 /* Use preallocated DMA-safe buffer if we can. We can't avoid 3879 * copying here, (as a pure convenience thing), but we can 3880 * keep heap costs out of the hot path unless someone else is 3881 * using the pre-allocated buffer or the transfer is too large. 3882 */ 3883 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { 3884 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), 3885 GFP_KERNEL | GFP_DMA); 3886 if (!local_buf) 3887 return -ENOMEM; 3888 } else { 3889 local_buf = buf; 3890 } 3891 3892 spi_message_init(&message); 3893 memset(x, 0, sizeof(x)); 3894 if (n_tx) { 3895 x[0].len = n_tx; 3896 spi_message_add_tail(&x[0], &message); 3897 } 3898 if (n_rx) { 3899 x[1].len = n_rx; 3900 spi_message_add_tail(&x[1], &message); 3901 } 3902 3903 memcpy(local_buf, txbuf, n_tx); 3904 x[0].tx_buf = local_buf; 3905 x[1].rx_buf = local_buf + n_tx; 3906 3907 /* do the i/o */ 3908 status = spi_sync(spi, &message); 3909 if (status == 0) 3910 memcpy(rxbuf, x[1].rx_buf, n_rx); 3911 3912 if (x[0].tx_buf == buf) 3913 mutex_unlock(&lock); 3914 else 3915 kfree(local_buf); 3916 3917 return status; 3918 } 3919 EXPORT_SYMBOL_GPL(spi_write_then_read); 3920 3921 /*-------------------------------------------------------------------------*/ 3922 3923 #if IS_ENABLED(CONFIG_OF) 3924 /* must call put_device() when done with returned spi_device device */ 3925 struct spi_device *of_find_spi_device_by_node(struct device_node *node) 3926 { 3927 struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node); 3928 3929 return dev ? to_spi_device(dev) : NULL; 3930 } 3931 EXPORT_SYMBOL_GPL(of_find_spi_device_by_node); 3932 #endif /* IS_ENABLED(CONFIG_OF) */ 3933 3934 #if IS_ENABLED(CONFIG_OF_DYNAMIC) 3935 /* the spi controllers are not using spi_bus, so we find it with another way */ 3936 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node) 3937 { 3938 struct device *dev; 3939 3940 dev = class_find_device_by_of_node(&spi_master_class, node); 3941 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) 3942 dev = class_find_device_by_of_node(&spi_slave_class, node); 3943 if (!dev) 3944 return NULL; 3945 3946 /* reference got in class_find_device */ 3947 return container_of(dev, struct spi_controller, dev); 3948 } 3949 3950 static int of_spi_notify(struct notifier_block *nb, unsigned long action, 3951 void *arg) 3952 { 3953 struct of_reconfig_data *rd = arg; 3954 struct spi_controller *ctlr; 3955 struct spi_device *spi; 3956 3957 switch (of_reconfig_get_state_change(action, arg)) { 3958 case OF_RECONFIG_CHANGE_ADD: 3959 ctlr = of_find_spi_controller_by_node(rd->dn->parent); 3960 if (ctlr == NULL) 3961 return NOTIFY_OK; /* not for us */ 3962 3963 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { 3964 put_device(&ctlr->dev); 3965 return NOTIFY_OK; 3966 } 3967 3968 spi = of_register_spi_device(ctlr, rd->dn); 3969 put_device(&ctlr->dev); 3970 3971 if (IS_ERR(spi)) { 3972 pr_err("%s: failed to create for '%pOF'\n", 3973 __func__, rd->dn); 3974 of_node_clear_flag(rd->dn, OF_POPULATED); 3975 return notifier_from_errno(PTR_ERR(spi)); 3976 } 3977 break; 3978 3979 case OF_RECONFIG_CHANGE_REMOVE: 3980 /* already depopulated? */ 3981 if (!of_node_check_flag(rd->dn, OF_POPULATED)) 3982 return NOTIFY_OK; 3983 3984 /* find our device by node */ 3985 spi = of_find_spi_device_by_node(rd->dn); 3986 if (spi == NULL) 3987 return NOTIFY_OK; /* no? not meant for us */ 3988 3989 /* unregister takes one ref away */ 3990 spi_unregister_device(spi); 3991 3992 /* and put the reference of the find */ 3993 put_device(&spi->dev); 3994 break; 3995 } 3996 3997 return NOTIFY_OK; 3998 } 3999 4000 static struct notifier_block spi_of_notifier = { 4001 .notifier_call = of_spi_notify, 4002 }; 4003 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 4004 extern struct notifier_block spi_of_notifier; 4005 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 4006 4007 #if IS_ENABLED(CONFIG_ACPI) 4008 static int spi_acpi_controller_match(struct device *dev, const void *data) 4009 { 4010 return ACPI_COMPANION(dev->parent) == data; 4011 } 4012 4013 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev) 4014 { 4015 struct device *dev; 4016 4017 dev = class_find_device(&spi_master_class, NULL, adev, 4018 spi_acpi_controller_match); 4019 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) 4020 dev = class_find_device(&spi_slave_class, NULL, adev, 4021 spi_acpi_controller_match); 4022 if (!dev) 4023 return NULL; 4024 4025 return container_of(dev, struct spi_controller, dev); 4026 } 4027 4028 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) 4029 { 4030 struct device *dev; 4031 4032 dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev); 4033 return to_spi_device(dev); 4034 } 4035 4036 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value, 4037 void *arg) 4038 { 4039 struct acpi_device *adev = arg; 4040 struct spi_controller *ctlr; 4041 struct spi_device *spi; 4042 4043 switch (value) { 4044 case ACPI_RECONFIG_DEVICE_ADD: 4045 ctlr = acpi_spi_find_controller_by_adev(adev->parent); 4046 if (!ctlr) 4047 break; 4048 4049 acpi_register_spi_device(ctlr, adev); 4050 put_device(&ctlr->dev); 4051 break; 4052 case ACPI_RECONFIG_DEVICE_REMOVE: 4053 if (!acpi_device_enumerated(adev)) 4054 break; 4055 4056 spi = acpi_spi_find_device_by_adev(adev); 4057 if (!spi) 4058 break; 4059 4060 spi_unregister_device(spi); 4061 put_device(&spi->dev); 4062 break; 4063 } 4064 4065 return NOTIFY_OK; 4066 } 4067 4068 static struct notifier_block spi_acpi_notifier = { 4069 .notifier_call = acpi_spi_notify, 4070 }; 4071 #else 4072 extern struct notifier_block spi_acpi_notifier; 4073 #endif 4074 4075 static int __init spi_init(void) 4076 { 4077 int status; 4078 4079 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 4080 if (!buf) { 4081 status = -ENOMEM; 4082 goto err0; 4083 } 4084 4085 status = bus_register(&spi_bus_type); 4086 if (status < 0) 4087 goto err1; 4088 4089 status = class_register(&spi_master_class); 4090 if (status < 0) 4091 goto err2; 4092 4093 if (IS_ENABLED(CONFIG_SPI_SLAVE)) { 4094 status = class_register(&spi_slave_class); 4095 if (status < 0) 4096 goto err3; 4097 } 4098 4099 if (IS_ENABLED(CONFIG_OF_DYNAMIC)) 4100 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); 4101 if (IS_ENABLED(CONFIG_ACPI)) 4102 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier)); 4103 4104 return 0; 4105 4106 err3: 4107 class_unregister(&spi_master_class); 4108 err2: 4109 bus_unregister(&spi_bus_type); 4110 err1: 4111 kfree(buf); 4112 buf = NULL; 4113 err0: 4114 return status; 4115 } 4116 4117 /* board_info is normally registered in arch_initcall(), 4118 * but even essential drivers wait till later 4119 * 4120 * REVISIT only boardinfo really needs static linking. the rest (device and 4121 * driver registration) _could_ be dynamically linked (modular) ... costs 4122 * include needing to have boardinfo data structures be much more public. 4123 */ 4124 postcore_initcall(spi_init); 4125