// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	const char *end = memchr(buf, '\n', count);
	const size_t len = end ? end - buf : count;
	const char *driver_override, *old;

	/* We need to keep extra room for a newline when displaying value */
	if (len >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, len, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	device_lock(dev);
	old = spi->driver_override;
	if (len) {
		spi->driver_override = driver_override;
	} else {
		/* Empty string, disable driver override */
		spi->driver_override = NULL;
		kfree(driver_override);
	}
	device_unlock(dev);
	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,		\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(&ctlr->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index],  "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

static struct attribute
*spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_controller *ctlr)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device		*spi = to_spi_device(dev);
	int rc;

	rc = of_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	struct spi_device		*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_remove(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove) {
		int ret;

		ret = sdrv->remove(to_spi_device(dev));
		if (ret)
			dev_warn(dev,
				 "Failed to unbind driver (%pe), ignoring\n",
				 ERR_PTR(ret));
	}

	dev_pm_domain_detach(dev, true);

	return 0;
}

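/*
 * Illustrative sketch (not part of this file): a minimal client driver
 * showing how the probe/remove callbacks handled by spi_probe()/spi_remove()
 * above are typically wired up via spi_register_driver().  The "foo" names
 * below are made up for the example.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		// adjust per-device parameters, then apply them
 *		spi->bits_per_word = 8;
 *		return spi_setup(spi);
 *	}
 *
 *	static int foo_remove(struct spi_device *spi)
 *	{
 *		// release device state allocated in foo_probe()
 *		return 0;
 *	}
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = { .name = "foo" },
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 */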
static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into code like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_controller list, and their matching process
 * also used to protect object of type struct idr
 */
static DEFINE_MUTEX(board_lock);

/*
 * Prevents addition of devices with same chip select and
 * addition of devices below an unregistering controller.
 */
static DEFINE_MUTEX(spi_add_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device	*spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;
	spi->mode = ctlr->buswidth_override_bits;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		return status;
	}

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	/* Descriptors take precedence */
	if (ctlr->cs_gpiods)
		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
	else if (ctlr->cs_gpios)
		spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate.
	 */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

static int spi_add_device_locked(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	WARN_ON(!mutex_is_locked(&spi_add_lock));
	return __spi_add_device(spi);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && (spi->controller->last_cs_enable == enable) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs_enable = enable;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
	    !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->controller->cs_setup, NULL);
		else
			spi_delay_exec(&spi->controller->cs_hold, NULL);
	}

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
		if (!(spi->mode & SPI_NO_CS)) {
			if (spi->cs_gpiod) {
				/*
				 * Historically ACPI has no means of expressing the GPIO
				 * polarity and thus the SPISerialBus() resource defines it
				 * on the per-chip basis. In order to avoid a chain of
				 * negations, the GPIO polarity is considered being
				 * Active High. Even for the cases when _DSD() is involved
				 * (in the updated versions of ACPI) the GPIO CS polarity
				 * must be defined Active High to avoid ambiguity.
				 * That's why we use enable, that takes SPI_CS_HIGH into
				 * account.
				 */
				if (has_acpi_companion(&spi->dev))
					gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
				else
					/* Polarity handled by GPIO library */
					gpiod_set_value_cansleep(spi->cs_gpiod, activate);
			} else {
				/*
				 * invert the enable line, as active low is
				 * default for SPI.
				 */
				gpio_set_value_cansleep(spi->cs_gpio, !enable);
			}
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
	    !spi->controller->set_cs_timing) {
		if (!activate)
			spi_delay_exec(&spi->controller->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	ctlr->cur_msg_mapped = false;

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the
		 * original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * use respective multiplier, but before the division,
		 * otherwise we may get 0 for short transfers.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);

		/*
		 * Increase it twice and add 200 ms tolerance, use
		 * predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If the effective speed is unknown, approximate it
		 * by underestimating with half of the requested hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				if (ctlr->cur_msg_mapped &&
				   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				spi_set_cs(msg->spi, true, false);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);

static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
	if (ctlr->auto_runtime_pm) {
		pm_runtime_mark_last_busy(ctlr->dev.parent);
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
}

/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	struct spi_transfer *xfer;
	struct spi_message *msg;
	bool was_busy = false;
	unsigned long flags;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (ctlr->idling) {
		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy) {
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		/* Defer any non-atomic teardown to the thread */
		if (!in_kthread) {
			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
			    !ctlr->unprepare_transfer_hardware) {
				spi_idle_runtime_pm(ctlr);
				ctlr->busy = false;
				trace_spi_controller_idle(ctlr);
			} else {
				kthread_queue_work(ctlr->kworker,
						   &ctlr->pump_messages);
			}
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		ctlr->busy = false;
		ctlr->idling = true;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->idling = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
	ctlr->cur_msg = msg;

	list_del_init(&msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	mutex_lock(&ctlr->io_mutex);

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	trace_spi_message_start(msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			goto out;
		}
		ctlr->cur_msg_prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		goto out;
	}

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	ret = ctlr->transfer_one_message(ctlr, msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}

/**
 * spi_take_timestamp_pre - helper for drivers to collect the beginning of the
 *			    TX timestamp for the requested byte from the SPI
 *			    transfer. The frequency with which this function
 *			    must be called (once per word, once for the whole
 *			    transfer, once per batch of words etc) is arbitrary
 *			    as long as the @tx buffer offset is greater than or
 *			    equal to the requested byte at the time of the
 *			    call. The timestamp is only taken once, at the
 *			    first such call. It is assumed that the driver
 *			    advances its @tx buffer pointer monotonically.
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
 *	      transfer, for less jitter in time measurement. Only compatible
 *	      with PIO drivers. If true, must follow up with
 *	      spi_take_timestamp_post or otherwise the system will crash.
 *	      WARNING: for fully predictable results, the CPU frequency must
 *	      also be under control (governor).
 */
void spi_take_timestamp_pre(struct spi_controller *ctlr,
			    struct spi_transfer *xfer,
			    size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress > xfer->ptp_sts_word_pre)
		return;

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_pre = progress;

	if (irqs_off) {
		local_irq_save(ctlr->irq_flags);
		preempt_disable();
	}

	ptp_read_system_prets(xfer->ptp_sts);
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);

/**
 * spi_take_timestamp_post - helper for drivers to collect the end of the
 *			     TX timestamp for the requested byte from the SPI
 *			     transfer. Can be called with an arbitrary
 *			     frequency: only the first call where @tx exceeds
 *			     or is equal to the requested word will be
 *			     timestamped.
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
 */
void spi_take_timestamp_post(struct spi_controller *ctlr,
			     struct spi_transfer *xfer,
			     size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress < xfer->ptp_sts_word_post)
		return;

	ptp_read_system_postts(xfer->ptp_sts);

	if (irqs_off) {
		local_irq_restore(ctlr->irq_flags);
		preempt_enable();
	}

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_post = progress;

	xfer->timestamped = true;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);

/**
 * spi_set_thread_rt - set the controller to pump at realtime priority
 * @ctlr: controller to boost priority of
 *
 * This can be called because the controller requested realtime priority
 * (by setting the ->rt value before calling spi_register_controller()) or
 * because a device on the bus said that its transfers needed realtime
 * priority.
 *
 * NOTE: at the moment if any device on a bus says it needs realtime then
 * the thread will be at realtime priority for all transfers on that
 * controller.  If this eventually becomes a problem we may see if we can
 * find a way to boost the priority only temporarily during relevant
 * transfers.
 */
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
	dev_info(&ctlr->dev,
		"will run message pump with realtime priority\n");
	sched_set_fifo(ctlr->kworker->task);
}

static int spi_init_queue(struct spi_controller *ctlr)
{
	ctlr->running = false;
	ctlr->busy = false;

	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker)) {
		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
		return PTR_ERR(ctlr->kworker);
	}

	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt)
		spi_set_thread_rt(ctlr);

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_transfer *xfer;
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	mesg = ctlr->cur_msg;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}
	}

	if (unlikely(ctlr->ptp_sts_supported))
		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);

	spi_unmap_msg(ctlr, mesg);

	/* In the prepare_messages callback the spi bus has the opportunity to
	 * split a transfer to smaller chunks.
	 * Release the split transfers here since spi_map_msg is done on the
	 * split transfers.
	 */
	spi_res_release(ctlr, mesg);

	if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
		ret = ctlr->unprepare_message(ctlr, mesg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
				ret);
		}
	}

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	ctlr->cur_msg = NULL;
	ctlr->cur_msg_prepared = false;
	ctlr->fallback = false;
	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_controller *ctlr)
{
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (ctlr->running || ctlr->busy) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -EBUSY;
	}

	ctlr->running = true;
	ctlr->cur_msg = NULL;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_controller *ctlr)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the ctlr->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&ctlr->queue_lock, flags);
	}

	if (!list_empty(&ctlr->queue) || ctlr->busy)
		ret = -EBUSY;
	else
		ctlr->running = false;

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	if (ret) {
		dev_warn(&ctlr->dev, "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_controller *ctlr)
{
	int ret;

	ret = spi_stop_queue(ctlr);

	/*
	 * kthread_flush_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&ctlr->dev, "problem destroying queue\n");
		return ret;
	}

	kthread_destroy_worker(ctlr->kworker);

	return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	spin_lock_irqsave(&ctlr->queue_lock, flags);

	if (!ctlr->running) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &ctlr->queue);
	if (!ctlr->busy && need_pump)
		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to handled is queued to driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_controller_initialize_queue(struct spi_controller *ctlr)
{
	int ret;

	ctlr->transfer = spi_queued_transfer;
	if (!ctlr->transfer_one_message)
		ctlr->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	ctlr->queued = true;
	ret = spi_start_queue(ctlr);
	if (ret) {
		dev_err(&ctlr->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(ctlr);
err_init_queue:
	return ret;
}

/**
 * spi_flush_queue - Send all pending messages in the queue from the caller's
 *		     context
 * @ctlr: controller to process queue for
 *
 * This should be used when one wants to ensure all pending messages have been
 * sent before doing something. It is used by the spi-mem code to make sure SPI
 * memory operations do not preempt regular SPI transfers that have been queued
 * before the spi-mem operation.
 */
void spi_flush_queue(struct spi_controller *ctlr)
{
	if (ctlr->transfer == spi_queued_transfer)
		__spi_pump_messages(ctlr, false);
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
			   struct device_node *nc)
{
	u32 value;
	int rc;

	/* Mode (clock phase/polarity/etc.)
*/ 1997 if (of_property_read_bool(nc, "spi-cpha")) 1998 spi->mode |= SPI_CPHA; 1999 if (of_property_read_bool(nc, "spi-cpol")) 2000 spi->mode |= SPI_CPOL; 2001 if (of_property_read_bool(nc, "spi-3wire")) 2002 spi->mode |= SPI_3WIRE; 2003 if (of_property_read_bool(nc, "spi-lsb-first")) 2004 spi->mode |= SPI_LSB_FIRST; 2005 if (of_property_read_bool(nc, "spi-cs-high")) 2006 spi->mode |= SPI_CS_HIGH; 2007 2008 /* Device DUAL/QUAD mode */ 2009 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 2010 switch (value) { 2011 case 0: 2012 spi->mode |= SPI_NO_TX; 2013 break; 2014 case 1: 2015 break; 2016 case 2: 2017 spi->mode |= SPI_TX_DUAL; 2018 break; 2019 case 4: 2020 spi->mode |= SPI_TX_QUAD; 2021 break; 2022 case 8: 2023 spi->mode |= SPI_TX_OCTAL; 2024 break; 2025 default: 2026 dev_warn(&ctlr->dev, 2027 "spi-tx-bus-width %d not supported\n", 2028 value); 2029 break; 2030 } 2031 } 2032 2033 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 2034 switch (value) { 2035 case 0: 2036 spi->mode |= SPI_NO_RX; 2037 break; 2038 case 1: 2039 break; 2040 case 2: 2041 spi->mode |= SPI_RX_DUAL; 2042 break; 2043 case 4: 2044 spi->mode |= SPI_RX_QUAD; 2045 break; 2046 case 8: 2047 spi->mode |= SPI_RX_OCTAL; 2048 break; 2049 default: 2050 dev_warn(&ctlr->dev, 2051 "spi-rx-bus-width %d not supported\n", 2052 value); 2053 break; 2054 } 2055 } 2056 2057 if (spi_controller_is_slave(ctlr)) { 2058 if (!of_node_name_eq(nc, "slave")) { 2059 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n", 2060 nc); 2061 return -EINVAL; 2062 } 2063 return 0; 2064 } 2065 2066 /* Device address */ 2067 rc = of_property_read_u32(nc, "reg", &value); 2068 if (rc) { 2069 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n", 2070 nc, rc); 2071 return rc; 2072 } 2073 spi->chip_select = value; 2074 2075 /* Device speed */ 2076 if (!of_property_read_u32(nc, "spi-max-frequency", &value)) 2077 spi->max_speed_hz = value; 2078 2079 return 0; 2080 } 2081 2082 static struct spi_device * 2083 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc) 2084 { 2085 struct spi_device *spi; 2086 int rc; 2087 2088 /* Alloc an spi_device */ 2089 spi = spi_alloc_device(ctlr); 2090 if (!spi) { 2091 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc); 2092 rc = -ENOMEM; 2093 goto err_out; 2094 } 2095 2096 /* Select device driver */ 2097 rc = of_modalias_node(nc, spi->modalias, 2098 sizeof(spi->modalias)); 2099 if (rc < 0) { 2100 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc); 2101 goto err_out; 2102 } 2103 2104 rc = of_spi_parse_dt(ctlr, spi, nc); 2105 if (rc) 2106 goto err_out; 2107 2108 /* Store a pointer to the node in the device structure */ 2109 of_node_get(nc); 2110 spi->dev.of_node = nc; 2111 spi->dev.fwnode = of_fwnode_handle(nc); 2112 2113 /* Register the new device */ 2114 rc = spi_add_device(spi); 2115 if (rc) { 2116 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc); 2117 goto err_of_node_put; 2118 } 2119 2120 return spi; 2121 2122 err_of_node_put: 2123 of_node_put(nc); 2124 err_out: 2125 spi_dev_put(spi); 2126 return ERR_PTR(rc); 2127 } 2128 2129 /** 2130 * of_register_spi_devices() - Register child devices onto the SPI bus 2131 * @ctlr: Pointer to spi_controller device 2132 * 2133 * Registers an spi_device for each child node of controller node which 2134 * represents a valid SPI slave. 
2135 */ 2136 static void of_register_spi_devices(struct spi_controller *ctlr) 2137 { 2138 struct spi_device *spi; 2139 struct device_node *nc; 2140 2141 if (!ctlr->dev.of_node) 2142 return; 2143 2144 for_each_available_child_of_node(ctlr->dev.of_node, nc) { 2145 if (of_node_test_and_set_flag(nc, OF_POPULATED)) 2146 continue; 2147 spi = of_register_spi_device(ctlr, nc); 2148 if (IS_ERR(spi)) { 2149 dev_warn(&ctlr->dev, 2150 "Failed to create SPI device for %pOF\n", nc); 2151 of_node_clear_flag(nc, OF_POPULATED); 2152 } 2153 } 2154 } 2155 #else 2156 static void of_register_spi_devices(struct spi_controller *ctlr) { } 2157 #endif 2158 2159 /** 2160 * spi_new_ancillary_device() - Register ancillary SPI device 2161 * @spi: Pointer to the main SPI device registering the ancillary device 2162 * @chip_select: Chip Select of the ancillary device 2163 * 2164 * Register an ancillary SPI device; for example some chips have a chip-select 2165 * for normal device usage and another one for setup/firmware upload. 2166 * 2167 * This may only be called from main SPI device's probe routine. 2168 * 2169 * Return: 0 on success; negative errno on failure 2170 */ 2171 struct spi_device *spi_new_ancillary_device(struct spi_device *spi, 2172 u8 chip_select) 2173 { 2174 struct spi_device *ancillary; 2175 int rc = 0; 2176 2177 /* Alloc an spi_device */ 2178 ancillary = spi_alloc_device(spi->controller); 2179 if (!ancillary) { 2180 rc = -ENOMEM; 2181 goto err_out; 2182 } 2183 2184 strlcpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias)); 2185 2186 /* Use provided chip-select for ancillary device */ 2187 ancillary->chip_select = chip_select; 2188 2189 /* Take over SPI mode/speed from SPI main device */ 2190 ancillary->max_speed_hz = spi->max_speed_hz; 2191 ancillary->mode = spi->mode; 2192 2193 /* Register the new device */ 2194 rc = spi_add_device_locked(ancillary); 2195 if (rc) { 2196 dev_err(&spi->dev, "failed to register ancillary device\n"); 2197 goto err_out; 2198 } 2199 2200 return ancillary; 2201 2202 err_out: 2203 spi_dev_put(ancillary); 2204 return ERR_PTR(rc); 2205 } 2206 EXPORT_SYMBOL_GPL(spi_new_ancillary_device); 2207 2208 #ifdef CONFIG_ACPI 2209 struct acpi_spi_lookup { 2210 struct spi_controller *ctlr; 2211 u32 max_speed_hz; 2212 u32 mode; 2213 int irq; 2214 u8 bits_per_word; 2215 u8 chip_select; 2216 }; 2217 2218 static void acpi_spi_parse_apple_properties(struct acpi_device *dev, 2219 struct acpi_spi_lookup *lookup) 2220 { 2221 const union acpi_object *obj; 2222 2223 if (!x86_apple_machine) 2224 return; 2225 2226 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj) 2227 && obj->buffer.length >= 4) 2228 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer; 2229 2230 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj) 2231 && obj->buffer.length == 8) 2232 lookup->bits_per_word = *(u64 *)obj->buffer.pointer; 2233 2234 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj) 2235 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer) 2236 lookup->mode |= SPI_LSB_FIRST; 2237 2238 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj) 2239 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 2240 lookup->mode |= SPI_CPOL; 2241 2242 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj) 2243 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 2244 lookup->mode |= SPI_CPHA; 2245 } 2246 2247 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 2248 { 2249 struct 
acpi_spi_lookup *lookup = data; 2250 struct spi_controller *ctlr = lookup->ctlr; 2251 2252 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 2253 struct acpi_resource_spi_serialbus *sb; 2254 acpi_handle parent_handle; 2255 acpi_status status; 2256 2257 sb = &ares->data.spi_serial_bus; 2258 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 2259 2260 status = acpi_get_handle(NULL, 2261 sb->resource_source.string_ptr, 2262 &parent_handle); 2263 2264 if (ACPI_FAILURE(status) || 2265 ACPI_HANDLE(ctlr->dev.parent) != parent_handle) 2266 return -ENODEV; 2267 2268 /* 2269 * ACPI DeviceSelection numbering is handled by the 2270 * host controller driver in Windows and can vary 2271 * from driver to driver. In Linux we always expect 2272 * 0 .. max - 1 so we need to ask the driver to 2273 * translate between the two schemes. 2274 */ 2275 if (ctlr->fw_translate_cs) { 2276 int cs = ctlr->fw_translate_cs(ctlr, 2277 sb->device_selection); 2278 if (cs < 0) 2279 return cs; 2280 lookup->chip_select = cs; 2281 } else { 2282 lookup->chip_select = sb->device_selection; 2283 } 2284 2285 lookup->max_speed_hz = sb->connection_speed; 2286 lookup->bits_per_word = sb->data_bit_length; 2287 2288 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) 2289 lookup->mode |= SPI_CPHA; 2290 if (sb->clock_polarity == ACPI_SPI_START_HIGH) 2291 lookup->mode |= SPI_CPOL; 2292 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) 2293 lookup->mode |= SPI_CS_HIGH; 2294 } 2295 } else if (lookup->irq < 0) { 2296 struct resource r; 2297 2298 if (acpi_dev_resource_interrupt(ares, 0, &r)) 2299 lookup->irq = r.start; 2300 } 2301 2302 /* Always tell the ACPI core to skip this resource */ 2303 return 1; 2304 } 2305 2306 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, 2307 struct acpi_device *adev) 2308 { 2309 acpi_handle parent_handle = NULL; 2310 struct list_head resource_list; 2311 struct acpi_spi_lookup lookup = {}; 2312 struct spi_device *spi; 2313 int ret; 2314 2315 if (acpi_bus_get_status(adev) || !adev->status.present || 2316 acpi_device_enumerated(adev)) 2317 return AE_OK; 2318 2319 lookup.ctlr = ctlr; 2320 lookup.irq = -1; 2321 2322 INIT_LIST_HEAD(&resource_list); 2323 ret = acpi_dev_get_resources(adev, &resource_list, 2324 acpi_spi_add_resource, &lookup); 2325 acpi_dev_free_resource_list(&resource_list); 2326 2327 if (ret < 0) 2328 /* found SPI in _CRS but it points to another controller */ 2329 return AE_OK; 2330 2331 if (!lookup.max_speed_hz && 2332 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) && 2333 ACPI_HANDLE(ctlr->dev.parent) == parent_handle) { 2334 /* Apple does not use _CRS but nested devices for SPI slaves */ 2335 acpi_spi_parse_apple_properties(adev, &lookup); 2336 } 2337 2338 if (!lookup.max_speed_hz) 2339 return AE_OK; 2340 2341 spi = spi_alloc_device(ctlr); 2342 if (!spi) { 2343 dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n", 2344 dev_name(&adev->dev)); 2345 return AE_NO_MEMORY; 2346 } 2347 2348 2349 ACPI_COMPANION_SET(&spi->dev, adev); 2350 spi->max_speed_hz = lookup.max_speed_hz; 2351 spi->mode |= lookup.mode; 2352 spi->irq = lookup.irq; 2353 spi->bits_per_word = lookup.bits_per_word; 2354 spi->chip_select = lookup.chip_select; 2355 2356 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, 2357 sizeof(spi->modalias)); 2358 2359 if (spi->irq < 0) 2360 spi->irq = acpi_dev_gpio_irq_get(adev, 0); 2361 2362 acpi_device_set_enumerated(adev); 2363 2364 adev->power.flags.ignore_parent = true; 2365 if (spi_add_device(spi)) { 2366 adev->power.flags.ignore_parent = false; 
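/*
 * Registration failed: clear the ignore_parent flag set just above so
 * ACPI power management treats the device normally again, then log the
 * error and drop the reference.
 */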
2367 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n", 2368 dev_name(&adev->dev)); 2369 spi_dev_put(spi); 2370 } 2371 2372 return AE_OK; 2373 } 2374 2375 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, 2376 void *data, void **return_value) 2377 { 2378 struct spi_controller *ctlr = data; 2379 struct acpi_device *adev; 2380 2381 if (acpi_bus_get_device(handle, &adev)) 2382 return AE_OK; 2383 2384 return acpi_register_spi_device(ctlr, adev); 2385 } 2386 2387 #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32 2388 2389 static void acpi_register_spi_devices(struct spi_controller *ctlr) 2390 { 2391 acpi_status status; 2392 acpi_handle handle; 2393 2394 handle = ACPI_HANDLE(ctlr->dev.parent); 2395 if (!handle) 2396 return; 2397 2398 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 2399 SPI_ACPI_ENUMERATE_MAX_DEPTH, 2400 acpi_spi_add_device, NULL, ctlr, NULL); 2401 if (ACPI_FAILURE(status)) 2402 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n"); 2403 } 2404 #else 2405 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {} 2406 #endif /* CONFIG_ACPI */ 2407 2408 static void spi_controller_release(struct device *dev) 2409 { 2410 struct spi_controller *ctlr; 2411 2412 ctlr = container_of(dev, struct spi_controller, dev); 2413 kfree(ctlr); 2414 } 2415 2416 static struct class spi_master_class = { 2417 .name = "spi_master", 2418 .owner = THIS_MODULE, 2419 .dev_release = spi_controller_release, 2420 .dev_groups = spi_master_groups, 2421 }; 2422 2423 #ifdef CONFIG_SPI_SLAVE 2424 /** 2425 * spi_slave_abort - abort the ongoing transfer request on an SPI slave 2426 * controller 2427 * @spi: device used for the current transfer 2428 */ 2429 int spi_slave_abort(struct spi_device *spi) 2430 { 2431 struct spi_controller *ctlr = spi->controller; 2432 2433 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort) 2434 return ctlr->slave_abort(ctlr); 2435 2436 return -ENOTSUPP; 2437 } 2438 EXPORT_SYMBOL_GPL(spi_slave_abort); 2439 2440 static int match_true(struct device *dev, void *data) 2441 { 2442 return 1; 2443 } 2444 2445 static ssize_t slave_show(struct device *dev, struct device_attribute *attr, 2446 char *buf) 2447 { 2448 struct spi_controller *ctlr = container_of(dev, struct spi_controller, 2449 dev); 2450 struct device *child; 2451 2452 child = device_find_child(&ctlr->dev, NULL, match_true); 2453 return sprintf(buf, "%s\n", 2454 child ? 
to_spi_device(child)->modalias : NULL); 2455 } 2456 2457 static ssize_t slave_store(struct device *dev, struct device_attribute *attr, 2458 const char *buf, size_t count) 2459 { 2460 struct spi_controller *ctlr = container_of(dev, struct spi_controller, 2461 dev); 2462 struct spi_device *spi; 2463 struct device *child; 2464 char name[32]; 2465 int rc; 2466 2467 rc = sscanf(buf, "%31s", name); 2468 if (rc != 1 || !name[0]) 2469 return -EINVAL; 2470 2471 child = device_find_child(&ctlr->dev, NULL, match_true); 2472 if (child) { 2473 /* Remove registered slave */ 2474 device_unregister(child); 2475 put_device(child); 2476 } 2477 2478 if (strcmp(name, "(null)")) { 2479 /* Register new slave */ 2480 spi = spi_alloc_device(ctlr); 2481 if (!spi) 2482 return -ENOMEM; 2483 2484 strlcpy(spi->modalias, name, sizeof(spi->modalias)); 2485 2486 rc = spi_add_device(spi); 2487 if (rc) { 2488 spi_dev_put(spi); 2489 return rc; 2490 } 2491 } 2492 2493 return count; 2494 } 2495 2496 static DEVICE_ATTR_RW(slave); 2497 2498 static struct attribute *spi_slave_attrs[] = { 2499 &dev_attr_slave.attr, 2500 NULL, 2501 }; 2502 2503 static const struct attribute_group spi_slave_group = { 2504 .attrs = spi_slave_attrs, 2505 }; 2506 2507 static const struct attribute_group *spi_slave_groups[] = { 2508 &spi_controller_statistics_group, 2509 &spi_slave_group, 2510 NULL, 2511 }; 2512 2513 static struct class spi_slave_class = { 2514 .name = "spi_slave", 2515 .owner = THIS_MODULE, 2516 .dev_release = spi_controller_release, 2517 .dev_groups = spi_slave_groups, 2518 }; 2519 #else 2520 extern struct class spi_slave_class; /* dummy */ 2521 #endif 2522 2523 /** 2524 * __spi_alloc_controller - allocate an SPI master or slave controller 2525 * @dev: the controller, possibly using the platform_bus 2526 * @size: how much zeroed driver-private data to allocate; the pointer to this 2527 * memory is in the driver_data field of the returned device, accessible 2528 * with spi_controller_get_devdata(); the memory is cacheline aligned; 2529 * drivers granting DMA access to portions of their private data need to 2530 * round up @size using ALIGN(size, dma_get_cache_alignment()). 2531 * @slave: flag indicating whether to allocate an SPI master (false) or SPI 2532 * slave (true) controller 2533 * Context: can sleep 2534 * 2535 * This call is used only by SPI controller drivers, which are the 2536 * only ones directly touching chip registers. It's how they allocate 2537 * an spi_controller structure, prior to calling spi_register_controller(). 2538 * 2539 * This must be called from context that can sleep. 2540 * 2541 * The caller is responsible for assigning the bus number and initializing the 2542 * controller's methods before calling spi_register_controller(); and (after 2543 * errors adding the device) calling spi_controller_put() to prevent a memory 2544 * leak. 2545 * 2546 * Return: the SPI controller structure on success, else NULL. 
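 *
 * A minimal usage sketch (illustrative only; "my_priv" is a made-up
 * driver-private type):
 *
 *    struct spi_controller *ctlr;
 *    struct my_priv *priv;
 *
 *    ctlr = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *    if (!ctlr)
 *            return -ENOMEM;
 *    priv = spi_controller_get_devdata(ctlr);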
2547 */ 2548 struct spi_controller *__spi_alloc_controller(struct device *dev, 2549 unsigned int size, bool slave) 2550 { 2551 struct spi_controller *ctlr; 2552 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment()); 2553 2554 if (!dev) 2555 return NULL; 2556 2557 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL); 2558 if (!ctlr) 2559 return NULL; 2560 2561 device_initialize(&ctlr->dev); 2562 ctlr->bus_num = -1; 2563 ctlr->num_chipselect = 1; 2564 ctlr->slave = slave; 2565 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave) 2566 ctlr->dev.class = &spi_slave_class; 2567 else 2568 ctlr->dev.class = &spi_master_class; 2569 ctlr->dev.parent = dev; 2570 pm_suspend_ignore_children(&ctlr->dev, true); 2571 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size); 2572 2573 return ctlr; 2574 } 2575 EXPORT_SYMBOL_GPL(__spi_alloc_controller); 2576 2577 static void devm_spi_release_controller(struct device *dev, void *ctlr) 2578 { 2579 spi_controller_put(*(struct spi_controller **)ctlr); 2580 } 2581 2582 /** 2583 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller() 2584 * @dev: physical device of SPI controller 2585 * @size: how much zeroed driver-private data to allocate 2586 * @slave: whether to allocate an SPI master (false) or SPI slave (true) 2587 * Context: can sleep 2588 * 2589 * Allocate an SPI controller and automatically release a reference on it 2590 * when @dev is unbound from its driver. Drivers are thus relieved from 2591 * having to call spi_controller_put(). 2592 * 2593 * The arguments to this function are identical to __spi_alloc_controller(). 2594 * 2595 * Return: the SPI controller structure on success, else NULL. 2596 */ 2597 struct spi_controller *__devm_spi_alloc_controller(struct device *dev, 2598 unsigned int size, 2599 bool slave) 2600 { 2601 struct spi_controller **ptr, *ctlr; 2602 2603 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr), 2604 GFP_KERNEL); 2605 if (!ptr) 2606 return NULL; 2607 2608 ctlr = __spi_alloc_controller(dev, size, slave); 2609 if (ctlr) { 2610 ctlr->devm_allocated = true; 2611 *ptr = ctlr; 2612 devres_add(dev, ptr); 2613 } else { 2614 devres_free(ptr); 2615 } 2616 2617 return ctlr; 2618 } 2619 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller); 2620 2621 #ifdef CONFIG_OF 2622 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr) 2623 { 2624 int nb, i, *cs; 2625 struct device_node *np = ctlr->dev.of_node; 2626 2627 if (!np) 2628 return 0; 2629 2630 nb = of_gpio_named_count(np, "cs-gpios"); 2631 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); 2632 2633 /* Return error only for an incorrectly formed cs-gpios property */ 2634 if (nb == 0 || nb == -ENOENT) 2635 return 0; 2636 else if (nb < 0) 2637 return nb; 2638 2639 cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int), 2640 GFP_KERNEL); 2641 ctlr->cs_gpios = cs; 2642 2643 if (!ctlr->cs_gpios) 2644 return -ENOMEM; 2645 2646 for (i = 0; i < ctlr->num_chipselect; i++) 2647 cs[i] = -ENOENT; 2648 2649 for (i = 0; i < nb; i++) 2650 cs[i] = of_get_named_gpio(np, "cs-gpios", i); 2651 2652 return 0; 2653 } 2654 #else 2655 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr) 2656 { 2657 return 0; 2658 } 2659 #endif 2660 2661 /** 2662 * spi_get_gpio_descs() - grab chip select GPIOs for the master 2663 * @ctlr: The SPI master to grab GPIO descriptors for 2664 */ 2665 static int spi_get_gpio_descs(struct spi_controller *ctlr) 2666 { 2667 int nb, i; 2668 struct gpio_desc **cs; 2669 struct device *dev = &ctlr->dev; 2670 unsigned long native_cs_mask 
= 0; 2671 unsigned int num_cs_gpios = 0; 2672 2673 nb = gpiod_count(dev, "cs"); 2674 if (nb < 0) { 2675 /* No GPIOs at all is fine, else return the error */ 2676 if (nb == -ENOENT) 2677 return 0; 2678 return nb; 2679 } 2680 2681 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); 2682 2683 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs), 2684 GFP_KERNEL); 2685 if (!cs) 2686 return -ENOMEM; 2687 ctlr->cs_gpiods = cs; 2688 2689 for (i = 0; i < nb; i++) { 2690 /* 2691 * Most chipselects are active low, the inverted 2692 * semantics are handled by special quirks in gpiolib, 2693 * so initializing them GPIOD_OUT_LOW here means 2694 * "unasserted", in most cases this will drive the physical 2695 * line high. 2696 */ 2697 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i, 2698 GPIOD_OUT_LOW); 2699 if (IS_ERR(cs[i])) 2700 return PTR_ERR(cs[i]); 2701 2702 if (cs[i]) { 2703 /* 2704 * If we find a CS GPIO, name it after the device and 2705 * chip select line. 2706 */ 2707 char *gpioname; 2708 2709 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d", 2710 dev_name(dev), i); 2711 if (!gpioname) 2712 return -ENOMEM; 2713 gpiod_set_consumer_name(cs[i], gpioname); 2714 num_cs_gpios++; 2715 continue; 2716 } 2717 2718 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) { 2719 dev_err(dev, "Invalid native chip select %d\n", i); 2720 return -EINVAL; 2721 } 2722 native_cs_mask |= BIT(i); 2723 } 2724 2725 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1; 2726 2727 if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios && 2728 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) { 2729 dev_err(dev, "No unused native chip select available\n"); 2730 return -EINVAL; 2731 } 2732 2733 return 0; 2734 } 2735 2736 static int spi_controller_check_ops(struct spi_controller *ctlr) 2737 { 2738 /* 2739 * The controller may implement only the high-level SPI-memory like 2740 * operations if it does not support regular SPI transfers, and this is 2741 * valid use case. 2742 * If ->mem_ops is NULL, we request that at least one of the 2743 * ->transfer_xxx() method be implemented. 2744 */ 2745 if (ctlr->mem_ops) { 2746 if (!ctlr->mem_ops->exec_op) 2747 return -EINVAL; 2748 } else if (!ctlr->transfer && !ctlr->transfer_one && 2749 !ctlr->transfer_one_message) { 2750 return -EINVAL; 2751 } 2752 2753 return 0; 2754 } 2755 2756 /** 2757 * spi_register_controller - register SPI master or slave controller 2758 * @ctlr: initialized master, originally from spi_alloc_master() or 2759 * spi_alloc_slave() 2760 * Context: can sleep 2761 * 2762 * SPI controllers connect to their drivers using some non-SPI bus, 2763 * such as the platform bus. The final stage of probe() in that code 2764 * includes calling spi_register_controller() to hook up to this SPI bus glue. 2765 * 2766 * SPI controllers use board specific (often SOC specific) bus numbers, 2767 * and board-specific addressing for SPI devices combines those numbers 2768 * with chip select numbers. Since SPI does not directly support dynamic 2769 * device identification, boards need configuration tables telling which 2770 * chip is at which address. 2771 * 2772 * This must be called from context that can sleep. It returns zero on 2773 * success, else a negative error code (dropping the controller's refcount). 2774 * After a successful return, the caller is responsible for calling 2775 * spi_unregister_controller(). 2776 * 2777 * Return: zero on success, else a negative error code. 
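 *
 * An (illustrative) probe sequence in a controller driver; my_transfer_one()
 * and my_setup() are made-up callback names:
 *
 *    ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*priv));
 *    if (!ctlr)
 *            return -ENOMEM;
 *    ctlr->num_chipselect = 4;
 *    ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
 *    ctlr->transfer_one = my_transfer_one;
 *    ctlr->setup = my_setup;
 *    return devm_spi_register_controller(&pdev->dev, ctlr);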
2778 */ 2779 int spi_register_controller(struct spi_controller *ctlr) 2780 { 2781 struct device *dev = ctlr->dev.parent; 2782 struct boardinfo *bi; 2783 int status; 2784 int id, first_dynamic; 2785 2786 if (!dev) 2787 return -ENODEV; 2788 2789 /* 2790 * Make sure all necessary hooks are implemented before registering 2791 * the SPI controller. 2792 */ 2793 status = spi_controller_check_ops(ctlr); 2794 if (status) 2795 return status; 2796 2797 if (ctlr->bus_num >= 0) { 2798 /* devices with a fixed bus num must check-in with the num */ 2799 mutex_lock(&board_lock); 2800 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, 2801 ctlr->bus_num + 1, GFP_KERNEL); 2802 mutex_unlock(&board_lock); 2803 if (WARN(id < 0, "couldn't get idr")) 2804 return id == -ENOSPC ? -EBUSY : id; 2805 ctlr->bus_num = id; 2806 } else if (ctlr->dev.of_node) { 2807 /* allocate dynamic bus number using Linux idr */ 2808 id = of_alias_get_id(ctlr->dev.of_node, "spi"); 2809 if (id >= 0) { 2810 ctlr->bus_num = id; 2811 mutex_lock(&board_lock); 2812 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, 2813 ctlr->bus_num + 1, GFP_KERNEL); 2814 mutex_unlock(&board_lock); 2815 if (WARN(id < 0, "couldn't get idr")) 2816 return id == -ENOSPC ? -EBUSY : id; 2817 } 2818 } 2819 if (ctlr->bus_num < 0) { 2820 first_dynamic = of_alias_get_highest_id("spi"); 2821 if (first_dynamic < 0) 2822 first_dynamic = 0; 2823 else 2824 first_dynamic++; 2825 2826 mutex_lock(&board_lock); 2827 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic, 2828 0, GFP_KERNEL); 2829 mutex_unlock(&board_lock); 2830 if (WARN(id < 0, "couldn't get idr")) 2831 return id; 2832 ctlr->bus_num = id; 2833 } 2834 INIT_LIST_HEAD(&ctlr->queue); 2835 spin_lock_init(&ctlr->queue_lock); 2836 spin_lock_init(&ctlr->bus_lock_spinlock); 2837 mutex_init(&ctlr->bus_lock_mutex); 2838 mutex_init(&ctlr->io_mutex); 2839 ctlr->bus_lock_flag = 0; 2840 init_completion(&ctlr->xfer_completion); 2841 if (!ctlr->max_dma_len) 2842 ctlr->max_dma_len = INT_MAX; 2843 2844 /* register the device, then userspace will see it. 2845 * registration fails if the bus ID is in use. 2846 */ 2847 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num); 2848 2849 if (!spi_controller_is_slave(ctlr)) { 2850 if (ctlr->use_gpio_descriptors) { 2851 status = spi_get_gpio_descs(ctlr); 2852 if (status) 2853 goto free_bus_id; 2854 /* 2855 * A controller using GPIO descriptors always 2856 * supports SPI_CS_HIGH if need be. 2857 */ 2858 ctlr->mode_bits |= SPI_CS_HIGH; 2859 } else { 2860 /* Legacy code path for GPIOs from DT */ 2861 status = of_spi_get_gpio_numbers(ctlr); 2862 if (status) 2863 goto free_bus_id; 2864 } 2865 } 2866 2867 /* 2868 * Even if it's just one always-selected device, there must 2869 * be at least one chipselect. 2870 */ 2871 if (!ctlr->num_chipselect) { 2872 status = -EINVAL; 2873 goto free_bus_id; 2874 } 2875 2876 status = device_add(&ctlr->dev); 2877 if (status < 0) 2878 goto free_bus_id; 2879 dev_dbg(dev, "registered %s %s\n", 2880 spi_controller_is_slave(ctlr) ? "slave" : "master", 2881 dev_name(&ctlr->dev)); 2882 2883 /* 2884 * If we're using a queued driver, start the queue. Note that we don't 2885 * need the queueing logic if the driver is only supporting high-level 2886 * memory operations. 
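 * In other words: a driver that still provides ->transfer manages its own
 * queue (deprecated); one that provides ->transfer_one or
 * ->transfer_one_message gets the core message queue set up below; a
 * driver offering only ->mem_ops needs no queue at all.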
2887 */ 2888 if (ctlr->transfer) { 2889 dev_info(dev, "controller is unqueued, this is deprecated\n"); 2890 } else if (ctlr->transfer_one || ctlr->transfer_one_message) { 2891 status = spi_controller_initialize_queue(ctlr); 2892 if (status) { 2893 device_del(&ctlr->dev); 2894 goto free_bus_id; 2895 } 2896 } 2897 /* add statistics */ 2898 spin_lock_init(&ctlr->statistics.lock); 2899 2900 mutex_lock(&board_lock); 2901 list_add_tail(&ctlr->list, &spi_controller_list); 2902 list_for_each_entry(bi, &board_list, list) 2903 spi_match_controller_to_boardinfo(ctlr, &bi->board_info); 2904 mutex_unlock(&board_lock); 2905 2906 /* Register devices from the device tree and ACPI */ 2907 of_register_spi_devices(ctlr); 2908 acpi_register_spi_devices(ctlr); 2909 return status; 2910 2911 free_bus_id: 2912 mutex_lock(&board_lock); 2913 idr_remove(&spi_master_idr, ctlr->bus_num); 2914 mutex_unlock(&board_lock); 2915 return status; 2916 } 2917 EXPORT_SYMBOL_GPL(spi_register_controller); 2918 2919 static void devm_spi_unregister(void *ctlr) 2920 { 2921 spi_unregister_controller(ctlr); 2922 } 2923 2924 /** 2925 * devm_spi_register_controller - register managed SPI master or slave 2926 * controller 2927 * @dev: device managing SPI controller 2928 * @ctlr: initialized controller, originally from spi_alloc_master() or 2929 * spi_alloc_slave() 2930 * Context: can sleep 2931 * 2932 * Register a SPI device as with spi_register_controller() which will 2933 * automatically be unregistered and freed. 2934 * 2935 * Return: zero on success, else a negative error code. 2936 */ 2937 int devm_spi_register_controller(struct device *dev, 2938 struct spi_controller *ctlr) 2939 { 2940 int ret; 2941 2942 ret = spi_register_controller(ctlr); 2943 if (ret) 2944 return ret; 2945 2946 return devm_add_action_or_reset(dev, devm_spi_unregister, ctlr); 2947 } 2948 EXPORT_SYMBOL_GPL(devm_spi_register_controller); 2949 2950 static int __unregister(struct device *dev, void *null) 2951 { 2952 spi_unregister_device(to_spi_device(dev)); 2953 return 0; 2954 } 2955 2956 /** 2957 * spi_unregister_controller - unregister SPI master or slave controller 2958 * @ctlr: the controller being unregistered 2959 * Context: can sleep 2960 * 2961 * This call is used only by SPI controller drivers, which are the 2962 * only ones directly touching chip registers. 2963 * 2964 * This must be called from context that can sleep. 2965 * 2966 * Note that this function also drops a reference to the controller. 2967 */ 2968 void spi_unregister_controller(struct spi_controller *ctlr) 2969 { 2970 struct spi_controller *found; 2971 int id = ctlr->bus_num; 2972 2973 /* Prevent addition of new devices, unregister existing ones */ 2974 if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) 2975 mutex_lock(&spi_add_lock); 2976 2977 device_for_each_child(&ctlr->dev, NULL, __unregister); 2978 2979 /* First make sure that this controller was ever added */ 2980 mutex_lock(&board_lock); 2981 found = idr_find(&spi_master_idr, id); 2982 mutex_unlock(&board_lock); 2983 if (ctlr->queued) { 2984 if (spi_destroy_queue(ctlr)) 2985 dev_err(&ctlr->dev, "queue remove failed\n"); 2986 } 2987 mutex_lock(&board_lock); 2988 list_del(&ctlr->list); 2989 mutex_unlock(&board_lock); 2990 2991 device_del(&ctlr->dev); 2992 2993 /* Release the last reference on the controller if its driver 2994 * has not yet been converted to devm_spi_alloc_master/slave(). 
2995 */ 2996 if (!ctlr->devm_allocated) 2997 put_device(&ctlr->dev); 2998 2999 /* free bus id */ 3000 mutex_lock(&board_lock); 3001 if (found == ctlr) 3002 idr_remove(&spi_master_idr, id); 3003 mutex_unlock(&board_lock); 3004 3005 if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) 3006 mutex_unlock(&spi_add_lock); 3007 } 3008 EXPORT_SYMBOL_GPL(spi_unregister_controller); 3009 3010 int spi_controller_suspend(struct spi_controller *ctlr) 3011 { 3012 int ret; 3013 3014 /* Basically no-ops for non-queued controllers */ 3015 if (!ctlr->queued) 3016 return 0; 3017 3018 ret = spi_stop_queue(ctlr); 3019 if (ret) 3020 dev_err(&ctlr->dev, "queue stop failed\n"); 3021 3022 return ret; 3023 } 3024 EXPORT_SYMBOL_GPL(spi_controller_suspend); 3025 3026 int spi_controller_resume(struct spi_controller *ctlr) 3027 { 3028 int ret; 3029 3030 if (!ctlr->queued) 3031 return 0; 3032 3033 ret = spi_start_queue(ctlr); 3034 if (ret) 3035 dev_err(&ctlr->dev, "queue restart failed\n"); 3036 3037 return ret; 3038 } 3039 EXPORT_SYMBOL_GPL(spi_controller_resume); 3040 3041 static int __spi_controller_match(struct device *dev, const void *data) 3042 { 3043 struct spi_controller *ctlr; 3044 const u16 *bus_num = data; 3045 3046 ctlr = container_of(dev, struct spi_controller, dev); 3047 return ctlr->bus_num == *bus_num; 3048 } 3049 3050 /** 3051 * spi_busnum_to_master - look up master associated with bus_num 3052 * @bus_num: the master's bus number 3053 * Context: can sleep 3054 * 3055 * This call may be used with devices that are registered after 3056 * arch init time. It returns a refcounted pointer to the relevant 3057 * spi_controller (which the caller must release), or NULL if there is 3058 * no such master registered. 3059 * 3060 * Return: the SPI master structure on success, else NULL. 3061 */ 3062 struct spi_controller *spi_busnum_to_master(u16 bus_num) 3063 { 3064 struct device *dev; 3065 struct spi_controller *ctlr = NULL; 3066 3067 dev = class_find_device(&spi_master_class, NULL, &bus_num, 3068 __spi_controller_match); 3069 if (dev) 3070 ctlr = container_of(dev, struct spi_controller, dev); 3071 /* reference got in class_find_device */ 3072 return ctlr; 3073 } 3074 EXPORT_SYMBOL_GPL(spi_busnum_to_master); 3075 3076 /*-------------------------------------------------------------------------*/ 3077 3078 /* Core methods for SPI resource management */ 3079 3080 /** 3081 * spi_res_alloc - allocate a spi resource that is life-cycle managed 3082 * during the processing of a spi_message while using 3083 * spi_transfer_one 3084 * @spi: the spi device for which we allocate memory 3085 * @release: the release code to execute for this resource 3086 * @size: size to alloc and return 3087 * @gfp: GFP allocation flags 3088 * 3089 * Return: the pointer to the allocated data 3090 * 3091 * This may get enhanced in the future to allocate from a memory pool 3092 * of the @spi_device or @spi_controller to avoid repeated allocations. 
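 *
 * Illustrative pairing (sketch only; my_release() and "my_state" are
 * made-up names):
 *
 *    struct my_state *st;
 *
 *    st = spi_res_alloc(spi, my_release, sizeof(*st), GFP_KERNEL);
 *    if (!st)
 *            return -ENOMEM;
 *    spi_res_add(msg, st);
 *
 * The resource is then freed via my_release() when spi_res_release()
 * runs at the end of message processing.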
3093 */ 3094 void *spi_res_alloc(struct spi_device *spi, 3095 spi_res_release_t release, 3096 size_t size, gfp_t gfp) 3097 { 3098 struct spi_res *sres; 3099 3100 sres = kzalloc(sizeof(*sres) + size, gfp); 3101 if (!sres) 3102 return NULL; 3103 3104 INIT_LIST_HEAD(&sres->entry); 3105 sres->release = release; 3106 3107 return sres->data; 3108 } 3109 EXPORT_SYMBOL_GPL(spi_res_alloc); 3110 3111 /** 3112 * spi_res_free - free an spi resource 3113 * @res: pointer to the custom data of a resource 3114 * 3115 */ 3116 void spi_res_free(void *res) 3117 { 3118 struct spi_res *sres = container_of(res, struct spi_res, data); 3119 3120 if (!res) 3121 return; 3122 3123 WARN_ON(!list_empty(&sres->entry)); 3124 kfree(sres); 3125 } 3126 EXPORT_SYMBOL_GPL(spi_res_free); 3127 3128 /** 3129 * spi_res_add - add a spi_res to the spi_message 3130 * @message: the spi message 3131 * @res: the spi_resource 3132 */ 3133 void spi_res_add(struct spi_message *message, void *res) 3134 { 3135 struct spi_res *sres = container_of(res, struct spi_res, data); 3136 3137 WARN_ON(!list_empty(&sres->entry)); 3138 list_add_tail(&sres->entry, &message->resources); 3139 } 3140 EXPORT_SYMBOL_GPL(spi_res_add); 3141 3142 /** 3143 * spi_res_release - release all spi resources for this message 3144 * @ctlr: the @spi_controller 3145 * @message: the @spi_message 3146 */ 3147 void spi_res_release(struct spi_controller *ctlr, struct spi_message *message) 3148 { 3149 struct spi_res *res, *tmp; 3150 3151 list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) { 3152 if (res->release) 3153 res->release(ctlr, message, res->data); 3154 3155 list_del(&res->entry); 3156 3157 kfree(res); 3158 } 3159 } 3160 EXPORT_SYMBOL_GPL(spi_res_release); 3161 3162 /*-------------------------------------------------------------------------*/ 3163 3164 /* Core methods for spi_message alterations */ 3165 3166 static void __spi_replace_transfers_release(struct spi_controller *ctlr, 3167 struct spi_message *msg, 3168 void *res) 3169 { 3170 struct spi_replaced_transfers *rxfer = res; 3171 size_t i; 3172 3173 /* call extra callback if requested */ 3174 if (rxfer->release) 3175 rxfer->release(ctlr, msg, res); 3176 3177 /* insert replaced transfers back into the message */ 3178 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); 3179 3180 /* remove the formerly inserted entries */ 3181 for (i = 0; i < rxfer->inserted; i++) 3182 list_del(&rxfer->inserted_transfers[i].transfer_list); 3183 } 3184 3185 /** 3186 * spi_replace_transfers - replace transfers with several transfers 3187 * and register change with spi_message.resources 3188 * @msg: the spi_message we work upon 3189 * @xfer_first: the first spi_transfer we want to replace 3190 * @remove: number of transfers to remove 3191 * @insert: the number of transfers we want to insert instead 3192 * @release: extra release code necessary in some circumstances 3193 * @extradatasize: extra data to allocate (with alignment guarantees 3194 * of struct @spi_transfer) 3195 * @gfp: gfp flags 3196 * 3197 * Returns: pointer to @spi_replaced_transfers, 3198 * PTR_ERR(...) in case of errors. 
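 *
 * For example, __spi_split_transfer_maxsize() below uses this to swap a
 * single oversized transfer for DIV_ROUND_UP(len, maxsize) shorter ones.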
3199 */ 3200 struct spi_replaced_transfers *spi_replace_transfers( 3201 struct spi_message *msg, 3202 struct spi_transfer *xfer_first, 3203 size_t remove, 3204 size_t insert, 3205 spi_replaced_release_t release, 3206 size_t extradatasize, 3207 gfp_t gfp) 3208 { 3209 struct spi_replaced_transfers *rxfer; 3210 struct spi_transfer *xfer; 3211 size_t i; 3212 3213 /* allocate the structure using spi_res */ 3214 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, 3215 struct_size(rxfer, inserted_transfers, insert) 3216 + extradatasize, 3217 gfp); 3218 if (!rxfer) 3219 return ERR_PTR(-ENOMEM); 3220 3221 /* the release code to invoke before running the generic release */ 3222 rxfer->release = release; 3223 3224 /* assign extradata */ 3225 if (extradatasize) 3226 rxfer->extradata = 3227 &rxfer->inserted_transfers[insert]; 3228 3229 /* init the replaced_transfers list */ 3230 INIT_LIST_HEAD(&rxfer->replaced_transfers); 3231 3232 /* assign the list_entry after which we should reinsert 3233 * the @replaced_transfers - it may be spi_message.messages! 3234 */ 3235 rxfer->replaced_after = xfer_first->transfer_list.prev; 3236 3237 /* remove the requested number of transfers */ 3238 for (i = 0; i < remove; i++) { 3239 /* if the entry after replaced_after it is msg->transfers 3240 * then we have been requested to remove more transfers 3241 * than are in the list 3242 */ 3243 if (rxfer->replaced_after->next == &msg->transfers) { 3244 dev_err(&msg->spi->dev, 3245 "requested to remove more spi_transfers than are available\n"); 3246 /* insert replaced transfers back into the message */ 3247 list_splice(&rxfer->replaced_transfers, 3248 rxfer->replaced_after); 3249 3250 /* free the spi_replace_transfer structure */ 3251 spi_res_free(rxfer); 3252 3253 /* and return with an error */ 3254 return ERR_PTR(-EINVAL); 3255 } 3256 3257 /* remove the entry after replaced_after from list of 3258 * transfers and add it to list of replaced_transfers 3259 */ 3260 list_move_tail(rxfer->replaced_after->next, 3261 &rxfer->replaced_transfers); 3262 } 3263 3264 /* create copy of the given xfer with identical settings 3265 * based on the first transfer to get removed 3266 */ 3267 for (i = 0; i < insert; i++) { 3268 /* we need to run in reverse order */ 3269 xfer = &rxfer->inserted_transfers[insert - 1 - i]; 3270 3271 /* copy all spi_transfer data */ 3272 memcpy(xfer, xfer_first, sizeof(*xfer)); 3273 3274 /* add to list */ 3275 list_add(&xfer->transfer_list, rxfer->replaced_after); 3276 3277 /* clear cs_change and delay for all but the last */ 3278 if (i) { 3279 xfer->cs_change = false; 3280 xfer->delay.value = 0; 3281 } 3282 } 3283 3284 /* set up inserted */ 3285 rxfer->inserted = insert; 3286 3287 /* and register it with spi_res/spi_message */ 3288 spi_res_add(msg, rxfer); 3289 3290 return rxfer; 3291 } 3292 EXPORT_SYMBOL_GPL(spi_replace_transfers); 3293 3294 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, 3295 struct spi_message *msg, 3296 struct spi_transfer **xferp, 3297 size_t maxsize, 3298 gfp_t gfp) 3299 { 3300 struct spi_transfer *xfer = *xferp, *xfers; 3301 struct spi_replaced_transfers *srt; 3302 size_t offset; 3303 size_t count, i; 3304 3305 /* calculate how many we have to replace */ 3306 count = DIV_ROUND_UP(xfer->len, maxsize); 3307 3308 /* create replacement */ 3309 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp); 3310 if (IS_ERR(srt)) 3311 return PTR_ERR(srt); 3312 xfers = srt->inserted_transfers; 3313 3314 /* now handle each of those newly inserted spi_transfers 3315 
* note that the replacements spi_transfers all are preset 3316 * to the same values as *xferp, so tx_buf, rx_buf and len 3317 * are all identical (as well as most others) 3318 * so we just have to fix up len and the pointers. 3319 * 3320 * this also includes support for the depreciated 3321 * spi_message.is_dma_mapped interface 3322 */ 3323 3324 /* the first transfer just needs the length modified, so we 3325 * run it outside the loop 3326 */ 3327 xfers[0].len = min_t(size_t, maxsize, xfer[0].len); 3328 3329 /* all the others need rx_buf/tx_buf also set */ 3330 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) { 3331 /* update rx_buf, tx_buf and dma */ 3332 if (xfers[i].rx_buf) 3333 xfers[i].rx_buf += offset; 3334 if (xfers[i].rx_dma) 3335 xfers[i].rx_dma += offset; 3336 if (xfers[i].tx_buf) 3337 xfers[i].tx_buf += offset; 3338 if (xfers[i].tx_dma) 3339 xfers[i].tx_dma += offset; 3340 3341 /* update length */ 3342 xfers[i].len = min(maxsize, xfers[i].len - offset); 3343 } 3344 3345 /* we set up xferp to the last entry we have inserted, 3346 * so that we skip those already split transfers 3347 */ 3348 *xferp = &xfers[count - 1]; 3349 3350 /* increment statistics counters */ 3351 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, 3352 transfers_split_maxsize); 3353 SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics, 3354 transfers_split_maxsize); 3355 3356 return 0; 3357 } 3358 3359 /** 3360 * spi_split_transfers_maxsize - split spi transfers into multiple transfers 3361 * when an individual transfer exceeds a 3362 * certain size 3363 * @ctlr: the @spi_controller for this transfer 3364 * @msg: the @spi_message to transform 3365 * @maxsize: the maximum when to apply this 3366 * @gfp: GFP allocation flags 3367 * 3368 * Return: status of transformation 3369 */ 3370 int spi_split_transfers_maxsize(struct spi_controller *ctlr, 3371 struct spi_message *msg, 3372 size_t maxsize, 3373 gfp_t gfp) 3374 { 3375 struct spi_transfer *xfer; 3376 int ret; 3377 3378 /* iterate over the transfer_list, 3379 * but note that xfer is advanced to the last transfer inserted 3380 * to avoid checking sizes again unnecessarily (also xfer does 3381 * potentiall belong to a different list by the time the 3382 * replacement has happened 3383 */ 3384 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 3385 if (xfer->len > maxsize) { 3386 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, 3387 maxsize, gfp); 3388 if (ret) 3389 return ret; 3390 } 3391 } 3392 3393 return 0; 3394 } 3395 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize); 3396 3397 /*-------------------------------------------------------------------------*/ 3398 3399 /* Core methods for SPI controller protocol drivers. Some of the 3400 * other core methods are currently defined as inline functions. 3401 */ 3402 3403 static int __spi_validate_bits_per_word(struct spi_controller *ctlr, 3404 u8 bits_per_word) 3405 { 3406 if (ctlr->bits_per_word_mask) { 3407 /* Only 32 bits fit in the mask */ 3408 if (bits_per_word > 32) 3409 return -EINVAL; 3410 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word))) 3411 return -EINVAL; 3412 } 3413 3414 return 0; 3415 } 3416 3417 /** 3418 * spi_setup - setup SPI mode and clock rate 3419 * @spi: the device whose settings are being modified 3420 * Context: can sleep, and no requests are queued to the device 3421 * 3422 * SPI protocol drivers may need to update the transfer mode if the 3423 * device doesn't work with its default. 
They may likewise need 3424 * to update clock rates or word sizes from initial values. This function 3425 * changes those settings, and must be called from a context that can sleep. 3426 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take 3427 * effect the next time the device is selected and data is transferred to 3428 * or from it. When this function returns, the SPI device is deselected. 3429 * 3430 * Note that this call will fail if the protocol driver specifies an option 3431 * that the underlying controller or its driver does not support. For 3432 * example, not all hardware supports wire transfers using nine bit words, 3433 * LSB-first wire encoding, or active-high chipselects. 3434 * 3435 * Return: zero on success, else a negative error code. 3436 */ 3437 int spi_setup(struct spi_device *spi) 3438 { 3439 unsigned bad_bits, ugly_bits; 3440 int status; 3441 3442 /* 3443 * Check the mode to ensure that no two of DUAL, QUAD and NO_MOSI/MISO 3444 * are set at the same time 3445 */ 3446 if ((hweight_long(spi->mode & 3447 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) || 3448 (hweight_long(spi->mode & 3449 (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) { 3450 dev_err(&spi->dev, 3451 "setup: can not select any two of dual, quad and no-rx/tx at the same time\n"); 3452 return -EINVAL; 3453 } 3454 /* In SPI_3WIRE mode, DUAL and QUAD modes are forbidden 3455 */ 3456 if ((spi->mode & SPI_3WIRE) && (spi->mode & 3457 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | 3458 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))) 3459 return -EINVAL; 3460 /* Help drivers fail *cleanly* when they need options 3461 * that aren't supported with their current controller. 3462 * SPI_CS_WORD has a fallback software implementation, 3463 * so it is ignored here. 3464 */ 3465 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD | 3466 SPI_NO_TX | SPI_NO_RX); 3467 /* Nothing prevents working with an active-high CS if it 3468 * is driven by a GPIO.
3469 */ 3470 if (gpio_is_valid(spi->cs_gpio)) 3471 bad_bits &= ~SPI_CS_HIGH; 3472 ugly_bits = bad_bits & 3473 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | 3474 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL); 3475 if (ugly_bits) { 3476 dev_warn(&spi->dev, 3477 "setup: ignoring unsupported mode bits %x\n", 3478 ugly_bits); 3479 spi->mode &= ~ugly_bits; 3480 bad_bits &= ~ugly_bits; 3481 } 3482 if (bad_bits) { 3483 dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 3484 bad_bits); 3485 return -EINVAL; 3486 } 3487 3488 if (!spi->bits_per_word) 3489 spi->bits_per_word = 8; 3490 3491 status = __spi_validate_bits_per_word(spi->controller, 3492 spi->bits_per_word); 3493 if (status) 3494 return status; 3495 3496 if (spi->controller->max_speed_hz && 3497 (!spi->max_speed_hz || 3498 spi->max_speed_hz > spi->controller->max_speed_hz)) 3499 spi->max_speed_hz = spi->controller->max_speed_hz; 3500 3501 mutex_lock(&spi->controller->io_mutex); 3502 3503 if (spi->controller->setup) { 3504 status = spi->controller->setup(spi); 3505 if (status) { 3506 mutex_unlock(&spi->controller->io_mutex); 3507 dev_err(&spi->controller->dev, "Failed to setup device: %d\n", 3508 status); 3509 return status; 3510 } 3511 } 3512 3513 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) { 3514 status = pm_runtime_get_sync(spi->controller->dev.parent); 3515 if (status < 0) { 3516 mutex_unlock(&spi->controller->io_mutex); 3517 pm_runtime_put_noidle(spi->controller->dev.parent); 3518 dev_err(&spi->controller->dev, "Failed to power device: %d\n", 3519 status); 3520 return status; 3521 } 3522 3523 /* 3524 * We do not want to return positive value from pm_runtime_get, 3525 * there are many instances of devices calling spi_setup() and 3526 * checking for a non-zero return value instead of a negative 3527 * return value. 3528 */ 3529 status = 0; 3530 3531 spi_set_cs(spi, false, true); 3532 pm_runtime_mark_last_busy(spi->controller->dev.parent); 3533 pm_runtime_put_autosuspend(spi->controller->dev.parent); 3534 } else { 3535 spi_set_cs(spi, false, true); 3536 } 3537 3538 mutex_unlock(&spi->controller->io_mutex); 3539 3540 if (spi->rt && !spi->controller->rt) { 3541 spi->controller->rt = true; 3542 spi_set_thread_rt(spi->controller); 3543 } 3544 3545 trace_spi_setup(spi, status); 3546 3547 dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n", 3548 spi->mode & SPI_MODE_X_MASK, 3549 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 3550 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 3551 (spi->mode & SPI_3WIRE) ? "3wire, " : "", 3552 (spi->mode & SPI_LOOP) ? 
"loopback, " : "", 3553 spi->bits_per_word, spi->max_speed_hz, 3554 status); 3555 3556 return status; 3557 } 3558 EXPORT_SYMBOL_GPL(spi_setup); 3559 3560 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer, 3561 struct spi_device *spi) 3562 { 3563 int delay1, delay2; 3564 3565 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer); 3566 if (delay1 < 0) 3567 return delay1; 3568 3569 delay2 = spi_delay_to_ns(&spi->word_delay, xfer); 3570 if (delay2 < 0) 3571 return delay2; 3572 3573 if (delay1 < delay2) 3574 memcpy(&xfer->word_delay, &spi->word_delay, 3575 sizeof(xfer->word_delay)); 3576 3577 return 0; 3578 } 3579 3580 static int __spi_validate(struct spi_device *spi, struct spi_message *message) 3581 { 3582 struct spi_controller *ctlr = spi->controller; 3583 struct spi_transfer *xfer; 3584 int w_size; 3585 3586 if (list_empty(&message->transfers)) 3587 return -EINVAL; 3588 3589 /* If an SPI controller does not support toggling the CS line on each 3590 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO 3591 * for the CS line, we can emulate the CS-per-word hardware function by 3592 * splitting transfers into one-word transfers and ensuring that 3593 * cs_change is set for each transfer. 3594 */ 3595 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) || 3596 spi->cs_gpiod || 3597 gpio_is_valid(spi->cs_gpio))) { 3598 size_t maxsize; 3599 int ret; 3600 3601 maxsize = (spi->bits_per_word + 7) / 8; 3602 3603 /* spi_split_transfers_maxsize() requires message->spi */ 3604 message->spi = spi; 3605 3606 ret = spi_split_transfers_maxsize(ctlr, message, maxsize, 3607 GFP_KERNEL); 3608 if (ret) 3609 return ret; 3610 3611 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3612 /* don't change cs_change on the last entry in the list */ 3613 if (list_is_last(&xfer->transfer_list, &message->transfers)) 3614 break; 3615 xfer->cs_change = 1; 3616 } 3617 } 3618 3619 /* Half-duplex links include original MicroWire, and ones with 3620 * only one data pin like SPI_3WIRE (switches direction) or where 3621 * either MOSI or MISO is missing. They can also be caused by 3622 * software limitations. 3623 */ 3624 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) || 3625 (spi->mode & SPI_3WIRE)) { 3626 unsigned flags = ctlr->flags; 3627 3628 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3629 if (xfer->rx_buf && xfer->tx_buf) 3630 return -EINVAL; 3631 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf) 3632 return -EINVAL; 3633 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf) 3634 return -EINVAL; 3635 } 3636 } 3637 3638 /** 3639 * Set transfer bits_per_word and max speed as spi device default if 3640 * it is not set for this transfer. 3641 * Set transfer tx_nbits and rx_nbits as single transfer default 3642 * (SPI_NBITS_SINGLE) if it is not set for this transfer. 3643 * Ensure transfer word_delay is at least as long as that required by 3644 * device itself. 
3645 */ 3646 message->frame_length = 0; 3647 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3648 xfer->effective_speed_hz = 0; 3649 message->frame_length += xfer->len; 3650 if (!xfer->bits_per_word) 3651 xfer->bits_per_word = spi->bits_per_word; 3652 3653 if (!xfer->speed_hz) 3654 xfer->speed_hz = spi->max_speed_hz; 3655 3656 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz) 3657 xfer->speed_hz = ctlr->max_speed_hz; 3658 3659 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word)) 3660 return -EINVAL; 3661 3662 /* 3663 * SPI transfer length should be multiple of SPI word size 3664 * where SPI word size should be power-of-two multiple 3665 */ 3666 if (xfer->bits_per_word <= 8) 3667 w_size = 1; 3668 else if (xfer->bits_per_word <= 16) 3669 w_size = 2; 3670 else 3671 w_size = 4; 3672 3673 /* No partial transfers accepted */ 3674 if (xfer->len % w_size) 3675 return -EINVAL; 3676 3677 if (xfer->speed_hz && ctlr->min_speed_hz && 3678 xfer->speed_hz < ctlr->min_speed_hz) 3679 return -EINVAL; 3680 3681 if (xfer->tx_buf && !xfer->tx_nbits) 3682 xfer->tx_nbits = SPI_NBITS_SINGLE; 3683 if (xfer->rx_buf && !xfer->rx_nbits) 3684 xfer->rx_nbits = SPI_NBITS_SINGLE; 3685 /* check transfer tx/rx_nbits: 3686 * 1. check the value matches one of single, dual and quad 3687 * 2. check tx/rx_nbits match the mode in spi_device 3688 */ 3689 if (xfer->tx_buf) { 3690 if (spi->mode & SPI_NO_TX) 3691 return -EINVAL; 3692 if (xfer->tx_nbits != SPI_NBITS_SINGLE && 3693 xfer->tx_nbits != SPI_NBITS_DUAL && 3694 xfer->tx_nbits != SPI_NBITS_QUAD) 3695 return -EINVAL; 3696 if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 3697 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 3698 return -EINVAL; 3699 if ((xfer->tx_nbits == SPI_NBITS_QUAD) && 3700 !(spi->mode & SPI_TX_QUAD)) 3701 return -EINVAL; 3702 } 3703 /* check transfer rx_nbits */ 3704 if (xfer->rx_buf) { 3705 if (spi->mode & SPI_NO_RX) 3706 return -EINVAL; 3707 if (xfer->rx_nbits != SPI_NBITS_SINGLE && 3708 xfer->rx_nbits != SPI_NBITS_DUAL && 3709 xfer->rx_nbits != SPI_NBITS_QUAD) 3710 return -EINVAL; 3711 if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 3712 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 3713 return -EINVAL; 3714 if ((xfer->rx_nbits == SPI_NBITS_QUAD) && 3715 !(spi->mode & SPI_RX_QUAD)) 3716 return -EINVAL; 3717 } 3718 3719 if (_spi_xfer_word_delay_update(xfer, spi)) 3720 return -EINVAL; 3721 } 3722 3723 message->status = -EINPROGRESS; 3724 3725 return 0; 3726 } 3727 3728 static int __spi_async(struct spi_device *spi, struct spi_message *message) 3729 { 3730 struct spi_controller *ctlr = spi->controller; 3731 struct spi_transfer *xfer; 3732 3733 /* 3734 * Some controllers do not support doing regular SPI transfers. Return 3735 * ENOTSUPP when this is the case. 
3736 */ 3737 if (!ctlr->transfer) 3738 return -ENOTSUPP; 3739 3740 message->spi = spi; 3741 3742 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async); 3743 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); 3744 3745 trace_spi_message_submit(message); 3746 3747 if (!ctlr->ptp_sts_supported) { 3748 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3749 xfer->ptp_sts_word_pre = 0; 3750 ptp_read_system_prets(xfer->ptp_sts); 3751 } 3752 } 3753 3754 return ctlr->transfer(spi, message); 3755 } 3756 3757 /** 3758 * spi_async - asynchronous SPI transfer 3759 * @spi: device with which data will be exchanged 3760 * @message: describes the data transfers, including completion callback 3761 * Context: any (irqs may be blocked, etc) 3762 * 3763 * This call may be used in_irq and other contexts which can't sleep, 3764 * as well as from task contexts which can sleep. 3765 * 3766 * The completion callback is invoked in a context which can't sleep. 3767 * Before that invocation, the value of message->status is undefined. 3768 * When the callback is issued, message->status holds either zero (to 3769 * indicate complete success) or a negative error code. After that 3770 * callback returns, the driver which issued the transfer request may 3771 * deallocate the associated memory; it's no longer in use by any SPI 3772 * core or controller driver code. 3773 * 3774 * Note that although all messages to a spi_device are handled in 3775 * FIFO order, messages may go to different devices in other orders. 3776 * Some device might be higher priority, or have various "hard" access 3777 * time requirements, for example. 3778 * 3779 * On detection of any fault during the transfer, processing of 3780 * the entire message is aborted, and the device is deselected. 3781 * Until returning from the associated message completion callback, 3782 * no other spi_message queued to that device will be processed. 3783 * (This rule applies equally to all the synchronous transfer calls, 3784 * which are wrappers around this core asynchronous primitive.) 3785 * 3786 * Return: zero on success, else a negative error code. 3787 */ 3788 int spi_async(struct spi_device *spi, struct spi_message *message) 3789 { 3790 struct spi_controller *ctlr = spi->controller; 3791 int ret; 3792 unsigned long flags; 3793 3794 ret = __spi_validate(spi, message); 3795 if (ret != 0) 3796 return ret; 3797 3798 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 3799 3800 if (ctlr->bus_lock_flag) 3801 ret = -EBUSY; 3802 else 3803 ret = __spi_async(spi, message); 3804 3805 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3806 3807 return ret; 3808 } 3809 EXPORT_SYMBOL_GPL(spi_async); 3810 3811 /** 3812 * spi_async_locked - version of spi_async with exclusive bus usage 3813 * @spi: device with which data will be exchanged 3814 * @message: describes the data transfers, including completion callback 3815 * Context: any (irqs may be blocked, etc) 3816 * 3817 * This call may be used in_irq and other contexts which can't sleep, 3818 * as well as from task contexts which can sleep. 3819 * 3820 * The completion callback is invoked in a context which can't sleep. 3821 * Before that invocation, the value of message->status is undefined. 3822 * When the callback is issued, message->status holds either zero (to 3823 * indicate complete success) or a negative error code. 
After that 3824 * callback returns, the driver which issued the transfer request may 3825 * deallocate the associated memory; it's no longer in use by any SPI 3826 * core or controller driver code. 3827 * 3828 * Note that although all messages to a spi_device are handled in 3829 * FIFO order, messages may go to different devices in other orders. 3830 * Some device might be higher priority, or have various "hard" access 3831 * time requirements, for example. 3832 * 3833 * On detection of any fault during the transfer, processing of 3834 * the entire message is aborted, and the device is deselected. 3835 * Until returning from the associated message completion callback, 3836 * no other spi_message queued to that device will be processed. 3837 * (This rule applies equally to all the synchronous transfer calls, 3838 * which are wrappers around this core asynchronous primitive.) 3839 * 3840 * Return: zero on success, else a negative error code. 3841 */ 3842 int spi_async_locked(struct spi_device *spi, struct spi_message *message) 3843 { 3844 struct spi_controller *ctlr = spi->controller; 3845 int ret; 3846 unsigned long flags; 3847 3848 ret = __spi_validate(spi, message); 3849 if (ret != 0) 3850 return ret; 3851 3852 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 3853 3854 ret = __spi_async(spi, message); 3855 3856 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3857 3858 return ret; 3859 3860 } 3861 EXPORT_SYMBOL_GPL(spi_async_locked); 3862 3863 /*-------------------------------------------------------------------------*/ 3864 3865 /* Utility methods for SPI protocol drivers, layered on 3866 * top of the core. Some other utility methods are defined as 3867 * inline functions. 3868 */ 3869 3870 static void spi_complete(void *arg) 3871 { 3872 complete(arg); 3873 } 3874 3875 static int __spi_sync(struct spi_device *spi, struct spi_message *message) 3876 { 3877 DECLARE_COMPLETION_ONSTACK(done); 3878 int status; 3879 struct spi_controller *ctlr = spi->controller; 3880 unsigned long flags; 3881 3882 status = __spi_validate(spi, message); 3883 if (status != 0) 3884 return status; 3885 3886 message->complete = spi_complete; 3887 message->context = &done; 3888 message->spi = spi; 3889 3890 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync); 3891 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync); 3892 3893 /* If we're not using the legacy transfer method then we will 3894 * try to transfer in the calling context so special case. 3895 * This code would be less tricky if we could remove the 3896 * support for driver implemented message queues. 3897 */ 3898 if (ctlr->transfer == spi_queued_transfer) { 3899 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 3900 3901 trace_spi_message_submit(message); 3902 3903 status = __spi_queued_transfer(spi, message, false); 3904 3905 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3906 } else { 3907 status = spi_async_locked(spi, message); 3908 } 3909 3910 if (status == 0) { 3911 /* Push out the messages in the calling context if we 3912 * can. 
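 * Running __spi_pump_messages() here executes the message we have just
 * queued directly in the caller's context when the controller is idle,
 * avoiding a round trip through the worker thread.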

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages. Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip. (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->controller->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->controller->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
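
/*
 * Usage sketch (illustrative only): a single full-duplex exchange done
 * synchronously.  cmd, resp and len are hypothetical, caller-provided
 * DMA-safe buffers and a length, not names from this file.
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf	= cmd,
 *		.rx_buf	= resp,
 *		.len	= len,
 *	};
 *	struct spi_message msg;
 *	int ret;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	ret = spi_sync(spi, &msg);
 */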

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->bus_lock_mutex);

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
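
/*
 * Usage sketch (illustrative only): the intended pairing of the bus-lock
 * calls, keeping other clients off the bus across a sequence of related
 * messages.  msg1 and msg2 are hypothetical, already-built spi_message
 * structures.
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->controller);
 */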

/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf. The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int status;
	struct spi_message message;
	struct spi_transfer x[2];
	u8 *local_buf;

	/* Use preallocated DMA-safe buffer if we can. We can't avoid
	 * copying here, (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
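
/*
 * Usage sketch (illustrative only): reading a two-byte register value with
 * the helper above.  MY_READ_REG is a hypothetical command opcode; plain
 * stack buffers are fine here because the helper bounces the data through
 * its own DMA-safe buffer.
 *
 *	u8 cmd = MY_READ_REG;
 *	u8 val[2];
 *	int ret;
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, val, sizeof(val));
 */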

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF)
/* must call put_device() when done with returned spi_device device */
struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);

	return dev ? to_spi_device(dev) : NULL;
}
EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
#endif /* IS_ENABLED(CONFIG_OF) */

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* SPI controllers are not registered on the SPI bus itself, so look them
 * up via their device class instead.
 */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device_by_of_node(&spi_master_class, node);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device_by_of_node(&spi_slave_class, node);
	if (!dev)
		return NULL;

	/* reference obtained in class_find_device() */
	return container_of(dev, struct spi_controller, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
					__func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* no? not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, adev,
					spi_acpi_controller_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_controller, dev);
}

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
	return to_spi_device(dev);
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		ctlr = acpi_spi_find_controller_by_adev(adev->parent);
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_slave_class);
		if (status < 0)
			goto err3;
	}

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_master_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);