// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	const char *end = memchr(buf, '\n', count);
	const size_t len = end ? end - buf : count;
	const char *driver_override, *old;

	/* We need to keep extra room for a newline when displaying value */
	if (len >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, len, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	device_lock(dev);
	old = spi->driver_override;
	if (len) {
		spi->driver_override = driver_override;
	} else {
		/* Empty string, disable driver override */
		spi->driver_override = NULL;
		kfree(driver_override);
	}
	device_unlock(dev);
	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,		\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(&ctlr->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
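
/*
 * Note (illustrative, not part of the original file): each of the
 * SPI_STATISTICS_SHOW() and SPI_STATISTICS_TRANSFER_BYTES_HISTO() instances
 * above expands, via SPI_STATISTICS_ATTRS(), into a pair of read-only sysfs
 * attributes - one per controller and one per device - which are collected
 * into the "statistics" attribute groups defined below, so userspace can
 * read counters such as <device>/statistics/bytes_tx.
 */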

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

static void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
					      struct spi_transfer *xfer,
					      struct spi_controller *ctlr)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove) {
		int ret;

		ret = sdrv->remove(to_spi_device(dev));
		if (ret)
			dev_warn(dev,
				 "Failed to unbind driver (%pe), ignoring\n",
				 ERR_PTR(ret));
	}

	dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases not of:
	 * modaliases for DT so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				for (spi_id = sdrv->id_table; spi_id->name[0];
				     spi_id++)
					if (strcmp(spi_id->name, of_name) == 0)
						break;

				if (spi_id->name[0])
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
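
/*
 * Illustrative sketch (not part of the original file; all names below are
 * hypothetical): a driver that provides both an spi_device_id table and an
 * OF match table, so the check above finds a matching id for every
 * compatible entry and module autoloading works for DT and legacy boards:
 *
 *	static const struct spi_device_id foo_spi_ids[] = {
 *		{ "foo-chip" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_spi_ids);
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo-chip" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name		= "foo-chip",
 *			.of_match_table	= foo_of_match,
 *		},
 *		.id_table	= foo_spi_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 */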

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into board-specific code like
 * arch/.../mach.../board-YYY.c with other readonly (flashable) information
 * about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_controller list, and their matching process
 * also used to protect object of type struct idr
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
static struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;
	spi->mode = ctlr->buswidth_override_bits;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.
	 */
	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
			spi->chip_select);
		return status;
	}

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	/* Descriptors take precedence */
	if (ctlr->cs_gpiods)
		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
	else if (ctlr->cs_gpios)
		spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
static int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	mutex_lock(&ctlr->add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);
	return status;
}

static int spi_add_device_locked(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	WARN_ON(!mutex_is_locked(&ctlr->add_lock));
	return __spi_add_device(spi);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device *proxy;
	int status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
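
/*
 * Illustrative sketch (not from the original file; names are hypothetical):
 * an adapter driver that learns about a chip at runtime, out-of-band, could
 * instantiate it like this:
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "foo-sensor",
 *		.max_speed_hz	= 2 * 1000 * 1000,
 *		.chip_select	= 1,
 *		.mode		= SPI_MODE_0,
 *	};
 *	struct spi_device *proxy = spi_new_device(ctlr, &chip);
 *
 *	if (!proxy)
 *		dev_warn(&ctlr->dev, "could not add foo-sensor\n");
 */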

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
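
/*
 * Illustrative sketch (not from the original file; names are hypothetical):
 * a board file typically declares its hard-wired devices once, early in boot:
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo-flash",
 *			.max_speed_hz	= 20 * 1000 * 1000,
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */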

/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
			   size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}

/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res:     the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all spi resources for this message
 * @ctlr:    the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && (spi->controller->last_cs_enable == enable) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs_enable = enable;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
	    !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_hold, NULL);
	}

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
		if (!(spi->mode & SPI_NO_CS)) {
			if (spi->cs_gpiod) {
				/*
				 * Historically ACPI has no means of expressing the GPIO
				 * polarity and thus the SPISerialBus() resource defines it
				 * on the per-chip basis. In order to avoid a chain of
				 * negations, the GPIO polarity is considered being Active
				 * High. Even for the cases when _DSD() is involved (in the
				 * updated versions of ACPI) the GPIO CS polarity must be
				 * defined Active High to avoid ambiguity. That's why we use
				 * enable, that takes SPI_CS_HIGH into account.
				 */
				if (has_acpi_companion(&spi->dev))
					gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
				else
					/* Polarity handled by GPIO library */
					gpiod_set_value_cansleep(spi->cs_gpiod, activate);
			} else {
				/*
				 * Invert the enable line, as active low is
				 * default for SPI.
				 */
				gpio_set_value_cansleep(spi->cs_gpio, !enable);
			}
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
	    !spi->controller->set_cs_timing) {
		if (!activate)
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
						      (LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	ctlr->cur_msg_mapped = false;

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore tx_buf or rx_buf to their original NULL value if
		 * they were replaced with the controller's dummy buffers.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * use respective multiplier, but before the division,
		 * otherwise we may get 0 for short transfers.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);
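
		/*
		 * Worked example (illustrative, not from the original file):
		 * a 1024 byte transfer at 1 MHz gives
		 * ms = 8 * 1000 * 1024 / 1000000, i.e. roughly 8 ms, before
		 * the doubling and 200 ms tolerance applied below.
		 */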

		/*
		 * Increase it twice and add 200 ms tolerance, use
		 * predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If there is unknown effective speed, approximate it
		 * by underestimating with half of the requested hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);
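
/*
 * Worked example (illustrative, not from the original file): a delay of
 * { .value = 4, .unit = SPI_DELAY_UNIT_SCK } on a transfer running at an
 * effective 10 MHz converts in spi_delay_to_ns() to
 * 4 * DIV_ROUND_UP(1000000000, 10000000) = 400 ns, which
 * _spi_transfer_delay_ns() then executes with ndelay(400).
 */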

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				if (ctlr->cur_msg_mapped &&
				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				spi_set_cs(msg->spi, true, false);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
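
/*
 * Illustrative sketch (not from the original file; names are hypothetical):
 * a driver whose transfer_one() returned a positive value (transfer still in
 * progress) typically completes it from its interrupt handler:
 *
 *	static irqreturn_t foo_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		foo_drain_fifo(ctlr);
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */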

static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
	if (ctlr->auto_runtime_pm) {
		pm_runtime_mark_last_busy(ctlr->dev.parent);
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
}

/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	struct spi_transfer *xfer;
	struct spi_message *msg;
	bool was_busy = false;
	unsigned long flags;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (ctlr->idling) {
		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy) {
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		/* Defer any non-atomic teardown to the thread */
		if (!in_kthread) {
			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
			    !ctlr->unprepare_transfer_hardware) {
				spi_idle_runtime_pm(ctlr);
				ctlr->busy = false;
				trace_spi_controller_idle(ctlr);
			} else {
				kthread_queue_work(ctlr->kworker,
						   &ctlr->pump_messages);
			}
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		ctlr->busy = false;
		ctlr->idling = true;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->idling = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
	ctlr->cur_msg = msg;

	list_del_init(&msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	mutex_lock(&ctlr->io_mutex);

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			mutex_unlock(&ctlr->io_mutex);
			return;
		}
	}

	trace_spi_message_start(msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			goto out;
		}
		ctlr->cur_msg_prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		goto out;
	}

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	ret = ctlr->transfer_one_message(ctlr, msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}

/**
 * spi_take_timestamp_pre - helper for drivers to collect the beginning of the
 *			    TX timestamp for the requested byte from the SPI
 *			    transfer. The frequency with which this function
 *			    must be called (once per word, once for the whole
 *			    transfer, once per batch of words etc) is arbitrary
 *			    as long as the @tx buffer offset is greater than or
 *			    equal to the requested byte at the time of the
 *			    call. The timestamp is only taken once, at the
 *			    first such call. It is assumed that the driver
 *			    advances its @tx buffer pointer monotonically.
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
 *	      transfer, for less jitter in time measurement. Only compatible
 *	      with PIO drivers. If true, must follow up with
 *	      spi_take_timestamp_post or otherwise system will crash.
 *	      WARNING: for fully predictable results, the CPU frequency must
 *	      also be under control (governor).
 */
void spi_take_timestamp_pre(struct spi_controller *ctlr,
			    struct spi_transfer *xfer,
			    size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress > xfer->ptp_sts_word_pre)
		return;

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_pre = progress;

	if (irqs_off) {
		local_irq_save(ctlr->irq_flags);
		preempt_disable();
	}

	ptp_read_system_prets(xfer->ptp_sts);
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);

/**
 * spi_take_timestamp_post - helper for drivers to collect the end of the
 *			     TX timestamp for the requested byte from the SPI
 *			     transfer. Can be called with an arbitrary
 *			     frequency: only the first call where @tx exceeds
 *			     or is equal to the requested word will be
 *			     timestamped.
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
 */
void spi_take_timestamp_post(struct spi_controller *ctlr,
			     struct spi_transfer *xfer,
			     size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress < xfer->ptp_sts_word_post)
		return;

	ptp_read_system_postts(xfer->ptp_sts);

	if (irqs_off) {
		local_irq_restore(ctlr->irq_flags);
		preempt_enable();
	}

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_post = progress;

	xfer->timestamped = true;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
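
/*
 * Illustrative sketch (not from the original file; names are hypothetical):
 * a PIO driver that supports transfer timestamping brackets each word it
 * pushes with the pre/post helpers above, passing the number of words
 * already transferred as @progress:
 *
 *	for (i = 0; i < nwords; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		foo_write_word(ctlr, i);
 *		spi_take_timestamp_post(ctlr, xfer, i + 1, false);
 *	}
 */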

/**
 * spi_set_thread_rt - set the controller to pump at realtime priority
 * @ctlr: controller to boost priority of
 *
 * This can be called because the controller requested realtime priority
 * (by setting the ->rt value before calling spi_register_controller()) or
 * because a device on the bus said that its transfers needed realtime
 * priority.
 *
 * NOTE: at the moment if any device on a bus says it needs realtime then
 * the thread will be at realtime priority for all transfers on that
 * controller.  If this eventually becomes a problem we may see if we can
 * find a way to boost the priority only temporarily during relevant
 * transfers.
 */
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
	dev_info(&ctlr->dev,
		 "will run message pump with realtime priority\n");
	sched_set_fifo(ctlr->kworker->task);
}

static int spi_init_queue(struct spi_controller *ctlr)
{
	ctlr->running = false;
	ctlr->busy = false;

	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker)) {
		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
		return PTR_ERR(ctlr->kworker);
	}

	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt)
		spi_set_thread_rt(ctlr);

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_transfer *xfer;
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	mesg = ctlr->cur_msg;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}
	}

	if (unlikely(ctlr->ptp_sts_supported))
		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);

	spi_unmap_msg(ctlr, mesg);

	/* In the prepare_message callback the spi bus has the opportunity to
	 * split a transfer to smaller chunks.
	 * Release the split transfers here since spi_map_msg is done on the
	 * split transfers.
	 */
1912 */ 1913 spi_res_release(ctlr, mesg); 1914 1915 if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { 1916 ret = ctlr->unprepare_message(ctlr, mesg); 1917 if (ret) { 1918 dev_err(&ctlr->dev, "failed to unprepare message: %d\n", 1919 ret); 1920 } 1921 } 1922 1923 spin_lock_irqsave(&ctlr->queue_lock, flags); 1924 ctlr->cur_msg = NULL; 1925 ctlr->cur_msg_prepared = false; 1926 ctlr->fallback = false; 1927 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); 1928 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1929 1930 trace_spi_message_done(mesg); 1931 1932 mesg->state = NULL; 1933 if (mesg->complete) 1934 mesg->complete(mesg->context); 1935 } 1936 EXPORT_SYMBOL_GPL(spi_finalize_current_message); 1937 1938 static int spi_start_queue(struct spi_controller *ctlr) 1939 { 1940 unsigned long flags; 1941 1942 spin_lock_irqsave(&ctlr->queue_lock, flags); 1943 1944 if (ctlr->running || ctlr->busy) { 1945 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1946 return -EBUSY; 1947 } 1948 1949 ctlr->running = true; 1950 ctlr->cur_msg = NULL; 1951 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1952 1953 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); 1954 1955 return 0; 1956 } 1957 1958 static int spi_stop_queue(struct spi_controller *ctlr) 1959 { 1960 unsigned long flags; 1961 unsigned limit = 500; 1962 int ret = 0; 1963 1964 spin_lock_irqsave(&ctlr->queue_lock, flags); 1965 1966 /* 1967 * This is a bit lame, but is optimized for the common execution path. 1968 * A wait_queue on the ctlr->busy could be used, but then the common 1969 * execution path (pump_messages) would be required to call wake_up or 1970 * friends on every SPI message. Do this instead. 1971 */ 1972 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) { 1973 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1974 usleep_range(10000, 11000); 1975 spin_lock_irqsave(&ctlr->queue_lock, flags); 1976 } 1977 1978 if (!list_empty(&ctlr->queue) || ctlr->busy) 1979 ret = -EBUSY; 1980 else 1981 ctlr->running = false; 1982 1983 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1984 1985 if (ret) { 1986 dev_warn(&ctlr->dev, "could not stop message queue\n"); 1987 return ret; 1988 } 1989 return ret; 1990 } 1991 1992 static int spi_destroy_queue(struct spi_controller *ctlr) 1993 { 1994 int ret; 1995 1996 ret = spi_stop_queue(ctlr); 1997 1998 /* 1999 * kthread_flush_worker will block until all work is done. 2000 * If the reason that stop_queue timed out is that the work will never 2001 * finish, then it does no good to call flush/stop thread, so 2002 * return anyway. 
2003 */ 2004 if (ret) { 2005 dev_err(&ctlr->dev, "problem destroying queue\n"); 2006 return ret; 2007 } 2008 2009 kthread_destroy_worker(ctlr->kworker); 2010 2011 return 0; 2012 } 2013 2014 static int __spi_queued_transfer(struct spi_device *spi, 2015 struct spi_message *msg, 2016 bool need_pump) 2017 { 2018 struct spi_controller *ctlr = spi->controller; 2019 unsigned long flags; 2020 2021 spin_lock_irqsave(&ctlr->queue_lock, flags); 2022 2023 if (!ctlr->running) { 2024 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2025 return -ESHUTDOWN; 2026 } 2027 msg->actual_length = 0; 2028 msg->status = -EINPROGRESS; 2029 2030 list_add_tail(&msg->queue, &ctlr->queue); 2031 if (!ctlr->busy && need_pump) 2032 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); 2033 2034 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2035 return 0; 2036 } 2037 2038 /** 2039 * spi_queued_transfer - transfer function for queued transfers 2040 * @spi: spi device which is requesting transfer 2041 * @msg: spi message which is to handled is queued to driver queue 2042 * 2043 * Return: zero on success, else a negative error code. 2044 */ 2045 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) 2046 { 2047 return __spi_queued_transfer(spi, msg, true); 2048 } 2049 2050 static int spi_controller_initialize_queue(struct spi_controller *ctlr) 2051 { 2052 int ret; 2053 2054 ctlr->transfer = spi_queued_transfer; 2055 if (!ctlr->transfer_one_message) 2056 ctlr->transfer_one_message = spi_transfer_one_message; 2057 2058 /* Initialize and start queue */ 2059 ret = spi_init_queue(ctlr); 2060 if (ret) { 2061 dev_err(&ctlr->dev, "problem initializing queue\n"); 2062 goto err_init_queue; 2063 } 2064 ctlr->queued = true; 2065 ret = spi_start_queue(ctlr); 2066 if (ret) { 2067 dev_err(&ctlr->dev, "problem starting queue\n"); 2068 goto err_start_queue; 2069 } 2070 2071 return 0; 2072 2073 err_start_queue: 2074 spi_destroy_queue(ctlr); 2075 err_init_queue: 2076 return ret; 2077 } 2078 2079 /** 2080 * spi_flush_queue - Send all pending messages in the queue from the callers' 2081 * context 2082 * @ctlr: controller to process queue for 2083 * 2084 * This should be used when one wants to ensure all pending messages have been 2085 * sent before doing something. Is used by the spi-mem code to make sure SPI 2086 * memory operations do not preempt regular SPI transfers that have been queued 2087 * before the spi-mem operation. 2088 */ 2089 void spi_flush_queue(struct spi_controller *ctlr) 2090 { 2091 if (ctlr->transfer == spi_queued_transfer) 2092 __spi_pump_messages(ctlr, false); 2093 } 2094 2095 /*-------------------------------------------------------------------------*/ 2096 2097 #if defined(CONFIG_OF) 2098 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, 2099 struct device_node *nc) 2100 { 2101 u32 value; 2102 int rc; 2103 2104 /* Mode (clock phase/polarity/etc.) 
*/ 2105 if (of_property_read_bool(nc, "spi-cpha")) 2106 spi->mode |= SPI_CPHA; 2107 if (of_property_read_bool(nc, "spi-cpol")) 2108 spi->mode |= SPI_CPOL; 2109 if (of_property_read_bool(nc, "spi-3wire")) 2110 spi->mode |= SPI_3WIRE; 2111 if (of_property_read_bool(nc, "spi-lsb-first")) 2112 spi->mode |= SPI_LSB_FIRST; 2113 if (of_property_read_bool(nc, "spi-cs-high")) 2114 spi->mode |= SPI_CS_HIGH; 2115 2116 /* Device DUAL/QUAD mode */ 2117 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 2118 switch (value) { 2119 case 0: 2120 spi->mode |= SPI_NO_TX; 2121 break; 2122 case 1: 2123 break; 2124 case 2: 2125 spi->mode |= SPI_TX_DUAL; 2126 break; 2127 case 4: 2128 spi->mode |= SPI_TX_QUAD; 2129 break; 2130 case 8: 2131 spi->mode |= SPI_TX_OCTAL; 2132 break; 2133 default: 2134 dev_warn(&ctlr->dev, 2135 "spi-tx-bus-width %d not supported\n", 2136 value); 2137 break; 2138 } 2139 } 2140 2141 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 2142 switch (value) { 2143 case 0: 2144 spi->mode |= SPI_NO_RX; 2145 break; 2146 case 1: 2147 break; 2148 case 2: 2149 spi->mode |= SPI_RX_DUAL; 2150 break; 2151 case 4: 2152 spi->mode |= SPI_RX_QUAD; 2153 break; 2154 case 8: 2155 spi->mode |= SPI_RX_OCTAL; 2156 break; 2157 default: 2158 dev_warn(&ctlr->dev, 2159 "spi-rx-bus-width %d not supported\n", 2160 value); 2161 break; 2162 } 2163 } 2164 2165 if (spi_controller_is_slave(ctlr)) { 2166 if (!of_node_name_eq(nc, "slave")) { 2167 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n", 2168 nc); 2169 return -EINVAL; 2170 } 2171 return 0; 2172 } 2173 2174 /* Device address */ 2175 rc = of_property_read_u32(nc, "reg", &value); 2176 if (rc) { 2177 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n", 2178 nc, rc); 2179 return rc; 2180 } 2181 spi->chip_select = value; 2182 2183 /* Device speed */ 2184 if (!of_property_read_u32(nc, "spi-max-frequency", &value)) 2185 spi->max_speed_hz = value; 2186 2187 return 0; 2188 } 2189 2190 static struct spi_device * 2191 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc) 2192 { 2193 struct spi_device *spi; 2194 int rc; 2195 2196 /* Alloc an spi_device */ 2197 spi = spi_alloc_device(ctlr); 2198 if (!spi) { 2199 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc); 2200 rc = -ENOMEM; 2201 goto err_out; 2202 } 2203 2204 /* Select device driver */ 2205 rc = of_modalias_node(nc, spi->modalias, 2206 sizeof(spi->modalias)); 2207 if (rc < 0) { 2208 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc); 2209 goto err_out; 2210 } 2211 2212 rc = of_spi_parse_dt(ctlr, spi, nc); 2213 if (rc) 2214 goto err_out; 2215 2216 /* Store a pointer to the node in the device structure */ 2217 of_node_get(nc); 2218 spi->dev.of_node = nc; 2219 spi->dev.fwnode = of_fwnode_handle(nc); 2220 2221 /* Register the new device */ 2222 rc = spi_add_device(spi); 2223 if (rc) { 2224 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc); 2225 goto err_of_node_put; 2226 } 2227 2228 return spi; 2229 2230 err_of_node_put: 2231 of_node_put(nc); 2232 err_out: 2233 spi_dev_put(spi); 2234 return ERR_PTR(rc); 2235 } 2236 2237 /** 2238 * of_register_spi_devices() - Register child devices onto the SPI bus 2239 * @ctlr: Pointer to spi_controller device 2240 * 2241 * Registers an spi_device for each child node of controller node which 2242 * represents a valid SPI slave. 
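 *
 * A valid slave child node might look like this (illustrative devicetree
 * fragment, not taken from a specific board):
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;
 *		spi-max-frequency = <20000000>;
 *		spi-tx-bus-width = <4>;
 *	};
 *
 * The "reg", "spi-max-frequency" and bus-width properties are parsed by
 * of_spi_parse_dt() above.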
2243 */ 2244 static void of_register_spi_devices(struct spi_controller *ctlr) 2245 { 2246 struct spi_device *spi; 2247 struct device_node *nc; 2248 2249 if (!ctlr->dev.of_node) 2250 return; 2251 2252 for_each_available_child_of_node(ctlr->dev.of_node, nc) { 2253 if (of_node_test_and_set_flag(nc, OF_POPULATED)) 2254 continue; 2255 spi = of_register_spi_device(ctlr, nc); 2256 if (IS_ERR(spi)) { 2257 dev_warn(&ctlr->dev, 2258 "Failed to create SPI device for %pOF\n", nc); 2259 of_node_clear_flag(nc, OF_POPULATED); 2260 } 2261 } 2262 } 2263 #else 2264 static void of_register_spi_devices(struct spi_controller *ctlr) { } 2265 #endif 2266 2267 /** 2268 * spi_new_ancillary_device() - Register ancillary SPI device 2269 * @spi: Pointer to the main SPI device registering the ancillary device 2270 * @chip_select: Chip Select of the ancillary device 2271 * 2272 * Register an ancillary SPI device; for example some chips have a chip-select 2273 * for normal device usage and another one for setup/firmware upload. 2274 * 2275 * This may only be called from main SPI device's probe routine. 2276 * 2277 * Return: 0 on success; negative errno on failure 2278 */ 2279 struct spi_device *spi_new_ancillary_device(struct spi_device *spi, 2280 u8 chip_select) 2281 { 2282 struct spi_device *ancillary; 2283 int rc = 0; 2284 2285 /* Alloc an spi_device */ 2286 ancillary = spi_alloc_device(spi->controller); 2287 if (!ancillary) { 2288 rc = -ENOMEM; 2289 goto err_out; 2290 } 2291 2292 strlcpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias)); 2293 2294 /* Use provided chip-select for ancillary device */ 2295 ancillary->chip_select = chip_select; 2296 2297 /* Take over SPI mode/speed from SPI main device */ 2298 ancillary->max_speed_hz = spi->max_speed_hz; 2299 ancillary->mode = spi->mode; 2300 2301 /* Register the new device */ 2302 rc = spi_add_device_locked(ancillary); 2303 if (rc) { 2304 dev_err(&spi->dev, "failed to register ancillary device\n"); 2305 goto err_out; 2306 } 2307 2308 return ancillary; 2309 2310 err_out: 2311 spi_dev_put(ancillary); 2312 return ERR_PTR(rc); 2313 } 2314 EXPORT_SYMBOL_GPL(spi_new_ancillary_device); 2315 2316 #ifdef CONFIG_ACPI 2317 struct acpi_spi_lookup { 2318 struct spi_controller *ctlr; 2319 u32 max_speed_hz; 2320 u32 mode; 2321 int irq; 2322 u8 bits_per_word; 2323 u8 chip_select; 2324 }; 2325 2326 static void acpi_spi_parse_apple_properties(struct acpi_device *dev, 2327 struct acpi_spi_lookup *lookup) 2328 { 2329 const union acpi_object *obj; 2330 2331 if (!x86_apple_machine) 2332 return; 2333 2334 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj) 2335 && obj->buffer.length >= 4) 2336 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer; 2337 2338 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj) 2339 && obj->buffer.length == 8) 2340 lookup->bits_per_word = *(u64 *)obj->buffer.pointer; 2341 2342 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj) 2343 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer) 2344 lookup->mode |= SPI_LSB_FIRST; 2345 2346 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj) 2347 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 2348 lookup->mode |= SPI_CPOL; 2349 2350 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj) 2351 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 2352 lookup->mode |= SPI_CPHA; 2353 } 2354 2355 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 2356 { 2357 struct 
acpi_spi_lookup *lookup = data; 2358 struct spi_controller *ctlr = lookup->ctlr; 2359 2360 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 2361 struct acpi_resource_spi_serialbus *sb; 2362 acpi_handle parent_handle; 2363 acpi_status status; 2364 2365 sb = &ares->data.spi_serial_bus; 2366 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 2367 2368 status = acpi_get_handle(NULL, 2369 sb->resource_source.string_ptr, 2370 &parent_handle); 2371 2372 if (ACPI_FAILURE(status) || 2373 ACPI_HANDLE(ctlr->dev.parent) != parent_handle) 2374 return -ENODEV; 2375 2376 /* 2377 * ACPI DeviceSelection numbering is handled by the 2378 * host controller driver in Windows and can vary 2379 * from driver to driver. In Linux we always expect 2380 * 0 .. max - 1 so we need to ask the driver to 2381 * translate between the two schemes. 2382 */ 2383 if (ctlr->fw_translate_cs) { 2384 int cs = ctlr->fw_translate_cs(ctlr, 2385 sb->device_selection); 2386 if (cs < 0) 2387 return cs; 2388 lookup->chip_select = cs; 2389 } else { 2390 lookup->chip_select = sb->device_selection; 2391 } 2392 2393 lookup->max_speed_hz = sb->connection_speed; 2394 lookup->bits_per_word = sb->data_bit_length; 2395 2396 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) 2397 lookup->mode |= SPI_CPHA; 2398 if (sb->clock_polarity == ACPI_SPI_START_HIGH) 2399 lookup->mode |= SPI_CPOL; 2400 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) 2401 lookup->mode |= SPI_CS_HIGH; 2402 } 2403 } else if (lookup->irq < 0) { 2404 struct resource r; 2405 2406 if (acpi_dev_resource_interrupt(ares, 0, &r)) 2407 lookup->irq = r.start; 2408 } 2409 2410 /* Always tell the ACPI core to skip this resource */ 2411 return 1; 2412 } 2413 2414 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, 2415 struct acpi_device *adev) 2416 { 2417 acpi_handle parent_handle = NULL; 2418 struct list_head resource_list; 2419 struct acpi_spi_lookup lookup = {}; 2420 struct spi_device *spi; 2421 int ret; 2422 2423 if (acpi_bus_get_status(adev) || !adev->status.present || 2424 acpi_device_enumerated(adev)) 2425 return AE_OK; 2426 2427 lookup.ctlr = ctlr; 2428 lookup.irq = -1; 2429 2430 INIT_LIST_HEAD(&resource_list); 2431 ret = acpi_dev_get_resources(adev, &resource_list, 2432 acpi_spi_add_resource, &lookup); 2433 acpi_dev_free_resource_list(&resource_list); 2434 2435 if (ret < 0) 2436 /* found SPI in _CRS but it points to another controller */ 2437 return AE_OK; 2438 2439 if (!lookup.max_speed_hz && 2440 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) && 2441 ACPI_HANDLE(ctlr->dev.parent) == parent_handle) { 2442 /* Apple does not use _CRS but nested devices for SPI slaves */ 2443 acpi_spi_parse_apple_properties(adev, &lookup); 2444 } 2445 2446 if (!lookup.max_speed_hz) 2447 return AE_OK; 2448 2449 spi = spi_alloc_device(ctlr); 2450 if (!spi) { 2451 dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n", 2452 dev_name(&adev->dev)); 2453 return AE_NO_MEMORY; 2454 } 2455 2456 2457 ACPI_COMPANION_SET(&spi->dev, adev); 2458 spi->max_speed_hz = lookup.max_speed_hz; 2459 spi->mode |= lookup.mode; 2460 spi->irq = lookup.irq; 2461 spi->bits_per_word = lookup.bits_per_word; 2462 spi->chip_select = lookup.chip_select; 2463 2464 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, 2465 sizeof(spi->modalias)); 2466 2467 if (spi->irq < 0) 2468 spi->irq = acpi_dev_gpio_irq_get(adev, 0); 2469 2470 acpi_device_set_enumerated(adev); 2471 2472 adev->power.flags.ignore_parent = true; 2473 if (spi_add_device(spi)) { 2474 adev->power.flags.ignore_parent = false; 
2475 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n", 2476 dev_name(&adev->dev)); 2477 spi_dev_put(spi); 2478 } 2479 2480 return AE_OK; 2481 } 2482 2483 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, 2484 void *data, void **return_value) 2485 { 2486 struct spi_controller *ctlr = data; 2487 struct acpi_device *adev; 2488 2489 if (acpi_bus_get_device(handle, &adev)) 2490 return AE_OK; 2491 2492 return acpi_register_spi_device(ctlr, adev); 2493 } 2494 2495 #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32 2496 2497 static void acpi_register_spi_devices(struct spi_controller *ctlr) 2498 { 2499 acpi_status status; 2500 acpi_handle handle; 2501 2502 handle = ACPI_HANDLE(ctlr->dev.parent); 2503 if (!handle) 2504 return; 2505 2506 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 2507 SPI_ACPI_ENUMERATE_MAX_DEPTH, 2508 acpi_spi_add_device, NULL, ctlr, NULL); 2509 if (ACPI_FAILURE(status)) 2510 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n"); 2511 } 2512 #else 2513 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {} 2514 #endif /* CONFIG_ACPI */ 2515 2516 static void spi_controller_release(struct device *dev) 2517 { 2518 struct spi_controller *ctlr; 2519 2520 ctlr = container_of(dev, struct spi_controller, dev); 2521 kfree(ctlr); 2522 } 2523 2524 static struct class spi_master_class = { 2525 .name = "spi_master", 2526 .owner = THIS_MODULE, 2527 .dev_release = spi_controller_release, 2528 .dev_groups = spi_master_groups, 2529 }; 2530 2531 #ifdef CONFIG_SPI_SLAVE 2532 /** 2533 * spi_slave_abort - abort the ongoing transfer request on an SPI slave 2534 * controller 2535 * @spi: device used for the current transfer 2536 */ 2537 int spi_slave_abort(struct spi_device *spi) 2538 { 2539 struct spi_controller *ctlr = spi->controller; 2540 2541 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort) 2542 return ctlr->slave_abort(ctlr); 2543 2544 return -ENOTSUPP; 2545 } 2546 EXPORT_SYMBOL_GPL(spi_slave_abort); 2547 2548 static int match_true(struct device *dev, void *data) 2549 { 2550 return 1; 2551 } 2552 2553 static ssize_t slave_show(struct device *dev, struct device_attribute *attr, 2554 char *buf) 2555 { 2556 struct spi_controller *ctlr = container_of(dev, struct spi_controller, 2557 dev); 2558 struct device *child; 2559 2560 child = device_find_child(&ctlr->dev, NULL, match_true); 2561 return sprintf(buf, "%s\n", 2562 child ? 
to_spi_device(child)->modalias : NULL); 2563 } 2564 2565 static ssize_t slave_store(struct device *dev, struct device_attribute *attr, 2566 const char *buf, size_t count) 2567 { 2568 struct spi_controller *ctlr = container_of(dev, struct spi_controller, 2569 dev); 2570 struct spi_device *spi; 2571 struct device *child; 2572 char name[32]; 2573 int rc; 2574 2575 rc = sscanf(buf, "%31s", name); 2576 if (rc != 1 || !name[0]) 2577 return -EINVAL; 2578 2579 child = device_find_child(&ctlr->dev, NULL, match_true); 2580 if (child) { 2581 /* Remove registered slave */ 2582 device_unregister(child); 2583 put_device(child); 2584 } 2585 2586 if (strcmp(name, "(null)")) { 2587 /* Register new slave */ 2588 spi = spi_alloc_device(ctlr); 2589 if (!spi) 2590 return -ENOMEM; 2591 2592 strlcpy(spi->modalias, name, sizeof(spi->modalias)); 2593 2594 rc = spi_add_device(spi); 2595 if (rc) { 2596 spi_dev_put(spi); 2597 return rc; 2598 } 2599 } 2600 2601 return count; 2602 } 2603 2604 static DEVICE_ATTR_RW(slave); 2605 2606 static struct attribute *spi_slave_attrs[] = { 2607 &dev_attr_slave.attr, 2608 NULL, 2609 }; 2610 2611 static const struct attribute_group spi_slave_group = { 2612 .attrs = spi_slave_attrs, 2613 }; 2614 2615 static const struct attribute_group *spi_slave_groups[] = { 2616 &spi_controller_statistics_group, 2617 &spi_slave_group, 2618 NULL, 2619 }; 2620 2621 static struct class spi_slave_class = { 2622 .name = "spi_slave", 2623 .owner = THIS_MODULE, 2624 .dev_release = spi_controller_release, 2625 .dev_groups = spi_slave_groups, 2626 }; 2627 #else 2628 extern struct class spi_slave_class; /* dummy */ 2629 #endif 2630 2631 /** 2632 * __spi_alloc_controller - allocate an SPI master or slave controller 2633 * @dev: the controller, possibly using the platform_bus 2634 * @size: how much zeroed driver-private data to allocate; the pointer to this 2635 * memory is in the driver_data field of the returned device, accessible 2636 * with spi_controller_get_devdata(); the memory is cacheline aligned; 2637 * drivers granting DMA access to portions of their private data need to 2638 * round up @size using ALIGN(size, dma_get_cache_alignment()). 2639 * @slave: flag indicating whether to allocate an SPI master (false) or SPI 2640 * slave (true) controller 2641 * Context: can sleep 2642 * 2643 * This call is used only by SPI controller drivers, which are the 2644 * only ones directly touching chip registers. It's how they allocate 2645 * an spi_controller structure, prior to calling spi_register_controller(). 2646 * 2647 * This must be called from context that can sleep. 2648 * 2649 * The caller is responsible for assigning the bus number and initializing the 2650 * controller's methods before calling spi_register_controller(); and (after 2651 * errors adding the device) calling spi_controller_put() to prevent a memory 2652 * leak. 2653 * 2654 * Return: the SPI controller structure on success, else NULL. 
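 *
 * For example (an illustrative sketch of a hypothetical driver's probe()):
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct my_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);
 *	... initialize priv and the controller's methods ...
 *	ret = spi_register_controller(ctlr);
 *	if (ret)
 *		spi_controller_put(ctlr);
 *
 * where struct my_priv is the driver's private state.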
2655 */ 2656 struct spi_controller *__spi_alloc_controller(struct device *dev, 2657 unsigned int size, bool slave) 2658 { 2659 struct spi_controller *ctlr; 2660 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment()); 2661 2662 if (!dev) 2663 return NULL; 2664 2665 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL); 2666 if (!ctlr) 2667 return NULL; 2668 2669 device_initialize(&ctlr->dev); 2670 INIT_LIST_HEAD(&ctlr->queue); 2671 spin_lock_init(&ctlr->queue_lock); 2672 spin_lock_init(&ctlr->bus_lock_spinlock); 2673 mutex_init(&ctlr->bus_lock_mutex); 2674 mutex_init(&ctlr->io_mutex); 2675 mutex_init(&ctlr->add_lock); 2676 ctlr->bus_num = -1; 2677 ctlr->num_chipselect = 1; 2678 ctlr->slave = slave; 2679 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave) 2680 ctlr->dev.class = &spi_slave_class; 2681 else 2682 ctlr->dev.class = &spi_master_class; 2683 ctlr->dev.parent = dev; 2684 pm_suspend_ignore_children(&ctlr->dev, true); 2685 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size); 2686 2687 return ctlr; 2688 } 2689 EXPORT_SYMBOL_GPL(__spi_alloc_controller); 2690 2691 static void devm_spi_release_controller(struct device *dev, void *ctlr) 2692 { 2693 spi_controller_put(*(struct spi_controller **)ctlr); 2694 } 2695 2696 /** 2697 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller() 2698 * @dev: physical device of SPI controller 2699 * @size: how much zeroed driver-private data to allocate 2700 * @slave: whether to allocate an SPI master (false) or SPI slave (true) 2701 * Context: can sleep 2702 * 2703 * Allocate an SPI controller and automatically release a reference on it 2704 * when @dev is unbound from its driver. Drivers are thus relieved from 2705 * having to call spi_controller_put(). 2706 * 2707 * The arguments to this function are identical to __spi_alloc_controller(). 2708 * 2709 * Return: the SPI controller structure on success, else NULL. 
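 *
 * For example (illustrative, hypothetical driver), the managed variant keeps
 * probe() error handling trivial:
 *
 *	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct my_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	return devm_spi_register_controller(&pdev->dev, ctlr);
 *
 * No spi_controller_put() is needed on any failure path.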
2710 */ 2711 struct spi_controller *__devm_spi_alloc_controller(struct device *dev, 2712 unsigned int size, 2713 bool slave) 2714 { 2715 struct spi_controller **ptr, *ctlr; 2716 2717 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr), 2718 GFP_KERNEL); 2719 if (!ptr) 2720 return NULL; 2721 2722 ctlr = __spi_alloc_controller(dev, size, slave); 2723 if (ctlr) { 2724 ctlr->devm_allocated = true; 2725 *ptr = ctlr; 2726 devres_add(dev, ptr); 2727 } else { 2728 devres_free(ptr); 2729 } 2730 2731 return ctlr; 2732 } 2733 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller); 2734 2735 #ifdef CONFIG_OF 2736 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr) 2737 { 2738 int nb, i, *cs; 2739 struct device_node *np = ctlr->dev.of_node; 2740 2741 if (!np) 2742 return 0; 2743 2744 nb = of_gpio_named_count(np, "cs-gpios"); 2745 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); 2746 2747 /* Return error only for an incorrectly formed cs-gpios property */ 2748 if (nb == 0 || nb == -ENOENT) 2749 return 0; 2750 else if (nb < 0) 2751 return nb; 2752 2753 cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int), 2754 GFP_KERNEL); 2755 ctlr->cs_gpios = cs; 2756 2757 if (!ctlr->cs_gpios) 2758 return -ENOMEM; 2759 2760 for (i = 0; i < ctlr->num_chipselect; i++) 2761 cs[i] = -ENOENT; 2762 2763 for (i = 0; i < nb; i++) 2764 cs[i] = of_get_named_gpio(np, "cs-gpios", i); 2765 2766 return 0; 2767 } 2768 #else 2769 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr) 2770 { 2771 return 0; 2772 } 2773 #endif 2774 2775 /** 2776 * spi_get_gpio_descs() - grab chip select GPIOs for the master 2777 * @ctlr: The SPI master to grab GPIO descriptors for 2778 */ 2779 static int spi_get_gpio_descs(struct spi_controller *ctlr) 2780 { 2781 int nb, i; 2782 struct gpio_desc **cs; 2783 struct device *dev = &ctlr->dev; 2784 unsigned long native_cs_mask = 0; 2785 unsigned int num_cs_gpios = 0; 2786 2787 nb = gpiod_count(dev, "cs"); 2788 if (nb < 0) { 2789 /* No GPIOs at all is fine, else return the error */ 2790 if (nb == -ENOENT) 2791 return 0; 2792 return nb; 2793 } 2794 2795 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); 2796 2797 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs), 2798 GFP_KERNEL); 2799 if (!cs) 2800 return -ENOMEM; 2801 ctlr->cs_gpiods = cs; 2802 2803 for (i = 0; i < nb; i++) { 2804 /* 2805 * Most chipselects are active low, the inverted 2806 * semantics are handled by special quirks in gpiolib, 2807 * so initializing them GPIOD_OUT_LOW here means 2808 * "unasserted", in most cases this will drive the physical 2809 * line high. 2810 */ 2811 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i, 2812 GPIOD_OUT_LOW); 2813 if (IS_ERR(cs[i])) 2814 return PTR_ERR(cs[i]); 2815 2816 if (cs[i]) { 2817 /* 2818 * If we find a CS GPIO, name it after the device and 2819 * chip select line. 
2820 */ 2821 char *gpioname; 2822 2823 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d", 2824 dev_name(dev), i); 2825 if (!gpioname) 2826 return -ENOMEM; 2827 gpiod_set_consumer_name(cs[i], gpioname); 2828 num_cs_gpios++; 2829 continue; 2830 } 2831 2832 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) { 2833 dev_err(dev, "Invalid native chip select %d\n", i); 2834 return -EINVAL; 2835 } 2836 native_cs_mask |= BIT(i); 2837 } 2838 2839 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1; 2840 2841 if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios && 2842 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) { 2843 dev_err(dev, "No unused native chip select available\n"); 2844 return -EINVAL; 2845 } 2846 2847 return 0; 2848 } 2849 2850 static int spi_controller_check_ops(struct spi_controller *ctlr) 2851 { 2852 /* 2853 * The controller may implement only the high-level SPI-memory like 2854 * operations if it does not support regular SPI transfers, and this is 2855 * valid use case. 2856 * If ->mem_ops is NULL, we request that at least one of the 2857 * ->transfer_xxx() method be implemented. 2858 */ 2859 if (ctlr->mem_ops) { 2860 if (!ctlr->mem_ops->exec_op) 2861 return -EINVAL; 2862 } else if (!ctlr->transfer && !ctlr->transfer_one && 2863 !ctlr->transfer_one_message) { 2864 return -EINVAL; 2865 } 2866 2867 return 0; 2868 } 2869 2870 /** 2871 * spi_register_controller - register SPI master or slave controller 2872 * @ctlr: initialized master, originally from spi_alloc_master() or 2873 * spi_alloc_slave() 2874 * Context: can sleep 2875 * 2876 * SPI controllers connect to their drivers using some non-SPI bus, 2877 * such as the platform bus. The final stage of probe() in that code 2878 * includes calling spi_register_controller() to hook up to this SPI bus glue. 2879 * 2880 * SPI controllers use board specific (often SOC specific) bus numbers, 2881 * and board-specific addressing for SPI devices combines those numbers 2882 * with chip select numbers. Since SPI does not directly support dynamic 2883 * device identification, boards need configuration tables telling which 2884 * chip is at which address. 2885 * 2886 * This must be called from context that can sleep. It returns zero on 2887 * success, else a negative error code (dropping the controller's refcount). 2888 * After a successful return, the caller is responsible for calling 2889 * spi_unregister_controller(). 2890 * 2891 * Return: zero on success, else a negative error code. 2892 */ 2893 int spi_register_controller(struct spi_controller *ctlr) 2894 { 2895 struct device *dev = ctlr->dev.parent; 2896 struct boardinfo *bi; 2897 int status; 2898 int id, first_dynamic; 2899 2900 if (!dev) 2901 return -ENODEV; 2902 2903 /* 2904 * Make sure all necessary hooks are implemented before registering 2905 * the SPI controller. 2906 */ 2907 status = spi_controller_check_ops(ctlr); 2908 if (status) 2909 return status; 2910 2911 if (ctlr->bus_num >= 0) { 2912 /* devices with a fixed bus num must check-in with the num */ 2913 mutex_lock(&board_lock); 2914 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, 2915 ctlr->bus_num + 1, GFP_KERNEL); 2916 mutex_unlock(&board_lock); 2917 if (WARN(id < 0, "couldn't get idr")) 2918 return id == -ENOSPC ? 
-EBUSY : id; 2919 ctlr->bus_num = id; 2920 } else if (ctlr->dev.of_node) { 2921 /* allocate dynamic bus number using Linux idr */ 2922 id = of_alias_get_id(ctlr->dev.of_node, "spi"); 2923 if (id >= 0) { 2924 ctlr->bus_num = id; 2925 mutex_lock(&board_lock); 2926 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, 2927 ctlr->bus_num + 1, GFP_KERNEL); 2928 mutex_unlock(&board_lock); 2929 if (WARN(id < 0, "couldn't get idr")) 2930 return id == -ENOSPC ? -EBUSY : id; 2931 } 2932 } 2933 if (ctlr->bus_num < 0) { 2934 first_dynamic = of_alias_get_highest_id("spi"); 2935 if (first_dynamic < 0) 2936 first_dynamic = 0; 2937 else 2938 first_dynamic++; 2939 2940 mutex_lock(&board_lock); 2941 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic, 2942 0, GFP_KERNEL); 2943 mutex_unlock(&board_lock); 2944 if (WARN(id < 0, "couldn't get idr")) 2945 return id; 2946 ctlr->bus_num = id; 2947 } 2948 ctlr->bus_lock_flag = 0; 2949 init_completion(&ctlr->xfer_completion); 2950 if (!ctlr->max_dma_len) 2951 ctlr->max_dma_len = INT_MAX; 2952 2953 /* register the device, then userspace will see it. 2954 * registration fails if the bus ID is in use. 2955 */ 2956 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num); 2957 2958 if (!spi_controller_is_slave(ctlr)) { 2959 if (ctlr->use_gpio_descriptors) { 2960 status = spi_get_gpio_descs(ctlr); 2961 if (status) 2962 goto free_bus_id; 2963 /* 2964 * A controller using GPIO descriptors always 2965 * supports SPI_CS_HIGH if need be. 2966 */ 2967 ctlr->mode_bits |= SPI_CS_HIGH; 2968 } else { 2969 /* Legacy code path for GPIOs from DT */ 2970 status = of_spi_get_gpio_numbers(ctlr); 2971 if (status) 2972 goto free_bus_id; 2973 } 2974 } 2975 2976 /* 2977 * Even if it's just one always-selected device, there must 2978 * be at least one chipselect. 2979 */ 2980 if (!ctlr->num_chipselect) { 2981 status = -EINVAL; 2982 goto free_bus_id; 2983 } 2984 2985 status = device_add(&ctlr->dev); 2986 if (status < 0) 2987 goto free_bus_id; 2988 dev_dbg(dev, "registered %s %s\n", 2989 spi_controller_is_slave(ctlr) ? "slave" : "master", 2990 dev_name(&ctlr->dev)); 2991 2992 /* 2993 * If we're using a queued driver, start the queue. Note that we don't 2994 * need the queueing logic if the driver is only supporting high-level 2995 * memory operations. 
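 *
 * For instance (illustrative), a controller that only populates
 * ctlr->mem_ops (with ->exec_op()) and provides neither ->transfer_one()
 * nor ->transfer_one_message() passes spi_controller_check_ops() above but
 * deliberately skips spi_controller_initialize_queue() below.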
2996 */ 2997 if (ctlr->transfer) { 2998 dev_info(dev, "controller is unqueued, this is deprecated\n"); 2999 } else if (ctlr->transfer_one || ctlr->transfer_one_message) { 3000 status = spi_controller_initialize_queue(ctlr); 3001 if (status) { 3002 device_del(&ctlr->dev); 3003 goto free_bus_id; 3004 } 3005 } 3006 /* add statistics */ 3007 spin_lock_init(&ctlr->statistics.lock); 3008 3009 mutex_lock(&board_lock); 3010 list_add_tail(&ctlr->list, &spi_controller_list); 3011 list_for_each_entry(bi, &board_list, list) 3012 spi_match_controller_to_boardinfo(ctlr, &bi->board_info); 3013 mutex_unlock(&board_lock); 3014 3015 /* Register devices from the device tree and ACPI */ 3016 of_register_spi_devices(ctlr); 3017 acpi_register_spi_devices(ctlr); 3018 return status; 3019 3020 free_bus_id: 3021 mutex_lock(&board_lock); 3022 idr_remove(&spi_master_idr, ctlr->bus_num); 3023 mutex_unlock(&board_lock); 3024 return status; 3025 } 3026 EXPORT_SYMBOL_GPL(spi_register_controller); 3027 3028 static void devm_spi_unregister(void *ctlr) 3029 { 3030 spi_unregister_controller(ctlr); 3031 } 3032 3033 /** 3034 * devm_spi_register_controller - register managed SPI master or slave 3035 * controller 3036 * @dev: device managing SPI controller 3037 * @ctlr: initialized controller, originally from spi_alloc_master() or 3038 * spi_alloc_slave() 3039 * Context: can sleep 3040 * 3041 * Register a SPI device as with spi_register_controller() which will 3042 * automatically be unregistered and freed. 3043 * 3044 * Return: zero on success, else a negative error code. 3045 */ 3046 int devm_spi_register_controller(struct device *dev, 3047 struct spi_controller *ctlr) 3048 { 3049 int ret; 3050 3051 ret = spi_register_controller(ctlr); 3052 if (ret) 3053 return ret; 3054 3055 return devm_add_action_or_reset(dev, devm_spi_unregister, ctlr); 3056 } 3057 EXPORT_SYMBOL_GPL(devm_spi_register_controller); 3058 3059 static int __unregister(struct device *dev, void *null) 3060 { 3061 spi_unregister_device(to_spi_device(dev)); 3062 return 0; 3063 } 3064 3065 /** 3066 * spi_unregister_controller - unregister SPI master or slave controller 3067 * @ctlr: the controller being unregistered 3068 * Context: can sleep 3069 * 3070 * This call is used only by SPI controller drivers, which are the 3071 * only ones directly touching chip registers. 3072 * 3073 * This must be called from context that can sleep. 3074 * 3075 * Note that this function also drops a reference to the controller. 3076 */ 3077 void spi_unregister_controller(struct spi_controller *ctlr) 3078 { 3079 struct spi_controller *found; 3080 int id = ctlr->bus_num; 3081 3082 /* Prevent addition of new devices, unregister existing ones */ 3083 if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) 3084 mutex_lock(&ctlr->add_lock); 3085 3086 device_for_each_child(&ctlr->dev, NULL, __unregister); 3087 3088 /* First make sure that this controller was ever added */ 3089 mutex_lock(&board_lock); 3090 found = idr_find(&spi_master_idr, id); 3091 mutex_unlock(&board_lock); 3092 if (ctlr->queued) { 3093 if (spi_destroy_queue(ctlr)) 3094 dev_err(&ctlr->dev, "queue remove failed\n"); 3095 } 3096 mutex_lock(&board_lock); 3097 list_del(&ctlr->list); 3098 mutex_unlock(&board_lock); 3099 3100 device_del(&ctlr->dev); 3101 3102 /* Release the last reference on the controller if its driver 3103 * has not yet been converted to devm_spi_alloc_master/slave(). 
3104 */ 3105 if (!ctlr->devm_allocated) 3106 put_device(&ctlr->dev); 3107 3108 /* free bus id */ 3109 mutex_lock(&board_lock); 3110 if (found == ctlr) 3111 idr_remove(&spi_master_idr, id); 3112 mutex_unlock(&board_lock); 3113 3114 if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) 3115 mutex_unlock(&ctlr->add_lock); 3116 } 3117 EXPORT_SYMBOL_GPL(spi_unregister_controller); 3118 3119 int spi_controller_suspend(struct spi_controller *ctlr) 3120 { 3121 int ret; 3122 3123 /* Basically no-ops for non-queued controllers */ 3124 if (!ctlr->queued) 3125 return 0; 3126 3127 ret = spi_stop_queue(ctlr); 3128 if (ret) 3129 dev_err(&ctlr->dev, "queue stop failed\n"); 3130 3131 return ret; 3132 } 3133 EXPORT_SYMBOL_GPL(spi_controller_suspend); 3134 3135 int spi_controller_resume(struct spi_controller *ctlr) 3136 { 3137 int ret; 3138 3139 if (!ctlr->queued) 3140 return 0; 3141 3142 ret = spi_start_queue(ctlr); 3143 if (ret) 3144 dev_err(&ctlr->dev, "queue restart failed\n"); 3145 3146 return ret; 3147 } 3148 EXPORT_SYMBOL_GPL(spi_controller_resume); 3149 3150 /*-------------------------------------------------------------------------*/ 3151 3152 /* Core methods for spi_message alterations */ 3153 3154 static void __spi_replace_transfers_release(struct spi_controller *ctlr, 3155 struct spi_message *msg, 3156 void *res) 3157 { 3158 struct spi_replaced_transfers *rxfer = res; 3159 size_t i; 3160 3161 /* call extra callback if requested */ 3162 if (rxfer->release) 3163 rxfer->release(ctlr, msg, res); 3164 3165 /* insert replaced transfers back into the message */ 3166 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); 3167 3168 /* remove the formerly inserted entries */ 3169 for (i = 0; i < rxfer->inserted; i++) 3170 list_del(&rxfer->inserted_transfers[i].transfer_list); 3171 } 3172 3173 /** 3174 * spi_replace_transfers - replace transfers with several transfers 3175 * and register change with spi_message.resources 3176 * @msg: the spi_message we work upon 3177 * @xfer_first: the first spi_transfer we want to replace 3178 * @remove: number of transfers to remove 3179 * @insert: the number of transfers we want to insert instead 3180 * @release: extra release code necessary in some circumstances 3181 * @extradatasize: extra data to allocate (with alignment guarantees 3182 * of struct @spi_transfer) 3183 * @gfp: gfp flags 3184 * 3185 * Returns: pointer to @spi_replaced_transfers, 3186 * PTR_ERR(...) in case of errors. 3187 */ 3188 static struct spi_replaced_transfers *spi_replace_transfers( 3189 struct spi_message *msg, 3190 struct spi_transfer *xfer_first, 3191 size_t remove, 3192 size_t insert, 3193 spi_replaced_release_t release, 3194 size_t extradatasize, 3195 gfp_t gfp) 3196 { 3197 struct spi_replaced_transfers *rxfer; 3198 struct spi_transfer *xfer; 3199 size_t i; 3200 3201 /* allocate the structure using spi_res */ 3202 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, 3203 struct_size(rxfer, inserted_transfers, insert) 3204 + extradatasize, 3205 gfp); 3206 if (!rxfer) 3207 return ERR_PTR(-ENOMEM); 3208 3209 /* the release code to invoke before running the generic release */ 3210 rxfer->release = release; 3211 3212 /* assign extradata */ 3213 if (extradatasize) 3214 rxfer->extradata = 3215 &rxfer->inserted_transfers[insert]; 3216 3217 /* init the replaced_transfers list */ 3218 INIT_LIST_HEAD(&rxfer->replaced_transfers); 3219 3220 /* assign the list_entry after which we should reinsert 3221 * the @replaced_transfers - it may be spi_message.messages! 
3222 */ 3223 rxfer->replaced_after = xfer_first->transfer_list.prev; 3224 3225 /* remove the requested number of transfers */ 3226 for (i = 0; i < remove; i++) { 3227 /* if the entry after replaced_after it is msg->transfers 3228 * then we have been requested to remove more transfers 3229 * than are in the list 3230 */ 3231 if (rxfer->replaced_after->next == &msg->transfers) { 3232 dev_err(&msg->spi->dev, 3233 "requested to remove more spi_transfers than are available\n"); 3234 /* insert replaced transfers back into the message */ 3235 list_splice(&rxfer->replaced_transfers, 3236 rxfer->replaced_after); 3237 3238 /* free the spi_replace_transfer structure */ 3239 spi_res_free(rxfer); 3240 3241 /* and return with an error */ 3242 return ERR_PTR(-EINVAL); 3243 } 3244 3245 /* remove the entry after replaced_after from list of 3246 * transfers and add it to list of replaced_transfers 3247 */ 3248 list_move_tail(rxfer->replaced_after->next, 3249 &rxfer->replaced_transfers); 3250 } 3251 3252 /* create copy of the given xfer with identical settings 3253 * based on the first transfer to get removed 3254 */ 3255 for (i = 0; i < insert; i++) { 3256 /* we need to run in reverse order */ 3257 xfer = &rxfer->inserted_transfers[insert - 1 - i]; 3258 3259 /* copy all spi_transfer data */ 3260 memcpy(xfer, xfer_first, sizeof(*xfer)); 3261 3262 /* add to list */ 3263 list_add(&xfer->transfer_list, rxfer->replaced_after); 3264 3265 /* clear cs_change and delay for all but the last */ 3266 if (i) { 3267 xfer->cs_change = false; 3268 xfer->delay.value = 0; 3269 } 3270 } 3271 3272 /* set up inserted */ 3273 rxfer->inserted = insert; 3274 3275 /* and register it with spi_res/spi_message */ 3276 spi_res_add(msg, rxfer); 3277 3278 return rxfer; 3279 } 3280 3281 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, 3282 struct spi_message *msg, 3283 struct spi_transfer **xferp, 3284 size_t maxsize, 3285 gfp_t gfp) 3286 { 3287 struct spi_transfer *xfer = *xferp, *xfers; 3288 struct spi_replaced_transfers *srt; 3289 size_t offset; 3290 size_t count, i; 3291 3292 /* calculate how many we have to replace */ 3293 count = DIV_ROUND_UP(xfer->len, maxsize); 3294 3295 /* create replacement */ 3296 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp); 3297 if (IS_ERR(srt)) 3298 return PTR_ERR(srt); 3299 xfers = srt->inserted_transfers; 3300 3301 /* now handle each of those newly inserted spi_transfers 3302 * note that the replacements spi_transfers all are preset 3303 * to the same values as *xferp, so tx_buf, rx_buf and len 3304 * are all identical (as well as most others) 3305 * so we just have to fix up len and the pointers. 
3306 * 3307 * this also includes support for the depreciated 3308 * spi_message.is_dma_mapped interface 3309 */ 3310 3311 /* the first transfer just needs the length modified, so we 3312 * run it outside the loop 3313 */ 3314 xfers[0].len = min_t(size_t, maxsize, xfer[0].len); 3315 3316 /* all the others need rx_buf/tx_buf also set */ 3317 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) { 3318 /* update rx_buf, tx_buf and dma */ 3319 if (xfers[i].rx_buf) 3320 xfers[i].rx_buf += offset; 3321 if (xfers[i].rx_dma) 3322 xfers[i].rx_dma += offset; 3323 if (xfers[i].tx_buf) 3324 xfers[i].tx_buf += offset; 3325 if (xfers[i].tx_dma) 3326 xfers[i].tx_dma += offset; 3327 3328 /* update length */ 3329 xfers[i].len = min(maxsize, xfers[i].len - offset); 3330 } 3331 3332 /* we set up xferp to the last entry we have inserted, 3333 * so that we skip those already split transfers 3334 */ 3335 *xferp = &xfers[count - 1]; 3336 3337 /* increment statistics counters */ 3338 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, 3339 transfers_split_maxsize); 3340 SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics, 3341 transfers_split_maxsize); 3342 3343 return 0; 3344 } 3345 3346 /** 3347 * spi_split_transfers_maxsize - split spi transfers into multiple transfers 3348 * when an individual transfer exceeds a 3349 * certain size 3350 * @ctlr: the @spi_controller for this transfer 3351 * @msg: the @spi_message to transform 3352 * @maxsize: the maximum when to apply this 3353 * @gfp: GFP allocation flags 3354 * 3355 * Return: status of transformation 3356 */ 3357 int spi_split_transfers_maxsize(struct spi_controller *ctlr, 3358 struct spi_message *msg, 3359 size_t maxsize, 3360 gfp_t gfp) 3361 { 3362 struct spi_transfer *xfer; 3363 int ret; 3364 3365 /* iterate over the transfer_list, 3366 * but note that xfer is advanced to the last transfer inserted 3367 * to avoid checking sizes again unnecessarily (also xfer does 3368 * potentiall belong to a different list by the time the 3369 * replacement has happened 3370 */ 3371 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 3372 if (xfer->len > maxsize) { 3373 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, 3374 maxsize, gfp); 3375 if (ret) 3376 return ret; 3377 } 3378 } 3379 3380 return 0; 3381 } 3382 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize); 3383 3384 /*-------------------------------------------------------------------------*/ 3385 3386 /* Core methods for SPI controller protocol drivers. Some of the 3387 * other core methods are currently defined as inline functions. 3388 */ 3389 3390 static int __spi_validate_bits_per_word(struct spi_controller *ctlr, 3391 u8 bits_per_word) 3392 { 3393 if (ctlr->bits_per_word_mask) { 3394 /* Only 32 bits fit in the mask */ 3395 if (bits_per_word > 32) 3396 return -EINVAL; 3397 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word))) 3398 return -EINVAL; 3399 } 3400 3401 return 0; 3402 } 3403 3404 /** 3405 * spi_setup - setup SPI mode and clock rate 3406 * @spi: the device whose settings are being modified 3407 * Context: can sleep, and no requests are queued to the device 3408 * 3409 * SPI protocol drivers may need to update the transfer mode if the 3410 * device doesn't work with its default. They may likewise need 3411 * to update clock rates or word sizes from initial values. This function 3412 * changes those settings, and must be called from a context that can sleep. 
* Except for SPI_CS_HIGH, which takes effect immediately, the changes take 3414 * effect the next time the device is selected and data is transferred to 3415 * or from it. When this function returns, the SPI device is deselected. 3416 * 3417 * Note that this call will fail if the protocol driver specifies an option 3418 * that the underlying controller or its driver does not support. For 3419 * example, not all hardware supports wire transfers using nine bit words, 3420 * LSB-first wire encoding, or active-high chipselects. 3421 * 3422 * Return: zero on success, else a negative error code. 3423 */ 3424 int spi_setup(struct spi_device *spi) 3425 { 3426 unsigned bad_bits, ugly_bits; 3427 int status; 3428 3429 /* 3430 * Check mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO 3431 * being set at the same time 3432 */ 3433 if ((hweight_long(spi->mode & 3434 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) || 3435 (hweight_long(spi->mode & 3436 (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) { 3437 dev_err(&spi->dev, 3438 "setup: cannot select any two of dual, quad and no-rx/tx at the same time\n"); 3439 return -EINVAL; 3440 } 3441 /* In SPI_3WIRE mode, DUAL and QUAD modes are forbidden 3442 */ 3443 if ((spi->mode & SPI_3WIRE) && (spi->mode & 3444 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | 3445 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))) 3446 return -EINVAL; 3447 /* help drivers fail *cleanly* when they need options 3448 * that aren't supported with their current controller. 3449 * SPI_CS_WORD has a fallback software implementation, 3450 * so it is ignored here. 3451 */ 3452 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD | 3453 SPI_NO_TX | SPI_NO_RX); 3454 /* nothing prevents us from working with an active-high CS if it 3455 * is driven by a GPIO.
3456 */ 3457 if (gpio_is_valid(spi->cs_gpio)) 3458 bad_bits &= ~SPI_CS_HIGH; 3459 ugly_bits = bad_bits & 3460 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | 3461 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL); 3462 if (ugly_bits) { 3463 dev_warn(&spi->dev, 3464 "setup: ignoring unsupported mode bits %x\n", 3465 ugly_bits); 3466 spi->mode &= ~ugly_bits; 3467 bad_bits &= ~ugly_bits; 3468 } 3469 if (bad_bits) { 3470 dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 3471 bad_bits); 3472 return -EINVAL; 3473 } 3474 3475 if (!spi->bits_per_word) 3476 spi->bits_per_word = 8; 3477 3478 status = __spi_validate_bits_per_word(spi->controller, 3479 spi->bits_per_word); 3480 if (status) 3481 return status; 3482 3483 if (spi->controller->max_speed_hz && 3484 (!spi->max_speed_hz || 3485 spi->max_speed_hz > spi->controller->max_speed_hz)) 3486 spi->max_speed_hz = spi->controller->max_speed_hz; 3487 3488 mutex_lock(&spi->controller->io_mutex); 3489 3490 if (spi->controller->setup) { 3491 status = spi->controller->setup(spi); 3492 if (status) { 3493 mutex_unlock(&spi->controller->io_mutex); 3494 dev_err(&spi->controller->dev, "Failed to setup device: %d\n", 3495 status); 3496 return status; 3497 } 3498 } 3499 3500 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) { 3501 status = pm_runtime_get_sync(spi->controller->dev.parent); 3502 if (status < 0) { 3503 mutex_unlock(&spi->controller->io_mutex); 3504 pm_runtime_put_noidle(spi->controller->dev.parent); 3505 dev_err(&spi->controller->dev, "Failed to power device: %d\n", 3506 status); 3507 return status; 3508 } 3509 3510 /* 3511 * We do not want to return positive value from pm_runtime_get, 3512 * there are many instances of devices calling spi_setup() and 3513 * checking for a non-zero return value instead of a negative 3514 * return value. 3515 */ 3516 status = 0; 3517 3518 spi_set_cs(spi, false, true); 3519 pm_runtime_mark_last_busy(spi->controller->dev.parent); 3520 pm_runtime_put_autosuspend(spi->controller->dev.parent); 3521 } else { 3522 spi_set_cs(spi, false, true); 3523 } 3524 3525 mutex_unlock(&spi->controller->io_mutex); 3526 3527 if (spi->rt && !spi->controller->rt) { 3528 spi->controller->rt = true; 3529 spi_set_thread_rt(spi->controller); 3530 } 3531 3532 trace_spi_setup(spi, status); 3533 3534 dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n", 3535 spi->mode & SPI_MODE_X_MASK, 3536 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 3537 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 3538 (spi->mode & SPI_3WIRE) ? "3wire, " : "", 3539 (spi->mode & SPI_LOOP) ? 
"loopback, " : "", 3540 spi->bits_per_word, spi->max_speed_hz, 3541 status); 3542 3543 return status; 3544 } 3545 EXPORT_SYMBOL_GPL(spi_setup); 3546 3547 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer, 3548 struct spi_device *spi) 3549 { 3550 int delay1, delay2; 3551 3552 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer); 3553 if (delay1 < 0) 3554 return delay1; 3555 3556 delay2 = spi_delay_to_ns(&spi->word_delay, xfer); 3557 if (delay2 < 0) 3558 return delay2; 3559 3560 if (delay1 < delay2) 3561 memcpy(&xfer->word_delay, &spi->word_delay, 3562 sizeof(xfer->word_delay)); 3563 3564 return 0; 3565 } 3566 3567 static int __spi_validate(struct spi_device *spi, struct spi_message *message) 3568 { 3569 struct spi_controller *ctlr = spi->controller; 3570 struct spi_transfer *xfer; 3571 int w_size; 3572 3573 if (list_empty(&message->transfers)) 3574 return -EINVAL; 3575 3576 /* If an SPI controller does not support toggling the CS line on each 3577 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO 3578 * for the CS line, we can emulate the CS-per-word hardware function by 3579 * splitting transfers into one-word transfers and ensuring that 3580 * cs_change is set for each transfer. 3581 */ 3582 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) || 3583 spi->cs_gpiod || 3584 gpio_is_valid(spi->cs_gpio))) { 3585 size_t maxsize; 3586 int ret; 3587 3588 maxsize = (spi->bits_per_word + 7) / 8; 3589 3590 /* spi_split_transfers_maxsize() requires message->spi */ 3591 message->spi = spi; 3592 3593 ret = spi_split_transfers_maxsize(ctlr, message, maxsize, 3594 GFP_KERNEL); 3595 if (ret) 3596 return ret; 3597 3598 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3599 /* don't change cs_change on the last entry in the list */ 3600 if (list_is_last(&xfer->transfer_list, &message->transfers)) 3601 break; 3602 xfer->cs_change = 1; 3603 } 3604 } 3605 3606 /* Half-duplex links include original MicroWire, and ones with 3607 * only one data pin like SPI_3WIRE (switches direction) or where 3608 * either MOSI or MISO is missing. They can also be caused by 3609 * software limitations. 3610 */ 3611 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) || 3612 (spi->mode & SPI_3WIRE)) { 3613 unsigned flags = ctlr->flags; 3614 3615 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3616 if (xfer->rx_buf && xfer->tx_buf) 3617 return -EINVAL; 3618 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf) 3619 return -EINVAL; 3620 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf) 3621 return -EINVAL; 3622 } 3623 } 3624 3625 /** 3626 * Set transfer bits_per_word and max speed as spi device default if 3627 * it is not set for this transfer. 3628 * Set transfer tx_nbits and rx_nbits as single transfer default 3629 * (SPI_NBITS_SINGLE) if it is not set for this transfer. 3630 * Ensure transfer word_delay is at least as long as that required by 3631 * device itself. 
3632 */ 3633 message->frame_length = 0; 3634 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3635 xfer->effective_speed_hz = 0; 3636 message->frame_length += xfer->len; 3637 if (!xfer->bits_per_word) 3638 xfer->bits_per_word = spi->bits_per_word; 3639 3640 if (!xfer->speed_hz) 3641 xfer->speed_hz = spi->max_speed_hz; 3642 3643 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz) 3644 xfer->speed_hz = ctlr->max_speed_hz; 3645 3646 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word)) 3647 return -EINVAL; 3648 3649 /* 3650 * SPI transfer length should be multiple of SPI word size 3651 * where SPI word size should be power-of-two multiple 3652 */ 3653 if (xfer->bits_per_word <= 8) 3654 w_size = 1; 3655 else if (xfer->bits_per_word <= 16) 3656 w_size = 2; 3657 else 3658 w_size = 4; 3659 3660 /* No partial transfers accepted */ 3661 if (xfer->len % w_size) 3662 return -EINVAL; 3663 3664 if (xfer->speed_hz && ctlr->min_speed_hz && 3665 xfer->speed_hz < ctlr->min_speed_hz) 3666 return -EINVAL; 3667 3668 if (xfer->tx_buf && !xfer->tx_nbits) 3669 xfer->tx_nbits = SPI_NBITS_SINGLE; 3670 if (xfer->rx_buf && !xfer->rx_nbits) 3671 xfer->rx_nbits = SPI_NBITS_SINGLE; 3672 /* check transfer tx/rx_nbits: 3673 * 1. check the value matches one of single, dual and quad 3674 * 2. check tx/rx_nbits match the mode in spi_device 3675 */ 3676 if (xfer->tx_buf) { 3677 if (spi->mode & SPI_NO_TX) 3678 return -EINVAL; 3679 if (xfer->tx_nbits != SPI_NBITS_SINGLE && 3680 xfer->tx_nbits != SPI_NBITS_DUAL && 3681 xfer->tx_nbits != SPI_NBITS_QUAD) 3682 return -EINVAL; 3683 if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 3684 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 3685 return -EINVAL; 3686 if ((xfer->tx_nbits == SPI_NBITS_QUAD) && 3687 !(spi->mode & SPI_TX_QUAD)) 3688 return -EINVAL; 3689 } 3690 /* check transfer rx_nbits */ 3691 if (xfer->rx_buf) { 3692 if (spi->mode & SPI_NO_RX) 3693 return -EINVAL; 3694 if (xfer->rx_nbits != SPI_NBITS_SINGLE && 3695 xfer->rx_nbits != SPI_NBITS_DUAL && 3696 xfer->rx_nbits != SPI_NBITS_QUAD) 3697 return -EINVAL; 3698 if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 3699 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 3700 return -EINVAL; 3701 if ((xfer->rx_nbits == SPI_NBITS_QUAD) && 3702 !(spi->mode & SPI_RX_QUAD)) 3703 return -EINVAL; 3704 } 3705 3706 if (_spi_xfer_word_delay_update(xfer, spi)) 3707 return -EINVAL; 3708 } 3709 3710 message->status = -EINPROGRESS; 3711 3712 return 0; 3713 } 3714 3715 static int __spi_async(struct spi_device *spi, struct spi_message *message) 3716 { 3717 struct spi_controller *ctlr = spi->controller; 3718 struct spi_transfer *xfer; 3719 3720 /* 3721 * Some controllers do not support doing regular SPI transfers. Return 3722 * ENOTSUPP when this is the case. 
3723 */ 3724 if (!ctlr->transfer) 3725 return -ENOTSUPP; 3726 3727 message->spi = spi; 3728 3729 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async); 3730 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); 3731 3732 trace_spi_message_submit(message); 3733 3734 if (!ctlr->ptp_sts_supported) { 3735 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3736 xfer->ptp_sts_word_pre = 0; 3737 ptp_read_system_prets(xfer->ptp_sts); 3738 } 3739 } 3740 3741 return ctlr->transfer(spi, message); 3742 } 3743 3744 /** 3745 * spi_async - asynchronous SPI transfer 3746 * @spi: device with which data will be exchanged 3747 * @message: describes the data transfers, including completion callback 3748 * Context: any (irqs may be blocked, etc) 3749 * 3750 * This call may be used in_irq and other contexts which can't sleep, 3751 * as well as from task contexts which can sleep. 3752 * 3753 * The completion callback is invoked in a context which can't sleep. 3754 * Before that invocation, the value of message->status is undefined. 3755 * When the callback is issued, message->status holds either zero (to 3756 * indicate complete success) or a negative error code. After that 3757 * callback returns, the driver which issued the transfer request may 3758 * deallocate the associated memory; it's no longer in use by any SPI 3759 * core or controller driver code. 3760 * 3761 * Note that although all messages to a spi_device are handled in 3762 * FIFO order, messages may go to different devices in other orders. 3763 * Some device might be higher priority, or have various "hard" access 3764 * time requirements, for example. 3765 * 3766 * On detection of any fault during the transfer, processing of 3767 * the entire message is aborted, and the device is deselected. 3768 * Until returning from the associated message completion callback, 3769 * no other spi_message queued to that device will be processed. 3770 * (This rule applies equally to all the synchronous transfer calls, 3771 * which are wrappers around this core asynchronous primitive.) 3772 * 3773 * Return: zero on success, else a negative error code. 3774 */ 3775 int spi_async(struct spi_device *spi, struct spi_message *message) 3776 { 3777 struct spi_controller *ctlr = spi->controller; 3778 int ret; 3779 unsigned long flags; 3780 3781 ret = __spi_validate(spi, message); 3782 if (ret != 0) 3783 return ret; 3784 3785 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 3786 3787 if (ctlr->bus_lock_flag) 3788 ret = -EBUSY; 3789 else 3790 ret = __spi_async(spi, message); 3791 3792 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3793 3794 return ret; 3795 } 3796 EXPORT_SYMBOL_GPL(spi_async); 3797 3798 /** 3799 * spi_async_locked - version of spi_async with exclusive bus usage 3800 * @spi: device with which data will be exchanged 3801 * @message: describes the data transfers, including completion callback 3802 * Context: any (irqs may be blocked, etc) 3803 * 3804 * This call may be used in_irq and other contexts which can't sleep, 3805 * as well as from task contexts which can sleep. 3806 * 3807 * The completion callback is invoked in a context which can't sleep. 3808 * Before that invocation, the value of message->status is undefined. 3809 * When the callback is issued, message->status holds either zero (to 3810 * indicate complete success) or a negative error code. 

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used from irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code. After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some devices might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}

/*-------------------------------------------------------------------------*/

/*
 * Utility methods for SPI protocol drivers, layered on
 * top of the core. Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	/*
	 * If we're not using the legacy transfer method then we try to
	 * transfer in the calling context, so special-case that here. This
	 * code would be less tricky if we could remove the support for
	 * driver-implemented message queues.
	 */
	if (ctlr->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (status == 0) {
		/*
		 * Push out the messages in the calling context if we can.
		 */
		if (ctlr->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(ctlr, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages. Drivers for some
 * frequently-used devices may want to minimize the cost of selecting a
 * chip by leaving it selected in anticipation that the next message will
 * go to the same chip. (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->controller->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->controller->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
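
/*
 * Usage sketch (hypothetical driver code, not part of the SPI core): a
 * typical synchronous exchange built from one write and one read transfer.
 * The cmd/resp buffers are illustrative; kmalloc'd (DMA-safe) buffers are
 * preferred so low-overhead controllers can DMA straight into them.
 *
 *	struct spi_transfer xfers[2] = {
 *		{ .tx_buf = cmd,  .len = 1 },
 *		{ .rx_buf = resp, .len = 4 },
 *	};
 *	struct spi_message msg;
 *	int ret;
 *
 *	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
 *	ret = spi_sync(spi, &msg);	// sleeps until the message completes
 */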

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->bus_lock_mutex);

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* The mutex remains locked until spi_bus_unlock() is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by a spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
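
/*
 * Usage sketch (hypothetical driver code, not part of the SPI core): a
 * driver that must issue several messages back to back, with no other
 * device interleaving on the same bus, can hold the bus lock around the
 * locked transfer calls.  msg1 and msg2 are illustrative, pre-built
 * spi_messages.
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->controller);
 */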

/* Portable code must never pass more than 32 bytes */
#define SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf. The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
			const void *txbuf, unsigned n_tx,
			void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int status;
	struct spi_message message;
	struct spi_transfer x[2];
	u8 *local_buf;

	/*
	 * Use the preallocated DMA-safe buffer if we can. We can't avoid
	 * copying here (it is a pure convenience thing), but we can keep
	 * heap costs out of the hot path unless someone else is using the
	 * preallocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* Do the I/O */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
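
/*
 * Usage sketch (hypothetical driver code, not part of the SPI core):
 * reading a two-byte ID register with an illustrative command value of
 * 0x9f.  The buffers may live on the stack because the helper bounces
 * them through its own DMA-safe buffer.
 *
 *	u8 cmd = 0x9f;
 *	u8 id[2];
 *	int ret;
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, id, sizeof(id));
 *	if (ret)
 *		dev_err(&spi->dev, "ID read failed: %d\n", ret);
 */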

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* Must call put_device() when done with the returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);

	return dev ? to_spi_device(dev) : NULL;
}

/* The SPI controllers are not using spi_bus, so we find them another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device_by_of_node(&spi_master_class, node);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device_by_of_node(&spi_slave_class, node);
	if (!dev)
		return NULL;

	/* Reference got in class_find_device */
	return container_of(dev, struct spi_controller, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
			       __func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* Already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* Find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* no? not meant for us */

		/* Unregister takes one reference away */
		spi_unregister_device(spi);

		/* And drop the reference taken by the find above */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, adev,
					spi_acpi_controller_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_controller, dev);
}

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
	return to_spi_device(dev);
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		ctlr = acpi_spi_find_controller_by_adev(adev->parent);
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_slave_class);
		if (status < 0)
			goto err3;
	}

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_master_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/*
 * board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);