// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/ptp_clock_kernel.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	const char *end = memchr(buf, '\n', count);
	const size_t len = end ? end - buf : count;
	const char *driver_override, *old;

	/* We need to keep extra room for a newline when displaying value */
	if (len >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, len, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	device_lock(dev);
	old = spi->driver_override;
	if (len) {
		spi->driver_override = driver_override;
	} else {
		/* Empty string, disable driver override */
		spi->driver_override = NULL;
		kfree(driver_override);
	}
	device_unlock(dev);
	kfree(old);

	return count;
}
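/*
 * Illustrative usage (not part of this file; the device/driver names are
 * hypothetical): the driver_override attribute implemented here lets
 * userspace force a binding, e.g. to attach spidev to a chip select that
 * would otherwise match a different driver:
 *
 *	echo spidev > /sys/bus/spi/devices/spi0.0/driver_override
 *	echo spi0.0 > /sys/bus/spi/drivers/spidev/bind
 */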
: ""); 110 device_unlock(dev); 111 return len; 112 } 113 static DEVICE_ATTR_RW(driver_override); 114 115 #define SPI_STATISTICS_ATTRS(field, file) \ 116 static ssize_t spi_controller_##field##_show(struct device *dev, \ 117 struct device_attribute *attr, \ 118 char *buf) \ 119 { \ 120 struct spi_controller *ctlr = container_of(dev, \ 121 struct spi_controller, dev); \ 122 return spi_statistics_##field##_show(&ctlr->statistics, buf); \ 123 } \ 124 static struct device_attribute dev_attr_spi_controller_##field = { \ 125 .attr = { .name = file, .mode = 0444 }, \ 126 .show = spi_controller_##field##_show, \ 127 }; \ 128 static ssize_t spi_device_##field##_show(struct device *dev, \ 129 struct device_attribute *attr, \ 130 char *buf) \ 131 { \ 132 struct spi_device *spi = to_spi_device(dev); \ 133 return spi_statistics_##field##_show(&spi->statistics, buf); \ 134 } \ 135 static struct device_attribute dev_attr_spi_device_##field = { \ 136 .attr = { .name = file, .mode = 0444 }, \ 137 .show = spi_device_##field##_show, \ 138 } 139 140 #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \ 141 static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \ 142 char *buf) \ 143 { \ 144 unsigned long flags; \ 145 ssize_t len; \ 146 spin_lock_irqsave(&stat->lock, flags); \ 147 len = sprintf(buf, format_string, stat->field); \ 148 spin_unlock_irqrestore(&stat->lock, flags); \ 149 return len; \ 150 } \ 151 SPI_STATISTICS_ATTRS(name, file) 152 153 #define SPI_STATISTICS_SHOW(field, format_string) \ 154 SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \ 155 field, format_string) 156 157 SPI_STATISTICS_SHOW(messages, "%lu"); 158 SPI_STATISTICS_SHOW(transfers, "%lu"); 159 SPI_STATISTICS_SHOW(errors, "%lu"); 160 SPI_STATISTICS_SHOW(timedout, "%lu"); 161 162 SPI_STATISTICS_SHOW(spi_sync, "%lu"); 163 SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu"); 164 SPI_STATISTICS_SHOW(spi_async, "%lu"); 165 166 SPI_STATISTICS_SHOW(bytes, "%llu"); 167 SPI_STATISTICS_SHOW(bytes_rx, "%llu"); 168 SPI_STATISTICS_SHOW(bytes_tx, "%llu"); 169 170 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \ 171 SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \ 172 "transfer_bytes_histo_" number, \ 173 transfer_bytes_histo[index], "%lu") 174 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1"); 175 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3"); 176 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7"); 177 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15"); 178 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31"); 179 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63"); 180 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127"); 181 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255"); 182 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511"); 183 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023"); 184 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047"); 185 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095"); 186 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191"); 187 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383"); 188 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767"); 189 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535"); 190 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+"); 191 192 SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu"); 193 194 static struct attribute *spi_dev_attrs[] = { 195 &dev_attr_modalias.attr, 196 &dev_attr_driver_override.attr, 197 NULL, 198 }; 199 200 static const struct attribute_group spi_dev_group = { 201 .attrs = spi_dev_attrs, 202 }; 203 204 static struct attribute 
static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name = "statistics",
	.attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name = "statistics",
	.attrs = spi_controller_statistics_attrs,
};
static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

static void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
					      struct spi_transfer *xfer,
					      struct spi_controller *ctlr)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}

/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
	while (id->name[0]) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi->modalias);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	struct spi_device	*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove) {
		int ret;

		ret = sdrv->remove(to_spi_device(dev));
		if (ret)
			dev_warn(dev,
				 "Failed to unbind driver (%pe), ignoring\n",
				 ERR_PTR(ret));
	}

	dev_pm_domain_detach(dev, true);
}
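/*
 * Example (illustrative sketch, not part of this file; all "acme" names
 * are hypothetical): a minimal client driver as matched and probed by the
 * bus code above. Note that in this kernel version ->remove() still
 * returns int, which spi_remove() above warns about but otherwise ignores.
 *
 *	static int acme_probe(struct spi_device *spi)
 *	{
 *		return 0;
 *	}
 *
 *	static int acme_remove(struct spi_device *spi)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct spi_device_id acme_ids[] = {
 *		{ "acme-chip" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, acme_ids);
 *
 *	static struct spi_driver acme_driver = {
 *		.driver		= { .name = "acme-chip" },
 *		.id_table	= acme_ids,
 *		.probe		= acme_probe,
 *		.remove		= acme_remove,
 *	};
 *	module_spi_driver(acme_driver);
 */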
static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases not of:
	 * modaliases for DT so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				spi_id = spi_match_id(sdrv->id_table, of_name);
				if (spi_id)
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);

/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into files like
 * arch/.../mach.../board-YYY.c, alongside other read-only (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process. Also used to protect
 * objects of type struct idr.
 */
static DEFINE_MUTEX(board_lock);
/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
static struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device	*spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;
	spi->mode = ctlr->buswidth_override_bits;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.
	 */
	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
			spi->chip_select);
		return status;
	}

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	/* Descriptors take precedence */
	if (ctlr->cs_gpiods)
		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
	else if (ctlr->cs_gpios)
		spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}
/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
static int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	mutex_lock(&ctlr->add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);
	return status;
}

static int spi_add_device_locked(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	WARN_ON(!mutex_is_locked(&ctlr->add_lock));
	return __spi_add_device(spi);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/*
	 * NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
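/*
 * Example (illustrative sketch; the "acme" name and all values are
 * hypothetical): an adapter driver that learned about a chip out-of-band
 * could instantiate it as follows:
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "acme-chip",
 *		.max_speed_hz	= 1000000,
 *		.chip_select	= 1,
 *		.mode		= SPI_MODE_0,
 *	};
 *	struct spi_device *proxy = spi_new_device(ctlr, &chip);
 *
 *	if (!proxy)
 *		return -ENODEV;	// NULL-or-pointer convention, see above
 */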
/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
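/*
 * Example (illustrative board-file style; all values hypothetical):
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "acme-flash",
 *			.max_speed_hz	= 25000000,
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */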
/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
			   size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}

/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res:     the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all spi resources for this message
 * @ctlr:    the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}
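/*
 * Illustrative lifecycle of the helpers above (sketch only; my_release()
 * and struct foo are hypothetical): a core helper that needs memory tied
 * to a message would do:
 *
 *	struct foo *data = spi_res_alloc(msg->spi, my_release,
 *					 sizeof(*data), GFP_KERNEL);
 *	if (!data)
 *		return -ENOMEM;
 *	spi_res_add(msg, data);
 *	// spi_res_release() later invokes my_release() and frees the memory
 */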
/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && (spi->controller->last_cs_enable == enable) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs_enable = enable;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if ((spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
	     !spi->controller->set_cs_timing) && !activate) {
		spi_delay_exec(&spi->cs_hold, NULL);
	}

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
		if (!(spi->mode & SPI_NO_CS)) {
			if (spi->cs_gpiod) {
				/*
				 * Historically ACPI has no means of expressing
				 * the GPIO polarity and thus the SPISerialBus()
				 * resource defines it on the per-chip basis.
				 * In order to avoid a chain of negations, the
				 * GPIO polarity is considered being Active
				 * High. Even for the cases when _DSD() is
				 * involved (in the updated versions of ACPI)
				 * the GPIO CS polarity must be defined Active
				 * High to avoid ambiguity. That's why we use
				 * enable, that takes SPI_CS_HIGH into account.
				 */
				if (has_acpi_companion(&spi->dev))
					gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
				else
					/* Polarity handled by GPIO library */
					gpiod_set_value_cansleep(spi->cs_gpiod, activate);
			} else {
				/*
				 * Invert the enable line, as active low is
				 * default for SPI.
				 */
				gpio_set_value_cansleep(spi->cs_gpio, !enable);
			}
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
	    !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}
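/*
 * Example (illustrative; values hypothetical): a client device needing
 * extra chip select setup/hold time can populate the spi_delay fields
 * consumed by spi_set_cs() above:
 *
 *	spi->cs_setup.value = 10;
 *	spi->cs_setup.unit  = SPI_DELAY_UNIT_USECS;
 *	spi->cs_hold.value  = 10;
 *	spi->cs_hold.unit   = SPI_DELAY_UNIT_USECS;
 */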
#ifdef CONFIG_HAS_DMA
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	ctlr->cur_msg_mapped = false;

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */
&msg->transfers, transfer_list) { 1192 /* 1193 * Restore the original value of tx_buf or rx_buf if they are 1194 * NULL. 1195 */ 1196 if (xfer->tx_buf == ctlr->dummy_tx) 1197 xfer->tx_buf = NULL; 1198 if (xfer->rx_buf == ctlr->dummy_rx) 1199 xfer->rx_buf = NULL; 1200 } 1201 1202 return __spi_unmap_msg(ctlr, msg); 1203 } 1204 1205 static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) 1206 { 1207 struct spi_transfer *xfer; 1208 void *tmp; 1209 unsigned int max_tx, max_rx; 1210 1211 if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) 1212 && !(msg->spi->mode & SPI_3WIRE)) { 1213 max_tx = 0; 1214 max_rx = 0; 1215 1216 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 1217 if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) && 1218 !xfer->tx_buf) 1219 max_tx = max(xfer->len, max_tx); 1220 if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) && 1221 !xfer->rx_buf) 1222 max_rx = max(xfer->len, max_rx); 1223 } 1224 1225 if (max_tx) { 1226 tmp = krealloc(ctlr->dummy_tx, max_tx, 1227 GFP_KERNEL | GFP_DMA | __GFP_ZERO); 1228 if (!tmp) 1229 return -ENOMEM; 1230 ctlr->dummy_tx = tmp; 1231 } 1232 1233 if (max_rx) { 1234 tmp = krealloc(ctlr->dummy_rx, max_rx, 1235 GFP_KERNEL | GFP_DMA); 1236 if (!tmp) 1237 return -ENOMEM; 1238 ctlr->dummy_rx = tmp; 1239 } 1240 1241 if (max_tx || max_rx) { 1242 list_for_each_entry(xfer, &msg->transfers, 1243 transfer_list) { 1244 if (!xfer->len) 1245 continue; 1246 if (!xfer->tx_buf) 1247 xfer->tx_buf = ctlr->dummy_tx; 1248 if (!xfer->rx_buf) 1249 xfer->rx_buf = ctlr->dummy_rx; 1250 } 1251 } 1252 } 1253 1254 return __spi_map_msg(ctlr, msg); 1255 } 1256 1257 static int spi_transfer_wait(struct spi_controller *ctlr, 1258 struct spi_message *msg, 1259 struct spi_transfer *xfer) 1260 { 1261 struct spi_statistics *statm = &ctlr->statistics; 1262 struct spi_statistics *stats = &msg->spi->statistics; 1263 u32 speed_hz = xfer->speed_hz; 1264 unsigned long long ms; 1265 1266 if (spi_controller_is_slave(ctlr)) { 1267 if (wait_for_completion_interruptible(&ctlr->xfer_completion)) { 1268 dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n"); 1269 return -EINTR; 1270 } 1271 } else { 1272 if (!speed_hz) 1273 speed_hz = 100000; 1274 1275 /* 1276 * For each byte we wait for 8 cycles of the SPI clock. 1277 * Since speed is defined in Hz and we want milliseconds, 1278 * use respective multiplier, but before the division, 1279 * otherwise we may get 0 for short transfers. 1280 */ 1281 ms = 8LL * MSEC_PER_SEC * xfer->len; 1282 do_div(ms, speed_hz); 1283 1284 /* 1285 * Increase it twice and add 200 ms tolerance, use 1286 * predefined maximum in case of overflow. 
static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * use respective multiplier, but before the division,
		 * otherwise we may get 0 for short transfers.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);

		/*
		 * Increase it twice and add 200 ms tolerance, use
		 * predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If there is unknown effective speed, approximate it
		 * by underestimating with half of the requested hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}
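/*
 * Example (illustrative): the delay helpers above can also be used
 * directly, e.g. to wait two SCK cycles after a word; the clock rate is
 * taken from the transfer, so @xfer must be supplied:
 *
 *	struct spi_delay d = {
 *		.value	= 2,
 *		.unit	= SPI_DELAY_UNIT_SCK,
 *	};
 *
 *	spi_delay_exec(&d, xfer);
 */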
/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics *statm = &ctlr->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				if (ctlr->cur_msg_mapped &&
				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				spi_set_cs(msg->spi, true, false);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}
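/*
 * Example (illustrative sketch; the acme_* names are hypothetical): an
 * interrupt driven controller cooperates with the loop above by returning
 * a positive value from ->transfer_one() and signalling completion from
 * its interrupt handler:
 *
 *	static int acme_transfer_one(struct spi_controller *ctlr,
 *				     struct spi_device *spi,
 *				     struct spi_transfer *xfer)
 *	{
 *		acme_start_transfer(ctlr, xfer);	// hypothetical
 *		return 1;	// still busy: core calls spi_transfer_wait()
 *	}
 *
 *	static irqreturn_t acme_irq(int irq, void *data)
 *	{
 *		struct spi_controller *ctlr = data;
 *
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */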
/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);

static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
	if (ctlr->auto_runtime_pm) {
		pm_runtime_mark_last_busy(ctlr->dev.parent);
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
}

/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and, if so, calls out to the driver to initialize
 * hardware and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	struct spi_transfer *xfer;
	struct spi_message *msg;
	bool was_busy = false;
	unsigned long flags;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg) {
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (ctlr->idling) {
		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy) {
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		/* Defer any non-atomic teardown to the thread */
		if (!in_kthread) {
			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
			    !ctlr->unprepare_transfer_hardware) {
				spi_idle_runtime_pm(ctlr);
				ctlr->busy = false;
				trace_spi_controller_idle(ctlr);
			} else {
				kthread_queue_work(ctlr->kworker,
						   &ctlr->pump_messages);
			}
			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
			return;
		}

		ctlr->busy = false;
		ctlr->idling = true;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->idling = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
	ctlr->cur_msg = msg;

	list_del_init(&msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	mutex_lock(&ctlr->io_mutex);

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
mutex_unlock(&ctlr->io_mutex); 1635 return; 1636 } 1637 } 1638 1639 if (!was_busy) 1640 trace_spi_controller_busy(ctlr); 1641 1642 if (!was_busy && ctlr->prepare_transfer_hardware) { 1643 ret = ctlr->prepare_transfer_hardware(ctlr); 1644 if (ret) { 1645 dev_err(&ctlr->dev, 1646 "failed to prepare transfer hardware: %d\n", 1647 ret); 1648 1649 if (ctlr->auto_runtime_pm) 1650 pm_runtime_put(ctlr->dev.parent); 1651 1652 msg->status = ret; 1653 spi_finalize_current_message(ctlr); 1654 1655 mutex_unlock(&ctlr->io_mutex); 1656 return; 1657 } 1658 } 1659 1660 trace_spi_message_start(msg); 1661 1662 if (ctlr->prepare_message) { 1663 ret = ctlr->prepare_message(ctlr, msg); 1664 if (ret) { 1665 dev_err(&ctlr->dev, "failed to prepare message: %d\n", 1666 ret); 1667 msg->status = ret; 1668 spi_finalize_current_message(ctlr); 1669 goto out; 1670 } 1671 ctlr->cur_msg_prepared = true; 1672 } 1673 1674 ret = spi_map_msg(ctlr, msg); 1675 if (ret) { 1676 msg->status = ret; 1677 spi_finalize_current_message(ctlr); 1678 goto out; 1679 } 1680 1681 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) { 1682 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 1683 xfer->ptp_sts_word_pre = 0; 1684 ptp_read_system_prets(xfer->ptp_sts); 1685 } 1686 } 1687 1688 ret = ctlr->transfer_one_message(ctlr, msg); 1689 if (ret) { 1690 dev_err(&ctlr->dev, 1691 "failed to transfer one message from queue\n"); 1692 goto out; 1693 } 1694 1695 out: 1696 mutex_unlock(&ctlr->io_mutex); 1697 1698 /* Prod the scheduler in case transfer_one() was busy waiting */ 1699 if (!ret) 1700 cond_resched(); 1701 } 1702 1703 /** 1704 * spi_pump_messages - kthread work function which processes spi message queue 1705 * @work: pointer to kthread work struct contained in the controller struct 1706 */ 1707 static void spi_pump_messages(struct kthread_work *work) 1708 { 1709 struct spi_controller *ctlr = 1710 container_of(work, struct spi_controller, pump_messages); 1711 1712 __spi_pump_messages(ctlr, true); 1713 } 1714 1715 /** 1716 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp 1717 * @ctlr: Pointer to the spi_controller structure of the driver 1718 * @xfer: Pointer to the transfer being timestamped 1719 * @progress: How many words (not bytes) have been transferred so far 1720 * @irqs_off: If true, will disable IRQs and preemption for the duration of the 1721 * transfer, for less jitter in time measurement. Only compatible 1722 * with PIO drivers. If true, must follow up with 1723 * spi_take_timestamp_post or otherwise system will crash. 1724 * WARNING: for fully predictable results, the CPU frequency must 1725 * also be under control (governor). 1726 * 1727 * This is a helper for drivers to collect the beginning of the TX timestamp 1728 * for the requested byte from the SPI transfer. The frequency with which this 1729 * function must be called (once per word, once for the whole transfer, once 1730 * per batch of words etc) is arbitrary as long as the @tx buffer offset is 1731 * greater than or equal to the requested byte at the time of the call. The 1732 * timestamp is only taken once, at the first such call. It is assumed that 1733 * the driver advances its @tx buffer pointer monotonically. 
/**
 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
 *	      transfer, for less jitter in time measurement. Only compatible
 *	      with PIO drivers. If true, must follow up with
 *	      spi_take_timestamp_post or otherwise the system will crash.
 *	      WARNING: for fully predictable results, the CPU frequency must
 *	      also be under control (governor).
 *
 * This is a helper for drivers to collect the beginning of the TX timestamp
 * for the requested byte from the SPI transfer. The frequency with which this
 * function must be called (once per word, once for the whole transfer, once
 * per batch of words etc) is arbitrary as long as the @tx buffer offset is
 * greater than or equal to the requested byte at the time of the call. The
 * timestamp is only taken once, at the first such call. It is assumed that
 * the driver advances its @tx buffer pointer monotonically.
 */
void spi_take_timestamp_pre(struct spi_controller *ctlr,
			    struct spi_transfer *xfer,
			    size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress > xfer->ptp_sts_word_pre)
		return;

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_pre = progress;

	if (irqs_off) {
		local_irq_save(ctlr->irq_flags);
		preempt_disable();
	}

	ptp_read_system_prets(xfer->ptp_sts);
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);

/**
 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
 *
 * This is a helper for drivers to collect the end of the TX timestamp for
 * the requested byte from the SPI transfer. Can be called with an arbitrary
 * frequency: only the first call where @tx exceeds or is equal to the
 * requested word will be timestamped.
 */
void spi_take_timestamp_post(struct spi_controller *ctlr,
			     struct spi_transfer *xfer,
			     size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress < xfer->ptp_sts_word_post)
		return;

	ptp_read_system_postts(xfer->ptp_sts);

	if (irqs_off) {
		local_irq_restore(ctlr->irq_flags);
		preempt_enable();
	}

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_post = progress;

	xfer->timestamped = true;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);

/**
 * spi_set_thread_rt - set the controller to pump at realtime priority
 * @ctlr: controller to boost priority of
 *
 * This can be called because the controller requested realtime priority
 * (by setting the ->rt value before calling spi_register_controller()) or
 * because a device on the bus said that its transfers needed realtime
 * priority.
 *
 * NOTE: at the moment if any device on a bus says it needs realtime then
 * the thread will be at realtime priority for all transfers on that
 * controller.  If this eventually becomes a problem we may see if we can
 * find a way to boost the priority only temporarily during relevant
 * transfers.
 */
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
	dev_info(&ctlr->dev,
		 "will run message pump with realtime priority\n");
	sched_set_fifo(ctlr->kworker->task);
}

static int spi_init_queue(struct spi_controller *ctlr)
{
	ctlr->running = false;
	ctlr->busy = false;

	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker)) {
		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
		return PTR_ERR(ctlr->kworker);
	}

	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt)
		spi_set_thread_rt(ctlr);

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_transfer *xfer;
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->queue_lock, flags);
	mesg = ctlr->cur_msg;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}
	}

	if (unlikely(ctlr->ptp_sts_supported))
		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);

	spi_unmap_msg(ctlr, mesg);

	/*
	 * In the prepare_message callback the SPI bus has the opportunity
	 * to split a transfer into smaller chunks.
	 *
	 * Release the split transfers here since spi_map_msg() is done on
	 * the split transfers.
1909 */ 1910 spi_res_release(ctlr, mesg); 1911 1912 if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { 1913 ret = ctlr->unprepare_message(ctlr, mesg); 1914 if (ret) { 1915 dev_err(&ctlr->dev, "failed to unprepare message: %d\n", 1916 ret); 1917 } 1918 } 1919 1920 spin_lock_irqsave(&ctlr->queue_lock, flags); 1921 ctlr->cur_msg = NULL; 1922 ctlr->cur_msg_prepared = false; 1923 ctlr->fallback = false; 1924 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); 1925 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1926 1927 trace_spi_message_done(mesg); 1928 1929 mesg->state = NULL; 1930 if (mesg->complete) 1931 mesg->complete(mesg->context); 1932 } 1933 EXPORT_SYMBOL_GPL(spi_finalize_current_message); 1934 1935 static int spi_start_queue(struct spi_controller *ctlr) 1936 { 1937 unsigned long flags; 1938 1939 spin_lock_irqsave(&ctlr->queue_lock, flags); 1940 1941 if (ctlr->running || ctlr->busy) { 1942 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1943 return -EBUSY; 1944 } 1945 1946 ctlr->running = true; 1947 ctlr->cur_msg = NULL; 1948 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1949 1950 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); 1951 1952 return 0; 1953 } 1954 1955 static int spi_stop_queue(struct spi_controller *ctlr) 1956 { 1957 unsigned long flags; 1958 unsigned limit = 500; 1959 int ret = 0; 1960 1961 spin_lock_irqsave(&ctlr->queue_lock, flags); 1962 1963 /* 1964 * This is a bit lame, but is optimized for the common execution path. 1965 * A wait_queue on the ctlr->busy could be used, but then the common 1966 * execution path (pump_messages) would be required to call wake_up or 1967 * friends on every SPI message. Do this instead. 1968 */ 1969 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) { 1970 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1971 usleep_range(10000, 11000); 1972 spin_lock_irqsave(&ctlr->queue_lock, flags); 1973 } 1974 1975 if (!list_empty(&ctlr->queue) || ctlr->busy) 1976 ret = -EBUSY; 1977 else 1978 ctlr->running = false; 1979 1980 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1981 1982 if (ret) { 1983 dev_warn(&ctlr->dev, "could not stop message queue\n"); 1984 return ret; 1985 } 1986 return ret; 1987 } 1988 1989 static int spi_destroy_queue(struct spi_controller *ctlr) 1990 { 1991 int ret; 1992 1993 ret = spi_stop_queue(ctlr); 1994 1995 /* 1996 * kthread_flush_worker will block until all work is done. 1997 * If the reason that stop_queue timed out is that the work will never 1998 * finish, then it does no good to call flush/stop thread, so 1999 * return anyway. 
2000 */ 2001 if (ret) { 2002 dev_err(&ctlr->dev, "problem destroying queue\n"); 2003 return ret; 2004 } 2005 2006 kthread_destroy_worker(ctlr->kworker); 2007 2008 return 0; 2009 } 2010 2011 static int __spi_queued_transfer(struct spi_device *spi, 2012 struct spi_message *msg, 2013 bool need_pump) 2014 { 2015 struct spi_controller *ctlr = spi->controller; 2016 unsigned long flags; 2017 2018 spin_lock_irqsave(&ctlr->queue_lock, flags); 2019 2020 if (!ctlr->running) { 2021 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2022 return -ESHUTDOWN; 2023 } 2024 msg->actual_length = 0; 2025 msg->status = -EINPROGRESS; 2026 2027 list_add_tail(&msg->queue, &ctlr->queue); 2028 if (!ctlr->busy && need_pump) 2029 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); 2030 2031 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2032 return 0; 2033 } 2034 2035 /** 2036 * spi_queued_transfer - transfer function for queued transfers 2037 * @spi: spi device which is requesting transfer 2038 * @msg: spi message which is to handled is queued to driver queue 2039 * 2040 * Return: zero on success, else a negative error code. 2041 */ 2042 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) 2043 { 2044 return __spi_queued_transfer(spi, msg, true); 2045 } 2046 2047 static int spi_controller_initialize_queue(struct spi_controller *ctlr) 2048 { 2049 int ret; 2050 2051 ctlr->transfer = spi_queued_transfer; 2052 if (!ctlr->transfer_one_message) 2053 ctlr->transfer_one_message = spi_transfer_one_message; 2054 2055 /* Initialize and start queue */ 2056 ret = spi_init_queue(ctlr); 2057 if (ret) { 2058 dev_err(&ctlr->dev, "problem initializing queue\n"); 2059 goto err_init_queue; 2060 } 2061 ctlr->queued = true; 2062 ret = spi_start_queue(ctlr); 2063 if (ret) { 2064 dev_err(&ctlr->dev, "problem starting queue\n"); 2065 goto err_start_queue; 2066 } 2067 2068 return 0; 2069 2070 err_start_queue: 2071 spi_destroy_queue(ctlr); 2072 err_init_queue: 2073 return ret; 2074 } 2075 2076 /** 2077 * spi_flush_queue - Send all pending messages in the queue from the callers' 2078 * context 2079 * @ctlr: controller to process queue for 2080 * 2081 * This should be used when one wants to ensure all pending messages have been 2082 * sent before doing something. Is used by the spi-mem code to make sure SPI 2083 * memory operations do not preempt regular SPI transfers that have been queued 2084 * before the spi-mem operation. 2085 */ 2086 void spi_flush_queue(struct spi_controller *ctlr) 2087 { 2088 if (ctlr->transfer == spi_queued_transfer) 2089 __spi_pump_messages(ctlr, false); 2090 } 2091 2092 /*-------------------------------------------------------------------------*/ 2093 2094 #if defined(CONFIG_OF) 2095 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, 2096 struct device_node *nc) 2097 { 2098 u32 value; 2099 int rc; 2100 2101 /* Mode (clock phase/polarity/etc.) 
*/ 2102 if (of_property_read_bool(nc, "spi-cpha")) 2103 spi->mode |= SPI_CPHA; 2104 if (of_property_read_bool(nc, "spi-cpol")) 2105 spi->mode |= SPI_CPOL; 2106 if (of_property_read_bool(nc, "spi-3wire")) 2107 spi->mode |= SPI_3WIRE; 2108 if (of_property_read_bool(nc, "spi-lsb-first")) 2109 spi->mode |= SPI_LSB_FIRST; 2110 if (of_property_read_bool(nc, "spi-cs-high")) 2111 spi->mode |= SPI_CS_HIGH; 2112 2113 /* Device DUAL/QUAD mode */ 2114 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 2115 switch (value) { 2116 case 0: 2117 spi->mode |= SPI_NO_TX; 2118 break; 2119 case 1: 2120 break; 2121 case 2: 2122 spi->mode |= SPI_TX_DUAL; 2123 break; 2124 case 4: 2125 spi->mode |= SPI_TX_QUAD; 2126 break; 2127 case 8: 2128 spi->mode |= SPI_TX_OCTAL; 2129 break; 2130 default: 2131 dev_warn(&ctlr->dev, 2132 "spi-tx-bus-width %d not supported\n", 2133 value); 2134 break; 2135 } 2136 } 2137 2138 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 2139 switch (value) { 2140 case 0: 2141 spi->mode |= SPI_NO_RX; 2142 break; 2143 case 1: 2144 break; 2145 case 2: 2146 spi->mode |= SPI_RX_DUAL; 2147 break; 2148 case 4: 2149 spi->mode |= SPI_RX_QUAD; 2150 break; 2151 case 8: 2152 spi->mode |= SPI_RX_OCTAL; 2153 break; 2154 default: 2155 dev_warn(&ctlr->dev, 2156 "spi-rx-bus-width %d not supported\n", 2157 value); 2158 break; 2159 } 2160 } 2161 2162 if (spi_controller_is_slave(ctlr)) { 2163 if (!of_node_name_eq(nc, "slave")) { 2164 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n", 2165 nc); 2166 return -EINVAL; 2167 } 2168 return 0; 2169 } 2170 2171 /* Device address */ 2172 rc = of_property_read_u32(nc, "reg", &value); 2173 if (rc) { 2174 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n", 2175 nc, rc); 2176 return rc; 2177 } 2178 spi->chip_select = value; 2179 2180 /* Device speed */ 2181 if (!of_property_read_u32(nc, "spi-max-frequency", &value)) 2182 spi->max_speed_hz = value; 2183 2184 return 0; 2185 } 2186 2187 static struct spi_device * 2188 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc) 2189 { 2190 struct spi_device *spi; 2191 int rc; 2192 2193 /* Alloc an spi_device */ 2194 spi = spi_alloc_device(ctlr); 2195 if (!spi) { 2196 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc); 2197 rc = -ENOMEM; 2198 goto err_out; 2199 } 2200 2201 /* Select device driver */ 2202 rc = of_modalias_node(nc, spi->modalias, 2203 sizeof(spi->modalias)); 2204 if (rc < 0) { 2205 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc); 2206 goto err_out; 2207 } 2208 2209 rc = of_spi_parse_dt(ctlr, spi, nc); 2210 if (rc) 2211 goto err_out; 2212 2213 /* Store a pointer to the node in the device structure */ 2214 of_node_get(nc); 2215 spi->dev.of_node = nc; 2216 spi->dev.fwnode = of_fwnode_handle(nc); 2217 2218 /* Register the new device */ 2219 rc = spi_add_device(spi); 2220 if (rc) { 2221 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc); 2222 goto err_of_node_put; 2223 } 2224 2225 return spi; 2226 2227 err_of_node_put: 2228 of_node_put(nc); 2229 err_out: 2230 spi_dev_put(spi); 2231 return ERR_PTR(rc); 2232 } 2233 2234 /** 2235 * of_register_spi_devices() - Register child devices onto the SPI bus 2236 * @ctlr: Pointer to spi_controller device 2237 * 2238 * Registers an spi_device for each child node of controller node which 2239 * represents a valid SPI slave. 
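 *
 * For example, a child node like the following illustrative devicetree
 * fragment (the flash device itself is hypothetical) is turned into an
 * spi_device with SPI_TX_QUAD/SPI_RX_QUAD set in spi->mode, chip select 0
 * and a 20 MHz speed ceiling by of_spi_parse_dt() above:
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;
 *		spi-max-frequency = <20000000>;
 *		spi-tx-bus-width = <4>;
 *		spi-rx-bus-width = <4>;
 *	};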
2240 */ 2241 static void of_register_spi_devices(struct spi_controller *ctlr) 2242 { 2243 struct spi_device *spi; 2244 struct device_node *nc; 2245 2246 if (!ctlr->dev.of_node) 2247 return; 2248 2249 for_each_available_child_of_node(ctlr->dev.of_node, nc) { 2250 if (of_node_test_and_set_flag(nc, OF_POPULATED)) 2251 continue; 2252 spi = of_register_spi_device(ctlr, nc); 2253 if (IS_ERR(spi)) { 2254 dev_warn(&ctlr->dev, 2255 "Failed to create SPI device for %pOF\n", nc); 2256 of_node_clear_flag(nc, OF_POPULATED); 2257 } 2258 } 2259 } 2260 #else 2261 static void of_register_spi_devices(struct spi_controller *ctlr) { } 2262 #endif 2263 2264 /** 2265 * spi_new_ancillary_device() - Register ancillary SPI device 2266 * @spi: Pointer to the main SPI device registering the ancillary device 2267 * @chip_select: Chip Select of the ancillary device 2268 * 2269 * Register an ancillary SPI device; for example some chips have a chip-select 2270 * for normal device usage and another one for setup/firmware upload. 2271 * 2272 * This may only be called from main SPI device's probe routine. 2273 * 2274 * Return: 0 on success; negative errno on failure 2275 */ 2276 struct spi_device *spi_new_ancillary_device(struct spi_device *spi, 2277 u8 chip_select) 2278 { 2279 struct spi_device *ancillary; 2280 int rc = 0; 2281 2282 /* Alloc an spi_device */ 2283 ancillary = spi_alloc_device(spi->controller); 2284 if (!ancillary) { 2285 rc = -ENOMEM; 2286 goto err_out; 2287 } 2288 2289 strlcpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias)); 2290 2291 /* Use provided chip-select for ancillary device */ 2292 ancillary->chip_select = chip_select; 2293 2294 /* Take over SPI mode/speed from SPI main device */ 2295 ancillary->max_speed_hz = spi->max_speed_hz; 2296 ancillary->mode = spi->mode; 2297 2298 /* Register the new device */ 2299 rc = spi_add_device_locked(ancillary); 2300 if (rc) { 2301 dev_err(&spi->dev, "failed to register ancillary device\n"); 2302 goto err_out; 2303 } 2304 2305 return ancillary; 2306 2307 err_out: 2308 spi_dev_put(ancillary); 2309 return ERR_PTR(rc); 2310 } 2311 EXPORT_SYMBOL_GPL(spi_new_ancillary_device); 2312 2313 #ifdef CONFIG_ACPI 2314 struct acpi_spi_lookup { 2315 struct spi_controller *ctlr; 2316 u32 max_speed_hz; 2317 u32 mode; 2318 int irq; 2319 u8 bits_per_word; 2320 u8 chip_select; 2321 }; 2322 2323 static void acpi_spi_parse_apple_properties(struct acpi_device *dev, 2324 struct acpi_spi_lookup *lookup) 2325 { 2326 const union acpi_object *obj; 2327 2328 if (!x86_apple_machine) 2329 return; 2330 2331 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj) 2332 && obj->buffer.length >= 4) 2333 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer; 2334 2335 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj) 2336 && obj->buffer.length == 8) 2337 lookup->bits_per_word = *(u64 *)obj->buffer.pointer; 2338 2339 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj) 2340 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer) 2341 lookup->mode |= SPI_LSB_FIRST; 2342 2343 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj) 2344 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 2345 lookup->mode |= SPI_CPOL; 2346 2347 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj) 2348 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 2349 lookup->mode |= SPI_CPHA; 2350 } 2351 2352 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 2353 { 2354 struct 
acpi_spi_lookup *lookup = data; 2355 struct spi_controller *ctlr = lookup->ctlr; 2356 2357 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 2358 struct acpi_resource_spi_serialbus *sb; 2359 acpi_handle parent_handle; 2360 acpi_status status; 2361 2362 sb = &ares->data.spi_serial_bus; 2363 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 2364 2365 status = acpi_get_handle(NULL, 2366 sb->resource_source.string_ptr, 2367 &parent_handle); 2368 2369 if (ACPI_FAILURE(status) || 2370 ACPI_HANDLE(ctlr->dev.parent) != parent_handle) 2371 return -ENODEV; 2372 2373 /* 2374 * ACPI DeviceSelection numbering is handled by the 2375 * host controller driver in Windows and can vary 2376 * from driver to driver. In Linux we always expect 2377 * 0 .. max - 1 so we need to ask the driver to 2378 * translate between the two schemes. 2379 */ 2380 if (ctlr->fw_translate_cs) { 2381 int cs = ctlr->fw_translate_cs(ctlr, 2382 sb->device_selection); 2383 if (cs < 0) 2384 return cs; 2385 lookup->chip_select = cs; 2386 } else { 2387 lookup->chip_select = sb->device_selection; 2388 } 2389 2390 lookup->max_speed_hz = sb->connection_speed; 2391 lookup->bits_per_word = sb->data_bit_length; 2392 2393 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) 2394 lookup->mode |= SPI_CPHA; 2395 if (sb->clock_polarity == ACPI_SPI_START_HIGH) 2396 lookup->mode |= SPI_CPOL; 2397 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) 2398 lookup->mode |= SPI_CS_HIGH; 2399 } 2400 } else if (lookup->irq < 0) { 2401 struct resource r; 2402 2403 if (acpi_dev_resource_interrupt(ares, 0, &r)) 2404 lookup->irq = r.start; 2405 } 2406 2407 /* Always tell the ACPI core to skip this resource */ 2408 return 1; 2409 } 2410 2411 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, 2412 struct acpi_device *adev) 2413 { 2414 acpi_handle parent_handle = NULL; 2415 struct list_head resource_list; 2416 struct acpi_spi_lookup lookup = {}; 2417 struct spi_device *spi; 2418 int ret; 2419 2420 if (acpi_bus_get_status(adev) || !adev->status.present || 2421 acpi_device_enumerated(adev)) 2422 return AE_OK; 2423 2424 lookup.ctlr = ctlr; 2425 lookup.irq = -1; 2426 2427 INIT_LIST_HEAD(&resource_list); 2428 ret = acpi_dev_get_resources(adev, &resource_list, 2429 acpi_spi_add_resource, &lookup); 2430 acpi_dev_free_resource_list(&resource_list); 2431 2432 if (ret < 0) 2433 /* found SPI in _CRS but it points to another controller */ 2434 return AE_OK; 2435 2436 if (!lookup.max_speed_hz && 2437 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) && 2438 ACPI_HANDLE(ctlr->dev.parent) == parent_handle) { 2439 /* Apple does not use _CRS but nested devices for SPI slaves */ 2440 acpi_spi_parse_apple_properties(adev, &lookup); 2441 } 2442 2443 if (!lookup.max_speed_hz) 2444 return AE_OK; 2445 2446 spi = spi_alloc_device(ctlr); 2447 if (!spi) { 2448 dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n", 2449 dev_name(&adev->dev)); 2450 return AE_NO_MEMORY; 2451 } 2452 2453 2454 ACPI_COMPANION_SET(&spi->dev, adev); 2455 spi->max_speed_hz = lookup.max_speed_hz; 2456 spi->mode |= lookup.mode; 2457 spi->irq = lookup.irq; 2458 spi->bits_per_word = lookup.bits_per_word; 2459 spi->chip_select = lookup.chip_select; 2460 2461 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, 2462 sizeof(spi->modalias)); 2463 2464 if (spi->irq < 0) 2465 spi->irq = acpi_dev_gpio_irq_get(adev, 0); 2466 2467 acpi_device_set_enumerated(adev); 2468 2469 adev->power.flags.ignore_parent = true; 2470 if (spi_add_device(spi)) { 2471 adev->power.flags.ignore_parent = false; 
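		/*
		 * Registration failed: log it and drop the reference
		 * taken by spi_alloc_device() above.
		 */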
2472 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n", 2473 dev_name(&adev->dev)); 2474 spi_dev_put(spi); 2475 } 2476 2477 return AE_OK; 2478 } 2479 2480 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, 2481 void *data, void **return_value) 2482 { 2483 struct spi_controller *ctlr = data; 2484 struct acpi_device *adev; 2485 2486 if (acpi_bus_get_device(handle, &adev)) 2487 return AE_OK; 2488 2489 return acpi_register_spi_device(ctlr, adev); 2490 } 2491 2492 #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32 2493 2494 static void acpi_register_spi_devices(struct spi_controller *ctlr) 2495 { 2496 acpi_status status; 2497 acpi_handle handle; 2498 2499 handle = ACPI_HANDLE(ctlr->dev.parent); 2500 if (!handle) 2501 return; 2502 2503 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 2504 SPI_ACPI_ENUMERATE_MAX_DEPTH, 2505 acpi_spi_add_device, NULL, ctlr, NULL); 2506 if (ACPI_FAILURE(status)) 2507 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n"); 2508 } 2509 #else 2510 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {} 2511 #endif /* CONFIG_ACPI */ 2512 2513 static void spi_controller_release(struct device *dev) 2514 { 2515 struct spi_controller *ctlr; 2516 2517 ctlr = container_of(dev, struct spi_controller, dev); 2518 kfree(ctlr); 2519 } 2520 2521 static struct class spi_master_class = { 2522 .name = "spi_master", 2523 .owner = THIS_MODULE, 2524 .dev_release = spi_controller_release, 2525 .dev_groups = spi_master_groups, 2526 }; 2527 2528 #ifdef CONFIG_SPI_SLAVE 2529 /** 2530 * spi_slave_abort - abort the ongoing transfer request on an SPI slave 2531 * controller 2532 * @spi: device used for the current transfer 2533 */ 2534 int spi_slave_abort(struct spi_device *spi) 2535 { 2536 struct spi_controller *ctlr = spi->controller; 2537 2538 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort) 2539 return ctlr->slave_abort(ctlr); 2540 2541 return -ENOTSUPP; 2542 } 2543 EXPORT_SYMBOL_GPL(spi_slave_abort); 2544 2545 static int match_true(struct device *dev, void *data) 2546 { 2547 return 1; 2548 } 2549 2550 static ssize_t slave_show(struct device *dev, struct device_attribute *attr, 2551 char *buf) 2552 { 2553 struct spi_controller *ctlr = container_of(dev, struct spi_controller, 2554 dev); 2555 struct device *child; 2556 2557 child = device_find_child(&ctlr->dev, NULL, match_true); 2558 return sprintf(buf, "%s\n", 2559 child ? 
to_spi_device(child)->modalias : NULL); 2560 } 2561 2562 static ssize_t slave_store(struct device *dev, struct device_attribute *attr, 2563 const char *buf, size_t count) 2564 { 2565 struct spi_controller *ctlr = container_of(dev, struct spi_controller, 2566 dev); 2567 struct spi_device *spi; 2568 struct device *child; 2569 char name[32]; 2570 int rc; 2571 2572 rc = sscanf(buf, "%31s", name); 2573 if (rc != 1 || !name[0]) 2574 return -EINVAL; 2575 2576 child = device_find_child(&ctlr->dev, NULL, match_true); 2577 if (child) { 2578 /* Remove registered slave */ 2579 device_unregister(child); 2580 put_device(child); 2581 } 2582 2583 if (strcmp(name, "(null)")) { 2584 /* Register new slave */ 2585 spi = spi_alloc_device(ctlr); 2586 if (!spi) 2587 return -ENOMEM; 2588 2589 strlcpy(spi->modalias, name, sizeof(spi->modalias)); 2590 2591 rc = spi_add_device(spi); 2592 if (rc) { 2593 spi_dev_put(spi); 2594 return rc; 2595 } 2596 } 2597 2598 return count; 2599 } 2600 2601 static DEVICE_ATTR_RW(slave); 2602 2603 static struct attribute *spi_slave_attrs[] = { 2604 &dev_attr_slave.attr, 2605 NULL, 2606 }; 2607 2608 static const struct attribute_group spi_slave_group = { 2609 .attrs = spi_slave_attrs, 2610 }; 2611 2612 static const struct attribute_group *spi_slave_groups[] = { 2613 &spi_controller_statistics_group, 2614 &spi_slave_group, 2615 NULL, 2616 }; 2617 2618 static struct class spi_slave_class = { 2619 .name = "spi_slave", 2620 .owner = THIS_MODULE, 2621 .dev_release = spi_controller_release, 2622 .dev_groups = spi_slave_groups, 2623 }; 2624 #else 2625 extern struct class spi_slave_class; /* dummy */ 2626 #endif 2627 2628 /** 2629 * __spi_alloc_controller - allocate an SPI master or slave controller 2630 * @dev: the controller, possibly using the platform_bus 2631 * @size: how much zeroed driver-private data to allocate; the pointer to this 2632 * memory is in the driver_data field of the returned device, accessible 2633 * with spi_controller_get_devdata(); the memory is cacheline aligned; 2634 * drivers granting DMA access to portions of their private data need to 2635 * round up @size using ALIGN(size, dma_get_cache_alignment()). 2636 * @slave: flag indicating whether to allocate an SPI master (false) or SPI 2637 * slave (true) controller 2638 * Context: can sleep 2639 * 2640 * This call is used only by SPI controller drivers, which are the 2641 * only ones directly touching chip registers. It's how they allocate 2642 * an spi_controller structure, prior to calling spi_register_controller(). 2643 * 2644 * This must be called from context that can sleep. 2645 * 2646 * The caller is responsible for assigning the bus number and initializing the 2647 * controller's methods before calling spi_register_controller(); and (after 2648 * errors adding the device) calling spi_controller_put() to prevent a memory 2649 * leak. 2650 * 2651 * Return: the SPI controller structure on success, else NULL. 
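 *
 * A typical probe sequence built on this helper (a sketch; the "foo" names
 * are hypothetical) looks like:
 *
 *	struct spi_controller *ctlr;
 *	struct foo_priv *priv;
 *	int ret;
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);
 *	ctlr->num_chipselect = 4;
 *	ctlr->transfer_one = foo_transfer_one;
 *	ret = spi_register_controller(ctlr);
 *	if (ret)
 *		spi_controller_put(ctlr);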
2652 */ 2653 struct spi_controller *__spi_alloc_controller(struct device *dev, 2654 unsigned int size, bool slave) 2655 { 2656 struct spi_controller *ctlr; 2657 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment()); 2658 2659 if (!dev) 2660 return NULL; 2661 2662 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL); 2663 if (!ctlr) 2664 return NULL; 2665 2666 device_initialize(&ctlr->dev); 2667 INIT_LIST_HEAD(&ctlr->queue); 2668 spin_lock_init(&ctlr->queue_lock); 2669 spin_lock_init(&ctlr->bus_lock_spinlock); 2670 mutex_init(&ctlr->bus_lock_mutex); 2671 mutex_init(&ctlr->io_mutex); 2672 mutex_init(&ctlr->add_lock); 2673 ctlr->bus_num = -1; 2674 ctlr->num_chipselect = 1; 2675 ctlr->slave = slave; 2676 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave) 2677 ctlr->dev.class = &spi_slave_class; 2678 else 2679 ctlr->dev.class = &spi_master_class; 2680 ctlr->dev.parent = dev; 2681 pm_suspend_ignore_children(&ctlr->dev, true); 2682 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size); 2683 2684 return ctlr; 2685 } 2686 EXPORT_SYMBOL_GPL(__spi_alloc_controller); 2687 2688 static void devm_spi_release_controller(struct device *dev, void *ctlr) 2689 { 2690 spi_controller_put(*(struct spi_controller **)ctlr); 2691 } 2692 2693 /** 2694 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller() 2695 * @dev: physical device of SPI controller 2696 * @size: how much zeroed driver-private data to allocate 2697 * @slave: whether to allocate an SPI master (false) or SPI slave (true) 2698 * Context: can sleep 2699 * 2700 * Allocate an SPI controller and automatically release a reference on it 2701 * when @dev is unbound from its driver. Drivers are thus relieved from 2702 * having to call spi_controller_put(). 2703 * 2704 * The arguments to this function are identical to __spi_alloc_controller(). 2705 * 2706 * Return: the SPI controller structure on success, else NULL. 
2707 */ 2708 struct spi_controller *__devm_spi_alloc_controller(struct device *dev, 2709 unsigned int size, 2710 bool slave) 2711 { 2712 struct spi_controller **ptr, *ctlr; 2713 2714 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr), 2715 GFP_KERNEL); 2716 if (!ptr) 2717 return NULL; 2718 2719 ctlr = __spi_alloc_controller(dev, size, slave); 2720 if (ctlr) { 2721 ctlr->devm_allocated = true; 2722 *ptr = ctlr; 2723 devres_add(dev, ptr); 2724 } else { 2725 devres_free(ptr); 2726 } 2727 2728 return ctlr; 2729 } 2730 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller); 2731 2732 #ifdef CONFIG_OF 2733 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr) 2734 { 2735 int nb, i, *cs; 2736 struct device_node *np = ctlr->dev.of_node; 2737 2738 if (!np) 2739 return 0; 2740 2741 nb = of_gpio_named_count(np, "cs-gpios"); 2742 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); 2743 2744 /* Return error only for an incorrectly formed cs-gpios property */ 2745 if (nb == 0 || nb == -ENOENT) 2746 return 0; 2747 else if (nb < 0) 2748 return nb; 2749 2750 cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int), 2751 GFP_KERNEL); 2752 ctlr->cs_gpios = cs; 2753 2754 if (!ctlr->cs_gpios) 2755 return -ENOMEM; 2756 2757 for (i = 0; i < ctlr->num_chipselect; i++) 2758 cs[i] = -ENOENT; 2759 2760 for (i = 0; i < nb; i++) 2761 cs[i] = of_get_named_gpio(np, "cs-gpios", i); 2762 2763 return 0; 2764 } 2765 #else 2766 static int of_spi_get_gpio_numbers(struct spi_controller *ctlr) 2767 { 2768 return 0; 2769 } 2770 #endif 2771 2772 /** 2773 * spi_get_gpio_descs() - grab chip select GPIOs for the master 2774 * @ctlr: The SPI master to grab GPIO descriptors for 2775 */ 2776 static int spi_get_gpio_descs(struct spi_controller *ctlr) 2777 { 2778 int nb, i; 2779 struct gpio_desc **cs; 2780 struct device *dev = &ctlr->dev; 2781 unsigned long native_cs_mask = 0; 2782 unsigned int num_cs_gpios = 0; 2783 2784 nb = gpiod_count(dev, "cs"); 2785 if (nb < 0) { 2786 /* No GPIOs at all is fine, else return the error */ 2787 if (nb == -ENOENT) 2788 return 0; 2789 return nb; 2790 } 2791 2792 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); 2793 2794 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs), 2795 GFP_KERNEL); 2796 if (!cs) 2797 return -ENOMEM; 2798 ctlr->cs_gpiods = cs; 2799 2800 for (i = 0; i < nb; i++) { 2801 /* 2802 * Most chipselects are active low, the inverted 2803 * semantics are handled by special quirks in gpiolib, 2804 * so initializing them GPIOD_OUT_LOW here means 2805 * "unasserted", in most cases this will drive the physical 2806 * line high. 2807 */ 2808 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i, 2809 GPIOD_OUT_LOW); 2810 if (IS_ERR(cs[i])) 2811 return PTR_ERR(cs[i]); 2812 2813 if (cs[i]) { 2814 /* 2815 * If we find a CS GPIO, name it after the device and 2816 * chip select line. 
2817 */ 2818 char *gpioname; 2819 2820 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d", 2821 dev_name(dev), i); 2822 if (!gpioname) 2823 return -ENOMEM; 2824 gpiod_set_consumer_name(cs[i], gpioname); 2825 num_cs_gpios++; 2826 continue; 2827 } 2828 2829 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) { 2830 dev_err(dev, "Invalid native chip select %d\n", i); 2831 return -EINVAL; 2832 } 2833 native_cs_mask |= BIT(i); 2834 } 2835 2836 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1; 2837 2838 if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios && 2839 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) { 2840 dev_err(dev, "No unused native chip select available\n"); 2841 return -EINVAL; 2842 } 2843 2844 return 0; 2845 } 2846 2847 static int spi_controller_check_ops(struct spi_controller *ctlr) 2848 { 2849 /* 2850 * The controller may implement only the high-level SPI-memory like 2851 * operations if it does not support regular SPI transfers, and this is 2852 * valid use case. 2853 * If ->mem_ops is NULL, we request that at least one of the 2854 * ->transfer_xxx() method be implemented. 2855 */ 2856 if (ctlr->mem_ops) { 2857 if (!ctlr->mem_ops->exec_op) 2858 return -EINVAL; 2859 } else if (!ctlr->transfer && !ctlr->transfer_one && 2860 !ctlr->transfer_one_message) { 2861 return -EINVAL; 2862 } 2863 2864 return 0; 2865 } 2866 2867 /** 2868 * spi_register_controller - register SPI master or slave controller 2869 * @ctlr: initialized master, originally from spi_alloc_master() or 2870 * spi_alloc_slave() 2871 * Context: can sleep 2872 * 2873 * SPI controllers connect to their drivers using some non-SPI bus, 2874 * such as the platform bus. The final stage of probe() in that code 2875 * includes calling spi_register_controller() to hook up to this SPI bus glue. 2876 * 2877 * SPI controllers use board specific (often SOC specific) bus numbers, 2878 * and board-specific addressing for SPI devices combines those numbers 2879 * with chip select numbers. Since SPI does not directly support dynamic 2880 * device identification, boards need configuration tables telling which 2881 * chip is at which address. 2882 * 2883 * This must be called from context that can sleep. It returns zero on 2884 * success, else a negative error code (dropping the controller's refcount). 2885 * After a successful return, the caller is responsible for calling 2886 * spi_unregister_controller(). 2887 * 2888 * Return: zero on success, else a negative error code. 2889 */ 2890 int spi_register_controller(struct spi_controller *ctlr) 2891 { 2892 struct device *dev = ctlr->dev.parent; 2893 struct boardinfo *bi; 2894 int status; 2895 int id, first_dynamic; 2896 2897 if (!dev) 2898 return -ENODEV; 2899 2900 /* 2901 * Make sure all necessary hooks are implemented before registering 2902 * the SPI controller. 2903 */ 2904 status = spi_controller_check_ops(ctlr); 2905 if (status) 2906 return status; 2907 2908 if (ctlr->bus_num >= 0) { 2909 /* devices with a fixed bus num must check-in with the num */ 2910 mutex_lock(&board_lock); 2911 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, 2912 ctlr->bus_num + 1, GFP_KERNEL); 2913 mutex_unlock(&board_lock); 2914 if (WARN(id < 0, "couldn't get idr")) 2915 return id == -ENOSPC ? 
-EBUSY : id; 2916 ctlr->bus_num = id; 2917 } else if (ctlr->dev.of_node) { 2918 /* allocate dynamic bus number using Linux idr */ 2919 id = of_alias_get_id(ctlr->dev.of_node, "spi"); 2920 if (id >= 0) { 2921 ctlr->bus_num = id; 2922 mutex_lock(&board_lock); 2923 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, 2924 ctlr->bus_num + 1, GFP_KERNEL); 2925 mutex_unlock(&board_lock); 2926 if (WARN(id < 0, "couldn't get idr")) 2927 return id == -ENOSPC ? -EBUSY : id; 2928 } 2929 } 2930 if (ctlr->bus_num < 0) { 2931 first_dynamic = of_alias_get_highest_id("spi"); 2932 if (first_dynamic < 0) 2933 first_dynamic = 0; 2934 else 2935 first_dynamic++; 2936 2937 mutex_lock(&board_lock); 2938 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic, 2939 0, GFP_KERNEL); 2940 mutex_unlock(&board_lock); 2941 if (WARN(id < 0, "couldn't get idr")) 2942 return id; 2943 ctlr->bus_num = id; 2944 } 2945 ctlr->bus_lock_flag = 0; 2946 init_completion(&ctlr->xfer_completion); 2947 if (!ctlr->max_dma_len) 2948 ctlr->max_dma_len = INT_MAX; 2949 2950 /* 2951 * Register the device, then userspace will see it. 2952 * Registration fails if the bus ID is in use. 2953 */ 2954 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num); 2955 2956 if (!spi_controller_is_slave(ctlr)) { 2957 if (ctlr->use_gpio_descriptors) { 2958 status = spi_get_gpio_descs(ctlr); 2959 if (status) 2960 goto free_bus_id; 2961 /* 2962 * A controller using GPIO descriptors always 2963 * supports SPI_CS_HIGH if need be. 2964 */ 2965 ctlr->mode_bits |= SPI_CS_HIGH; 2966 } else { 2967 /* Legacy code path for GPIOs from DT */ 2968 status = of_spi_get_gpio_numbers(ctlr); 2969 if (status) 2970 goto free_bus_id; 2971 } 2972 } 2973 2974 /* 2975 * Even if it's just one always-selected device, there must 2976 * be at least one chipselect. 2977 */ 2978 if (!ctlr->num_chipselect) { 2979 status = -EINVAL; 2980 goto free_bus_id; 2981 } 2982 2983 status = device_add(&ctlr->dev); 2984 if (status < 0) 2985 goto free_bus_id; 2986 dev_dbg(dev, "registered %s %s\n", 2987 spi_controller_is_slave(ctlr) ? "slave" : "master", 2988 dev_name(&ctlr->dev)); 2989 2990 /* 2991 * If we're using a queued driver, start the queue. Note that we don't 2992 * need the queueing logic if the driver is only supporting high-level 2993 * memory operations. 
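	 * A controller that populates only ctlr->mem_ops (see
	 * spi_controller_check_ops() above) therefore takes neither branch
	 * below.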
2994 */ 2995 if (ctlr->transfer) { 2996 dev_info(dev, "controller is unqueued, this is deprecated\n"); 2997 } else if (ctlr->transfer_one || ctlr->transfer_one_message) { 2998 status = spi_controller_initialize_queue(ctlr); 2999 if (status) { 3000 device_del(&ctlr->dev); 3001 goto free_bus_id; 3002 } 3003 } 3004 /* add statistics */ 3005 spin_lock_init(&ctlr->statistics.lock); 3006 3007 mutex_lock(&board_lock); 3008 list_add_tail(&ctlr->list, &spi_controller_list); 3009 list_for_each_entry(bi, &board_list, list) 3010 spi_match_controller_to_boardinfo(ctlr, &bi->board_info); 3011 mutex_unlock(&board_lock); 3012 3013 /* Register devices from the device tree and ACPI */ 3014 of_register_spi_devices(ctlr); 3015 acpi_register_spi_devices(ctlr); 3016 return status; 3017 3018 free_bus_id: 3019 mutex_lock(&board_lock); 3020 idr_remove(&spi_master_idr, ctlr->bus_num); 3021 mutex_unlock(&board_lock); 3022 return status; 3023 } 3024 EXPORT_SYMBOL_GPL(spi_register_controller); 3025 3026 static void devm_spi_unregister(void *ctlr) 3027 { 3028 spi_unregister_controller(ctlr); 3029 } 3030 3031 /** 3032 * devm_spi_register_controller - register managed SPI master or slave 3033 * controller 3034 * @dev: device managing SPI controller 3035 * @ctlr: initialized controller, originally from spi_alloc_master() or 3036 * spi_alloc_slave() 3037 * Context: can sleep 3038 * 3039 * Register a SPI device as with spi_register_controller() which will 3040 * automatically be unregistered and freed. 3041 * 3042 * Return: zero on success, else a negative error code. 3043 */ 3044 int devm_spi_register_controller(struct device *dev, 3045 struct spi_controller *ctlr) 3046 { 3047 int ret; 3048 3049 ret = spi_register_controller(ctlr); 3050 if (ret) 3051 return ret; 3052 3053 return devm_add_action_or_reset(dev, devm_spi_unregister, ctlr); 3054 } 3055 EXPORT_SYMBOL_GPL(devm_spi_register_controller); 3056 3057 static int __unregister(struct device *dev, void *null) 3058 { 3059 spi_unregister_device(to_spi_device(dev)); 3060 return 0; 3061 } 3062 3063 /** 3064 * spi_unregister_controller - unregister SPI master or slave controller 3065 * @ctlr: the controller being unregistered 3066 * Context: can sleep 3067 * 3068 * This call is used only by SPI controller drivers, which are the 3069 * only ones directly touching chip registers. 3070 * 3071 * This must be called from context that can sleep. 3072 * 3073 * Note that this function also drops a reference to the controller. 
3074 */ 3075 void spi_unregister_controller(struct spi_controller *ctlr) 3076 { 3077 struct spi_controller *found; 3078 int id = ctlr->bus_num; 3079 3080 /* Prevent addition of new devices, unregister existing ones */ 3081 if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) 3082 mutex_lock(&ctlr->add_lock); 3083 3084 device_for_each_child(&ctlr->dev, NULL, __unregister); 3085 3086 /* First make sure that this controller was ever added */ 3087 mutex_lock(&board_lock); 3088 found = idr_find(&spi_master_idr, id); 3089 mutex_unlock(&board_lock); 3090 if (ctlr->queued) { 3091 if (spi_destroy_queue(ctlr)) 3092 dev_err(&ctlr->dev, "queue remove failed\n"); 3093 } 3094 mutex_lock(&board_lock); 3095 list_del(&ctlr->list); 3096 mutex_unlock(&board_lock); 3097 3098 device_del(&ctlr->dev); 3099 3100 /* free bus id */ 3101 mutex_lock(&board_lock); 3102 if (found == ctlr) 3103 idr_remove(&spi_master_idr, id); 3104 mutex_unlock(&board_lock); 3105 3106 if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) 3107 mutex_unlock(&ctlr->add_lock); 3108 3109 /* Release the last reference on the controller if its driver 3110 * has not yet been converted to devm_spi_alloc_master/slave(). 3111 */ 3112 if (!ctlr->devm_allocated) 3113 put_device(&ctlr->dev); 3114 } 3115 EXPORT_SYMBOL_GPL(spi_unregister_controller); 3116 3117 int spi_controller_suspend(struct spi_controller *ctlr) 3118 { 3119 int ret; 3120 3121 /* Basically no-ops for non-queued controllers */ 3122 if (!ctlr->queued) 3123 return 0; 3124 3125 ret = spi_stop_queue(ctlr); 3126 if (ret) 3127 dev_err(&ctlr->dev, "queue stop failed\n"); 3128 3129 return ret; 3130 } 3131 EXPORT_SYMBOL_GPL(spi_controller_suspend); 3132 3133 int spi_controller_resume(struct spi_controller *ctlr) 3134 { 3135 int ret; 3136 3137 if (!ctlr->queued) 3138 return 0; 3139 3140 ret = spi_start_queue(ctlr); 3141 if (ret) 3142 dev_err(&ctlr->dev, "queue restart failed\n"); 3143 3144 return ret; 3145 } 3146 EXPORT_SYMBOL_GPL(spi_controller_resume); 3147 3148 /*-------------------------------------------------------------------------*/ 3149 3150 /* Core methods for spi_message alterations */ 3151 3152 static void __spi_replace_transfers_release(struct spi_controller *ctlr, 3153 struct spi_message *msg, 3154 void *res) 3155 { 3156 struct spi_replaced_transfers *rxfer = res; 3157 size_t i; 3158 3159 /* call extra callback if requested */ 3160 if (rxfer->release) 3161 rxfer->release(ctlr, msg, res); 3162 3163 /* insert replaced transfers back into the message */ 3164 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); 3165 3166 /* remove the formerly inserted entries */ 3167 for (i = 0; i < rxfer->inserted; i++) 3168 list_del(&rxfer->inserted_transfers[i].transfer_list); 3169 } 3170 3171 /** 3172 * spi_replace_transfers - replace transfers with several transfers 3173 * and register change with spi_message.resources 3174 * @msg: the spi_message we work upon 3175 * @xfer_first: the first spi_transfer we want to replace 3176 * @remove: number of transfers to remove 3177 * @insert: the number of transfers we want to insert instead 3178 * @release: extra release code necessary in some circumstances 3179 * @extradatasize: extra data to allocate (with alignment guarantees 3180 * of struct @spi_transfer) 3181 * @gfp: gfp flags 3182 * 3183 * Returns: pointer to @spi_replaced_transfers, 3184 * PTR_ERR(...) in case of errors. 
3185 */ 3186 static struct spi_replaced_transfers *spi_replace_transfers( 3187 struct spi_message *msg, 3188 struct spi_transfer *xfer_first, 3189 size_t remove, 3190 size_t insert, 3191 spi_replaced_release_t release, 3192 size_t extradatasize, 3193 gfp_t gfp) 3194 { 3195 struct spi_replaced_transfers *rxfer; 3196 struct spi_transfer *xfer; 3197 size_t i; 3198 3199 /* allocate the structure using spi_res */ 3200 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, 3201 struct_size(rxfer, inserted_transfers, insert) 3202 + extradatasize, 3203 gfp); 3204 if (!rxfer) 3205 return ERR_PTR(-ENOMEM); 3206 3207 /* the release code to invoke before running the generic release */ 3208 rxfer->release = release; 3209 3210 /* assign extradata */ 3211 if (extradatasize) 3212 rxfer->extradata = 3213 &rxfer->inserted_transfers[insert]; 3214 3215 /* init the replaced_transfers list */ 3216 INIT_LIST_HEAD(&rxfer->replaced_transfers); 3217 3218 /* 3219 * Assign the list_entry after which we should reinsert 3220 * the @replaced_transfers - it may be spi_message.messages! 3221 */ 3222 rxfer->replaced_after = xfer_first->transfer_list.prev; 3223 3224 /* remove the requested number of transfers */ 3225 for (i = 0; i < remove; i++) { 3226 /* 3227 * If the entry after replaced_after it is msg->transfers 3228 * then we have been requested to remove more transfers 3229 * than are in the list. 3230 */ 3231 if (rxfer->replaced_after->next == &msg->transfers) { 3232 dev_err(&msg->spi->dev, 3233 "requested to remove more spi_transfers than are available\n"); 3234 /* insert replaced transfers back into the message */ 3235 list_splice(&rxfer->replaced_transfers, 3236 rxfer->replaced_after); 3237 3238 /* free the spi_replace_transfer structure */ 3239 spi_res_free(rxfer); 3240 3241 /* and return with an error */ 3242 return ERR_PTR(-EINVAL); 3243 } 3244 3245 /* 3246 * Remove the entry after replaced_after from list of 3247 * transfers and add it to list of replaced_transfers. 3248 */ 3249 list_move_tail(rxfer->replaced_after->next, 3250 &rxfer->replaced_transfers); 3251 } 3252 3253 /* 3254 * Create copy of the given xfer with identical settings 3255 * based on the first transfer to get removed. 3256 */ 3257 for (i = 0; i < insert; i++) { 3258 /* we need to run in reverse order */ 3259 xfer = &rxfer->inserted_transfers[insert - 1 - i]; 3260 3261 /* copy all spi_transfer data */ 3262 memcpy(xfer, xfer_first, sizeof(*xfer)); 3263 3264 /* add to list */ 3265 list_add(&xfer->transfer_list, rxfer->replaced_after); 3266 3267 /* clear cs_change and delay for all but the last */ 3268 if (i) { 3269 xfer->cs_change = false; 3270 xfer->delay.value = 0; 3271 } 3272 } 3273 3274 /* set up inserted */ 3275 rxfer->inserted = insert; 3276 3277 /* and register it with spi_res/spi_message */ 3278 spi_res_add(msg, rxfer); 3279 3280 return rxfer; 3281 } 3282 3283 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, 3284 struct spi_message *msg, 3285 struct spi_transfer **xferp, 3286 size_t maxsize, 3287 gfp_t gfp) 3288 { 3289 struct spi_transfer *xfer = *xferp, *xfers; 3290 struct spi_replaced_transfers *srt; 3291 size_t offset; 3292 size_t count, i; 3293 3294 /* calculate how many we have to replace */ 3295 count = DIV_ROUND_UP(xfer->len, maxsize); 3296 3297 /* create replacement */ 3298 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp); 3299 if (IS_ERR(srt)) 3300 return PTR_ERR(srt); 3301 xfers = srt->inserted_transfers; 3302 3303 /* 3304 * Now handle each of those newly inserted spi_transfers. 
	 * Note that the replacement spi_transfers are all preset
	 * to the same values as *xferp, so tx_buf, rx_buf and len
	 * are all identical (as well as most others), so we just
	 * have to fix up len and the pointers.
	 *
	 * This also includes support for the deprecated
	 * spi_message.is_dma_mapped interface.
	 */

	/*
	 * The first transfer just needs the length modified, so we
	 * run it outside the loop.
	 */
	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

	/* all the others need rx_buf/tx_buf also set */
	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
		/* update rx_buf, tx_buf and dma */
		if (xfers[i].rx_buf)
			xfers[i].rx_buf += offset;
		if (xfers[i].rx_dma)
			xfers[i].rx_dma += offset;
		if (xfers[i].tx_buf)
			xfers[i].tx_buf += offset;
		if (xfers[i].tx_dma)
			xfers[i].tx_dma += offset;

		/* update length */
		xfers[i].len = min(maxsize, xfers[i].len - offset);
	}

	/*
	 * We set up xferp to the last entry we have inserted,
	 * so that we skip those already split transfers.
	 */
	*xferp = &xfers[count - 1];

	/* increment statistics counters */
	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
				       transfers_split_maxsize);
	SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
				       transfers_split_maxsize);

	return 0;
}

/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @ctlr: the @spi_controller for this transfer
 * @msg: the @spi_message to transform
 * @maxsize: the maximum length any individual transfer may have before it
 *           gets split into several transfers of at most this size
 * @gfp: GFP allocation flags
 *
 * Return: zero on success, else a negative error code.
 */
int spi_split_transfers_maxsize(struct spi_controller *ctlr,
				struct spi_message *msg,
				size_t maxsize,
				gfp_t gfp)
{
	struct spi_transfer *xfer;
	int ret;

	/*
	 * Iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer does
	 * potentially belong to a different list by the time the
	 * replacement has happened).
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
							   maxsize, gfp);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);

/*-------------------------------------------------------------------------*/

/* Core methods for SPI controller protocol drivers. Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
					u8 bits_per_word)
{
	if (ctlr->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.
 * They may likewise need
 * to update clock rates or word sizes from initial values. This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it. When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support. For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned bad_bits, ugly_bits;
	int status;

	/*
	 * Check mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO
	 * from being set at the same time.
	 */
	if ((hweight_long(spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
	    (hweight_long(spi->mode &
		(SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
		dev_err(&spi->dev,
			"setup: cannot select any two of dual, quad and no-rx/tx at the same time\n");
		return -EINVAL;
	}
	/* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
		 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
		return -EINVAL;
	/*
	 * Help drivers fail *cleanly* when they need options
	 * that aren't supported with their current controller.
	 * SPI_CS_WORD has a fallback software implementation,
	 * so it is ignored here.
	 */
	bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
				 SPI_NO_TX | SPI_NO_RX);
	/*
	 * Nothing prevents us from working with an active-high CS if it
	 * is driven by a GPIO.
3463 */ 3464 if (gpio_is_valid(spi->cs_gpio)) 3465 bad_bits &= ~SPI_CS_HIGH; 3466 ugly_bits = bad_bits & 3467 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | 3468 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL); 3469 if (ugly_bits) { 3470 dev_warn(&spi->dev, 3471 "setup: ignoring unsupported mode bits %x\n", 3472 ugly_bits); 3473 spi->mode &= ~ugly_bits; 3474 bad_bits &= ~ugly_bits; 3475 } 3476 if (bad_bits) { 3477 dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 3478 bad_bits); 3479 return -EINVAL; 3480 } 3481 3482 if (!spi->bits_per_word) 3483 spi->bits_per_word = 8; 3484 3485 status = __spi_validate_bits_per_word(spi->controller, 3486 spi->bits_per_word); 3487 if (status) 3488 return status; 3489 3490 if (spi->controller->max_speed_hz && 3491 (!spi->max_speed_hz || 3492 spi->max_speed_hz > spi->controller->max_speed_hz)) 3493 spi->max_speed_hz = spi->controller->max_speed_hz; 3494 3495 mutex_lock(&spi->controller->io_mutex); 3496 3497 if (spi->controller->setup) { 3498 status = spi->controller->setup(spi); 3499 if (status) { 3500 mutex_unlock(&spi->controller->io_mutex); 3501 dev_err(&spi->controller->dev, "Failed to setup device: %d\n", 3502 status); 3503 return status; 3504 } 3505 } 3506 3507 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) { 3508 status = pm_runtime_get_sync(spi->controller->dev.parent); 3509 if (status < 0) { 3510 mutex_unlock(&spi->controller->io_mutex); 3511 pm_runtime_put_noidle(spi->controller->dev.parent); 3512 dev_err(&spi->controller->dev, "Failed to power device: %d\n", 3513 status); 3514 return status; 3515 } 3516 3517 /* 3518 * We do not want to return positive value from pm_runtime_get, 3519 * there are many instances of devices calling spi_setup() and 3520 * checking for a non-zero return value instead of a negative 3521 * return value. 3522 */ 3523 status = 0; 3524 3525 spi_set_cs(spi, false, true); 3526 pm_runtime_mark_last_busy(spi->controller->dev.parent); 3527 pm_runtime_put_autosuspend(spi->controller->dev.parent); 3528 } else { 3529 spi_set_cs(spi, false, true); 3530 } 3531 3532 mutex_unlock(&spi->controller->io_mutex); 3533 3534 if (spi->rt && !spi->controller->rt) { 3535 spi->controller->rt = true; 3536 spi_set_thread_rt(spi->controller); 3537 } 3538 3539 trace_spi_setup(spi, status); 3540 3541 dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n", 3542 spi->mode & SPI_MODE_X_MASK, 3543 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 3544 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 3545 (spi->mode & SPI_3WIRE) ? "3wire, " : "", 3546 (spi->mode & SPI_LOOP) ? 
"loopback, " : "", 3547 spi->bits_per_word, spi->max_speed_hz, 3548 status); 3549 3550 return status; 3551 } 3552 EXPORT_SYMBOL_GPL(spi_setup); 3553 3554 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer, 3555 struct spi_device *spi) 3556 { 3557 int delay1, delay2; 3558 3559 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer); 3560 if (delay1 < 0) 3561 return delay1; 3562 3563 delay2 = spi_delay_to_ns(&spi->word_delay, xfer); 3564 if (delay2 < 0) 3565 return delay2; 3566 3567 if (delay1 < delay2) 3568 memcpy(&xfer->word_delay, &spi->word_delay, 3569 sizeof(xfer->word_delay)); 3570 3571 return 0; 3572 } 3573 3574 static int __spi_validate(struct spi_device *spi, struct spi_message *message) 3575 { 3576 struct spi_controller *ctlr = spi->controller; 3577 struct spi_transfer *xfer; 3578 int w_size; 3579 3580 if (list_empty(&message->transfers)) 3581 return -EINVAL; 3582 3583 /* 3584 * If an SPI controller does not support toggling the CS line on each 3585 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO 3586 * for the CS line, we can emulate the CS-per-word hardware function by 3587 * splitting transfers into one-word transfers and ensuring that 3588 * cs_change is set for each transfer. 3589 */ 3590 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) || 3591 spi->cs_gpiod || 3592 gpio_is_valid(spi->cs_gpio))) { 3593 size_t maxsize; 3594 int ret; 3595 3596 maxsize = (spi->bits_per_word + 7) / 8; 3597 3598 /* spi_split_transfers_maxsize() requires message->spi */ 3599 message->spi = spi; 3600 3601 ret = spi_split_transfers_maxsize(ctlr, message, maxsize, 3602 GFP_KERNEL); 3603 if (ret) 3604 return ret; 3605 3606 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3607 /* don't change cs_change on the last entry in the list */ 3608 if (list_is_last(&xfer->transfer_list, &message->transfers)) 3609 break; 3610 xfer->cs_change = 1; 3611 } 3612 } 3613 3614 /* 3615 * Half-duplex links include original MicroWire, and ones with 3616 * only one data pin like SPI_3WIRE (switches direction) or where 3617 * either MOSI or MISO is missing. They can also be caused by 3618 * software limitations. 3619 */ 3620 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) || 3621 (spi->mode & SPI_3WIRE)) { 3622 unsigned flags = ctlr->flags; 3623 3624 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3625 if (xfer->rx_buf && xfer->tx_buf) 3626 return -EINVAL; 3627 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf) 3628 return -EINVAL; 3629 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf) 3630 return -EINVAL; 3631 } 3632 } 3633 3634 /* 3635 * Set transfer bits_per_word and max speed as spi device default if 3636 * it is not set for this transfer. 3637 * Set transfer tx_nbits and rx_nbits as single transfer default 3638 * (SPI_NBITS_SINGLE) if it is not set for this transfer. 3639 * Ensure transfer word_delay is at least as long as that required by 3640 * device itself. 
3641 */ 3642 message->frame_length = 0; 3643 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3644 xfer->effective_speed_hz = 0; 3645 message->frame_length += xfer->len; 3646 if (!xfer->bits_per_word) 3647 xfer->bits_per_word = spi->bits_per_word; 3648 3649 if (!xfer->speed_hz) 3650 xfer->speed_hz = spi->max_speed_hz; 3651 3652 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz) 3653 xfer->speed_hz = ctlr->max_speed_hz; 3654 3655 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word)) 3656 return -EINVAL; 3657 3658 /* 3659 * SPI transfer length should be multiple of SPI word size 3660 * where SPI word size should be power-of-two multiple. 3661 */ 3662 if (xfer->bits_per_word <= 8) 3663 w_size = 1; 3664 else if (xfer->bits_per_word <= 16) 3665 w_size = 2; 3666 else 3667 w_size = 4; 3668 3669 /* No partial transfers accepted */ 3670 if (xfer->len % w_size) 3671 return -EINVAL; 3672 3673 if (xfer->speed_hz && ctlr->min_speed_hz && 3674 xfer->speed_hz < ctlr->min_speed_hz) 3675 return -EINVAL; 3676 3677 if (xfer->tx_buf && !xfer->tx_nbits) 3678 xfer->tx_nbits = SPI_NBITS_SINGLE; 3679 if (xfer->rx_buf && !xfer->rx_nbits) 3680 xfer->rx_nbits = SPI_NBITS_SINGLE; 3681 /* 3682 * Check transfer tx/rx_nbits: 3683 * 1. check the value matches one of single, dual and quad 3684 * 2. check tx/rx_nbits match the mode in spi_device 3685 */ 3686 if (xfer->tx_buf) { 3687 if (spi->mode & SPI_NO_TX) 3688 return -EINVAL; 3689 if (xfer->tx_nbits != SPI_NBITS_SINGLE && 3690 xfer->tx_nbits != SPI_NBITS_DUAL && 3691 xfer->tx_nbits != SPI_NBITS_QUAD) 3692 return -EINVAL; 3693 if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 3694 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 3695 return -EINVAL; 3696 if ((xfer->tx_nbits == SPI_NBITS_QUAD) && 3697 !(spi->mode & SPI_TX_QUAD)) 3698 return -EINVAL; 3699 } 3700 /* check transfer rx_nbits */ 3701 if (xfer->rx_buf) { 3702 if (spi->mode & SPI_NO_RX) 3703 return -EINVAL; 3704 if (xfer->rx_nbits != SPI_NBITS_SINGLE && 3705 xfer->rx_nbits != SPI_NBITS_DUAL && 3706 xfer->rx_nbits != SPI_NBITS_QUAD) 3707 return -EINVAL; 3708 if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 3709 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 3710 return -EINVAL; 3711 if ((xfer->rx_nbits == SPI_NBITS_QUAD) && 3712 !(spi->mode & SPI_RX_QUAD)) 3713 return -EINVAL; 3714 } 3715 3716 if (_spi_xfer_word_delay_update(xfer, spi)) 3717 return -EINVAL; 3718 } 3719 3720 message->status = -EINPROGRESS; 3721 3722 return 0; 3723 } 3724 3725 static int __spi_async(struct spi_device *spi, struct spi_message *message) 3726 { 3727 struct spi_controller *ctlr = spi->controller; 3728 struct spi_transfer *xfer; 3729 3730 /* 3731 * Some controllers do not support doing regular SPI transfers. Return 3732 * ENOTSUPP when this is the case. 
3733 */ 3734 if (!ctlr->transfer) 3735 return -ENOTSUPP; 3736 3737 message->spi = spi; 3738 3739 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async); 3740 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); 3741 3742 trace_spi_message_submit(message); 3743 3744 if (!ctlr->ptp_sts_supported) { 3745 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3746 xfer->ptp_sts_word_pre = 0; 3747 ptp_read_system_prets(xfer->ptp_sts); 3748 } 3749 } 3750 3751 return ctlr->transfer(spi, message); 3752 } 3753 3754 /** 3755 * spi_async - asynchronous SPI transfer 3756 * @spi: device with which data will be exchanged 3757 * @message: describes the data transfers, including completion callback 3758 * Context: any (irqs may be blocked, etc) 3759 * 3760 * This call may be used in_irq and other contexts which can't sleep, 3761 * as well as from task contexts which can sleep. 3762 * 3763 * The completion callback is invoked in a context which can't sleep. 3764 * Before that invocation, the value of message->status is undefined. 3765 * When the callback is issued, message->status holds either zero (to 3766 * indicate complete success) or a negative error code. After that 3767 * callback returns, the driver which issued the transfer request may 3768 * deallocate the associated memory; it's no longer in use by any SPI 3769 * core or controller driver code. 3770 * 3771 * Note that although all messages to a spi_device are handled in 3772 * FIFO order, messages may go to different devices in other orders. 3773 * Some device might be higher priority, or have various "hard" access 3774 * time requirements, for example. 3775 * 3776 * On detection of any fault during the transfer, processing of 3777 * the entire message is aborted, and the device is deselected. 3778 * Until returning from the associated message completion callback, 3779 * no other spi_message queued to that device will be processed. 3780 * (This rule applies equally to all the synchronous transfer calls, 3781 * which are wrappers around this core asynchronous primitive.) 3782 * 3783 * Return: zero on success, else a negative error code. 3784 */ 3785 int spi_async(struct spi_device *spi, struct spi_message *message) 3786 { 3787 struct spi_controller *ctlr = spi->controller; 3788 int ret; 3789 unsigned long flags; 3790 3791 ret = __spi_validate(spi, message); 3792 if (ret != 0) 3793 return ret; 3794 3795 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 3796 3797 if (ctlr->bus_lock_flag) 3798 ret = -EBUSY; 3799 else 3800 ret = __spi_async(spi, message); 3801 3802 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3803 3804 return ret; 3805 } 3806 EXPORT_SYMBOL_GPL(spi_async); 3807 3808 /** 3809 * spi_async_locked - version of spi_async with exclusive bus usage 3810 * @spi: device with which data will be exchanged 3811 * @message: describes the data transfers, including completion callback 3812 * Context: any (irqs may be blocked, etc) 3813 * 3814 * This call may be used in_irq and other contexts which can't sleep, 3815 * as well as from task contexts which can sleep. 3816 * 3817 * The completion callback is invoked in a context which can't sleep. 3818 * Before that invocation, the value of message->status is undefined. 3819 * When the callback is issued, message->status holds either zero (to 3820 * indicate complete success) or a negative error code. 

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (IRQs may be blocked, etc.)
 *
 * This call may be used in IRQ context and other contexts which can't
 * sleep, as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code. After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some devices might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	return ret;
}

/*-------------------------------------------------------------------------*/

/*
 * Utility methods for SPI protocol drivers, layered on
 * top of the core. Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_controller *ctlr = spi->controller;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	/*
	 * If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context, so this path is
	 * special-cased. This code would be less tricky if we could
	 * remove the support for driver implemented message queues.
	 */
	if (ctlr->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (status == 0) {
		/* Push out the messages in the calling context if we can */
		if (ctlr->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(ctlr, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages. Drivers for some
 * frequently-used devices may want to minimize the cost of selecting a
 * chip, by leaving it selected in anticipation that the next message will
 * go to the same chip. (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->controller->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->controller->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
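
/*
 * Illustrative sketch (not part of the core): a typical blocking
 * full-duplex exchange with spi_sync(). Names are hypothetical, and
 * tx/rx are assumed to be DMA-safe (e.g. kmalloc'd) buffers since the
 * controller may DMA directly into and out of them.
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf	= tx,
 *		.rx_buf	= rx,
 *		.len	= 2,
 *	};
 *	struct spi_message msg;
 *	int ret;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	ret = spi_sync(spi, &msg);	// sleeps until the message completes
 */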

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_controller *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->bus_lock_mutex);

	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
	ctlr->bus_lock_flag = 1;
	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);

	/* The mutex remains locked until spi_bus_unlock() is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @ctlr: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by a spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_controller *ctlr)
{
	ctlr->bus_lock_flag = 0;

	mutex_unlock(&ctlr->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
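
/*
 * Illustrative sketch (not part of the core): grouping several messages
 * so that no other device on the same bus can interleave traffic between
 * them. setup_msg and data_msg are hypothetical, previously initialized
 * spi_message structures.
 *
 *	spi_bus_lock(spi->controller);
 *
 *	ret = spi_sync_locked(spi, &setup_msg);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &data_msg);
 *
 *	spi_bus_unlock(spi->controller);
 */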

/* Portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be DMA-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be DMA-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half-duplex MicroWire-style transaction with the
 * device, sending txbuf and then reading rxbuf. The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with DMA-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
			const void *txbuf, unsigned n_tx,
			void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int status;
	struct spi_message message;
	struct spi_transfer x[2];
	u8 *local_buf;

	/*
	 * Use the preallocated DMA-safe buffer if we can. We can't avoid
	 * copying here (it is a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* Do the I/O */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
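
/*
 * Illustrative sketch (not part of the core): reading one register of a
 * hypothetical device with spi_write_then_read(). Because the helper
 * copies through its own small DMA-safe buffer, the stack variables here
 * need not be DMA-safe; the opcode is made up for the example.
 *
 *	u8 cmd = 0x80 | reg;	// hypothetical "read register" opcode
 *	u8 val;
 *	int ret;
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, &val, 1);
 *	if (ret)
 *		return ret;
 */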

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* Must call put_device() when done with the returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);

	return dev ? to_spi_device(dev) : NULL;
}

/* The SPI controllers are not on the SPI bus, so they are found another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device_by_of_node(&spi_master_class, node);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device_by_of_node(&spi_slave_class, node);
	if (!dev)
		return NULL;

	/* Reference got in class_find_device */
	return container_of(dev, struct spi_controller, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* Not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
			       __func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* Already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* Find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* No? Not meant for us */

		/* Unregister takes one ref away */
		spi_unregister_device(spi);

		/* And put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_slave_class, NULL, adev,
					spi_acpi_controller_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_controller, dev);
}

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
	return to_spi_device(dev);
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		ctlr = acpi_spi_find_controller_by_adev(adev->parent);
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_slave_class);
		if (status < 0)
			goto err3;
	}

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_master_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/*
 * A board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... Costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);