// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/percpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	free_percpu(spi->pcpu_statistics);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = driver_set_override(dev, &spi->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}
static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct spi_statistics *spi_alloc_pcpu_stats(struct device *dev)
{
	struct spi_statistics __percpu *pcpu_stats;

	if (dev)
		pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
	else
		pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);

	if (pcpu_stats) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct spi_statistics *stat;

			stat = per_cpu_ptr(pcpu_stats, cpu);
			u64_stats_init(&stat->syncp);
		}
	}
	return pcpu_stats;
}

#define spi_pcpu_stats_totalize(ret, in, field)			\
do {								\
	int i;							\
	ret = 0;						\
	for_each_possible_cpu(i) {				\
		const struct spi_statistics *pcpu_stats;	\
		u64 inc;					\
		unsigned int start;				\
		pcpu_stats = per_cpu_ptr(in, i);		\
		do {						\
			start = u64_stats_fetch_begin_irq(	\
					&pcpu_stats->syncp);	\
			inc = u64_stats_read(&pcpu_stats->field); \
		} while (u64_stats_fetch_retry_irq(		\
					&pcpu_stats->syncp, start)); \
		ret += inc;					\
	}							\
} while (0)

#define SPI_STATISTICS_ATTRS(field, file)			\
static ssize_t spi_controller_##field##_show(struct device *dev, \
					     struct device_attribute *attr, \
					     char *buf)		\
{								\
	struct spi_controller *ctlr = container_of(dev,	\
					 struct spi_controller, dev); \
	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
}								\
static struct device_attribute dev_attr_spi_controller_##field = { \
	.attr = { .name = file, .mode = 0444 },			\
	.show = spi_controller_##field##_show,			\
};								\
static ssize_t spi_device_##field##_show(struct device *dev,	\
					 struct device_attribute *attr, \
					 char *buf)		\
{								\
	struct spi_device *spi = to_spi_device(dev);		\
	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
}								\
static struct device_attribute dev_attr_spi_device_##field = {	\
	.attr = { .name = file, .mode = 0444 },			\
	.show = spi_device_##field##_show,			\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field)		\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)		\
{								\
	ssize_t len;						\
	u64 val;						\
	spi_pcpu_stats_totalize(val, stat, field);		\
	len = sysfs_emit(buf, "%llu\n", val);			\
	return len;						\
}								\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field)				\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),	\
				 field)

SPI_STATISTICS_SHOW(messages);
SPI_STATISTICS_SHOW(transfers);
SPI_STATISTICS_SHOW(errors);
SPI_STATISTICS_SHOW(timedout);

SPI_STATISTICS_SHOW(spi_sync);
SPI_STATISTICS_SHOW(spi_sync_immediate);
SPI_STATISTICS_SHOW(spi_async);

SPI_STATISTICS_SHOW(bytes);
SPI_STATISTICS_SHOW(bytes_rx);
SPI_STATISTICS_SHOW(bytes_tx);
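/*
 * For illustration (not part of the original machinery): expanding,
 * say, SPI_STATISTICS_SHOW(messages) above generates
 * spi_statistics_messages_show() plus one read-only attribute for the
 * controller and one for the device, so each ends up with a
 * statistics/messages file in sysfs whose value is the sum of the
 * per-CPU counters.
 */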
#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)	\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,	\
				 "transfer_bytes_histo_" number, \
				 transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name = "statistics",
	.attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};
static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name = "statistics",
	.attrs = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

static void spi_statistics_add_transfer_stats(struct spi_statistics *pcpu_stats,
					      struct spi_transfer *xfer,
					      struct spi_controller *ctlr)
{
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
	struct spi_statistics *stats;

	if (l2len < 0)
		l2len = 0;

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	u64_stats_inc(&stats->transfers);
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	u64_stats_add(&stats->bytes, xfer->len);
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		u64_stats_add(&stats->bytes_tx, xfer->len);
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		u64_stats_add(&stats->bytes_rx, xfer->len);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}
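/*
 * Worked example (illustrative): for xfer->len == 3, fls(3) == 2, so
 * l2len == 1 and the transfer is counted in transfer_bytes_histo[1],
 * the "2-3" bucket above; a zero-length transfer is clamped into
 * bucket 0 ("0-1"), and anything of 64 KiB or more saturates in the
 * final "65536+" bucket.
 */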
/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
	while (id->name[0]) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi->modalias);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove)
		sdrv->remove(to_spi_device(dev));

	dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver *sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases not of:
	 * modaliases for DT so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				spi_id = spi_match_id(sdrv->id_table, of_name);
				if (spi_id)
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
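/*
 * A minimal sketch of the pattern the warning above asks for; the
 * "acme" names are hypothetical, not taken from this file. A driver
 * that matches via DT should also carry a spi_device_id whose name
 * equals the compatible string minus the vendor prefix, so that
 * "spi:" modalias based module autoloading keeps working:
 *
 *	static const struct spi_device_id acme_spi_ids[] = {
 *		{ "foo-dev" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, acme_spi_ids);
 *
 *	static const struct of_device_id acme_of_match[] = {
 *		{ .compatible = "acme,foo-dev" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, acme_of_match);
 *
 *	static struct spi_driver acme_driver = {
 *		.driver = {
 *			.name		= "acme-foo",
 *			.of_match_table	= acme_of_match,
 *		},
 *		.id_table	= acme_spi_ids,
 *		.probe		= acme_probe,
 *	};
 *	module_spi_driver(acme_driver);
 */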
/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific. Similarly with SPI controller drivers.
 * Device registration normally goes into code like
 * arch/.../mach.../board-YYY.c, with other readonly (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process. It is also used to
 * protect the struct idr object.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately, so that the driver can directly fill
 * the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller. If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
	if (!spi->pcpu_statistics) {
		kfree(spi);
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->mode = ctlr->buswidth_override_bits;

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.
	 */
	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
			spi->chip_select);
		return status;
	}

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	if (ctlr->cs_gpiods)
		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup. Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}
/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device. Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	mutex_lock(&ctlr->add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
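/*
 * A minimal usage sketch of the two-step instantiation above
 * (illustrative only; assumes "ctlr" is a live, registered controller
 * and error handling is abbreviated):
 *
 *	struct spi_device *spi;
 *	int status;
 *
 *	spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return -ENOMEM;
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	strlcpy(spi->modalias, "spidev", sizeof(spi->modalias));
 *	status = spi_add_device(spi);
 *	if (status)
 *		spi_dev_put(spi);
 */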
static int spi_add_device_locked(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	WARN_ON(!mutex_is_locked(&ctlr->add_lock));
	return __spi_add_device(spi);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices. Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device *proxy;
	int status;

	/*
	 * NOTE: caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table. Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined. We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
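/*
 * A board file might use spi_register_board_info() roughly as follows
 * (sketch only; the device and numbers are made up). The table may be
 * __initdata because the function copies it:
 *
 *	static struct spi_board_info ek_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "mtd_dataflash",
 *			.chip_select	= 0,
 *			.max_speed_hz	= 15 * 1000 * 1000,
 *			.bus_num	= 0,
 *		},
 *	};
 *
 *	spi_register_board_info(ek_spi_devices, ARRAY_SIZE(ek_spi_devices));
 */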
/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
			   size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}

/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res:     the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all spi resources for this message
 * @ctlr:    the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}
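/*
 * How these helpers fit together, as a sketch (the release callback
 * "my_release" is hypothetical; within this file a real user is the
 * transfer-splitting code further down): allocate a resource bound to
 * a message, queue it, and spi_res_release() later runs the callback
 * and frees it when the message completes.
 *
 *	void *scratch = spi_res_alloc(spi, my_release, 64, GFP_KERNEL);
 *
 *	if (!scratch)
 *		return -ENOMEM;
 *	spi_res_add(msg, scratch);
 */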
/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && ((enable && spi->controller->last_cs == spi->chip_select) ||
		       (!enable && spi->controller->last_cs != spi->chip_select)) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs = enable ? spi->chip_select : -1;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if ((spi->cs_gpiod || !spi->controller->set_cs_timing) && !activate)
		spi_delay_exec(&spi->cs_hold, NULL);

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpiod) {
		if (!(spi->mode & SPI_NO_CS)) {
			/*
			 * Historically ACPI has no means of expressing the
			 * GPIO polarity, and thus the SPISerialBus() resource
			 * defines it on a per-chip basis. In order to avoid a
			 * chain of negations, the GPIO polarity is considered
			 * to be Active High. Even for the cases when _DSD()
			 * is involved (in the updated versions of ACPI) the
			 * GPIO CS polarity must be defined Active High to
			 * avoid ambiguity. That's why we use enable, which
			 * takes SPI_CS_HIGH into account.
			 */
			if (has_acpi_companion(&spi->dev))
				gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
			else
				/* Polarity handled by GPIO library */
				gpiod_set_value_cansleep(spi->cs_gpiod, activate);
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi->cs_gpiod || !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
						      (LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}
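	/*
	 * Worked example (illustrative): mapping 8 KiB of vmalloc()ed
	 * memory that starts 256 bytes into a page, with 4 KiB pages,
	 * needs DIV_ROUND_UP(8192 + 256, 4096) == 3 scatterlist entries,
	 * because the first and last pages are only partially covered.
	 */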
	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	ctlr->cur_msg_mapped = false;

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */
static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original (NULL) tx_buf or rx_buf if they
		 * were replaced with the controller's dummy buffers.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics *statm = ctlr->pcpu_statistics;
	struct spi_statistics *stats = msg->spi->pcpu_statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * use respective multiplier, but before the division,
		 * otherwise we may get 0 for short transfers.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);
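		/*
		 * Worked example (illustrative): a 4096-byte transfer at
		 * 1 MHz yields ms == 32 here (8 * 1000 * 4096 / 1000000,
		 * truncated), which the doubling plus 200 ms tolerance
		 * below turns into a 264 ms timeout.
		 */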
		/*
		 * Increase it twice and add 200 ms tolerance, use
		 * predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* Clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If the effective speed is unknown, approximate it
		 * by underestimating with half of the requested Hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);
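/*
 * Usage sketch (illustrative): a delay can be expressed in SPI clock
 * cycles and resolved against the transfer's actual rate. With
 * xfer->effective_speed_hz == 10000000, a 4-cycle delay becomes
 * 4 * DIV_ROUND_UP(1000000000, 10000000) == 400 ns:
 *
 *	struct spi_delay d = {
 *		.value	= 4,
 *		.unit	= SPI_DELAY_UNIT_SCK,
 *	};
 *
 *	spi_delay_exec(&d, xfer);
 */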
static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* Return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation. It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics *statm = ctlr->pcpu_statistics;
	struct spi_statistics *stats = msg->spi->pcpu_statistics;

	spi_set_cs(msg->spi, true, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				if (ctlr->cur_msg_mapped &&
				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				spi_set_cs(msg->spi, true, false);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}
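/*
 * Sketch of the contract above (hypothetical driver, names made up):
 * an interrupt-driven transfer_one() starts the hardware and returns
 * a positive value, and its IRQ handler later reports completion so
 * spi_transfer_one_message() can stop waiting:
 *
 *	static int foo_transfer_one(struct spi_controller *ctlr,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_start_hw(ctlr, xfer);
 *		return 1;	positive return: transfer is in flight
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct spi_controller *ctlr = data;
 *
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */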
/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);

static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
	if (ctlr->auto_runtime_pm) {
		pm_runtime_mark_last_busy(ctlr->dev.parent);
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
}

static int __spi_pump_transfer_message(struct spi_controller *ctlr,
				       struct spi_message *msg, bool was_busy)
{
	struct spi_transfer *xfer;
	int ret;

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			return ret;
		}
	}

	trace_spi_message_start(msg);

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			return ret;
		}
		msg->prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		return ret;
	}

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	/*
	 * A driver's implementation of transfer_one_message() must arrange
	 * for spi_finalize_current_message() to get called. Most drivers
	 * will do this in the calling context, but some don't. For those
	 * cases, a completion is used to guarantee that this function does
	 * not return until spi_finalize_current_message() is done accessing
	 * ctlr->cur_msg.
	 * The following two flags enable us to opportunistically skip the
	 * use of the completion, since its use involves expensive spin locks.
	 * In case of a race with the context that calls
	 * spi_finalize_current_message() the completion will always be used,
	 * due to strict ordering of these flags using barriers.
	 */
	WRITE_ONCE(ctlr->cur_msg_incomplete, true);
	WRITE_ONCE(ctlr->cur_msg_need_completion, false);
	reinit_completion(&ctlr->cur_msg_completion);
	smp_wmb(); /* Make these available to spi_finalize_current_message() */

	ret = ctlr->transfer_one_message(ctlr, msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		return ret;
	}

	WRITE_ONCE(ctlr->cur_msg_need_completion, true);
	smp_mb(); /* See spi_finalize_current_message()... */
	if (READ_ONCE(ctlr->cur_msg_incomplete))
		wait_for_completion(&ctlr->cur_msg_completion);

	return 0;
}

/**
 * __spi_pump_messages - function which processes spi message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and, if so, calls out to the driver to initialize
 * hardware and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
	struct spi_message *msg;
	bool was_busy = false;
	unsigned long flags;
	int ret;

	/* Take the IO mutex */
	mutex_lock(&ctlr->io_mutex);

	/* Lock queue */
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (ctlr->cur_msg)
		goto out_unlock;

	/* Check if the queue is idle */
	if (list_empty(&ctlr->queue) || !ctlr->running) {
		if (!ctlr->busy)
			goto out_unlock;

		/* Defer any non-atomic teardown to the thread */
		if (!in_kthread) {
			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
			    !ctlr->unprepare_transfer_hardware) {
				spi_idle_runtime_pm(ctlr);
				ctlr->busy = false;
				ctlr->queue_empty = true;
				trace_spi_controller_idle(ctlr);
			} else {
				kthread_queue_work(ctlr->kworker,
						   &ctlr->pump_messages);
			}
			goto out_unlock;
		}

		ctlr->busy = false;
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);

		kfree(ctlr->dummy_rx);
		ctlr->dummy_rx = NULL;
		kfree(ctlr->dummy_tx);
		ctlr->dummy_tx = NULL;
		if (ctlr->unprepare_transfer_hardware &&
		    ctlr->unprepare_transfer_hardware(ctlr))
			dev_err(&ctlr->dev,
				"failed to unprepare transfer hardware\n");
		spi_idle_runtime_pm(ctlr);
		trace_spi_controller_idle(ctlr);

		spin_lock_irqsave(&ctlr->queue_lock, flags);
		ctlr->queue_empty = true;
		goto out_unlock;
	}

	/* Extract head of queue */
	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
	ctlr->cur_msg = msg;

	list_del_init(&msg->queue);
	if (ctlr->busy)
		was_busy = true;
	else
		ctlr->busy = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
	if (!ret)
		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);

	ctlr->cur_msg = NULL;
	ctlr->fallback = false;

	mutex_unlock(&ctlr->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
	return;

out_unlock:
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	mutex_unlock(&ctlr->io_mutex);
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the controller struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_controller *ctlr =
		container_of(work, struct spi_controller, pump_messages);

	__spi_pump_messages(ctlr, true);
}
/**
 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
 *	      transfer, for less jitter in time measurement. Only compatible
 *	      with PIO drivers. If true, must follow up with
 *	      spi_take_timestamp_post, otherwise the system will crash.
 *	      WARNING: for fully predictable results, the CPU frequency must
 *	      also be under control (governor).
 *
 * This is a helper for drivers to collect the beginning of the TX timestamp
 * for the requested byte from the SPI transfer. The frequency with which this
 * function must be called (once per word, once for the whole transfer, once
 * per batch of words etc) is arbitrary as long as the @tx buffer offset is
 * greater than or equal to the requested byte at the time of the call. The
 * timestamp is only taken once, at the first such call. It is assumed that
 * the driver advances its @tx buffer pointer monotonically.
 */
void spi_take_timestamp_pre(struct spi_controller *ctlr,
			    struct spi_transfer *xfer,
			    size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress > xfer->ptp_sts_word_pre)
		return;

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_pre = progress;

	if (irqs_off) {
		local_irq_save(ctlr->irq_flags);
		preempt_disable();
	}

	ptp_read_system_prets(xfer->ptp_sts);
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);

/**
 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
 *
 * This is a helper for drivers to collect the end of the TX timestamp for
 * the requested byte from the SPI transfer. Can be called with an arbitrary
 * frequency: only the first call where @tx exceeds or is equal to the
 * requested word will be timestamped.
 */
void spi_take_timestamp_post(struct spi_controller *ctlr,
			     struct spi_transfer *xfer,
			     size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress < xfer->ptp_sts_word_post)
		return;

	ptp_read_system_postts(xfer->ptp_sts);

	if (irqs_off) {
		local_irq_restore(ctlr->irq_flags);
		preempt_enable();
	}

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_post = progress;

	xfer->timestamped = true;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
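/*
 * Usage sketch (hypothetical PIO driver; foo_write_word() is made up):
 * bracket the word that the client asked to have timestamped, counting
 * progress in words:
 *
 *	for (i = 0; i < xfer->len / 2; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		foo_write_word(ctlr, ((const u16 *)xfer->tx_buf)[i]);
 *		spi_take_timestamp_post(ctlr, xfer, i, false);
 *	}
 */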
/**
 * spi_set_thread_rt - set the controller to pump at realtime priority
 * @ctlr: controller to boost priority of
 *
 * This can be called because the controller requested realtime priority
 * (by setting the ->rt value before calling spi_register_controller()) or
 * because a device on the bus said that its transfers needed realtime
 * priority.
 *
 * NOTE: at the moment if any device on a bus says it needs realtime then
 * the thread will be at realtime priority for all transfers on that
 * controller. If this eventually becomes a problem we may see if we can
 * find a way to boost the priority only temporarily during relevant
 * transfers.
 */
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
	dev_info(&ctlr->dev,
		 "will run message pump with realtime priority\n");
	sched_set_fifo(ctlr->kworker->task);
}

static int spi_init_queue(struct spi_controller *ctlr)
{
	ctlr->running = false;
	ctlr->busy = false;
	ctlr->queue_empty = true;

	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker)) {
		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
		return PTR_ERR(ctlr->kworker);
	}

	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt)
		spi_set_thread_rt(ctlr);

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* Get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_transfer *xfer;
	struct spi_message *mesg;
	int ret;

	mesg = ctlr->cur_msg;

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}
	}

	if (unlikely(ctlr->ptp_sts_supported))
		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);

	spi_unmap_msg(ctlr, mesg);

	/*
	 * In the prepare_message callback the SPI bus has the opportunity
	 * to split a transfer into smaller chunks.
	 *
	 * Release the split transfers here since spi_map_msg() is done on
	 * the split transfers.
	 */
1952 */ 1953 spi_res_release(ctlr, mesg); 1954 1955 if (mesg->prepared && ctlr->unprepare_message) { 1956 ret = ctlr->unprepare_message(ctlr, mesg); 1957 if (ret) { 1958 dev_err(&ctlr->dev, "failed to unprepare message: %d\n", 1959 ret); 1960 } 1961 } 1962 1963 mesg->prepared = false; 1964 1965 WRITE_ONCE(ctlr->cur_msg_incomplete, false); 1966 smp_mb(); /* See __spi_pump_transfer_message()... */ 1967 if (READ_ONCE(ctlr->cur_msg_need_completion)) 1968 complete(&ctlr->cur_msg_completion); 1969 1970 trace_spi_message_done(mesg); 1971 1972 mesg->state = NULL; 1973 if (mesg->complete) 1974 mesg->complete(mesg->context); 1975 } 1976 EXPORT_SYMBOL_GPL(spi_finalize_current_message); 1977 1978 static int spi_start_queue(struct spi_controller *ctlr) 1979 { 1980 unsigned long flags; 1981 1982 spin_lock_irqsave(&ctlr->queue_lock, flags); 1983 1984 if (ctlr->running || ctlr->busy) { 1985 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1986 return -EBUSY; 1987 } 1988 1989 ctlr->running = true; 1990 ctlr->cur_msg = NULL; 1991 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1992 1993 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); 1994 1995 return 0; 1996 } 1997 1998 static int spi_stop_queue(struct spi_controller *ctlr) 1999 { 2000 unsigned long flags; 2001 unsigned limit = 500; 2002 int ret = 0; 2003 2004 spin_lock_irqsave(&ctlr->queue_lock, flags); 2005 2006 /* 2007 * This is a bit lame, but is optimized for the common execution path. 2008 * A wait_queue on the ctlr->busy could be used, but then the common 2009 * execution path (pump_messages) would be required to call wake_up or 2010 * friends on every SPI message. Do this instead. 2011 */ 2012 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) { 2013 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2014 usleep_range(10000, 11000); 2015 spin_lock_irqsave(&ctlr->queue_lock, flags); 2016 } 2017 2018 if (!list_empty(&ctlr->queue) || ctlr->busy) 2019 ret = -EBUSY; 2020 else 2021 ctlr->running = false; 2022 2023 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2024 2025 if (ret) { 2026 dev_warn(&ctlr->dev, "could not stop message queue\n"); 2027 return ret; 2028 } 2029 return ret; 2030 } 2031 2032 static int spi_destroy_queue(struct spi_controller *ctlr) 2033 { 2034 int ret; 2035 2036 ret = spi_stop_queue(ctlr); 2037 2038 /* 2039 * kthread_flush_worker will block until all work is done. 2040 * If the reason that stop_queue timed out is that the work will never 2041 * finish, then it does no good to call flush/stop thread, so 2042 * return anyway. 
2043 */ 2044 if (ret) { 2045 dev_err(&ctlr->dev, "problem destroying queue\n"); 2046 return ret; 2047 } 2048 2049 kthread_destroy_worker(ctlr->kworker); 2050 2051 return 0; 2052 } 2053 2054 static int __spi_queued_transfer(struct spi_device *spi, 2055 struct spi_message *msg, 2056 bool need_pump) 2057 { 2058 struct spi_controller *ctlr = spi->controller; 2059 unsigned long flags; 2060 2061 spin_lock_irqsave(&ctlr->queue_lock, flags); 2062 2063 if (!ctlr->running) { 2064 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2065 return -ESHUTDOWN; 2066 } 2067 msg->actual_length = 0; 2068 msg->status = -EINPROGRESS; 2069 2070 list_add_tail(&msg->queue, &ctlr->queue); 2071 ctlr->queue_empty = false; 2072 if (!ctlr->busy && need_pump) 2073 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); 2074 2075 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2076 return 0; 2077 } 2078 2079 /** 2080 * spi_queued_transfer - transfer function for queued transfers 2081 * @spi: spi device which is requesting transfer 2082 * @msg: spi message which is to handled is queued to driver queue 2083 * 2084 * Return: zero on success, else a negative error code. 2085 */ 2086 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) 2087 { 2088 return __spi_queued_transfer(spi, msg, true); 2089 } 2090 2091 static int spi_controller_initialize_queue(struct spi_controller *ctlr) 2092 { 2093 int ret; 2094 2095 ctlr->transfer = spi_queued_transfer; 2096 if (!ctlr->transfer_one_message) 2097 ctlr->transfer_one_message = spi_transfer_one_message; 2098 2099 /* Initialize and start queue */ 2100 ret = spi_init_queue(ctlr); 2101 if (ret) { 2102 dev_err(&ctlr->dev, "problem initializing queue\n"); 2103 goto err_init_queue; 2104 } 2105 ctlr->queued = true; 2106 ret = spi_start_queue(ctlr); 2107 if (ret) { 2108 dev_err(&ctlr->dev, "problem starting queue\n"); 2109 goto err_start_queue; 2110 } 2111 2112 return 0; 2113 2114 err_start_queue: 2115 spi_destroy_queue(ctlr); 2116 err_init_queue: 2117 return ret; 2118 } 2119 2120 /** 2121 * spi_flush_queue - Send all pending messages in the queue from the callers' 2122 * context 2123 * @ctlr: controller to process queue for 2124 * 2125 * This should be used when one wants to ensure all pending messages have been 2126 * sent before doing something. Is used by the spi-mem code to make sure SPI 2127 * memory operations do not preempt regular SPI transfers that have been queued 2128 * before the spi-mem operation. 2129 */ 2130 void spi_flush_queue(struct spi_controller *ctlr) 2131 { 2132 if (ctlr->transfer == spi_queued_transfer) 2133 __spi_pump_messages(ctlr, false); 2134 } 2135 2136 /*-------------------------------------------------------------------------*/ 2137 2138 #if defined(CONFIG_OF) 2139 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, 2140 struct device_node *nc) 2141 { 2142 u32 value; 2143 int rc; 2144 2145 /* Mode (clock phase/polarity/etc.) 
*/ 2146 if (of_property_read_bool(nc, "spi-cpha")) 2147 spi->mode |= SPI_CPHA; 2148 if (of_property_read_bool(nc, "spi-cpol")) 2149 spi->mode |= SPI_CPOL; 2150 if (of_property_read_bool(nc, "spi-3wire")) 2151 spi->mode |= SPI_3WIRE; 2152 if (of_property_read_bool(nc, "spi-lsb-first")) 2153 spi->mode |= SPI_LSB_FIRST; 2154 if (of_property_read_bool(nc, "spi-cs-high")) 2155 spi->mode |= SPI_CS_HIGH; 2156 2157 /* Device DUAL/QUAD mode */ 2158 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 2159 switch (value) { 2160 case 0: 2161 spi->mode |= SPI_NO_TX; 2162 break; 2163 case 1: 2164 break; 2165 case 2: 2166 spi->mode |= SPI_TX_DUAL; 2167 break; 2168 case 4: 2169 spi->mode |= SPI_TX_QUAD; 2170 break; 2171 case 8: 2172 spi->mode |= SPI_TX_OCTAL; 2173 break; 2174 default: 2175 dev_warn(&ctlr->dev, 2176 "spi-tx-bus-width %d not supported\n", 2177 value); 2178 break; 2179 } 2180 } 2181 2182 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 2183 switch (value) { 2184 case 0: 2185 spi->mode |= SPI_NO_RX; 2186 break; 2187 case 1: 2188 break; 2189 case 2: 2190 spi->mode |= SPI_RX_DUAL; 2191 break; 2192 case 4: 2193 spi->mode |= SPI_RX_QUAD; 2194 break; 2195 case 8: 2196 spi->mode |= SPI_RX_OCTAL; 2197 break; 2198 default: 2199 dev_warn(&ctlr->dev, 2200 "spi-rx-bus-width %d not supported\n", 2201 value); 2202 break; 2203 } 2204 } 2205 2206 if (spi_controller_is_slave(ctlr)) { 2207 if (!of_node_name_eq(nc, "slave")) { 2208 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n", 2209 nc); 2210 return -EINVAL; 2211 } 2212 return 0; 2213 } 2214 2215 /* Device address */ 2216 rc = of_property_read_u32(nc, "reg", &value); 2217 if (rc) { 2218 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n", 2219 nc, rc); 2220 return rc; 2221 } 2222 spi->chip_select = value; 2223 2224 /* Device speed */ 2225 if (!of_property_read_u32(nc, "spi-max-frequency", &value)) 2226 spi->max_speed_hz = value; 2227 2228 return 0; 2229 } 2230 2231 static struct spi_device * 2232 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc) 2233 { 2234 struct spi_device *spi; 2235 int rc; 2236 2237 /* Alloc an spi_device */ 2238 spi = spi_alloc_device(ctlr); 2239 if (!spi) { 2240 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc); 2241 rc = -ENOMEM; 2242 goto err_out; 2243 } 2244 2245 /* Select device driver */ 2246 rc = of_modalias_node(nc, spi->modalias, 2247 sizeof(spi->modalias)); 2248 if (rc < 0) { 2249 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc); 2250 goto err_out; 2251 } 2252 2253 rc = of_spi_parse_dt(ctlr, spi, nc); 2254 if (rc) 2255 goto err_out; 2256 2257 /* Store a pointer to the node in the device structure */ 2258 of_node_get(nc); 2259 spi->dev.of_node = nc; 2260 spi->dev.fwnode = of_fwnode_handle(nc); 2261 2262 /* Register the new device */ 2263 rc = spi_add_device(spi); 2264 if (rc) { 2265 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc); 2266 goto err_of_node_put; 2267 } 2268 2269 return spi; 2270 2271 err_of_node_put: 2272 of_node_put(nc); 2273 err_out: 2274 spi_dev_put(spi); 2275 return ERR_PTR(rc); 2276 } 2277 2278 /** 2279 * of_register_spi_devices() - Register child devices onto the SPI bus 2280 * @ctlr: Pointer to spi_controller device 2281 * 2282 * Registers an spi_device for each child node of controller node which 2283 * represents a valid SPI slave. 
2284 */ 2285 static void of_register_spi_devices(struct spi_controller *ctlr) 2286 { 2287 struct spi_device *spi; 2288 struct device_node *nc; 2289 2290 if (!ctlr->dev.of_node) 2291 return; 2292 2293 for_each_available_child_of_node(ctlr->dev.of_node, nc) { 2294 if (of_node_test_and_set_flag(nc, OF_POPULATED)) 2295 continue; 2296 spi = of_register_spi_device(ctlr, nc); 2297 if (IS_ERR(spi)) { 2298 dev_warn(&ctlr->dev, 2299 "Failed to create SPI device for %pOF\n", nc); 2300 of_node_clear_flag(nc, OF_POPULATED); 2301 } 2302 } 2303 } 2304 #else 2305 static void of_register_spi_devices(struct spi_controller *ctlr) { } 2306 #endif 2307 2308 /** 2309 * spi_new_ancillary_device() - Register ancillary SPI device 2310 * @spi: Pointer to the main SPI device registering the ancillary device 2311 * @chip_select: Chip Select of the ancillary device 2312 * 2313 * Register an ancillary SPI device; for example some chips have a chip-select 2314 * for normal device usage and another one for setup/firmware upload. 2315 * 2316 * This may only be called from main SPI device's probe routine. 2317 * 2318 * Return: 0 on success; negative errno on failure 2319 */ 2320 struct spi_device *spi_new_ancillary_device(struct spi_device *spi, 2321 u8 chip_select) 2322 { 2323 struct spi_device *ancillary; 2324 int rc = 0; 2325 2326 /* Alloc an spi_device */ 2327 ancillary = spi_alloc_device(spi->controller); 2328 if (!ancillary) { 2329 rc = -ENOMEM; 2330 goto err_out; 2331 } 2332 2333 strlcpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias)); 2334 2335 /* Use provided chip-select for ancillary device */ 2336 ancillary->chip_select = chip_select; 2337 2338 /* Take over SPI mode/speed from SPI main device */ 2339 ancillary->max_speed_hz = spi->max_speed_hz; 2340 ancillary->mode = spi->mode; 2341 2342 /* Register the new device */ 2343 rc = spi_add_device_locked(ancillary); 2344 if (rc) { 2345 dev_err(&spi->dev, "failed to register ancillary device\n"); 2346 goto err_out; 2347 } 2348 2349 return ancillary; 2350 2351 err_out: 2352 spi_dev_put(ancillary); 2353 return ERR_PTR(rc); 2354 } 2355 EXPORT_SYMBOL_GPL(spi_new_ancillary_device); 2356 2357 #ifdef CONFIG_ACPI 2358 struct acpi_spi_lookup { 2359 struct spi_controller *ctlr; 2360 u32 max_speed_hz; 2361 u32 mode; 2362 int irq; 2363 u8 bits_per_word; 2364 u8 chip_select; 2365 int n; 2366 int index; 2367 }; 2368 2369 static int acpi_spi_count(struct acpi_resource *ares, void *data) 2370 { 2371 struct acpi_resource_spi_serialbus *sb; 2372 int *count = data; 2373 2374 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) 2375 return 1; 2376 2377 sb = &ares->data.spi_serial_bus; 2378 if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI) 2379 return 1; 2380 2381 *count = *count + 1; 2382 2383 return 1; 2384 } 2385 2386 /** 2387 * acpi_spi_count_resources - Count the number of SpiSerialBus resources 2388 * @adev: ACPI device 2389 * 2390 * Returns the number of SpiSerialBus resources in the ACPI-device's 2391 * resource-list; or a negative error code. 
2392 */ 2393 int acpi_spi_count_resources(struct acpi_device *adev) 2394 { 2395 LIST_HEAD(r); 2396 int count = 0; 2397 int ret; 2398 2399 ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count); 2400 if (ret < 0) 2401 return ret; 2402 2403 acpi_dev_free_resource_list(&r); 2404 2405 return count; 2406 } 2407 EXPORT_SYMBOL_GPL(acpi_spi_count_resources); 2408 2409 static void acpi_spi_parse_apple_properties(struct acpi_device *dev, 2410 struct acpi_spi_lookup *lookup) 2411 { 2412 const union acpi_object *obj; 2413 2414 if (!x86_apple_machine) 2415 return; 2416 2417 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj) 2418 && obj->buffer.length >= 4) 2419 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer; 2420 2421 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj) 2422 && obj->buffer.length == 8) 2423 lookup->bits_per_word = *(u64 *)obj->buffer.pointer; 2424 2425 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj) 2426 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer) 2427 lookup->mode |= SPI_LSB_FIRST; 2428 2429 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj) 2430 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 2431 lookup->mode |= SPI_CPOL; 2432 2433 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj) 2434 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 2435 lookup->mode |= SPI_CPHA; 2436 } 2437 2438 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev); 2439 2440 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 2441 { 2442 struct acpi_spi_lookup *lookup = data; 2443 struct spi_controller *ctlr = lookup->ctlr; 2444 2445 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 2446 struct acpi_resource_spi_serialbus *sb; 2447 acpi_handle parent_handle; 2448 acpi_status status; 2449 2450 sb = &ares->data.spi_serial_bus; 2451 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 2452 2453 if (lookup->index != -1 && lookup->n++ != lookup->index) 2454 return 1; 2455 2456 status = acpi_get_handle(NULL, 2457 sb->resource_source.string_ptr, 2458 &parent_handle); 2459 2460 if (ACPI_FAILURE(status)) 2461 return -ENODEV; 2462 2463 if (ctlr) { 2464 if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle) 2465 return -ENODEV; 2466 } else { 2467 struct acpi_device *adev; 2468 2469 adev = acpi_fetch_acpi_dev(parent_handle); 2470 if (!adev) 2471 return -ENODEV; 2472 2473 ctlr = acpi_spi_find_controller_by_adev(adev); 2474 if (!ctlr) 2475 return -EPROBE_DEFER; 2476 2477 lookup->ctlr = ctlr; 2478 } 2479 2480 /* 2481 * ACPI DeviceSelection numbering is handled by the 2482 * host controller driver in Windows and can vary 2483 * from driver to driver. In Linux we always expect 2484 * 0 .. max - 1 so we need to ask the driver to 2485 * translate between the two schemes. 
2486 */ 2487 if (ctlr->fw_translate_cs) { 2488 int cs = ctlr->fw_translate_cs(ctlr, 2489 sb->device_selection); 2490 if (cs < 0) 2491 return cs; 2492 lookup->chip_select = cs; 2493 } else { 2494 lookup->chip_select = sb->device_selection; 2495 } 2496 2497 lookup->max_speed_hz = sb->connection_speed; 2498 lookup->bits_per_word = sb->data_bit_length; 2499 2500 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) 2501 lookup->mode |= SPI_CPHA; 2502 if (sb->clock_polarity == ACPI_SPI_START_HIGH) 2503 lookup->mode |= SPI_CPOL; 2504 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) 2505 lookup->mode |= SPI_CS_HIGH; 2506 } 2507 } else if (lookup->irq < 0) { 2508 struct resource r; 2509 2510 if (acpi_dev_resource_interrupt(ares, 0, &r)) 2511 lookup->irq = r.start; 2512 } 2513 2514 /* Always tell the ACPI core to skip this resource */ 2515 return 1; 2516 } 2517 2518 /** 2519 * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information 2520 * @ctlr: controller to which the spi device belongs 2521 * @adev: ACPI Device for the spi device 2522 * @index: Index of the spi resource inside the ACPI Node 2523 * 2524 * This should be used to allocate a new spi device from and ACPI Node. 2525 * The caller is responsible for calling spi_add_device to register the spi device. 2526 * 2527 * If ctlr is set to NULL, the Controller for the spi device will be looked up 2528 * using the resource. 2529 * If index is set to -1, index is not used. 2530 * Note: If index is -1, ctlr must be set. 2531 * 2532 * Return: a pointer to the new device, or ERR_PTR on error. 2533 */ 2534 struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr, 2535 struct acpi_device *adev, 2536 int index) 2537 { 2538 acpi_handle parent_handle = NULL; 2539 struct list_head resource_list; 2540 struct acpi_spi_lookup lookup = {}; 2541 struct spi_device *spi; 2542 int ret; 2543 2544 if (!ctlr && index == -1) 2545 return ERR_PTR(-EINVAL); 2546 2547 lookup.ctlr = ctlr; 2548 lookup.irq = -1; 2549 lookup.index = index; 2550 lookup.n = 0; 2551 2552 INIT_LIST_HEAD(&resource_list); 2553 ret = acpi_dev_get_resources(adev, &resource_list, 2554 acpi_spi_add_resource, &lookup); 2555 acpi_dev_free_resource_list(&resource_list); 2556 2557 if (ret < 0) 2558 /* Found SPI in _CRS but it points to another controller */ 2559 return ERR_PTR(ret); 2560 2561 if (!lookup.max_speed_hz && 2562 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) && 2563 ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) { 2564 /* Apple does not use _CRS but nested devices for SPI slaves */ 2565 acpi_spi_parse_apple_properties(adev, &lookup); 2566 } 2567 2568 if (!lookup.max_speed_hz) 2569 return ERR_PTR(-ENODEV); 2570 2571 spi = spi_alloc_device(lookup.ctlr); 2572 if (!spi) { 2573 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n", 2574 dev_name(&adev->dev)); 2575 return ERR_PTR(-ENOMEM); 2576 } 2577 2578 ACPI_COMPANION_SET(&spi->dev, adev); 2579 spi->max_speed_hz = lookup.max_speed_hz; 2580 spi->mode |= lookup.mode; 2581 spi->irq = lookup.irq; 2582 spi->bits_per_word = lookup.bits_per_word; 2583 spi->chip_select = lookup.chip_select; 2584 2585 return spi; 2586 } 2587 EXPORT_SYMBOL_GPL(acpi_spi_device_alloc); 2588 2589 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, 2590 struct acpi_device *adev) 2591 { 2592 struct spi_device *spi; 2593 2594 if (acpi_bus_get_status(adev) || !adev->status.present || 2595 acpi_device_enumerated(adev)) 2596 return AE_OK; 2597 2598 spi = acpi_spi_device_alloc(ctlr, adev, -1); 
2599 if (IS_ERR(spi)) { 2600 if (PTR_ERR(spi) == -ENOMEM) 2601 return AE_NO_MEMORY; 2602 else 2603 return AE_OK; 2604 } 2605 2606 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, 2607 sizeof(spi->modalias)); 2608 2609 if (spi->irq < 0) 2610 spi->irq = acpi_dev_gpio_irq_get(adev, 0); 2611 2612 acpi_device_set_enumerated(adev); 2613 2614 adev->power.flags.ignore_parent = true; 2615 if (spi_add_device(spi)) { 2616 adev->power.flags.ignore_parent = false; 2617 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n", 2618 dev_name(&adev->dev)); 2619 spi_dev_put(spi); 2620 } 2621 2622 return AE_OK; 2623 } 2624 2625 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, 2626 void *data, void **return_value) 2627 { 2628 struct acpi_device *adev = acpi_fetch_acpi_dev(handle); 2629 struct spi_controller *ctlr = data; 2630 2631 if (!adev) 2632 return AE_OK; 2633 2634 return acpi_register_spi_device(ctlr, adev); 2635 } 2636 2637 #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32 2638 2639 static void acpi_register_spi_devices(struct spi_controller *ctlr) 2640 { 2641 acpi_status status; 2642 acpi_handle handle; 2643 2644 handle = ACPI_HANDLE(ctlr->dev.parent); 2645 if (!handle) 2646 return; 2647 2648 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 2649 SPI_ACPI_ENUMERATE_MAX_DEPTH, 2650 acpi_spi_add_device, NULL, ctlr, NULL); 2651 if (ACPI_FAILURE(status)) 2652 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n"); 2653 } 2654 #else 2655 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {} 2656 #endif /* CONFIG_ACPI */ 2657 2658 static void spi_controller_release(struct device *dev) 2659 { 2660 struct spi_controller *ctlr; 2661 2662 ctlr = container_of(dev, struct spi_controller, dev); 2663 kfree(ctlr); 2664 } 2665 2666 static struct class spi_master_class = { 2667 .name = "spi_master", 2668 .owner = THIS_MODULE, 2669 .dev_release = spi_controller_release, 2670 .dev_groups = spi_master_groups, 2671 }; 2672 2673 #ifdef CONFIG_SPI_SLAVE 2674 /** 2675 * spi_slave_abort - abort the ongoing transfer request on an SPI slave 2676 * controller 2677 * @spi: device used for the current transfer 2678 */ 2679 int spi_slave_abort(struct spi_device *spi) 2680 { 2681 struct spi_controller *ctlr = spi->controller; 2682 2683 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort) 2684 return ctlr->slave_abort(ctlr); 2685 2686 return -ENOTSUPP; 2687 } 2688 EXPORT_SYMBOL_GPL(spi_slave_abort); 2689 2690 static int match_true(struct device *dev, void *data) 2691 { 2692 return 1; 2693 } 2694 2695 static ssize_t slave_show(struct device *dev, struct device_attribute *attr, 2696 char *buf) 2697 { 2698 struct spi_controller *ctlr = container_of(dev, struct spi_controller, 2699 dev); 2700 struct device *child; 2701 2702 child = device_find_child(&ctlr->dev, NULL, match_true); 2703 return sprintf(buf, "%s\n", 2704 child ? 
to_spi_device(child)->modalias : NULL); 2705 } 2706 2707 static ssize_t slave_store(struct device *dev, struct device_attribute *attr, 2708 const char *buf, size_t count) 2709 { 2710 struct spi_controller *ctlr = container_of(dev, struct spi_controller, 2711 dev); 2712 struct spi_device *spi; 2713 struct device *child; 2714 char name[32]; 2715 int rc; 2716 2717 rc = sscanf(buf, "%31s", name); 2718 if (rc != 1 || !name[0]) 2719 return -EINVAL; 2720 2721 child = device_find_child(&ctlr->dev, NULL, match_true); 2722 if (child) { 2723 /* Remove registered slave */ 2724 device_unregister(child); 2725 put_device(child); 2726 } 2727 2728 if (strcmp(name, "(null)")) { 2729 /* Register new slave */ 2730 spi = spi_alloc_device(ctlr); 2731 if (!spi) 2732 return -ENOMEM; 2733 2734 strlcpy(spi->modalias, name, sizeof(spi->modalias)); 2735 2736 rc = spi_add_device(spi); 2737 if (rc) { 2738 spi_dev_put(spi); 2739 return rc; 2740 } 2741 } 2742 2743 return count; 2744 } 2745 2746 static DEVICE_ATTR_RW(slave); 2747 2748 static struct attribute *spi_slave_attrs[] = { 2749 &dev_attr_slave.attr, 2750 NULL, 2751 }; 2752 2753 static const struct attribute_group spi_slave_group = { 2754 .attrs = spi_slave_attrs, 2755 }; 2756 2757 static const struct attribute_group *spi_slave_groups[] = { 2758 &spi_controller_statistics_group, 2759 &spi_slave_group, 2760 NULL, 2761 }; 2762 2763 static struct class spi_slave_class = { 2764 .name = "spi_slave", 2765 .owner = THIS_MODULE, 2766 .dev_release = spi_controller_release, 2767 .dev_groups = spi_slave_groups, 2768 }; 2769 #else 2770 extern struct class spi_slave_class; /* dummy */ 2771 #endif 2772 2773 /** 2774 * __spi_alloc_controller - allocate an SPI master or slave controller 2775 * @dev: the controller, possibly using the platform_bus 2776 * @size: how much zeroed driver-private data to allocate; the pointer to this 2777 * memory is in the driver_data field of the returned device, accessible 2778 * with spi_controller_get_devdata(); the memory is cacheline aligned; 2779 * drivers granting DMA access to portions of their private data need to 2780 * round up @size using ALIGN(size, dma_get_cache_alignment()). 2781 * @slave: flag indicating whether to allocate an SPI master (false) or SPI 2782 * slave (true) controller 2783 * Context: can sleep 2784 * 2785 * This call is used only by SPI controller drivers, which are the 2786 * only ones directly touching chip registers. It's how they allocate 2787 * an spi_controller structure, prior to calling spi_register_controller(). 2788 * 2789 * This must be called from context that can sleep. 2790 * 2791 * The caller is responsible for assigning the bus number and initializing the 2792 * controller's methods before calling spi_register_controller(); and (after 2793 * errors adding the device) calling spi_controller_put() to prevent a memory 2794 * leak. 2795 * 2796 * Return: the SPI controller structure on success, else NULL. 
2797 */ 2798 struct spi_controller *__spi_alloc_controller(struct device *dev, 2799 unsigned int size, bool slave) 2800 { 2801 struct spi_controller *ctlr; 2802 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment()); 2803 2804 if (!dev) 2805 return NULL; 2806 2807 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL); 2808 if (!ctlr) 2809 return NULL; 2810 2811 device_initialize(&ctlr->dev); 2812 INIT_LIST_HEAD(&ctlr->queue); 2813 spin_lock_init(&ctlr->queue_lock); 2814 spin_lock_init(&ctlr->bus_lock_spinlock); 2815 mutex_init(&ctlr->bus_lock_mutex); 2816 mutex_init(&ctlr->io_mutex); 2817 mutex_init(&ctlr->add_lock); 2818 ctlr->bus_num = -1; 2819 ctlr->num_chipselect = 1; 2820 ctlr->slave = slave; 2821 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave) 2822 ctlr->dev.class = &spi_slave_class; 2823 else 2824 ctlr->dev.class = &spi_master_class; 2825 ctlr->dev.parent = dev; 2826 pm_suspend_ignore_children(&ctlr->dev, true); 2827 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size); 2828 2829 return ctlr; 2830 } 2831 EXPORT_SYMBOL_GPL(__spi_alloc_controller); 2832 2833 static void devm_spi_release_controller(struct device *dev, void *ctlr) 2834 { 2835 spi_controller_put(*(struct spi_controller **)ctlr); 2836 } 2837 2838 /** 2839 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller() 2840 * @dev: physical device of SPI controller 2841 * @size: how much zeroed driver-private data to allocate 2842 * @slave: whether to allocate an SPI master (false) or SPI slave (true) 2843 * Context: can sleep 2844 * 2845 * Allocate an SPI controller and automatically release a reference on it 2846 * when @dev is unbound from its driver. Drivers are thus relieved from 2847 * having to call spi_controller_put(). 2848 * 2849 * The arguments to this function are identical to __spi_alloc_controller(). 2850 * 2851 * Return: the SPI controller structure on success, else NULL. 2852 */ 2853 struct spi_controller *__devm_spi_alloc_controller(struct device *dev, 2854 unsigned int size, 2855 bool slave) 2856 { 2857 struct spi_controller **ptr, *ctlr; 2858 2859 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr), 2860 GFP_KERNEL); 2861 if (!ptr) 2862 return NULL; 2863 2864 ctlr = __spi_alloc_controller(dev, size, slave); 2865 if (ctlr) { 2866 ctlr->devm_allocated = true; 2867 *ptr = ctlr; 2868 devres_add(dev, ptr); 2869 } else { 2870 devres_free(ptr); 2871 } 2872 2873 return ctlr; 2874 } 2875 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller); 2876 2877 /** 2878 * spi_get_gpio_descs() - grab chip select GPIOs for the master 2879 * @ctlr: The SPI master to grab GPIO descriptors for 2880 */ 2881 static int spi_get_gpio_descs(struct spi_controller *ctlr) 2882 { 2883 int nb, i; 2884 struct gpio_desc **cs; 2885 struct device *dev = &ctlr->dev; 2886 unsigned long native_cs_mask = 0; 2887 unsigned int num_cs_gpios = 0; 2888 2889 nb = gpiod_count(dev, "cs"); 2890 if (nb < 0) { 2891 /* No GPIOs at all is fine, else return the error */ 2892 if (nb == -ENOENT) 2893 return 0; 2894 return nb; 2895 } 2896 2897 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); 2898 2899 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs), 2900 GFP_KERNEL); 2901 if (!cs) 2902 return -ENOMEM; 2903 ctlr->cs_gpiods = cs; 2904 2905 for (i = 0; i < nb; i++) { 2906 /* 2907 * Most chipselects are active low, the inverted 2908 * semantics are handled by special quirks in gpiolib, 2909 * so initializing them GPIOD_OUT_LOW here means 2910 * "unasserted", in most cases this will drive the physical 2911 * line high. 
2912 */ 2913 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i, 2914 GPIOD_OUT_LOW); 2915 if (IS_ERR(cs[i])) 2916 return PTR_ERR(cs[i]); 2917 2918 if (cs[i]) { 2919 /* 2920 * If we find a CS GPIO, name it after the device and 2921 * chip select line. 2922 */ 2923 char *gpioname; 2924 2925 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d", 2926 dev_name(dev), i); 2927 if (!gpioname) 2928 return -ENOMEM; 2929 gpiod_set_consumer_name(cs[i], gpioname); 2930 num_cs_gpios++; 2931 continue; 2932 } 2933 2934 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) { 2935 dev_err(dev, "Invalid native chip select %d\n", i); 2936 return -EINVAL; 2937 } 2938 native_cs_mask |= BIT(i); 2939 } 2940 2941 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1; 2942 2943 if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios && 2944 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) { 2945 dev_err(dev, "No unused native chip select available\n"); 2946 return -EINVAL; 2947 } 2948 2949 return 0; 2950 } 2951 2952 static int spi_controller_check_ops(struct spi_controller *ctlr) 2953 { 2954 /* 2955 * The controller may implement only the high-level SPI-memory like 2956 * operations if it does not support regular SPI transfers, and this is 2957 * valid use case. 2958 * If ->mem_ops is NULL, we request that at least one of the 2959 * ->transfer_xxx() method be implemented. 2960 */ 2961 if (ctlr->mem_ops) { 2962 if (!ctlr->mem_ops->exec_op) 2963 return -EINVAL; 2964 } else if (!ctlr->transfer && !ctlr->transfer_one && 2965 !ctlr->transfer_one_message) { 2966 return -EINVAL; 2967 } 2968 2969 return 0; 2970 } 2971 2972 /** 2973 * spi_register_controller - register SPI master or slave controller 2974 * @ctlr: initialized master, originally from spi_alloc_master() or 2975 * spi_alloc_slave() 2976 * Context: can sleep 2977 * 2978 * SPI controllers connect to their drivers using some non-SPI bus, 2979 * such as the platform bus. The final stage of probe() in that code 2980 * includes calling spi_register_controller() to hook up to this SPI bus glue. 2981 * 2982 * SPI controllers use board specific (often SOC specific) bus numbers, 2983 * and board-specific addressing for SPI devices combines those numbers 2984 * with chip select numbers. Since SPI does not directly support dynamic 2985 * device identification, boards need configuration tables telling which 2986 * chip is at which address. 2987 * 2988 * This must be called from context that can sleep. It returns zero on 2989 * success, else a negative error code (dropping the controller's refcount). 2990 * After a successful return, the caller is responsible for calling 2991 * spi_unregister_controller(). 2992 * 2993 * Return: zero on success, else a negative error code. 2994 */ 2995 int spi_register_controller(struct spi_controller *ctlr) 2996 { 2997 struct device *dev = ctlr->dev.parent; 2998 struct boardinfo *bi; 2999 int status; 3000 int id, first_dynamic; 3001 3002 if (!dev) 3003 return -ENODEV; 3004 3005 /* 3006 * Make sure all necessary hooks are implemented before registering 3007 * the SPI controller. 3008 */ 3009 status = spi_controller_check_ops(ctlr); 3010 if (status) 3011 return status; 3012 3013 if (ctlr->bus_num >= 0) { 3014 /* Devices with a fixed bus num must check-in with the num */ 3015 mutex_lock(&board_lock); 3016 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, 3017 ctlr->bus_num + 1, GFP_KERNEL); 3018 mutex_unlock(&board_lock); 3019 if (WARN(id < 0, "couldn't get idr")) 3020 return id == -ENOSPC ? 
-EBUSY : id; 3021 ctlr->bus_num = id; 3022 } else if (ctlr->dev.of_node) { 3023 /* Allocate dynamic bus number using Linux idr */ 3024 id = of_alias_get_id(ctlr->dev.of_node, "spi"); 3025 if (id >= 0) { 3026 ctlr->bus_num = id; 3027 mutex_lock(&board_lock); 3028 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, 3029 ctlr->bus_num + 1, GFP_KERNEL); 3030 mutex_unlock(&board_lock); 3031 if (WARN(id < 0, "couldn't get idr")) 3032 return id == -ENOSPC ? -EBUSY : id; 3033 } 3034 } 3035 if (ctlr->bus_num < 0) { 3036 first_dynamic = of_alias_get_highest_id("spi"); 3037 if (first_dynamic < 0) 3038 first_dynamic = 0; 3039 else 3040 first_dynamic++; 3041 3042 mutex_lock(&board_lock); 3043 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic, 3044 0, GFP_KERNEL); 3045 mutex_unlock(&board_lock); 3046 if (WARN(id < 0, "couldn't get idr")) 3047 return id; 3048 ctlr->bus_num = id; 3049 } 3050 ctlr->bus_lock_flag = 0; 3051 init_completion(&ctlr->xfer_completion); 3052 init_completion(&ctlr->cur_msg_completion); 3053 if (!ctlr->max_dma_len) 3054 ctlr->max_dma_len = INT_MAX; 3055 3056 /* 3057 * Register the device, then userspace will see it. 3058 * Registration fails if the bus ID is in use. 3059 */ 3060 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num); 3061 3062 if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) { 3063 status = spi_get_gpio_descs(ctlr); 3064 if (status) 3065 goto free_bus_id; 3066 /* 3067 * A controller using GPIO descriptors always 3068 * supports SPI_CS_HIGH if need be. 3069 */ 3070 ctlr->mode_bits |= SPI_CS_HIGH; 3071 } 3072 3073 /* 3074 * Even if it's just one always-selected device, there must 3075 * be at least one chipselect. 3076 */ 3077 if (!ctlr->num_chipselect) { 3078 status = -EINVAL; 3079 goto free_bus_id; 3080 } 3081 3082 /* Setting last_cs to -1 means no chip selected */ 3083 ctlr->last_cs = -1; 3084 3085 status = device_add(&ctlr->dev); 3086 if (status < 0) 3087 goto free_bus_id; 3088 dev_dbg(dev, "registered %s %s\n", 3089 spi_controller_is_slave(ctlr) ? "slave" : "master", 3090 dev_name(&ctlr->dev)); 3091 3092 /* 3093 * If we're using a queued driver, start the queue. Note that we don't 3094 * need the queueing logic if the driver is only supporting high-level 3095 * memory operations. 
3096 */ 3097 if (ctlr->transfer) { 3098 dev_info(dev, "controller is unqueued, this is deprecated\n"); 3099 } else if (ctlr->transfer_one || ctlr->transfer_one_message) { 3100 status = spi_controller_initialize_queue(ctlr); 3101 if (status) { 3102 device_del(&ctlr->dev); 3103 goto free_bus_id; 3104 } 3105 } 3106 /* Add statistics */ 3107 ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev); 3108 if (!ctlr->pcpu_statistics) { 3109 dev_err(dev, "Error allocating per-cpu statistics\n"); 3110 status = -ENOMEM; 3111 goto destroy_queue; 3112 } 3113 3114 mutex_lock(&board_lock); 3115 list_add_tail(&ctlr->list, &spi_controller_list); 3116 list_for_each_entry(bi, &board_list, list) 3117 spi_match_controller_to_boardinfo(ctlr, &bi->board_info); 3118 mutex_unlock(&board_lock); 3119 3120 /* Register devices from the device tree and ACPI */ 3121 of_register_spi_devices(ctlr); 3122 acpi_register_spi_devices(ctlr); 3123 return status; 3124 3125 destroy_queue: 3126 spi_destroy_queue(ctlr); 3127 free_bus_id: 3128 mutex_lock(&board_lock); 3129 idr_remove(&spi_master_idr, ctlr->bus_num); 3130 mutex_unlock(&board_lock); 3131 return status; 3132 } 3133 EXPORT_SYMBOL_GPL(spi_register_controller); 3134 3135 static void devm_spi_unregister(struct device *dev, void *res) 3136 { 3137 spi_unregister_controller(*(struct spi_controller **)res); 3138 } 3139 3140 /** 3141 * devm_spi_register_controller - register managed SPI master or slave 3142 * controller 3143 * @dev: device managing SPI controller 3144 * @ctlr: initialized controller, originally from spi_alloc_master() or 3145 * spi_alloc_slave() 3146 * Context: can sleep 3147 * 3148 * Register a SPI device as with spi_register_controller() which will 3149 * automatically be unregistered and freed. 3150 * 3151 * Return: zero on success, else a negative error code. 3152 */ 3153 int devm_spi_register_controller(struct device *dev, 3154 struct spi_controller *ctlr) 3155 { 3156 struct spi_controller **ptr; 3157 int ret; 3158 3159 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); 3160 if (!ptr) 3161 return -ENOMEM; 3162 3163 ret = spi_register_controller(ctlr); 3164 if (!ret) { 3165 *ptr = ctlr; 3166 devres_add(dev, ptr); 3167 } else { 3168 devres_free(ptr); 3169 } 3170 3171 return ret; 3172 } 3173 EXPORT_SYMBOL_GPL(devm_spi_register_controller); 3174 3175 static int __unregister(struct device *dev, void *null) 3176 { 3177 spi_unregister_device(to_spi_device(dev)); 3178 return 0; 3179 } 3180 3181 /** 3182 * spi_unregister_controller - unregister SPI master or slave controller 3183 * @ctlr: the controller being unregistered 3184 * Context: can sleep 3185 * 3186 * This call is used only by SPI controller drivers, which are the 3187 * only ones directly touching chip registers. 3188 * 3189 * This must be called from context that can sleep. 3190 * 3191 * Note that this function also drops a reference to the controller. 
3192 */ 3193 void spi_unregister_controller(struct spi_controller *ctlr) 3194 { 3195 struct spi_controller *found; 3196 int id = ctlr->bus_num; 3197 3198 /* Prevent addition of new devices, unregister existing ones */ 3199 if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) 3200 mutex_lock(&ctlr->add_lock); 3201 3202 device_for_each_child(&ctlr->dev, NULL, __unregister); 3203 3204 /* First make sure that this controller was ever added */ 3205 mutex_lock(&board_lock); 3206 found = idr_find(&spi_master_idr, id); 3207 mutex_unlock(&board_lock); 3208 if (ctlr->queued) { 3209 if (spi_destroy_queue(ctlr)) 3210 dev_err(&ctlr->dev, "queue remove failed\n"); 3211 } 3212 mutex_lock(&board_lock); 3213 list_del(&ctlr->list); 3214 mutex_unlock(&board_lock); 3215 3216 device_del(&ctlr->dev); 3217 3218 /* Free bus id */ 3219 mutex_lock(&board_lock); 3220 if (found == ctlr) 3221 idr_remove(&spi_master_idr, id); 3222 mutex_unlock(&board_lock); 3223 3224 if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) 3225 mutex_unlock(&ctlr->add_lock); 3226 3227 /* Release the last reference on the controller if its driver 3228 * has not yet been converted to devm_spi_alloc_master/slave(). 3229 */ 3230 if (!ctlr->devm_allocated) 3231 put_device(&ctlr->dev); 3232 } 3233 EXPORT_SYMBOL_GPL(spi_unregister_controller); 3234 3235 int spi_controller_suspend(struct spi_controller *ctlr) 3236 { 3237 int ret; 3238 3239 /* Basically no-ops for non-queued controllers */ 3240 if (!ctlr->queued) 3241 return 0; 3242 3243 ret = spi_stop_queue(ctlr); 3244 if (ret) 3245 dev_err(&ctlr->dev, "queue stop failed\n"); 3246 3247 return ret; 3248 } 3249 EXPORT_SYMBOL_GPL(spi_controller_suspend); 3250 3251 int spi_controller_resume(struct spi_controller *ctlr) 3252 { 3253 int ret; 3254 3255 if (!ctlr->queued) 3256 return 0; 3257 3258 ret = spi_start_queue(ctlr); 3259 if (ret) 3260 dev_err(&ctlr->dev, "queue restart failed\n"); 3261 3262 return ret; 3263 } 3264 EXPORT_SYMBOL_GPL(spi_controller_resume); 3265 3266 /*-------------------------------------------------------------------------*/ 3267 3268 /* Core methods for spi_message alterations */ 3269 3270 static void __spi_replace_transfers_release(struct spi_controller *ctlr, 3271 struct spi_message *msg, 3272 void *res) 3273 { 3274 struct spi_replaced_transfers *rxfer = res; 3275 size_t i; 3276 3277 /* Call extra callback if requested */ 3278 if (rxfer->release) 3279 rxfer->release(ctlr, msg, res); 3280 3281 /* Insert replaced transfers back into the message */ 3282 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); 3283 3284 /* Remove the formerly inserted entries */ 3285 for (i = 0; i < rxfer->inserted; i++) 3286 list_del(&rxfer->inserted_transfers[i].transfer_list); 3287 } 3288 3289 /** 3290 * spi_replace_transfers - replace transfers with several transfers 3291 * and register change with spi_message.resources 3292 * @msg: the spi_message we work upon 3293 * @xfer_first: the first spi_transfer we want to replace 3294 * @remove: number of transfers to remove 3295 * @insert: the number of transfers we want to insert instead 3296 * @release: extra release code necessary in some circumstances 3297 * @extradatasize: extra data to allocate (with alignment guarantees 3298 * of struct @spi_transfer) 3299 * @gfp: gfp flags 3300 * 3301 * Returns: pointer to @spi_replaced_transfers, 3302 * PTR_ERR(...) in case of errors. 
3303 */ 3304 static struct spi_replaced_transfers *spi_replace_transfers( 3305 struct spi_message *msg, 3306 struct spi_transfer *xfer_first, 3307 size_t remove, 3308 size_t insert, 3309 spi_replaced_release_t release, 3310 size_t extradatasize, 3311 gfp_t gfp) 3312 { 3313 struct spi_replaced_transfers *rxfer; 3314 struct spi_transfer *xfer; 3315 size_t i; 3316 3317 /* Allocate the structure using spi_res */ 3318 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, 3319 struct_size(rxfer, inserted_transfers, insert) 3320 + extradatasize, 3321 gfp); 3322 if (!rxfer) 3323 return ERR_PTR(-ENOMEM); 3324 3325 /* The release code to invoke before running the generic release */ 3326 rxfer->release = release; 3327 3328 /* Assign extradata */ 3329 if (extradatasize) 3330 rxfer->extradata = 3331 &rxfer->inserted_transfers[insert]; 3332 3333 /* Init the replaced_transfers list */ 3334 INIT_LIST_HEAD(&rxfer->replaced_transfers); 3335 3336 /* 3337 * Assign the list_entry after which we should reinsert 3338 * the @replaced_transfers - it may be spi_message.messages! 3339 */ 3340 rxfer->replaced_after = xfer_first->transfer_list.prev; 3341 3342 /* Remove the requested number of transfers */ 3343 for (i = 0; i < remove; i++) { 3344 /* 3345 * If the entry after replaced_after it is msg->transfers 3346 * then we have been requested to remove more transfers 3347 * than are in the list. 3348 */ 3349 if (rxfer->replaced_after->next == &msg->transfers) { 3350 dev_err(&msg->spi->dev, 3351 "requested to remove more spi_transfers than are available\n"); 3352 /* Insert replaced transfers back into the message */ 3353 list_splice(&rxfer->replaced_transfers, 3354 rxfer->replaced_after); 3355 3356 /* Free the spi_replace_transfer structure... */ 3357 spi_res_free(rxfer); 3358 3359 /* ...and return with an error */ 3360 return ERR_PTR(-EINVAL); 3361 } 3362 3363 /* 3364 * Remove the entry after replaced_after from list of 3365 * transfers and add it to list of replaced_transfers. 3366 */ 3367 list_move_tail(rxfer->replaced_after->next, 3368 &rxfer->replaced_transfers); 3369 } 3370 3371 /* 3372 * Create copy of the given xfer with identical settings 3373 * based on the first transfer to get removed. 3374 */ 3375 for (i = 0; i < insert; i++) { 3376 /* We need to run in reverse order */ 3377 xfer = &rxfer->inserted_transfers[insert - 1 - i]; 3378 3379 /* Copy all spi_transfer data */ 3380 memcpy(xfer, xfer_first, sizeof(*xfer)); 3381 3382 /* Add to list */ 3383 list_add(&xfer->transfer_list, rxfer->replaced_after); 3384 3385 /* Clear cs_change and delay for all but the last */ 3386 if (i) { 3387 xfer->cs_change = false; 3388 xfer->delay.value = 0; 3389 } 3390 } 3391 3392 /* Set up inserted... 
*/ 3393 rxfer->inserted = insert; 3394 3395 /* ...and register it with spi_res/spi_message */ 3396 spi_res_add(msg, rxfer); 3397 3398 return rxfer; 3399 } 3400 3401 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, 3402 struct spi_message *msg, 3403 struct spi_transfer **xferp, 3404 size_t maxsize, 3405 gfp_t gfp) 3406 { 3407 struct spi_transfer *xfer = *xferp, *xfers; 3408 struct spi_replaced_transfers *srt; 3409 size_t offset; 3410 size_t count, i; 3411 3412 /* Calculate how many we have to replace */ 3413 count = DIV_ROUND_UP(xfer->len, maxsize); 3414 3415 /* Create replacement */ 3416 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp); 3417 if (IS_ERR(srt)) 3418 return PTR_ERR(srt); 3419 xfers = srt->inserted_transfers; 3420 3421 /* 3422 * Now handle each of those newly inserted spi_transfers. 3423 * Note that the replacements spi_transfers all are preset 3424 * to the same values as *xferp, so tx_buf, rx_buf and len 3425 * are all identical (as well as most others) 3426 * so we just have to fix up len and the pointers. 3427 * 3428 * This also includes support for the depreciated 3429 * spi_message.is_dma_mapped interface. 3430 */ 3431 3432 /* 3433 * The first transfer just needs the length modified, so we 3434 * run it outside the loop. 3435 */ 3436 xfers[0].len = min_t(size_t, maxsize, xfer[0].len); 3437 3438 /* All the others need rx_buf/tx_buf also set */ 3439 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) { 3440 /* Update rx_buf, tx_buf and dma */ 3441 if (xfers[i].rx_buf) 3442 xfers[i].rx_buf += offset; 3443 if (xfers[i].rx_dma) 3444 xfers[i].rx_dma += offset; 3445 if (xfers[i].tx_buf) 3446 xfers[i].tx_buf += offset; 3447 if (xfers[i].tx_dma) 3448 xfers[i].tx_dma += offset; 3449 3450 /* Update length */ 3451 xfers[i].len = min(maxsize, xfers[i].len - offset); 3452 } 3453 3454 /* 3455 * We set up xferp to the last entry we have inserted, 3456 * so that we skip those already split transfers. 3457 */ 3458 *xferp = &xfers[count - 1]; 3459 3460 /* Increment statistics counters */ 3461 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, 3462 transfers_split_maxsize); 3463 SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics, 3464 transfers_split_maxsize); 3465 3466 return 0; 3467 } 3468 3469 /** 3470 * spi_split_transfers_maxsize - split spi transfers into multiple transfers 3471 * when an individual transfer exceeds a 3472 * certain size 3473 * @ctlr: the @spi_controller for this transfer 3474 * @msg: the @spi_message to transform 3475 * @maxsize: the maximum when to apply this 3476 * @gfp: GFP allocation flags 3477 * 3478 * Return: status of transformation 3479 */ 3480 int spi_split_transfers_maxsize(struct spi_controller *ctlr, 3481 struct spi_message *msg, 3482 size_t maxsize, 3483 gfp_t gfp) 3484 { 3485 struct spi_transfer *xfer; 3486 int ret; 3487 3488 /* 3489 * Iterate over the transfer_list, 3490 * but note that xfer is advanced to the last transfer inserted 3491 * to avoid checking sizes again unnecessarily (also xfer does 3492 * potentially belong to a different list by the time the 3493 * replacement has happened). 
3494 */ 3495 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 3496 if (xfer->len > maxsize) { 3497 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, 3498 maxsize, gfp); 3499 if (ret) 3500 return ret; 3501 } 3502 } 3503 3504 return 0; 3505 } 3506 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize); 3507 3508 /*-------------------------------------------------------------------------*/ 3509 3510 /* Core methods for SPI controller protocol drivers. Some of the 3511 * other core methods are currently defined as inline functions. 3512 */ 3513 3514 static int __spi_validate_bits_per_word(struct spi_controller *ctlr, 3515 u8 bits_per_word) 3516 { 3517 if (ctlr->bits_per_word_mask) { 3518 /* Only 32 bits fit in the mask */ 3519 if (bits_per_word > 32) 3520 return -EINVAL; 3521 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word))) 3522 return -EINVAL; 3523 } 3524 3525 return 0; 3526 } 3527 3528 /** 3529 * spi_setup - setup SPI mode and clock rate 3530 * @spi: the device whose settings are being modified 3531 * Context: can sleep, and no requests are queued to the device 3532 * 3533 * SPI protocol drivers may need to update the transfer mode if the 3534 * device doesn't work with its default. They may likewise need 3535 * to update clock rates or word sizes from initial values. This function 3536 * changes those settings, and must be called from a context that can sleep. 3537 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take 3538 * effect the next time the device is selected and data is transferred to 3539 * or from it. When this function returns, the spi device is deselected. 3540 * 3541 * Note that this call will fail if the protocol driver specifies an option 3542 * that the underlying controller or its driver does not support. For 3543 * example, not all hardware supports wire transfers using nine bit words, 3544 * LSB-first wire encoding, or active-high chipselects. 3545 * 3546 * Return: zero on success, else a negative error code. 3547 */ 3548 int spi_setup(struct spi_device *spi) 3549 { 3550 unsigned bad_bits, ugly_bits; 3551 int status = 0; 3552 3553 /* 3554 * Check mode to prevent that any two of DUAL, QUAD and NO_MOSI/MISO 3555 * are set at the same time. 3556 */ 3557 if ((hweight_long(spi->mode & 3558 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) || 3559 (hweight_long(spi->mode & 3560 (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) { 3561 dev_err(&spi->dev, 3562 "setup: can not select any two of dual, quad and no-rx/tx at the same time\n"); 3563 return -EINVAL; 3564 } 3565 /* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */ 3566 if ((spi->mode & SPI_3WIRE) && (spi->mode & 3567 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | 3568 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))) 3569 return -EINVAL; 3570 /* 3571 * Help drivers fail *cleanly* when they need options 3572 * that aren't supported with their current controller. 3573 * SPI_CS_WORD has a fallback software implementation, 3574 * so it is ignored here. 
3575 */ 3576 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD | 3577 SPI_NO_TX | SPI_NO_RX); 3578 ugly_bits = bad_bits & 3579 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | 3580 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL); 3581 if (ugly_bits) { 3582 dev_warn(&spi->dev, 3583 "setup: ignoring unsupported mode bits %x\n", 3584 ugly_bits); 3585 spi->mode &= ~ugly_bits; 3586 bad_bits &= ~ugly_bits; 3587 } 3588 if (bad_bits) { 3589 dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 3590 bad_bits); 3591 return -EINVAL; 3592 } 3593 3594 if (!spi->bits_per_word) { 3595 spi->bits_per_word = 8; 3596 } else { 3597 /* 3598 * Some controllers may not support the default 8 bits-per-word 3599 * so only perform the check when this is explicitly provided. 3600 */ 3601 status = __spi_validate_bits_per_word(spi->controller, 3602 spi->bits_per_word); 3603 if (status) 3604 return status; 3605 } 3606 3607 if (spi->controller->max_speed_hz && 3608 (!spi->max_speed_hz || 3609 spi->max_speed_hz > spi->controller->max_speed_hz)) 3610 spi->max_speed_hz = spi->controller->max_speed_hz; 3611 3612 mutex_lock(&spi->controller->io_mutex); 3613 3614 if (spi->controller->setup) { 3615 status = spi->controller->setup(spi); 3616 if (status) { 3617 mutex_unlock(&spi->controller->io_mutex); 3618 dev_err(&spi->controller->dev, "Failed to setup device: %d\n", 3619 status); 3620 return status; 3621 } 3622 } 3623 3624 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) { 3625 status = pm_runtime_resume_and_get(spi->controller->dev.parent); 3626 if (status < 0) { 3627 mutex_unlock(&spi->controller->io_mutex); 3628 dev_err(&spi->controller->dev, "Failed to power device: %d\n", 3629 status); 3630 return status; 3631 } 3632 3633 /* 3634 * We do not want to return positive value from pm_runtime_get, 3635 * there are many instances of devices calling spi_setup() and 3636 * checking for a non-zero return value instead of a negative 3637 * return value. 3638 */ 3639 status = 0; 3640 3641 spi_set_cs(spi, false, true); 3642 pm_runtime_mark_last_busy(spi->controller->dev.parent); 3643 pm_runtime_put_autosuspend(spi->controller->dev.parent); 3644 } else { 3645 spi_set_cs(spi, false, true); 3646 } 3647 3648 mutex_unlock(&spi->controller->io_mutex); 3649 3650 if (spi->rt && !spi->controller->rt) { 3651 spi->controller->rt = true; 3652 spi_set_thread_rt(spi->controller); 3653 } 3654 3655 trace_spi_setup(spi, status); 3656 3657 dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n", 3658 spi->mode & SPI_MODE_X_MASK, 3659 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 3660 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 3661 (spi->mode & SPI_3WIRE) ? "3wire, " : "", 3662 (spi->mode & SPI_LOOP) ? 
"loopback, " : "", 3663 spi->bits_per_word, spi->max_speed_hz, 3664 status); 3665 3666 return status; 3667 } 3668 EXPORT_SYMBOL_GPL(spi_setup); 3669 3670 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer, 3671 struct spi_device *spi) 3672 { 3673 int delay1, delay2; 3674 3675 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer); 3676 if (delay1 < 0) 3677 return delay1; 3678 3679 delay2 = spi_delay_to_ns(&spi->word_delay, xfer); 3680 if (delay2 < 0) 3681 return delay2; 3682 3683 if (delay1 < delay2) 3684 memcpy(&xfer->word_delay, &spi->word_delay, 3685 sizeof(xfer->word_delay)); 3686 3687 return 0; 3688 } 3689 3690 static int __spi_validate(struct spi_device *spi, struct spi_message *message) 3691 { 3692 struct spi_controller *ctlr = spi->controller; 3693 struct spi_transfer *xfer; 3694 int w_size; 3695 3696 if (list_empty(&message->transfers)) 3697 return -EINVAL; 3698 3699 /* 3700 * If an SPI controller does not support toggling the CS line on each 3701 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO 3702 * for the CS line, we can emulate the CS-per-word hardware function by 3703 * splitting transfers into one-word transfers and ensuring that 3704 * cs_change is set for each transfer. 3705 */ 3706 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) || 3707 spi->cs_gpiod)) { 3708 size_t maxsize; 3709 int ret; 3710 3711 maxsize = (spi->bits_per_word + 7) / 8; 3712 3713 /* spi_split_transfers_maxsize() requires message->spi */ 3714 message->spi = spi; 3715 3716 ret = spi_split_transfers_maxsize(ctlr, message, maxsize, 3717 GFP_KERNEL); 3718 if (ret) 3719 return ret; 3720 3721 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3722 /* Don't change cs_change on the last entry in the list */ 3723 if (list_is_last(&xfer->transfer_list, &message->transfers)) 3724 break; 3725 xfer->cs_change = 1; 3726 } 3727 } 3728 3729 /* 3730 * Half-duplex links include original MicroWire, and ones with 3731 * only one data pin like SPI_3WIRE (switches direction) or where 3732 * either MOSI or MISO is missing. They can also be caused by 3733 * software limitations. 3734 */ 3735 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) || 3736 (spi->mode & SPI_3WIRE)) { 3737 unsigned flags = ctlr->flags; 3738 3739 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3740 if (xfer->rx_buf && xfer->tx_buf) 3741 return -EINVAL; 3742 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf) 3743 return -EINVAL; 3744 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf) 3745 return -EINVAL; 3746 } 3747 } 3748 3749 /* 3750 * Set transfer bits_per_word and max speed as spi device default if 3751 * it is not set for this transfer. 3752 * Set transfer tx_nbits and rx_nbits as single transfer default 3753 * (SPI_NBITS_SINGLE) if it is not set for this transfer. 3754 * Ensure transfer word_delay is at least as long as that required by 3755 * device itself. 
3756 */ 3757 message->frame_length = 0; 3758 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3759 xfer->effective_speed_hz = 0; 3760 message->frame_length += xfer->len; 3761 if (!xfer->bits_per_word) 3762 xfer->bits_per_word = spi->bits_per_word; 3763 3764 if (!xfer->speed_hz) 3765 xfer->speed_hz = spi->max_speed_hz; 3766 3767 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz) 3768 xfer->speed_hz = ctlr->max_speed_hz; 3769 3770 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word)) 3771 return -EINVAL; 3772 3773 /* 3774 * SPI transfer length should be multiple of SPI word size 3775 * where SPI word size should be power-of-two multiple. 3776 */ 3777 if (xfer->bits_per_word <= 8) 3778 w_size = 1; 3779 else if (xfer->bits_per_word <= 16) 3780 w_size = 2; 3781 else 3782 w_size = 4; 3783 3784 /* No partial transfers accepted */ 3785 if (xfer->len % w_size) 3786 return -EINVAL; 3787 3788 if (xfer->speed_hz && ctlr->min_speed_hz && 3789 xfer->speed_hz < ctlr->min_speed_hz) 3790 return -EINVAL; 3791 3792 if (xfer->tx_buf && !xfer->tx_nbits) 3793 xfer->tx_nbits = SPI_NBITS_SINGLE; 3794 if (xfer->rx_buf && !xfer->rx_nbits) 3795 xfer->rx_nbits = SPI_NBITS_SINGLE; 3796 /* 3797 * Check transfer tx/rx_nbits: 3798 * 1. check the value matches one of single, dual and quad 3799 * 2. check tx/rx_nbits match the mode in spi_device 3800 */ 3801 if (xfer->tx_buf) { 3802 if (spi->mode & SPI_NO_TX) 3803 return -EINVAL; 3804 if (xfer->tx_nbits != SPI_NBITS_SINGLE && 3805 xfer->tx_nbits != SPI_NBITS_DUAL && 3806 xfer->tx_nbits != SPI_NBITS_QUAD) 3807 return -EINVAL; 3808 if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 3809 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 3810 return -EINVAL; 3811 if ((xfer->tx_nbits == SPI_NBITS_QUAD) && 3812 !(spi->mode & SPI_TX_QUAD)) 3813 return -EINVAL; 3814 } 3815 /* Check transfer rx_nbits */ 3816 if (xfer->rx_buf) { 3817 if (spi->mode & SPI_NO_RX) 3818 return -EINVAL; 3819 if (xfer->rx_nbits != SPI_NBITS_SINGLE && 3820 xfer->rx_nbits != SPI_NBITS_DUAL && 3821 xfer->rx_nbits != SPI_NBITS_QUAD) 3822 return -EINVAL; 3823 if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 3824 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 3825 return -EINVAL; 3826 if ((xfer->rx_nbits == SPI_NBITS_QUAD) && 3827 !(spi->mode & SPI_RX_QUAD)) 3828 return -EINVAL; 3829 } 3830 3831 if (_spi_xfer_word_delay_update(xfer, spi)) 3832 return -EINVAL; 3833 } 3834 3835 message->status = -EINPROGRESS; 3836 3837 return 0; 3838 } 3839 3840 static int __spi_async(struct spi_device *spi, struct spi_message *message) 3841 { 3842 struct spi_controller *ctlr = spi->controller; 3843 struct spi_transfer *xfer; 3844 3845 /* 3846 * Some controllers do not support doing regular SPI transfers. Return 3847 * ENOTSUPP when this is the case. 
3848 */ 3849 if (!ctlr->transfer) 3850 return -ENOTSUPP; 3851 3852 message->spi = spi; 3853 3854 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async); 3855 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async); 3856 3857 trace_spi_message_submit(message); 3858 3859 if (!ctlr->ptp_sts_supported) { 3860 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3861 xfer->ptp_sts_word_pre = 0; 3862 ptp_read_system_prets(xfer->ptp_sts); 3863 } 3864 } 3865 3866 return ctlr->transfer(spi, message); 3867 } 3868 3869 /** 3870 * spi_async - asynchronous SPI transfer 3871 * @spi: device with which data will be exchanged 3872 * @message: describes the data transfers, including completion callback 3873 * Context: any (irqs may be blocked, etc) 3874 * 3875 * This call may be used in_irq and other contexts which can't sleep, 3876 * as well as from task contexts which can sleep. 3877 * 3878 * The completion callback is invoked in a context which can't sleep. 3879 * Before that invocation, the value of message->status is undefined. 3880 * When the callback is issued, message->status holds either zero (to 3881 * indicate complete success) or a negative error code. After that 3882 * callback returns, the driver which issued the transfer request may 3883 * deallocate the associated memory; it's no longer in use by any SPI 3884 * core or controller driver code. 3885 * 3886 * Note that although all messages to a spi_device are handled in 3887 * FIFO order, messages may go to different devices in other orders. 3888 * Some device might be higher priority, or have various "hard" access 3889 * time requirements, for example. 3890 * 3891 * On detection of any fault during the transfer, processing of 3892 * the entire message is aborted, and the device is deselected. 3893 * Until returning from the associated message completion callback, 3894 * no other spi_message queued to that device will be processed. 3895 * (This rule applies equally to all the synchronous transfer calls, 3896 * which are wrappers around this core asynchronous primitive.) 3897 * 3898 * Return: zero on success, else a negative error code. 3899 */ 3900 int spi_async(struct spi_device *spi, struct spi_message *message) 3901 { 3902 struct spi_controller *ctlr = spi->controller; 3903 int ret; 3904 unsigned long flags; 3905 3906 ret = __spi_validate(spi, message); 3907 if (ret != 0) 3908 return ret; 3909 3910 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 3911 3912 if (ctlr->bus_lock_flag) 3913 ret = -EBUSY; 3914 else 3915 ret = __spi_async(spi, message); 3916 3917 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3918 3919 return ret; 3920 } 3921 EXPORT_SYMBOL_GPL(spi_async); 3922 3923 /** 3924 * spi_async_locked - version of spi_async with exclusive bus usage 3925 * @spi: device with which data will be exchanged 3926 * @message: describes the data transfers, including completion callback 3927 * Context: any (irqs may be blocked, etc) 3928 * 3929 * This call may be used in_irq and other contexts which can't sleep, 3930 * as well as from task contexts which can sleep. 3931 * 3932 * The completion callback is invoked in a context which can't sleep. 3933 * Before that invocation, the value of message->status is undefined. 3934 * When the callback is issued, message->status holds either zero (to 3935 * indicate complete success) or a negative error code. 
After that 3936 * callback returns, the driver which issued the transfer request may 3937 * deallocate the associated memory; it's no longer in use by any SPI 3938 * core or controller driver code. 3939 * 3940 * Note that although all messages to a spi_device are handled in 3941 * FIFO order, messages may go to different devices in other orders. 3942 * Some device might be higher priority, or have various "hard" access 3943 * time requirements, for example. 3944 * 3945 * On detection of any fault during the transfer, processing of 3946 * the entire message is aborted, and the device is deselected. 3947 * Until returning from the associated message completion callback, 3948 * no other spi_message queued to that device will be processed. 3949 * (This rule applies equally to all the synchronous transfer calls, 3950 * which are wrappers around this core asynchronous primitive.) 3951 * 3952 * Return: zero on success, else a negative error code. 3953 */ 3954 static int spi_async_locked(struct spi_device *spi, struct spi_message *message) 3955 { 3956 struct spi_controller *ctlr = spi->controller; 3957 int ret; 3958 unsigned long flags; 3959 3960 ret = __spi_validate(spi, message); 3961 if (ret != 0) 3962 return ret; 3963 3964 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 3965 3966 ret = __spi_async(spi, message); 3967 3968 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3969 3970 return ret; 3971 3972 } 3973 3974 static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg) 3975 { 3976 bool was_busy; 3977 int ret; 3978 3979 mutex_lock(&ctlr->io_mutex); 3980 3981 was_busy = ctlr->busy; 3982 3983 ctlr->cur_msg = msg; 3984 ret = __spi_pump_transfer_message(ctlr, msg, was_busy); 3985 if (ret) 3986 goto out; 3987 3988 ctlr->cur_msg = NULL; 3989 ctlr->fallback = false; 3990 3991 if (!was_busy) { 3992 kfree(ctlr->dummy_rx); 3993 ctlr->dummy_rx = NULL; 3994 kfree(ctlr->dummy_tx); 3995 ctlr->dummy_tx = NULL; 3996 if (ctlr->unprepare_transfer_hardware && 3997 ctlr->unprepare_transfer_hardware(ctlr)) 3998 dev_err(&ctlr->dev, 3999 "failed to unprepare transfer hardware\n"); 4000 spi_idle_runtime_pm(ctlr); 4001 } 4002 4003 out: 4004 mutex_unlock(&ctlr->io_mutex); 4005 } 4006 4007 /*-------------------------------------------------------------------------*/ 4008 4009 /* 4010 * Utility methods for SPI protocol drivers, layered on 4011 * top of the core. Some other utility methods are defined as 4012 * inline functions. 4013 */ 4014 4015 static void spi_complete(void *arg) 4016 { 4017 complete(arg); 4018 } 4019 4020 static int __spi_sync(struct spi_device *spi, struct spi_message *message) 4021 { 4022 DECLARE_COMPLETION_ONSTACK(done); 4023 int status; 4024 struct spi_controller *ctlr = spi->controller; 4025 4026 status = __spi_validate(spi, message); 4027 if (status != 0) 4028 return status; 4029 4030 message->spi = spi; 4031 4032 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync); 4033 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync); 4034 4035 /* 4036 * Checking queue_empty here only guarantees async/sync message 4037 * ordering when coming from the same context. It does not need to 4038 * guard against reentrancy from a different context. The io_mutex 4039 * will catch those cases. 
4040 */ 4041 if (READ_ONCE(ctlr->queue_empty)) { 4042 message->actual_length = 0; 4043 message->status = -EINPROGRESS; 4044 4045 trace_spi_message_submit(message); 4046 4047 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate); 4048 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate); 4049 4050 __spi_transfer_message_noqueue(ctlr, message); 4051 4052 return message->status; 4053 } 4054 4055 /* 4056 * There are messages in the async queue that could have originated 4057 * from the same context, so we need to preserve ordering. 4058 * Therefor we send the message to the async queue and wait until they 4059 * are completed. 4060 */ 4061 message->complete = spi_complete; 4062 message->context = &done; 4063 status = spi_async_locked(spi, message); 4064 if (status == 0) { 4065 wait_for_completion(&done); 4066 status = message->status; 4067 } 4068 message->context = NULL; 4069 4070 return status; 4071 } 4072 4073 /** 4074 * spi_sync - blocking/synchronous SPI data transfers 4075 * @spi: device with which data will be exchanged 4076 * @message: describes the data transfers 4077 * Context: can sleep 4078 * 4079 * This call may only be used from a context that may sleep. The sleep 4080 * is non-interruptible, and has no timeout. Low-overhead controller 4081 * drivers may DMA directly into and out of the message buffers. 4082 * 4083 * Note that the SPI device's chip select is active during the message, 4084 * and then is normally disabled between messages. Drivers for some 4085 * frequently-used devices may want to minimize costs of selecting a chip, 4086 * by leaving it selected in anticipation that the next message will go 4087 * to the same chip. (That may increase power usage.) 4088 * 4089 * Also, the caller is guaranteeing that the memory associated with the 4090 * message will not be freed before this call returns. 4091 * 4092 * Return: zero on success, else a negative error code. 4093 */ 4094 int spi_sync(struct spi_device *spi, struct spi_message *message) 4095 { 4096 int ret; 4097 4098 mutex_lock(&spi->controller->bus_lock_mutex); 4099 ret = __spi_sync(spi, message); 4100 mutex_unlock(&spi->controller->bus_lock_mutex); 4101 4102 return ret; 4103 } 4104 EXPORT_SYMBOL_GPL(spi_sync); 4105 4106 /** 4107 * spi_sync_locked - version of spi_sync with exclusive bus usage 4108 * @spi: device with which data will be exchanged 4109 * @message: describes the data transfers 4110 * Context: can sleep 4111 * 4112 * This call may only be used from a context that may sleep. The sleep 4113 * is non-interruptible, and has no timeout. Low-overhead controller 4114 * drivers may DMA directly into and out of the message buffers. 4115 * 4116 * This call should be used by drivers that require exclusive access to the 4117 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must 4118 * be released by a spi_bus_unlock call when the exclusive access is over. 4119 * 4120 * Return: zero on success, else a negative error code. 4121 */ 4122 int spi_sync_locked(struct spi_device *spi, struct spi_message *message) 4123 { 4124 return __spi_sync(spi, message); 4125 } 4126 EXPORT_SYMBOL_GPL(spi_sync_locked); 4127 4128 /** 4129 * spi_bus_lock - obtain a lock for exclusive SPI bus usage 4130 * @ctlr: SPI bus master that should be locked for exclusive bus access 4131 * Context: can sleep 4132 * 4133 * This call may only be used from a context that may sleep. The sleep 4134 * is non-interruptible, and has no timeout. 
4135 * 4136 * This call should be used by drivers that require exclusive access to the 4137 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the 4138 * exclusive access is over. Data transfer must be done by spi_sync_locked 4139 * and spi_async_locked calls when the SPI bus lock is held. 4140 * 4141 * Return: always zero. 4142 */ 4143 int spi_bus_lock(struct spi_controller *ctlr) 4144 { 4145 unsigned long flags; 4146 4147 mutex_lock(&ctlr->bus_lock_mutex); 4148 4149 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 4150 ctlr->bus_lock_flag = 1; 4151 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 4152 4153 /* Mutex remains locked until spi_bus_unlock() is called */ 4154 4155 return 0; 4156 } 4157 EXPORT_SYMBOL_GPL(spi_bus_lock); 4158 4159 /** 4160 * spi_bus_unlock - release the lock for exclusive SPI bus usage 4161 * @ctlr: SPI bus master that was locked for exclusive bus access 4162 * Context: can sleep 4163 * 4164 * This call may only be used from a context that may sleep. The sleep 4165 * is non-interruptible, and has no timeout. 4166 * 4167 * This call releases an SPI bus lock previously obtained by an spi_bus_lock 4168 * call. 4169 * 4170 * Return: always zero. 4171 */ 4172 int spi_bus_unlock(struct spi_controller *ctlr) 4173 { 4174 ctlr->bus_lock_flag = 0; 4175 4176 mutex_unlock(&ctlr->bus_lock_mutex); 4177 4178 return 0; 4179 } 4180 EXPORT_SYMBOL_GPL(spi_bus_unlock); 4181 4182 /* Portable code must never pass more than 32 bytes */ 4183 #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) 4184 4185 static u8 *buf; 4186 4187 /** 4188 * spi_write_then_read - SPI synchronous write followed by read 4189 * @spi: device with which data will be exchanged 4190 * @txbuf: data to be written (need not be dma-safe) 4191 * @n_tx: size of txbuf, in bytes 4192 * @rxbuf: buffer into which data will be read (need not be dma-safe) 4193 * @n_rx: size of rxbuf, in bytes 4194 * Context: can sleep 4195 * 4196 * This performs a half duplex MicroWire style transaction with the 4197 * device, sending txbuf and then reading rxbuf. The return value 4198 * is zero for success, else a negative errno status code. 4199 * This call may only be used from a context that may sleep. 4200 * 4201 * Parameters to this routine are always copied using a small buffer. 4202 * Performance-sensitive or bulk transfer code should instead use 4203 * spi_{async,sync}() calls with dma-safe buffers. 4204 * 4205 * Return: zero on success, else a negative error code. 4206 */ 4207 int spi_write_then_read(struct spi_device *spi, 4208 const void *txbuf, unsigned n_tx, 4209 void *rxbuf, unsigned n_rx) 4210 { 4211 static DEFINE_MUTEX(lock); 4212 4213 int status; 4214 struct spi_message message; 4215 struct spi_transfer x[2]; 4216 u8 *local_buf; 4217 4218 /* 4219 * Use preallocated DMA-safe buffer if we can. We can't avoid 4220 * copying here, (as a pure convenience thing), but we can 4221 * keep heap costs out of the hot path unless someone else is 4222 * using the pre-allocated buffer or the transfer is too large. 
4223 */ 4224 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { 4225 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), 4226 GFP_KERNEL | GFP_DMA); 4227 if (!local_buf) 4228 return -ENOMEM; 4229 } else { 4230 local_buf = buf; 4231 } 4232 4233 spi_message_init(&message); 4234 memset(x, 0, sizeof(x)); 4235 if (n_tx) { 4236 x[0].len = n_tx; 4237 spi_message_add_tail(&x[0], &message); 4238 } 4239 if (n_rx) { 4240 x[1].len = n_rx; 4241 spi_message_add_tail(&x[1], &message); 4242 } 4243 4244 memcpy(local_buf, txbuf, n_tx); 4245 x[0].tx_buf = local_buf; 4246 x[1].rx_buf = local_buf + n_tx; 4247 4248 /* Do the i/o */ 4249 status = spi_sync(spi, &message); 4250 if (status == 0) 4251 memcpy(rxbuf, x[1].rx_buf, n_rx); 4252 4253 if (x[0].tx_buf == buf) 4254 mutex_unlock(&lock); 4255 else 4256 kfree(local_buf); 4257 4258 return status; 4259 } 4260 EXPORT_SYMBOL_GPL(spi_write_then_read); 4261 4262 /*-------------------------------------------------------------------------*/ 4263 4264 #if IS_ENABLED(CONFIG_OF_DYNAMIC) 4265 /* Must call put_device() when done with returned spi_device device */ 4266 static struct spi_device *of_find_spi_device_by_node(struct device_node *node) 4267 { 4268 struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node); 4269 4270 return dev ? to_spi_device(dev) : NULL; 4271 } 4272 4273 /* The spi controllers are not using spi_bus, so we find it with another way */ 4274 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node) 4275 { 4276 struct device *dev; 4277 4278 dev = class_find_device_by_of_node(&spi_master_class, node); 4279 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) 4280 dev = class_find_device_by_of_node(&spi_slave_class, node); 4281 if (!dev) 4282 return NULL; 4283 4284 /* Reference got in class_find_device */ 4285 return container_of(dev, struct spi_controller, dev); 4286 } 4287 4288 static int of_spi_notify(struct notifier_block *nb, unsigned long action, 4289 void *arg) 4290 { 4291 struct of_reconfig_data *rd = arg; 4292 struct spi_controller *ctlr; 4293 struct spi_device *spi; 4294 4295 switch (of_reconfig_get_state_change(action, arg)) { 4296 case OF_RECONFIG_CHANGE_ADD: 4297 ctlr = of_find_spi_controller_by_node(rd->dn->parent); 4298 if (ctlr == NULL) 4299 return NOTIFY_OK; /* Not for us */ 4300 4301 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { 4302 put_device(&ctlr->dev); 4303 return NOTIFY_OK; 4304 } 4305 4306 spi = of_register_spi_device(ctlr, rd->dn); 4307 put_device(&ctlr->dev); 4308 4309 if (IS_ERR(spi)) { 4310 pr_err("%s: failed to create for '%pOF'\n", 4311 __func__, rd->dn); 4312 of_node_clear_flag(rd->dn, OF_POPULATED); 4313 return notifier_from_errno(PTR_ERR(spi)); 4314 } 4315 break; 4316 4317 case OF_RECONFIG_CHANGE_REMOVE: 4318 /* Already depopulated? */ 4319 if (!of_node_check_flag(rd->dn, OF_POPULATED)) 4320 return NOTIFY_OK; 4321 4322 /* Find our device by node */ 4323 spi = of_find_spi_device_by_node(rd->dn); 4324 if (spi == NULL) 4325 return NOTIFY_OK; /* No? 
not meant for us */ 4326 4327 /* Unregister takes one ref away */ 4328 spi_unregister_device(spi); 4329 4330 /* And put the reference of the find */ 4331 put_device(&spi->dev); 4332 break; 4333 } 4334 4335 return NOTIFY_OK; 4336 } 4337 4338 static struct notifier_block spi_of_notifier = { 4339 .notifier_call = of_spi_notify, 4340 }; 4341 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 4342 extern struct notifier_block spi_of_notifier; 4343 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 4344 4345 #if IS_ENABLED(CONFIG_ACPI) 4346 static int spi_acpi_controller_match(struct device *dev, const void *data) 4347 { 4348 return ACPI_COMPANION(dev->parent) == data; 4349 } 4350 4351 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev) 4352 { 4353 struct device *dev; 4354 4355 dev = class_find_device(&spi_master_class, NULL, adev, 4356 spi_acpi_controller_match); 4357 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) 4358 dev = class_find_device(&spi_slave_class, NULL, adev, 4359 spi_acpi_controller_match); 4360 if (!dev) 4361 return NULL; 4362 4363 return container_of(dev, struct spi_controller, dev); 4364 } 4365 4366 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) 4367 { 4368 struct device *dev; 4369 4370 dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev); 4371 return to_spi_device(dev); 4372 } 4373 4374 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value, 4375 void *arg) 4376 { 4377 struct acpi_device *adev = arg; 4378 struct spi_controller *ctlr; 4379 struct spi_device *spi; 4380 4381 switch (value) { 4382 case ACPI_RECONFIG_DEVICE_ADD: 4383 ctlr = acpi_spi_find_controller_by_adev(adev->parent); 4384 if (!ctlr) 4385 break; 4386 4387 acpi_register_spi_device(ctlr, adev); 4388 put_device(&ctlr->dev); 4389 break; 4390 case ACPI_RECONFIG_DEVICE_REMOVE: 4391 if (!acpi_device_enumerated(adev)) 4392 break; 4393 4394 spi = acpi_spi_find_device_by_adev(adev); 4395 if (!spi) 4396 break; 4397 4398 spi_unregister_device(spi); 4399 put_device(&spi->dev); 4400 break; 4401 } 4402 4403 return NOTIFY_OK; 4404 } 4405 4406 static struct notifier_block spi_acpi_notifier = { 4407 .notifier_call = acpi_spi_notify, 4408 }; 4409 #else 4410 extern struct notifier_block spi_acpi_notifier; 4411 #endif 4412 4413 static int __init spi_init(void) 4414 { 4415 int status; 4416 4417 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 4418 if (!buf) { 4419 status = -ENOMEM; 4420 goto err0; 4421 } 4422 4423 status = bus_register(&spi_bus_type); 4424 if (status < 0) 4425 goto err1; 4426 4427 status = class_register(&spi_master_class); 4428 if (status < 0) 4429 goto err2; 4430 4431 if (IS_ENABLED(CONFIG_SPI_SLAVE)) { 4432 status = class_register(&spi_slave_class); 4433 if (status < 0) 4434 goto err3; 4435 } 4436 4437 if (IS_ENABLED(CONFIG_OF_DYNAMIC)) 4438 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); 4439 if (IS_ENABLED(CONFIG_ACPI)) 4440 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier)); 4441 4442 return 0; 4443 4444 err3: 4445 class_unregister(&spi_master_class); 4446 err2: 4447 bus_unregister(&spi_bus_type); 4448 err1: 4449 kfree(buf); 4450 buf = NULL; 4451 err0: 4452 return status; 4453 } 4454 4455 /* 4456 * A board_info is normally registered in arch_initcall(), 4457 * but even essential drivers wait till later. 4458 * 4459 * REVISIT only boardinfo really needs static linking. The rest (device and 4460 * driver registration) _could_ be dynamically linked (modular) ... 
Costs 4461 * include needing to have boardinfo data structures be much more public. 4462 */ 4463 postcore_initcall(spi_init); 4464