// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/percpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	free_percpu(spi->pcpu_statistics);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = driver_set_override(dev, &spi->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
	struct spi_statistics __percpu *pcpu_stats;

	if (dev)
		pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
	else
		pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);

	if (pcpu_stats) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct spi_statistics *stat;

			stat = per_cpu_ptr(pcpu_stats, cpu);
			u64_stats_init(&stat->syncp);
		}
	}
	return pcpu_stats;
}

static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat,
				   char *buf, size_t offset)
{
	u64 val = 0;
	int i;

	for_each_possible_cpu(i) {
		const struct spi_statistics *pcpu_stats;
		u64_stats_t *field;
		unsigned int start;
		u64 inc;

		pcpu_stats = per_cpu_ptr(stat, i);
		field = (void *)pcpu_stats + offset;
		do {
			start = u64_stats_fetch_begin(&pcpu_stats->syncp);
			inc = u64_stats_read(field);
		} while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
		val += inc;
	}
	return sysfs_emit(buf, "%llu\n", val);
}

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field)			\
static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
					    char *buf)			\
{									\
	return spi_emit_pcpu_stats(stat, buf,				\
				   offsetof(struct spi_statistics, field)); \
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field)					\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field)

SPI_STATISTICS_SHOW(messages);
SPI_STATISTICS_SHOW(transfers);
SPI_STATISTICS_SHOW(errors);
SPI_STATISTICS_SHOW(timedout);

SPI_STATISTICS_SHOW(spi_sync);
SPI_STATISTICS_SHOW(spi_sync_immediate);
SPI_STATISTICS_SHOW(spi_async);

SPI_STATISTICS_SHOW(bytes);
SPI_STATISTICS_SHOW(bytes_rx);
SPI_STATISTICS_SHOW(bytes_tx);

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name = "statistics",
	.attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name = "statistics",
	.attrs = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
					      struct spi_transfer *xfer,
					      struct spi_controller *ctlr)
{
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
	struct spi_statistics *stats;

	if (l2len < 0)
		l2len = 0;

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	u64_stats_inc(&stats->transfers);
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	u64_stats_add(&stats->bytes, xfer->len);
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		u64_stats_add(&stats->bytes_tx, xfer->len);
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		u64_stats_add(&stats->bytes_rx, xfer->len);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}
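
/*
 * Worked example (illustrative): for xfer->len == 100, fls(100) == 7, so
 * l2len == 6 and the transfer is counted in the "transfer_bytes_histo_64-127"
 * bucket; a zero-length transfer yields fls(0) - 1 == -1, which the clamp
 * above folds into bucket 0 ("0-1").
 */
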
/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
	while (id->name[0]) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

const void *spi_get_device_match_data(const struct spi_device *sdev)
{
	const void *match;

	match = device_get_match_data(&sdev->dev);
	if (match)
		return match;

	return (const void *)spi_get_device_id(sdev)->driver_data;
}
EXPORT_SYMBOL_GPL(spi_get_device_match_data);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi->modalias);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove)
		sdrv->remove(to_spi_device(dev));

	dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver *sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases, not of:
	 * modaliases, for DT, so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				spi_id = spi_match_id(sdrv->id_table, of_name);
				if (spi_id)
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
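
/*
 * Illustrative sketch (not part of this file): a minimal driver
 * registration that satisfies the spi_device_id check above.  All "foo"
 * identifiers are hypothetical.
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static const struct spi_device_id foo_spi_ids[] = {
 *		{ "foo" },	// "vendor,foo" with the vendor prefix stripped
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_spi_ids);
 *
 *	static struct spi_driver foo_spi_driver = {
 *		.driver = {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *		.id_table	= foo_spi_ids,
 *		.probe		= foo_probe,	// hypothetical callbacks
 *		.remove		= foo_remove,
 *	};
 *	module_spi_driver(foo_spi_driver);
 *
 * Without foo_spi_ids, the loop above would log a "has no spi_device_id"
 * warning and module autoloading by modalias would not work.
 */
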
/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into board-specific init code like
 * arch/.../mach.../board-YYY.c, together with other readonly (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process; also used to protect
 * objects of type struct idr.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible for calling spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device	*spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
	if (!spi->pcpu_statistics) {
		kfree(spi);
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->mode = ctlr->buswidth_override_bits;

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi_get_chipselect(spi, 0));
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi_get_chipselect(spi, 0) == spi_get_chipselect(new_spi, 0))
		return -EBUSY;
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.
	 */
	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
			spi_get_chipselect(spi, 0));
		return status;
	}

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	if (ctlr->cs_gpiods)
		spi_set_csgpiod(spi, 0, ctlr->cs_gpiods[spi_get_chipselect(spi, 0)]);

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	mutex_lock(&ctlr->add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
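
/*
 * Illustrative sketch (hypothetical values): hand-building a child device
 * with the allocate-then-add pair described in the kernel-doc above:
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	spi_set_chipselect(spi, 0, 2);
 *	spi->max_speed_hz = 1000000;
 *	strscpy(spi->modalias, "foo", sizeof(spi->modalias));
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);	// discard without unregistering
 *		return -ENODEV;
 *	}
 */
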
static int spi_add_device_locked(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;

	/* Chipselects are numbered 0..max; validate. */
	if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	WARN_ON(!mutex_is_locked(&ctlr->add_lock));
	return __spi_add_device(spi);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal, and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/*
	 * NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	spi_set_chipselect(proxy, 0, chip->chip_select);
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);
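
/*
 * Illustrative sketch: how an adapter driver (e.g. a USB-to-SPI bridge)
 * might instantiate a device it learned about out-of-band, per the
 * kernel-doc above.  The chip descriptor is hypothetical.
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "foo",
 *		.max_speed_hz	= 4000000,
 *		.mode		= SPI_MODE_0,
 *		.chip_select	= 0,
 *	};
 *	struct spi_device *spi = spi_new_device(ctlr, &chip);
 *
 *	if (!spi)
 *		dev_warn(&ctlr->dev, "foo device not created\n");
 */
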
/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish.  Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
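
/*
 * Illustrative sketch: a board file declaring hard-wired devices, as the
 * kernel-doc above describes.  All names and values are hypothetical.
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo-codec",
 *			.max_speed_hz	= 2000000,
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	// from the board's arch_initcall-level setup code:
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */
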
/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
			   size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}

/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res:     the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all spi resources for this message
 * @ctlr:    the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}
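
/*
 * Illustrative sketch of the intended life cycle of these (internal)
 * helpers, with a hypothetical release callback; compare how the
 * transfer-splitting code in this file uses them:
 *
 *	static void foo_res_release(struct spi_controller *ctlr,
 *				    struct spi_message *msg, void *res)
 *	{
 *		// undo whatever the resource represented
 *	}
 *
 *	void *data = spi_res_alloc(spi, foo_res_release, len, GFP_KERNEL);
 *
 *	if (!data)
 *		return -ENOMEM;
 *	spi_res_add(msg, data);
 *	// foo_res_release() then runs from spi_res_release() once the
 *	// message is finalized.
 */
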
/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && ((enable && spi->controller->last_cs == spi_get_chipselect(spi, 0)) ||
		       (!enable && spi->controller->last_cs != spi_get_chipselect(spi, 0))) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs = enable ? spi_get_chipselect(spi, 0) : -1;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if ((spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) && !activate)
		spi_delay_exec(&spi->cs_hold, NULL);

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi_get_csgpiod(spi, 0)) {
		if (!(spi->mode & SPI_NO_CS)) {
			/*
			 * Historically, ACPI has had no means of expressing
			 * GPIO polarity, and thus the SPISerialBus() resource
			 * defines it on a per-chip basis.  In order to avoid
			 * a chain of negations, the GPIO polarity is
			 * considered being Active High.  Even for the cases
			 * when _DSD() is involved (in the updated versions of
			 * ACPI) the GPIO CS polarity must be defined Active
			 * High to avoid ambiguity.  That's why we use enable,
			 * which takes SPI_CS_HIGH into account.
			 */
			if (has_acpi_companion(&spi->dev))
				gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), !enable);
			else
				/* Polarity handled by GPIO library */
				gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), activate);
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
			     struct sg_table *sgt, void *buf, size_t len,
			     enum dma_data_direction dir, unsigned long attrs)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
			       (unsigned long)buf < (PKMAP_BASE +
						     (LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sgtable(dev, sgt, dir, attrs);
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	return 0;
}

int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
}

static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
				struct device *dev, struct sg_table *sgt,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	if (sgt->orig_nents) {
		dma_unmap_sgtable(dev, sgt, dir, attrs);
		sg_free_table(sgt);
		sgt->orig_nents = 0;
		sgt->nents = 0;
	}
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync is done before each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
						(void *)xfer->tx_buf,
						xfer->len, DMA_TO_DEVICE,
						attrs);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
						xfer->rx_buf, xfer->len,
						DMA_FROM_DEVICE, attrs);
			if (ret != 0) {
				spi_unmap_buf_attrs(ctlr, tx_dev,
						    &xfer->tx_sg, DMA_TO_DEVICE,
						    attrs);

				return ret;
			}
		}
	}

	ctlr->cur_rx_dma_dev = rx_dev;
	ctlr->cur_tx_dma_dev = tx_dev;
	ctlr->cur_msg_mapped = true;

	return 0;
}
static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;
	struct spi_transfer *xfer;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync has already been done after each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
				    DMA_FROM_DEVICE, attrs);
		spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
				    DMA_TO_DEVICE, attrs);
	}

	ctlr->cur_msg_mapped = false;

	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctlr,
				    struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (!ctlr->cur_msg_mapped)
		return;

	if (xfer->tx_sg.orig_nents)
		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	if (xfer->rx_sg.orig_nents)
		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
				 struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (!ctlr->cur_msg_mapped)
		return;

	if (xfer->rx_sg.orig_nents)
		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
	if (xfer->tx_sg.orig_nents)
		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctrl,
				    struct spi_transfer *xfer)
{
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
				 struct spi_transfer *xfer)
{
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore tx_buf and rx_buf to NULL if spi_map_msg()
		 * pointed them at the controller's dummy buffers.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * apply the multiplier before the division, otherwise we
		 * may get 0 for short transfers.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);

		/*
		 * Double it and add a 200 ms tolerance; use the
		 * predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}
1369 */ 1370 ms += ms + 200; 1371 if (ms > UINT_MAX) 1372 ms = UINT_MAX; 1373 1374 ms = wait_for_completion_timeout(&ctlr->xfer_completion, 1375 msecs_to_jiffies(ms)); 1376 1377 if (ms == 0) { 1378 SPI_STATISTICS_INCREMENT_FIELD(statm, timedout); 1379 SPI_STATISTICS_INCREMENT_FIELD(stats, timedout); 1380 dev_err(&msg->spi->dev, 1381 "SPI transfer timed out\n"); 1382 return -ETIMEDOUT; 1383 } 1384 } 1385 1386 return 0; 1387 } 1388 1389 static void _spi_transfer_delay_ns(u32 ns) 1390 { 1391 if (!ns) 1392 return; 1393 if (ns <= NSEC_PER_USEC) { 1394 ndelay(ns); 1395 } else { 1396 u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC); 1397 1398 if (us <= 10) 1399 udelay(us); 1400 else 1401 usleep_range(us, us + DIV_ROUND_UP(us, 10)); 1402 } 1403 } 1404 1405 int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer) 1406 { 1407 u32 delay = _delay->value; 1408 u32 unit = _delay->unit; 1409 u32 hz; 1410 1411 if (!delay) 1412 return 0; 1413 1414 switch (unit) { 1415 case SPI_DELAY_UNIT_USECS: 1416 delay *= NSEC_PER_USEC; 1417 break; 1418 case SPI_DELAY_UNIT_NSECS: 1419 /* Nothing to do here */ 1420 break; 1421 case SPI_DELAY_UNIT_SCK: 1422 /* Clock cycles need to be obtained from spi_transfer */ 1423 if (!xfer) 1424 return -EINVAL; 1425 /* 1426 * If there is unknown effective speed, approximate it 1427 * by underestimating with half of the requested hz. 1428 */ 1429 hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2; 1430 if (!hz) 1431 return -EINVAL; 1432 1433 /* Convert delay to nanoseconds */ 1434 delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz); 1435 break; 1436 default: 1437 return -EINVAL; 1438 } 1439 1440 return delay; 1441 } 1442 EXPORT_SYMBOL_GPL(spi_delay_to_ns); 1443 1444 int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer) 1445 { 1446 int delay; 1447 1448 might_sleep(); 1449 1450 if (!_delay) 1451 return -EINVAL; 1452 1453 delay = spi_delay_to_ns(_delay, xfer); 1454 if (delay < 0) 1455 return delay; 1456 1457 _spi_transfer_delay_ns(delay); 1458 1459 return 0; 1460 } 1461 EXPORT_SYMBOL_GPL(spi_delay_exec); 1462 1463 static void _spi_transfer_cs_change_delay(struct spi_message *msg, 1464 struct spi_transfer *xfer) 1465 { 1466 u32 default_delay_ns = 10 * NSEC_PER_USEC; 1467 u32 delay = xfer->cs_change_delay.value; 1468 u32 unit = xfer->cs_change_delay.unit; 1469 int ret; 1470 1471 /* Return early on "fast" mode - for everything but USECS */ 1472 if (!delay) { 1473 if (unit == SPI_DELAY_UNIT_USECS) 1474 _spi_transfer_delay_ns(default_delay_ns); 1475 return; 1476 } 1477 1478 ret = spi_delay_exec(&xfer->cs_change_delay, xfer); 1479 if (ret) { 1480 dev_err_once(&msg->spi->dev, 1481 "Use of unsupported delay unit %i, using default of %luus\n", 1482 unit, default_delay_ns / NSEC_PER_USEC); 1483 _spi_transfer_delay_ns(default_delay_ns); 1484 } 1485 } 1486 1487 void spi_transfer_cs_change_delay_exec(struct spi_message *msg, 1488 struct spi_transfer *xfer) 1489 { 1490 _spi_transfer_cs_change_delay(msg, xfer); 1491 } 1492 EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec); 1493 1494 /* 1495 * spi_transfer_one_message - Default implementation of transfer_one_message() 1496 * 1497 * This is a standard implementation of transfer_one_message() for 1498 * drivers which implement a transfer_one() operation. It provides 1499 * standard handling of delays and chip select management. 
static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* Return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}

void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
				       struct spi_transfer *xfer)
{
	_spi_transfer_cs_change_delay(msg, xfer);
}
EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;

	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_set_cs(msg->spi, !xfer->cs_off, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			spi_dma_sync_for_device(ctlr, xfer);
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				spi_dma_sync_for_cpu(ctlr, xfer);

				if (ctlr->cur_msg_mapped &&
				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}

			spi_dma_sync_for_cpu(ctlr, xfer);
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_set_cs(msg->spi, true, false);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			spi_set_cs(msg->spi, xfer->cs_off, false);
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
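
/*
 * Illustrative sketch: a driver whose transfer_one() returned a positive
 * value (transfer still in flight) completes it from its interrupt
 * handler.  All "foo" identifiers are hypothetical.
 *
 *	static irqreturn_t foo_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		// ... drain FIFOs, check for errors ...
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */
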
1620 */ 1621 void spi_finalize_current_transfer(struct spi_controller *ctlr) 1622 { 1623 complete(&ctlr->xfer_completion); 1624 } 1625 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); 1626 1627 static void spi_idle_runtime_pm(struct spi_controller *ctlr) 1628 { 1629 if (ctlr->auto_runtime_pm) { 1630 pm_runtime_mark_last_busy(ctlr->dev.parent); 1631 pm_runtime_put_autosuspend(ctlr->dev.parent); 1632 } 1633 } 1634 1635 static int __spi_pump_transfer_message(struct spi_controller *ctlr, 1636 struct spi_message *msg, bool was_busy) 1637 { 1638 struct spi_transfer *xfer; 1639 int ret; 1640 1641 if (!was_busy && ctlr->auto_runtime_pm) { 1642 ret = pm_runtime_get_sync(ctlr->dev.parent); 1643 if (ret < 0) { 1644 pm_runtime_put_noidle(ctlr->dev.parent); 1645 dev_err(&ctlr->dev, "Failed to power device: %d\n", 1646 ret); 1647 return ret; 1648 } 1649 } 1650 1651 if (!was_busy) 1652 trace_spi_controller_busy(ctlr); 1653 1654 if (!was_busy && ctlr->prepare_transfer_hardware) { 1655 ret = ctlr->prepare_transfer_hardware(ctlr); 1656 if (ret) { 1657 dev_err(&ctlr->dev, 1658 "failed to prepare transfer hardware: %d\n", 1659 ret); 1660 1661 if (ctlr->auto_runtime_pm) 1662 pm_runtime_put(ctlr->dev.parent); 1663 1664 msg->status = ret; 1665 spi_finalize_current_message(ctlr); 1666 1667 return ret; 1668 } 1669 } 1670 1671 trace_spi_message_start(msg); 1672 1673 ret = spi_split_transfers_maxsize(ctlr, msg, 1674 spi_max_transfer_size(msg->spi), 1675 GFP_KERNEL | GFP_DMA); 1676 if (ret) { 1677 msg->status = ret; 1678 spi_finalize_current_message(ctlr); 1679 return ret; 1680 } 1681 1682 if (ctlr->prepare_message) { 1683 ret = ctlr->prepare_message(ctlr, msg); 1684 if (ret) { 1685 dev_err(&ctlr->dev, "failed to prepare message: %d\n", 1686 ret); 1687 msg->status = ret; 1688 spi_finalize_current_message(ctlr); 1689 return ret; 1690 } 1691 msg->prepared = true; 1692 } 1693 1694 ret = spi_map_msg(ctlr, msg); 1695 if (ret) { 1696 msg->status = ret; 1697 spi_finalize_current_message(ctlr); 1698 return ret; 1699 } 1700 1701 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) { 1702 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 1703 xfer->ptp_sts_word_pre = 0; 1704 ptp_read_system_prets(xfer->ptp_sts); 1705 } 1706 } 1707 1708 /* 1709 * Drivers implementation of transfer_one_message() must arrange for 1710 * spi_finalize_current_message() to get called. Most drivers will do 1711 * this in the calling context, but some don't. For those cases, a 1712 * completion is used to guarantee that this function does not return 1713 * until spi_finalize_current_message() is done accessing 1714 * ctlr->cur_msg. 1715 * Use of the following two flags enable to opportunistically skip the 1716 * use of the completion since its use involves expensive spin locks. 1717 * In case of a race with the context that calls 1718 * spi_finalize_current_message() the completion will always be used, 1719 * due to strict ordering of these flags using barriers. 1720 */ 1721 WRITE_ONCE(ctlr->cur_msg_incomplete, true); 1722 WRITE_ONCE(ctlr->cur_msg_need_completion, false); 1723 reinit_completion(&ctlr->cur_msg_completion); 1724 smp_wmb(); /* Make these available to spi_finalize_current_message() */ 1725 1726 ret = ctlr->transfer_one_message(ctlr, msg); 1727 if (ret) { 1728 dev_err(&ctlr->dev, 1729 "failed to transfer one message from queue\n"); 1730 return ret; 1731 } 1732 1733 WRITE_ONCE(ctlr->cur_msg_need_completion, true); 1734 smp_mb(); /* See spi_finalize_current_message()... 
*/ 1735 if (READ_ONCE(ctlr->cur_msg_incomplete)) 1736 wait_for_completion(&ctlr->cur_msg_completion); 1737 1738 return 0; 1739 } 1740 1741 /** 1742 * __spi_pump_messages - function which processes spi message queue 1743 * @ctlr: controller to process queue for 1744 * @in_kthread: true if we are in the context of the message pump thread 1745 * 1746 * This function checks if there is any spi message in the queue that 1747 * needs processing and if so call out to the driver to initialize hardware 1748 * and transfer each message. 1749 * 1750 * Note that it is called both from the kthread itself and also from 1751 * inside spi_sync(); the queue extraction handling at the top of the 1752 * function should deal with this safely. 1753 */ 1754 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) 1755 { 1756 struct spi_message *msg; 1757 bool was_busy = false; 1758 unsigned long flags; 1759 int ret; 1760 1761 /* Take the IO mutex */ 1762 mutex_lock(&ctlr->io_mutex); 1763 1764 /* Lock queue */ 1765 spin_lock_irqsave(&ctlr->queue_lock, flags); 1766 1767 /* Make sure we are not already running a message */ 1768 if (ctlr->cur_msg) 1769 goto out_unlock; 1770 1771 /* Check if the queue is idle */ 1772 if (list_empty(&ctlr->queue) || !ctlr->running) { 1773 if (!ctlr->busy) 1774 goto out_unlock; 1775 1776 /* Defer any non-atomic teardown to the thread */ 1777 if (!in_kthread) { 1778 if (!ctlr->dummy_rx && !ctlr->dummy_tx && 1779 !ctlr->unprepare_transfer_hardware) { 1780 spi_idle_runtime_pm(ctlr); 1781 ctlr->busy = false; 1782 ctlr->queue_empty = true; 1783 trace_spi_controller_idle(ctlr); 1784 } else { 1785 kthread_queue_work(ctlr->kworker, 1786 &ctlr->pump_messages); 1787 } 1788 goto out_unlock; 1789 } 1790 1791 ctlr->busy = false; 1792 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1793 1794 kfree(ctlr->dummy_rx); 1795 ctlr->dummy_rx = NULL; 1796 kfree(ctlr->dummy_tx); 1797 ctlr->dummy_tx = NULL; 1798 if (ctlr->unprepare_transfer_hardware && 1799 ctlr->unprepare_transfer_hardware(ctlr)) 1800 dev_err(&ctlr->dev, 1801 "failed to unprepare transfer hardware\n"); 1802 spi_idle_runtime_pm(ctlr); 1803 trace_spi_controller_idle(ctlr); 1804 1805 spin_lock_irqsave(&ctlr->queue_lock, flags); 1806 ctlr->queue_empty = true; 1807 goto out_unlock; 1808 } 1809 1810 /* Extract head of queue */ 1811 msg = list_first_entry(&ctlr->queue, struct spi_message, queue); 1812 ctlr->cur_msg = msg; 1813 1814 list_del_init(&msg->queue); 1815 if (ctlr->busy) 1816 was_busy = true; 1817 else 1818 ctlr->busy = true; 1819 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1820 1821 ret = __spi_pump_transfer_message(ctlr, msg, was_busy); 1822 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); 1823 1824 ctlr->cur_msg = NULL; 1825 ctlr->fallback = false; 1826 1827 mutex_unlock(&ctlr->io_mutex); 1828 1829 /* Prod the scheduler in case transfer_one() was busy waiting */ 1830 if (!ret) 1831 cond_resched(); 1832 return; 1833 1834 out_unlock: 1835 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1836 mutex_unlock(&ctlr->io_mutex); 1837 } 1838 1839 /** 1840 * spi_pump_messages - kthread work function which processes spi message queue 1841 * @work: pointer to kthread work struct contained in the controller struct 1842 */ 1843 static void spi_pump_messages(struct kthread_work *work) 1844 { 1845 struct spi_controller *ctlr = 1846 container_of(work, struct spi_controller, pump_messages); 1847 1848 __spi_pump_messages(ctlr, true); 1849 } 1850 1851 /** 1852 * spi_take_timestamp_pre - helper to collect the beginning 

/**
 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
 *	      transfer, for less jitter in time measurement.  Only compatible
 *	      with PIO drivers.  If true, must follow up with
 *	      spi_take_timestamp_post or otherwise the system will crash.
 *	      WARNING: for fully predictable results, the CPU frequency must
 *	      also be under control (governor).
 *
 * This is a helper for drivers to collect the beginning of the TX timestamp
 * for the requested byte from the SPI transfer.  The frequency with which this
 * function must be called (once per word, once for the whole transfer, once
 * per batch of words etc) is arbitrary as long as the @tx buffer offset is
 * greater than or equal to the requested byte at the time of the call.  The
 * timestamp is only taken once, at the first such call.  It is assumed that
 * the driver advances its @tx buffer pointer monotonically.
 */
void spi_take_timestamp_pre(struct spi_controller *ctlr,
			    struct spi_transfer *xfer,
			    size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress > xfer->ptp_sts_word_pre)
		return;

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_pre = progress;

	if (irqs_off) {
		local_irq_save(ctlr->irq_flags);
		preempt_disable();
	}

	ptp_read_system_prets(xfer->ptp_sts);
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);

/**
 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
 * @ctlr: Pointer to the spi_controller structure of the driver
 * @xfer: Pointer to the transfer being timestamped
 * @progress: How many words (not bytes) have been transferred so far
 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
 *
 * This is a helper for drivers to collect the end of the TX timestamp for
 * the requested byte from the SPI transfer.  Can be called with an arbitrary
 * frequency: only the first call where @tx exceeds or is equal to the
 * requested word will be timestamped.
 */
void spi_take_timestamp_post(struct spi_controller *ctlr,
			     struct spi_transfer *xfer,
			     size_t progress, bool irqs_off)
{
	if (!xfer->ptp_sts)
		return;

	if (xfer->timestamped)
		return;

	if (progress < xfer->ptp_sts_word_post)
		return;

	ptp_read_system_postts(xfer->ptp_sts);

	if (irqs_off) {
		local_irq_restore(ctlr->irq_flags);
		preempt_enable();
	}

	/* Capture the resolution of the timestamp */
	xfer->ptp_sts_word_post = progress;

	xfer->timestamped = 1;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
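
/*
 * Illustrative sketch: how a PIO driver might bracket the word that
 * carries the timestamp (hypothetical "foo" loop, one byte per word):
 *
 *	for (i = 0; i < xfer->len; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		foo_write_byte(hw, ((const u8 *)xfer->tx_buf)[i]);
 *		spi_take_timestamp_post(ctlr, xfer, i + 1, false);
 *	}
 */
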
 *
 * NOTE: at the moment if any device on a bus says it needs realtime then
 * the thread will be at realtime priority for all transfers on that
 * controller. If this eventually becomes a problem we may see if we can
 * find a way to boost the priority only temporarily during relevant
 * transfers.
 */
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
	dev_info(&ctlr->dev,
		 "will run message pump with realtime priority\n");
	sched_set_fifo(ctlr->kworker->task);
}

static int spi_init_queue(struct spi_controller *ctlr)
{
	ctlr->running = false;
	ctlr->busy = false;
	ctlr->queue_empty = true;

	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
	if (IS_ERR(ctlr->kworker)) {
		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
		return PTR_ERR(ctlr->kworker);
	}

	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);

	/*
	 * Controller config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (ctlr->rt)
		spi_set_thread_rt(ctlr);

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
{
	struct spi_message *next;
	unsigned long flags;

	/* Get a pointer to the next message, if any */
	spin_lock_irqsave(&ctlr->queue_lock, flags);
	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @ctlr: the controller to return the message to
 *
 * Called by the driver to notify the core that the message at the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_controller *ctlr)
{
	struct spi_transfer *xfer;
	struct spi_message *mesg;
	int ret;

	mesg = ctlr->cur_msg;

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}
	}

	if (unlikely(ctlr->ptp_sts_supported))
		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);

	spi_unmap_msg(ctlr, mesg);

	/*
	 * In the prepare_message callback the SPI bus has the opportunity
	 * to split a transfer into smaller chunks.
	 *
	 * Release the split transfers here since spi_map_msg() is done on
	 * the split transfers.
2043 */ 2044 spi_res_release(ctlr, mesg); 2045 2046 if (mesg->prepared && ctlr->unprepare_message) { 2047 ret = ctlr->unprepare_message(ctlr, mesg); 2048 if (ret) { 2049 dev_err(&ctlr->dev, "failed to unprepare message: %d\n", 2050 ret); 2051 } 2052 } 2053 2054 mesg->prepared = false; 2055 2056 WRITE_ONCE(ctlr->cur_msg_incomplete, false); 2057 smp_mb(); /* See __spi_pump_transfer_message()... */ 2058 if (READ_ONCE(ctlr->cur_msg_need_completion)) 2059 complete(&ctlr->cur_msg_completion); 2060 2061 trace_spi_message_done(mesg); 2062 2063 mesg->state = NULL; 2064 if (mesg->complete) 2065 mesg->complete(mesg->context); 2066 } 2067 EXPORT_SYMBOL_GPL(spi_finalize_current_message); 2068 2069 static int spi_start_queue(struct spi_controller *ctlr) 2070 { 2071 unsigned long flags; 2072 2073 spin_lock_irqsave(&ctlr->queue_lock, flags); 2074 2075 if (ctlr->running || ctlr->busy) { 2076 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2077 return -EBUSY; 2078 } 2079 2080 ctlr->running = true; 2081 ctlr->cur_msg = NULL; 2082 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2083 2084 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); 2085 2086 return 0; 2087 } 2088 2089 static int spi_stop_queue(struct spi_controller *ctlr) 2090 { 2091 unsigned long flags; 2092 unsigned limit = 500; 2093 int ret = 0; 2094 2095 spin_lock_irqsave(&ctlr->queue_lock, flags); 2096 2097 /* 2098 * This is a bit lame, but is optimized for the common execution path. 2099 * A wait_queue on the ctlr->busy could be used, but then the common 2100 * execution path (pump_messages) would be required to call wake_up or 2101 * friends on every SPI message. Do this instead. 2102 */ 2103 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) { 2104 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2105 usleep_range(10000, 11000); 2106 spin_lock_irqsave(&ctlr->queue_lock, flags); 2107 } 2108 2109 if (!list_empty(&ctlr->queue) || ctlr->busy) 2110 ret = -EBUSY; 2111 else 2112 ctlr->running = false; 2113 2114 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2115 2116 if (ret) { 2117 dev_warn(&ctlr->dev, "could not stop message queue\n"); 2118 return ret; 2119 } 2120 return ret; 2121 } 2122 2123 static int spi_destroy_queue(struct spi_controller *ctlr) 2124 { 2125 int ret; 2126 2127 ret = spi_stop_queue(ctlr); 2128 2129 /* 2130 * kthread_flush_worker will block until all work is done. 2131 * If the reason that stop_queue timed out is that the work will never 2132 * finish, then it does no good to call flush/stop thread, so 2133 * return anyway. 
2134 */ 2135 if (ret) { 2136 dev_err(&ctlr->dev, "problem destroying queue\n"); 2137 return ret; 2138 } 2139 2140 kthread_destroy_worker(ctlr->kworker); 2141 2142 return 0; 2143 } 2144 2145 static int __spi_queued_transfer(struct spi_device *spi, 2146 struct spi_message *msg, 2147 bool need_pump) 2148 { 2149 struct spi_controller *ctlr = spi->controller; 2150 unsigned long flags; 2151 2152 spin_lock_irqsave(&ctlr->queue_lock, flags); 2153 2154 if (!ctlr->running) { 2155 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2156 return -ESHUTDOWN; 2157 } 2158 msg->actual_length = 0; 2159 msg->status = -EINPROGRESS; 2160 2161 list_add_tail(&msg->queue, &ctlr->queue); 2162 ctlr->queue_empty = false; 2163 if (!ctlr->busy && need_pump) 2164 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); 2165 2166 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 2167 return 0; 2168 } 2169 2170 /** 2171 * spi_queued_transfer - transfer function for queued transfers 2172 * @spi: spi device which is requesting transfer 2173 * @msg: spi message which is to handled is queued to driver queue 2174 * 2175 * Return: zero on success, else a negative error code. 2176 */ 2177 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) 2178 { 2179 return __spi_queued_transfer(spi, msg, true); 2180 } 2181 2182 static int spi_controller_initialize_queue(struct spi_controller *ctlr) 2183 { 2184 int ret; 2185 2186 ctlr->transfer = spi_queued_transfer; 2187 if (!ctlr->transfer_one_message) 2188 ctlr->transfer_one_message = spi_transfer_one_message; 2189 2190 /* Initialize and start queue */ 2191 ret = spi_init_queue(ctlr); 2192 if (ret) { 2193 dev_err(&ctlr->dev, "problem initializing queue\n"); 2194 goto err_init_queue; 2195 } 2196 ctlr->queued = true; 2197 ret = spi_start_queue(ctlr); 2198 if (ret) { 2199 dev_err(&ctlr->dev, "problem starting queue\n"); 2200 goto err_start_queue; 2201 } 2202 2203 return 0; 2204 2205 err_start_queue: 2206 spi_destroy_queue(ctlr); 2207 err_init_queue: 2208 return ret; 2209 } 2210 2211 /** 2212 * spi_flush_queue - Send all pending messages in the queue from the callers' 2213 * context 2214 * @ctlr: controller to process queue for 2215 * 2216 * This should be used when one wants to ensure all pending messages have been 2217 * sent before doing something. Is used by the spi-mem code to make sure SPI 2218 * memory operations do not preempt regular SPI transfers that have been queued 2219 * before the spi-mem operation. 2220 */ 2221 void spi_flush_queue(struct spi_controller *ctlr) 2222 { 2223 if (ctlr->transfer == spi_queued_transfer) 2224 __spi_pump_messages(ctlr, false); 2225 } 2226 2227 /*-------------------------------------------------------------------------*/ 2228 2229 #if defined(CONFIG_OF) 2230 static void of_spi_parse_dt_cs_delay(struct device_node *nc, 2231 struct spi_delay *delay, const char *prop) 2232 { 2233 u32 value; 2234 2235 if (!of_property_read_u32(nc, prop, &value)) { 2236 if (value > U16_MAX) { 2237 delay->value = DIV_ROUND_UP(value, 1000); 2238 delay->unit = SPI_DELAY_UNIT_USECS; 2239 } else { 2240 delay->value = value; 2241 delay->unit = SPI_DELAY_UNIT_NSECS; 2242 } 2243 } 2244 } 2245 2246 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, 2247 struct device_node *nc) 2248 { 2249 u32 value; 2250 int rc; 2251 2252 /* Mode (clock phase/polarity/etc.) 
*/ 2253 if (of_property_read_bool(nc, "spi-cpha")) 2254 spi->mode |= SPI_CPHA; 2255 if (of_property_read_bool(nc, "spi-cpol")) 2256 spi->mode |= SPI_CPOL; 2257 if (of_property_read_bool(nc, "spi-3wire")) 2258 spi->mode |= SPI_3WIRE; 2259 if (of_property_read_bool(nc, "spi-lsb-first")) 2260 spi->mode |= SPI_LSB_FIRST; 2261 if (of_property_read_bool(nc, "spi-cs-high")) 2262 spi->mode |= SPI_CS_HIGH; 2263 2264 /* Device DUAL/QUAD mode */ 2265 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 2266 switch (value) { 2267 case 0: 2268 spi->mode |= SPI_NO_TX; 2269 break; 2270 case 1: 2271 break; 2272 case 2: 2273 spi->mode |= SPI_TX_DUAL; 2274 break; 2275 case 4: 2276 spi->mode |= SPI_TX_QUAD; 2277 break; 2278 case 8: 2279 spi->mode |= SPI_TX_OCTAL; 2280 break; 2281 default: 2282 dev_warn(&ctlr->dev, 2283 "spi-tx-bus-width %d not supported\n", 2284 value); 2285 break; 2286 } 2287 } 2288 2289 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 2290 switch (value) { 2291 case 0: 2292 spi->mode |= SPI_NO_RX; 2293 break; 2294 case 1: 2295 break; 2296 case 2: 2297 spi->mode |= SPI_RX_DUAL; 2298 break; 2299 case 4: 2300 spi->mode |= SPI_RX_QUAD; 2301 break; 2302 case 8: 2303 spi->mode |= SPI_RX_OCTAL; 2304 break; 2305 default: 2306 dev_warn(&ctlr->dev, 2307 "spi-rx-bus-width %d not supported\n", 2308 value); 2309 break; 2310 } 2311 } 2312 2313 if (spi_controller_is_slave(ctlr)) { 2314 if (!of_node_name_eq(nc, "slave")) { 2315 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n", 2316 nc); 2317 return -EINVAL; 2318 } 2319 return 0; 2320 } 2321 2322 /* Device address */ 2323 rc = of_property_read_u32(nc, "reg", &value); 2324 if (rc) { 2325 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n", 2326 nc, rc); 2327 return rc; 2328 } 2329 spi_set_chipselect(spi, 0, value); 2330 2331 /* Device speed */ 2332 if (!of_property_read_u32(nc, "spi-max-frequency", &value)) 2333 spi->max_speed_hz = value; 2334 2335 /* Device CS delays */ 2336 of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns"); 2337 of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns"); 2338 of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns"); 2339 2340 return 0; 2341 } 2342 2343 static struct spi_device * 2344 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc) 2345 { 2346 struct spi_device *spi; 2347 int rc; 2348 2349 /* Alloc an spi_device */ 2350 spi = spi_alloc_device(ctlr); 2351 if (!spi) { 2352 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc); 2353 rc = -ENOMEM; 2354 goto err_out; 2355 } 2356 2357 /* Select device driver */ 2358 rc = of_alias_from_compatible(nc, spi->modalias, 2359 sizeof(spi->modalias)); 2360 if (rc < 0) { 2361 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc); 2362 goto err_out; 2363 } 2364 2365 rc = of_spi_parse_dt(ctlr, spi, nc); 2366 if (rc) 2367 goto err_out; 2368 2369 /* Store a pointer to the node in the device structure */ 2370 of_node_get(nc); 2371 2372 device_set_node(&spi->dev, of_fwnode_handle(nc)); 2373 2374 /* Register the new device */ 2375 rc = spi_add_device(spi); 2376 if (rc) { 2377 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc); 2378 goto err_of_node_put; 2379 } 2380 2381 return spi; 2382 2383 err_of_node_put: 2384 of_node_put(nc); 2385 err_out: 2386 spi_dev_put(spi); 2387 return ERR_PTR(rc); 2388 } 2389 2390 /** 2391 * of_register_spi_devices() - Register child devices onto the SPI bus 2392 * @ctlr: Pointer to spi_controller device 2393 * 2394 * Registers an spi_device 
for each child node of the controller node which
 * represents a valid SPI slave.
 */
static void of_register_spi_devices(struct spi_controller *ctlr)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!ctlr->dev.of_node)
		return;

	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(ctlr, nc);
		if (IS_ERR(spi)) {
			dev_warn(&ctlr->dev,
				 "Failed to create SPI device for %pOF\n", nc);
			of_node_clear_flag(nc, OF_POPULATED);
		}
	}
}
#else
static void of_register_spi_devices(struct spi_controller *ctlr) { }
#endif

/**
 * spi_new_ancillary_device() - Register ancillary SPI device
 * @spi: Pointer to the main SPI device registering the ancillary device
 * @chip_select: Chip Select of the ancillary device
 *
 * Register an ancillary SPI device; for example some chips have a chip-select
 * for normal device usage and another one for setup/firmware upload.
 *
 * This may only be called from the main SPI device's probe routine.
 *
 * Return: a pointer to the new device, or ERR_PTR(-errno) on failure.
 */
struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
					    u8 chip_select)
{
	struct spi_device *ancillary;
	int rc = 0;

	/* Alloc an spi_device */
	ancillary = spi_alloc_device(spi->controller);
	if (!ancillary) {
		rc = -ENOMEM;
		goto err_out;
	}

	strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));

	/* Use provided chip-select for ancillary device */
	spi_set_chipselect(ancillary, 0, chip_select);

	/* Take over SPI mode/speed from SPI main device */
	ancillary->max_speed_hz = spi->max_speed_hz;
	ancillary->mode = spi->mode;

	/* Register the new device */
	rc = spi_add_device_locked(ancillary);
	if (rc) {
		dev_err(&spi->dev, "failed to register ancillary device\n");
		goto err_out;
	}

	return ancillary;

err_out:
	spi_dev_put(ancillary);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(spi_new_ancillary_device);

#ifdef CONFIG_ACPI
struct acpi_spi_lookup {
	struct spi_controller *ctlr;
	u32 max_speed_hz;
	u32 mode;
	int irq;
	u8 bits_per_word;
	u8 chip_select;
	int n;
	int index;
};

static int acpi_spi_count(struct acpi_resource *ares, void *data)
{
	struct acpi_resource_spi_serialbus *sb;
	int *count = data;

	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
		return 1;

	sb = &ares->data.spi_serial_bus;
	if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
		return 1;

	*count = *count + 1;

	return 1;
}

/**
 * acpi_spi_count_resources - Count the number of SpiSerialBus resources
 * @adev: ACPI device
 *
 * Return: the number of SpiSerialBus resources in the ACPI device's
 * resource list, or a negative error code.
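 *
 * A caller that wants one spi_device per SpiSerialBus entry can size its
 * bookkeeping from this, e.g. (sketch):
 *
 *	nb = acpi_spi_count_resources(adev);
 *	if (nb <= 0)
 *		return nb ? nb : -ENODEV;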
2504 */ 2505 int acpi_spi_count_resources(struct acpi_device *adev) 2506 { 2507 LIST_HEAD(r); 2508 int count = 0; 2509 int ret; 2510 2511 ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count); 2512 if (ret < 0) 2513 return ret; 2514 2515 acpi_dev_free_resource_list(&r); 2516 2517 return count; 2518 } 2519 EXPORT_SYMBOL_GPL(acpi_spi_count_resources); 2520 2521 static void acpi_spi_parse_apple_properties(struct acpi_device *dev, 2522 struct acpi_spi_lookup *lookup) 2523 { 2524 const union acpi_object *obj; 2525 2526 if (!x86_apple_machine) 2527 return; 2528 2529 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj) 2530 && obj->buffer.length >= 4) 2531 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer; 2532 2533 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj) 2534 && obj->buffer.length == 8) 2535 lookup->bits_per_word = *(u64 *)obj->buffer.pointer; 2536 2537 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj) 2538 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer) 2539 lookup->mode |= SPI_LSB_FIRST; 2540 2541 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj) 2542 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 2543 lookup->mode |= SPI_CPOL; 2544 2545 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj) 2546 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 2547 lookup->mode |= SPI_CPHA; 2548 } 2549 2550 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev); 2551 2552 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 2553 { 2554 struct acpi_spi_lookup *lookup = data; 2555 struct spi_controller *ctlr = lookup->ctlr; 2556 2557 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 2558 struct acpi_resource_spi_serialbus *sb; 2559 acpi_handle parent_handle; 2560 acpi_status status; 2561 2562 sb = &ares->data.spi_serial_bus; 2563 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 2564 2565 if (lookup->index != -1 && lookup->n++ != lookup->index) 2566 return 1; 2567 2568 status = acpi_get_handle(NULL, 2569 sb->resource_source.string_ptr, 2570 &parent_handle); 2571 2572 if (ACPI_FAILURE(status)) 2573 return -ENODEV; 2574 2575 if (ctlr) { 2576 if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle) 2577 return -ENODEV; 2578 } else { 2579 struct acpi_device *adev; 2580 2581 adev = acpi_fetch_acpi_dev(parent_handle); 2582 if (!adev) 2583 return -ENODEV; 2584 2585 ctlr = acpi_spi_find_controller_by_adev(adev); 2586 if (!ctlr) 2587 return -EPROBE_DEFER; 2588 2589 lookup->ctlr = ctlr; 2590 } 2591 2592 /* 2593 * ACPI DeviceSelection numbering is handled by the 2594 * host controller driver in Windows and can vary 2595 * from driver to driver. In Linux we always expect 2596 * 0 .. max - 1 so we need to ask the driver to 2597 * translate between the two schemes. 
2598 */ 2599 if (ctlr->fw_translate_cs) { 2600 int cs = ctlr->fw_translate_cs(ctlr, 2601 sb->device_selection); 2602 if (cs < 0) 2603 return cs; 2604 lookup->chip_select = cs; 2605 } else { 2606 lookup->chip_select = sb->device_selection; 2607 } 2608 2609 lookup->max_speed_hz = sb->connection_speed; 2610 lookup->bits_per_word = sb->data_bit_length; 2611 2612 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) 2613 lookup->mode |= SPI_CPHA; 2614 if (sb->clock_polarity == ACPI_SPI_START_HIGH) 2615 lookup->mode |= SPI_CPOL; 2616 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) 2617 lookup->mode |= SPI_CS_HIGH; 2618 } 2619 } else if (lookup->irq < 0) { 2620 struct resource r; 2621 2622 if (acpi_dev_resource_interrupt(ares, 0, &r)) 2623 lookup->irq = r.start; 2624 } 2625 2626 /* Always tell the ACPI core to skip this resource */ 2627 return 1; 2628 } 2629 2630 /** 2631 * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information 2632 * @ctlr: controller to which the spi device belongs 2633 * @adev: ACPI Device for the spi device 2634 * @index: Index of the spi resource inside the ACPI Node 2635 * 2636 * This should be used to allocate a new spi device from and ACPI Node. 2637 * The caller is responsible for calling spi_add_device to register the spi device. 2638 * 2639 * If ctlr is set to NULL, the Controller for the spi device will be looked up 2640 * using the resource. 2641 * If index is set to -1, index is not used. 2642 * Note: If index is -1, ctlr must be set. 2643 * 2644 * Return: a pointer to the new device, or ERR_PTR on error. 2645 */ 2646 struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr, 2647 struct acpi_device *adev, 2648 int index) 2649 { 2650 acpi_handle parent_handle = NULL; 2651 struct list_head resource_list; 2652 struct acpi_spi_lookup lookup = {}; 2653 struct spi_device *spi; 2654 int ret; 2655 2656 if (!ctlr && index == -1) 2657 return ERR_PTR(-EINVAL); 2658 2659 lookup.ctlr = ctlr; 2660 lookup.irq = -1; 2661 lookup.index = index; 2662 lookup.n = 0; 2663 2664 INIT_LIST_HEAD(&resource_list); 2665 ret = acpi_dev_get_resources(adev, &resource_list, 2666 acpi_spi_add_resource, &lookup); 2667 acpi_dev_free_resource_list(&resource_list); 2668 2669 if (ret < 0) 2670 /* Found SPI in _CRS but it points to another controller */ 2671 return ERR_PTR(ret); 2672 2673 if (!lookup.max_speed_hz && 2674 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) && 2675 ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) { 2676 /* Apple does not use _CRS but nested devices for SPI slaves */ 2677 acpi_spi_parse_apple_properties(adev, &lookup); 2678 } 2679 2680 if (!lookup.max_speed_hz) 2681 return ERR_PTR(-ENODEV); 2682 2683 spi = spi_alloc_device(lookup.ctlr); 2684 if (!spi) { 2685 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n", 2686 dev_name(&adev->dev)); 2687 return ERR_PTR(-ENOMEM); 2688 } 2689 2690 ACPI_COMPANION_SET(&spi->dev, adev); 2691 spi->max_speed_hz = lookup.max_speed_hz; 2692 spi->mode |= lookup.mode; 2693 spi->irq = lookup.irq; 2694 spi->bits_per_word = lookup.bits_per_word; 2695 spi_set_chipselect(spi, 0, lookup.chip_select); 2696 2697 return spi; 2698 } 2699 EXPORT_SYMBOL_GPL(acpi_spi_device_alloc); 2700 2701 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, 2702 struct acpi_device *adev) 2703 { 2704 struct spi_device *spi; 2705 2706 if (acpi_bus_get_status(adev) || !adev->status.present || 2707 acpi_device_enumerated(adev)) 2708 return AE_OK; 2709 2710 spi = acpi_spi_device_alloc(ctlr, adev, 
	if (IS_ERR(spi)) {
		if (PTR_ERR(spi) == -ENOMEM)
			return AE_NO_MEMORY;
		else
			return AE_OK;
	}

	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
			  sizeof(spi->modalias));

	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	acpi_device_set_enumerated(adev);

	adev->power.flags.ignore_parent = true;
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
	struct spi_controller *ctlr = data;

	if (!adev)
		return AE_OK;

	return acpi_register_spi_device(ctlr, adev);
}

#define SPI_ACPI_ENUMERATE_MAX_DEPTH		32

static void acpi_register_spi_devices(struct spi_controller *ctlr)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(ctlr->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
				     SPI_ACPI_ENUMERATE_MAX_DEPTH,
				     acpi_spi_add_device, NULL, ctlr, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
#endif /* CONFIG_ACPI */

static void spi_controller_release(struct device *dev)
{
	struct spi_controller *ctlr;

	ctlr = container_of(dev, struct spi_controller, dev);
	kfree(ctlr);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_master_groups,
};

#ifdef CONFIG_SPI_SLAVE
/**
 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
 *		     controller
 * @spi: device used for the current transfer
 */
int spi_slave_abort(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;

	if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
		return ctlr->slave_abort(ctlr);

	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(spi_slave_abort);

int spi_target_abort(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;

	if (spi_controller_is_target(ctlr) && ctlr->target_abort)
		return ctlr->target_abort(ctlr);

	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(spi_target_abort);

static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct device *child;
	ssize_t ret;

	child = device_find_any_child(&ctlr->dev);
	ret = sprintf(buf, "%s\n",
		      child ? to_spi_device(child)->modalias : NULL);
	/* device_find_any_child() took a reference; drop it again */
	put_device(child);
	return ret;
}

static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
						   dev);
	struct spi_device *spi;
	struct device *child;
	char name[32];
	int rc;

	rc = sscanf(buf, "%31s", name);
	if (rc != 1 || !name[0])
		return -EINVAL;

	child = device_find_any_child(&ctlr->dev);
	if (child) {
		/* Remove registered slave */
		device_unregister(child);
		put_device(child);
	}

	if (strcmp(name, "(null)")) {
		/* Register new slave */
		spi = spi_alloc_device(ctlr);
		if (!spi)
			return -ENOMEM;

		strscpy(spi->modalias, name, sizeof(spi->modalias));

		rc = spi_add_device(spi);
		if (rc) {
			spi_dev_put(spi);
			return rc;
		}
	}

	return count;
}

static DEVICE_ATTR_RW(slave);

static struct attribute *spi_slave_attrs[] = {
	&dev_attr_slave.attr,
	NULL,
};

static const struct attribute_group spi_slave_group = {
	.attrs = spi_slave_attrs,
};

static const struct attribute_group *spi_slave_groups[] = {
	&spi_controller_statistics_group,
	&spi_slave_group,
	NULL,
};

static struct class spi_slave_class = {
	.name		= "spi_slave",
	.dev_release	= spi_controller_release,
	.dev_groups	= spi_slave_groups,
};
#else
extern struct class spi_slave_class;	/* dummy */
#endif

/**
 * __spi_alloc_controller - allocate an SPI master or slave controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device, accessible
 *	with spi_controller_get_devdata(); the memory is cacheline aligned;
 *	drivers granting DMA access to portions of their private data need to
 *	round up @size using ALIGN(size, dma_get_cache_alignment()).
 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
 *	slave (true) controller
 * Context: can sleep
 *
 * This call is used only by SPI controller drivers, which are the
 * only ones directly touching chip registers. It's how they allocate
 * an spi_controller structure, prior to calling spi_register_controller().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing the
 * controller's methods before calling spi_register_controller(); and (after
 * errors adding the device) calling spi_controller_put() to prevent a memory
 * leak.
 *
 * Return: the SPI controller structure on success, else NULL.
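 *
 * A typical probe() pairs this with spi_controller_put() on its error path,
 * e.g. (sketch; struct foo_priv is a made-up driver type):
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);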
2913 */ 2914 struct spi_controller *__spi_alloc_controller(struct device *dev, 2915 unsigned int size, bool slave) 2916 { 2917 struct spi_controller *ctlr; 2918 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment()); 2919 2920 if (!dev) 2921 return NULL; 2922 2923 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL); 2924 if (!ctlr) 2925 return NULL; 2926 2927 device_initialize(&ctlr->dev); 2928 INIT_LIST_HEAD(&ctlr->queue); 2929 spin_lock_init(&ctlr->queue_lock); 2930 spin_lock_init(&ctlr->bus_lock_spinlock); 2931 mutex_init(&ctlr->bus_lock_mutex); 2932 mutex_init(&ctlr->io_mutex); 2933 mutex_init(&ctlr->add_lock); 2934 ctlr->bus_num = -1; 2935 ctlr->num_chipselect = 1; 2936 ctlr->slave = slave; 2937 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave) 2938 ctlr->dev.class = &spi_slave_class; 2939 else 2940 ctlr->dev.class = &spi_master_class; 2941 ctlr->dev.parent = dev; 2942 pm_suspend_ignore_children(&ctlr->dev, true); 2943 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size); 2944 2945 return ctlr; 2946 } 2947 EXPORT_SYMBOL_GPL(__spi_alloc_controller); 2948 2949 static void devm_spi_release_controller(struct device *dev, void *ctlr) 2950 { 2951 spi_controller_put(*(struct spi_controller **)ctlr); 2952 } 2953 2954 /** 2955 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller() 2956 * @dev: physical device of SPI controller 2957 * @size: how much zeroed driver-private data to allocate 2958 * @slave: whether to allocate an SPI master (false) or SPI slave (true) 2959 * Context: can sleep 2960 * 2961 * Allocate an SPI controller and automatically release a reference on it 2962 * when @dev is unbound from its driver. Drivers are thus relieved from 2963 * having to call spi_controller_put(). 2964 * 2965 * The arguments to this function are identical to __spi_alloc_controller(). 2966 * 2967 * Return: the SPI controller structure on success, else NULL. 2968 */ 2969 struct spi_controller *__devm_spi_alloc_controller(struct device *dev, 2970 unsigned int size, 2971 bool slave) 2972 { 2973 struct spi_controller **ptr, *ctlr; 2974 2975 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr), 2976 GFP_KERNEL); 2977 if (!ptr) 2978 return NULL; 2979 2980 ctlr = __spi_alloc_controller(dev, size, slave); 2981 if (ctlr) { 2982 ctlr->devm_allocated = true; 2983 *ptr = ctlr; 2984 devres_add(dev, ptr); 2985 } else { 2986 devres_free(ptr); 2987 } 2988 2989 return ctlr; 2990 } 2991 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller); 2992 2993 /** 2994 * spi_get_gpio_descs() - grab chip select GPIOs for the master 2995 * @ctlr: The SPI master to grab GPIO descriptors for 2996 */ 2997 static int spi_get_gpio_descs(struct spi_controller *ctlr) 2998 { 2999 int nb, i; 3000 struct gpio_desc **cs; 3001 struct device *dev = &ctlr->dev; 3002 unsigned long native_cs_mask = 0; 3003 unsigned int num_cs_gpios = 0; 3004 3005 nb = gpiod_count(dev, "cs"); 3006 if (nb < 0) { 3007 /* No GPIOs at all is fine, else return the error */ 3008 if (nb == -ENOENT) 3009 return 0; 3010 return nb; 3011 } 3012 3013 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); 3014 3015 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs), 3016 GFP_KERNEL); 3017 if (!cs) 3018 return -ENOMEM; 3019 ctlr->cs_gpiods = cs; 3020 3021 for (i = 0; i < nb; i++) { 3022 /* 3023 * Most chipselects are active low, the inverted 3024 * semantics are handled by special quirks in gpiolib, 3025 * so initializing them GPIOD_OUT_LOW here means 3026 * "unasserted", in most cases this will drive the physical 3027 * line high. 
3028 */ 3029 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i, 3030 GPIOD_OUT_LOW); 3031 if (IS_ERR(cs[i])) 3032 return PTR_ERR(cs[i]); 3033 3034 if (cs[i]) { 3035 /* 3036 * If we find a CS GPIO, name it after the device and 3037 * chip select line. 3038 */ 3039 char *gpioname; 3040 3041 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d", 3042 dev_name(dev), i); 3043 if (!gpioname) 3044 return -ENOMEM; 3045 gpiod_set_consumer_name(cs[i], gpioname); 3046 num_cs_gpios++; 3047 continue; 3048 } 3049 3050 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) { 3051 dev_err(dev, "Invalid native chip select %d\n", i); 3052 return -EINVAL; 3053 } 3054 native_cs_mask |= BIT(i); 3055 } 3056 3057 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1; 3058 3059 if ((ctlr->flags & SPI_MASTER_GPIO_SS) && num_cs_gpios && 3060 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) { 3061 dev_err(dev, "No unused native chip select available\n"); 3062 return -EINVAL; 3063 } 3064 3065 return 0; 3066 } 3067 3068 static int spi_controller_check_ops(struct spi_controller *ctlr) 3069 { 3070 /* 3071 * The controller may implement only the high-level SPI-memory like 3072 * operations if it does not support regular SPI transfers, and this is 3073 * valid use case. 3074 * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least 3075 * one of the ->transfer_xxx() method be implemented. 3076 */ 3077 if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) { 3078 if (!ctlr->transfer && !ctlr->transfer_one && 3079 !ctlr->transfer_one_message) { 3080 return -EINVAL; 3081 } 3082 } 3083 3084 return 0; 3085 } 3086 3087 /** 3088 * spi_register_controller - register SPI master or slave controller 3089 * @ctlr: initialized master, originally from spi_alloc_master() or 3090 * spi_alloc_slave() 3091 * Context: can sleep 3092 * 3093 * SPI controllers connect to their drivers using some non-SPI bus, 3094 * such as the platform bus. The final stage of probe() in that code 3095 * includes calling spi_register_controller() to hook up to this SPI bus glue. 3096 * 3097 * SPI controllers use board specific (often SOC specific) bus numbers, 3098 * and board-specific addressing for SPI devices combines those numbers 3099 * with chip select numbers. Since SPI does not directly support dynamic 3100 * device identification, boards need configuration tables telling which 3101 * chip is at which address. 3102 * 3103 * This must be called from context that can sleep. It returns zero on 3104 * success, else a negative error code (dropping the controller's refcount). 3105 * After a successful return, the caller is responsible for calling 3106 * spi_unregister_controller(). 3107 * 3108 * Return: zero on success, else a negative error code. 3109 */ 3110 int spi_register_controller(struct spi_controller *ctlr) 3111 { 3112 struct device *dev = ctlr->dev.parent; 3113 struct boardinfo *bi; 3114 int status; 3115 int id, first_dynamic; 3116 3117 if (!dev) 3118 return -ENODEV; 3119 3120 /* 3121 * Make sure all necessary hooks are implemented before registering 3122 * the SPI controller. 3123 */ 3124 status = spi_controller_check_ops(ctlr); 3125 if (status) 3126 return status; 3127 3128 if (ctlr->bus_num >= 0) { 3129 /* Devices with a fixed bus num must check-in with the num */ 3130 mutex_lock(&board_lock); 3131 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, 3132 ctlr->bus_num + 1, GFP_KERNEL); 3133 mutex_unlock(&board_lock); 3134 if (WARN(id < 0, "couldn't get idr")) 3135 return id == -ENOSPC ? 
-EBUSY : id; 3136 ctlr->bus_num = id; 3137 } else if (ctlr->dev.of_node) { 3138 /* Allocate dynamic bus number using Linux idr */ 3139 id = of_alias_get_id(ctlr->dev.of_node, "spi"); 3140 if (id >= 0) { 3141 ctlr->bus_num = id; 3142 mutex_lock(&board_lock); 3143 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, 3144 ctlr->bus_num + 1, GFP_KERNEL); 3145 mutex_unlock(&board_lock); 3146 if (WARN(id < 0, "couldn't get idr")) 3147 return id == -ENOSPC ? -EBUSY : id; 3148 } 3149 } 3150 if (ctlr->bus_num < 0) { 3151 first_dynamic = of_alias_get_highest_id("spi"); 3152 if (first_dynamic < 0) 3153 first_dynamic = 0; 3154 else 3155 first_dynamic++; 3156 3157 mutex_lock(&board_lock); 3158 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic, 3159 0, GFP_KERNEL); 3160 mutex_unlock(&board_lock); 3161 if (WARN(id < 0, "couldn't get idr")) 3162 return id; 3163 ctlr->bus_num = id; 3164 } 3165 ctlr->bus_lock_flag = 0; 3166 init_completion(&ctlr->xfer_completion); 3167 init_completion(&ctlr->cur_msg_completion); 3168 if (!ctlr->max_dma_len) 3169 ctlr->max_dma_len = INT_MAX; 3170 3171 /* 3172 * Register the device, then userspace will see it. 3173 * Registration fails if the bus ID is in use. 3174 */ 3175 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num); 3176 3177 if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) { 3178 status = spi_get_gpio_descs(ctlr); 3179 if (status) 3180 goto free_bus_id; 3181 /* 3182 * A controller using GPIO descriptors always 3183 * supports SPI_CS_HIGH if need be. 3184 */ 3185 ctlr->mode_bits |= SPI_CS_HIGH; 3186 } 3187 3188 /* 3189 * Even if it's just one always-selected device, there must 3190 * be at least one chipselect. 3191 */ 3192 if (!ctlr->num_chipselect) { 3193 status = -EINVAL; 3194 goto free_bus_id; 3195 } 3196 3197 /* Setting last_cs to -1 means no chip selected */ 3198 ctlr->last_cs = -1; 3199 3200 status = device_add(&ctlr->dev); 3201 if (status < 0) 3202 goto free_bus_id; 3203 dev_dbg(dev, "registered %s %s\n", 3204 spi_controller_is_slave(ctlr) ? "slave" : "master", 3205 dev_name(&ctlr->dev)); 3206 3207 /* 3208 * If we're using a queued driver, start the queue. Note that we don't 3209 * need the queueing logic if the driver is only supporting high-level 3210 * memory operations. 
3211 */ 3212 if (ctlr->transfer) { 3213 dev_info(dev, "controller is unqueued, this is deprecated\n"); 3214 } else if (ctlr->transfer_one || ctlr->transfer_one_message) { 3215 status = spi_controller_initialize_queue(ctlr); 3216 if (status) { 3217 device_del(&ctlr->dev); 3218 goto free_bus_id; 3219 } 3220 } 3221 /* Add statistics */ 3222 ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev); 3223 if (!ctlr->pcpu_statistics) { 3224 dev_err(dev, "Error allocating per-cpu statistics\n"); 3225 status = -ENOMEM; 3226 goto destroy_queue; 3227 } 3228 3229 mutex_lock(&board_lock); 3230 list_add_tail(&ctlr->list, &spi_controller_list); 3231 list_for_each_entry(bi, &board_list, list) 3232 spi_match_controller_to_boardinfo(ctlr, &bi->board_info); 3233 mutex_unlock(&board_lock); 3234 3235 /* Register devices from the device tree and ACPI */ 3236 of_register_spi_devices(ctlr); 3237 acpi_register_spi_devices(ctlr); 3238 return status; 3239 3240 destroy_queue: 3241 spi_destroy_queue(ctlr); 3242 free_bus_id: 3243 mutex_lock(&board_lock); 3244 idr_remove(&spi_master_idr, ctlr->bus_num); 3245 mutex_unlock(&board_lock); 3246 return status; 3247 } 3248 EXPORT_SYMBOL_GPL(spi_register_controller); 3249 3250 static void devm_spi_unregister(struct device *dev, void *res) 3251 { 3252 spi_unregister_controller(*(struct spi_controller **)res); 3253 } 3254 3255 /** 3256 * devm_spi_register_controller - register managed SPI master or slave 3257 * controller 3258 * @dev: device managing SPI controller 3259 * @ctlr: initialized controller, originally from spi_alloc_master() or 3260 * spi_alloc_slave() 3261 * Context: can sleep 3262 * 3263 * Register a SPI device as with spi_register_controller() which will 3264 * automatically be unregistered and freed. 3265 * 3266 * Return: zero on success, else a negative error code. 3267 */ 3268 int devm_spi_register_controller(struct device *dev, 3269 struct spi_controller *ctlr) 3270 { 3271 struct spi_controller **ptr; 3272 int ret; 3273 3274 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); 3275 if (!ptr) 3276 return -ENOMEM; 3277 3278 ret = spi_register_controller(ctlr); 3279 if (!ret) { 3280 *ptr = ctlr; 3281 devres_add(dev, ptr); 3282 } else { 3283 devres_free(ptr); 3284 } 3285 3286 return ret; 3287 } 3288 EXPORT_SYMBOL_GPL(devm_spi_register_controller); 3289 3290 static int __unregister(struct device *dev, void *null) 3291 { 3292 spi_unregister_device(to_spi_device(dev)); 3293 return 0; 3294 } 3295 3296 /** 3297 * spi_unregister_controller - unregister SPI master or slave controller 3298 * @ctlr: the controller being unregistered 3299 * Context: can sleep 3300 * 3301 * This call is used only by SPI controller drivers, which are the 3302 * only ones directly touching chip registers. 3303 * 3304 * This must be called from context that can sleep. 3305 * 3306 * Note that this function also drops a reference to the controller. 
3307 */ 3308 void spi_unregister_controller(struct spi_controller *ctlr) 3309 { 3310 struct spi_controller *found; 3311 int id = ctlr->bus_num; 3312 3313 /* Prevent addition of new devices, unregister existing ones */ 3314 if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) 3315 mutex_lock(&ctlr->add_lock); 3316 3317 device_for_each_child(&ctlr->dev, NULL, __unregister); 3318 3319 /* First make sure that this controller was ever added */ 3320 mutex_lock(&board_lock); 3321 found = idr_find(&spi_master_idr, id); 3322 mutex_unlock(&board_lock); 3323 if (ctlr->queued) { 3324 if (spi_destroy_queue(ctlr)) 3325 dev_err(&ctlr->dev, "queue remove failed\n"); 3326 } 3327 mutex_lock(&board_lock); 3328 list_del(&ctlr->list); 3329 mutex_unlock(&board_lock); 3330 3331 device_del(&ctlr->dev); 3332 3333 /* Free bus id */ 3334 mutex_lock(&board_lock); 3335 if (found == ctlr) 3336 idr_remove(&spi_master_idr, id); 3337 mutex_unlock(&board_lock); 3338 3339 if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) 3340 mutex_unlock(&ctlr->add_lock); 3341 3342 /* Release the last reference on the controller if its driver 3343 * has not yet been converted to devm_spi_alloc_master/slave(). 3344 */ 3345 if (!ctlr->devm_allocated) 3346 put_device(&ctlr->dev); 3347 } 3348 EXPORT_SYMBOL_GPL(spi_unregister_controller); 3349 3350 int spi_controller_suspend(struct spi_controller *ctlr) 3351 { 3352 int ret; 3353 3354 /* Basically no-ops for non-queued controllers */ 3355 if (!ctlr->queued) 3356 return 0; 3357 3358 ret = spi_stop_queue(ctlr); 3359 if (ret) 3360 dev_err(&ctlr->dev, "queue stop failed\n"); 3361 3362 return ret; 3363 } 3364 EXPORT_SYMBOL_GPL(spi_controller_suspend); 3365 3366 int spi_controller_resume(struct spi_controller *ctlr) 3367 { 3368 int ret; 3369 3370 if (!ctlr->queued) 3371 return 0; 3372 3373 ret = spi_start_queue(ctlr); 3374 if (ret) 3375 dev_err(&ctlr->dev, "queue restart failed\n"); 3376 3377 return ret; 3378 } 3379 EXPORT_SYMBOL_GPL(spi_controller_resume); 3380 3381 /*-------------------------------------------------------------------------*/ 3382 3383 /* Core methods for spi_message alterations */ 3384 3385 static void __spi_replace_transfers_release(struct spi_controller *ctlr, 3386 struct spi_message *msg, 3387 void *res) 3388 { 3389 struct spi_replaced_transfers *rxfer = res; 3390 size_t i; 3391 3392 /* Call extra callback if requested */ 3393 if (rxfer->release) 3394 rxfer->release(ctlr, msg, res); 3395 3396 /* Insert replaced transfers back into the message */ 3397 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); 3398 3399 /* Remove the formerly inserted entries */ 3400 for (i = 0; i < rxfer->inserted; i++) 3401 list_del(&rxfer->inserted_transfers[i].transfer_list); 3402 } 3403 3404 /** 3405 * spi_replace_transfers - replace transfers with several transfers 3406 * and register change with spi_message.resources 3407 * @msg: the spi_message we work upon 3408 * @xfer_first: the first spi_transfer we want to replace 3409 * @remove: number of transfers to remove 3410 * @insert: the number of transfers we want to insert instead 3411 * @release: extra release code necessary in some circumstances 3412 * @extradatasize: extra data to allocate (with alignment guarantees 3413 * of struct @spi_transfer) 3414 * @gfp: gfp flags 3415 * 3416 * Returns: pointer to @spi_replaced_transfers, 3417 * PTR_ERR(...) in case of errors. 
3418 */ 3419 static struct spi_replaced_transfers *spi_replace_transfers( 3420 struct spi_message *msg, 3421 struct spi_transfer *xfer_first, 3422 size_t remove, 3423 size_t insert, 3424 spi_replaced_release_t release, 3425 size_t extradatasize, 3426 gfp_t gfp) 3427 { 3428 struct spi_replaced_transfers *rxfer; 3429 struct spi_transfer *xfer; 3430 size_t i; 3431 3432 /* Allocate the structure using spi_res */ 3433 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, 3434 struct_size(rxfer, inserted_transfers, insert) 3435 + extradatasize, 3436 gfp); 3437 if (!rxfer) 3438 return ERR_PTR(-ENOMEM); 3439 3440 /* The release code to invoke before running the generic release */ 3441 rxfer->release = release; 3442 3443 /* Assign extradata */ 3444 if (extradatasize) 3445 rxfer->extradata = 3446 &rxfer->inserted_transfers[insert]; 3447 3448 /* Init the replaced_transfers list */ 3449 INIT_LIST_HEAD(&rxfer->replaced_transfers); 3450 3451 /* 3452 * Assign the list_entry after which we should reinsert 3453 * the @replaced_transfers - it may be spi_message.messages! 3454 */ 3455 rxfer->replaced_after = xfer_first->transfer_list.prev; 3456 3457 /* Remove the requested number of transfers */ 3458 for (i = 0; i < remove; i++) { 3459 /* 3460 * If the entry after replaced_after it is msg->transfers 3461 * then we have been requested to remove more transfers 3462 * than are in the list. 3463 */ 3464 if (rxfer->replaced_after->next == &msg->transfers) { 3465 dev_err(&msg->spi->dev, 3466 "requested to remove more spi_transfers than are available\n"); 3467 /* Insert replaced transfers back into the message */ 3468 list_splice(&rxfer->replaced_transfers, 3469 rxfer->replaced_after); 3470 3471 /* Free the spi_replace_transfer structure... */ 3472 spi_res_free(rxfer); 3473 3474 /* ...and return with an error */ 3475 return ERR_PTR(-EINVAL); 3476 } 3477 3478 /* 3479 * Remove the entry after replaced_after from list of 3480 * transfers and add it to list of replaced_transfers. 3481 */ 3482 list_move_tail(rxfer->replaced_after->next, 3483 &rxfer->replaced_transfers); 3484 } 3485 3486 /* 3487 * Create copy of the given xfer with identical settings 3488 * based on the first transfer to get removed. 3489 */ 3490 for (i = 0; i < insert; i++) { 3491 /* We need to run in reverse order */ 3492 xfer = &rxfer->inserted_transfers[insert - 1 - i]; 3493 3494 /* Copy all spi_transfer data */ 3495 memcpy(xfer, xfer_first, sizeof(*xfer)); 3496 3497 /* Add to list */ 3498 list_add(&xfer->transfer_list, rxfer->replaced_after); 3499 3500 /* Clear cs_change and delay for all but the last */ 3501 if (i) { 3502 xfer->cs_change = false; 3503 xfer->delay.value = 0; 3504 } 3505 } 3506 3507 /* Set up inserted... 
	rxfer->inserted = insert;

	/* ...and register it with spi_res/spi_message */
	spi_res_add(msg, rxfer);

	return rxfer;
}

static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
					struct spi_message *msg,
					struct spi_transfer **xferp,
					size_t maxsize,
					gfp_t gfp)
{
	struct spi_transfer *xfer = *xferp, *xfers;
	struct spi_replaced_transfers *srt;
	size_t offset;
	size_t count, i;

	/* Calculate how many we have to replace */
	count = DIV_ROUND_UP(xfer->len, maxsize);

	/* Create replacement */
	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
	if (IS_ERR(srt))
		return PTR_ERR(srt);
	xfers = srt->inserted_transfers;

	/*
	 * Now handle each of those newly inserted spi_transfers.
	 * Note that the replacement spi_transfers are all preset
	 * to the same values as *xferp, so tx_buf, rx_buf and len
	 * are all identical (as well as most others),
	 * so we just have to fix up len and the pointers.
	 *
	 * This also includes support for the deprecated
	 * spi_message.is_dma_mapped interface.
	 */

	/*
	 * The first transfer just needs the length modified, so we
	 * run it outside the loop.
	 */
	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

	/* All the others need rx_buf/tx_buf also set */
	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
		/* Update rx_buf, tx_buf and dma */
		if (xfers[i].rx_buf)
			xfers[i].rx_buf += offset;
		if (xfers[i].rx_dma)
			xfers[i].rx_dma += offset;
		if (xfers[i].tx_buf)
			xfers[i].tx_buf += offset;
		if (xfers[i].tx_dma)
			xfers[i].tx_dma += offset;

		/* Update length */
		xfers[i].len = min(maxsize, xfers[i].len - offset);
	}

	/*
	 * We set up xferp to the last entry we have inserted,
	 * so that we skip those already split transfers.
	 */
	*xferp = &xfers[count - 1];

	/* Increment statistics counters */
	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
				       transfers_split_maxsize);
	SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
				       transfers_split_maxsize);

	return 0;
}

/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *				 when an individual transfer exceeds a
 *				 certain size
 * @ctlr: the @spi_controller for this transfer
 * @msg: the @spi_message to transform
 * @maxsize: the maximum size of each transfer, in bytes
 * @gfp: GFP allocation flags
 *
 * Return: status of transformation
 */
int spi_split_transfers_maxsize(struct spi_controller *ctlr,
				struct spi_message *msg,
				size_t maxsize,
				gfp_t gfp)
{
	struct spi_transfer *xfer;
	int ret;

	/*
	 * Iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer does
	 * potentially belong to a different list by the time the
	 * replacement has happened).
3609 */ 3610 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 3611 if (xfer->len > maxsize) { 3612 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, 3613 maxsize, gfp); 3614 if (ret) 3615 return ret; 3616 } 3617 } 3618 3619 return 0; 3620 } 3621 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize); 3622 3623 3624 /** 3625 * spi_split_transfers_maxwords - split spi transfers into multiple transfers 3626 * when an individual transfer exceeds a 3627 * certain number of SPI words 3628 * @ctlr: the @spi_controller for this transfer 3629 * @msg: the @spi_message to transform 3630 * @maxwords: the number of words to limit each transfer to 3631 * @gfp: GFP allocation flags 3632 * 3633 * Return: status of transformation 3634 */ 3635 int spi_split_transfers_maxwords(struct spi_controller *ctlr, 3636 struct spi_message *msg, 3637 size_t maxwords, 3638 gfp_t gfp) 3639 { 3640 struct spi_transfer *xfer; 3641 3642 /* 3643 * Iterate over the transfer_list, 3644 * but note that xfer is advanced to the last transfer inserted 3645 * to avoid checking sizes again unnecessarily (also xfer does 3646 * potentially belong to a different list by the time the 3647 * replacement has happened). 3648 */ 3649 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 3650 size_t maxsize; 3651 int ret; 3652 3653 if (xfer->bits_per_word <= 8) 3654 maxsize = maxwords; 3655 else if (xfer->bits_per_word <= 16) 3656 maxsize = 2 * maxwords; 3657 else 3658 maxsize = 4 * maxwords; 3659 3660 if (xfer->len > maxsize) { 3661 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, 3662 maxsize, gfp); 3663 if (ret) 3664 return ret; 3665 } 3666 } 3667 3668 return 0; 3669 } 3670 EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords); 3671 3672 /*-------------------------------------------------------------------------*/ 3673 3674 /* Core methods for SPI controller protocol drivers. Some of the 3675 * other core methods are currently defined as inline functions. 3676 */ 3677 3678 static int __spi_validate_bits_per_word(struct spi_controller *ctlr, 3679 u8 bits_per_word) 3680 { 3681 if (ctlr->bits_per_word_mask) { 3682 /* Only 32 bits fit in the mask */ 3683 if (bits_per_word > 32) 3684 return -EINVAL; 3685 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word))) 3686 return -EINVAL; 3687 } 3688 3689 return 0; 3690 } 3691 3692 /** 3693 * spi_set_cs_timing - configure CS setup, hold, and inactive delays 3694 * @spi: the device that requires specific CS timing configuration 3695 * 3696 * Return: zero on success, else a negative error code. 
3697 */ 3698 static int spi_set_cs_timing(struct spi_device *spi) 3699 { 3700 struct device *parent = spi->controller->dev.parent; 3701 int status = 0; 3702 3703 if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) { 3704 if (spi->controller->auto_runtime_pm) { 3705 status = pm_runtime_get_sync(parent); 3706 if (status < 0) { 3707 pm_runtime_put_noidle(parent); 3708 dev_err(&spi->controller->dev, "Failed to power device: %d\n", 3709 status); 3710 return status; 3711 } 3712 3713 status = spi->controller->set_cs_timing(spi); 3714 pm_runtime_mark_last_busy(parent); 3715 pm_runtime_put_autosuspend(parent); 3716 } else { 3717 status = spi->controller->set_cs_timing(spi); 3718 } 3719 } 3720 return status; 3721 } 3722 3723 /** 3724 * spi_setup - setup SPI mode and clock rate 3725 * @spi: the device whose settings are being modified 3726 * Context: can sleep, and no requests are queued to the device 3727 * 3728 * SPI protocol drivers may need to update the transfer mode if the 3729 * device doesn't work with its default. They may likewise need 3730 * to update clock rates or word sizes from initial values. This function 3731 * changes those settings, and must be called from a context that can sleep. 3732 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take 3733 * effect the next time the device is selected and data is transferred to 3734 * or from it. When this function returns, the spi device is deselected. 3735 * 3736 * Note that this call will fail if the protocol driver specifies an option 3737 * that the underlying controller or its driver does not support. For 3738 * example, not all hardware supports wire transfers using nine bit words, 3739 * LSB-first wire encoding, or active-high chipselects. 3740 * 3741 * Return: zero on success, else a negative error code. 3742 */ 3743 int spi_setup(struct spi_device *spi) 3744 { 3745 unsigned bad_bits, ugly_bits; 3746 int status = 0; 3747 3748 /* 3749 * Check mode to prevent that any two of DUAL, QUAD and NO_MOSI/MISO 3750 * are set at the same time. 3751 */ 3752 if ((hweight_long(spi->mode & 3753 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) || 3754 (hweight_long(spi->mode & 3755 (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) { 3756 dev_err(&spi->dev, 3757 "setup: can not select any two of dual, quad and no-rx/tx at the same time\n"); 3758 return -EINVAL; 3759 } 3760 /* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */ 3761 if ((spi->mode & SPI_3WIRE) && (spi->mode & 3762 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | 3763 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))) 3764 return -EINVAL; 3765 /* 3766 * Help drivers fail *cleanly* when they need options 3767 * that aren't supported with their current controller. 3768 * SPI_CS_WORD has a fallback software implementation, 3769 * so it is ignored here. 
3770 */ 3771 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD | 3772 SPI_NO_TX | SPI_NO_RX); 3773 ugly_bits = bad_bits & 3774 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | 3775 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL); 3776 if (ugly_bits) { 3777 dev_warn(&spi->dev, 3778 "setup: ignoring unsupported mode bits %x\n", 3779 ugly_bits); 3780 spi->mode &= ~ugly_bits; 3781 bad_bits &= ~ugly_bits; 3782 } 3783 if (bad_bits) { 3784 dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 3785 bad_bits); 3786 return -EINVAL; 3787 } 3788 3789 if (!spi->bits_per_word) { 3790 spi->bits_per_word = 8; 3791 } else { 3792 /* 3793 * Some controllers may not support the default 8 bits-per-word 3794 * so only perform the check when this is explicitly provided. 3795 */ 3796 status = __spi_validate_bits_per_word(spi->controller, 3797 spi->bits_per_word); 3798 if (status) 3799 return status; 3800 } 3801 3802 if (spi->controller->max_speed_hz && 3803 (!spi->max_speed_hz || 3804 spi->max_speed_hz > spi->controller->max_speed_hz)) 3805 spi->max_speed_hz = spi->controller->max_speed_hz; 3806 3807 mutex_lock(&spi->controller->io_mutex); 3808 3809 if (spi->controller->setup) { 3810 status = spi->controller->setup(spi); 3811 if (status) { 3812 mutex_unlock(&spi->controller->io_mutex); 3813 dev_err(&spi->controller->dev, "Failed to setup device: %d\n", 3814 status); 3815 return status; 3816 } 3817 } 3818 3819 status = spi_set_cs_timing(spi); 3820 if (status) { 3821 mutex_unlock(&spi->controller->io_mutex); 3822 return status; 3823 } 3824 3825 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) { 3826 status = pm_runtime_resume_and_get(spi->controller->dev.parent); 3827 if (status < 0) { 3828 mutex_unlock(&spi->controller->io_mutex); 3829 dev_err(&spi->controller->dev, "Failed to power device: %d\n", 3830 status); 3831 return status; 3832 } 3833 3834 /* 3835 * We do not want to return positive value from pm_runtime_get, 3836 * there are many instances of devices calling spi_setup() and 3837 * checking for a non-zero return value instead of a negative 3838 * return value. 3839 */ 3840 status = 0; 3841 3842 spi_set_cs(spi, false, true); 3843 pm_runtime_mark_last_busy(spi->controller->dev.parent); 3844 pm_runtime_put_autosuspend(spi->controller->dev.parent); 3845 } else { 3846 spi_set_cs(spi, false, true); 3847 } 3848 3849 mutex_unlock(&spi->controller->io_mutex); 3850 3851 if (spi->rt && !spi->controller->rt) { 3852 spi->controller->rt = true; 3853 spi_set_thread_rt(spi->controller); 3854 } 3855 3856 trace_spi_setup(spi, status); 3857 3858 dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n", 3859 spi->mode & SPI_MODE_X_MASK, 3860 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 3861 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 3862 (spi->mode & SPI_3WIRE) ? "3wire, " : "", 3863 (spi->mode & SPI_LOOP) ? 
"loopback, " : "", 3864 spi->bits_per_word, spi->max_speed_hz, 3865 status); 3866 3867 return status; 3868 } 3869 EXPORT_SYMBOL_GPL(spi_setup); 3870 3871 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer, 3872 struct spi_device *spi) 3873 { 3874 int delay1, delay2; 3875 3876 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer); 3877 if (delay1 < 0) 3878 return delay1; 3879 3880 delay2 = spi_delay_to_ns(&spi->word_delay, xfer); 3881 if (delay2 < 0) 3882 return delay2; 3883 3884 if (delay1 < delay2) 3885 memcpy(&xfer->word_delay, &spi->word_delay, 3886 sizeof(xfer->word_delay)); 3887 3888 return 0; 3889 } 3890 3891 static int __spi_validate(struct spi_device *spi, struct spi_message *message) 3892 { 3893 struct spi_controller *ctlr = spi->controller; 3894 struct spi_transfer *xfer; 3895 int w_size; 3896 3897 if (list_empty(&message->transfers)) 3898 return -EINVAL; 3899 3900 /* 3901 * If an SPI controller does not support toggling the CS line on each 3902 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO 3903 * for the CS line, we can emulate the CS-per-word hardware function by 3904 * splitting transfers into one-word transfers and ensuring that 3905 * cs_change is set for each transfer. 3906 */ 3907 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) || 3908 spi_get_csgpiod(spi, 0))) { 3909 size_t maxsize; 3910 int ret; 3911 3912 maxsize = (spi->bits_per_word + 7) / 8; 3913 3914 /* spi_split_transfers_maxsize() requires message->spi */ 3915 message->spi = spi; 3916 3917 ret = spi_split_transfers_maxsize(ctlr, message, maxsize, 3918 GFP_KERNEL); 3919 if (ret) 3920 return ret; 3921 3922 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3923 /* Don't change cs_change on the last entry in the list */ 3924 if (list_is_last(&xfer->transfer_list, &message->transfers)) 3925 break; 3926 xfer->cs_change = 1; 3927 } 3928 } 3929 3930 /* 3931 * Half-duplex links include original MicroWire, and ones with 3932 * only one data pin like SPI_3WIRE (switches direction) or where 3933 * either MOSI or MISO is missing. They can also be caused by 3934 * software limitations. 3935 */ 3936 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) || 3937 (spi->mode & SPI_3WIRE)) { 3938 unsigned flags = ctlr->flags; 3939 3940 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3941 if (xfer->rx_buf && xfer->tx_buf) 3942 return -EINVAL; 3943 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf) 3944 return -EINVAL; 3945 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf) 3946 return -EINVAL; 3947 } 3948 } 3949 3950 /* 3951 * Set transfer bits_per_word and max speed as spi device default if 3952 * it is not set for this transfer. 3953 * Set transfer tx_nbits and rx_nbits as single transfer default 3954 * (SPI_NBITS_SINGLE) if it is not set for this transfer. 3955 * Ensure transfer word_delay is at least as long as that required by 3956 * device itself. 
3957 */
3958 message->frame_length = 0;
3959 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3960 xfer->effective_speed_hz = 0;
3961 message->frame_length += xfer->len;
3962 if (!xfer->bits_per_word)
3963 xfer->bits_per_word = spi->bits_per_word;
3964
3965 if (!xfer->speed_hz)
3966 xfer->speed_hz = spi->max_speed_hz;
3967
3968 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3969 xfer->speed_hz = ctlr->max_speed_hz;
3970
3971 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3972 return -EINVAL;
3973
3974 /*
3975 * The SPI transfer length must be a multiple of the SPI word size,
3976 * where the word size is rounded up to a power-of-two number of bytes.
3977 */
3978 if (xfer->bits_per_word <= 8)
3979 w_size = 1;
3980 else if (xfer->bits_per_word <= 16)
3981 w_size = 2;
3982 else
3983 w_size = 4;
3984
3985 /* No partial transfers accepted */
3986 if (xfer->len % w_size)
3987 return -EINVAL;
3988
3989 if (xfer->speed_hz && ctlr->min_speed_hz &&
3990 xfer->speed_hz < ctlr->min_speed_hz)
3991 return -EINVAL;
3992
3993 if (xfer->tx_buf && !xfer->tx_nbits)
3994 xfer->tx_nbits = SPI_NBITS_SINGLE;
3995 if (xfer->rx_buf && !xfer->rx_nbits)
3996 xfer->rx_nbits = SPI_NBITS_SINGLE;
3997 /*
3998 * Check transfer tx/rx_nbits:
3999 * 1. check the value matches one of single, dual and quad
4000 * 2. check tx/rx_nbits match the mode in spi_device
4001 */
4002 if (xfer->tx_buf) {
4003 if (spi->mode & SPI_NO_TX)
4004 return -EINVAL;
4005 if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
4006 xfer->tx_nbits != SPI_NBITS_DUAL &&
4007 xfer->tx_nbits != SPI_NBITS_QUAD)
4008 return -EINVAL;
4009 if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
4010 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
4011 return -EINVAL;
4012 if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
4013 !(spi->mode & SPI_TX_QUAD))
4014 return -EINVAL;
4015 }
4016 /* Check transfer rx_nbits */
4017 if (xfer->rx_buf) {
4018 if (spi->mode & SPI_NO_RX)
4019 return -EINVAL;
4020 if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
4021 xfer->rx_nbits != SPI_NBITS_DUAL &&
4022 xfer->rx_nbits != SPI_NBITS_QUAD)
4023 return -EINVAL;
4024 if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
4025 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
4026 return -EINVAL;
4027 if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
4028 !(spi->mode & SPI_RX_QUAD))
4029 return -EINVAL;
4030 }
4031
4032 if (_spi_xfer_word_delay_update(xfer, spi))
4033 return -EINVAL;
4034 }
4035
4036 message->status = -EINPROGRESS;
4037
4038 return 0;
4039 }
4040
4041 static int __spi_async(struct spi_device *spi, struct spi_message *message)
4042 {
4043 struct spi_controller *ctlr = spi->controller;
4044 struct spi_transfer *xfer;
4045
4046 /*
4047 * Some controllers do not support doing regular SPI transfers. Return
4048 * -ENOTSUPP when this is the case.
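 * Controllers that use the core message queue get ctlr->transfer set
 * for them when the queue is initialized, so this typically only
 * triggers for controllers that implement spi-mem operations alone.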
4049 */ 4050 if (!ctlr->transfer) 4051 return -ENOTSUPP; 4052 4053 message->spi = spi; 4054 4055 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async); 4056 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async); 4057 4058 trace_spi_message_submit(message); 4059 4060 if (!ctlr->ptp_sts_supported) { 4061 list_for_each_entry(xfer, &message->transfers, transfer_list) { 4062 xfer->ptp_sts_word_pre = 0; 4063 ptp_read_system_prets(xfer->ptp_sts); 4064 } 4065 } 4066 4067 return ctlr->transfer(spi, message); 4068 } 4069 4070 /** 4071 * spi_async - asynchronous SPI transfer 4072 * @spi: device with which data will be exchanged 4073 * @message: describes the data transfers, including completion callback 4074 * Context: any (irqs may be blocked, etc) 4075 * 4076 * This call may be used in_irq and other contexts which can't sleep, 4077 * as well as from task contexts which can sleep. 4078 * 4079 * The completion callback is invoked in a context which can't sleep. 4080 * Before that invocation, the value of message->status is undefined. 4081 * When the callback is issued, message->status holds either zero (to 4082 * indicate complete success) or a negative error code. After that 4083 * callback returns, the driver which issued the transfer request may 4084 * deallocate the associated memory; it's no longer in use by any SPI 4085 * core or controller driver code. 4086 * 4087 * Note that although all messages to a spi_device are handled in 4088 * FIFO order, messages may go to different devices in other orders. 4089 * Some device might be higher priority, or have various "hard" access 4090 * time requirements, for example. 4091 * 4092 * On detection of any fault during the transfer, processing of 4093 * the entire message is aborted, and the device is deselected. 4094 * Until returning from the associated message completion callback, 4095 * no other spi_message queued to that device will be processed. 4096 * (This rule applies equally to all the synchronous transfer calls, 4097 * which are wrappers around this core asynchronous primitive.) 4098 * 4099 * Return: zero on success, else a negative error code. 4100 */ 4101 int spi_async(struct spi_device *spi, struct spi_message *message) 4102 { 4103 struct spi_controller *ctlr = spi->controller; 4104 int ret; 4105 unsigned long flags; 4106 4107 ret = __spi_validate(spi, message); 4108 if (ret != 0) 4109 return ret; 4110 4111 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 4112 4113 if (ctlr->bus_lock_flag) 4114 ret = -EBUSY; 4115 else 4116 ret = __spi_async(spi, message); 4117 4118 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 4119 4120 return ret; 4121 } 4122 EXPORT_SYMBOL_GPL(spi_async); 4123 4124 /** 4125 * spi_async_locked - version of spi_async with exclusive bus usage 4126 * @spi: device with which data will be exchanged 4127 * @message: describes the data transfers, including completion callback 4128 * Context: any (irqs may be blocked, etc) 4129 * 4130 * This call may be used in_irq and other contexts which can't sleep, 4131 * as well as from task contexts which can sleep. 4132 * 4133 * The completion callback is invoked in a context which can't sleep. 4134 * Before that invocation, the value of message->status is undefined. 4135 * When the callback is issued, message->status holds either zero (to 4136 * indicate complete success) or a negative error code. 
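 *
 * A minimal submission sketch (hypothetical driver code, error handling
 * elided), where my_complete() is a completion callback supplied by the
 * caller; spi_async() is used the same way from unlocked contexts:
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	message->complete = my_complete;
 *	message->context = &done;
 *	status = spi_async_locked(spi, message);
 *	if (status == 0)
 *		wait_for_completion(&done);
 *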
After that 4137 * callback returns, the driver which issued the transfer request may 4138 * deallocate the associated memory; it's no longer in use by any SPI 4139 * core or controller driver code. 4140 * 4141 * Note that although all messages to a spi_device are handled in 4142 * FIFO order, messages may go to different devices in other orders. 4143 * Some device might be higher priority, or have various "hard" access 4144 * time requirements, for example. 4145 * 4146 * On detection of any fault during the transfer, processing of 4147 * the entire message is aborted, and the device is deselected. 4148 * Until returning from the associated message completion callback, 4149 * no other spi_message queued to that device will be processed. 4150 * (This rule applies equally to all the synchronous transfer calls, 4151 * which are wrappers around this core asynchronous primitive.) 4152 * 4153 * Return: zero on success, else a negative error code. 4154 */ 4155 static int spi_async_locked(struct spi_device *spi, struct spi_message *message) 4156 { 4157 struct spi_controller *ctlr = spi->controller; 4158 int ret; 4159 unsigned long flags; 4160 4161 ret = __spi_validate(spi, message); 4162 if (ret != 0) 4163 return ret; 4164 4165 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 4166 4167 ret = __spi_async(spi, message); 4168 4169 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 4170 4171 return ret; 4172 4173 } 4174 4175 static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg) 4176 { 4177 bool was_busy; 4178 int ret; 4179 4180 mutex_lock(&ctlr->io_mutex); 4181 4182 was_busy = ctlr->busy; 4183 4184 ctlr->cur_msg = msg; 4185 ret = __spi_pump_transfer_message(ctlr, msg, was_busy); 4186 if (ret) 4187 goto out; 4188 4189 ctlr->cur_msg = NULL; 4190 ctlr->fallback = false; 4191 4192 if (!was_busy) { 4193 kfree(ctlr->dummy_rx); 4194 ctlr->dummy_rx = NULL; 4195 kfree(ctlr->dummy_tx); 4196 ctlr->dummy_tx = NULL; 4197 if (ctlr->unprepare_transfer_hardware && 4198 ctlr->unprepare_transfer_hardware(ctlr)) 4199 dev_err(&ctlr->dev, 4200 "failed to unprepare transfer hardware\n"); 4201 spi_idle_runtime_pm(ctlr); 4202 } 4203 4204 out: 4205 mutex_unlock(&ctlr->io_mutex); 4206 } 4207 4208 /*-------------------------------------------------------------------------*/ 4209 4210 /* 4211 * Utility methods for SPI protocol drivers, layered on 4212 * top of the core. Some other utility methods are defined as 4213 * inline functions. 4214 */ 4215 4216 static void spi_complete(void *arg) 4217 { 4218 complete(arg); 4219 } 4220 4221 static int __spi_sync(struct spi_device *spi, struct spi_message *message) 4222 { 4223 DECLARE_COMPLETION_ONSTACK(done); 4224 int status; 4225 struct spi_controller *ctlr = spi->controller; 4226 4227 status = __spi_validate(spi, message); 4228 if (status != 0) 4229 return status; 4230 4231 message->spi = spi; 4232 4233 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync); 4234 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync); 4235 4236 /* 4237 * Checking queue_empty here only guarantees async/sync message 4238 * ordering when coming from the same context. It does not need to 4239 * guard against reentrancy from a different context. The io_mutex 4240 * will catch those cases. 
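 * For example, a context that has queued an async message and then
 * calls spi_sync() sees queue_empty == false and takes the queued
 * path below, so its earlier async message completes first.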
4241 */
4242 if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4243 message->actual_length = 0;
4244 message->status = -EINPROGRESS;
4245
4246 trace_spi_message_submit(message);
4247
4248 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4249 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
4250
4251 __spi_transfer_message_noqueue(ctlr, message);
4252
4253 return message->status;
4254 }
4255
4256 /*
4257 * There are messages in the async queue that could have originated
4258 * from the same context, so we need to preserve ordering.
4259 * Therefore we send the message to the async queue and wait until it
4260 * is completed.
4261 */
4262 message->complete = spi_complete;
4263 message->context = &done;
4264 status = spi_async_locked(spi, message);
4265 if (status == 0) {
4266 wait_for_completion(&done);
4267 status = message->status;
4268 }
4269 message->context = NULL;
4270
4271 return status;
4272 }
4273
4274 /**
4275 * spi_sync - blocking/synchronous SPI data transfers
4276 * @spi: device with which data will be exchanged
4277 * @message: describes the data transfers
4278 * Context: can sleep
4279 *
4280 * This call may only be used from a context that may sleep. The sleep
4281 * is non-interruptible, and has no timeout. Low-overhead controller
4282 * drivers may DMA directly into and out of the message buffers.
4283 *
4284 * Note that the SPI device's chip select is active during the message,
4285 * and then is normally disabled between messages. Drivers for some
4286 * frequently-used devices may want to minimize costs of selecting a chip,
4287 * by leaving it selected in anticipation that the next message will go
4288 * to the same chip. (That may increase power usage.)
4289 *
4290 * Also, the caller is guaranteeing that the memory associated with the
4291 * message will not be freed before this call returns.
4292 *
4293 * Return: zero on success, else a negative error code.
4294 */
4295 int spi_sync(struct spi_device *spi, struct spi_message *message)
4296 {
4297 int ret;
4298
4299 mutex_lock(&spi->controller->bus_lock_mutex);
4300 ret = __spi_sync(spi, message);
4301 mutex_unlock(&spi->controller->bus_lock_mutex);
4302
4303 return ret;
4304 }
4305 EXPORT_SYMBOL_GPL(spi_sync);
4306
4307 /**
4308 * spi_sync_locked - version of spi_sync with exclusive bus usage
4309 * @spi: device with which data will be exchanged
4310 * @message: describes the data transfers
4311 * Context: can sleep
4312 *
4313 * This call may only be used from a context that may sleep. The sleep
4314 * is non-interruptible, and has no timeout. Low-overhead controller
4315 * drivers may DMA directly into and out of the message buffers.
4316 *
4317 * This call should be used by drivers that require exclusive access to the
4318 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4319 * be released by a spi_bus_unlock call when the exclusive access is over.
4320 *
4321 * Return: zero on success, else a negative error code.
4322 */
4323 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4324 {
4325 return __spi_sync(spi, message);
4326 }
4327 EXPORT_SYMBOL_GPL(spi_sync_locked);
4328
4329 /**
4330 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4331 * @ctlr: SPI bus master that should be locked for exclusive bus access
4332 * Context: can sleep
4333 *
4334 * This call may only be used from a context that may sleep. The sleep
4335 * is non-interruptible, and has no timeout.
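 *
 * A typical locked sequence is (a sketch, with hypothetical driver
 * messages):
 *
 *	spi_bus_lock(spi->controller);
 *	spi_sync_locked(spi, &first_msg);
 *	spi_sync_locked(spi, &second_msg);
 *	spi_bus_unlock(spi->controller);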
4336 *
4337 * This call should be used by drivers that require exclusive access to the
4338 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4339 * exclusive access is over. Data transfer must be done by spi_sync_locked
4340 * and spi_async_locked calls when the SPI bus lock is held.
4341 *
4342 * Return: always zero.
4343 */
4344 int spi_bus_lock(struct spi_controller *ctlr)
4345 {
4346 unsigned long flags;
4347
4348 mutex_lock(&ctlr->bus_lock_mutex);
4349
4350 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4351 ctlr->bus_lock_flag = 1;
4352 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4353
4354 /* Mutex remains locked until spi_bus_unlock() is called */
4355
4356 return 0;
4357 }
4358 EXPORT_SYMBOL_GPL(spi_bus_lock);
4359
4360 /**
4361 * spi_bus_unlock - release the lock for exclusive SPI bus usage
4362 * @ctlr: SPI bus master that was locked for exclusive bus access
4363 * Context: can sleep
4364 *
4365 * This call may only be used from a context that may sleep. The sleep
4366 * is non-interruptible, and has no timeout.
4367 *
4368 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4369 * call.
4370 *
4371 * Return: always zero.
4372 */
4373 int spi_bus_unlock(struct spi_controller *ctlr)
4374 {
4375 ctlr->bus_lock_flag = 0;
4376
4377 mutex_unlock(&ctlr->bus_lock_mutex);
4378
4379 return 0;
4380 }
4381 EXPORT_SYMBOL_GPL(spi_bus_unlock);
4382
4383 /* Portable code must never pass more than 32 bytes */
4384 #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
4385
4386 static u8 *buf;
4387
4388 /**
4389 * spi_write_then_read - SPI synchronous write followed by read
4390 * @spi: device with which data will be exchanged
4391 * @txbuf: data to be written (need not be dma-safe)
4392 * @n_tx: size of txbuf, in bytes
4393 * @rxbuf: buffer into which data will be read (need not be dma-safe)
4394 * @n_rx: size of rxbuf, in bytes
4395 * Context: can sleep
4396 *
4397 * This performs a half-duplex MicroWire-style transaction with the
4398 * device, sending txbuf and then reading rxbuf. The return value
4399 * is zero for success, else a negative errno status code.
4400 * This call may only be used from a context that may sleep.
4401 *
4402 * Parameters to this routine are always copied using a small buffer.
4403 * Performance-sensitive or bulk transfer code should instead use
4404 * spi_{async,sync}() calls with dma-safe buffers.
4405 *
4406 * Return: zero on success, else a negative error code.
4407 */
4408 int spi_write_then_read(struct spi_device *spi,
4409 const void *txbuf, unsigned n_tx,
4410 void *rxbuf, unsigned n_rx)
4411 {
4412 static DEFINE_MUTEX(lock);
4413
4414 int status;
4415 struct spi_message message;
4416 struct spi_transfer x[2];
4417 u8 *local_buf;
4418
4419 /*
4420 * Use preallocated DMA-safe buffer if we can. We can't avoid
4421 * copying here (as a pure convenience thing), but we can
4422 * keep heap costs out of the hot path unless someone else is
4423 * using the pre-allocated buffer or the transfer is too large.
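 *
 * For example, with hypothetical sizes and SPI_BUFSIZ == 32: a 1-byte
 * command plus a 4-byte response reuses the static buffer, while a
 * 1-byte command plus a 64-byte response falls back to kmalloc().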
4424 */
4425 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4426 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4427 GFP_KERNEL | GFP_DMA);
4428 if (!local_buf)
4429 return -ENOMEM;
4430 } else {
4431 local_buf = buf;
4432 }
4433
4434 spi_message_init(&message);
4435 memset(x, 0, sizeof(x));
4436 if (n_tx) {
4437 x[0].len = n_tx;
4438 spi_message_add_tail(&x[0], &message);
4439 }
4440 if (n_rx) {
4441 x[1].len = n_rx;
4442 spi_message_add_tail(&x[1], &message);
4443 }
4444
4445 memcpy(local_buf, txbuf, n_tx);
4446 x[0].tx_buf = local_buf;
4447 x[1].rx_buf = local_buf + n_tx;
4448
4449 /* Do the I/O */
4450 status = spi_sync(spi, &message);
4451 if (status == 0)
4452 memcpy(rxbuf, x[1].rx_buf, n_rx);
4453
4454 if (x[0].tx_buf == buf)
4455 mutex_unlock(&lock);
4456 else
4457 kfree(local_buf);
4458
4459 return status;
4460 }
4461 EXPORT_SYMBOL_GPL(spi_write_then_read);
4462
4463 /*-------------------------------------------------------------------------*/
4464
4465 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
4466 /* Must call put_device() when done with the returned spi_device */
4467 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4468 {
4469 struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4470
4471 return dev ? to_spi_device(dev) : NULL;
4472 }
4473
4474 /* The SPI controllers are not on the spi_bus, so we find them another way */
4475 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4476 {
4477 struct device *dev;
4478
4479 dev = class_find_device_by_of_node(&spi_master_class, node);
4480 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4481 dev = class_find_device_by_of_node(&spi_slave_class, node);
4482 if (!dev)
4483 return NULL;
4484
4485 /* Reference taken in class_find_device() */
4486 return container_of(dev, struct spi_controller, dev);
4487 }
4488
4489 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4490 void *arg)
4491 {
4492 struct of_reconfig_data *rd = arg;
4493 struct spi_controller *ctlr;
4494 struct spi_device *spi;
4495
4496 switch (of_reconfig_get_state_change(action, arg)) {
4497 case OF_RECONFIG_CHANGE_ADD:
4498 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4499 if (ctlr == NULL)
4500 return NOTIFY_OK; /* Not for us */
4501
4502 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4503 put_device(&ctlr->dev);
4504 return NOTIFY_OK;
4505 }
4506
4507 /*
4508 * Clear the flag before adding the device so that fw_devlink
4509 * doesn't skip adding consumers to this device.
4510 */
4511 rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
4512 spi = of_register_spi_device(ctlr, rd->dn);
4513 put_device(&ctlr->dev);
4514
4515 if (IS_ERR(spi)) {
4516 pr_err("%s: failed to create for '%pOF'\n",
4517 __func__, rd->dn);
4518 of_node_clear_flag(rd->dn, OF_POPULATED);
4519 return notifier_from_errno(PTR_ERR(spi));
4520 }
4521 break;
4522
4523 case OF_RECONFIG_CHANGE_REMOVE:
4524 /* Already depopulated? */
4525 if (!of_node_check_flag(rd->dn, OF_POPULATED))
4526 return NOTIFY_OK;
4527
4528 /* Find our device by node */
4529 spi = of_find_spi_device_by_node(rd->dn);
4530 if (spi == NULL)
4531 return NOTIFY_OK; /* No?
not meant for us */ 4532 4533 /* Unregister takes one ref away */ 4534 spi_unregister_device(spi); 4535 4536 /* And put the reference of the find */ 4537 put_device(&spi->dev); 4538 break; 4539 } 4540 4541 return NOTIFY_OK; 4542 } 4543 4544 static struct notifier_block spi_of_notifier = { 4545 .notifier_call = of_spi_notify, 4546 }; 4547 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 4548 extern struct notifier_block spi_of_notifier; 4549 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 4550 4551 #if IS_ENABLED(CONFIG_ACPI) 4552 static int spi_acpi_controller_match(struct device *dev, const void *data) 4553 { 4554 return ACPI_COMPANION(dev->parent) == data; 4555 } 4556 4557 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev) 4558 { 4559 struct device *dev; 4560 4561 dev = class_find_device(&spi_master_class, NULL, adev, 4562 spi_acpi_controller_match); 4563 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) 4564 dev = class_find_device(&spi_slave_class, NULL, adev, 4565 spi_acpi_controller_match); 4566 if (!dev) 4567 return NULL; 4568 4569 return container_of(dev, struct spi_controller, dev); 4570 } 4571 4572 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) 4573 { 4574 struct device *dev; 4575 4576 dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev); 4577 return to_spi_device(dev); 4578 } 4579 4580 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value, 4581 void *arg) 4582 { 4583 struct acpi_device *adev = arg; 4584 struct spi_controller *ctlr; 4585 struct spi_device *spi; 4586 4587 switch (value) { 4588 case ACPI_RECONFIG_DEVICE_ADD: 4589 ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev)); 4590 if (!ctlr) 4591 break; 4592 4593 acpi_register_spi_device(ctlr, adev); 4594 put_device(&ctlr->dev); 4595 break; 4596 case ACPI_RECONFIG_DEVICE_REMOVE: 4597 if (!acpi_device_enumerated(adev)) 4598 break; 4599 4600 spi = acpi_spi_find_device_by_adev(adev); 4601 if (!spi) 4602 break; 4603 4604 spi_unregister_device(spi); 4605 put_device(&spi->dev); 4606 break; 4607 } 4608 4609 return NOTIFY_OK; 4610 } 4611 4612 static struct notifier_block spi_acpi_notifier = { 4613 .notifier_call = acpi_spi_notify, 4614 }; 4615 #else 4616 extern struct notifier_block spi_acpi_notifier; 4617 #endif 4618 4619 static int __init spi_init(void) 4620 { 4621 int status; 4622 4623 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 4624 if (!buf) { 4625 status = -ENOMEM; 4626 goto err0; 4627 } 4628 4629 status = bus_register(&spi_bus_type); 4630 if (status < 0) 4631 goto err1; 4632 4633 status = class_register(&spi_master_class); 4634 if (status < 0) 4635 goto err2; 4636 4637 if (IS_ENABLED(CONFIG_SPI_SLAVE)) { 4638 status = class_register(&spi_slave_class); 4639 if (status < 0) 4640 goto err3; 4641 } 4642 4643 if (IS_ENABLED(CONFIG_OF_DYNAMIC)) 4644 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); 4645 if (IS_ENABLED(CONFIG_ACPI)) 4646 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier)); 4647 4648 return 0; 4649 4650 err3: 4651 class_unregister(&spi_master_class); 4652 err2: 4653 bus_unregister(&spi_bus_type); 4654 err1: 4655 kfree(buf); 4656 buf = NULL; 4657 err0: 4658 return status; 4659 } 4660 4661 /* 4662 * A board_info is normally registered in arch_initcall(), 4663 * but even essential drivers wait till later. 4664 * 4665 * REVISIT only boardinfo really needs static linking. The rest (device and 4666 * driver registration) _could_ be dynamically linked (modular) ... 
Costs 4667 * include needing to have boardinfo data structures be much more public. 4668 */ 4669 postcore_initcall(spi_init); 4670
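
/*
 * Example: a minimal register-read helper built on spi_write_then_read()
 * above. This is an illustrative sketch with a hypothetical device
 * protocol (1-byte register address, 1-byte value), not part of the core:
 *
 *	static int example_read_reg(struct spi_device *spi, u8 reg, u8 *val)
 *	{
 *		return spi_write_then_read(spi, &reg, 1, val, 1);
 *	}
 */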