Lines matching "spi-cs-setup-delay-ns" in drivers/spi/spi.c (tokenized query: +full:spi +full:- +full:cs +full:- +full:setup +full:- +full:delay +full:- +full:ns)

1 // SPDX-License-Identifier: GPL-2.0-or-later
2 // SPI init/core code
9 #include <linux/clk/clk-conf.h>
10 #include <linux/delay.h>
13 #include <linux/dma-mapping.h>
34 #include <linux/spi/spi.h>
35 #include <linux/spi/spi-mem.h>
39 #include <trace/events/spi.h>
49 struct spi_device *spi = to_spi_device(dev);
51 spi_controller_put(spi->controller);
52 kfree(spi->driver_override);
53 free_percpu(spi->pcpu_statistics);
54 kfree(spi);
60 const struct spi_device *spi = to_spi_device(dev);
63 len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
64 if (len != -ENODEV)
67 return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
75 struct spi_device *spi = to_spi_device(dev);
78 ret = driver_set_override(dev, &spi->driver_override, buf, count);
88 const struct spi_device *spi = to_spi_device(dev);
92 len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
114 u64_stats_init(&stat->syncp);
135 start = u64_stats_fetch_begin(&pcpu_stats->syncp);
137 } while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
150 return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
160 struct spi_device *spi = to_spi_device(dev); \
161 return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
198 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
199 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
200 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
201 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
202 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
203 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
204 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
205 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
206 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
207 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
208 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
209 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
210 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
211 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
212 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
213 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
317 int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
325 u64_stats_update_begin(&stats->syncp);
327 u64_stats_inc(&stats->transfers);
328 u64_stats_inc(&stats->transfer_bytes_histo[l2len]);
330 u64_stats_add(&stats->bytes, xfer->len);
331 if ((xfer->tx_buf) &&
332 (xfer->tx_buf != ctlr->dummy_tx))
333 u64_stats_add(&stats->bytes_tx, xfer->len);
334 if ((xfer->rx_buf) &&
335 (xfer->rx_buf != ctlr->dummy_rx))
336 u64_stats_add(&stats->bytes_rx, xfer->len);
338 u64_stats_update_end(&stats->syncp);
343 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
348 while (id->name[0]) {
349 if (!strcmp(name, id->name))
358 const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
360 return spi_match_id(sdrv->id_table, sdev->modalias);
368 match = device_get_match_data(&sdev->dev);
372 return (const void *)spi_get_device_id(sdev)->driver_data;
378 const struct spi_device *spi = to_spi_device(dev);
382 if (spi->driver_override)
383 return strcmp(spi->driver_override, drv->name) == 0;
393 if (sdrv->id_table)
394 return !!spi_match_id(sdrv->id_table, spi->modalias);
396 return strcmp(spi->modalias, drv->name) == 0;
401 const struct spi_device *spi = to_spi_device(dev);
405 if (rc != -ENODEV)
408 return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
413 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
414 struct spi_device *spi = to_spi_device(dev);
417 ret = of_clk_set_defaults(dev->of_node, false);
421 if (dev->of_node) {
422 spi->irq = of_irq_get(dev->of_node, 0);
423 if (spi->irq == -EPROBE_DEFER)
424 return -EPROBE_DEFER;
425 if (spi->irq < 0)
426 spi->irq = 0;
429 if (has_acpi_companion(dev) && spi->irq < 0) {
430 struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
432 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
433 if (spi->irq == -EPROBE_DEFER)
434 return -EPROBE_DEFER;
435 if (spi->irq < 0)
436 spi->irq = 0;
443 if (sdrv->probe) {
444 ret = sdrv->probe(spi);
454 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
456 if (sdrv->remove)
457 sdrv->remove(to_spi_device(dev));
464 if (dev->driver) {
465 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
467 if (sdrv->shutdown)
468 sdrv->shutdown(to_spi_device(dev));
473 .name = "spi",
484 * __spi_register_driver - register a SPI driver
493 sdrv->driver.owner = owner;
494 sdrv->driver.bus = &spi_bus_type;
497 * For Really Good Reasons we use spi: modaliases not of:
501 if (sdrv->driver.of_match_table) {
504 for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
509 of_name = strnchr(of_id->compatible,
510 sizeof(of_id->compatible), ',');
514 of_name = of_id->compatible;
516 if (sdrv->id_table) {
519 spi_id = spi_match_id(sdrv->id_table, of_name);
523 if (strcmp(sdrv->driver.name, of_name) == 0)
527 pr_warn("SPI driver %s has no spi_device_id for %s\n",
528 sdrv->driver.name, of_id->compatible);
532 return driver_register(&sdrv->driver);
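
For illustration, a minimal sketch of the consumer side of __spi_register_driver(): a protocol driver declares an id table and registers via the module_spi_driver() helper. All "example" names are hypothetical; the structures and macros are the real API.

static int example_probe(struct spi_device *spi) { return 0; }	/* stub */
static void example_remove(struct spi_device *spi) { }		/* stub */

static const struct spi_device_id example_spi_ids[] = {
	{ "example-chip" },
	{ }
};
MODULE_DEVICE_TABLE(spi, example_spi_ids);

static struct spi_driver example_spi_driver = {
	.driver		= { .name = "example-chip" },
	.id_table	= example_spi_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};
module_spi_driver(example_spi_driver);
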
536 /*-------------------------------------------------------------------------*/
539 * SPI devices should normally not be created by SPI device drivers; that
540 * would make them board-specific. Similarly with SPI controller drivers.
541 * Device registration normally goes into files like arch/.../mach.../board-YYY.c
561 * spi_alloc_device - Allocate a new SPI device
571 * spi_device structure to add it to the SPI controller. If the caller
579 struct spi_device *spi;
584 spi = kzalloc(sizeof(*spi), GFP_KERNEL);
585 if (!spi) {
590 spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
591 if (!spi->pcpu_statistics) {
592 kfree(spi);
597 spi->master = spi->controller = ctlr;
598 spi->dev.parent = &ctlr->dev;
599 spi->dev.bus = &spi_bus_type;
600 spi->dev.release = spidev_release;
601 spi->mode = ctlr->buswidth_override_bits;
603 device_initialize(&spi->dev);
604 return spi;
608 static void spi_dev_set_name(struct spi_device *spi)
610 struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
613 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
617 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
618 spi_get_chipselect(spi, 0));
623 struct spi_device *spi = to_spi_device(dev);
626 if (spi->controller == new_spi->controller &&
627 spi_get_chipselect(spi, 0) == spi_get_chipselect(new_spi, 0))
628 return -EBUSY;
632 static void spi_cleanup(struct spi_device *spi)
634 if (spi->controller->cleanup)
635 spi->controller->cleanup(spi);
638 static int __spi_add_device(struct spi_device *spi)
640 struct spi_controller *ctlr = spi->controller;
641 struct device *dev = ctlr->dev.parent;
645 if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
646 dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
647 ctlr->num_chipselect);
648 return -EINVAL;
652 spi_dev_set_name(spi);
656 * chipselect **BEFORE** we call setup(), else we'll trash
659 status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
662 spi_get_chipselect(spi, 0));
668 !device_is_registered(&ctlr->dev)) {
669 return -ENODEV;
672 if (ctlr->cs_gpiods)
673 spi_set_csgpiod(spi, 0, ctlr->cs_gpiods[spi_get_chipselect(spi, 0)]);
676 * Drivers may modify this initial i/o setup, but will
677 * normally rely on the device being set up. Devices
680 status = spi_setup(spi);
682 dev_err(dev, "can't setup %s, status %d\n",
683 dev_name(&spi->dev), status);
688 status = device_add(&spi->dev);
691 dev_name(&spi->dev), status);
692 spi_cleanup(spi);
694 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
701 * spi_add_device - Add spi_device allocated with spi_alloc_device
702 * @spi: spi_device to register
705 * spi_alloc_device can be added onto the SPI bus with this function.
709 int spi_add_device(struct spi_device *spi)
711 struct spi_controller *ctlr = spi->controller;
714 mutex_lock(&ctlr->add_lock);
715 status = __spi_add_device(spi);
716 mutex_unlock(&ctlr->add_lock);
722 * spi_new_device - instantiate one new SPI device
724 * @chip: Describes the SPI device
728 * after board init creates the hard-wired devices. Some development
731 * driver could add devices (which it would learn about out-of-band).
742 * NOTE: caller did any chip->bus_num checks necessary.
745 * error-or-pointer (not NULL-or-pointer), troubleshootability
753 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
755 spi_set_chipselect(proxy, 0, chip->chip_select);
756 proxy->max_speed_hz = chip->max_speed_hz;
757 proxy->mode = chip->mode;
758 proxy->irq = chip->irq;
759 strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
760 proxy->dev.platform_data = (void *) chip->platform_data;
761 proxy->controller_data = chip->controller_data;
762 proxy->controller_state = NULL;
764 if (chip->swnode) {
765 status = device_add_software_node(&proxy->dev, chip->swnode);
767 dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
768 chip->modalias, status);
780 device_remove_software_node(&proxy->dev);
787 * spi_unregister_device - unregister a single SPI device
788 * @spi: spi_device to unregister
790 * Start making the passed SPI device vanish. Normally this would be handled
793 void spi_unregister_device(struct spi_device *spi)
795 if (!spi)
798 if (spi->dev.of_node) {
799 of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
800 of_node_put(spi->dev.of_node);
802 if (ACPI_COMPANION(&spi->dev))
803 acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
804 device_remove_software_node(&spi->dev);
805 device_del(&spi->dev);
806 spi_cleanup(spi);
807 put_device(&spi->dev);
816 if (ctlr->bus_num != bi->bus_num)
821 dev_err(ctlr->dev.parent, "can't create new device for %s\n",
822 bi->modalias);
826 * spi_register_board_info - register SPI devices for a given board
831 * Board-specific early init code calls this (probably during arch_initcall)
832 * with segments of the SPI device table. Any device nodes are created later,
833 * after the relevant parent SPI controller (bus_num) is defined. We keep
835 * not make Linux forget about these hard-wired devices.
837 * Other code can also call this, e.g. a particular add-on board might provide
838 * SPI devices through its expansion connector, so code initializing that board
839 * would naturally declare its SPI devices.
842 * any embedded pointers (platform_data, etc), they're copied as-is.
856 return -ENOMEM;
861 memcpy(&bi->board_info, info, sizeof(*info));
864 list_add_tail(&bi->list, &board_list);
867 &bi->board_info);
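
To make the board-info path above concrete, a board file might declare its hard-wired devices like this (modalias, bus and chip-select values hypothetical):

static struct spi_board_info example_board_info[] __initdata = {
	{
		.modalias	= "example-chip",	/* hypothetical */
		.max_speed_hz	= 1000000,		/* 1 MHz */
		.bus_num	= 0,
		.chip_select	= 1,
		.mode		= SPI_MODE_0,
	},
};

/* typically from an arch_initcall, before the controller probes */
spi_register_board_info(example_board_info, ARRAY_SIZE(example_board_info));
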
874 /*-------------------------------------------------------------------------*/
876 /* Core methods for SPI resource management */
879 * spi_res_alloc - allocate a spi resource that is life-cycle managed
882 * @spi: the SPI device for which we allocate memory
892 static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
901 INIT_LIST_HEAD(&sres->entry);
902 sres->release = release;
904 return sres->data;
908 * spi_res_free - free an SPI resource
918 WARN_ON(!list_empty(&sres->entry));
923 * spi_res_add - add a spi_res to the spi_message
924 * @message: the SPI message
931 WARN_ON(!list_empty(&sres->entry));
932 list_add_tail(&sres->entry, &message->resources);
936 * spi_res_release - release all SPI resources for this message
944 list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
945 if (res->release)
946 res->release(ctlr, message, res->data);
948 list_del(&res->entry);
954 /*-------------------------------------------------------------------------*/
956 static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
964 if (!force && ((enable && spi->controller->last_cs == spi_get_chipselect(spi, 0)) ||
965 (!enable && spi->controller->last_cs != spi_get_chipselect(spi, 0))) &&
966 (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
969 trace_spi_set_cs(spi, activate);
971 spi->controller->last_cs = enable ? spi_get_chipselect(spi, 0) : -1;
972 spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
974 if ((spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) && !activate)
975 spi_delay_exec(&spi->cs_hold, NULL);
977 if (spi->mode & SPI_CS_HIGH)
980 if (spi_get_csgpiod(spi, 0)) {
981 if (!(spi->mode & SPI_NO_CS)) {
984 * thus the SPISerialBus() resource defines it on the per-chip
988 * the GPIO CS polarity must be defined Active High to avoid
992 if (has_acpi_companion(&spi->dev))
993 gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), !enable);
996 gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), activate);
998 /* Some SPI masters need both GPIO CS & slave_select */
999 if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
1000 spi->controller->set_cs)
1001 spi->controller->set_cs(spi, !enable);
1002 } else if (spi->controller->set_cs) {
1003 spi->controller->set_cs(spi, !enable);
1006 if (spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) {
1008 spi_delay_exec(&spi->cs_setup, NULL);
1010 spi_delay_exec(&spi->cs_inactive, NULL);
1040 desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
1043 return -EINVAL;
1050 sg = &sgt->sgl[0];
1061 PAGE_SIZE - offset_in_page(buf)));
1068 return -ENOMEM;
1079 len -= min;
1104 if (sgt->orig_nents) {
1107 sgt->orig_nents = 0;
1108 sgt->nents = 0;
1124 if (!ctlr->can_dma)
1127 if (ctlr->dma_tx)
1128 tx_dev = ctlr->dma_tx->device->dev;
1129 else if (ctlr->dma_map_dev)
1130 tx_dev = ctlr->dma_map_dev;
1132 tx_dev = ctlr->dev.parent;
1134 if (ctlr->dma_rx)
1135 rx_dev = ctlr->dma_rx->device->dev;
1136 else if (ctlr->dma_map_dev)
1137 rx_dev = ctlr->dma_map_dev;
1139 rx_dev = ctlr->dev.parent;
1141 ret = -ENOMSG;
1142 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1146 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1149 if (xfer->tx_buf != NULL) {
1150 ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1151 (void *)xfer->tx_buf,
1152 xfer->len, DMA_TO_DEVICE,
1158 if (xfer->rx_buf != NULL) {
1159 ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1160 xfer->rx_buf, xfer->len,
1164 &xfer->tx_sg, DMA_TO_DEVICE,
1175 ctlr->cur_rx_dma_dev = rx_dev;
1176 ctlr->cur_tx_dma_dev = tx_dev;
1177 ctlr->cur_msg_mapped = true;
1184 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1185 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1188 if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
1191 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1195 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1198 spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1200 spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1204 ctlr->cur_msg_mapped = false;
1212 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1213 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1215 if (!ctlr->cur_msg_mapped)
1218 if (xfer->tx_sg.orig_nents)
1219 dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1220 if (xfer->rx_sg.orig_nents)
1221 dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1227 struct device *rx_dev = ctlr->cur_rx_dma_dev;
1228 struct device *tx_dev = ctlr->cur_tx_dma_dev;
1230 if (!ctlr->cur_msg_mapped)
1233 if (xfer->rx_sg.orig_nents)
1234 dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1235 if (xfer->tx_sg.orig_nents)
1236 dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1267 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1272 if (xfer->tx_buf == ctlr->dummy_tx)
1273 xfer->tx_buf = NULL;
1274 if (xfer->rx_buf == ctlr->dummy_rx)
1275 xfer->rx_buf = NULL;
1287 if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
1288 && !(msg->spi->mode & SPI_3WIRE)) {
1292 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1293 if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
1294 !xfer->tx_buf)
1295 max_tx = max(xfer->len, max_tx);
1296 if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
1297 !xfer->rx_buf)
1298 max_rx = max(xfer->len, max_rx);
1302 tmp = krealloc(ctlr->dummy_tx, max_tx,
1305 return -ENOMEM;
1306 ctlr->dummy_tx = tmp;
1310 tmp = krealloc(ctlr->dummy_rx, max_rx,
1313 return -ENOMEM;
1314 ctlr->dummy_rx = tmp;
1318 list_for_each_entry(xfer, &msg->transfers,
1320 if (!xfer->len)
1322 if (!xfer->tx_buf)
1323 xfer->tx_buf = ctlr->dummy_tx;
1324 if (!xfer->rx_buf)
1325 xfer->rx_buf = ctlr->dummy_rx;
1337 struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1338 struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1339 u32 speed_hz = xfer->speed_hz;
1343 if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1344 dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1345 return -EINTR;
1352 * For each byte we wait for 8 cycles of the SPI clock.
1357 ms = 8LL * MSEC_PER_SEC * xfer->len;
1368 ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1374 dev_err(&msg->spi->dev,
1375 "SPI transfer timed out\n");
1376 return -ETIMEDOUT;
1383 static void _spi_transfer_delay_ns(u32 ns)
1385 if (!ns)
1387 if (ns <= NSEC_PER_USEC) {
1388 ndelay(ns);
1390 u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
1401 u32 delay = _delay->value;
1402 u32 unit = _delay->unit;
1405 if (!delay)
1410 delay *= NSEC_PER_USEC;
1418 return -EINVAL;
1423 hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
1425 return -EINVAL;
1427 /* Convert delay to nanoseconds */
1428 delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
1431 return -EINVAL;
1434 return delay;
1440 int delay;
1445 return -EINVAL;
1447 delay = spi_delay_to_ns(_delay, xfer);
1448 if (delay < 0)
1449 return delay;
1451 _spi_transfer_delay_ns(delay);
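
A minimal sketch of driving these helpers directly: fill a struct spi_delay and let spi_delay_exec() convert the value and busy-wait. The xfer argument is only required for clock-cycle (SCK) units; the value here is hypothetical.

struct spi_delay d = {
	.value	= 500,
	.unit	= SPI_DELAY_UNIT_NSECS,
};
int ret = spi_delay_exec(&d, NULL);	/* ~500 ns, via ndelay() */
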
1461 u32 delay = xfer->cs_change_delay.value;
1462 u32 unit = xfer->cs_change_delay.unit;
1465 /* Return early on "fast" mode - for everything but USECS */
1466 if (!delay) {
1472 ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
1474 dev_err_once(&msg->spi->dev,
1475 "Use of unsupported delay unit %i, using default of %luus\n",
1489 * spi_transfer_one_message - Default implementation of transfer_one_message()
1501 struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1502 struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1504 xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
1505 spi_set_cs(msg->spi, !xfer->cs_off, false);
1510 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1516 if (!ctlr->ptp_sts_supported) {
1517 xfer->ptp_sts_word_pre = 0;
1518 ptp_read_system_prets(xfer->ptp_sts);
1521 if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
1522 reinit_completion(&ctlr->xfer_completion);
1526 ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1530 if (ctlr->cur_msg_mapped &&
1531 (xfer->error & SPI_TRANS_FAIL_NO_START)) {
1533 ctlr->fallback = true;
1534 xfer->error &= ~SPI_TRANS_FAIL_NO_START;
1542 dev_err(&msg->spi->dev,
1543 "SPI transfer failed: %d\n", ret);
1550 msg->status = ret;
1555 if (xfer->len)
1556 dev_err(&msg->spi->dev,
1558 xfer->len);
1561 if (!ctlr->ptp_sts_supported) {
1562 ptp_read_system_postts(xfer->ptp_sts);
1563 xfer->ptp_sts_word_post = xfer->len;
1568 if (msg->status != -EINPROGRESS)
1573 if (xfer->cs_change) {
1574 if (list_is_last(&xfer->transfer_list,
1575 &msg->transfers)) {
1578 if (!xfer->cs_off)
1579 spi_set_cs(msg->spi, false, false);
1581 if (!list_next_entry(xfer, transfer_list)->cs_off)
1582 spi_set_cs(msg->spi, true, false);
1584 } else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
1585 xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
1586 spi_set_cs(msg->spi, xfer->cs_off, false);
1589 msg->actual_length += xfer->len;
1594 spi_set_cs(msg->spi, false, false);
1596 if (msg->status == -EINPROGRESS)
1597 msg->status = ret;
1599 if (msg->status && ctlr->handle_err)
1600 ctlr->handle_err(ctlr, msg);
1608 * spi_finalize_current_transfer - report completion of a transfer
1611 * Called by SPI drivers using the core transfer_one_message()
1617 complete(&ctlr->xfer_completion);
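
For illustration, a controller driver relying on the core transfer_one() path typically signals completion from its interrupt handler; the handler name and hardware bookkeeping are hypothetical.

static irqreturn_t example_spi_irq(int irq, void *dev_id)
{
	struct spi_controller *ctlr = dev_id;

	/* ... ack the hardware, drain FIFOs ... */
	spi_finalize_current_transfer(ctlr);
	return IRQ_HANDLED;
}
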
1623 if (ctlr->auto_runtime_pm) {
1624 pm_runtime_mark_last_busy(ctlr->dev.parent);
1625 pm_runtime_put_autosuspend(ctlr->dev.parent);
1635 if (!was_busy && ctlr->auto_runtime_pm) {
1636 ret = pm_runtime_get_sync(ctlr->dev.parent);
1638 pm_runtime_put_noidle(ctlr->dev.parent);
1639 dev_err(&ctlr->dev, "Failed to power device: %d\n",
1642 msg->status = ret;
1652 if (!was_busy && ctlr->prepare_transfer_hardware) {
1653 ret = ctlr->prepare_transfer_hardware(ctlr);
1655 dev_err(&ctlr->dev,
1659 if (ctlr->auto_runtime_pm)
1660 pm_runtime_put(ctlr->dev.parent);
1662 msg->status = ret;
1672 spi_max_transfer_size(msg->spi),
1675 msg->status = ret;
1680 if (ctlr->prepare_message) {
1681 ret = ctlr->prepare_message(ctlr, msg);
1683 dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1685 msg->status = ret;
1689 msg->prepared = true;
1694 msg->status = ret;
1699 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1700 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1701 xfer->ptp_sts_word_pre = 0;
1702 ptp_read_system_prets(xfer->ptp_sts);
1712 * ctlr->cur_msg.
1719 WRITE_ONCE(ctlr->cur_msg_incomplete, true);
1720 WRITE_ONCE(ctlr->cur_msg_need_completion, false);
1721 reinit_completion(&ctlr->cur_msg_completion);
1724 ret = ctlr->transfer_one_message(ctlr, msg);
1726 dev_err(&ctlr->dev,
1731 WRITE_ONCE(ctlr->cur_msg_need_completion, true);
1733 if (READ_ONCE(ctlr->cur_msg_incomplete))
1734 wait_for_completion(&ctlr->cur_msg_completion);
1740 * __spi_pump_messages - function which processes SPI message queue
1744 * This function checks if there is any SPI message in the queue that
1760 mutex_lock(&ctlr->io_mutex);
1763 spin_lock_irqsave(&ctlr->queue_lock, flags);
1766 if (ctlr->cur_msg)
1770 if (list_empty(&ctlr->queue) || !ctlr->running) {
1771 if (!ctlr->busy)
1774 /* Defer any non-atomic teardown to the thread */
1776 if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1777 !ctlr->unprepare_transfer_hardware) {
1779 ctlr->busy = false;
1780 ctlr->queue_empty = true;
1783 kthread_queue_work(ctlr->kworker,
1784 &ctlr->pump_messages);
1789 ctlr->busy = false;
1790 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1792 kfree(ctlr->dummy_rx);
1793 ctlr->dummy_rx = NULL;
1794 kfree(ctlr->dummy_tx);
1795 ctlr->dummy_tx = NULL;
1796 if (ctlr->unprepare_transfer_hardware &&
1797 ctlr->unprepare_transfer_hardware(ctlr))
1798 dev_err(&ctlr->dev,
1803 spin_lock_irqsave(&ctlr->queue_lock, flags);
1804 ctlr->queue_empty = true;
1809 msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1810 ctlr->cur_msg = msg;
1812 list_del_init(&msg->queue);
1813 if (ctlr->busy)
1816 ctlr->busy = true;
1817 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1820 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1822 ctlr->cur_msg = NULL;
1823 ctlr->fallback = false;
1825 mutex_unlock(&ctlr->io_mutex);
1833 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1834 mutex_unlock(&ctlr->io_mutex);
1838 * spi_pump_messages - kthread work function which processes spi message queue
1850 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1862 * for the requested byte from the SPI transfer. The frequency with which this
1873 if (!xfer->ptp_sts)
1876 if (xfer->timestamped)
1879 if (progress > xfer->ptp_sts_word_pre)
1883 xfer->ptp_sts_word_pre = progress;
1886 local_irq_save(ctlr->irq_flags);
1890 ptp_read_system_prets(xfer->ptp_sts);
1895 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1899 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1902 * the requested byte from the SPI transfer. Can be called with an arbitrary
1910 if (!xfer->ptp_sts)
1913 if (xfer->timestamped)
1916 if (progress < xfer->ptp_sts_word_post)
1919 ptp_read_system_postts(xfer->ptp_sts);
1922 local_irq_restore(ctlr->irq_flags);
1927 xfer->ptp_sts_word_post = progress;
1929 xfer->timestamped = 1;
1934 * spi_set_thread_rt - set the controller to pump at realtime priority
1938 * (by setting the ->rt value before calling spi_register_controller()) or
1950 dev_info(&ctlr->dev,
1952 sched_set_fifo(ctlr->kworker->task);
1957 ctlr->running = false;
1958 ctlr->busy = false;
1959 ctlr->queue_empty = true;
1961 ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
1962 if (IS_ERR(ctlr->kworker)) {
1963 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
1964 return PTR_ERR(ctlr->kworker);
1967 kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1972 * latency on the bus by minimising the delay between a transfer
1976 if (ctlr->rt)
1983 * spi_get_next_queued_message() - called by driver to check for queued
1998 spin_lock_irqsave(&ctlr->queue_lock, flags);
1999 next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
2001 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2008 * spi_finalize_current_message() - the current message is complete
2020 mesg = ctlr->cur_msg;
2022 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2023 list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
2024 ptp_read_system_postts(xfer->ptp_sts);
2025 xfer->ptp_sts_word_post = xfer->len;
2029 if (unlikely(ctlr->ptp_sts_supported))
2030 list_for_each_entry(xfer, &mesg->transfers, transfer_list)
2031 WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
2036 * In the prepare_message callback the SPI bus has the opportunity
2044 if (mesg->prepared && ctlr->unprepare_message) {
2045 ret = ctlr->unprepare_message(ctlr, mesg);
2047 dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2052 mesg->prepared = false;
2054 WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2056 if (READ_ONCE(ctlr->cur_msg_need_completion))
2057 complete(&ctlr->cur_msg_completion);
2061 mesg->state = NULL;
2062 if (mesg->complete)
2063 mesg->complete(mesg->context);
2071 spin_lock_irqsave(&ctlr->queue_lock, flags);
2073 if (ctlr->running || ctlr->busy) {
2074 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2075 return -EBUSY;
2078 ctlr->running = true;
2079 ctlr->cur_msg = NULL;
2080 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2082 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2093 spin_lock_irqsave(&ctlr->queue_lock, flags);
2097 * A wait_queue on the ctlr->busy could be used, but then the common
2099 * friends on every SPI message. Do this instead.
2101 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
2102 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2104 spin_lock_irqsave(&ctlr->queue_lock, flags);
2107 if (!list_empty(&ctlr->queue) || ctlr->busy)
2108 ret = -EBUSY;
2110 ctlr->running = false;
2112 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2115 dev_warn(&ctlr->dev, "could not stop message queue\n");
2134 dev_err(&ctlr->dev, "problem destroying queue\n");
2138 kthread_destroy_worker(ctlr->kworker);
2143 static int __spi_queued_transfer(struct spi_device *spi,
2147 struct spi_controller *ctlr = spi->controller;
2150 spin_lock_irqsave(&ctlr->queue_lock, flags);
2152 if (!ctlr->running) {
2153 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2154 return -ESHUTDOWN;
2156 msg->actual_length = 0;
2157 msg->status = -EINPROGRESS;
2159 list_add_tail(&msg->queue, &ctlr->queue);
2160 ctlr->queue_empty = false;
2161 if (!ctlr->busy && need_pump)
2162 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2164 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2169 * spi_queued_transfer - transfer function for queued transfers
2170 * @spi: SPI device which is requesting transfer
2171 * @msg: SPI message which is to be handled and queued to the driver queue
2175 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2177 return __spi_queued_transfer(spi, msg, true);
2184 ctlr->transfer = spi_queued_transfer;
2185 if (!ctlr->transfer_one_message)
2186 ctlr->transfer_one_message = spi_transfer_one_message;
2191 dev_err(&ctlr->dev, "problem initializing queue\n");
2194 ctlr->queued = true;
2197 dev_err(&ctlr->dev, "problem starting queue\n");
2210 * spi_flush_queue - Send all pending messages in the queue from the callers'
2215 * sent before doing something. It is used by the spi-mem code to make sure SPI
2216 * memory operations do not preempt regular SPI transfers that have been queued
2217 * before the spi-mem operation.
2221 if (ctlr->transfer == spi_queued_transfer)
2225 /*-------------------------------------------------------------------------*/
2229 struct spi_delay *delay, const char *prop)
2235 delay->value = DIV_ROUND_UP(value, 1000);
2236 delay->unit = SPI_DELAY_UNIT_USECS;
2238 delay->value = value;
2239 delay->unit = SPI_DELAY_UNIT_NSECS;
2244 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2251 if (of_property_read_bool(nc, "spi-cpha"))
2252 spi->mode |= SPI_CPHA;
2253 if (of_property_read_bool(nc, "spi-cpol"))
2254 spi->mode |= SPI_CPOL;
2255 if (of_property_read_bool(nc, "spi-3wire"))
2256 spi->mode |= SPI_3WIRE;
2257 if (of_property_read_bool(nc, "spi-lsb-first"))
2258 spi->mode |= SPI_LSB_FIRST;
2259 if (of_property_read_bool(nc, "spi-cs-high"))
2260 spi->mode |= SPI_CS_HIGH;
2263 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2266 spi->mode |= SPI_NO_TX;
2271 spi->mode |= SPI_TX_DUAL;
2274 spi->mode |= SPI_TX_QUAD;
2277 spi->mode |= SPI_TX_OCTAL;
2280 dev_warn(&ctlr->dev,
2281 "spi-tx-bus-width %d not supported\n",
2287 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2290 spi->mode |= SPI_NO_RX;
2295 spi->mode |= SPI_RX_DUAL;
2298 spi->mode |= SPI_RX_QUAD;
2301 spi->mode |= SPI_RX_OCTAL;
2304 dev_warn(&ctlr->dev,
2305 "spi-rx-bus-width %d not supported\n",
2313 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2315 return -EINVAL;
2323 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2327 spi_set_chipselect(spi, 0, value);
2330 if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2331 spi->max_speed_hz = value;
2333 /* Device CS delays */
2334 of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
2335 of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
2336 of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
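
These three properties come from the peripheral's DT node, e.g. "spi-cs-setup-delay-ns = <100>;". As a sketch under that assumption, a client driver without DT can request the same chip-select timing by filling the spi_delay fields before spi_setup() (values hypothetical):

spi->cs_setup.value	= 100;
spi->cs_setup.unit	= SPI_DELAY_UNIT_NSECS;
spi->cs_hold.value	= 1;
spi->cs_hold.unit	= SPI_DELAY_UNIT_USECS;
ret = spi_setup(spi);	/* spi_set_cs()/set_cs_timing() then honour these */
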
2344 struct spi_device *spi;
2348 spi = spi_alloc_device(ctlr);
2349 if (!spi) {
2350 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2351 rc = -ENOMEM;
2356 rc = of_alias_from_compatible(nc, spi->modalias,
2357 sizeof(spi->modalias));
2359 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2363 rc = of_spi_parse_dt(ctlr, spi, nc);
2370 device_set_node(&spi->dev, of_fwnode_handle(nc));
2373 rc = spi_add_device(spi);
2375 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2379 return spi;
2384 spi_dev_put(spi);
2389 * of_register_spi_devices() - Register child devices onto the SPI bus
2393 * represents a valid SPI slave.
2397 struct spi_device *spi;
2400 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2403 spi = of_register_spi_device(ctlr, nc);
2404 if (IS_ERR(spi)) {
2405 dev_warn(&ctlr->dev,
2406 "Failed to create SPI device for %pOF\n", nc);
2416 * spi_new_ancillary_device() - Register ancillary SPI device
2417 * @spi: Pointer to the main SPI device registering the ancillary device
2420 * Register an ancillary SPI device; for example some chips have a chip-select
2421 * for normal device usage and another one for setup/firmware upload.
2423 * This may only be called from the main SPI device's probe routine.
2427 struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2430 struct spi_controller *ctlr = spi->controller;
2437 rc = -ENOMEM;
2441 strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2443 /* Use provided chip-select for ancillary device */
2446 /* Take over SPI mode/speed from SPI main device */
2447 ancillary->max_speed_hz = spi->max_speed_hz;
2448 ancillary->mode = spi->mode;
2450 WARN_ON(!mutex_is_locked(&ctlr->add_lock));
2455 dev_err(&spi->dev, "failed to register ancillary device\n");
2484 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2487 sb = &ares->data.spi_serial_bus;
2488 if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2497 * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2500 * Return: the number of SpiSerialBus resources in the ACPI-device's
2501 * resource-list; or a negative error code.
2528 && obj->buffer.length >= 4)
2529 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2532 && obj->buffer.length == 8)
2533 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2536 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2537 lookup->mode |= SPI_LSB_FIRST;
2540 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2541 lookup->mode |= SPI_CPOL;
2544 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2545 lookup->mode |= SPI_CPHA;
2553 struct spi_controller *ctlr = lookup->ctlr;
2555 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2560 sb = &ares->data.spi_serial_bus;
2561 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2563 if (lookup->index != -1 && lookup->n++ != lookup->index)
2567 sb->resource_source.string_ptr,
2571 return -ENODEV;
2574 if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2575 return -ENODEV;
2581 return -ENODEV;
2585 return -EPROBE_DEFER;
2587 lookup->ctlr = ctlr;
2594 * 0 .. max - 1 so we need to ask the driver to
2597 if (ctlr->fw_translate_cs) {
2598 int cs = ctlr->fw_translate_cs(ctlr,
2599 sb->device_selection);
2600 if (cs < 0)
2601 return cs;
2602 lookup->chip_select = cs;
2604 lookup->chip_select = sb->device_selection;
2607 lookup->max_speed_hz = sb->connection_speed;
2608 lookup->bits_per_word = sb->data_bit_length;
2610 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2611 lookup->mode |= SPI_CPHA;
2612 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2613 lookup->mode |= SPI_CPOL;
2614 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2615 lookup->mode |= SPI_CS_HIGH;
2617 } else if (lookup->irq < 0) {
2621 lookup->irq = r.start;
2629 * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2630 * @ctlr: controller to which the spi device belongs
2631 * @adev: ACPI Device for the spi device
2632 * @index: Index of the spi resource inside the ACPI Node
2634 * This should be used to allocate a new SPI device from an ACPI Device node.
2635 * The caller is responsible for calling spi_add_device to register the SPI device.
2637 * If ctlr is set to NULL, the Controller for the SPI device will be looked up
2639 * If index is set to -1, index is not used.
2640 * Note: If index is -1, ctlr must be set.
2651 struct spi_device *spi;
2654 if (!ctlr && index == -1)
2655 return ERR_PTR(-EINVAL);
2658 lookup.irq = -1;
2668 /* Found SPI in _CRS but it points to another controller */
2672 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2673 ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
2674 /* Apple does not use _CRS but nested devices for SPI slaves */
2679 return ERR_PTR(-ENODEV);
2681 spi = spi_alloc_device(lookup.ctlr);
2682 if (!spi) {
2683 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2684 dev_name(&adev->dev));
2685 return ERR_PTR(-ENOMEM);
2688 ACPI_COMPANION_SET(&spi->dev, adev);
2689 spi->max_speed_hz = lookup.max_speed_hz;
2690 spi->mode |= lookup.mode;
2691 spi->irq = lookup.irq;
2692 spi->bits_per_word = lookup.bits_per_word;
2693 spi_set_chipselect(spi, 0, lookup.chip_select);
2695 return spi;
2702 struct spi_device *spi;
2704 if (acpi_bus_get_status(adev) || !adev->status.present ||
2708 spi = acpi_spi_device_alloc(ctlr, adev, -1);
2709 if (IS_ERR(spi)) {
2710 if (PTR_ERR(spi) == -ENOMEM)
2716 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2717 sizeof(spi->modalias));
2721 adev->power.flags.ignore_parent = true;
2722 if (spi_add_device(spi)) {
2723 adev->power.flags.ignore_parent = false;
2724 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2725 dev_name(&adev->dev));
2726 spi_dev_put(spi);
2751 handle = ACPI_HANDLE(ctlr->dev.parent);
2759 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2781 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2783 * @spi: device used for the current transfer
2785 int spi_slave_abort(struct spi_device *spi)
2787 struct spi_controller *ctlr = spi->controller;
2789 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2790 return ctlr->slave_abort(ctlr);
2792 return -ENOTSUPP;
2796 int spi_target_abort(struct spi_device *spi)
2798 struct spi_controller *ctlr = spi->controller;
2800 if (spi_controller_is_target(ctlr) && ctlr->target_abort)
2801 return ctlr->target_abort(ctlr);
2803 return -ENOTSUPP;
2814 child = device_find_any_child(&ctlr->dev);
2815 return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
2823 struct spi_device *spi;
2830 return -EINVAL;
2832 child = device_find_any_child(&ctlr->dev);
2841 spi = spi_alloc_device(ctlr);
2842 if (!spi)
2843 return -ENOMEM;
2845 strscpy(spi->modalias, name, sizeof(spi->modalias));
2847 rc = spi_add_device(spi);
2849 spi_dev_put(spi);
2884 * __spi_alloc_controller - allocate an SPI master or slave controller
2886 * @size: how much zeroed driver-private data to allocate; the pointer to this
2891 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2895 * This call is used only by SPI controller drivers, which are the
2906 * Return: the SPI controller structure on success, else NULL.
2921 device_initialize(&ctlr->dev);
2922 INIT_LIST_HEAD(&ctlr->queue);
2923 spin_lock_init(&ctlr->queue_lock);
2924 spin_lock_init(&ctlr->bus_lock_spinlock);
2925 mutex_init(&ctlr->bus_lock_mutex);
2926 mutex_init(&ctlr->io_mutex);
2927 mutex_init(&ctlr->add_lock);
2928 ctlr->bus_num = -1;
2929 ctlr->num_chipselect = 1;
2930 ctlr->slave = slave;
2932 ctlr->dev.class = &spi_slave_class;
2934 ctlr->dev.class = &spi_master_class;
2935 ctlr->dev.parent = dev;
2936 pm_suspend_ignore_children(&ctlr->dev, true);
2949 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
2950 * @dev: physical device of SPI controller
2951 * @size: how much zeroed driver-private data to allocate
2952 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
2955 * Allocate an SPI controller and automatically release a reference on it
2961 * Return: the SPI controller structure on success, else NULL.
2976 ctlr->devm_allocated = true;
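
A sketch of the intended pairing in a platform driver's probe (private struct and transfer callback hypothetical): allocate with the devm variant, configure, then hand off to devm_spi_register_controller().

ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct example_priv));
if (!ctlr)
	return -ENOMEM;

ctlr->num_chipselect	= 4;
ctlr->mode_bits		= SPI_CPOL | SPI_CPHA;
ctlr->transfer_one	= example_transfer_one;	/* hypothetical */

return devm_spi_register_controller(&pdev->dev, ctlr);
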
2988 * spi_get_gpio_descs() - grab chip select GPIOs for the master
2989 * @ctlr: The SPI master to grab GPIO descriptors for
2994 struct gpio_desc **cs;
2995 struct device *dev = &ctlr->dev;
2999 nb = gpiod_count(dev, "cs");
3002 if (nb == -ENOENT)
3007 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
3009 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
3011 if (!cs)
3012 return -ENOMEM;
3013 ctlr->cs_gpiods = cs;
3023 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
3025 if (IS_ERR(cs[i]))
3026 return PTR_ERR(cs[i]);
3028 if (cs[i]) {
3030 * If we find a CS GPIO, name it after the device and
3035 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
3038 return -ENOMEM;
3039 gpiod_set_consumer_name(cs[i], gpioname);
3044 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3046 return -EINVAL;
3051 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3053 if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
3054 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3056 return -EINVAL;
3065 * The controller may implement only the high-level SPI-memory like
3066 * operations if it does not support regular SPI transfers, and this is
3068 * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
3069 * one of the ->transfer_xxx() method be implemented.
3071 if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
3072 if (!ctlr->transfer && !ctlr->transfer_one &&
3073 !ctlr->transfer_one_message) {
3074 return -EINVAL;
3090 return id == -ENOSPC ? -EBUSY : id;
3091 ctlr->bus_num = id;
3096 * spi_register_controller - register SPI master or slave controller
3101 * SPI controllers connect to their drivers using some non-SPI bus,
3103 * includes calling spi_register_controller() to hook up to this SPI bus glue.
3105 * SPI controllers use board specific (often SOC specific) bus numbers,
3106 * and board-specific addressing for SPI devices combines those numbers
3107 * with chip select numbers. Since SPI does not directly support dynamic
3120 struct device *dev = ctlr->dev.parent;
3126 return -ENODEV;
3130 * the SPI controller.
3136 if (ctlr->bus_num < 0)
3137 ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
3138 if (ctlr->bus_num >= 0) {
3139 /* Devices with a fixed bus num must check-in with the num */
3140 status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
3144 if (ctlr->bus_num < 0) {
3145 first_dynamic = of_alias_get_highest_id("spi");
3155 ctlr->bus_lock_flag = 0;
3156 init_completion(&ctlr->xfer_completion);
3157 init_completion(&ctlr->cur_msg_completion);
3158 if (!ctlr->max_dma_len)
3159 ctlr->max_dma_len = INT_MAX;
3165 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3167 if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
3175 ctlr->mode_bits |= SPI_CS_HIGH;
3179 * Even if it's just one always-selected device, there must
3182 if (!ctlr->num_chipselect) {
3183 status = -EINVAL;
3187 /* Setting last_cs to -1 means no chip selected */
3188 ctlr->last_cs = -1;
3190 status = device_add(&ctlr->dev);
3195 dev_name(&ctlr->dev));
3199 * need the queueing logic if the driver is only supporting high-level
3202 if (ctlr->transfer) {
3204 } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3207 device_del(&ctlr->dev);
3212 ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3213 if (!ctlr->pcpu_statistics) {
3214 dev_err(dev, "Error allocating per-cpu statistics\n");
3215 status = -ENOMEM;
3220 list_add_tail(&ctlr->list, &spi_controller_list);
3222 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3234 idr_remove(&spi_master_idr, ctlr->bus_num);
3246 * devm_spi_register_controller - register managed SPI master or slave
3248 * @dev: device managing SPI controller
3253 * Register a SPI device as with spi_register_controller() which will
3266 return -ENOMEM;
3287 * spi_unregister_controller - unregister SPI master or slave controller
3291 * This call is used only by SPI controller drivers, which are the
3301 int id = ctlr->bus_num;
3305 mutex_lock(&ctlr->add_lock);
3307 device_for_each_child(&ctlr->dev, NULL, __unregister);
3313 if (ctlr->queued) {
3315 dev_err(&ctlr->dev, "queue remove failed\n");
3318 list_del(&ctlr->list);
3321 device_del(&ctlr->dev);
3330 mutex_unlock(&ctlr->add_lock);
3336 if (!ctlr->devm_allocated)
3337 put_device(&ctlr->dev);
3343 return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
3348 mutex_lock(&ctlr->bus_lock_mutex);
3349 ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
3350 mutex_unlock(&ctlr->bus_lock_mutex);
3355 mutex_lock(&ctlr->bus_lock_mutex);
3356 ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
3357 mutex_unlock(&ctlr->bus_lock_mutex);
3364 /* Basically no-ops for non-queued controllers */
3365 if (ctlr->queued) {
3368 dev_err(&ctlr->dev, "queue stop failed\n");
3382 if (ctlr->queued) {
3385 dev_err(&ctlr->dev, "queue restart failed\n");
3391 /*-------------------------------------------------------------------------*/
3403 if (rxfer->release)
3404 rxfer->release(ctlr, msg, res);
3407 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3410 for (i = 0; i < rxfer->inserted; i++)
3411 list_del(&rxfer->inserted_transfers[i].transfer_list);
3415 * spi_replace_transfers - replace transfers with several transfers
3443 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3448 return ERR_PTR(-ENOMEM);
3451 rxfer->release = release;
3455 rxfer->extradata =
3456 &rxfer->inserted_transfers[insert];
3459 INIT_LIST_HEAD(&rxfer->replaced_transfers);
3463 * the @replaced_transfers - it may be spi_message.transfers!
3465 rxfer->replaced_after = xfer_first->transfer_list.prev;
3470 * If the entry after replaced_after is msg->transfers
3474 if (rxfer->replaced_after->next == &msg->transfers) {
3475 dev_err(&msg->spi->dev,
3478 list_splice(&rxfer->replaced_transfers,
3479 rxfer->replaced_after);
3485 return ERR_PTR(-EINVAL);
3492 list_move_tail(rxfer->replaced_after->next,
3493 &rxfer->replaced_transfers);
3502 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3508 list_add(&xfer->transfer_list, rxfer->replaced_after);
3510 /* Clear cs_change and delay for all but the last */
3512 xfer->cs_change = false;
3513 xfer->delay.value = 0;
3518 rxfer->inserted = insert;
3538 count = DIV_ROUND_UP(xfer->len, maxsize);
3544 xfers = srt->inserted_transfers;
3576 xfers[i].len = min(maxsize, xfers[i].len - offset);
3583 *xferp = &xfers[count - 1];
3586 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3588 SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3595 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3620 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3621 if (xfer->len > maxsize) {
3635 * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
3637 * certain number of SPI words
3659 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3663 maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word));
3664 if (xfer->len > maxsize) {
3676 /*-------------------------------------------------------------------------*/
3679 * Core methods for SPI controller protocol drivers. Some of the
3686 if (ctlr->bits_per_word_mask) {
3689 return -EINVAL;
3690 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3691 return -EINVAL;
3698 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3699 * @spi: the device that requires specific CS timing configuration
3703 static int spi_set_cs_timing(struct spi_device *spi)
3705 struct device *parent = spi->controller->dev.parent;
3708 if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
3709 if (spi->controller->auto_runtime_pm) {
3713 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3718 status = spi->controller->set_cs_timing(spi);
3722 status = spi->controller->set_cs_timing(spi);
3729 * spi_setup - setup SPI mode and clock rate
3730 * @spi: the device whose settings are being modified
3733 * SPI protocol drivers may need to update the transfer mode if the
3739 * or from it. When this function returns, the SPI device is deselected.
3744 * LSB-first wire encoding, or active-high chipselects.
3748 int spi_setup(struct spi_device *spi)
3757 if ((hweight_long(spi->mode &
3759 (hweight_long(spi->mode &
3761 dev_err(&spi->dev,
3762 "setup: cannot select any two of dual, quad and no-rx/tx at the same time\n");
3763 return -EINVAL;
3766 if ((spi->mode & SPI_3WIRE) && (spi->mode &
3769 return -EINVAL;
3776 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3782 dev_warn(&spi->dev,
3783 "setup: ignoring unsupported mode bits %x\n",
3785 spi->mode &= ~ugly_bits;
3789 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3791 return -EINVAL;
3794 if (!spi->bits_per_word) {
3795 spi->bits_per_word = 8;
3798 * Some controllers may not support the default 8 bits-per-word
3801 status = __spi_validate_bits_per_word(spi->controller,
3802 spi->bits_per_word);
3807 if (spi->controller->max_speed_hz &&
3808 (!spi->max_speed_hz ||
3809 spi->max_speed_hz > spi->controller->max_speed_hz))
3810 spi->max_speed_hz = spi->controller->max_speed_hz;
3812 mutex_lock(&spi->controller->io_mutex);
3814 if (spi->controller->setup) {
3815 status = spi->controller->setup(spi);
3817 mutex_unlock(&spi->controller->io_mutex);
3818 dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3824 status = spi_set_cs_timing(spi);
3826 mutex_unlock(&spi->controller->io_mutex);
3830 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3831 status = pm_runtime_resume_and_get(spi->controller->dev.parent);
3833 mutex_unlock(&spi->controller->io_mutex);
3834 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3842 * checking for a non-zero return value instead of a negative
3847 spi_set_cs(spi, false, true);
3848 pm_runtime_mark_last_busy(spi->controller->dev.parent);
3849 pm_runtime_put_autosuspend(spi->controller->dev.parent);
3851 spi_set_cs(spi, false, true);
3854 mutex_unlock(&spi->controller->io_mutex);
3856 if (spi->rt && !spi->controller->rt) {
3857 spi->controller->rt = true;
3858 spi_set_thread_rt(spi->controller);
3861 trace_spi_setup(spi, status);
3863 dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
3864 spi->mode & SPI_MODE_X_MASK,
3865 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
3866 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
3867 (spi->mode & SPI_3WIRE) ? "3wire, " : "",
3868 (spi->mode & SPI_LOOP) ? "loopback, " : "",
3869 spi->bits_per_word, spi->max_speed_hz,
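
Typical caller-side use of spi_setup(), as a minimal sketch (mode, word size and speed values hypothetical):

spi->mode		= SPI_MODE_3;
spi->bits_per_word	= 16;
spi->max_speed_hz	= 5000000;
status = spi_setup(spi);
if (status < 0)
	dev_err(&spi->dev, "spi_setup() failed: %d\n", status);
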
3877 struct spi_device *spi)
3881 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
3885 delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
3890 memcpy(&xfer->word_delay, &spi->word_delay,
3891 sizeof(xfer->word_delay));
3896 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3898 struct spi_controller *ctlr = spi->controller;
3902 if (list_empty(&message->transfers))
3903 return -EINVAL;
3906 * If an SPI controller does not support toggling the CS line on each
3908 * for the CS line, we can emulate the CS-per-word hardware function by
3909 * splitting transfers into one-word transfers and ensuring that
3912 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3913 spi_get_csgpiod(spi, 0))) {
3914 size_t maxsize = BITS_TO_BYTES(spi->bits_per_word);
3917 /* spi_split_transfers_maxsize() requires message->spi */
3918 message->spi = spi;
3925 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3927 if (list_is_last(&xfer->transfer_list, &message->transfers))
3929 xfer->cs_change = 1;
3934 * Half-duplex links include original MicroWire, and ones with
3939 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3940 (spi->mode & SPI_3WIRE)) {
3941 unsigned flags = ctlr->flags;
3943 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3944 if (xfer->rx_buf && xfer->tx_buf)
3945 return -EINVAL;
3946 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
3947 return -EINVAL;
3948 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
3949 return -EINVAL;
3954 * Set transfer bits_per_word and max speed as spi device default if
3961 message->frame_length = 0;
3962 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3963 xfer->effective_speed_hz = 0;
3964 message->frame_length += xfer->len;
3965 if (!xfer->bits_per_word)
3966 xfer->bits_per_word = spi->bits_per_word;
3968 if (!xfer->speed_hz)
3969 xfer->speed_hz = spi->max_speed_hz;
3971 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3972 xfer->speed_hz = ctlr->max_speed_hz;
3974 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3975 return -EINVAL;
3978 * SPI transfer length should be a multiple of the SPI word size,
3979 * where the SPI word size is rounded up to a power-of-two number of bytes.
3981 if (xfer->bits_per_word <= 8)
3983 else if (xfer->bits_per_word <= 16)
3989 if (xfer->len % w_size)
3990 return -EINVAL;
3992 if (xfer->speed_hz && ctlr->min_speed_hz &&
3993 xfer->speed_hz < ctlr->min_speed_hz)
3994 return -EINVAL;
3996 if (xfer->tx_buf && !xfer->tx_nbits)
3997 xfer->tx_nbits = SPI_NBITS_SINGLE;
3998 if (xfer->rx_buf && !xfer->rx_nbits)
3999 xfer->rx_nbits = SPI_NBITS_SINGLE;
4005 if (xfer->tx_buf) {
4006 if (spi->mode & SPI_NO_TX)
4007 return -EINVAL;
4008 if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
4009 xfer->tx_nbits != SPI_NBITS_DUAL &&
4010 xfer->tx_nbits != SPI_NBITS_QUAD &&
4011 xfer->tx_nbits != SPI_NBITS_OCTAL)
4012 return -EINVAL;
4013 if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
4014 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
4015 return -EINVAL;
4016 if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
4017 !(spi->mode & SPI_TX_QUAD))
4018 return -EINVAL;
4021 if (xfer->rx_buf) {
4022 if (spi->mode & SPI_NO_RX)
4023 return -EINVAL;
4024 if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
4025 xfer->rx_nbits != SPI_NBITS_DUAL &&
4026 xfer->rx_nbits != SPI_NBITS_QUAD &&
4027 xfer->rx_nbits != SPI_NBITS_OCTAL)
4028 return -EINVAL;
4029 if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
4030 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
4031 return -EINVAL;
4032 if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
4033 !(spi->mode & SPI_RX_QUAD))
4034 return -EINVAL;
4037 if (_spi_xfer_word_delay_update(xfer, spi))
4038 return -EINVAL;
4041 message->status = -EINPROGRESS;
4046 static int __spi_async(struct spi_device *spi, struct spi_message *message)
4048 struct spi_controller *ctlr = spi->controller;
4052 * Some controllers do not support doing regular SPI transfers. Return
4055 if (!ctlr->transfer)
4056 return -ENOTSUPP;
4058 message->spi = spi;
4060 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
4061 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
4065 if (!ctlr->ptp_sts_supported) {
4066 list_for_each_entry(xfer, &message->transfers, transfer_list) {
4067 xfer->ptp_sts_word_pre = 0;
4068 ptp_read_system_prets(xfer->ptp_sts);
4072 return ctlr->transfer(spi, message);
4076 * spi_async - asynchronous SPI transfer
4077 * @spi: device with which data will be exchanged
4085 * Before that invocation, the value of message->status is undefined.
4086 * When the callback is issued, message->status holds either zero (to
4089 * deallocate the associated memory; it's no longer in use by any SPI
4106 int spi_async(struct spi_device *spi, struct spi_message *message)
4108 struct spi_controller *ctlr = spi->controller;
4112 ret = __spi_validate(spi, message);
4116 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4118 if (ctlr->bus_lock_flag)
4119 ret = -EBUSY;
4121 ret = __spi_async(spi, message);
4123 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
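
A sketch of the spi_async() completion contract described above; the callback runs in the controller's context and must not sleep (callback name and caller fragment hypothetical).

static void example_done(void *context)
{
	complete(context);	/* may be called from IRQ context */
}

/* fragment from a caller: */
DECLARE_COMPLETION_ONSTACK(done);

message->complete = example_done;
message->context  = &done;
ret = spi_async(spi, message);
if (!ret) {
	wait_for_completion(&done);
	ret = message->status;
}
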
4130 * spi_async_locked - version of spi_async with exclusive bus usage
4131 * @spi: device with which data will be exchanged
4139 * Before that invocation, the value of message->status is undefined.
4140 * When the callback is issued, message->status holds either zero (to
4143 * deallocate the associated memory; it's no longer in use by any SPI
4160 static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
4162 struct spi_controller *ctlr = spi->controller;
4166 ret = __spi_validate(spi, message);
4170 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4172 ret = __spi_async(spi, message);
4174 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4185 mutex_lock(&ctlr->io_mutex);
4187 was_busy = ctlr->busy;
4189 ctlr->cur_msg = msg;
4192 dev_err(&ctlr->dev, "noqueue transfer failed\n");
4193 ctlr->cur_msg = NULL;
4194 ctlr->fallback = false;
4197 kfree(ctlr->dummy_rx);
4198 ctlr->dummy_rx = NULL;
4199 kfree(ctlr->dummy_tx);
4200 ctlr->dummy_tx = NULL;
4201 if (ctlr->unprepare_transfer_hardware &&
4202 ctlr->unprepare_transfer_hardware(ctlr))
4203 dev_err(&ctlr->dev,
4208 mutex_unlock(&ctlr->io_mutex);
4211 /*-------------------------------------------------------------------------*/
4214 * Utility methods for SPI protocol drivers, layered on
4224 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
4228 struct spi_controller *ctlr = spi->controller;
4231 dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
4232 return -ESHUTDOWN;
4235 status = __spi_validate(spi, message);
4239 message->spi = spi;
4241 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
4242 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
4250 if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4251 message->actual_length = 0;
4252 message->status = -EINPROGRESS;
4256 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4257 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
4261 return message->status;
4270 message->complete = spi_complete;
4271 message->context = &done;
4272 status = spi_async_locked(spi, message);
4275 status = message->status;
4277 message->complete = NULL;
4278 message->context = NULL;
4284 * spi_sync - blocking/synchronous SPI data transfers
4285 * @spi: device with which data will be exchanged
4290 * is non-interruptible, and has no timeout. Low-overhead controller
4293 * Note that the SPI device's chip select is active during the message,
4295 * frequently-used devices may want to minimize costs of selecting a chip,
4304 int spi_sync(struct spi_device *spi, struct spi_message *message)
4308 mutex_lock(&spi->controller->bus_lock_mutex);
4309 ret = __spi_sync(spi, message);
4310 mutex_unlock(&spi->controller->bus_lock_mutex);
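
Typical synchronous usage, as a minimal sketch assuming DMA-safe buffers tx_buf/rx_buf of length len:

struct spi_transfer t = {
	.tx_buf	= tx_buf,
	.rx_buf	= rx_buf,
	.len	= len,
};
struct spi_message m;

spi_message_init(&m);
spi_message_add_tail(&t, &m);
ret = spi_sync(spi, &m);
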
4317 * spi_sync_locked - version of spi_sync with exclusive bus usage
4318 * @spi: device with which data will be exchanged
4323 * is non-interruptible, and has no timeout. Low-overhead controller
4327 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4332 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4334 return __spi_sync(spi, message);
4339 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4340 * @ctlr: SPI bus master that should be locked for exclusive bus access
4344 * is non-interruptible, and has no timeout.
4347 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4349 * and spi_async_locked calls when the SPI bus lock is held.
4357 mutex_lock(&ctlr->bus_lock_mutex);
4359 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4360 ctlr->bus_lock_flag = 1;
4361 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4370 * spi_bus_unlock - release the lock for exclusive SPI bus usage
4371 * @ctlr: SPI bus master that was locked for exclusive bus access
4375 * is non-interruptible, and has no timeout.
4377 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4384 ctlr->bus_lock_flag = 0;
4386 mutex_unlock(&ctlr->bus_lock_mutex);
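
A sketch of the intended lock pairing: hold the bus across a multi-message sequence and use only the _locked transfer variants in between (msg1/msg2 prepared elsewhere).

spi_bus_lock(spi->controller);
ret = spi_sync_locked(spi, &msg1);
if (!ret)
	ret = spi_sync_locked(spi, &msg2);
spi_bus_unlock(spi->controller);
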
4398 * spi_write_then_read - SPI synchronous write followed by read
4399 * @spi: device with which data will be exchanged
4400 * @txbuf: data to be written (need not be DMA-safe)
4402 * @rxbuf: buffer into which data will be read (need not be DMA-safe)
4412 * Performance-sensitive or bulk transfer code should instead use
4413 * spi_{async,sync}() calls with DMA-safe buffers.
4417 int spi_write_then_read(struct spi_device *spi,
4429 * Use preallocated DMA-safe buffer if we can. We can't avoid
4432 * using the pre-allocated buffer or the transfer is too large.
4438 return -ENOMEM;
4459 status = spi_sync(spi, &message);
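
Typical use, e.g. fetching a small ID register; stack buffers are fine here because the helper bounces through its own DMA-safe buffer (the opcode is hypothetical):

u8 cmd = 0x9f;		/* hypothetical "read ID" opcode */
u8 id[3];

status = spi_write_then_read(spi, &cmd, sizeof(cmd), id, sizeof(id));
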
4472 /*-------------------------------------------------------------------------*/
4483 /* The SPI controllers are not using spi_bus, so we find them another way */
4503 struct spi_device *spi;
4507 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4511 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4512 put_device(&ctlr->dev);
4520 rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
4521 spi = of_register_spi_device(ctlr, rd->dn);
4522 put_device(&ctlr->dev);
4524 if (IS_ERR(spi)) {
4526 __func__, rd->dn);
4527 of_node_clear_flag(rd->dn, OF_POPULATED);
4528 return notifier_from_errno(PTR_ERR(spi));
4534 if (!of_node_check_flag(rd->dn, OF_POPULATED))
4538 spi = of_find_spi_device_by_node(rd->dn);
4539 if (spi == NULL)
4543 spi_unregister_device(spi);
4546 put_device(&spi->dev);
4563 return ACPI_COMPANION(dev->parent) == data;
4594 struct spi_device *spi;
4603 put_device(&ctlr->dev);
4609 spi = acpi_spi_find_device_by_adev(adev);
4610 if (!spi)
4613 spi_unregister_device(spi);
4614 put_device(&spi->dev);
4634 status = -ENOMEM;