1 /* 2 * SPI init/core code 3 * 4 * Copyright (C) 2005 David Brownell 5 * Copyright (C) 2008 Secret Lab Technologies Ltd. 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by 9 * the Free Software Foundation; either version 2 of the License, or 10 * (at your option) any later version. 11 * 12 * This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details. 16 */ 17 18 #include <linux/kernel.h> 19 #include <linux/device.h> 20 #include <linux/init.h> 21 #include <linux/cache.h> 22 #include <linux/dma-mapping.h> 23 #include <linux/dmaengine.h> 24 #include <linux/mutex.h> 25 #include <linux/of_device.h> 26 #include <linux/of_irq.h> 27 #include <linux/clk/clk-conf.h> 28 #include <linux/slab.h> 29 #include <linux/mod_devicetable.h> 30 #include <linux/spi/spi.h> 31 #include <linux/spi/spi-mem.h> 32 #include <linux/of_gpio.h> 33 #include <linux/pm_runtime.h> 34 #include <linux/pm_domain.h> 35 #include <linux/property.h> 36 #include <linux/export.h> 37 #include <linux/sched/rt.h> 38 #include <uapi/linux/sched/types.h> 39 #include <linux/delay.h> 40 #include <linux/kthread.h> 41 #include <linux/ioport.h> 42 #include <linux/acpi.h> 43 #include <linux/highmem.h> 44 #include <linux/idr.h> 45 #include <linux/platform_data/x86/apple.h> 46 47 #define CREATE_TRACE_POINTS 48 #include <trace/events/spi.h> 49 50 #include "internals.h" 51 52 static DEFINE_IDR(spi_master_idr); 53 54 static void spidev_release(struct device *dev) 55 { 56 struct spi_device *spi = to_spi_device(dev); 57 58 /* spi controllers may cleanup for released devices */ 59 if (spi->controller->cleanup) 60 spi->controller->cleanup(spi); 61 62 spi_controller_put(spi->controller); 63 kfree(spi); 64 } 65 66 static ssize_t 67 modalias_show(struct device *dev, struct device_attribute *a, char *buf) 68 { 69 const struct spi_device *spi = to_spi_device(dev); 70 int len; 71 72 len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); 73 if (len != -ENODEV) 74 return len; 75 76 return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias); 77 } 78 static DEVICE_ATTR_RO(modalias); 79 80 #define SPI_STATISTICS_ATTRS(field, file) \ 81 static ssize_t spi_controller_##field##_show(struct device *dev, \ 82 struct device_attribute *attr, \ 83 char *buf) \ 84 { \ 85 struct spi_controller *ctlr = container_of(dev, \ 86 struct spi_controller, dev); \ 87 return spi_statistics_##field##_show(&ctlr->statistics, buf); \ 88 } \ 89 static struct device_attribute dev_attr_spi_controller_##field = { \ 90 .attr = { .name = file, .mode = 0444 }, \ 91 .show = spi_controller_##field##_show, \ 92 }; \ 93 static ssize_t spi_device_##field##_show(struct device *dev, \ 94 struct device_attribute *attr, \ 95 char *buf) \ 96 { \ 97 struct spi_device *spi = to_spi_device(dev); \ 98 return spi_statistics_##field##_show(&spi->statistics, buf); \ 99 } \ 100 static struct device_attribute dev_attr_spi_device_##field = { \ 101 .attr = { .name = file, .mode = 0444 }, \ 102 .show = spi_device_##field##_show, \ 103 } 104 105 #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \ 106 static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \ 107 char *buf) \ 108 { \ 109 unsigned long flags; \ 110 ssize_t len; \ 111 spin_lock_irqsave(&stat->lock, flags); \ 112 len = sprintf(buf, 
format_string, stat->field); \ 113 spin_unlock_irqrestore(&stat->lock, flags); \ 114 return len; \ 115 } \ 116 SPI_STATISTICS_ATTRS(name, file) 117 118 #define SPI_STATISTICS_SHOW(field, format_string) \ 119 SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \ 120 field, format_string) 121 122 SPI_STATISTICS_SHOW(messages, "%lu"); 123 SPI_STATISTICS_SHOW(transfers, "%lu"); 124 SPI_STATISTICS_SHOW(errors, "%lu"); 125 SPI_STATISTICS_SHOW(timedout, "%lu"); 126 127 SPI_STATISTICS_SHOW(spi_sync, "%lu"); 128 SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu"); 129 SPI_STATISTICS_SHOW(spi_async, "%lu"); 130 131 SPI_STATISTICS_SHOW(bytes, "%llu"); 132 SPI_STATISTICS_SHOW(bytes_rx, "%llu"); 133 SPI_STATISTICS_SHOW(bytes_tx, "%llu"); 134 135 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \ 136 SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \ 137 "transfer_bytes_histo_" number, \ 138 transfer_bytes_histo[index], "%lu") 139 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1"); 140 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3"); 141 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7"); 142 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15"); 143 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31"); 144 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63"); 145 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127"); 146 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255"); 147 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511"); 148 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023"); 149 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047"); 150 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095"); 151 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191"); 152 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383"); 153 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767"); 154 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535"); 155 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+"); 156 157 SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu"); 158 159 static struct attribute *spi_dev_attrs[] = { 160 &dev_attr_modalias.attr, 161 NULL, 162 }; 163 164 static const struct attribute_group spi_dev_group = { 165 .attrs = spi_dev_attrs, 166 }; 167 168 static struct attribute *spi_device_statistics_attrs[] = { 169 &dev_attr_spi_device_messages.attr, 170 &dev_attr_spi_device_transfers.attr, 171 &dev_attr_spi_device_errors.attr, 172 &dev_attr_spi_device_timedout.attr, 173 &dev_attr_spi_device_spi_sync.attr, 174 &dev_attr_spi_device_spi_sync_immediate.attr, 175 &dev_attr_spi_device_spi_async.attr, 176 &dev_attr_spi_device_bytes.attr, 177 &dev_attr_spi_device_bytes_rx.attr, 178 &dev_attr_spi_device_bytes_tx.attr, 179 &dev_attr_spi_device_transfer_bytes_histo0.attr, 180 &dev_attr_spi_device_transfer_bytes_histo1.attr, 181 &dev_attr_spi_device_transfer_bytes_histo2.attr, 182 &dev_attr_spi_device_transfer_bytes_histo3.attr, 183 &dev_attr_spi_device_transfer_bytes_histo4.attr, 184 &dev_attr_spi_device_transfer_bytes_histo5.attr, 185 &dev_attr_spi_device_transfer_bytes_histo6.attr, 186 &dev_attr_spi_device_transfer_bytes_histo7.attr, 187 &dev_attr_spi_device_transfer_bytes_histo8.attr, 188 &dev_attr_spi_device_transfer_bytes_histo9.attr, 189 &dev_attr_spi_device_transfer_bytes_histo10.attr, 190 &dev_attr_spi_device_transfer_bytes_histo11.attr, 191 &dev_attr_spi_device_transfer_bytes_histo12.attr, 192 &dev_attr_spi_device_transfer_bytes_histo13.attr, 193 &dev_attr_spi_device_transfer_bytes_histo14.attr, 194 &dev_attr_spi_device_transfer_bytes_histo15.attr, 195 &dev_attr_spi_device_transfer_bytes_histo16.attr, 196 
&dev_attr_spi_device_transfers_split_maxsize.attr, 197 NULL, 198 }; 199 200 static const struct attribute_group spi_device_statistics_group = { 201 .name = "statistics", 202 .attrs = spi_device_statistics_attrs, 203 }; 204 205 static const struct attribute_group *spi_dev_groups[] = { 206 &spi_dev_group, 207 &spi_device_statistics_group, 208 NULL, 209 }; 210 211 static struct attribute *spi_controller_statistics_attrs[] = { 212 &dev_attr_spi_controller_messages.attr, 213 &dev_attr_spi_controller_transfers.attr, 214 &dev_attr_spi_controller_errors.attr, 215 &dev_attr_spi_controller_timedout.attr, 216 &dev_attr_spi_controller_spi_sync.attr, 217 &dev_attr_spi_controller_spi_sync_immediate.attr, 218 &dev_attr_spi_controller_spi_async.attr, 219 &dev_attr_spi_controller_bytes.attr, 220 &dev_attr_spi_controller_bytes_rx.attr, 221 &dev_attr_spi_controller_bytes_tx.attr, 222 &dev_attr_spi_controller_transfer_bytes_histo0.attr, 223 &dev_attr_spi_controller_transfer_bytes_histo1.attr, 224 &dev_attr_spi_controller_transfer_bytes_histo2.attr, 225 &dev_attr_spi_controller_transfer_bytes_histo3.attr, 226 &dev_attr_spi_controller_transfer_bytes_histo4.attr, 227 &dev_attr_spi_controller_transfer_bytes_histo5.attr, 228 &dev_attr_spi_controller_transfer_bytes_histo6.attr, 229 &dev_attr_spi_controller_transfer_bytes_histo7.attr, 230 &dev_attr_spi_controller_transfer_bytes_histo8.attr, 231 &dev_attr_spi_controller_transfer_bytes_histo9.attr, 232 &dev_attr_spi_controller_transfer_bytes_histo10.attr, 233 &dev_attr_spi_controller_transfer_bytes_histo11.attr, 234 &dev_attr_spi_controller_transfer_bytes_histo12.attr, 235 &dev_attr_spi_controller_transfer_bytes_histo13.attr, 236 &dev_attr_spi_controller_transfer_bytes_histo14.attr, 237 &dev_attr_spi_controller_transfer_bytes_histo15.attr, 238 &dev_attr_spi_controller_transfer_bytes_histo16.attr, 239 &dev_attr_spi_controller_transfers_split_maxsize.attr, 240 NULL, 241 }; 242 243 static const struct attribute_group spi_controller_statistics_group = { 244 .name = "statistics", 245 .attrs = spi_controller_statistics_attrs, 246 }; 247 248 static const struct attribute_group *spi_master_groups[] = { 249 &spi_controller_statistics_group, 250 NULL, 251 }; 252 253 void spi_statistics_add_transfer_stats(struct spi_statistics *stats, 254 struct spi_transfer *xfer, 255 struct spi_controller *ctlr) 256 { 257 unsigned long flags; 258 int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1; 259 260 if (l2len < 0) 261 l2len = 0; 262 263 spin_lock_irqsave(&stats->lock, flags); 264 265 stats->transfers++; 266 stats->transfer_bytes_histo[l2len]++; 267 268 stats->bytes += xfer->len; 269 if ((xfer->tx_buf) && 270 (xfer->tx_buf != ctlr->dummy_tx)) 271 stats->bytes_tx += xfer->len; 272 if ((xfer->rx_buf) && 273 (xfer->rx_buf != ctlr->dummy_rx)) 274 stats->bytes_rx += xfer->len; 275 276 spin_unlock_irqrestore(&stats->lock, flags); 277 } 278 EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats); 279 280 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work, 281 * and the sysfs version makes coldplug work too. 
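 *
 * As an illustration (driver and device names here are hypothetical, not
 * taken from this file), a client driver advertises the names it can bind
 * to through an id_table, and module autoloading then follows from the
 * modalias reported by this core:
 *
 *	static const struct spi_device_id foo_ids[] = {
 *		{ "foo-sensor", 0 },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_ids);
 *
 *	static struct spi_driver foo_driver = {
 *		.driver   = { .name = "foo-sensor" },
 *		.id_table = foo_ids,
 *		.probe    = foo_probe,
 *		.remove   = foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 *
 * A device whose modalias is "foo-sensor" emits MODALIAS=spi:foo-sensor,
 * which "modprobe" resolves through the device table above.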
282 */ 283 284 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, 285 const struct spi_device *sdev) 286 { 287 while (id->name[0]) { 288 if (!strcmp(sdev->modalias, id->name)) 289 return id; 290 id++; 291 } 292 return NULL; 293 } 294 295 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev) 296 { 297 const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver); 298 299 return spi_match_id(sdrv->id_table, sdev); 300 } 301 EXPORT_SYMBOL_GPL(spi_get_device_id); 302 303 static int spi_match_device(struct device *dev, struct device_driver *drv) 304 { 305 const struct spi_device *spi = to_spi_device(dev); 306 const struct spi_driver *sdrv = to_spi_driver(drv); 307 308 /* Attempt an OF style match */ 309 if (of_driver_match_device(dev, drv)) 310 return 1; 311 312 /* Then try ACPI */ 313 if (acpi_driver_match_device(dev, drv)) 314 return 1; 315 316 if (sdrv->id_table) 317 return !!spi_match_id(sdrv->id_table, spi); 318 319 return strcmp(spi->modalias, drv->name) == 0; 320 } 321 322 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env) 323 { 324 const struct spi_device *spi = to_spi_device(dev); 325 int rc; 326 327 rc = acpi_device_uevent_modalias(dev, env); 328 if (rc != -ENODEV) 329 return rc; 330 331 return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias); 332 } 333 334 struct bus_type spi_bus_type = { 335 .name = "spi", 336 .dev_groups = spi_dev_groups, 337 .match = spi_match_device, 338 .uevent = spi_uevent, 339 }; 340 EXPORT_SYMBOL_GPL(spi_bus_type); 341 342 343 static int spi_drv_probe(struct device *dev) 344 { 345 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 346 struct spi_device *spi = to_spi_device(dev); 347 int ret; 348 349 ret = of_clk_set_defaults(dev->of_node, false); 350 if (ret) 351 return ret; 352 353 if (dev->of_node) { 354 spi->irq = of_irq_get(dev->of_node, 0); 355 if (spi->irq == -EPROBE_DEFER) 356 return -EPROBE_DEFER; 357 if (spi->irq < 0) 358 spi->irq = 0; 359 } 360 361 ret = dev_pm_domain_attach(dev, true); 362 if (ret) 363 return ret; 364 365 ret = sdrv->probe(spi); 366 if (ret) 367 dev_pm_domain_detach(dev, true); 368 369 return ret; 370 } 371 372 static int spi_drv_remove(struct device *dev) 373 { 374 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 375 int ret; 376 377 ret = sdrv->remove(to_spi_device(dev)); 378 dev_pm_domain_detach(dev, true); 379 380 return ret; 381 } 382 383 static void spi_drv_shutdown(struct device *dev) 384 { 385 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 386 387 sdrv->shutdown(to_spi_device(dev)); 388 } 389 390 /** 391 * __spi_register_driver - register a SPI driver 392 * @owner: owner module of the driver to register 393 * @sdrv: the driver to register 394 * Context: can sleep 395 * 396 * Return: zero on success, else a negative error code. 397 */ 398 int __spi_register_driver(struct module *owner, struct spi_driver *sdrv) 399 { 400 sdrv->driver.owner = owner; 401 sdrv->driver.bus = &spi_bus_type; 402 if (sdrv->probe) 403 sdrv->driver.probe = spi_drv_probe; 404 if (sdrv->remove) 405 sdrv->driver.remove = spi_drv_remove; 406 if (sdrv->shutdown) 407 sdrv->driver.shutdown = spi_drv_shutdown; 408 return driver_register(&sdrv->driver); 409 } 410 EXPORT_SYMBOL_GPL(__spi_register_driver); 411 412 /*-------------------------------------------------------------------------*/ 413 414 /* SPI devices should normally not be created by SPI device drivers; that 415 * would make them board-specific. 
Similarly with SPI controller drivers. 416 * Device registration normally goes into like arch/.../mach.../board-YYY.c 417 * with other readonly (flashable) information about mainboard devices. 418 */ 419 420 struct boardinfo { 421 struct list_head list; 422 struct spi_board_info board_info; 423 }; 424 425 static LIST_HEAD(board_list); 426 static LIST_HEAD(spi_controller_list); 427 428 /* 429 * Used to protect add/del opertion for board_info list and 430 * spi_controller list, and their matching process 431 * also used to protect object of type struct idr 432 */ 433 static DEFINE_MUTEX(board_lock); 434 435 /** 436 * spi_alloc_device - Allocate a new SPI device 437 * @ctlr: Controller to which device is connected 438 * Context: can sleep 439 * 440 * Allows a driver to allocate and initialize a spi_device without 441 * registering it immediately. This allows a driver to directly 442 * fill the spi_device with device parameters before calling 443 * spi_add_device() on it. 444 * 445 * Caller is responsible to call spi_add_device() on the returned 446 * spi_device structure to add it to the SPI controller. If the caller 447 * needs to discard the spi_device without adding it, then it should 448 * call spi_dev_put() on it. 449 * 450 * Return: a pointer to the new device, or NULL. 451 */ 452 struct spi_device *spi_alloc_device(struct spi_controller *ctlr) 453 { 454 struct spi_device *spi; 455 456 if (!spi_controller_get(ctlr)) 457 return NULL; 458 459 spi = kzalloc(sizeof(*spi), GFP_KERNEL); 460 if (!spi) { 461 spi_controller_put(ctlr); 462 return NULL; 463 } 464 465 spi->master = spi->controller = ctlr; 466 spi->dev.parent = &ctlr->dev; 467 spi->dev.bus = &spi_bus_type; 468 spi->dev.release = spidev_release; 469 spi->cs_gpio = -ENOENT; 470 471 spin_lock_init(&spi->statistics.lock); 472 473 device_initialize(&spi->dev); 474 return spi; 475 } 476 EXPORT_SYMBOL_GPL(spi_alloc_device); 477 478 static void spi_dev_set_name(struct spi_device *spi) 479 { 480 struct acpi_device *adev = ACPI_COMPANION(&spi->dev); 481 482 if (adev) { 483 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev)); 484 return; 485 } 486 487 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev), 488 spi->chip_select); 489 } 490 491 static int spi_dev_check(struct device *dev, void *data) 492 { 493 struct spi_device *spi = to_spi_device(dev); 494 struct spi_device *new_spi = data; 495 496 if (spi->controller == new_spi->controller && 497 spi->chip_select == new_spi->chip_select) 498 return -EBUSY; 499 return 0; 500 } 501 502 /** 503 * spi_add_device - Add spi_device allocated with spi_alloc_device 504 * @spi: spi_device to register 505 * 506 * Companion function to spi_alloc_device. Devices allocated with 507 * spi_alloc_device can be added onto the spi bus with this function. 508 * 509 * Return: 0 on success; negative errno on failure 510 */ 511 int spi_add_device(struct spi_device *spi) 512 { 513 static DEFINE_MUTEX(spi_add_lock); 514 struct spi_controller *ctlr = spi->controller; 515 struct device *dev = ctlr->dev.parent; 516 int status; 517 518 /* Chipselects are numbered 0..max; validate. */ 519 if (spi->chip_select >= ctlr->num_chipselect) { 520 dev_err(dev, "cs%d >= max %d\n", spi->chip_select, 521 ctlr->num_chipselect); 522 return -EINVAL; 523 } 524 525 /* Set the bus ID string */ 526 spi_dev_set_name(spi); 527 528 /* We need to make sure there's no other device with this 529 * chipselect **BEFORE** we call setup(), else we'll trash 530 * its configuration. Lock against concurrent add() calls. 
531 */ 532 mutex_lock(&spi_add_lock); 533 534 status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check); 535 if (status) { 536 dev_err(dev, "chipselect %d already in use\n", 537 spi->chip_select); 538 goto done; 539 } 540 541 if (ctlr->cs_gpios) 542 spi->cs_gpio = ctlr->cs_gpios[spi->chip_select]; 543 544 /* Drivers may modify this initial i/o setup, but will 545 * normally rely on the device being setup. Devices 546 * using SPI_CS_HIGH can't coexist well otherwise... 547 */ 548 status = spi_setup(spi); 549 if (status < 0) { 550 dev_err(dev, "can't setup %s, status %d\n", 551 dev_name(&spi->dev), status); 552 goto done; 553 } 554 555 /* Device may be bound to an active driver when this returns */ 556 status = device_add(&spi->dev); 557 if (status < 0) 558 dev_err(dev, "can't add %s, status %d\n", 559 dev_name(&spi->dev), status); 560 else 561 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev)); 562 563 done: 564 mutex_unlock(&spi_add_lock); 565 return status; 566 } 567 EXPORT_SYMBOL_GPL(spi_add_device); 568 569 /** 570 * spi_new_device - instantiate one new SPI device 571 * @ctlr: Controller to which device is connected 572 * @chip: Describes the SPI device 573 * Context: can sleep 574 * 575 * On typical mainboards, this is purely internal; and it's not needed 576 * after board init creates the hard-wired devices. Some development 577 * platforms may not be able to use spi_register_board_info though, and 578 * this is exported so that for example a USB or parport based adapter 579 * driver could add devices (which it would learn about out-of-band). 580 * 581 * Return: the new device, or NULL. 582 */ 583 struct spi_device *spi_new_device(struct spi_controller *ctlr, 584 struct spi_board_info *chip) 585 { 586 struct spi_device *proxy; 587 int status; 588 589 /* NOTE: caller did any chip->bus_num checks necessary. 590 * 591 * Also, unless we change the return value convention to use 592 * error-or-pointer (not NULL-or-pointer), troubleshootability 593 * suggests syslogged diagnostics are best here (ugh). 594 */ 595 596 proxy = spi_alloc_device(ctlr); 597 if (!proxy) 598 return NULL; 599 600 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); 601 602 proxy->chip_select = chip->chip_select; 603 proxy->max_speed_hz = chip->max_speed_hz; 604 proxy->mode = chip->mode; 605 proxy->irq = chip->irq; 606 strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias)); 607 proxy->dev.platform_data = (void *) chip->platform_data; 608 proxy->controller_data = chip->controller_data; 609 proxy->controller_state = NULL; 610 611 if (chip->properties) { 612 status = device_add_properties(&proxy->dev, chip->properties); 613 if (status) { 614 dev_err(&ctlr->dev, 615 "failed to add properties to '%s': %d\n", 616 chip->modalias, status); 617 goto err_dev_put; 618 } 619 } 620 621 status = spi_add_device(proxy); 622 if (status < 0) 623 goto err_remove_props; 624 625 return proxy; 626 627 err_remove_props: 628 if (chip->properties) 629 device_remove_properties(&proxy->dev); 630 err_dev_put: 631 spi_dev_put(proxy); 632 return NULL; 633 } 634 EXPORT_SYMBOL_GPL(spi_new_device); 635 636 /** 637 * spi_unregister_device - unregister a single SPI device 638 * @spi: spi_device to unregister 639 * 640 * Start making the passed SPI device vanish. Normally this would be handled 641 * by spi_unregister_controller(). 
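 *
 * For example, a hypothetical adapter driver that instantiated a child
 * earlier with spi_new_device() would drop it again on teardown with
 * (field name illustrative only):
 *
 *	spi_unregister_device(adapter->flash);
 *	adapter->flash = NULL;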
642 */ 643 void spi_unregister_device(struct spi_device *spi) 644 { 645 if (!spi) 646 return; 647 648 if (spi->dev.of_node) { 649 of_node_clear_flag(spi->dev.of_node, OF_POPULATED); 650 of_node_put(spi->dev.of_node); 651 } 652 if (ACPI_COMPANION(&spi->dev)) 653 acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev)); 654 device_unregister(&spi->dev); 655 } 656 EXPORT_SYMBOL_GPL(spi_unregister_device); 657 658 static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr, 659 struct spi_board_info *bi) 660 { 661 struct spi_device *dev; 662 663 if (ctlr->bus_num != bi->bus_num) 664 return; 665 666 dev = spi_new_device(ctlr, bi); 667 if (!dev) 668 dev_err(ctlr->dev.parent, "can't create new device for %s\n", 669 bi->modalias); 670 } 671 672 /** 673 * spi_register_board_info - register SPI devices for a given board 674 * @info: array of chip descriptors 675 * @n: how many descriptors are provided 676 * Context: can sleep 677 * 678 * Board-specific early init code calls this (probably during arch_initcall) 679 * with segments of the SPI device table. Any device nodes are created later, 680 * after the relevant parent SPI controller (bus_num) is defined. We keep 681 * this table of devices forever, so that reloading a controller driver will 682 * not make Linux forget about these hard-wired devices. 683 * 684 * Other code can also call this, e.g. a particular add-on board might provide 685 * SPI devices through its expansion connector, so code initializing that board 686 * would naturally declare its SPI devices. 687 * 688 * The board info passed can safely be __initdata ... but be careful of 689 * any embedded pointers (platform_data, etc), they're copied as-is. 690 * Device properties are deep-copied though. 691 * 692 * Return: zero on success, else a negative error code. 
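 *
 * A minimal, hypothetical board file fragment (the modalias, bus number,
 * chip select and clock rate below are illustrative only):
 *
 *	static struct spi_board_info board_spi_devs[] __initdata = {
 *		{
 *			.modalias	= "foo-flash",
 *			.max_speed_hz	= 20000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	static int __init board_spi_init(void)
 *	{
 *		return spi_register_board_info(board_spi_devs,
 *					       ARRAY_SIZE(board_spi_devs));
 *	}
 *	arch_initcall(board_spi_init);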
693 */ 694 int spi_register_board_info(struct spi_board_info const *info, unsigned n) 695 { 696 struct boardinfo *bi; 697 int i; 698 699 if (!n) 700 return 0; 701 702 bi = kcalloc(n, sizeof(*bi), GFP_KERNEL); 703 if (!bi) 704 return -ENOMEM; 705 706 for (i = 0; i < n; i++, bi++, info++) { 707 struct spi_controller *ctlr; 708 709 memcpy(&bi->board_info, info, sizeof(*info)); 710 if (info->properties) { 711 bi->board_info.properties = 712 property_entries_dup(info->properties); 713 if (IS_ERR(bi->board_info.properties)) 714 return PTR_ERR(bi->board_info.properties); 715 } 716 717 mutex_lock(&board_lock); 718 list_add_tail(&bi->list, &board_list); 719 list_for_each_entry(ctlr, &spi_controller_list, list) 720 spi_match_controller_to_boardinfo(ctlr, 721 &bi->board_info); 722 mutex_unlock(&board_lock); 723 } 724 725 return 0; 726 } 727 728 /*-------------------------------------------------------------------------*/ 729 730 static void spi_set_cs(struct spi_device *spi, bool enable) 731 { 732 if (spi->mode & SPI_CS_HIGH) 733 enable = !enable; 734 735 if (gpio_is_valid(spi->cs_gpio)) { 736 gpio_set_value(spi->cs_gpio, !enable); 737 /* Some SPI masters need both GPIO CS & slave_select */ 738 if ((spi->controller->flags & SPI_MASTER_GPIO_SS) && 739 spi->controller->set_cs) 740 spi->controller->set_cs(spi, !enable); 741 } else if (spi->controller->set_cs) { 742 spi->controller->set_cs(spi, !enable); 743 } 744 } 745 746 #ifdef CONFIG_HAS_DMA 747 int spi_map_buf(struct spi_controller *ctlr, struct device *dev, 748 struct sg_table *sgt, void *buf, size_t len, 749 enum dma_data_direction dir) 750 { 751 const bool vmalloced_buf = is_vmalloc_addr(buf); 752 unsigned int max_seg_size = dma_get_max_seg_size(dev); 753 #ifdef CONFIG_HIGHMEM 754 const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE && 755 (unsigned long)buf < (PKMAP_BASE + 756 (LAST_PKMAP * PAGE_SIZE))); 757 #else 758 const bool kmap_buf = false; 759 #endif 760 int desc_len; 761 int sgs; 762 struct page *vm_page; 763 struct scatterlist *sg; 764 void *sg_buf; 765 size_t min; 766 int i, ret; 767 768 if (vmalloced_buf || kmap_buf) { 769 desc_len = min_t(int, max_seg_size, PAGE_SIZE); 770 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); 771 } else if (virt_addr_valid(buf)) { 772 desc_len = min_t(int, max_seg_size, ctlr->max_dma_len); 773 sgs = DIV_ROUND_UP(len, desc_len); 774 } else { 775 return -EINVAL; 776 } 777 778 ret = sg_alloc_table(sgt, sgs, GFP_KERNEL); 779 if (ret != 0) 780 return ret; 781 782 sg = &sgt->sgl[0]; 783 for (i = 0; i < sgs; i++) { 784 785 if (vmalloced_buf || kmap_buf) { 786 /* 787 * Next scatterlist entry size is the minimum between 788 * the desc_len and the remaining buffer length that 789 * fits in a page. 
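 *
 * Worked example (illustrative numbers, assuming max_seg_size >= PAGE_SIZE
 * and PAGE_SIZE == 4096): a vmalloc'ed buffer starting at page offset
 * 0x700 (1792) with len 10000 is split into 2304 bytes (up to the end of
 * the first page), then 4096 bytes, then the remaining 3600 bytes.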
790 */ 791 min = min_t(size_t, desc_len, 792 min_t(size_t, len, 793 PAGE_SIZE - offset_in_page(buf))); 794 if (vmalloced_buf) 795 vm_page = vmalloc_to_page(buf); 796 else 797 vm_page = kmap_to_page(buf); 798 if (!vm_page) { 799 sg_free_table(sgt); 800 return -ENOMEM; 801 } 802 sg_set_page(sg, vm_page, 803 min, offset_in_page(buf)); 804 } else { 805 min = min_t(size_t, len, desc_len); 806 sg_buf = buf; 807 sg_set_buf(sg, sg_buf, min); 808 } 809 810 buf += min; 811 len -= min; 812 sg = sg_next(sg); 813 } 814 815 ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir); 816 if (!ret) 817 ret = -ENOMEM; 818 if (ret < 0) { 819 sg_free_table(sgt); 820 return ret; 821 } 822 823 sgt->nents = ret; 824 825 return 0; 826 } 827 828 void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev, 829 struct sg_table *sgt, enum dma_data_direction dir) 830 { 831 if (sgt->orig_nents) { 832 dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir); 833 sg_free_table(sgt); 834 } 835 } 836 837 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) 838 { 839 struct device *tx_dev, *rx_dev; 840 struct spi_transfer *xfer; 841 int ret; 842 843 if (!ctlr->can_dma) 844 return 0; 845 846 if (ctlr->dma_tx) 847 tx_dev = ctlr->dma_tx->device->dev; 848 else 849 tx_dev = ctlr->dev.parent; 850 851 if (ctlr->dma_rx) 852 rx_dev = ctlr->dma_rx->device->dev; 853 else 854 rx_dev = ctlr->dev.parent; 855 856 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 857 if (!ctlr->can_dma(ctlr, msg->spi, xfer)) 858 continue; 859 860 if (xfer->tx_buf != NULL) { 861 ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg, 862 (void *)xfer->tx_buf, xfer->len, 863 DMA_TO_DEVICE); 864 if (ret != 0) 865 return ret; 866 } 867 868 if (xfer->rx_buf != NULL) { 869 ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg, 870 xfer->rx_buf, xfer->len, 871 DMA_FROM_DEVICE); 872 if (ret != 0) { 873 spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, 874 DMA_TO_DEVICE); 875 return ret; 876 } 877 } 878 } 879 880 ctlr->cur_msg_mapped = true; 881 882 return 0; 883 } 884 885 static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg) 886 { 887 struct spi_transfer *xfer; 888 struct device *tx_dev, *rx_dev; 889 890 if (!ctlr->cur_msg_mapped || !ctlr->can_dma) 891 return 0; 892 893 if (ctlr->dma_tx) 894 tx_dev = ctlr->dma_tx->device->dev; 895 else 896 tx_dev = ctlr->dev.parent; 897 898 if (ctlr->dma_rx) 899 rx_dev = ctlr->dma_rx->device->dev; 900 else 901 rx_dev = ctlr->dev.parent; 902 903 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 904 if (!ctlr->can_dma(ctlr, msg->spi, xfer)) 905 continue; 906 907 spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); 908 spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); 909 } 910 911 return 0; 912 } 913 #else /* !CONFIG_HAS_DMA */ 914 static inline int __spi_map_msg(struct spi_controller *ctlr, 915 struct spi_message *msg) 916 { 917 return 0; 918 } 919 920 static inline int __spi_unmap_msg(struct spi_controller *ctlr, 921 struct spi_message *msg) 922 { 923 return 0; 924 } 925 #endif /* !CONFIG_HAS_DMA */ 926 927 static inline int spi_unmap_msg(struct spi_controller *ctlr, 928 struct spi_message *msg) 929 { 930 struct spi_transfer *xfer; 931 932 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 933 /* 934 * Restore the original value of tx_buf or rx_buf if they are 935 * NULL. 
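 * That is, transfers whose tx_buf or rx_buf was originally NULL may have
 * been pointed at ctlr->dummy_tx / ctlr->dummy_rx by spi_map_msg(); put
 * the NULL back so the caller gets its message back unmodified.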
936 */ 937 if (xfer->tx_buf == ctlr->dummy_tx) 938 xfer->tx_buf = NULL; 939 if (xfer->rx_buf == ctlr->dummy_rx) 940 xfer->rx_buf = NULL; 941 } 942 943 return __spi_unmap_msg(ctlr, msg); 944 } 945 946 static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) 947 { 948 struct spi_transfer *xfer; 949 void *tmp; 950 unsigned int max_tx, max_rx; 951 952 if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) { 953 max_tx = 0; 954 max_rx = 0; 955 956 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 957 if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) && 958 !xfer->tx_buf) 959 max_tx = max(xfer->len, max_tx); 960 if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) && 961 !xfer->rx_buf) 962 max_rx = max(xfer->len, max_rx); 963 } 964 965 if (max_tx) { 966 tmp = krealloc(ctlr->dummy_tx, max_tx, 967 GFP_KERNEL | GFP_DMA); 968 if (!tmp) 969 return -ENOMEM; 970 ctlr->dummy_tx = tmp; 971 memset(tmp, 0, max_tx); 972 } 973 974 if (max_rx) { 975 tmp = krealloc(ctlr->dummy_rx, max_rx, 976 GFP_KERNEL | GFP_DMA); 977 if (!tmp) 978 return -ENOMEM; 979 ctlr->dummy_rx = tmp; 980 } 981 982 if (max_tx || max_rx) { 983 list_for_each_entry(xfer, &msg->transfers, 984 transfer_list) { 985 if (!xfer->tx_buf) 986 xfer->tx_buf = ctlr->dummy_tx; 987 if (!xfer->rx_buf) 988 xfer->rx_buf = ctlr->dummy_rx; 989 } 990 } 991 } 992 993 return __spi_map_msg(ctlr, msg); 994 } 995 996 /* 997 * spi_transfer_one_message - Default implementation of transfer_one_message() 998 * 999 * This is a standard implementation of transfer_one_message() for 1000 * drivers which implement a transfer_one() operation. It provides 1001 * standard handling of delays and chip select management. 1002 */ 1003 static int spi_transfer_one_message(struct spi_controller *ctlr, 1004 struct spi_message *msg) 1005 { 1006 struct spi_transfer *xfer; 1007 bool keep_cs = false; 1008 int ret = 0; 1009 unsigned long long ms = 1; 1010 struct spi_statistics *statm = &ctlr->statistics; 1011 struct spi_statistics *stats = &msg->spi->statistics; 1012 1013 spi_set_cs(msg->spi, true); 1014 1015 SPI_STATISTICS_INCREMENT_FIELD(statm, messages); 1016 SPI_STATISTICS_INCREMENT_FIELD(stats, messages); 1017 1018 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 1019 trace_spi_transfer_start(msg, xfer); 1020 1021 spi_statistics_add_transfer_stats(statm, xfer, ctlr); 1022 spi_statistics_add_transfer_stats(stats, xfer, ctlr); 1023 1024 if (xfer->tx_buf || xfer->rx_buf) { 1025 reinit_completion(&ctlr->xfer_completion); 1026 1027 ret = ctlr->transfer_one(ctlr, msg->spi, xfer); 1028 if (ret < 0) { 1029 SPI_STATISTICS_INCREMENT_FIELD(statm, 1030 errors); 1031 SPI_STATISTICS_INCREMENT_FIELD(stats, 1032 errors); 1033 dev_err(&msg->spi->dev, 1034 "SPI transfer failed: %d\n", ret); 1035 goto out; 1036 } 1037 1038 if (ret > 0) { 1039 ret = 0; 1040 ms = 8LL * 1000LL * xfer->len; 1041 do_div(ms, xfer->speed_hz); 1042 ms += ms + 200; /* some tolerance */ 1043 1044 if (ms > UINT_MAX) 1045 ms = UINT_MAX; 1046 1047 ms = wait_for_completion_timeout(&ctlr->xfer_completion, 1048 msecs_to_jiffies(ms)); 1049 } 1050 1051 if (ms == 0) { 1052 SPI_STATISTICS_INCREMENT_FIELD(statm, 1053 timedout); 1054 SPI_STATISTICS_INCREMENT_FIELD(stats, 1055 timedout); 1056 dev_err(&msg->spi->dev, 1057 "SPI transfer timed out\n"); 1058 msg->status = -ETIMEDOUT; 1059 } 1060 } else { 1061 if (xfer->len) 1062 dev_err(&msg->spi->dev, 1063 "Bufferless transfer has length %u\n", 1064 xfer->len); 1065 } 1066 1067 trace_spi_transfer_stop(msg, xfer); 1068 1069 if (msg->status != -EINPROGRESS) 
1070 goto out; 1071 1072 if (xfer->delay_usecs) { 1073 u16 us = xfer->delay_usecs; 1074 1075 if (us <= 10) 1076 udelay(us); 1077 else 1078 usleep_range(us, us + DIV_ROUND_UP(us, 10)); 1079 } 1080 1081 if (xfer->cs_change) { 1082 if (list_is_last(&xfer->transfer_list, 1083 &msg->transfers)) { 1084 keep_cs = true; 1085 } else { 1086 spi_set_cs(msg->spi, false); 1087 udelay(10); 1088 spi_set_cs(msg->spi, true); 1089 } 1090 } 1091 1092 msg->actual_length += xfer->len; 1093 } 1094 1095 out: 1096 if (ret != 0 || !keep_cs) 1097 spi_set_cs(msg->spi, false); 1098 1099 if (msg->status == -EINPROGRESS) 1100 msg->status = ret; 1101 1102 if (msg->status && ctlr->handle_err) 1103 ctlr->handle_err(ctlr, msg); 1104 1105 spi_res_release(ctlr, msg); 1106 1107 spi_finalize_current_message(ctlr); 1108 1109 return ret; 1110 } 1111 1112 /** 1113 * spi_finalize_current_transfer - report completion of a transfer 1114 * @ctlr: the controller reporting completion 1115 * 1116 * Called by SPI drivers using the core transfer_one_message() 1117 * implementation to notify it that the current interrupt driven 1118 * transfer has finished and the next one may be scheduled. 1119 */ 1120 void spi_finalize_current_transfer(struct spi_controller *ctlr) 1121 { 1122 complete(&ctlr->xfer_completion); 1123 } 1124 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); 1125 1126 /** 1127 * __spi_pump_messages - function which processes spi message queue 1128 * @ctlr: controller to process queue for 1129 * @in_kthread: true if we are in the context of the message pump thread 1130 * 1131 * This function checks if there is any spi message in the queue that 1132 * needs processing and if so call out to the driver to initialize hardware 1133 * and transfer each message. 1134 * 1135 * Note that it is called both from the kthread itself and also from 1136 * inside spi_sync(); the queue extraction handling at the top of the 1137 * function should deal with this safely. 
1138 */ 1139 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) 1140 { 1141 unsigned long flags; 1142 bool was_busy = false; 1143 int ret; 1144 1145 /* Lock queue */ 1146 spin_lock_irqsave(&ctlr->queue_lock, flags); 1147 1148 /* Make sure we are not already running a message */ 1149 if (ctlr->cur_msg) { 1150 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1151 return; 1152 } 1153 1154 /* If another context is idling the device then defer */ 1155 if (ctlr->idling) { 1156 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); 1157 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1158 return; 1159 } 1160 1161 /* Check if the queue is idle */ 1162 if (list_empty(&ctlr->queue) || !ctlr->running) { 1163 if (!ctlr->busy) { 1164 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1165 return; 1166 } 1167 1168 /* Only do teardown in the thread */ 1169 if (!in_kthread) { 1170 kthread_queue_work(&ctlr->kworker, 1171 &ctlr->pump_messages); 1172 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1173 return; 1174 } 1175 1176 ctlr->busy = false; 1177 ctlr->idling = true; 1178 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1179 1180 kfree(ctlr->dummy_rx); 1181 ctlr->dummy_rx = NULL; 1182 kfree(ctlr->dummy_tx); 1183 ctlr->dummy_tx = NULL; 1184 if (ctlr->unprepare_transfer_hardware && 1185 ctlr->unprepare_transfer_hardware(ctlr)) 1186 dev_err(&ctlr->dev, 1187 "failed to unprepare transfer hardware\n"); 1188 if (ctlr->auto_runtime_pm) { 1189 pm_runtime_mark_last_busy(ctlr->dev.parent); 1190 pm_runtime_put_autosuspend(ctlr->dev.parent); 1191 } 1192 trace_spi_controller_idle(ctlr); 1193 1194 spin_lock_irqsave(&ctlr->queue_lock, flags); 1195 ctlr->idling = false; 1196 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1197 return; 1198 } 1199 1200 /* Extract head of queue */ 1201 ctlr->cur_msg = 1202 list_first_entry(&ctlr->queue, struct spi_message, queue); 1203 1204 list_del_init(&ctlr->cur_msg->queue); 1205 if (ctlr->busy) 1206 was_busy = true; 1207 else 1208 ctlr->busy = true; 1209 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1210 1211 mutex_lock(&ctlr->io_mutex); 1212 1213 if (!was_busy && ctlr->auto_runtime_pm) { 1214 ret = pm_runtime_get_sync(ctlr->dev.parent); 1215 if (ret < 0) { 1216 pm_runtime_put_noidle(ctlr->dev.parent); 1217 dev_err(&ctlr->dev, "Failed to power device: %d\n", 1218 ret); 1219 mutex_unlock(&ctlr->io_mutex); 1220 return; 1221 } 1222 } 1223 1224 if (!was_busy) 1225 trace_spi_controller_busy(ctlr); 1226 1227 if (!was_busy && ctlr->prepare_transfer_hardware) { 1228 ret = ctlr->prepare_transfer_hardware(ctlr); 1229 if (ret) { 1230 dev_err(&ctlr->dev, 1231 "failed to prepare transfer hardware\n"); 1232 1233 if (ctlr->auto_runtime_pm) 1234 pm_runtime_put(ctlr->dev.parent); 1235 mutex_unlock(&ctlr->io_mutex); 1236 return; 1237 } 1238 } 1239 1240 trace_spi_message_start(ctlr->cur_msg); 1241 1242 if (ctlr->prepare_message) { 1243 ret = ctlr->prepare_message(ctlr, ctlr->cur_msg); 1244 if (ret) { 1245 dev_err(&ctlr->dev, "failed to prepare message: %d\n", 1246 ret); 1247 ctlr->cur_msg->status = ret; 1248 spi_finalize_current_message(ctlr); 1249 goto out; 1250 } 1251 ctlr->cur_msg_prepared = true; 1252 } 1253 1254 ret = spi_map_msg(ctlr, ctlr->cur_msg); 1255 if (ret) { 1256 ctlr->cur_msg->status = ret; 1257 spi_finalize_current_message(ctlr); 1258 goto out; 1259 } 1260 1261 ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg); 1262 if (ret) { 1263 dev_err(&ctlr->dev, 1264 "failed to transfer one message from queue\n"); 1265 goto out; 1266 } 1267 1268 out: 
1269 mutex_unlock(&ctlr->io_mutex); 1270 1271 /* Prod the scheduler in case transfer_one() was busy waiting */ 1272 if (!ret) 1273 cond_resched(); 1274 } 1275 1276 /** 1277 * spi_pump_messages - kthread work function which processes spi message queue 1278 * @work: pointer to kthread work struct contained in the controller struct 1279 */ 1280 static void spi_pump_messages(struct kthread_work *work) 1281 { 1282 struct spi_controller *ctlr = 1283 container_of(work, struct spi_controller, pump_messages); 1284 1285 __spi_pump_messages(ctlr, true); 1286 } 1287 1288 static int spi_init_queue(struct spi_controller *ctlr) 1289 { 1290 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 1291 1292 ctlr->running = false; 1293 ctlr->busy = false; 1294 1295 kthread_init_worker(&ctlr->kworker); 1296 ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker, 1297 "%s", dev_name(&ctlr->dev)); 1298 if (IS_ERR(ctlr->kworker_task)) { 1299 dev_err(&ctlr->dev, "failed to create message pump task\n"); 1300 return PTR_ERR(ctlr->kworker_task); 1301 } 1302 kthread_init_work(&ctlr->pump_messages, spi_pump_messages); 1303 1304 /* 1305 * Controller config will indicate if this controller should run the 1306 * message pump with high (realtime) priority to reduce the transfer 1307 * latency on the bus by minimising the delay between a transfer 1308 * request and the scheduling of the message pump thread. Without this 1309 * setting the message pump thread will remain at default priority. 1310 */ 1311 if (ctlr->rt) { 1312 dev_info(&ctlr->dev, 1313 "will run message pump with realtime priority\n"); 1314 sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, ¶m); 1315 } 1316 1317 return 0; 1318 } 1319 1320 /** 1321 * spi_get_next_queued_message() - called by driver to check for queued 1322 * messages 1323 * @ctlr: the controller to check for queued messages 1324 * 1325 * If there are more messages in the queue, the next message is returned from 1326 * this call. 1327 * 1328 * Return: the next message in the queue, else NULL if the queue is empty. 1329 */ 1330 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr) 1331 { 1332 struct spi_message *next; 1333 unsigned long flags; 1334 1335 /* get a pointer to the next message, if any */ 1336 spin_lock_irqsave(&ctlr->queue_lock, flags); 1337 next = list_first_entry_or_null(&ctlr->queue, struct spi_message, 1338 queue); 1339 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1340 1341 return next; 1342 } 1343 EXPORT_SYMBOL_GPL(spi_get_next_queued_message); 1344 1345 /** 1346 * spi_finalize_current_message() - the current message is complete 1347 * @ctlr: the controller to return the message to 1348 * 1349 * Called by the driver to notify the core that the message in the front of the 1350 * queue is complete and can be removed from the queue. 
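 *
 * A hypothetical driver providing its own ->transfer_one_message() (instead
 * of the spi_transfer_one_message() default above) would end each message
 * roughly like this (foo_do_transfers() is a placeholder):
 *
 *	static int foo_transfer_one_message(struct spi_controller *ctlr,
 *					    struct spi_message *msg)
 *	{
 *		int ret = foo_do_transfers(ctlr, msg);
 *
 *		msg->status = ret;
 *		spi_finalize_current_message(ctlr);
 *		return ret;
 *	}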
1351 */ 1352 void spi_finalize_current_message(struct spi_controller *ctlr) 1353 { 1354 struct spi_message *mesg; 1355 unsigned long flags; 1356 int ret; 1357 1358 spin_lock_irqsave(&ctlr->queue_lock, flags); 1359 mesg = ctlr->cur_msg; 1360 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1361 1362 spi_unmap_msg(ctlr, mesg); 1363 1364 if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { 1365 ret = ctlr->unprepare_message(ctlr, mesg); 1366 if (ret) { 1367 dev_err(&ctlr->dev, "failed to unprepare message: %d\n", 1368 ret); 1369 } 1370 } 1371 1372 spin_lock_irqsave(&ctlr->queue_lock, flags); 1373 ctlr->cur_msg = NULL; 1374 ctlr->cur_msg_prepared = false; 1375 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); 1376 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1377 1378 trace_spi_message_done(mesg); 1379 1380 mesg->state = NULL; 1381 if (mesg->complete) 1382 mesg->complete(mesg->context); 1383 } 1384 EXPORT_SYMBOL_GPL(spi_finalize_current_message); 1385 1386 static int spi_start_queue(struct spi_controller *ctlr) 1387 { 1388 unsigned long flags; 1389 1390 spin_lock_irqsave(&ctlr->queue_lock, flags); 1391 1392 if (ctlr->running || ctlr->busy) { 1393 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1394 return -EBUSY; 1395 } 1396 1397 ctlr->running = true; 1398 ctlr->cur_msg = NULL; 1399 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1400 1401 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); 1402 1403 return 0; 1404 } 1405 1406 static int spi_stop_queue(struct spi_controller *ctlr) 1407 { 1408 unsigned long flags; 1409 unsigned limit = 500; 1410 int ret = 0; 1411 1412 spin_lock_irqsave(&ctlr->queue_lock, flags); 1413 1414 /* 1415 * This is a bit lame, but is optimized for the common execution path. 1416 * A wait_queue on the ctlr->busy could be used, but then the common 1417 * execution path (pump_messages) would be required to call wake_up or 1418 * friends on every SPI message. Do this instead. 1419 */ 1420 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) { 1421 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1422 usleep_range(10000, 11000); 1423 spin_lock_irqsave(&ctlr->queue_lock, flags); 1424 } 1425 1426 if (!list_empty(&ctlr->queue) || ctlr->busy) 1427 ret = -EBUSY; 1428 else 1429 ctlr->running = false; 1430 1431 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1432 1433 if (ret) { 1434 dev_warn(&ctlr->dev, "could not stop message queue\n"); 1435 return ret; 1436 } 1437 return ret; 1438 } 1439 1440 static int spi_destroy_queue(struct spi_controller *ctlr) 1441 { 1442 int ret; 1443 1444 ret = spi_stop_queue(ctlr); 1445 1446 /* 1447 * kthread_flush_worker will block until all work is done. 1448 * If the reason that stop_queue timed out is that the work will never 1449 * finish, then it does no good to call flush/stop thread, so 1450 * return anyway. 
1451 */ 1452 if (ret) { 1453 dev_err(&ctlr->dev, "problem destroying queue\n"); 1454 return ret; 1455 } 1456 1457 kthread_flush_worker(&ctlr->kworker); 1458 kthread_stop(ctlr->kworker_task); 1459 1460 return 0; 1461 } 1462 1463 static int __spi_queued_transfer(struct spi_device *spi, 1464 struct spi_message *msg, 1465 bool need_pump) 1466 { 1467 struct spi_controller *ctlr = spi->controller; 1468 unsigned long flags; 1469 1470 spin_lock_irqsave(&ctlr->queue_lock, flags); 1471 1472 if (!ctlr->running) { 1473 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1474 return -ESHUTDOWN; 1475 } 1476 msg->actual_length = 0; 1477 msg->status = -EINPROGRESS; 1478 1479 list_add_tail(&msg->queue, &ctlr->queue); 1480 if (!ctlr->busy && need_pump) 1481 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages); 1482 1483 spin_unlock_irqrestore(&ctlr->queue_lock, flags); 1484 return 0; 1485 } 1486 1487 /** 1488 * spi_queued_transfer - transfer function for queued transfers 1489 * @spi: spi device which is requesting transfer 1490 * @msg: spi message which is to handled is queued to driver queue 1491 * 1492 * Return: zero on success, else a negative error code. 1493 */ 1494 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) 1495 { 1496 return __spi_queued_transfer(spi, msg, true); 1497 } 1498 1499 static int spi_controller_initialize_queue(struct spi_controller *ctlr) 1500 { 1501 int ret; 1502 1503 ctlr->transfer = spi_queued_transfer; 1504 if (!ctlr->transfer_one_message) 1505 ctlr->transfer_one_message = spi_transfer_one_message; 1506 1507 /* Initialize and start queue */ 1508 ret = spi_init_queue(ctlr); 1509 if (ret) { 1510 dev_err(&ctlr->dev, "problem initializing queue\n"); 1511 goto err_init_queue; 1512 } 1513 ctlr->queued = true; 1514 ret = spi_start_queue(ctlr); 1515 if (ret) { 1516 dev_err(&ctlr->dev, "problem starting queue\n"); 1517 goto err_start_queue; 1518 } 1519 1520 return 0; 1521 1522 err_start_queue: 1523 spi_destroy_queue(ctlr); 1524 err_init_queue: 1525 return ret; 1526 } 1527 1528 /** 1529 * spi_flush_queue - Send all pending messages in the queue from the callers' 1530 * context 1531 * @ctlr: controller to process queue for 1532 * 1533 * This should be used when one wants to ensure all pending messages have been 1534 * sent before doing something. Is used by the spi-mem code to make sure SPI 1535 * memory operations do not preempt regular SPI transfers that have been queued 1536 * before the spi-mem operation. 1537 */ 1538 void spi_flush_queue(struct spi_controller *ctlr) 1539 { 1540 if (ctlr->transfer == spi_queued_transfer) 1541 __spi_pump_messages(ctlr, false); 1542 } 1543 1544 /*-------------------------------------------------------------------------*/ 1545 1546 #if defined(CONFIG_OF) 1547 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, 1548 struct device_node *nc) 1549 { 1550 u32 value; 1551 int rc; 1552 1553 /* Mode (clock phase/polarity/etc.) 
*/ 1554 if (of_property_read_bool(nc, "spi-cpha")) 1555 spi->mode |= SPI_CPHA; 1556 if (of_property_read_bool(nc, "spi-cpol")) 1557 spi->mode |= SPI_CPOL; 1558 if (of_property_read_bool(nc, "spi-cs-high")) 1559 spi->mode |= SPI_CS_HIGH; 1560 if (of_property_read_bool(nc, "spi-3wire")) 1561 spi->mode |= SPI_3WIRE; 1562 if (of_property_read_bool(nc, "spi-lsb-first")) 1563 spi->mode |= SPI_LSB_FIRST; 1564 1565 /* Device DUAL/QUAD mode */ 1566 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 1567 switch (value) { 1568 case 1: 1569 break; 1570 case 2: 1571 spi->mode |= SPI_TX_DUAL; 1572 break; 1573 case 4: 1574 spi->mode |= SPI_TX_QUAD; 1575 break; 1576 default: 1577 dev_warn(&ctlr->dev, 1578 "spi-tx-bus-width %d not supported\n", 1579 value); 1580 break; 1581 } 1582 } 1583 1584 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 1585 switch (value) { 1586 case 1: 1587 break; 1588 case 2: 1589 spi->mode |= SPI_RX_DUAL; 1590 break; 1591 case 4: 1592 spi->mode |= SPI_RX_QUAD; 1593 break; 1594 default: 1595 dev_warn(&ctlr->dev, 1596 "spi-rx-bus-width %d not supported\n", 1597 value); 1598 break; 1599 } 1600 } 1601 1602 if (spi_controller_is_slave(ctlr)) { 1603 if (strcmp(nc->name, "slave")) { 1604 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n", 1605 nc); 1606 return -EINVAL; 1607 } 1608 return 0; 1609 } 1610 1611 /* Device address */ 1612 rc = of_property_read_u32(nc, "reg", &value); 1613 if (rc) { 1614 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n", 1615 nc, rc); 1616 return rc; 1617 } 1618 spi->chip_select = value; 1619 1620 /* Device speed */ 1621 rc = of_property_read_u32(nc, "spi-max-frequency", &value); 1622 if (rc) { 1623 dev_err(&ctlr->dev, 1624 "%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc); 1625 return rc; 1626 } 1627 spi->max_speed_hz = value; 1628 1629 return 0; 1630 } 1631 1632 static struct spi_device * 1633 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc) 1634 { 1635 struct spi_device *spi; 1636 int rc; 1637 1638 /* Alloc an spi_device */ 1639 spi = spi_alloc_device(ctlr); 1640 if (!spi) { 1641 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc); 1642 rc = -ENOMEM; 1643 goto err_out; 1644 } 1645 1646 /* Select device driver */ 1647 rc = of_modalias_node(nc, spi->modalias, 1648 sizeof(spi->modalias)); 1649 if (rc < 0) { 1650 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc); 1651 goto err_out; 1652 } 1653 1654 rc = of_spi_parse_dt(ctlr, spi, nc); 1655 if (rc) 1656 goto err_out; 1657 1658 /* Store a pointer to the node in the device structure */ 1659 of_node_get(nc); 1660 spi->dev.of_node = nc; 1661 1662 /* Register the new device */ 1663 rc = spi_add_device(spi); 1664 if (rc) { 1665 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc); 1666 goto err_of_node_put; 1667 } 1668 1669 return spi; 1670 1671 err_of_node_put: 1672 of_node_put(nc); 1673 err_out: 1674 spi_dev_put(spi); 1675 return ERR_PTR(rc); 1676 } 1677 1678 /** 1679 * of_register_spi_devices() - Register child devices onto the SPI bus 1680 * @ctlr: Pointer to spi_controller device 1681 * 1682 * Registers an spi_device for each child node of controller node which 1683 * represents a valid SPI slave. 
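 *
 * A typical (illustrative) child node supplies at least "reg" (the chip
 * select) and "spi-max-frequency", plus optional mode and bus-width
 * properties parsed by of_spi_parse_dt() above, e.g.:
 *
 *	flash@1 {
 *		compatible = "foo,spi-flash";
 *		reg = <1>;
 *		spi-max-frequency = <20000000>;
 *		spi-tx-bus-width = <2>;
 *		spi-rx-bus-width = <2>;
 *	};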
1684 */ 1685 static void of_register_spi_devices(struct spi_controller *ctlr) 1686 { 1687 struct spi_device *spi; 1688 struct device_node *nc; 1689 1690 if (!ctlr->dev.of_node) 1691 return; 1692 1693 for_each_available_child_of_node(ctlr->dev.of_node, nc) { 1694 if (of_node_test_and_set_flag(nc, OF_POPULATED)) 1695 continue; 1696 spi = of_register_spi_device(ctlr, nc); 1697 if (IS_ERR(spi)) { 1698 dev_warn(&ctlr->dev, 1699 "Failed to create SPI device for %pOF\n", nc); 1700 of_node_clear_flag(nc, OF_POPULATED); 1701 } 1702 } 1703 } 1704 #else 1705 static void of_register_spi_devices(struct spi_controller *ctlr) { } 1706 #endif 1707 1708 #ifdef CONFIG_ACPI 1709 static void acpi_spi_parse_apple_properties(struct spi_device *spi) 1710 { 1711 struct acpi_device *dev = ACPI_COMPANION(&spi->dev); 1712 const union acpi_object *obj; 1713 1714 if (!x86_apple_machine) 1715 return; 1716 1717 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj) 1718 && obj->buffer.length >= 4) 1719 spi->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer; 1720 1721 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj) 1722 && obj->buffer.length == 8) 1723 spi->bits_per_word = *(u64 *)obj->buffer.pointer; 1724 1725 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj) 1726 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer) 1727 spi->mode |= SPI_LSB_FIRST; 1728 1729 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj) 1730 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 1731 spi->mode |= SPI_CPOL; 1732 1733 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj) 1734 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 1735 spi->mode |= SPI_CPHA; 1736 } 1737 1738 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 1739 { 1740 struct spi_device *spi = data; 1741 struct spi_controller *ctlr = spi->controller; 1742 1743 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 1744 struct acpi_resource_spi_serialbus *sb; 1745 1746 sb = &ares->data.spi_serial_bus; 1747 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 1748 /* 1749 * ACPI DeviceSelection numbering is handled by the 1750 * host controller driver in Windows and can vary 1751 * from driver to driver. In Linux we always expect 1752 * 0 .. max - 1 so we need to ask the driver to 1753 * translate between the two schemes. 
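 *
 * A controller driver whose firmware counts chip selects from 1 could, for
 * instance, provide a hook like this (hypothetical driver shown):
 *
 *	static int foo_fw_translate_cs(struct spi_controller *ctlr,
 *				       unsigned int cs)
 *	{
 *		return cs ? cs - 1 : -EINVAL;
 *	}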
1754 */ 1755 if (ctlr->fw_translate_cs) { 1756 int cs = ctlr->fw_translate_cs(ctlr, 1757 sb->device_selection); 1758 if (cs < 0) 1759 return cs; 1760 spi->chip_select = cs; 1761 } else { 1762 spi->chip_select = sb->device_selection; 1763 } 1764 1765 spi->max_speed_hz = sb->connection_speed; 1766 1767 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) 1768 spi->mode |= SPI_CPHA; 1769 if (sb->clock_polarity == ACPI_SPI_START_HIGH) 1770 spi->mode |= SPI_CPOL; 1771 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) 1772 spi->mode |= SPI_CS_HIGH; 1773 } 1774 } else if (spi->irq < 0) { 1775 struct resource r; 1776 1777 if (acpi_dev_resource_interrupt(ares, 0, &r)) 1778 spi->irq = r.start; 1779 } 1780 1781 /* Always tell the ACPI core to skip this resource */ 1782 return 1; 1783 } 1784 1785 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, 1786 struct acpi_device *adev) 1787 { 1788 struct list_head resource_list; 1789 struct spi_device *spi; 1790 int ret; 1791 1792 if (acpi_bus_get_status(adev) || !adev->status.present || 1793 acpi_device_enumerated(adev)) 1794 return AE_OK; 1795 1796 spi = spi_alloc_device(ctlr); 1797 if (!spi) { 1798 dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n", 1799 dev_name(&adev->dev)); 1800 return AE_NO_MEMORY; 1801 } 1802 1803 ACPI_COMPANION_SET(&spi->dev, adev); 1804 spi->irq = -1; 1805 1806 INIT_LIST_HEAD(&resource_list); 1807 ret = acpi_dev_get_resources(adev, &resource_list, 1808 acpi_spi_add_resource, spi); 1809 acpi_dev_free_resource_list(&resource_list); 1810 1811 acpi_spi_parse_apple_properties(spi); 1812 1813 if (ret < 0 || !spi->max_speed_hz) { 1814 spi_dev_put(spi); 1815 return AE_OK; 1816 } 1817 1818 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, 1819 sizeof(spi->modalias)); 1820 1821 if (spi->irq < 0) 1822 spi->irq = acpi_dev_gpio_irq_get(adev, 0); 1823 1824 acpi_device_set_enumerated(adev); 1825 1826 adev->power.flags.ignore_parent = true; 1827 if (spi_add_device(spi)) { 1828 adev->power.flags.ignore_parent = false; 1829 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n", 1830 dev_name(&adev->dev)); 1831 spi_dev_put(spi); 1832 } 1833 1834 return AE_OK; 1835 } 1836 1837 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, 1838 void *data, void **return_value) 1839 { 1840 struct spi_controller *ctlr = data; 1841 struct acpi_device *adev; 1842 1843 if (acpi_bus_get_device(handle, &adev)) 1844 return AE_OK; 1845 1846 return acpi_register_spi_device(ctlr, adev); 1847 } 1848 1849 static void acpi_register_spi_devices(struct spi_controller *ctlr) 1850 { 1851 acpi_status status; 1852 acpi_handle handle; 1853 1854 handle = ACPI_HANDLE(ctlr->dev.parent); 1855 if (!handle) 1856 return; 1857 1858 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, 1859 acpi_spi_add_device, NULL, ctlr, NULL); 1860 if (ACPI_FAILURE(status)) 1861 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n"); 1862 } 1863 #else 1864 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {} 1865 #endif /* CONFIG_ACPI */ 1866 1867 static void spi_controller_release(struct device *dev) 1868 { 1869 struct spi_controller *ctlr; 1870 1871 ctlr = container_of(dev, struct spi_controller, dev); 1872 kfree(ctlr); 1873 } 1874 1875 static struct class spi_master_class = { 1876 .name = "spi_master", 1877 .owner = THIS_MODULE, 1878 .dev_release = spi_controller_release, 1879 .dev_groups = spi_master_groups, 1880 }; 1881 1882 #ifdef CONFIG_SPI_SLAVE 1883 /** 1884 * spi_slave_abort - abort the ongoing transfer 
request on an SPI slave 1885 * controller 1886 * @spi: device used for the current transfer 1887 */ 1888 int spi_slave_abort(struct spi_device *spi) 1889 { 1890 struct spi_controller *ctlr = spi->controller; 1891 1892 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort) 1893 return ctlr->slave_abort(ctlr); 1894 1895 return -ENOTSUPP; 1896 } 1897 EXPORT_SYMBOL_GPL(spi_slave_abort); 1898 1899 static int match_true(struct device *dev, void *data) 1900 { 1901 return 1; 1902 } 1903 1904 static ssize_t spi_slave_show(struct device *dev, 1905 struct device_attribute *attr, char *buf) 1906 { 1907 struct spi_controller *ctlr = container_of(dev, struct spi_controller, 1908 dev); 1909 struct device *child; 1910 1911 child = device_find_child(&ctlr->dev, NULL, match_true); 1912 return sprintf(buf, "%s\n", 1913 child ? to_spi_device(child)->modalias : NULL); 1914 } 1915 1916 static ssize_t spi_slave_store(struct device *dev, 1917 struct device_attribute *attr, const char *buf, 1918 size_t count) 1919 { 1920 struct spi_controller *ctlr = container_of(dev, struct spi_controller, 1921 dev); 1922 struct spi_device *spi; 1923 struct device *child; 1924 char name[32]; 1925 int rc; 1926 1927 rc = sscanf(buf, "%31s", name); 1928 if (rc != 1 || !name[0]) 1929 return -EINVAL; 1930 1931 child = device_find_child(&ctlr->dev, NULL, match_true); 1932 if (child) { 1933 /* Remove registered slave */ 1934 device_unregister(child); 1935 put_device(child); 1936 } 1937 1938 if (strcmp(name, "(null)")) { 1939 /* Register new slave */ 1940 spi = spi_alloc_device(ctlr); 1941 if (!spi) 1942 return -ENOMEM; 1943 1944 strlcpy(spi->modalias, name, sizeof(spi->modalias)); 1945 1946 rc = spi_add_device(spi); 1947 if (rc) { 1948 spi_dev_put(spi); 1949 return rc; 1950 } 1951 } 1952 1953 return count; 1954 } 1955 1956 static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store); 1957 1958 static struct attribute *spi_slave_attrs[] = { 1959 &dev_attr_slave.attr, 1960 NULL, 1961 }; 1962 1963 static const struct attribute_group spi_slave_group = { 1964 .attrs = spi_slave_attrs, 1965 }; 1966 1967 static const struct attribute_group *spi_slave_groups[] = { 1968 &spi_controller_statistics_group, 1969 &spi_slave_group, 1970 NULL, 1971 }; 1972 1973 static struct class spi_slave_class = { 1974 .name = "spi_slave", 1975 .owner = THIS_MODULE, 1976 .dev_release = spi_controller_release, 1977 .dev_groups = spi_slave_groups, 1978 }; 1979 #else 1980 extern struct class spi_slave_class; /* dummy */ 1981 #endif 1982 1983 /** 1984 * __spi_alloc_controller - allocate an SPI master or slave controller 1985 * @dev: the controller, possibly using the platform_bus 1986 * @size: how much zeroed driver-private data to allocate; the pointer to this 1987 * memory is in the driver_data field of the returned device, 1988 * accessible with spi_controller_get_devdata(). 1989 * @slave: flag indicating whether to allocate an SPI master (false) or SPI 1990 * slave (true) controller 1991 * Context: can sleep 1992 * 1993 * This call is used only by SPI controller drivers, which are the 1994 * only ones directly touching chip registers. It's how they allocate 1995 * an spi_controller structure, prior to calling spi_register_controller(). 1996 * 1997 * This must be called from context that can sleep. 1998 * 1999 * The caller is responsible for assigning the bus number and initializing the 2000 * controller's methods before calling spi_register_controller(); and (after 2001 * errors adding the device) calling spi_controller_put() to prevent a memory 2002 * leak. 
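 *
 * For instance (a sketch; "struct foo_priv" stands in for a real driver's
 * private state, allocated here together with the controller, e.g. from a
 * platform device's probe):
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);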
2003 * 2004 * Return: the SPI controller structure on success, else NULL. 2005 */ 2006 struct spi_controller *__spi_alloc_controller(struct device *dev, 2007 unsigned int size, bool slave) 2008 { 2009 struct spi_controller *ctlr; 2010 2011 if (!dev) 2012 return NULL; 2013 2014 ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL); 2015 if (!ctlr) 2016 return NULL; 2017 2018 device_initialize(&ctlr->dev); 2019 ctlr->bus_num = -1; 2020 ctlr->num_chipselect = 1; 2021 ctlr->slave = slave; 2022 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave) 2023 ctlr->dev.class = &spi_slave_class; 2024 else 2025 ctlr->dev.class = &spi_master_class; 2026 ctlr->dev.parent = dev; 2027 pm_suspend_ignore_children(&ctlr->dev, true); 2028 spi_controller_set_devdata(ctlr, &ctlr[1]); 2029 2030 return ctlr; 2031 } 2032 EXPORT_SYMBOL_GPL(__spi_alloc_controller); 2033 2034 #ifdef CONFIG_OF 2035 static int of_spi_register_master(struct spi_controller *ctlr) 2036 { 2037 int nb, i, *cs; 2038 struct device_node *np = ctlr->dev.of_node; 2039 2040 if (!np) 2041 return 0; 2042 2043 nb = of_gpio_named_count(np, "cs-gpios"); 2044 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); 2045 2046 /* Return error only for an incorrectly formed cs-gpios property */ 2047 if (nb == 0 || nb == -ENOENT) 2048 return 0; 2049 else if (nb < 0) 2050 return nb; 2051 2052 cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int), 2053 GFP_KERNEL); 2054 ctlr->cs_gpios = cs; 2055 2056 if (!ctlr->cs_gpios) 2057 return -ENOMEM; 2058 2059 for (i = 0; i < ctlr->num_chipselect; i++) 2060 cs[i] = -ENOENT; 2061 2062 for (i = 0; i < nb; i++) 2063 cs[i] = of_get_named_gpio(np, "cs-gpios", i); 2064 2065 return 0; 2066 } 2067 #else 2068 static int of_spi_register_master(struct spi_controller *ctlr) 2069 { 2070 return 0; 2071 } 2072 #endif 2073 2074 static int spi_controller_check_ops(struct spi_controller *ctlr) 2075 { 2076 /* 2077 * The controller may implement only the high-level SPI-memory like 2078 * operations if it does not support regular SPI transfers, and this is 2079 * valid use case. 2080 * If ->mem_ops is NULL, we request that at least one of the 2081 * ->transfer_xxx() method be implemented. 2082 */ 2083 if (ctlr->mem_ops) { 2084 if (!ctlr->mem_ops->exec_op) 2085 return -EINVAL; 2086 } else if (!ctlr->transfer && !ctlr->transfer_one && 2087 !ctlr->transfer_one_message) { 2088 return -EINVAL; 2089 } 2090 2091 return 0; 2092 } 2093 2094 /** 2095 * spi_register_controller - register SPI master or slave controller 2096 * @ctlr: initialized master, originally from spi_alloc_master() or 2097 * spi_alloc_slave() 2098 * Context: can sleep 2099 * 2100 * SPI controllers connect to their drivers using some non-SPI bus, 2101 * such as the platform bus. The final stage of probe() in that code 2102 * includes calling spi_register_controller() to hook up to this SPI bus glue. 2103 * 2104 * SPI controllers use board specific (often SOC specific) bus numbers, 2105 * and board-specific addressing for SPI devices combines those numbers 2106 * with chip select numbers. Since SPI does not directly support dynamic 2107 * device identification, boards need configuration tables telling which 2108 * chip is at which address. 2109 * 2110 * This must be called from context that can sleep. It returns zero on 2111 * success, else a negative error code (dropping the controller's refcount). 2112 * After a successful return, the caller is responsible for calling 2113 * spi_unregister_controller(). 2114 * 2115 * Return: zero on success, else a negative error code. 
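 *
 * A minimal, illustrative sketch of the registration step in a controller
 * driver's probe(); the foo_* hooks and "pdev" are hypothetical, and the
 * dynamic bus number requested at allocation time is kept:
 *
 *	ctlr->num_chipselect = 4;
 *	ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
 *	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
 *	ctlr->setup = foo_setup;
 *	ctlr->transfer_one = foo_transfer_one;
 *	ctlr->dev.of_node = pdev->dev.of_node;
 *
 *	ret = spi_register_controller(ctlr);
 *	if (ret)
 *		spi_controller_put(ctlr);
 *	return ret;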
2116 */ 2117 int spi_register_controller(struct spi_controller *ctlr) 2118 { 2119 struct device *dev = ctlr->dev.parent; 2120 struct boardinfo *bi; 2121 int status = -ENODEV; 2122 int id, first_dynamic; 2123 2124 if (!dev) 2125 return -ENODEV; 2126 2127 /* 2128 * Make sure all necessary hooks are implemented before registering 2129 * the SPI controller. 2130 */ 2131 status = spi_controller_check_ops(ctlr); 2132 if (status) 2133 return status; 2134 2135 if (!spi_controller_is_slave(ctlr)) { 2136 status = of_spi_register_master(ctlr); 2137 if (status) 2138 return status; 2139 } 2140 2141 /* even if it's just one always-selected device, there must 2142 * be at least one chipselect 2143 */ 2144 if (ctlr->num_chipselect == 0) 2145 return -EINVAL; 2146 /* allocate dynamic bus number using Linux idr */ 2147 if ((ctlr->bus_num < 0) && ctlr->dev.of_node) { 2148 id = of_alias_get_id(ctlr->dev.of_node, "spi"); 2149 if (id >= 0) { 2150 ctlr->bus_num = id; 2151 mutex_lock(&board_lock); 2152 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num, 2153 ctlr->bus_num + 1, GFP_KERNEL); 2154 mutex_unlock(&board_lock); 2155 if (WARN(id < 0, "couldn't get idr")) 2156 return id == -ENOSPC ? -EBUSY : id; 2157 } 2158 } 2159 if (ctlr->bus_num < 0) { 2160 first_dynamic = of_alias_get_highest_id("spi"); 2161 if (first_dynamic < 0) 2162 first_dynamic = 0; 2163 else 2164 first_dynamic++; 2165 2166 mutex_lock(&board_lock); 2167 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic, 2168 0, GFP_KERNEL); 2169 mutex_unlock(&board_lock); 2170 if (WARN(id < 0, "couldn't get idr")) 2171 return id; 2172 ctlr->bus_num = id; 2173 } 2174 INIT_LIST_HEAD(&ctlr->queue); 2175 spin_lock_init(&ctlr->queue_lock); 2176 spin_lock_init(&ctlr->bus_lock_spinlock); 2177 mutex_init(&ctlr->bus_lock_mutex); 2178 mutex_init(&ctlr->io_mutex); 2179 ctlr->bus_lock_flag = 0; 2180 init_completion(&ctlr->xfer_completion); 2181 if (!ctlr->max_dma_len) 2182 ctlr->max_dma_len = INT_MAX; 2183 2184 /* register the device, then userspace will see it. 2185 * registration fails if the bus ID is in use. 2186 */ 2187 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num); 2188 status = device_add(&ctlr->dev); 2189 if (status < 0) { 2190 /* free bus id */ 2191 mutex_lock(&board_lock); 2192 idr_remove(&spi_master_idr, ctlr->bus_num); 2193 mutex_unlock(&board_lock); 2194 goto done; 2195 } 2196 dev_dbg(dev, "registered %s %s\n", 2197 spi_controller_is_slave(ctlr) ? "slave" : "master", 2198 dev_name(&ctlr->dev)); 2199 2200 /* 2201 * If we're using a queued driver, start the queue. Note that we don't 2202 * need the queueing logic if the driver is only supporting high-level 2203 * memory operations. 
2204 */ 2205 if (ctlr->transfer) { 2206 dev_info(dev, "controller is unqueued, this is deprecated\n"); 2207 } else if (ctlr->transfer_one || ctlr->transfer_one_message) { 2208 status = spi_controller_initialize_queue(ctlr); 2209 if (status) { 2210 device_del(&ctlr->dev); 2211 /* free bus id */ 2212 mutex_lock(&board_lock); 2213 idr_remove(&spi_master_idr, ctlr->bus_num); 2214 mutex_unlock(&board_lock); 2215 goto done; 2216 } 2217 } 2218 /* add statistics */ 2219 spin_lock_init(&ctlr->statistics.lock); 2220 2221 mutex_lock(&board_lock); 2222 list_add_tail(&ctlr->list, &spi_controller_list); 2223 list_for_each_entry(bi, &board_list, list) 2224 spi_match_controller_to_boardinfo(ctlr, &bi->board_info); 2225 mutex_unlock(&board_lock); 2226 2227 /* Register devices from the device tree and ACPI */ 2228 of_register_spi_devices(ctlr); 2229 acpi_register_spi_devices(ctlr); 2230 done: 2231 return status; 2232 } 2233 EXPORT_SYMBOL_GPL(spi_register_controller); 2234 2235 static void devm_spi_unregister(struct device *dev, void *res) 2236 { 2237 spi_unregister_controller(*(struct spi_controller **)res); 2238 } 2239 2240 /** 2241 * devm_spi_register_controller - register managed SPI master or slave 2242 * controller 2243 * @dev: device managing SPI controller 2244 * @ctlr: initialized controller, originally from spi_alloc_master() or 2245 * spi_alloc_slave() 2246 * Context: can sleep 2247 * 2248 * Register a SPI device as with spi_register_controller() which will 2249 * automatically be unregistered and freed. 2250 * 2251 * Return: zero on success, else a negative error code. 2252 */ 2253 int devm_spi_register_controller(struct device *dev, 2254 struct spi_controller *ctlr) 2255 { 2256 struct spi_controller **ptr; 2257 int ret; 2258 2259 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); 2260 if (!ptr) 2261 return -ENOMEM; 2262 2263 ret = spi_register_controller(ctlr); 2264 if (!ret) { 2265 *ptr = ctlr; 2266 devres_add(dev, ptr); 2267 } else { 2268 devres_free(ptr); 2269 } 2270 2271 return ret; 2272 } 2273 EXPORT_SYMBOL_GPL(devm_spi_register_controller); 2274 2275 static int __unregister(struct device *dev, void *null) 2276 { 2277 spi_unregister_device(to_spi_device(dev)); 2278 return 0; 2279 } 2280 2281 /** 2282 * spi_unregister_controller - unregister SPI master or slave controller 2283 * @ctlr: the controller being unregistered 2284 * Context: can sleep 2285 * 2286 * This call is used only by SPI controller drivers, which are the 2287 * only ones directly touching chip registers. 2288 * 2289 * This must be called from context that can sleep. 2290 * 2291 * Note that this function also drops a reference to the controller. 
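 *
 * A minimal, illustrative sketch of the matching remove() path for a driver
 * that registered with spi_register_controller() rather than the devm_
 * variant and stored the controller with platform_set_drvdata() (the "foo"
 * name is hypothetical):
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct spi_controller *ctlr = platform_get_drvdata(pdev);
 *
 *		spi_unregister_controller(ctlr);
 *		return 0;
 *	}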
2292 */ 2293 void spi_unregister_controller(struct spi_controller *ctlr) 2294 { 2295 struct spi_controller *found; 2296 int id = ctlr->bus_num; 2297 int dummy; 2298 2299 /* First make sure that this controller was ever added */ 2300 mutex_lock(&board_lock); 2301 found = idr_find(&spi_master_idr, id); 2302 mutex_unlock(&board_lock); 2303 if (ctlr->queued) { 2304 if (spi_destroy_queue(ctlr)) 2305 dev_err(&ctlr->dev, "queue remove failed\n"); 2306 } 2307 mutex_lock(&board_lock); 2308 list_del(&ctlr->list); 2309 mutex_unlock(&board_lock); 2310 2311 dummy = device_for_each_child(&ctlr->dev, NULL, __unregister); 2312 device_unregister(&ctlr->dev); 2313 /* free bus id */ 2314 mutex_lock(&board_lock); 2315 if (found == ctlr) 2316 idr_remove(&spi_master_idr, id); 2317 mutex_unlock(&board_lock); 2318 } 2319 EXPORT_SYMBOL_GPL(spi_unregister_controller); 2320 2321 int spi_controller_suspend(struct spi_controller *ctlr) 2322 { 2323 int ret; 2324 2325 /* Basically no-ops for non-queued controllers */ 2326 if (!ctlr->queued) 2327 return 0; 2328 2329 ret = spi_stop_queue(ctlr); 2330 if (ret) 2331 dev_err(&ctlr->dev, "queue stop failed\n"); 2332 2333 return ret; 2334 } 2335 EXPORT_SYMBOL_GPL(spi_controller_suspend); 2336 2337 int spi_controller_resume(struct spi_controller *ctlr) 2338 { 2339 int ret; 2340 2341 if (!ctlr->queued) 2342 return 0; 2343 2344 ret = spi_start_queue(ctlr); 2345 if (ret) 2346 dev_err(&ctlr->dev, "queue restart failed\n"); 2347 2348 return ret; 2349 } 2350 EXPORT_SYMBOL_GPL(spi_controller_resume); 2351 2352 static int __spi_controller_match(struct device *dev, const void *data) 2353 { 2354 struct spi_controller *ctlr; 2355 const u16 *bus_num = data; 2356 2357 ctlr = container_of(dev, struct spi_controller, dev); 2358 return ctlr->bus_num == *bus_num; 2359 } 2360 2361 /** 2362 * spi_busnum_to_master - look up master associated with bus_num 2363 * @bus_num: the master's bus number 2364 * Context: can sleep 2365 * 2366 * This call may be used with devices that are registered after 2367 * arch init time. It returns a refcounted pointer to the relevant 2368 * spi_controller (which the caller must release), or NULL if there is 2369 * no such master registered. 2370 * 2371 * Return: the SPI master structure on success, else NULL. 2372 */ 2373 struct spi_controller *spi_busnum_to_master(u16 bus_num) 2374 { 2375 struct device *dev; 2376 struct spi_controller *ctlr = NULL; 2377 2378 dev = class_find_device(&spi_master_class, NULL, &bus_num, 2379 __spi_controller_match); 2380 if (dev) 2381 ctlr = container_of(dev, struct spi_controller, dev); 2382 /* reference got in class_find_device */ 2383 return ctlr; 2384 } 2385 EXPORT_SYMBOL_GPL(spi_busnum_to_master); 2386 2387 /*-------------------------------------------------------------------------*/ 2388 2389 /* Core methods for SPI resource management */ 2390 2391 /** 2392 * spi_res_alloc - allocate a spi resource that is life-cycle managed 2393 * during the processing of a spi_message while using 2394 * spi_transfer_one 2395 * @spi: the spi device for which we allocate memory 2396 * @release: the release code to execute for this resource 2397 * @size: size to alloc and return 2398 * @gfp: GFP allocation flags 2399 * 2400 * Return: the pointer to the allocated data 2401 * 2402 * This may get enhanced in the future to allocate from a memory pool 2403 * of the @spi_device or @spi_controller to avoid repeated allocations. 
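 *
 * A minimal, illustrative sketch of typical use from a message preparation
 * path (foo_release and struct foo_state are hypothetical):
 *
 *	static void foo_release(struct spi_controller *ctlr,
 *				struct spi_message *msg, void *res)
 *	{
 *		// undo whatever foo_state set up for this message
 *	}
 *
 *	struct foo_state *st = spi_res_alloc(msg->spi, foo_release,
 *					     sizeof(*st), GFP_KERNEL);
 *	if (!st)
 *		return -ENOMEM;
 *	spi_res_add(msg, st);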
2404 */ 2405 void *spi_res_alloc(struct spi_device *spi, 2406 spi_res_release_t release, 2407 size_t size, gfp_t gfp) 2408 { 2409 struct spi_res *sres; 2410 2411 sres = kzalloc(sizeof(*sres) + size, gfp); 2412 if (!sres) 2413 return NULL; 2414 2415 INIT_LIST_HEAD(&sres->entry); 2416 sres->release = release; 2417 2418 return sres->data; 2419 } 2420 EXPORT_SYMBOL_GPL(spi_res_alloc); 2421 2422 /** 2423 * spi_res_free - free an spi resource 2424 * @res: pointer to the custom data of a resource 2425 * 2426 */ 2427 void spi_res_free(void *res) 2428 { 2429 struct spi_res *sres = container_of(res, struct spi_res, data); 2430 2431 if (!res) 2432 return; 2433 2434 WARN_ON(!list_empty(&sres->entry)); 2435 kfree(sres); 2436 } 2437 EXPORT_SYMBOL_GPL(spi_res_free); 2438 2439 /** 2440 * spi_res_add - add a spi_res to the spi_message 2441 * @message: the spi message 2442 * @res: the spi_resource 2443 */ 2444 void spi_res_add(struct spi_message *message, void *res) 2445 { 2446 struct spi_res *sres = container_of(res, struct spi_res, data); 2447 2448 WARN_ON(!list_empty(&sres->entry)); 2449 list_add_tail(&sres->entry, &message->resources); 2450 } 2451 EXPORT_SYMBOL_GPL(spi_res_add); 2452 2453 /** 2454 * spi_res_release - release all spi resources for this message 2455 * @ctlr: the @spi_controller 2456 * @message: the @spi_message 2457 */ 2458 void spi_res_release(struct spi_controller *ctlr, struct spi_message *message) 2459 { 2460 struct spi_res *res; 2461 2462 while (!list_empty(&message->resources)) { 2463 res = list_last_entry(&message->resources, 2464 struct spi_res, entry); 2465 2466 if (res->release) 2467 res->release(ctlr, message, res->data); 2468 2469 list_del(&res->entry); 2470 2471 kfree(res); 2472 } 2473 } 2474 EXPORT_SYMBOL_GPL(spi_res_release); 2475 2476 /*-------------------------------------------------------------------------*/ 2477 2478 /* Core methods for spi_message alterations */ 2479 2480 static void __spi_replace_transfers_release(struct spi_controller *ctlr, 2481 struct spi_message *msg, 2482 void *res) 2483 { 2484 struct spi_replaced_transfers *rxfer = res; 2485 size_t i; 2486 2487 /* call extra callback if requested */ 2488 if (rxfer->release) 2489 rxfer->release(ctlr, msg, res); 2490 2491 /* insert replaced transfers back into the message */ 2492 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); 2493 2494 /* remove the formerly inserted entries */ 2495 for (i = 0; i < rxfer->inserted; i++) 2496 list_del(&rxfer->inserted_transfers[i].transfer_list); 2497 } 2498 2499 /** 2500 * spi_replace_transfers - replace transfers with several transfers 2501 * and register change with spi_message.resources 2502 * @msg: the spi_message we work upon 2503 * @xfer_first: the first spi_transfer we want to replace 2504 * @remove: number of transfers to remove 2505 * @insert: the number of transfers we want to insert instead 2506 * @release: extra release code necessary in some circumstances 2507 * @extradatasize: extra data to allocate (with alignment guarantees 2508 * of struct @spi_transfer) 2509 * @gfp: gfp flags 2510 * 2511 * Returns: pointer to @spi_replaced_transfers, 2512 * PTR_ERR(...) in case of errors. 
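 *
 * Illustrative sketch, mirroring how __spi_split_transfer_maxsize() below
 * uses this helper: replace one oversized transfer with count smaller ones,
 * with no extra release callback and no per-replacement private data:
 *
 *	rxfer = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
 *	if (IS_ERR(rxfer))
 *		return PTR_ERR(rxfer);
 *	xfers = rxfer->inserted_transfers;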
2513 */ 2514 struct spi_replaced_transfers *spi_replace_transfers( 2515 struct spi_message *msg, 2516 struct spi_transfer *xfer_first, 2517 size_t remove, 2518 size_t insert, 2519 spi_replaced_release_t release, 2520 size_t extradatasize, 2521 gfp_t gfp) 2522 { 2523 struct spi_replaced_transfers *rxfer; 2524 struct spi_transfer *xfer; 2525 size_t i; 2526 2527 /* allocate the structure using spi_res */ 2528 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, 2529 insert * sizeof(struct spi_transfer) 2530 + sizeof(struct spi_replaced_transfers) 2531 + extradatasize, 2532 gfp); 2533 if (!rxfer) 2534 return ERR_PTR(-ENOMEM); 2535 2536 /* the release code to invoke before running the generic release */ 2537 rxfer->release = release; 2538 2539 /* assign extradata */ 2540 if (extradatasize) 2541 rxfer->extradata = 2542 &rxfer->inserted_transfers[insert]; 2543 2544 /* init the replaced_transfers list */ 2545 INIT_LIST_HEAD(&rxfer->replaced_transfers); 2546 2547 /* assign the list_entry after which we should reinsert 2548 * the @replaced_transfers - it may be spi_message.messages! 2549 */ 2550 rxfer->replaced_after = xfer_first->transfer_list.prev; 2551 2552 /* remove the requested number of transfers */ 2553 for (i = 0; i < remove; i++) { 2554 /* if the entry after replaced_after it is msg->transfers 2555 * then we have been requested to remove more transfers 2556 * than are in the list 2557 */ 2558 if (rxfer->replaced_after->next == &msg->transfers) { 2559 dev_err(&msg->spi->dev, 2560 "requested to remove more spi_transfers than are available\n"); 2561 /* insert replaced transfers back into the message */ 2562 list_splice(&rxfer->replaced_transfers, 2563 rxfer->replaced_after); 2564 2565 /* free the spi_replace_transfer structure */ 2566 spi_res_free(rxfer); 2567 2568 /* and return with an error */ 2569 return ERR_PTR(-EINVAL); 2570 } 2571 2572 /* remove the entry after replaced_after from list of 2573 * transfers and add it to list of replaced_transfers 2574 */ 2575 list_move_tail(rxfer->replaced_after->next, 2576 &rxfer->replaced_transfers); 2577 } 2578 2579 /* create copy of the given xfer with identical settings 2580 * based on the first transfer to get removed 2581 */ 2582 for (i = 0; i < insert; i++) { 2583 /* we need to run in reverse order */ 2584 xfer = &rxfer->inserted_transfers[insert - 1 - i]; 2585 2586 /* copy all spi_transfer data */ 2587 memcpy(xfer, xfer_first, sizeof(*xfer)); 2588 2589 /* add to list */ 2590 list_add(&xfer->transfer_list, rxfer->replaced_after); 2591 2592 /* clear cs_change and delay_usecs for all but the last */ 2593 if (i) { 2594 xfer->cs_change = false; 2595 xfer->delay_usecs = 0; 2596 } 2597 } 2598 2599 /* set up inserted */ 2600 rxfer->inserted = insert; 2601 2602 /* and register it with spi_res/spi_message */ 2603 spi_res_add(msg, rxfer); 2604 2605 return rxfer; 2606 } 2607 EXPORT_SYMBOL_GPL(spi_replace_transfers); 2608 2609 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, 2610 struct spi_message *msg, 2611 struct spi_transfer **xferp, 2612 size_t maxsize, 2613 gfp_t gfp) 2614 { 2615 struct spi_transfer *xfer = *xferp, *xfers; 2616 struct spi_replaced_transfers *srt; 2617 size_t offset; 2618 size_t count, i; 2619 2620 /* warn once about this fact that we are splitting a transfer */ 2621 dev_warn_once(&msg->spi->dev, 2622 "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n", 2623 xfer->len, maxsize); 2624 2625 /* calculate how many we have to replace */ 2626 count = DIV_ROUND_UP(xfer->len, 
maxsize); 2627 2628 /* create replacement */ 2629 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp); 2630 if (IS_ERR(srt)) 2631 return PTR_ERR(srt); 2632 xfers = srt->inserted_transfers; 2633 2634 /* now handle each of those newly inserted spi_transfers 2635 * note that the replacement spi_transfers are all preset 2636 * to the same values as *xferp, so tx_buf, rx_buf and len 2637 * are all identical (as well as most others) 2638 * so we just have to fix up len and the pointers. 2639 * 2640 * this also includes support for the deprecated 2641 * spi_message.is_dma_mapped interface 2642 */ 2643 2644 /* the first transfer just needs the length modified, so we 2645 * run it outside the loop 2646 */ 2647 xfers[0].len = min_t(size_t, maxsize, xfer[0].len); 2648 2649 /* all the others need rx_buf/tx_buf also set */ 2650 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) { 2651 /* update rx_buf, tx_buf and dma */ 2652 if (xfers[i].rx_buf) 2653 xfers[i].rx_buf += offset; 2654 if (xfers[i].rx_dma) 2655 xfers[i].rx_dma += offset; 2656 if (xfers[i].tx_buf) 2657 xfers[i].tx_buf += offset; 2658 if (xfers[i].tx_dma) 2659 xfers[i].tx_dma += offset; 2660 2661 /* update length */ 2662 xfers[i].len = min(maxsize, xfers[i].len - offset); 2663 } 2664 2665 /* we set up xferp to the last entry we have inserted, 2666 * so that we skip those already split transfers 2667 */ 2668 *xferp = &xfers[count - 1]; 2669 2670 /* increment statistics counters */ 2671 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, 2672 transfers_split_maxsize); 2673 SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics, 2674 transfers_split_maxsize); 2675 2676 return 0; 2677 } 2678 2679 /** 2680 * spi_split_transfers_maxsize - split spi transfers into multiple transfers 2681 * when an individual transfer exceeds a 2682 * certain size 2683 * @ctlr: the @spi_controller for this transfer 2684 * @msg: the @spi_message to transform 2685 * @maxsize: the maximum length an individual transfer may have before it is split 2686 * @gfp: GFP allocation flags 2687 * 2688 * Return: status of transformation 2689 */ 2690 int spi_split_transfers_maxsize(struct spi_controller *ctlr, 2691 struct spi_message *msg, 2692 size_t maxsize, 2693 gfp_t gfp) 2694 { 2695 struct spi_transfer *xfer; 2696 int ret; 2697 2698 /* iterate over the transfer_list, 2699 * but note that xfer is advanced to the last transfer inserted 2700 * to avoid checking sizes again unnecessarily (also xfer may 2701 * potentially belong to a different list by the time the 2702 * replacement has happened) 2703 */ 2704 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 2705 if (xfer->len > maxsize) { 2706 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, 2707 maxsize, gfp); 2708 if (ret) 2709 return ret; 2710 } 2711 } 2712 2713 return 0; 2714 } 2715 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize); 2716 2717 /*-------------------------------------------------------------------------*/ 2718 2719 /* Core methods for SPI controller protocol drivers. Some of the 2720 * other core methods are currently defined as inline functions.
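 *
 * For orientation, a minimal and purely illustrative protocol-driver
 * sequence built on these methods looks roughly like the following, assuming
 * an spi_device *spi from probe() plus dma-safe tx/rx buffers of length len:
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf	= tx,
 *		.rx_buf	= rx,
 *		.len	= len,
 *	};
 *	struct spi_message msg;
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 8;
 *	spi->max_speed_hz = 1000000;
 *	ret = spi_setup(spi);
 *	if (ret)
 *		return ret;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	ret = spi_sync(spi, &msg);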
2721 */ 2722 2723 static int __spi_validate_bits_per_word(struct spi_controller *ctlr, 2724 u8 bits_per_word) 2725 { 2726 if (ctlr->bits_per_word_mask) { 2727 /* Only 32 bits fit in the mask */ 2728 if (bits_per_word > 32) 2729 return -EINVAL; 2730 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word))) 2731 return -EINVAL; 2732 } 2733 2734 return 0; 2735 } 2736 2737 /** 2738 * spi_setup - setup SPI mode and clock rate 2739 * @spi: the device whose settings are being modified 2740 * Context: can sleep, and no requests are queued to the device 2741 * 2742 * SPI protocol drivers may need to update the transfer mode if the 2743 * device doesn't work with its default. They may likewise need 2744 * to update clock rates or word sizes from initial values. This function 2745 * changes those settings, and must be called from a context that can sleep. 2746 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take 2747 * effect the next time the device is selected and data is transferred to 2748 * or from it. When this function returns, the spi device is deselected. 2749 * 2750 * Note that this call will fail if the protocol driver specifies an option 2751 * that the underlying controller or its driver does not support. For 2752 * example, not all hardware supports wire transfers using nine bit words, 2753 * LSB-first wire encoding, or active-high chipselects. 2754 * 2755 * Return: zero on success, else a negative error code. 2756 */ 2757 int spi_setup(struct spi_device *spi) 2758 { 2759 unsigned bad_bits, ugly_bits; 2760 int status; 2761 2762 /* check mode to prevent that DUAL and QUAD set at the same time 2763 */ 2764 if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) || 2765 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) { 2766 dev_err(&spi->dev, 2767 "setup: can not select dual and quad at the same time\n"); 2768 return -EINVAL; 2769 } 2770 /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden 2771 */ 2772 if ((spi->mode & SPI_3WIRE) && (spi->mode & 2773 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD))) 2774 return -EINVAL; 2775 /* help drivers fail *cleanly* when they need options 2776 * that aren't supported with their current controller 2777 */ 2778 bad_bits = spi->mode & ~spi->controller->mode_bits; 2779 ugly_bits = bad_bits & 2780 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD); 2781 if (ugly_bits) { 2782 dev_warn(&spi->dev, 2783 "setup: ignoring unsupported mode bits %x\n", 2784 ugly_bits); 2785 spi->mode &= ~ugly_bits; 2786 bad_bits &= ~ugly_bits; 2787 } 2788 if (bad_bits) { 2789 dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 2790 bad_bits); 2791 return -EINVAL; 2792 } 2793 2794 if (!spi->bits_per_word) 2795 spi->bits_per_word = 8; 2796 2797 status = __spi_validate_bits_per_word(spi->controller, 2798 spi->bits_per_word); 2799 if (status) 2800 return status; 2801 2802 if (!spi->max_speed_hz) 2803 spi->max_speed_hz = spi->controller->max_speed_hz; 2804 2805 if (spi->controller->setup) 2806 status = spi->controller->setup(spi); 2807 2808 spi_set_cs(spi, false); 2809 2810 dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n", 2811 (int) (spi->mode & (SPI_CPOL | SPI_CPHA)), 2812 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 2813 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 2814 (spi->mode & SPI_3WIRE) ? "3wire, " : "", 2815 (spi->mode & SPI_LOOP) ? 
"loopback, " : "", 2816 spi->bits_per_word, spi->max_speed_hz, 2817 status); 2818 2819 return status; 2820 } 2821 EXPORT_SYMBOL_GPL(spi_setup); 2822 2823 static int __spi_validate(struct spi_device *spi, struct spi_message *message) 2824 { 2825 struct spi_controller *ctlr = spi->controller; 2826 struct spi_transfer *xfer; 2827 int w_size; 2828 2829 if (list_empty(&message->transfers)) 2830 return -EINVAL; 2831 2832 /* Half-duplex links include original MicroWire, and ones with 2833 * only one data pin like SPI_3WIRE (switches direction) or where 2834 * either MOSI or MISO is missing. They can also be caused by 2835 * software limitations. 2836 */ 2837 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) || 2838 (spi->mode & SPI_3WIRE)) { 2839 unsigned flags = ctlr->flags; 2840 2841 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2842 if (xfer->rx_buf && xfer->tx_buf) 2843 return -EINVAL; 2844 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf) 2845 return -EINVAL; 2846 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf) 2847 return -EINVAL; 2848 } 2849 } 2850 2851 /** 2852 * Set transfer bits_per_word and max speed as spi device default if 2853 * it is not set for this transfer. 2854 * Set transfer tx_nbits and rx_nbits as single transfer default 2855 * (SPI_NBITS_SINGLE) if it is not set for this transfer. 2856 */ 2857 message->frame_length = 0; 2858 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2859 message->frame_length += xfer->len; 2860 if (!xfer->bits_per_word) 2861 xfer->bits_per_word = spi->bits_per_word; 2862 2863 if (!xfer->speed_hz) 2864 xfer->speed_hz = spi->max_speed_hz; 2865 if (!xfer->speed_hz) 2866 xfer->speed_hz = ctlr->max_speed_hz; 2867 2868 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz) 2869 xfer->speed_hz = ctlr->max_speed_hz; 2870 2871 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word)) 2872 return -EINVAL; 2873 2874 /* 2875 * SPI transfer length should be multiple of SPI word size 2876 * where SPI word size should be power-of-two multiple 2877 */ 2878 if (xfer->bits_per_word <= 8) 2879 w_size = 1; 2880 else if (xfer->bits_per_word <= 16) 2881 w_size = 2; 2882 else 2883 w_size = 4; 2884 2885 /* No partial transfers accepted */ 2886 if (xfer->len % w_size) 2887 return -EINVAL; 2888 2889 if (xfer->speed_hz && ctlr->min_speed_hz && 2890 xfer->speed_hz < ctlr->min_speed_hz) 2891 return -EINVAL; 2892 2893 if (xfer->tx_buf && !xfer->tx_nbits) 2894 xfer->tx_nbits = SPI_NBITS_SINGLE; 2895 if (xfer->rx_buf && !xfer->rx_nbits) 2896 xfer->rx_nbits = SPI_NBITS_SINGLE; 2897 /* check transfer tx/rx_nbits: 2898 * 1. check the value matches one of single, dual and quad 2899 * 2. 
check tx/rx_nbits match the mode in spi_device 2900 */ 2901 if (xfer->tx_buf) { 2902 if (xfer->tx_nbits != SPI_NBITS_SINGLE && 2903 xfer->tx_nbits != SPI_NBITS_DUAL && 2904 xfer->tx_nbits != SPI_NBITS_QUAD) 2905 return -EINVAL; 2906 if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 2907 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 2908 return -EINVAL; 2909 if ((xfer->tx_nbits == SPI_NBITS_QUAD) && 2910 !(spi->mode & SPI_TX_QUAD)) 2911 return -EINVAL; 2912 } 2913 /* check transfer rx_nbits */ 2914 if (xfer->rx_buf) { 2915 if (xfer->rx_nbits != SPI_NBITS_SINGLE && 2916 xfer->rx_nbits != SPI_NBITS_DUAL && 2917 xfer->rx_nbits != SPI_NBITS_QUAD) 2918 return -EINVAL; 2919 if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 2920 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 2921 return -EINVAL; 2922 if ((xfer->rx_nbits == SPI_NBITS_QUAD) && 2923 !(spi->mode & SPI_RX_QUAD)) 2924 return -EINVAL; 2925 } 2926 } 2927 2928 message->status = -EINPROGRESS; 2929 2930 return 0; 2931 } 2932 2933 static int __spi_async(struct spi_device *spi, struct spi_message *message) 2934 { 2935 struct spi_controller *ctlr = spi->controller; 2936 2937 /* 2938 * Some controllers do not support doing regular SPI transfers. Return 2939 * ENOTSUPP when this is the case. 2940 */ 2941 if (!ctlr->transfer) 2942 return -ENOTSUPP; 2943 2944 message->spi = spi; 2945 2946 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async); 2947 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); 2948 2949 trace_spi_message_submit(message); 2950 2951 return ctlr->transfer(spi, message); 2952 } 2953 2954 /** 2955 * spi_async - asynchronous SPI transfer 2956 * @spi: device with which data will be exchanged 2957 * @message: describes the data transfers, including completion callback 2958 * Context: any (irqs may be blocked, etc) 2959 * 2960 * This call may be used in_irq and other contexts which can't sleep, 2961 * as well as from task contexts which can sleep. 2962 * 2963 * The completion callback is invoked in a context which can't sleep. 2964 * Before that invocation, the value of message->status is undefined. 2965 * When the callback is issued, message->status holds either zero (to 2966 * indicate complete success) or a negative error code. After that 2967 * callback returns, the driver which issued the transfer request may 2968 * deallocate the associated memory; it's no longer in use by any SPI 2969 * core or controller driver code. 2970 * 2971 * Note that although all messages to a spi_device are handled in 2972 * FIFO order, messages may go to different devices in other orders. 2973 * Some device might be higher priority, or have various "hard" access 2974 * time requirements, for example. 2975 * 2976 * On detection of any fault during the transfer, processing of 2977 * the entire message is aborted, and the device is deselected. 2978 * Until returning from the associated message completion callback, 2979 * no other spi_message queued to that device will be processed. 2980 * (This rule applies equally to all the synchronous transfer calls, 2981 * which are wrappers around this core asynchronous primitive.) 2982 * 2983 * Return: zero on success, else a negative error code. 
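 *
 * A minimal, illustrative sketch of asynchronous submission with a
 * completion callback (struct foo_dev, its members and foo_complete are
 * hypothetical; the message and transfer must stay allocated until the
 * callback runs):
 *
 *	static void foo_complete(void *context)
 *	{
 *		struct foo_dev *foo = context;
 *
 *		complete(&foo->done);
 *	}
 *
 *	spi_message_init(&foo->msg);
 *	spi_message_add_tail(&foo->xfer, &foo->msg);
 *	foo->msg.complete = foo_complete;
 *	foo->msg.context = foo;
 *	ret = spi_async(foo->spi, &foo->msg);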
2984 */ 2985 int spi_async(struct spi_device *spi, struct spi_message *message) 2986 { 2987 struct spi_controller *ctlr = spi->controller; 2988 int ret; 2989 unsigned long flags; 2990 2991 ret = __spi_validate(spi, message); 2992 if (ret != 0) 2993 return ret; 2994 2995 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 2996 2997 if (ctlr->bus_lock_flag) 2998 ret = -EBUSY; 2999 else 3000 ret = __spi_async(spi, message); 3001 3002 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3003 3004 return ret; 3005 } 3006 EXPORT_SYMBOL_GPL(spi_async); 3007 3008 /** 3009 * spi_async_locked - version of spi_async with exclusive bus usage 3010 * @spi: device with which data will be exchanged 3011 * @message: describes the data transfers, including completion callback 3012 * Context: any (irqs may be blocked, etc) 3013 * 3014 * This call may be used in_irq and other contexts which can't sleep, 3015 * as well as from task contexts which can sleep. 3016 * 3017 * The completion callback is invoked in a context which can't sleep. 3018 * Before that invocation, the value of message->status is undefined. 3019 * When the callback is issued, message->status holds either zero (to 3020 * indicate complete success) or a negative error code. After that 3021 * callback returns, the driver which issued the transfer request may 3022 * deallocate the associated memory; it's no longer in use by any SPI 3023 * core or controller driver code. 3024 * 3025 * Note that although all messages to a spi_device are handled in 3026 * FIFO order, messages may go to different devices in other orders. 3027 * Some device might be higher priority, or have various "hard" access 3028 * time requirements, for example. 3029 * 3030 * On detection of any fault during the transfer, processing of 3031 * the entire message is aborted, and the device is deselected. 3032 * Until returning from the associated message completion callback, 3033 * no other spi_message queued to that device will be processed. 3034 * (This rule applies equally to all the synchronous transfer calls, 3035 * which are wrappers around this core asynchronous primitive.) 3036 * 3037 * Return: zero on success, else a negative error code. 3038 */ 3039 int spi_async_locked(struct spi_device *spi, struct spi_message *message) 3040 { 3041 struct spi_controller *ctlr = spi->controller; 3042 int ret; 3043 unsigned long flags; 3044 3045 ret = __spi_validate(spi, message); 3046 if (ret != 0) 3047 return ret; 3048 3049 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 3050 3051 ret = __spi_async(spi, message); 3052 3053 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3054 3055 return ret; 3056 3057 } 3058 EXPORT_SYMBOL_GPL(spi_async_locked); 3059 3060 /*-------------------------------------------------------------------------*/ 3061 3062 /* Utility methods for SPI protocol drivers, layered on 3063 * top of the core. Some other utility methods are defined as 3064 * inline functions. 
3065 */ 3066 3067 static void spi_complete(void *arg) 3068 { 3069 complete(arg); 3070 } 3071 3072 static int __spi_sync(struct spi_device *spi, struct spi_message *message) 3073 { 3074 DECLARE_COMPLETION_ONSTACK(done); 3075 int status; 3076 struct spi_controller *ctlr = spi->controller; 3077 unsigned long flags; 3078 3079 status = __spi_validate(spi, message); 3080 if (status != 0) 3081 return status; 3082 3083 message->complete = spi_complete; 3084 message->context = &done; 3085 message->spi = spi; 3086 3087 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync); 3088 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync); 3089 3090 /* If we're not using the legacy transfer method then we will 3091 * try to transfer in the calling context so special case. 3092 * This code would be less tricky if we could remove the 3093 * support for driver implemented message queues. 3094 */ 3095 if (ctlr->transfer == spi_queued_transfer) { 3096 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 3097 3098 trace_spi_message_submit(message); 3099 3100 status = __spi_queued_transfer(spi, message, false); 3101 3102 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3103 } else { 3104 status = spi_async_locked(spi, message); 3105 } 3106 3107 if (status == 0) { 3108 /* Push out the messages in the calling context if we 3109 * can. 3110 */ 3111 if (ctlr->transfer == spi_queued_transfer) { 3112 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, 3113 spi_sync_immediate); 3114 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, 3115 spi_sync_immediate); 3116 __spi_pump_messages(ctlr, false); 3117 } 3118 3119 wait_for_completion(&done); 3120 status = message->status; 3121 } 3122 message->context = NULL; 3123 return status; 3124 } 3125 3126 /** 3127 * spi_sync - blocking/synchronous SPI data transfers 3128 * @spi: device with which data will be exchanged 3129 * @message: describes the data transfers 3130 * Context: can sleep 3131 * 3132 * This call may only be used from a context that may sleep. The sleep 3133 * is non-interruptible, and has no timeout. Low-overhead controller 3134 * drivers may DMA directly into and out of the message buffers. 3135 * 3136 * Note that the SPI device's chip select is active during the message, 3137 * and then is normally disabled between messages. Drivers for some 3138 * frequently-used devices may want to minimize costs of selecting a chip, 3139 * by leaving it selected in anticipation that the next message will go 3140 * to the same chip. (That may increase power usage.) 3141 * 3142 * Also, the caller is guaranteeing that the memory associated with the 3143 * message will not be freed before this call returns. 3144 * 3145 * Return: zero on success, else a negative error code. 3146 */ 3147 int spi_sync(struct spi_device *spi, struct spi_message *message) 3148 { 3149 int ret; 3150 3151 mutex_lock(&spi->controller->bus_lock_mutex); 3152 ret = __spi_sync(spi, message); 3153 mutex_unlock(&spi->controller->bus_lock_mutex); 3154 3155 return ret; 3156 } 3157 EXPORT_SYMBOL_GPL(spi_sync); 3158 3159 /** 3160 * spi_sync_locked - version of spi_sync with exclusive bus usage 3161 * @spi: device with which data will be exchanged 3162 * @message: describes the data transfers 3163 * Context: can sleep 3164 * 3165 * This call may only be used from a context that may sleep. The sleep 3166 * is non-interruptible, and has no timeout. Low-overhead controller 3167 * drivers may DMA directly into and out of the message buffers. 
3168 * 3169 * This call should be used by drivers that require exclusive access to the 3170 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must 3171 * be released by a spi_bus_unlock call when the exclusive access is over. 3172 * 3173 * Return: zero on success, else a negative error code. 3174 */ 3175 int spi_sync_locked(struct spi_device *spi, struct spi_message *message) 3176 { 3177 return __spi_sync(spi, message); 3178 } 3179 EXPORT_SYMBOL_GPL(spi_sync_locked); 3180 3181 /** 3182 * spi_bus_lock - obtain a lock for exclusive SPI bus usage 3183 * @ctlr: SPI bus master that should be locked for exclusive bus access 3184 * Context: can sleep 3185 * 3186 * This call may only be used from a context that may sleep. The sleep 3187 * is non-interruptible, and has no timeout. 3188 * 3189 * This call should be used by drivers that require exclusive access to the 3190 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the 3191 * exclusive access is over. Data transfer must be done by spi_sync_locked 3192 * and spi_async_locked calls when the SPI bus lock is held. 3193 * 3194 * Return: always zero. 3195 */ 3196 int spi_bus_lock(struct spi_controller *ctlr) 3197 { 3198 unsigned long flags; 3199 3200 mutex_lock(&ctlr->bus_lock_mutex); 3201 3202 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); 3203 ctlr->bus_lock_flag = 1; 3204 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); 3205 3206 /* mutex remains locked until spi_bus_unlock is called */ 3207 3208 return 0; 3209 } 3210 EXPORT_SYMBOL_GPL(spi_bus_lock); 3211 3212 /** 3213 * spi_bus_unlock - release the lock for exclusive SPI bus usage 3214 * @ctlr: SPI bus master that was locked for exclusive bus access 3215 * Context: can sleep 3216 * 3217 * This call may only be used from a context that may sleep. The sleep 3218 * is non-interruptible, and has no timeout. 3219 * 3220 * This call releases an SPI bus lock previously obtained by an spi_bus_lock 3221 * call. 3222 * 3223 * Return: always zero. 3224 */ 3225 int spi_bus_unlock(struct spi_controller *ctlr) 3226 { 3227 ctlr->bus_lock_flag = 0; 3228 3229 mutex_unlock(&ctlr->bus_lock_mutex); 3230 3231 return 0; 3232 } 3233 EXPORT_SYMBOL_GPL(spi_bus_unlock); 3234 3235 /* portable code must never pass more than 32 bytes */ 3236 #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) 3237 3238 static u8 *buf; 3239 3240 /** 3241 * spi_write_then_read - SPI synchronous write followed by read 3242 * @spi: device with which data will be exchanged 3243 * @txbuf: data to be written (need not be dma-safe) 3244 * @n_tx: size of txbuf, in bytes 3245 * @rxbuf: buffer into which data will be read (need not be dma-safe) 3246 * @n_rx: size of rxbuf, in bytes 3247 * Context: can sleep 3248 * 3249 * This performs a half duplex MicroWire style transaction with the 3250 * device, sending txbuf and then reading rxbuf. The return value 3251 * is zero for success, else a negative errno status code. 3252 * This call may only be used from a context that may sleep. 3253 * 3254 * Parameters to this routine are always copied using a small buffer; 3255 * portable code should never use this for more than 32 bytes. 3256 * Performance-sensitive or bulk transfer code should instead use 3257 * spi_{async,sync}() calls with dma-safe buffers. 3258 * 3259 * Return: zero on success, else a negative error code. 
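 *
 * A minimal, illustrative sketch of a small register read, assuming a
 * hypothetical device that returns two data bytes after a one-byte command:
 *
 *	u8 cmd = 0x80;
 *	u8 val[2];
 *
 *	status = spi_write_then_read(spi, &cmd, 1, val, sizeof(val));
 *	if (status)
 *		return status;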
3260 */ 3261 int spi_write_then_read(struct spi_device *spi, 3262 const void *txbuf, unsigned n_tx, 3263 void *rxbuf, unsigned n_rx) 3264 { 3265 static DEFINE_MUTEX(lock); 3266 3267 int status; 3268 struct spi_message message; 3269 struct spi_transfer x[2]; 3270 u8 *local_buf; 3271 3272 /* Use preallocated DMA-safe buffer if we can. We can't avoid 3273 * copying here, (as a pure convenience thing), but we can 3274 * keep heap costs out of the hot path unless someone else is 3275 * using the pre-allocated buffer or the transfer is too large. 3276 */ 3277 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { 3278 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), 3279 GFP_KERNEL | GFP_DMA); 3280 if (!local_buf) 3281 return -ENOMEM; 3282 } else { 3283 local_buf = buf; 3284 } 3285 3286 spi_message_init(&message); 3287 memset(x, 0, sizeof(x)); 3288 if (n_tx) { 3289 x[0].len = n_tx; 3290 spi_message_add_tail(&x[0], &message); 3291 } 3292 if (n_rx) { 3293 x[1].len = n_rx; 3294 spi_message_add_tail(&x[1], &message); 3295 } 3296 3297 memcpy(local_buf, txbuf, n_tx); 3298 x[0].tx_buf = local_buf; 3299 x[1].rx_buf = local_buf + n_tx; 3300 3301 /* do the i/o */ 3302 status = spi_sync(spi, &message); 3303 if (status == 0) 3304 memcpy(rxbuf, x[1].rx_buf, n_rx); 3305 3306 if (x[0].tx_buf == buf) 3307 mutex_unlock(&lock); 3308 else 3309 kfree(local_buf); 3310 3311 return status; 3312 } 3313 EXPORT_SYMBOL_GPL(spi_write_then_read); 3314 3315 /*-------------------------------------------------------------------------*/ 3316 3317 #if IS_ENABLED(CONFIG_OF_DYNAMIC) 3318 static int __spi_of_device_match(struct device *dev, void *data) 3319 { 3320 return dev->of_node == data; 3321 } 3322 3323 /* must call put_device() when done with returned spi_device device */ 3324 static struct spi_device *of_find_spi_device_by_node(struct device_node *node) 3325 { 3326 struct device *dev = bus_find_device(&spi_bus_type, NULL, node, 3327 __spi_of_device_match); 3328 return dev ? 
to_spi_device(dev) : NULL; 3329 } 3330 3331 static int __spi_of_controller_match(struct device *dev, const void *data) 3332 { 3333 return dev->of_node == data; 3334 } 3335 3336 /* the spi controllers are not using spi_bus, so we find it with another way */ 3337 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node) 3338 { 3339 struct device *dev; 3340 3341 dev = class_find_device(&spi_master_class, NULL, node, 3342 __spi_of_controller_match); 3343 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) 3344 dev = class_find_device(&spi_slave_class, NULL, node, 3345 __spi_of_controller_match); 3346 if (!dev) 3347 return NULL; 3348 3349 /* reference got in class_find_device */ 3350 return container_of(dev, struct spi_controller, dev); 3351 } 3352 3353 static int of_spi_notify(struct notifier_block *nb, unsigned long action, 3354 void *arg) 3355 { 3356 struct of_reconfig_data *rd = arg; 3357 struct spi_controller *ctlr; 3358 struct spi_device *spi; 3359 3360 switch (of_reconfig_get_state_change(action, arg)) { 3361 case OF_RECONFIG_CHANGE_ADD: 3362 ctlr = of_find_spi_controller_by_node(rd->dn->parent); 3363 if (ctlr == NULL) 3364 return NOTIFY_OK; /* not for us */ 3365 3366 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { 3367 put_device(&ctlr->dev); 3368 return NOTIFY_OK; 3369 } 3370 3371 spi = of_register_spi_device(ctlr, rd->dn); 3372 put_device(&ctlr->dev); 3373 3374 if (IS_ERR(spi)) { 3375 pr_err("%s: failed to create for '%pOF'\n", 3376 __func__, rd->dn); 3377 of_node_clear_flag(rd->dn, OF_POPULATED); 3378 return notifier_from_errno(PTR_ERR(spi)); 3379 } 3380 break; 3381 3382 case OF_RECONFIG_CHANGE_REMOVE: 3383 /* already depopulated? */ 3384 if (!of_node_check_flag(rd->dn, OF_POPULATED)) 3385 return NOTIFY_OK; 3386 3387 /* find our device by node */ 3388 spi = of_find_spi_device_by_node(rd->dn); 3389 if (spi == NULL) 3390 return NOTIFY_OK; /* no? not meant for us */ 3391 3392 /* unregister takes one ref away */ 3393 spi_unregister_device(spi); 3394 3395 /* and put the reference of the find */ 3396 put_device(&spi->dev); 3397 break; 3398 } 3399 3400 return NOTIFY_OK; 3401 } 3402 3403 static struct notifier_block spi_of_notifier = { 3404 .notifier_call = of_spi_notify, 3405 }; 3406 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 3407 extern struct notifier_block spi_of_notifier; 3408 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 3409 3410 #if IS_ENABLED(CONFIG_ACPI) 3411 static int spi_acpi_controller_match(struct device *dev, const void *data) 3412 { 3413 return ACPI_COMPANION(dev->parent) == data; 3414 } 3415 3416 static int spi_acpi_device_match(struct device *dev, void *data) 3417 { 3418 return ACPI_COMPANION(dev) == data; 3419 } 3420 3421 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev) 3422 { 3423 struct device *dev; 3424 3425 dev = class_find_device(&spi_master_class, NULL, adev, 3426 spi_acpi_controller_match); 3427 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) 3428 dev = class_find_device(&spi_slave_class, NULL, adev, 3429 spi_acpi_controller_match); 3430 if (!dev) 3431 return NULL; 3432 3433 return container_of(dev, struct spi_controller, dev); 3434 } 3435 3436 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) 3437 { 3438 struct device *dev; 3439 3440 dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match); 3441 3442 return dev ? 
to_spi_device(dev) : NULL; 3443 } 3444 3445 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value, 3446 void *arg) 3447 { 3448 struct acpi_device *adev = arg; 3449 struct spi_controller *ctlr; 3450 struct spi_device *spi; 3451 3452 switch (value) { 3453 case ACPI_RECONFIG_DEVICE_ADD: 3454 ctlr = acpi_spi_find_controller_by_adev(adev->parent); 3455 if (!ctlr) 3456 break; 3457 3458 acpi_register_spi_device(ctlr, adev); 3459 put_device(&ctlr->dev); 3460 break; 3461 case ACPI_RECONFIG_DEVICE_REMOVE: 3462 if (!acpi_device_enumerated(adev)) 3463 break; 3464 3465 spi = acpi_spi_find_device_by_adev(adev); 3466 if (!spi) 3467 break; 3468 3469 spi_unregister_device(spi); 3470 put_device(&spi->dev); 3471 break; 3472 } 3473 3474 return NOTIFY_OK; 3475 } 3476 3477 static struct notifier_block spi_acpi_notifier = { 3478 .notifier_call = acpi_spi_notify, 3479 }; 3480 #else 3481 extern struct notifier_block spi_acpi_notifier; 3482 #endif 3483 3484 static int __init spi_init(void) 3485 { 3486 int status; 3487 3488 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 3489 if (!buf) { 3490 status = -ENOMEM; 3491 goto err0; 3492 } 3493 3494 status = bus_register(&spi_bus_type); 3495 if (status < 0) 3496 goto err1; 3497 3498 status = class_register(&spi_master_class); 3499 if (status < 0) 3500 goto err2; 3501 3502 if (IS_ENABLED(CONFIG_SPI_SLAVE)) { 3503 status = class_register(&spi_slave_class); 3504 if (status < 0) 3505 goto err3; 3506 } 3507 3508 if (IS_ENABLED(CONFIG_OF_DYNAMIC)) 3509 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); 3510 if (IS_ENABLED(CONFIG_ACPI)) 3511 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier)); 3512 3513 return 0; 3514 3515 err3: 3516 class_unregister(&spi_master_class); 3517 err2: 3518 bus_unregister(&spi_bus_type); 3519 err1: 3520 kfree(buf); 3521 buf = NULL; 3522 err0: 3523 return status; 3524 } 3525 3526 /* board_info is normally registered in arch_initcall(), 3527 * but even essential drivers wait till later 3528 * 3529 * REVISIT only boardinfo really needs static linking. the rest (device and 3530 * driver registration) _could_ be dynamically linked (modular) ... costs 3531 * include needing to have boardinfo data structures be much more public. 3532 */ 3533 postcore_initcall(spi_init); 3534 3535
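/*
 * A minimal, illustrative sketch of the board_info registration mentioned
 * above, as it would appear in board setup code; the "foo" names, bus number
 * and chip select are hypothetical:
 *
 *	static struct spi_board_info foo_board_info[] __initdata = {
 *		{
 *			.modalias	= "foo-sensor",
 *			.max_speed_hz	= 1000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	spi_register_board_info(foo_board_info, ARRAY_SIZE(foo_board_info));
 */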