/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(spi_dev);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int spi_legacy_suspend(struct device *dev, pm_message_t message)
{
	int value = 0;
	struct spi_driver *drv = to_spi_driver(dev->driver);

	/* suspend will stop irqs and dma; no more i/o */
	if (drv) {
		if (drv->suspend)
			value = drv->suspend(to_spi_device(dev), message);
		else
			dev_dbg(dev, "... can't suspend\n");
	}
	return value;
}

static int spi_legacy_resume(struct device *dev)
{
	int value = 0;
	struct spi_driver *drv = to_spi_driver(dev->driver);

	/* resume may restart the i/o queue */
	if (drv) {
		if (drv->resume)
			value = drv->resume(to_spi_device(dev));
		else
			dev_dbg(dev, "... can't resume\n");
	}
	return value;
}

static int spi_pm_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_suspend(dev);
	else
		return spi_legacy_suspend(dev, PMSG_SUSPEND);
}

static int spi_pm_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_resume(dev);
	else
		return spi_legacy_resume(dev);
}

static int spi_pm_freeze(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_freeze(dev);
	else
		return spi_legacy_suspend(dev, PMSG_FREEZE);
}

static int spi_pm_thaw(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_thaw(dev);
	else
		return spi_legacy_resume(dev);
}

static int spi_pm_poweroff(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_poweroff(dev);
	else
		return spi_legacy_suspend(dev, PMSG_HIBERNATE);
}

static int spi_pm_restore(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_restore(dev);
	else
		return spi_legacy_resume(dev);
}
#else
#define spi_pm_suspend	NULL
#define spi_pm_resume	NULL
#define spi_pm_freeze	NULL
#define spi_pm_thaw	NULL
#define spi_pm_poweroff	NULL
#define spi_pm_restore	NULL
#endif

static const struct dev_pm_ops spi_pm = {
	.suspend = spi_pm_suspend,
	.resume = spi_pm_resume,
	.freeze = spi_pm_freeze,
	.thaw = spi_pm_thaw,
	.poweroff = spi_pm_poweroff,
	.restore = spi_pm_restore,
	SET_RUNTIME_PM_OPS(
		pm_generic_runtime_suspend,
		pm_generic_runtime_resume,
		NULL
	)
};

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.pm		= &spi_pm,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	acpi_dev_pm_attach(dev, true);
	ret = sdrv->probe(to_spi_device(dev));
	if (ret)
		acpi_dev_pm_detach(dev, true);

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	acpi_dev_pm_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * spi_register_driver - register a SPI driver
 * @sdrv: the driver to register
 * Context: can sleep
 */
int spi_register_driver(struct spi_driver *sdrv)
{
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(spi_register_driver);
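
/*
 * Example (illustrative sketch only, not part of the core): a minimal
 * protocol driver for a hypothetical "mychip" device.  All names here
 * ("mychip", mychip_probe, mychip_remove) are invented for this example;
 * most drivers use the module_spi_driver() helper as shown.
 */
#if 0	/* compiled out: usage illustration */
static int mychip_probe(struct spi_device *spi)
{
	/* the core has matched us to @spi; configure the chip here */
	return 0;
}

static int mychip_remove(struct spi_device *spi)
{
	return 0;
}

static struct spi_driver mychip_driver = {
	.driver = {
		.name	= "mychip",
		.owner	= THIS_MODULE,
	},
	.probe	= mychip_probe,
	.remove	= mychip_remove,
};
module_spi_driver(mychip_driver);
#endif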

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into files like
 * arch/.../mach.../board-YYY.c with other readonly (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible for calling spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Returns a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device *spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;
	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Returns 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
			spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
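
/*
 * Example (illustrative sketch only): how an adapter driver might use
 * spi_alloc_device()/spi_add_device() to instantiate a child it learned
 * about out-of-band.  The parameters filled in here are hypothetical.
 */
#if 0	/* compiled out: usage illustration */
static struct spi_device *example_add_child(struct spi_master *master)
{
	struct spi_device *spi;

	spi = spi_alloc_device(master);
	if (!spi)
		return NULL;

	/* fill in device parameters before adding it to the bus */
	spi->chip_select = 0;
	spi->max_speed_hz = 1000000;
	spi->mode = SPI_MODE_0;
	strlcpy(spi->modalias, "mychip", sizeof(spi->modalias));

	if (spi_add_device(spi)) {
		spi_dev_put(spi);	/* discard without adding */
		return NULL;
	}
	return spi;
}
#endif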

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Returns the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
					  struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
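
/*
 * Example (illustrative sketch only): a board file declaring one
 * hard-wired device on bus 1, chipselect 0.  The "mychip" modalias
 * and the numbers used are invented for this example.
 */
#if 0	/* compiled out: usage illustration */
static struct spi_board_info example_board_info[] __initdata = {
	{
		.modalias	= "mychip",
		.max_speed_hz	= 1000000,	/* 1 MHz */
		.bus_num	= 1,
		.chip_select	= 0,
		.mode		= SPI_MODE_3,
	},
};

static int __init example_board_init(void)
{
	return spi_register_board_info(example_board_info,
				       ARRAY_SIZE(example_board_info));
}
arch_initcall(example_board_init);
#endif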

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpio >= 0)
		gpio_set_value(spi->cs_gpio, !enable);
	else if (spi->master->set_cs)
		spi->master->set_cs(spi, !enable);
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
	const int sgs = DIV_ROUND_UP(len, desc_len);
	struct page *vm_page;
	void *sg_buf;
	size_t min;
	int i, ret;

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	for (i = 0; i < sgs; i++) {
		min = min_t(size_t, len, desc_len);

		if (vmalloced_buf) {
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_buf = page_address(vm_page) +
				((size_t)buf & ~PAGE_MASK);
		} else {
			sg_buf = buf;
		}

		sg_set_buf(&sgt->sgl[i], sg_buf, min);

		buf += min;
		len -= min;
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!master->can_dma)
		return 0;

	tx_dev = master->dma_tx->device->dev;
	rx_dev = master->dma_rx->device->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}

static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || !master->can_dma)
		return 0;

	tx_dev = master->dma_tx->device->dev;
	rx_dev = master->dma_rx->device->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}

static inline int spi_unmap_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	return __spi_map_msg(master, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	int ms = 1;

	spi_set_cs(msg->spi, true);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		reinit_completion(&master->xfer_completion);

		ret = master->transfer_one(master, msg->spi, xfer);
		if (ret < 0) {
			dev_err(&msg->spi->dev,
				"SPI transfer failed: %d\n", ret);
			goto out;
		}

		if (ret > 0) {
			ret = 0;
			ms = xfer->len * 8 * 1000 / xfer->speed_hz;
			ms += ms + 100; /* some tolerance */

			ms = wait_for_completion_timeout(&master->xfer_completion,
							 msecs_to_jiffies(ms));
		}

		if (ms == 0) {
			dev_err(&msg->spi->dev, "SPI transfer timed out\n");
			msg->status = -ETIMEDOUT;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	spi_finalize_current_message(master);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
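
/*
 * Example (illustrative sketch only): the contract between a controller
 * driver's transfer_one() and the core.  Returning a positive value tells
 * spi_transfer_one_message() the transfer is still in flight; the driver
 * then completes it from its interrupt handler with
 * spi_finalize_current_transfer().  The mychip_* names and the hardware
 * they touch are invented for this example.
 */
#if 0	/* compiled out: usage illustration */
static int mychip_transfer_one(struct spi_master *master,
			       struct spi_device *spi,
			       struct spi_transfer *xfer)
{
	/* program FIFO/DMA and start the transfer here ... */
	return 1;	/* in progress; will finish in IRQ context */
}

static irqreturn_t mychip_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;

	/* ... acknowledge the hardware, drain the FIFO ... */
	spi_finalize_current_transfer(master);
	return IRQ_HANDLED;
}
#endif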

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and transfer each message.
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&master->queue_lock, flags);
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}
		master->busy = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		kfree(master->dummy_rx);
		master->dummy_rx = NULL;
		kfree(master->dummy_tx);
		master->dummy_tx = NULL;
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);
		return;
	}

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}
	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
			return;
		}
	}

	trace_spi_message_start(master->cur_msg);

	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			return;
		}
		master->cur_msg_prepared = true;
	}

	ret = spi_map_msg(master, master->cur_msg);
	if (ret) {
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
		return;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue\n");
		return;
	}
}

static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);

	master->running = false;
	master->busy = false;

	init_kthread_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return -ENOMEM;
	}
	init_kthread_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread.  Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			 "will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&master->queue_lock, flags);
	next = list_first_entry_or_null(&master->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	master->cur_msg = NULL;

	queue_kthread_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	spi_unmap_msg(master, mesg);

	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}
	master->cur_msg_prepared = false;

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);

	trace_spi_message_done(mesg);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	queue_kthread_work(&master->kworker, &master->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message.  Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret) {
		dev_warn(&master->dev,
			 "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
	int ret;

	ret = spi_stop_queue(master);

	/*
	 * flush_kthread_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&master->dev, "problem destroying queue\n");
		return ret;
	}

	flush_kthread_worker(&master->kworker);
	kthread_stop(master->kworker_task);

	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to be handled and queued onto the driver queue
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	if (!master->busy)
		queue_kthread_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}

static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	master->queued = true;
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(master);
err_init_queue:
	return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;
	int rc;
	u32 value;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		/* Alloc an spi_device */
		spi = spi_alloc_device(master);
		if (!spi) {
			dev_err(&master->dev, "spi_device alloc error for %s\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}

		/* Select device driver */
		if (of_modalias_node(nc, spi->modalias,
				     sizeof(spi->modalias)) < 0) {
			dev_err(&master->dev, "cannot find modalias for %s\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}

		/* Device address */
		rc = of_property_read_u32(nc, "reg", &value);
		if (rc) {
			dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
				nc->full_name, rc);
			spi_dev_put(spi);
			continue;
		}
		spi->chip_select = value;

		/* Mode (clock phase/polarity/etc.) */
		if (of_find_property(nc, "spi-cpha", NULL))
			spi->mode |= SPI_CPHA;
		if (of_find_property(nc, "spi-cpol", NULL))
			spi->mode |= SPI_CPOL;
		if (of_find_property(nc, "spi-cs-high", NULL))
			spi->mode |= SPI_CS_HIGH;
		if (of_find_property(nc, "spi-3wire", NULL))
			spi->mode |= SPI_3WIRE;
		if (of_find_property(nc, "spi-lsb-first", NULL))
			spi->mode |= SPI_LSB_FIRST;

		/* Device DUAL/QUAD mode */
		if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
			switch (value) {
			case 1:
				break;
			case 2:
				spi->mode |= SPI_TX_DUAL;
				break;
			case 4:
				spi->mode |= SPI_TX_QUAD;
				break;
			default:
				dev_warn(&master->dev,
					 "spi-tx-bus-width %d not supported\n",
					 value);
				break;
			}
		}

		if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
			switch (value) {
			case 1:
				break;
			case 2:
				spi->mode |= SPI_RX_DUAL;
				break;
			case 4:
				spi->mode |= SPI_RX_QUAD;
				break;
			default:
				dev_warn(&master->dev,
					 "spi-rx-bus-width %d not supported\n",
					 value);
				break;
			}
		}

		/* Device speed */
		rc = of_property_read_u32(nc, "spi-max-frequency", &value);
		if (rc) {
			dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
				nc->full_name, rc);
			spi_dev_put(spi);
			continue;
		}
		spi->max_speed_hz = value;

		/* IRQ */
		spi->irq = irq_of_parse_and_map(nc, 0);

		/* Store a pointer to the node in the device structure */
		of_node_get(nc);
		spi->dev.of_node = nc;

		/* Register the new device */
		request_module("%s%s", SPI_MODULE_PREFIX, spi->modalias);
		rc = spi_add_device(spi);
		if (rc) {
			dev_err(&master->dev, "spi_device register error %s\n",
				nc->full_name);
			spi_dev_put(spi);
		}

	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif
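
/*
 * Example (illustrative only): a device tree fragment that the parser
 * above would turn into one spi_device on chipselect 0, with SPI mode 3
 * and a 1 MHz ceiling.  Node and compatible names are invented here.
 *
 *	&spi1 {
 *		mychip@0 {
 *			compatible = "vendor,mychip";
 *			reg = <0>;
 *			spi-max-frequency = <1000000>;
 *			spi-cpol;
 *			spi-cpha;
 *		};
 *	};
 */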

#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			spi->chip_select = sb->device_selection;
			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct list_head resource_list;
	struct acpi_device *adev;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	adev->power.flags.ignore_parent = true;
	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */

static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
};



/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.  It returns the SPI
 * master structure on success, else NULL.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() and kfree() to prevent a memory
 * leak.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master *master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = get_device(dev);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);

#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max_t(int, nb, master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif

/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention:  dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
		dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);

static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev:    device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register a SPI device as with spi_register_master() which will
 * automatically be unregistered when @dev is unbound.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
	struct spi_master **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (!ret) {
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);
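
/*
 * Example (illustrative sketch only): the usual allocate/configure/register
 * sequence in a controller driver's probe().  The mychip names and the
 * struct mychip private data are invented for this example.
 */
#if 0	/* compiled out: usage illustration */
static int mychip_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;

	master = spi_alloc_master(&pdev->dev, sizeof(struct mychip));
	if (!master)
		return -ENOMEM;

	master->dev.of_node = pdev->dev.of_node;
	master->num_chipselect = 4;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->transfer_one = mychip_transfer_one;

	/* devres undoes the registration automatically on unbind */
	return devm_spi_register_master(&pdev->dev, master);
}
#endif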

static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);

static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);


/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits, ugly_bits;
	int		status = 0;

	/* check mode to prevent DUAL and QUAD from being set at the same time
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
	    ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
			"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->master->max_speed_hz;

	if (spi->master->setup)
		status = spi->master->setup(spi);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
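
/*
 * Example (illustrative sketch only): a protocol driver adjusting its
 * device's settings from probe().  The values chosen are arbitrary.
 */
#if 0	/* compiled out: usage illustration */
static int example_configure(struct spi_device *spi)
{
	spi->mode = SPI_MODE_0;		/* CPOL=0, CPHA=0 */
	spi->bits_per_word = 16;
	spi->max_speed_hz = 500000;

	return spi_setup(spi);		/* fails cleanly if unsupported */
}
#endif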

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;

		if (master->max_speed_hz &&
		    xfer->speed_hz > master->max_speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (master->bits_per_word_mask) {
			/* Only 32 bits fit in the mask */
			if (xfer->bits_per_word > 32)
				return -EINVAL;
			if (!(master->bits_per_word_mask &
					BIT(xfer->bits_per_word - 1)))
				return -EINVAL;
		}

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && master->min_speed_hz &&
		    xfer->speed_hz < master->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
			    xfer->tx_nbits != SPI_NBITS_DUAL &&
			    xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
			    xfer->rx_nbits != SPI_NBITS_DUAL &&
			    xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	message->spi = spi;

	trace_spi_message_submit(message);

	return master->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
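
/*
 * Example (illustrative sketch only): submitting a message from a context
 * that can't sleep and collecting the result in the completion callback.
 * The example_* names and the caller-provided storage are invented here.
 */
#if 0	/* compiled out: usage illustration */
static void example_complete(void *context)
{
	struct spi_message *m = context;

	/* runs in a context that can't sleep; m->status is now valid */
	pr_debug("async message done, status %d\n", m->status);
}

static int example_start_read(struct spi_device *spi,
			      struct spi_message *m, struct spi_transfer *t,
			      void *rxbuf, size_t len)
{
	spi_message_init(m);
	memset(t, 0, sizeof(*t));
	t->rx_buf = rxbuf;
	t->len = len;
	spi_message_add_tail(t, m);

	m->complete = example_complete;
	m->context = m;

	return spi_async(spi, m);	/* buffers must outlive completion */
}
#endif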
/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code. After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some devices might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code. After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some devices might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);
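
/* Illustrative sketch (not part of the SPI core): submitting a message
 * with spi_async() from a context that cannot sleep. The completion
 * callback also runs in a context that cannot sleep, so it does nothing
 * more than signal a completion; msg.status carries the result. The
 * context structure and names are hypothetical.
 */
struct example_async_ctx {
	struct spi_message	msg;
	struct spi_transfer	xfer;
	struct completion	done;
};

static void example_async_complete(void *context)
{
	struct example_async_ctx *ctx = context;

	/* ctx->msg.status is now 0 or a negative error code */
	complete(&ctx->done);
}

static int __maybe_unused example_async_read(struct spi_device *spi,
					     struct example_async_ctx *ctx,
					     u8 *rx, unsigned len)
{
	spi_message_init(&ctx->msg);
	memset(&ctx->xfer, 0, sizeof(ctx->xfer));
	ctx->xfer.rx_buf = rx;
	ctx->xfer.len = len;
	spi_message_add_tail(&ctx->xfer, &ctx->msg);

	init_completion(&ctx->done);
	ctx->msg.complete = example_async_complete;
	ctx->msg.context = ctx;

	/* Returns once queued; ctx->done is completed from the callback */
	return spi_async(spi, &ctx->msg);
}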

/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core. Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message,
		      int bus_locked)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;

	message->complete = spi_complete;
	message->context = &done;

	if (!bus_locked)
		mutex_lock(&master->bus_lock_mutex);

	status = spi_async_locked(spi, message);

	if (!bus_locked)
		mutex_unlock(&master->bus_lock_mutex);

	if (status == 0) {
		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages. Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip. (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout. Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 1);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);
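
/* Illustrative sketch (not part of the SPI core): spi_sync() with one
 * full-duplex transfer, where the same spi_transfer supplies both tx_buf
 * and rx_buf so a byte is clocked in for every byte clocked out. Note
 * that __spi_validate() rejects this shape on half-duplex links
 * (SPI_3WIRE or SPI_MASTER_HALF_DUPLEX). The function name is
 * hypothetical.
 */
static int __maybe_unused example_full_duplex(struct spi_device *spi,
					      const u8 *tx, u8 *rx,
					      unsigned len)
{
	struct spi_transfer xfer = {
		.tx_buf	= tx,
		.rx_buf	= rx,
		.len	= len,
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	/* Blocks uninterruptibly, with no timeout, until msg completes */
	return spi_sync(spi, &msg);
}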
/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep. The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
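
/* Illustrative sketch (not part of the SPI core): a driver that must run
 * several messages back-to-back with no other device's traffic on the
 * shared bus takes the bus lock, uses only the *_locked transfer calls
 * while it is held, and then releases it. The function name is
 * hypothetical.
 */
static int __maybe_unused example_atomic_sequence(struct spi_device *spi,
						  struct spi_message *m1,
						  struct spi_message *m2)
{
	struct spi_master *master = spi->master;
	int ret;

	spi_bus_lock(master);

	/* While the lock is held, spi_async() from other users fails with
	 * -EBUSY and spi_sync() blocks on bus_lock_mutex; only the
	 * *_locked variants may run here.
	 */
	ret = spi_sync_locked(spi, m1);
	if (ret == 0)
		ret = spi_sync_locked(spi, m2);

	spi_bus_unlock(master);
	return ret;
}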
/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf. The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use the preallocated DMA-safe buffer if we can. We can't avoid
	 * copying here (this is purely a convenience), but we can keep heap
	 * costs out of the hot path unless someone else is using the
	 * preallocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);

/*-------------------------------------------------------------------------*/

static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;
	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);
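
/* Illustrative sketch (not part of the SPI core): a typical use of
 * spi_write_then_read() is a small command/response exchange such as a
 * register read, where neither buffer has to be DMA-safe because the data
 * is bounced through the preallocated buffer above. The register layout
 * and the "read" bit are hypothetical.
 */
static int __maybe_unused example_read_reg(struct spi_device *spi,
					   u8 reg, u8 *val)
{
	u8 cmd = reg | 0x80;	/* hypothetical read-request bit */

	/* One half-duplex message: write 1 byte, then read 1 byte;
	 * both buffers may live on the stack.
	 */
	return spi_write_then_read(spi, &cmd, 1, val, 1);
}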