/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(spi_dev);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
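/*
 * Illustrative example (not part of this file): a protocol driver makes
 * itself matchable by modalias by exporting an id table.  The chip names
 * used below ("acme-codec", ...) are hypothetical.
 *
 *	static const struct spi_device_id acme_spi_ids[] = {
 *		{ "acme-codec",  0 },
 *		{ "acme-codec2", 1 },
 *		{ }	// empty name terminates the table
 *	};
 *	MODULE_DEVICE_TABLE(spi, acme_spi_ids);
 *
 * MODULE_DEVICE_TABLE() emits the alias strings ("spi:acme-codec", ...)
 * that "modprobe $MODALIAS" resolves, and spi_match_id() below walks the
 * same table when binding a registered spi_device to this driver.
 */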

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(to_spi_device(dev));
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * spi_register_driver - register a SPI driver
 * @sdrv: the driver to register
 * Context: can sleep
 */
int spi_register_driver(struct spi_driver *sdrv)
{
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(spi_register_driver);
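/*
 * Illustrative example (not part of this file): registration from a
 * hypothetical protocol driver.  "acme_spi_driver" and its callbacks are
 * made-up names; module_spi_driver() expands to the module init/exit
 * boilerplate that calls spi_register_driver()/spi_unregister_driver().
 *
 *	static struct spi_driver acme_spi_driver = {
 *		.driver = {
 *			.name = "acme-codec",
 *		},
 *		.id_table = acme_spi_ids,
 *		.probe	  = acme_probe,
 *		.remove	  = acme_remove,
 *	};
 *	module_spi_driver(acme_spi_driver);
 */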

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into something like
 * arch/.../mach.../board-YYY.c with other readonly (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Returns a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device *spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;
	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Returns 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Returns the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device *proxy;
	int status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);
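/*
 * Illustrative example (not part of this file): an adapter driver that
 * learns about an attached chip out-of-band could instantiate it with
 * spi_new_device().  All names and values below are hypothetical.
 *
 *	struct spi_board_info chip = {
 *		.modalias     = "acme-codec",
 *		.max_speed_hz = 1000000,
 *		.chip_select  = 0,
 *		.mode	      = SPI_MODE_0,
 *	};
 *	struct spi_device *dev = spi_new_device(adapter_master, &chip);
 *	if (!dev)
 *		return -ENODEV;
 *
 * The lower-level alternative is spi_alloc_device(), filling in the fields
 * by hand, and then spi_add_device(); spi_dev_put() discards a device that
 * was never added.
 */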

static void spi_match_master_to_boardinfo(struct spi_master *master,
					  struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return -EINVAL;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
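/*
 * Illustrative example (not part of this file): board init code declaring a
 * hard-wired SPI device.  The board name, bus number and chip data are
 * hypothetical; the call would typically come from an arch_initcall() in
 * the board file.
 *
 *	static struct spi_board_info acme_board_spi_devs[] __initdata = {
 *		{
 *			.modalias     = "acme-codec",
 *			.max_speed_hz = 2000000,
 *			.bus_num      = 1,
 *			.chip_select  = 0,
 *			.mode	      = SPI_MODE_3,
 *		},
 *	};
 *
 *	spi_register_board_info(acme_board_spi_devs,
 *				ARRAY_SIZE(acme_board_spi_devs));
 */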

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpio >= 0)
		gpio_set_value(spi->cs_gpio, !enable);
	else if (spi->master->set_cs)
		spi->master->set_cs(spi, !enable);
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
	const int sgs = DIV_ROUND_UP(len, desc_len);
	struct page *vm_page;
	void *sg_buf;
	size_t min;
	int i, ret;

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	for (i = 0; i < sgs; i++) {
		min = min_t(size_t, len, desc_len);

		if (vmalloced_buf) {
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(&sgt->sgl[i], vm_page,
				    min, offset_in_page(buf));
		} else {
			sg_buf = buf;
			sg_set_buf(&sgt->sgl[i], sg_buf, min);
		}

		buf += min;
		len -= min;
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!master->can_dma)
		return 0;

	tx_dev = master->dma_tx->device->dev;
	rx_dev = master->dma_rx->device->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}

static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || !master->can_dma)
		return 0;

	tx_dev = master->dma_tx->device->dev;
	rx_dev = master->dma_rx->device->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == master->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == master->dummy_rx)
			xfer->rx_buf = NULL;

		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}

static inline int spi_unmap_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	return __spi_map_msg(master, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long ms = 1;

	spi_set_cs(msg->spi, true);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&master->xfer_completion);

			ret = master->transfer_one(master, msg->spi, xfer);
			if (ret < 0) {
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = xfer->len * 8 * 1000 / xfer->speed_hz;
				ms += ms + 100; /* some tolerance */

				ms = wait_for_completion_timeout(&master->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && master->handle_err)
		master->handle_err(master, msg);

	spi_finalize_current_message(master);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
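/*
 * Illustrative example (not part of this file): a minimal sketch of how a
 * controller driver plugs into the core transfer loop above.  The hardware
 * accessors (acme_start_dma(), acme_irq_handler(), struct acme_priv) are
 * hypothetical.
 *
 *	static int acme_transfer_one(struct spi_master *master,
 *				     struct spi_device *spi,
 *				     struct spi_transfer *xfer)
 *	{
 *		struct acme_priv *priv = spi_master_get_devdata(master);
 *
 *		acme_start_dma(priv, xfer);	// kick off the transfer
 *		return 1;			// >0: core waits for completion
 *	}
 *
 *	static irqreturn_t acme_irq_handler(int irq, void *dev_id)
 *	{
 *		struct spi_master *master = dev_id;
 *
 *		// ... acknowledge the hardware, then: ...
 *		spi_finalize_current_transfer(master);
 *		return IRQ_HANDLED;
 *	}
 *
 * Returning 0 from transfer_one() instead means the transfer finished
 * synchronously and the core does not wait for the completion.
 */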

/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&master->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (master->idling) {
		queue_kthread_work(&master->kworker, &master->pump_messages);
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			queue_kthread_work(&master->kworker,
					   &master->pump_messages);
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		master->busy = false;
		master->idling = true;
		spin_unlock_irqrestore(&master->queue_lock, flags);

		kfree(master->dummy_rx);
		master->dummy_rx = NULL;
		kfree(master->dummy_tx);
		master->dummy_tx = NULL;
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);

		spin_lock_irqsave(&master->queue_lock, flags);
		master->idling = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
			return;
		}
	}

	trace_spi_message_start(master->cur_msg);

	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			return;
		}
		master->cur_msg_prepared = true;
	}

	ret = spi_map_msg(master, master->cur_msg);
	if (ret) {
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
		return;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue\n");
		return;
	}
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);

	__spi_pump_messages(master, true);
}

static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	master->running = false;
	master->busy = false;

	init_kthread_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return PTR_ERR(master->kworker_task);
	}
	init_kthread_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread.  Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			 "will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&master->queue_lock, flags);
	next = list_first_entry_or_null(&master->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	master->cur_msg = NULL;

	queue_kthread_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	spi_unmap_msg(master, mesg);

	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}

	trace_spi_message_done(mesg);

	master->cur_msg_prepared = false;

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	queue_kthread_work(&master->kworker, &master->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message.  Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret) {
		dev_warn(&master->dev,
			 "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
	int ret;

	ret = spi_stop_queue(master);

	/*
	 * flush_kthread_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&master->dev, "problem destroying queue\n");
		return ret;
	}

	flush_kthread_worker(&master->kworker);
	kthread_stop(master->kworker_task);

	return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	if (!master->busy && need_pump)
		queue_kthread_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message to be handled; it is queued onto the driver queue
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	master->queued = true;
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(master);
err_init_queue:
	return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;
	u32 value;

	/* Alloc an spi_device */
	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "spi_device alloc error for %s\n",
			nc->full_name);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
			      sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&master->dev, "cannot find modalias for %s\n",
			nc->full_name);
		goto err_out;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->chip_select = value;

	/* Mode (clock phase/polarity/etc.)
	 */
	if (of_find_property(nc, "spi-cpha", NULL))
		spi->mode |= SPI_CPHA;
	if (of_find_property(nc, "spi-cpol", NULL))
		spi->mode |= SPI_CPOL;
	if (of_find_property(nc, "spi-cs-high", NULL))
		spi->mode |= SPI_CS_HIGH;
	if (of_find_property(nc, "spi-3wire", NULL))
		spi->mode |= SPI_3WIRE;
	if (of_find_property(nc, "spi-lsb-first", NULL))
		spi->mode |= SPI_LSB_FIRST;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				 "spi-tx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				 "spi-rx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->max_speed_hz = value;

	/* IRQ */
	spi->irq = irq_of_parse_and_map(nc, 0);

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&master->dev, "spi_device register error %s\n",
			nc->full_name);
		goto err_out;
	}

	return spi;

err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}
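/*
 * Illustrative example (not part of this file): a device tree fragment,
 * with made-up node and compatible names, using the properties parsed
 * above.  Each child node of the controller that carries a "reg" property
 * becomes one spi_device.
 *
 *	&spi1 {
 *		codec@0 {
 *			compatible = "acme,codec";
 *			reg = <0>;			// chip select
 *			spi-max-frequency = <2000000>;
 *			spi-cpha;
 *			spi-cs-high;
 *		};
 *	};
 */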

/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		spi = of_register_spi_device(master, nc);
		if (IS_ERR(spi))
			dev_warn(&master->dev, "Failed to create SPI device for %s\n",
				 nc->full_name);
	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif

#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			spi->chip_select = sb->device_selection;
			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct list_head resource_list;
	struct acpi_device *adev;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	adev->power.flags.ignore_parent = true;
	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */

static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
};



/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.  It returns the SPI
 * master structure on success, else NULL.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() and kfree() to prevent a memory
 * leak.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master *master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = get_device(dev);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);
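/*
 * Illustrative example (not part of this file): the usual shape of a
 * controller driver's probe(), allocating a master and registering it in
 * the managed fashion (devm_spi_register_master(), defined further below).
 * The "acme" names, the private struct and the chosen values are
 * hypothetical; acme_transfer_one() is the transfer_one() sketch shown
 * earlier.
 *
 *	static int acme_spi_probe(struct platform_device *pdev)
 *	{
 *		struct spi_master *master;
 *		struct acme_priv *priv;
 *		int ret;
 *
 *		master = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *		if (!master)
 *			return -ENOMEM;
 *
 *		master->dev.of_node = pdev->dev.of_node;
 *		master->num_chipselect = 4;
 *		master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *		master->transfer_one = acme_transfer_one;
 *
 *		ret = devm_spi_register_master(&pdev->dev, master);
 *		if (ret)
 *			spi_master_put(master);
 *		return ret;
 *	}
 */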

#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max_t(int, nb, master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif

/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device *dev = master->dev.parent;
	struct boardinfo *bi;
	int status = -ENODEV;
	int dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention:  dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);
	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);

static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev:    device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register a SPI master controller as with spi_register_master(); the
 * master will automatically be unregistered when @dev is unbound.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
	struct spi_master **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (!ret) {
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);

static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);

static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device *dev;
	struct spi_master *master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);


/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned bad_bits, ugly_bits;
	int status = 0;

	/* Check the mode to prevent DUAL and QUAD from being set at the
	 * same time.
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
	    ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
			"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* In SPI_3WIRE mode, DUAL and QUAD are forbidden.
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
	    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->master->max_speed_hz;

	spi_set_cs(spi, false);

	if (spi->master->setup)
		status = spi->master->setup(spi);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
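/*
 * Illustrative example (not part of this file): a protocol driver's probe()
 * adjusting device settings before its first transfer.  The values and the
 * acme_init_chip() helper are hypothetical.
 *
 *	static int acme_probe(struct spi_device *spi)
 *	{
 *		int ret;
 *
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 1000000;
 *
 *		ret = spi_setup(spi);
 *		if (ret < 0)
 *			return ret;
 *
 *		return acme_init_chip(spi);
 *	}
 */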

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;

		if (master->max_speed_hz &&
		    xfer->speed_hz > master->max_speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (master->bits_per_word_mask) {
			/* Only 32 bits fit in the mask */
			if (xfer->bits_per_word > 32)
				return -EINVAL;
			if (!(master->bits_per_word_mask &
					BIT(xfer->bits_per_word - 1)))
				return -EINVAL;
		}

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && master->min_speed_hz &&
		    xfer->speed_hz < master->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
			    xfer->tx_nbits != SPI_NBITS_DUAL &&
			    xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
			    xfer->rx_nbits != SPI_NBITS_DUAL &&
			    xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	message->spi = spi;

	trace_spi_message_submit(message);

	return master->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);


/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message,
		      int bus_locked)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	if (!bus_locked)
		mutex_lock(&master->bus_lock_mutex);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context, so special-case that
	 * path here.  This code would be less tricky if we could remove
	 * the support for driver-implemented message queues.
	 */
	if (master->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&master->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (!bus_locked)
		mutex_unlock(&master->bus_lock_mutex);

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (master->transfer == spi_queued_transfer)
			__spi_pump_messages(master, false);

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);
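
/*
 * Example usage (illustrative sketch only, not part of this file): build a
 * two-transfer message (write a command, then read a reply) and run it
 * synchronously.  The buffers are assumed to be DMA-safe (e.g. kmalloc'd by
 * the caller), since low-overhead controllers may DMA directly into them.
 * "my_cmd_response" and its parameters are assumptions for illustration.
 *
 *	static int my_cmd_response(struct spi_device *spi,
 *				   const u8 *cmd, size_t cmd_len,
 *				   u8 *resp, size_t resp_len)
 *	{
 *		struct spi_transfer t[2];
 *		struct spi_message m;
 *
 *		memset(t, 0, sizeof(t));
 *		t[0].tx_buf = cmd;
 *		t[0].len = cmd_len;
 *		t[1].rx_buf = resp;
 *		t[1].len = resp_len;
 *
 *		spi_message_init(&m);
 *		spi_message_add_tail(&t[0], &m);
 *		spi_message_add_tail(&t[1], &m);
 *
 *		return spi_sync(spi, &m);	// sleeps until the message completes
 *	}
 */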

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 1);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
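
/*
 * Example usage (illustrative sketch only, not part of this file): a driver
 * that must issue several messages back to back, without any other device
 * getting onto the bus in between, can bracket them with the bus lock.  The
 * messages m1/m2 are assumed to have been prepared by the caller, and
 * "my_locked_sequence" is a hypothetical name.
 *
 *	static int my_locked_sequence(struct spi_device *spi,
 *				      struct spi_message *m1,
 *				      struct spi_message *m2)
 *	{
 *		int ret;
 *
 *		spi_bus_lock(spi->master);	// exclusive bus access starts
 *
 *		ret = spi_sync_locked(spi, m1);
 *		if (!ret)
 *			ret = spi_sync_locked(spi, m2);
 *
 *		spi_bus_unlock(spi->master);	// other devices may use the bus again
 *		return ret;
 *	}
 */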

/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use the pre-allocated DMA-safe buffer if we can.  We can't avoid
	 * copying here (this helper is a pure convenience), but we can keep
	 * heap costs out of the hot path unless someone else is using the
	 * pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
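
/*
 * Example usage (illustrative sketch only, not part of this file): a common
 * idiom is reading a register by writing a one-byte command and reading the
 * reply, which is exactly the half-duplex pattern this helper implements.
 * The register/command layout ("read" bit in 0x80) is a made-up assumption.
 *
 *	static int my_read_reg(struct spi_device *spi, u8 reg, u8 *val)
 *	{
 *		u8 cmd = reg | 0x80;		// hypothetical "read" bit
 *
 *		// txbuf/rxbuf need not be DMA-safe; the core copies them
 *		return spi_write_then_read(spi, &cmd, 1, val, 1);
 *	}
 */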

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int __spi_of_device_match(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/* must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
						__spi_of_device_match);
	return dev ? to_spi_device(dev) : NULL;
}

static int __spi_of_master_match(struct device *dev, const void *data)
{
	return dev->of_node == data;
}

/* SPI masters are not on the spi_bus, so we have to find them another way */
static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, node,
				__spi_of_master_match);
	if (!dev)
		return NULL;

	/* reference got in class_find_device */
	return container_of(dev, struct spi_master, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_master *master;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		master = of_find_spi_master_by_node(rd->dn->parent);
		if (master == NULL)
			return NOTIFY_OK;	/* not for us */

		spi = of_register_spi_device(master, rd->dn);
		put_device(&master->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%s'\n",
					__func__, rd->dn->full_name);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and drop the reference taken by the find above */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */

static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));

	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking.  The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);