/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

/* dev->release callback for spi_device: let the controller clean up any
 * per-device state, drop our reference to the controller, free the device.
 */
static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	/* Prefer an ACPI-derived modalias; -ENODEV means "not an ACPI device",
	 * any other return (including other errors) is passed through as-is.
	 */
	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(spi_dev);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

/* Scan a driver's id_table (terminated by an entry with an empty name)
 * for an entry matching the device's modalias.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

/* Bus match: OF match first, then ACPI, then the driver's id_table,
 * and finally a plain modalias-vs-driver-name string compare.
 */
static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device		*spi = to_spi_device(dev);
	int rc;

	/* ACPI devices get an ACPI-formatted MODALIAS; -ENODEV falls through
	 * to the SPI prefix form below.
	 */
	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret =
	      of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	/* Attach to the PM domain before probing; detach again if the
	 * driver's probe fails (but not on -EPROBE_DEFER from attach).
	 */
	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(to_spi_device(dev));
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * spi_register_driver - register a SPI driver
 * @sdrv: the driver to register
 * Context: can sleep
 */
int spi_register_driver(struct spi_driver *sdrv)
{
	sdrv->driver.bus = &spi_bus_type;
	/* Only install wrappers for callbacks the driver actually provides */
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(spi_register_driver);

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Returns a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;

	/* Hold a controller reference for the lifetime of the device;
	 * spidev_release() drops it.
	 */
	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;
	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

/* Set the sysfs/bus name: "spi-<ACPI name>" for ACPI-enumerated devices,
 * otherwise "<master name>.<chip_select>".
 */
static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

/* bus_for_each_dev() callback: reject registration of a second device
 * on the same master/chip-select pair.
 */
static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Returns 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate.
	 */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.
Some development 357 * platforms may not be able to use spi_register_board_info though, and 358 * this is exported so that for example a USB or parport based adapter 359 * driver could add devices (which it would learn about out-of-band). 360 * 361 * Returns the new device, or NULL. 362 */ 363 struct spi_device *spi_new_device(struct spi_master *master, 364 struct spi_board_info *chip) 365 { 366 struct spi_device *proxy; 367 int status; 368 369 /* NOTE: caller did any chip->bus_num checks necessary. 370 * 371 * Also, unless we change the return value convention to use 372 * error-or-pointer (not NULL-or-pointer), troubleshootability 373 * suggests syslogged diagnostics are best here (ugh). 374 */ 375 376 proxy = spi_alloc_device(master); 377 if (!proxy) 378 return NULL; 379 380 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); 381 382 proxy->chip_select = chip->chip_select; 383 proxy->max_speed_hz = chip->max_speed_hz; 384 proxy->mode = chip->mode; 385 proxy->irq = chip->irq; 386 strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias)); 387 proxy->dev.platform_data = (void *) chip->platform_data; 388 proxy->controller_data = chip->controller_data; 389 proxy->controller_state = NULL; 390 391 status = spi_add_device(proxy); 392 if (status < 0) { 393 spi_dev_put(proxy); 394 return NULL; 395 } 396 397 return proxy; 398 } 399 EXPORT_SYMBOL_GPL(spi_new_device); 400 401 static void spi_match_master_to_boardinfo(struct spi_master *master, 402 struct spi_board_info *bi) 403 { 404 struct spi_device *dev; 405 406 if (master->bus_num != bi->bus_num) 407 return; 408 409 dev = spi_new_device(master, bi); 410 if (!dev) 411 dev_err(master->dev.parent, "can't create new device for %s\n", 412 bi->modalias); 413 } 414 415 /** 416 * spi_register_board_info - register SPI devices for a given board 417 * @info: array of chip descriptors 418 * @n: how many descriptors are provided 419 * Context: can sleep 420 * 421 * Board-specific early init code calls this (probably 
during arch_initcall) 422 * with segments of the SPI device table. Any device nodes are created later, 423 * after the relevant parent SPI controller (bus_num) is defined. We keep 424 * this table of devices forever, so that reloading a controller driver will 425 * not make Linux forget about these hard-wired devices. 426 * 427 * Other code can also call this, e.g. a particular add-on board might provide 428 * SPI devices through its expansion connector, so code initializing that board 429 * would naturally declare its SPI devices. 430 * 431 * The board info passed can safely be __initdata ... but be careful of 432 * any embedded pointers (platform_data, etc), they're copied as-is. 433 */ 434 int spi_register_board_info(struct spi_board_info const *info, unsigned n) 435 { 436 struct boardinfo *bi; 437 int i; 438 439 if (!n) 440 return -EINVAL; 441 442 bi = kzalloc(n * sizeof(*bi), GFP_KERNEL); 443 if (!bi) 444 return -ENOMEM; 445 446 for (i = 0; i < n; i++, bi++, info++) { 447 struct spi_master *master; 448 449 memcpy(&bi->board_info, info, sizeof(*info)); 450 mutex_lock(&board_lock); 451 list_add_tail(&bi->list, &board_list); 452 list_for_each_entry(master, &spi_master_list, list) 453 spi_match_master_to_boardinfo(master, &bi->board_info); 454 mutex_unlock(&board_lock); 455 } 456 457 return 0; 458 } 459 460 /*-------------------------------------------------------------------------*/ 461 462 static void spi_set_cs(struct spi_device *spi, bool enable) 463 { 464 if (spi->mode & SPI_CS_HIGH) 465 enable = !enable; 466 467 if (spi->cs_gpio >= 0) 468 gpio_set_value(spi->cs_gpio, !enable); 469 else if (spi->master->set_cs) 470 spi->master->set_cs(spi, !enable); 471 } 472 473 #ifdef CONFIG_HAS_DMA 474 static int spi_map_buf(struct spi_master *master, struct device *dev, 475 struct sg_table *sgt, void *buf, size_t len, 476 enum dma_data_direction dir) 477 { 478 const bool vmalloced_buf = is_vmalloc_addr(buf); 479 const int desc_len = vmalloced_buf ? 
PAGE_SIZE : master->max_dma_len; 480 const int sgs = DIV_ROUND_UP(len, desc_len); 481 struct page *vm_page; 482 void *sg_buf; 483 size_t min; 484 int i, ret; 485 486 ret = sg_alloc_table(sgt, sgs, GFP_KERNEL); 487 if (ret != 0) 488 return ret; 489 490 for (i = 0; i < sgs; i++) { 491 min = min_t(size_t, len, desc_len); 492 493 if (vmalloced_buf) { 494 vm_page = vmalloc_to_page(buf); 495 if (!vm_page) { 496 sg_free_table(sgt); 497 return -ENOMEM; 498 } 499 sg_set_page(&sgt->sgl[i], vm_page, 500 min, offset_in_page(buf)); 501 } else { 502 sg_buf = buf; 503 sg_set_buf(&sgt->sgl[i], sg_buf, min); 504 } 505 506 507 buf += min; 508 len -= min; 509 } 510 511 ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir); 512 if (!ret) 513 ret = -ENOMEM; 514 if (ret < 0) { 515 sg_free_table(sgt); 516 return ret; 517 } 518 519 sgt->nents = ret; 520 521 return 0; 522 } 523 524 static void spi_unmap_buf(struct spi_master *master, struct device *dev, 525 struct sg_table *sgt, enum dma_data_direction dir) 526 { 527 if (sgt->orig_nents) { 528 dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir); 529 sg_free_table(sgt); 530 } 531 } 532 533 static int __spi_map_msg(struct spi_master *master, struct spi_message *msg) 534 { 535 struct device *tx_dev, *rx_dev; 536 struct spi_transfer *xfer; 537 int ret; 538 539 if (!master->can_dma) 540 return 0; 541 542 tx_dev = master->dma_tx->device->dev; 543 rx_dev = master->dma_rx->device->dev; 544 545 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 546 if (!master->can_dma(master, msg->spi, xfer)) 547 continue; 548 549 if (xfer->tx_buf != NULL) { 550 ret = spi_map_buf(master, tx_dev, &xfer->tx_sg, 551 (void *)xfer->tx_buf, xfer->len, 552 DMA_TO_DEVICE); 553 if (ret != 0) 554 return ret; 555 } 556 557 if (xfer->rx_buf != NULL) { 558 ret = spi_map_buf(master, rx_dev, &xfer->rx_sg, 559 xfer->rx_buf, xfer->len, 560 DMA_FROM_DEVICE); 561 if (ret != 0) { 562 spi_unmap_buf(master, tx_dev, &xfer->tx_sg, 563 DMA_TO_DEVICE); 564 return ret; 565 } 566 } 567 } 
568 569 master->cur_msg_mapped = true; 570 571 return 0; 572 } 573 574 static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg) 575 { 576 struct spi_transfer *xfer; 577 struct device *tx_dev, *rx_dev; 578 579 if (!master->cur_msg_mapped || !master->can_dma) 580 return 0; 581 582 tx_dev = master->dma_tx->device->dev; 583 rx_dev = master->dma_rx->device->dev; 584 585 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 586 if (!master->can_dma(master, msg->spi, xfer)) 587 continue; 588 589 spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); 590 spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); 591 } 592 593 return 0; 594 } 595 #else /* !CONFIG_HAS_DMA */ 596 static inline int __spi_map_msg(struct spi_master *master, 597 struct spi_message *msg) 598 { 599 return 0; 600 } 601 602 static inline int spi_unmap_msg(struct spi_master *master, 603 struct spi_message *msg) 604 { 605 return 0; 606 } 607 #endif /* !CONFIG_HAS_DMA */ 608 609 static int spi_map_msg(struct spi_master *master, struct spi_message *msg) 610 { 611 struct spi_transfer *xfer; 612 void *tmp; 613 unsigned int max_tx, max_rx; 614 615 if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { 616 max_tx = 0; 617 max_rx = 0; 618 619 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 620 if ((master->flags & SPI_MASTER_MUST_TX) && 621 !xfer->tx_buf) 622 max_tx = max(xfer->len, max_tx); 623 if ((master->flags & SPI_MASTER_MUST_RX) && 624 !xfer->rx_buf) 625 max_rx = max(xfer->len, max_rx); 626 } 627 628 if (max_tx) { 629 tmp = krealloc(master->dummy_tx, max_tx, 630 GFP_KERNEL | GFP_DMA); 631 if (!tmp) 632 return -ENOMEM; 633 master->dummy_tx = tmp; 634 memset(tmp, 0, max_tx); 635 } 636 637 if (max_rx) { 638 tmp = krealloc(master->dummy_rx, max_rx, 639 GFP_KERNEL | GFP_DMA); 640 if (!tmp) 641 return -ENOMEM; 642 master->dummy_rx = tmp; 643 } 644 645 if (max_tx || max_rx) { 646 list_for_each_entry(xfer, &msg->transfers, 647 transfer_list) { 648 if 
(!xfer->tx_buf) 649 xfer->tx_buf = master->dummy_tx; 650 if (!xfer->rx_buf) 651 xfer->rx_buf = master->dummy_rx; 652 } 653 } 654 } 655 656 return __spi_map_msg(master, msg); 657 } 658 659 /* 660 * spi_transfer_one_message - Default implementation of transfer_one_message() 661 * 662 * This is a standard implementation of transfer_one_message() for 663 * drivers which impelment a transfer_one() operation. It provides 664 * standard handling of delays and chip select management. 665 */ 666 static int spi_transfer_one_message(struct spi_master *master, 667 struct spi_message *msg) 668 { 669 struct spi_transfer *xfer; 670 bool keep_cs = false; 671 int ret = 0; 672 unsigned long ms = 1; 673 674 spi_set_cs(msg->spi, true); 675 676 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 677 trace_spi_transfer_start(msg, xfer); 678 679 if (xfer->tx_buf || xfer->rx_buf) { 680 reinit_completion(&master->xfer_completion); 681 682 ret = master->transfer_one(master, msg->spi, xfer); 683 if (ret < 0) { 684 dev_err(&msg->spi->dev, 685 "SPI transfer failed: %d\n", ret); 686 goto out; 687 } 688 689 if (ret > 0) { 690 ret = 0; 691 ms = xfer->len * 8 * 1000 / xfer->speed_hz; 692 ms += ms + 100; /* some tolerance */ 693 694 ms = wait_for_completion_timeout(&master->xfer_completion, 695 msecs_to_jiffies(ms)); 696 } 697 698 if (ms == 0) { 699 dev_err(&msg->spi->dev, 700 "SPI transfer timed out\n"); 701 msg->status = -ETIMEDOUT; 702 } 703 } else { 704 if (xfer->len) 705 dev_err(&msg->spi->dev, 706 "Bufferless transfer has length %u\n", 707 xfer->len); 708 } 709 710 trace_spi_transfer_stop(msg, xfer); 711 712 if (msg->status != -EINPROGRESS) 713 goto out; 714 715 if (xfer->delay_usecs) 716 udelay(xfer->delay_usecs); 717 718 if (xfer->cs_change) { 719 if (list_is_last(&xfer->transfer_list, 720 &msg->transfers)) { 721 keep_cs = true; 722 } else { 723 spi_set_cs(msg->spi, false); 724 udelay(10); 725 spi_set_cs(msg->spi, true); 726 } 727 } 728 729 msg->actual_length += xfer->len; 730 } 731 
732 out: 733 if (ret != 0 || !keep_cs) 734 spi_set_cs(msg->spi, false); 735 736 if (msg->status == -EINPROGRESS) 737 msg->status = ret; 738 739 if (msg->status && master->handle_err) 740 master->handle_err(master, msg); 741 742 spi_finalize_current_message(master); 743 744 return ret; 745 } 746 747 /** 748 * spi_finalize_current_transfer - report completion of a transfer 749 * @master: the master reporting completion 750 * 751 * Called by SPI drivers using the core transfer_one_message() 752 * implementation to notify it that the current interrupt driven 753 * transfer has finished and the next one may be scheduled. 754 */ 755 void spi_finalize_current_transfer(struct spi_master *master) 756 { 757 complete(&master->xfer_completion); 758 } 759 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); 760 761 /** 762 * __spi_pump_messages - function which processes spi message queue 763 * @master: master to process queue for 764 * @in_kthread: true if we are in the context of the message pump thread 765 * 766 * This function checks if there is any spi message in the queue that 767 * needs processing and if so call out to the driver to initialize hardware 768 * and transfer each message. 769 * 770 * Note that it is called both from the kthread itself and also from 771 * inside spi_sync(); the queue extraction handling at the top of the 772 * function should deal with this safely. 
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&master->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (master->idling) {
		queue_kthread_work(&master->kworker, &master->pump_messages);
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			queue_kthread_work(&master->kworker,
					   &master->pump_messages);
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		master->busy = false;
		master->idling = true;
		spin_unlock_irqrestore(&master->queue_lock, flags);

		/* Teardown runs unlocked; master->idling keeps other
		 * contexts from pumping meanwhile (see check above).
		 */
		kfree(master->dummy_rx);
		master->dummy_rx = NULL;
		kfree(master->dummy_tx);
		master->dummy_tx = NULL;
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);

		spin_lock_irqsave(&master->queue_lock, flags);
		master->idling = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	/* First message after idle: power up and prepare the hardware */
	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
			return;
		}
	}

	trace_spi_message_start(master->cur_msg);

	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			return;
		}
		master->cur_msg_prepared = true;
	}

	ret = spi_map_msg(master, master->cur_msg);
	if (ret) {
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
		return;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue\n");
		return;
	}
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);

	__spi_pump_messages(master, true);
}

static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	master->running = false;
	master->busy = false;

	init_kthread_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return PTR_ERR(master->kworker_task);
	}
	init_kthread_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			"will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&master->queue_lock, flags);
	next = list_first_entry_or_null(&master->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	master->cur_msg = NULL;

	/* Re-kick the pump so the next queued message gets processed */
	queue_kthread_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	spi_unmap_msg(master, mesg);

	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}

	trace_spi_message_done(mesg);

	master->cur_msg_prepared = false;

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	queue_kthread_work(&master->kworker, &master->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret) {
		dev_warn(&master->dev,
			 "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
	int ret;

	ret = spi_stop_queue(master);

	/*
	 * flush_kthread_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&master->dev, "problem destroying queue\n");
		return ret;
	}

	flush_kthread_worker(&master->kworker);
	kthread_stop(master->kworker_task);

	return 0;
}

/* Queue @msg on the master, optionally kicking the message pump.
 * Returns -ESHUTDOWN if the queue is not running.
 */
static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	if (!master->busy && need_pump)
		queue_kthread_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to be queued to the driver queue
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	master->queued = true;
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(master);
err_init_queue:
	return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
/* Instantiate one spi_device from a devicetree child node: modalias,
 * "reg" (chip select), mode flags, bus widths, max frequency and IRQ.
 */
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;
	u32 value;

	/* Alloc an spi_device */
	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "spi_device alloc error for %s\n",
			nc->full_name);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
			      sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&master->dev, "cannot find modalias for %s\n",
			nc->full_name);
		goto err_out;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->chip_select = value;

	/* Mode (clock phase/polarity/etc.)
*/ 1186 if (of_find_property(nc, "spi-cpha", NULL)) 1187 spi->mode |= SPI_CPHA; 1188 if (of_find_property(nc, "spi-cpol", NULL)) 1189 spi->mode |= SPI_CPOL; 1190 if (of_find_property(nc, "spi-cs-high", NULL)) 1191 spi->mode |= SPI_CS_HIGH; 1192 if (of_find_property(nc, "spi-3wire", NULL)) 1193 spi->mode |= SPI_3WIRE; 1194 if (of_find_property(nc, "spi-lsb-first", NULL)) 1195 spi->mode |= SPI_LSB_FIRST; 1196 1197 /* Device DUAL/QUAD mode */ 1198 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 1199 switch (value) { 1200 case 1: 1201 break; 1202 case 2: 1203 spi->mode |= SPI_TX_DUAL; 1204 break; 1205 case 4: 1206 spi->mode |= SPI_TX_QUAD; 1207 break; 1208 default: 1209 dev_warn(&master->dev, 1210 "spi-tx-bus-width %d not supported\n", 1211 value); 1212 break; 1213 } 1214 } 1215 1216 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 1217 switch (value) { 1218 case 1: 1219 break; 1220 case 2: 1221 spi->mode |= SPI_RX_DUAL; 1222 break; 1223 case 4: 1224 spi->mode |= SPI_RX_QUAD; 1225 break; 1226 default: 1227 dev_warn(&master->dev, 1228 "spi-rx-bus-width %d not supported\n", 1229 value); 1230 break; 1231 } 1232 } 1233 1234 /* Device speed */ 1235 rc = of_property_read_u32(nc, "spi-max-frequency", &value); 1236 if (rc) { 1237 dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n", 1238 nc->full_name, rc); 1239 goto err_out; 1240 } 1241 spi->max_speed_hz = value; 1242 1243 /* IRQ */ 1244 spi->irq = irq_of_parse_and_map(nc, 0); 1245 1246 /* Store a pointer to the node in the device structure */ 1247 of_node_get(nc); 1248 spi->dev.of_node = nc; 1249 1250 /* Register the new device */ 1251 rc = spi_add_device(spi); 1252 if (rc) { 1253 dev_err(&master->dev, "spi_device register error %s\n", 1254 nc->full_name); 1255 goto err_out; 1256 } 1257 1258 return spi; 1259 1260 err_out: 1261 spi_dev_put(spi); 1262 return ERR_PTR(rc); 1263 } 1264 1265 /** 1266 * of_register_spi_devices() - Register child devices onto the SPI bus 1267 * 
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;

	/* Nothing to do unless the master was described in the device tree */
	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		/* A failed child is only warned about; keep going so one bad
		 * node does not prevent the remaining slaves registering */
		spi = of_register_spi_device(master, nc);
		if (IS_ERR(spi))
			dev_warn(&master->dev, "Failed to create SPI device for %s\n",
				nc->full_name);
	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif

#ifdef CONFIG_ACPI
/*
 * acpi_spi_add_resource - parse one ACPI resource for an SPI slave
 *
 * Fills in chip select, speed and mode bits from an SpiSerialBus
 * resource, or the IRQ from the first interrupt resource found
 * (spi->irq is preset to -1 by the caller).
 */
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			spi->chip_select = sb->device_selection;
			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

/*
 * acpi_spi_add_device - namespace walk callback that registers one slave
 *
 * Allocates an spi_device for the ACPI node, fills it from the node's
 * resources and registers it.  Always returns AE_OK (except on OOM) so
 * the walk continues past unusable nodes.
 */
static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct list_head resource_list;
	struct acpi_device *adev;
	struct spi_device *spi;
	int ret;

	/* Skip handles with no present ACPI companion */
	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	/* -1 lets acpi_spi_add_resource() take the first IRQ resource */
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	/* A node without a valid connection speed is not a usable slave */
	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	adev->power.flags.ignore_parent = true;
	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

/* Enumerate SPI slaves one level below the master's ACPI handle */
static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */

/* class device release: frees the master allocated by spi_alloc_master() */
static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
};



/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how
much zeroed driver-private data to allocate; the pointer to this 1407 * memory is in the driver_data field of the returned device, 1408 * accessible with spi_master_get_devdata(). 1409 * Context: can sleep 1410 * 1411 * This call is used only by SPI master controller drivers, which are the 1412 * only ones directly touching chip registers. It's how they allocate 1413 * an spi_master structure, prior to calling spi_register_master(). 1414 * 1415 * This must be called from context that can sleep. It returns the SPI 1416 * master structure on success, else NULL. 1417 * 1418 * The caller is responsible for assigning the bus number and initializing 1419 * the master's methods before calling spi_register_master(); and (after errors 1420 * adding the device) calling spi_master_put() and kfree() to prevent a memory 1421 * leak. 1422 */ 1423 struct spi_master *spi_alloc_master(struct device *dev, unsigned size) 1424 { 1425 struct spi_master *master; 1426 1427 if (!dev) 1428 return NULL; 1429 1430 master = kzalloc(size + sizeof(*master), GFP_KERNEL); 1431 if (!master) 1432 return NULL; 1433 1434 device_initialize(&master->dev); 1435 master->bus_num = -1; 1436 master->num_chipselect = 1; 1437 master->dev.class = &spi_master_class; 1438 master->dev.parent = get_device(dev); 1439 spi_master_set_devdata(master, &master[1]); 1440 1441 return master; 1442 } 1443 EXPORT_SYMBOL_GPL(spi_alloc_master); 1444 1445 #ifdef CONFIG_OF 1446 static int of_spi_register_master(struct spi_master *master) 1447 { 1448 int nb, i, *cs; 1449 struct device_node *np = master->dev.of_node; 1450 1451 if (!np) 1452 return 0; 1453 1454 nb = of_gpio_named_count(np, "cs-gpios"); 1455 master->num_chipselect = max_t(int, nb, master->num_chipselect); 1456 1457 /* Return error only for an incorrectly formed cs-gpios property */ 1458 if (nb == 0 || nb == -ENOENT) 1459 return 0; 1460 else if (nb < 0) 1461 return nb; 1462 1463 cs = devm_kzalloc(&master->dev, 1464 sizeof(int) * master->num_chipselect, 1465 
GFP_KERNEL); 1466 master->cs_gpios = cs; 1467 1468 if (!master->cs_gpios) 1469 return -ENOMEM; 1470 1471 for (i = 0; i < master->num_chipselect; i++) 1472 cs[i] = -ENOENT; 1473 1474 for (i = 0; i < nb; i++) 1475 cs[i] = of_get_named_gpio(np, "cs-gpios", i); 1476 1477 return 0; 1478 } 1479 #else 1480 static int of_spi_register_master(struct spi_master *master) 1481 { 1482 return 0; 1483 } 1484 #endif 1485 1486 /** 1487 * spi_register_master - register SPI master controller 1488 * @master: initialized master, originally from spi_alloc_master() 1489 * Context: can sleep 1490 * 1491 * SPI master controllers connect to their drivers using some non-SPI bus, 1492 * such as the platform bus. The final stage of probe() in that code 1493 * includes calling spi_register_master() to hook up to this SPI bus glue. 1494 * 1495 * SPI controllers use board specific (often SOC specific) bus numbers, 1496 * and board-specific addressing for SPI devices combines those numbers 1497 * with chip select numbers. Since SPI does not directly support dynamic 1498 * device identification, boards need configuration tables telling which 1499 * chip is at which address. 1500 * 1501 * This must be called from context that can sleep. It returns zero on 1502 * success, else a negative error code (dropping the master's refcount). 1503 * After a successful return, the caller is responsible for calling 1504 * spi_unregister_master(). 
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	/* Prefer a bus number from the "spi" DT alias, if one exists */
	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention: dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);
	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);

/* devres release callback: unregisters the managed master */
static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev: device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register a SPI device as with spi_register_master() which will
 * automatically be unregistered when @dev is unbound.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
	struct spi_master **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (!ret) {
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);

/* device iterator helper: unregister one spi_device child of the master */
static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being
 * unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	/* Stop and tear down the message queue first, if one was started */
	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	/* Unregister all child spi_devices before the master itself */
	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

/* Suspend the master by stopping its message queue (no-op if unqueued) */
int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

/* Resume the master by restarting its message queue (no-op if unqueued) */
int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);

/* class_find_device() match callback: compare by bus number */
static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);


/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits, ugly_bits;
	int		status = 0;

	/* check mode to prevent that DUAL and QUAD set at the same time
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
		"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	ugly_bits = bad_bits &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		/* Unsupported DUAL/QUAD bits are only warned about and
		 * dropped, since single-wire transfers still work */
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->master->max_speed_hz;

	/* Deselect the device before handing it to the driver's setup hook */
	spi_set_cs(spi, false);

	if (spi->master->setup)
		status = spi->master->setup(spi);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);

/*
 * __spi_validate - sanity-check and normalize a message before transfer
 * @spi: device the message is addressed to
 * @message: message to validate; per-transfer defaults are filled in here
 *
 * Returns zero if the message is acceptable, else -EINVAL.
 */
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;

		/* Clamp the per-transfer speed to the controller maximum */
		if (master->max_speed_hz &&
		    xfer->speed_hz > master->max_speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (master->bits_per_word_mask) {
			/* Only 32 bits fit in the mask */
			if (xfer->bits_per_word > 32)
				return -EINVAL;
			if (!(master->bits_per_word_mask &
					BIT(xfer->bits_per_word - 1)))
				return -EINVAL;
		}

		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && master->min_speed_hz &&
		    xfer->speed_hz < master->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
				xfer->tx_nbits != SPI_NBITS_DUAL &&
				xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
				xfer->rx_nbits != SPI_NBITS_DUAL &&
				xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}

/* Hand a validated message to the controller's transfer method.
 * Caller holds bus_lock_spinlock (or is otherwise serialized). */
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	message->spi = spi;

	trace_spi_message_submit(message);

	return master->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.
After that 1940 * callback returns, the driver which issued the transfer request may 1941 * deallocate the associated memory; it's no longer in use by any SPI 1942 * core or controller driver code. 1943 * 1944 * Note that although all messages to a spi_device are handled in 1945 * FIFO order, messages may go to different devices in other orders. 1946 * Some device might be higher priority, or have various "hard" access 1947 * time requirements, for example. 1948 * 1949 * On detection of any fault during the transfer, processing of 1950 * the entire message is aborted, and the device is deselected. 1951 * Until returning from the associated message completion callback, 1952 * no other spi_message queued to that device will be processed. 1953 * (This rule applies equally to all the synchronous transfer calls, 1954 * which are wrappers around this core asynchronous primitive.) 1955 */ 1956 int spi_async(struct spi_device *spi, struct spi_message *message) 1957 { 1958 struct spi_master *master = spi->master; 1959 int ret; 1960 unsigned long flags; 1961 1962 ret = __spi_validate(spi, message); 1963 if (ret != 0) 1964 return ret; 1965 1966 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 1967 1968 if (master->bus_lock_flag) 1969 ret = -EBUSY; 1970 else 1971 ret = __spi_async(spi, message); 1972 1973 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 1974 1975 return ret; 1976 } 1977 EXPORT_SYMBOL_GPL(spi_async); 1978 1979 /** 1980 * spi_async_locked - version of spi_async with exclusive bus usage 1981 * @spi: device with which data will be exchanged 1982 * @message: describes the data transfers, including completion callback 1983 * Context: any (irqs may be blocked, etc) 1984 * 1985 * This call may be used in_irq and other contexts which can't sleep, 1986 * as well as from task contexts which can sleep. 1987 * 1988 * The completion callback is invoked in a context which can't sleep. 1989 * Before that invocation, the value of message->status is undefined. 
1990 * When the callback is issued, message->status holds either zero (to 1991 * indicate complete success) or a negative error code. After that 1992 * callback returns, the driver which issued the transfer request may 1993 * deallocate the associated memory; it's no longer in use by any SPI 1994 * core or controller driver code. 1995 * 1996 * Note that although all messages to a spi_device are handled in 1997 * FIFO order, messages may go to different devices in other orders. 1998 * Some device might be higher priority, or have various "hard" access 1999 * time requirements, for example. 2000 * 2001 * On detection of any fault during the transfer, processing of 2002 * the entire message is aborted, and the device is deselected. 2003 * Until returning from the associated message completion callback, 2004 * no other spi_message queued to that device will be processed. 2005 * (This rule applies equally to all the synchronous transfer calls, 2006 * which are wrappers around this core asynchronous primitive.) 2007 */ 2008 int spi_async_locked(struct spi_device *spi, struct spi_message *message) 2009 { 2010 struct spi_master *master = spi->master; 2011 int ret; 2012 unsigned long flags; 2013 2014 ret = __spi_validate(spi, message); 2015 if (ret != 0) 2016 return ret; 2017 2018 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2019 2020 ret = __spi_async(spi, message); 2021 2022 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2023 2024 return ret; 2025 2026 } 2027 EXPORT_SYMBOL_GPL(spi_async_locked); 2028 2029 2030 /*-------------------------------------------------------------------------*/ 2031 2032 /* Utility methods for SPI master protocol drivers, layered on 2033 * top of the core. Some other utility methods are defined as 2034 * inline functions. 
 */

/* completion trampoline passed as message->complete */
static void spi_complete(void *arg)
{
	complete(arg);
}

/*
 * __spi_sync - common implementation of spi_sync() and spi_sync_locked()
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * @bus_locked: nonzero when the caller already holds bus_lock_mutex
 *
 * Submits the message and blocks until its completion callback fires.
 * Returns the final message->status, or a negative error code if the
 * message failed validation or submission.
 */
static int __spi_sync(struct spi_device *spi, struct spi_message *message,
		      int bus_locked)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	if (!bus_locked)
		mutex_lock(&master->bus_lock_mutex);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context so special case.
	 * This code would be less tricky if we could remove the
	 * support for driver implemented message queues.
	 */
	if (master->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&master->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		/* need_pump=false: we pump ourselves below */
		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (!bus_locked)
		mutex_unlock(&master->bus_lock_mutex);

	if (status == 0) {
		/* Push out the messages in the calling context if we
		 * can.
		 */
		if (master->transfer == spi_queued_transfer)
			__spi_pump_messages(master, false);

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 1);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	/* bus_lock_flag makes spi_async() fail with -EBUSY while held */
	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);

/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

/* preallocated DMA-safe scratch buffer shared by spi_write_then_read() */
static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here, (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	/* NOTE(review): when n_tx == 0 a NULL txbuf may reach this memcpy;
	 * a zero-length copy is assumed harmless here -- confirm callers */
	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	/* release whichever buffer was used: shared scratch or heap copy */
	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);

/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* bus_find_device() match callback: compare by OF node */
static int __spi_of_device_match(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/* must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
						__spi_of_device_match);
	return dev ?
to_spi_device(dev) : NULL; 2290 } 2291 2292 static int __spi_of_master_match(struct device *dev, const void *data) 2293 { 2294 return dev->of_node == data; 2295 } 2296 2297 /* the spi masters are not using spi_bus, so we find it with another way */ 2298 static struct spi_master *of_find_spi_master_by_node(struct device_node *node) 2299 { 2300 struct device *dev; 2301 2302 dev = class_find_device(&spi_master_class, NULL, node, 2303 __spi_of_master_match); 2304 if (!dev) 2305 return NULL; 2306 2307 /* reference got in class_find_device */ 2308 return container_of(dev, struct spi_master, dev); 2309 } 2310 2311 static int of_spi_notify(struct notifier_block *nb, unsigned long action, 2312 void *arg) 2313 { 2314 struct of_reconfig_data *rd = arg; 2315 struct spi_master *master; 2316 struct spi_device *spi; 2317 2318 switch (of_reconfig_get_state_change(action, arg)) { 2319 case OF_RECONFIG_CHANGE_ADD: 2320 master = of_find_spi_master_by_node(rd->dn->parent); 2321 if (master == NULL) 2322 return NOTIFY_OK; /* not for us */ 2323 2324 spi = of_register_spi_device(master, rd->dn); 2325 put_device(&master->dev); 2326 2327 if (IS_ERR(spi)) { 2328 pr_err("%s: failed to create for '%s'\n", 2329 __func__, rd->dn->full_name); 2330 return notifier_from_errno(PTR_ERR(spi)); 2331 } 2332 break; 2333 2334 case OF_RECONFIG_CHANGE_REMOVE: 2335 /* find our device by node */ 2336 spi = of_find_spi_device_by_node(rd->dn); 2337 if (spi == NULL) 2338 return NOTIFY_OK; /* no? 
not meant for us */ 2339 2340 /* unregister takes one ref away */ 2341 spi_unregister_device(spi); 2342 2343 /* and put the reference of the find */ 2344 put_device(&spi->dev); 2345 break; 2346 } 2347 2348 return NOTIFY_OK; 2349 } 2350 2351 static struct notifier_block spi_of_notifier = { 2352 .notifier_call = of_spi_notify, 2353 }; 2354 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 2355 extern struct notifier_block spi_of_notifier; 2356 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 2357 2358 static int __init spi_init(void) 2359 { 2360 int status; 2361 2362 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 2363 if (!buf) { 2364 status = -ENOMEM; 2365 goto err0; 2366 } 2367 2368 status = bus_register(&spi_bus_type); 2369 if (status < 0) 2370 goto err1; 2371 2372 status = class_register(&spi_master_class); 2373 if (status < 0) 2374 goto err2; 2375 2376 if (IS_ENABLED(CONFIG_OF_DYNAMIC)) 2377 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); 2378 2379 return 0; 2380 2381 err2: 2382 bus_unregister(&spi_bus_type); 2383 err1: 2384 kfree(buf); 2385 buf = NULL; 2386 err0: 2387 return status; 2388 } 2389 2390 /* board_info is normally registered in arch_initcall(), 2391 * but even essential drivers wait till later 2392 * 2393 * REVISIT only boardinfo really needs static linking. the rest (device and 2394 * driver registration) _could_ be dynamically linked (modular) ... costs 2395 * include needing to have boardinfo data structures be much more public. 2396 */ 2397 postcore_initcall(spi_init); 2398 2399