/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

#define SPI_STATISTICS_ATTRS(field, file) \
static ssize_t spi_master_##field##_show(struct device *dev, \
					 struct device_attribute *attr, \
					 char *buf) \
{ \
	struct spi_master *master = container_of(dev, \
						 struct spi_master, dev); \
	return spi_statistics_##field##_show(&master->statistics, buf); \
} \
static struct device_attribute dev_attr_spi_master_##field = { \
	.attr = { .name = file, .mode = S_IRUGO }, \
	.show = spi_master_##field##_show, \
}; \
static ssize_t spi_device_##field##_show(struct device *dev, \
					 struct device_attribute *attr, \
					 char *buf) \
{ \
	struct spi_device *spi = container_of(dev, \
					      struct spi_device, dev); \
	return spi_statistics_##field##_show(&spi->statistics, buf); \
} \
static struct device_attribute dev_attr_spi_device_##field = { \
	.attr = { .name = file, .mode = S_IRUGO }, \
	.show = spi_device_##field##_show, \
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf) \
{ \
	unsigned long flags; \
	ssize_t len; \
	spin_lock_irqsave(&stat->lock, flags); \
	len = sprintf(buf, format_string, stat->field); \
	spin_unlock_irqrestore(&stat->lock, flags); \
	return len; \
} \
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string) \
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \
				 field, format_string)
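
/*
 * For reference, SPI_STATISTICS_SHOW(messages, "%lu") below roughly expands
 * to the following (a sketch of the generated code, not a literal copy):
 *
 *	static ssize_t spi_statistics_messages_show(struct spi_statistics *stat,
 *						    char *buf)
 *	{
 *		unsigned long flags;
 *		ssize_t len;
 *		spin_lock_irqsave(&stat->lock, flags);
 *		len = sprintf(buf, "%lu", stat->messages);
 *		spin_unlock_irqrestore(&stat->lock, flags);
 *		return len;
 *	}
 *
 * plus the matching spi_master_messages_show()/spi_device_messages_show()
 * wrappers and their device_attribute definitions, all named "messages".
 */
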
SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name = "statistics",
	.attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_master_statistics_attrs[] = {
	&dev_attr_spi_master_messages.attr,
	&dev_attr_spi_master_transfers.attr,
	&dev_attr_spi_master_errors.attr,
	&dev_attr_spi_master_timedout.attr,
	&dev_attr_spi_master_spi_sync.attr,
	&dev_attr_spi_master_spi_sync_immediate.attr,
	&dev_attr_spi_master_spi_async.attr,
	&dev_attr_spi_master_bytes.attr,
	&dev_attr_spi_master_bytes_rx.attr,
	&dev_attr_spi_master_bytes_tx.attr,
	NULL,
};

static const struct attribute_group spi_master_statistics_group = {
	.name = "statistics",
	.attrs = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_master_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != master->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != master->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
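
/*
 * For example, a device whose modalias is "fake_chip" (a hypothetical name)
 * reports MODALIAS=spi:fake_chip via uevent and sysfs, so userspace can run
 * "modprobe spi:fake_chip" to load the matching driver automatically.
 */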

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(to_spi_device(dev));
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * spi_register_driver - register a SPI driver
 * @sdrv: the driver to register
 * Context: can sleep
 */
int spi_register_driver(struct spi_driver *sdrv)
{
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(spi_register_driver);

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into files like
 * arch/.../mach.../board-YYY.c with other readonly (flashable) information
 * about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Returns a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device *spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Returns 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
			spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Returns the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device *proxy;
	int status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
					  struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return -EINVAL;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpio >= 0)
		gpio_set_value(spi->cs_gpio, !enable);
	else if (spi->master->set_cs)
		spi->master->set_cs(spi, !enable);
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	int desc_len;
	int sgs;
	struct page *vm_page;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf) {
		desc_len = PAGE_SIZE;
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else {
		desc_len = master->max_dma_len;
		sgs = DIV_ROUND_UP(len, desc_len);
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf) {
			min = min_t(size_t,
				    len, desc_len - offset_in_page(buf));
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(&sgt->sgl[i], vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(&sgt->sgl[i], sg_buf, min);
		}


		buf += min;
		len -= min;
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = &master->dev;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = &master->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || !master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = &master->dev;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = &master->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_master *master,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == master->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == master->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(master, msg);
}

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	return __spi_map_msg(master, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long ms = 1;
	struct spi_statistics *statm = &master->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, master);
		spi_statistics_add_transfer_stats(stats, xfer, master);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&master->xfer_completion);

			ret = master->transfer_one(master, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = xfer->len * 8 * 1000 / xfer->speed_hz;
				ms += ms + 100; /* some tolerance */

				ms = wait_for_completion_timeout(&master->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       timedout);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       timedout);
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && master->handle_err)
		master->handle_err(master, msg);

	spi_finalize_current_message(master);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);

/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&master->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (master->idling) {
		queue_kthread_work(&master->kworker, &master->pump_messages);
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			queue_kthread_work(&master->kworker,
					   &master->pump_messages);
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		master->busy = false;
		master->idling = true;
		spin_unlock_irqrestore(&master->queue_lock, flags);

		kfree(master->dummy_rx);
		master->dummy_rx = NULL;
		kfree(master->dummy_tx);
		master->dummy_tx = NULL;
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);

		spin_lock_irqsave(&master->queue_lock, flags);
		master->idling = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
			return;
		}
	}

	trace_spi_message_start(master->cur_msg);

	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			return;
		}
		master->cur_msg_prepared = true;
	}

	ret = spi_map_msg(master, master->cur_msg);
	if (ret) {
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
		return;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue\n");
		return;
	}
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);

	__spi_pump_messages(master, true);
}

static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	master->running = false;
	master->busy = false;

	init_kthread_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return PTR_ERR(master->kworker_task);
	}
	init_kthread_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			"will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&master->queue_lock, flags);
	next = list_first_entry_or_null(&master->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	spi_unmap_msg(master, mesg);

	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}

	spin_lock_irqsave(&master->queue_lock, flags);
	master->cur_msg = NULL;
	master->cur_msg_prepared = false;
	queue_kthread_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	queue_kthread_work(&master->kworker, &master->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret) {
		dev_warn(&master->dev,
			 "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
	int ret;

	ret = spi_stop_queue(master);

	/*
	 * flush_kthread_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&master->dev, "problem destroying queue\n");
		return ret;
	}

	flush_kthread_worker(&master->kworker);
	kthread_stop(master->kworker_task);

	return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	if (!master->busy && need_pump)
		queue_kthread_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to be handled; it is queued to the driver queue
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	master->queued = true;
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(master);
err_init_queue:
	return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;
	u32 value;

	/* Alloc an spi_device */
	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "spi_device alloc error for %s\n",
			nc->full_name);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
			      sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&master->dev, "cannot find modalias for %s\n",
			nc->full_name);
		goto err_out;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->chip_select = value;

	/* Mode (clock phase/polarity/etc.) */
	if (of_find_property(nc, "spi-cpha", NULL))
		spi->mode |= SPI_CPHA;
	if (of_find_property(nc, "spi-cpol", NULL))
		spi->mode |= SPI_CPOL;
	if (of_find_property(nc, "spi-cs-high", NULL))
		spi->mode |= SPI_CS_HIGH;
	if (of_find_property(nc, "spi-3wire", NULL))
		spi->mode |= SPI_3WIRE;
	if (of_find_property(nc, "spi-lsb-first", NULL))
		spi->mode |= SPI_LSB_FIRST;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				 "spi-tx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				 "spi-rx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->max_speed_hz = value;

	/* IRQ */
	spi->irq = irq_of_parse_and_map(nc, 0);

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&master->dev, "spi_device register error %s\n",
			nc->full_name);
		goto err_out;
	}

	return spi;

err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}

/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		spi = of_register_spi_device(master, nc);
		if (IS_ERR(spi))
			dev_warn(&master->dev, "Failed to create SPI device for %s\n",
				 nc->full_name);
	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif

#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			spi->chip_select = sb->device_selection;
			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct list_head resource_list;
	struct acpi_device *adev;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	adev->power.flags.ignore_parent = true;
	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */

static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
	.dev_groups	= spi_master_groups,
};


/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.  It returns the SPI
 * master structure on success, else NULL.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() and kfree() to prevent a memory
 * leak.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master *master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = get_device(dev);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);

#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max_t(int, nb, master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif

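/*
 * Example (sketch, hypothetical controller driver): a typical probe() pairs
 * spi_alloc_master() above with devm_spi_register_master() below, so the
 * master is unregistered automatically on driver detach.  "foo_spi" and the
 * numbers here are made up for illustration:
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
 *	if (!master)
 *		return -ENOMEM;
 *	master->num_chipselect = 4;
 *	master->mode_bits = SPI_CPOL | SPI_CPHA;
 *	master->transfer_one = foo_spi_transfer_one;
 *	ret = devm_spi_register_master(&pdev->dev, master);
 *	if (ret)
 *		spi_master_put(master);
 *	return ret;
 */
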
/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention:  dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);
	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
		dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}
	/* add statistics */
	spin_lock_init(&master->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);

static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev: device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register a SPI master as with spi_register_master() which will
 * automatically be unregistered.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
	struct spi_master **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (!ret) {
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);

static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);

static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);


/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
{
	if (master->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(master->bits_per_word_mask &
		      SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}

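/*
 * Example (sketch): a protocol driver that needs non-default settings
 * typically adjusts its spi_device in probe() and then calls spi_setup()
 * below; the field values here are illustrative only:
 *
 *	spi->mode = SPI_MODE_0;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 2000000;
 *	ret = spi_setup(spi);
 *	if (ret < 0)
 *		return ret;
 */
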
/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits, ugly_bits;
	int		status = 0;

	/* check mode to prevent DUAL and QUAD from being set at the same time
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
	    ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
			"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	ugly_bits = bad_bits &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	if (__spi_validate_bits_per_word(spi->master, spi->bits_per_word))
		return -EINVAL;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->master->max_speed_hz;

	spi_set_cs(spi, false);

	if (spi->master->setup)
		status = spi->master->setup(spi);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
		(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
		(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
		(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
		(spi->mode & SPI_3WIRE) ? "3wire, " : "",
		(spi->mode & SPI_LOOP) ? "loopback, " : "",
		spi->bits_per_word, spi->max_speed_hz,
		status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
2047 */ 2048 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2049 message->frame_length += xfer->len; 2050 if (!xfer->bits_per_word) 2051 xfer->bits_per_word = spi->bits_per_word; 2052 2053 if (!xfer->speed_hz) 2054 xfer->speed_hz = spi->max_speed_hz; 2055 if (!xfer->speed_hz) 2056 xfer->speed_hz = master->max_speed_hz; 2057 2058 if (master->max_speed_hz && 2059 xfer->speed_hz > master->max_speed_hz) 2060 xfer->speed_hz = master->max_speed_hz; 2061 2062 if (__spi_validate_bits_per_word(master, xfer->bits_per_word)) 2063 return -EINVAL; 2064 2065 /* 2066 * SPI transfer length should be multiple of SPI word size 2067 * where SPI word size should be power-of-two multiple 2068 */ 2069 if (xfer->bits_per_word <= 8) 2070 w_size = 1; 2071 else if (xfer->bits_per_word <= 16) 2072 w_size = 2; 2073 else 2074 w_size = 4; 2075 2076 /* No partial transfers accepted */ 2077 if (xfer->len % w_size) 2078 return -EINVAL; 2079 2080 if (xfer->speed_hz && master->min_speed_hz && 2081 xfer->speed_hz < master->min_speed_hz) 2082 return -EINVAL; 2083 2084 if (xfer->tx_buf && !xfer->tx_nbits) 2085 xfer->tx_nbits = SPI_NBITS_SINGLE; 2086 if (xfer->rx_buf && !xfer->rx_nbits) 2087 xfer->rx_nbits = SPI_NBITS_SINGLE; 2088 /* check transfer tx/rx_nbits: 2089 * 1. check the value matches one of single, dual and quad 2090 * 2. check tx/rx_nbits match the mode in spi_device 2091 */ 2092 if (xfer->tx_buf) { 2093 if (xfer->tx_nbits != SPI_NBITS_SINGLE && 2094 xfer->tx_nbits != SPI_NBITS_DUAL && 2095 xfer->tx_nbits != SPI_NBITS_QUAD) 2096 return -EINVAL; 2097 if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 2098 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 2099 return -EINVAL; 2100 if ((xfer->tx_nbits == SPI_NBITS_QUAD) && 2101 !(spi->mode & SPI_TX_QUAD)) 2102 return -EINVAL; 2103 } 2104 /* check transfer rx_nbits */ 2105 if (xfer->rx_buf) { 2106 if (xfer->rx_nbits != SPI_NBITS_SINGLE && 2107 xfer->rx_nbits != SPI_NBITS_DUAL && 2108 xfer->rx_nbits != SPI_NBITS_QUAD) 2109 return -EINVAL; 2110 if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 2111 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 2112 return -EINVAL; 2113 if ((xfer->rx_nbits == SPI_NBITS_QUAD) && 2114 !(spi->mode & SPI_RX_QUAD)) 2115 return -EINVAL; 2116 } 2117 } 2118 2119 message->status = -EINPROGRESS; 2120 2121 return 0; 2122 } 2123 2124 static int __spi_async(struct spi_device *spi, struct spi_message *message) 2125 { 2126 struct spi_master *master = spi->master; 2127 2128 message->spi = spi; 2129 2130 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async); 2131 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); 2132 2133 trace_spi_message_submit(message); 2134 2135 return master->transfer(spi, message); 2136 } 2137 2138 /** 2139 * spi_async - asynchronous SPI transfer 2140 * @spi: device with which data will be exchanged 2141 * @message: describes the data transfers, including completion callback 2142 * Context: any (irqs may be blocked, etc) 2143 * 2144 * This call may be used in_irq and other contexts which can't sleep, 2145 * as well as from task contexts which can sleep. 2146 * 2147 * The completion callback is invoked in a context which can't sleep. 2148 * Before that invocation, the value of message->status is undefined. 2149 * When the callback is issued, message->status holds either zero (to 2150 * indicate complete success) or a negative error code. 
After that 2151 * callback returns, the driver which issued the transfer request may 2152 * deallocate the associated memory; it's no longer in use by any SPI 2153 * core or controller driver code. 2154 * 2155 * Note that although all messages to a spi_device are handled in 2156 * FIFO order, messages may go to different devices in other orders. 2157 * Some device might be higher priority, or have various "hard" access 2158 * time requirements, for example. 2159 * 2160 * On detection of any fault during the transfer, processing of 2161 * the entire message is aborted, and the device is deselected. 2162 * Until returning from the associated message completion callback, 2163 * no other spi_message queued to that device will be processed. 2164 * (This rule applies equally to all the synchronous transfer calls, 2165 * which are wrappers around this core asynchronous primitive.) 2166 */ 2167 int spi_async(struct spi_device *spi, struct spi_message *message) 2168 { 2169 struct spi_master *master = spi->master; 2170 int ret; 2171 unsigned long flags; 2172 2173 ret = __spi_validate(spi, message); 2174 if (ret != 0) 2175 return ret; 2176 2177 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2178 2179 if (master->bus_lock_flag) 2180 ret = -EBUSY; 2181 else 2182 ret = __spi_async(spi, message); 2183 2184 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2185 2186 return ret; 2187 } 2188 EXPORT_SYMBOL_GPL(spi_async); 2189 2190 /** 2191 * spi_async_locked - version of spi_async with exclusive bus usage 2192 * @spi: device with which data will be exchanged 2193 * @message: describes the data transfers, including completion callback 2194 * Context: any (irqs may be blocked, etc) 2195 * 2196 * This call may be used in_irq and other contexts which can't sleep, 2197 * as well as from task contexts which can sleep. 2198 * 2199 * The completion callback is invoked in a context which can't sleep. 2200 * Before that invocation, the value of message->status is undefined. 2201 * When the callback is issued, message->status holds either zero (to 2202 * indicate complete success) or a negative error code. After that 2203 * callback returns, the driver which issued the transfer request may 2204 * deallocate the associated memory; it's no longer in use by any SPI 2205 * core or controller driver code. 2206 * 2207 * Note that although all messages to a spi_device are handled in 2208 * FIFO order, messages may go to different devices in other orders. 2209 * Some device might be higher priority, or have various "hard" access 2210 * time requirements, for example. 2211 * 2212 * On detection of any fault during the transfer, processing of 2213 * the entire message is aborted, and the device is deselected. 2214 * Until returning from the associated message completion callback, 2215 * no other spi_message queued to that device will be processed. 2216 * (This rule applies equally to all the synchronous transfer calls, 2217 * which are wrappers around this core asynchronous primitive.) 
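 *
 * A typical submission, for either spi_async() or this locked variant,
 * looks roughly like the sketch below (names such as m, t, my_complete,
 * and my_status are hypothetical; the callback must not sleep):
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	m.complete = my_complete;
 *	m.context = &my_status;
 *	ret = spi_async_locked(spi, &m);
 *
 * The message, its transfers, and their buffers must stay allocated until
 * the completion callback has run.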
2218 */ 2219 int spi_async_locked(struct spi_device *spi, struct spi_message *message) 2220 { 2221 struct spi_master *master = spi->master; 2222 int ret; 2223 unsigned long flags; 2224 2225 ret = __spi_validate(spi, message); 2226 if (ret != 0) 2227 return ret; 2228 2229 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2230 2231 ret = __spi_async(spi, message); 2232 2233 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2234 2235 return ret; 2236 2237 } 2238 EXPORT_SYMBOL_GPL(spi_async_locked); 2239 2240 2241 /*-------------------------------------------------------------------------*/ 2242 2243 /* Utility methods for SPI master protocol drivers, layered on 2244 * top of the core. Some other utility methods are defined as 2245 * inline functions. 2246 */ 2247 2248 static void spi_complete(void *arg) 2249 { 2250 complete(arg); 2251 } 2252 2253 static int __spi_sync(struct spi_device *spi, struct spi_message *message, 2254 int bus_locked) 2255 { 2256 DECLARE_COMPLETION_ONSTACK(done); 2257 int status; 2258 struct spi_master *master = spi->master; 2259 unsigned long flags; 2260 2261 status = __spi_validate(spi, message); 2262 if (status != 0) 2263 return status; 2264 2265 message->complete = spi_complete; 2266 message->context = &done; 2267 message->spi = spi; 2268 2269 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync); 2270 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync); 2271 2272 if (!bus_locked) 2273 mutex_lock(&master->bus_lock_mutex); 2274 2275 /* If we're not using the legacy transfer method then we will 2276 * try to transfer in the calling context so special case. 2277 * This code would be less tricky if we could remove the 2278 * support for driver implemented message queues. 2279 */ 2280 if (master->transfer == spi_queued_transfer) { 2281 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2282 2283 trace_spi_message_submit(message); 2284 2285 status = __spi_queued_transfer(spi, message, false); 2286 2287 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2288 } else { 2289 status = spi_async_locked(spi, message); 2290 } 2291 2292 if (!bus_locked) 2293 mutex_unlock(&master->bus_lock_mutex); 2294 2295 if (status == 0) { 2296 /* Push out the messages in the calling context if we 2297 * can. 2298 */ 2299 if (master->transfer == spi_queued_transfer) { 2300 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, 2301 spi_sync_immediate); 2302 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, 2303 spi_sync_immediate); 2304 __spi_pump_messages(master, false); 2305 } 2306 2307 wait_for_completion(&done); 2308 status = message->status; 2309 } 2310 message->context = NULL; 2311 return status; 2312 } 2313 2314 /** 2315 * spi_sync - blocking/synchronous SPI data transfers 2316 * @spi: device with which data will be exchanged 2317 * @message: describes the data transfers 2318 * Context: can sleep 2319 * 2320 * This call may only be used from a context that may sleep. The sleep 2321 * is non-interruptible, and has no timeout. Low-overhead controller 2322 * drivers may DMA directly into and out of the message buffers. 2323 * 2324 * Note that the SPI device's chip select is active during the message, 2325 * and then is normally disabled between messages. Drivers for some 2326 * frequently-used devices may want to minimize costs of selecting a chip, 2327 * by leaving it selected in anticipation that the next message will go 2328 * to the same chip. (That may increase power usage.) 
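 *
 * A minimal synchronous transaction might be built as in the sketch below
 * (the cmd and resp buffers are hypothetical; for simple cases the
 * spi_sync_transfer() helper wraps these same steps):
 *
 *	struct spi_transfer t = {
 *		.tx_buf	= cmd,
 *		.rx_buf	= resp,
 *		.len	= sizeof(cmd),
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	status = spi_sync(spi, &m);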
2329 * 2330 * Also, the caller is guaranteeing that the memory associated with the 2331 * message will not be freed before this call returns. 2332 * 2333 * It returns zero on success, else a negative error code. 2334 */ 2335 int spi_sync(struct spi_device *spi, struct spi_message *message) 2336 { 2337 return __spi_sync(spi, message, 0); 2338 } 2339 EXPORT_SYMBOL_GPL(spi_sync); 2340 2341 /** 2342 * spi_sync_locked - version of spi_sync with exclusive bus usage 2343 * @spi: device with which data will be exchanged 2344 * @message: describes the data transfers 2345 * Context: can sleep 2346 * 2347 * This call may only be used from a context that may sleep. The sleep 2348 * is non-interruptible, and has no timeout. Low-overhead controller 2349 * drivers may DMA directly into and out of the message buffers. 2350 * 2351 * This call should be used by drivers that require exclusive access to the 2352 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must 2353 * be released by a spi_bus_unlock call when the exclusive access is over. 2354 * 2355 * It returns zero on success, else a negative error code. 2356 */ 2357 int spi_sync_locked(struct spi_device *spi, struct spi_message *message) 2358 { 2359 return __spi_sync(spi, message, 1); 2360 } 2361 EXPORT_SYMBOL_GPL(spi_sync_locked); 2362 2363 /** 2364 * spi_bus_lock - obtain a lock for exclusive SPI bus usage 2365 * @master: SPI bus master that should be locked for exclusive bus access 2366 * Context: can sleep 2367 * 2368 * This call may only be used from a context that may sleep. The sleep 2369 * is non-interruptible, and has no timeout. 2370 * 2371 * This call should be used by drivers that require exclusive access to the 2372 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the 2373 * exclusive access is over. Data transfer must be done by spi_sync_locked 2374 * and spi_async_locked calls when the SPI bus lock is held. 2375 * 2376 * It returns zero on success, else a negative error code. 2377 */ 2378 int spi_bus_lock(struct spi_master *master) 2379 { 2380 unsigned long flags; 2381 2382 mutex_lock(&master->bus_lock_mutex); 2383 2384 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2385 master->bus_lock_flag = 1; 2386 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2387 2388 /* mutex remains locked until spi_bus_unlock is called */ 2389 2390 return 0; 2391 } 2392 EXPORT_SYMBOL_GPL(spi_bus_lock); 2393 2394 /** 2395 * spi_bus_unlock - release the lock for exclusive SPI bus usage 2396 * @master: SPI bus master that was locked for exclusive bus access 2397 * Context: can sleep 2398 * 2399 * This call may only be used from a context that may sleep. The sleep 2400 * is non-interruptible, and has no timeout. 2401 * 2402 * This call releases an SPI bus lock previously obtained by an spi_bus_lock 2403 * call. 2404 * 2405 * It returns zero on success, else a negative error code. 
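 *
 * The expected calling pattern is sketched below (the messages themselves
 * are hypothetical, and error handling is omitted for brevity):
 *
 *	spi_bus_lock(spi->master);
 *	spi_sync_locked(spi, &setup_msg);
 *	spi_sync_locked(spi, &data_msg);
 *	spi_bus_unlock(spi->master);
 *
 * While the lock is held, other clients of the same master either block in
 * spi_sync() or get -EBUSY from spi_async(), so keep the locked region as
 * short as possible.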
2406 */ 2407 int spi_bus_unlock(struct spi_master *master) 2408 { 2409 master->bus_lock_flag = 0; 2410 2411 mutex_unlock(&master->bus_lock_mutex); 2412 2413 return 0; 2414 } 2415 EXPORT_SYMBOL_GPL(spi_bus_unlock); 2416 2417 /* portable code must never pass more than 32 bytes */ 2418 #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) 2419 2420 static u8 *buf; 2421 2422 /** 2423 * spi_write_then_read - SPI synchronous write followed by read 2424 * @spi: device with which data will be exchanged 2425 * @txbuf: data to be written (need not be dma-safe) 2426 * @n_tx: size of txbuf, in bytes 2427 * @rxbuf: buffer into which data will be read (need not be dma-safe) 2428 * @n_rx: size of rxbuf, in bytes 2429 * Context: can sleep 2430 * 2431 * This performs a half duplex MicroWire style transaction with the 2432 * device, sending txbuf and then reading rxbuf. The return value 2433 * is zero for success, else a negative errno status code. 2434 * This call may only be used from a context that may sleep. 2435 * 2436 * Parameters to this routine are always copied using a small buffer; 2437 * portable code should never use this for more than 32 bytes. 2438 * Performance-sensitive or bulk transfer code should instead use 2439 * spi_{async,sync}() calls with dma-safe buffers. 2440 */ 2441 int spi_write_then_read(struct spi_device *spi, 2442 const void *txbuf, unsigned n_tx, 2443 void *rxbuf, unsigned n_rx) 2444 { 2445 static DEFINE_MUTEX(lock); 2446 2447 int status; 2448 struct spi_message message; 2449 struct spi_transfer x[2]; 2450 u8 *local_buf; 2451 2452 /* Use preallocated DMA-safe buffer if we can. We can't avoid 2453 * copying here, (as a pure convenience thing), but we can 2454 * keep heap costs out of the hot path unless someone else is 2455 * using the pre-allocated buffer or the transfer is too large. 2456 */ 2457 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { 2458 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), 2459 GFP_KERNEL | GFP_DMA); 2460 if (!local_buf) 2461 return -ENOMEM; 2462 } else { 2463 local_buf = buf; 2464 } 2465 2466 spi_message_init(&message); 2467 memset(x, 0, sizeof(x)); 2468 if (n_tx) { 2469 x[0].len = n_tx; 2470 spi_message_add_tail(&x[0], &message); 2471 } 2472 if (n_rx) { 2473 x[1].len = n_rx; 2474 spi_message_add_tail(&x[1], &message); 2475 } 2476 2477 memcpy(local_buf, txbuf, n_tx); 2478 x[0].tx_buf = local_buf; 2479 x[1].rx_buf = local_buf + n_tx; 2480 2481 /* do the i/o */ 2482 status = spi_sync(spi, &message); 2483 if (status == 0) 2484 memcpy(rxbuf, x[1].rx_buf, n_rx); 2485 2486 if (x[0].tx_buf == buf) 2487 mutex_unlock(&lock); 2488 else 2489 kfree(local_buf); 2490 2491 return status; 2492 } 2493 EXPORT_SYMBOL_GPL(spi_write_then_read); 2494 2495 /*-------------------------------------------------------------------------*/ 2496 2497 #if IS_ENABLED(CONFIG_OF_DYNAMIC) 2498 static int __spi_of_device_match(struct device *dev, void *data) 2499 { 2500 return dev->of_node == data; 2501 } 2502 2503 /* must call put_device() when done with returned spi_device device */ 2504 static struct spi_device *of_find_spi_device_by_node(struct device_node *node) 2505 { 2506 struct device *dev = bus_find_device(&spi_bus_type, NULL, node, 2507 __spi_of_device_match); 2508 return dev ? 
to_spi_device(dev) : NULL;
2509 }
2510 
2511 static int __spi_of_master_match(struct device *dev, const void *data)
2512 {
2513 	return dev->of_node == data;
2514 }
2515 
2516 /* The spi masters are not on spi_bus, so we look them up another way */
2517 static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
2518 {
2519 	struct device *dev;
2520 
2521 	dev = class_find_device(&spi_master_class, NULL, node,
2522 			__spi_of_master_match);
2523 	if (!dev)
2524 		return NULL;
2525 
2526 	/* reference got in class_find_device */
2527 	return container_of(dev, struct spi_master, dev);
2528 }
2529 
2530 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
2531 			 void *arg)
2532 {
2533 	struct of_reconfig_data *rd = arg;
2534 	struct spi_master *master;
2535 	struct spi_device *spi;
2536 
2537 	switch (of_reconfig_get_state_change(action, arg)) {
2538 	case OF_RECONFIG_CHANGE_ADD:
2539 		master = of_find_spi_master_by_node(rd->dn->parent);
2540 		if (master == NULL)
2541 			return NOTIFY_OK;	/* not for us */
2542 
2543 		spi = of_register_spi_device(master, rd->dn);
2544 		put_device(&master->dev);
2545 
2546 		if (IS_ERR(spi)) {
2547 			pr_err("%s: failed to create for '%s'\n",
2548 					__func__, rd->dn->full_name);
2549 			return notifier_from_errno(PTR_ERR(spi));
2550 		}
2551 		break;
2552 
2553 	case OF_RECONFIG_CHANGE_REMOVE:
2554 		/* find our device by node */
2555 		spi = of_find_spi_device_by_node(rd->dn);
2556 		if (spi == NULL)
2557 			return NOTIFY_OK;	/* no? not meant for us */
2558 
2559 		/* unregister takes one ref away */
2560 		spi_unregister_device(spi);
2561 
2562 		/* and put the reference of the find */
2563 		put_device(&spi->dev);
2564 		break;
2565 	}
2566 
2567 	return NOTIFY_OK;
2568 }
2569 
2570 static struct notifier_block spi_of_notifier = {
2571 	.notifier_call = of_spi_notify,
2572 };
2573 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
2574 extern struct notifier_block spi_of_notifier;
2575 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
2576 
2577 static int __init spi_init(void)
2578 {
2579 	int	status;
2580 
2581 	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
2582 	if (!buf) {
2583 		status = -ENOMEM;
2584 		goto err0;
2585 	}
2586 
2587 	status = bus_register(&spi_bus_type);
2588 	if (status < 0)
2589 		goto err1;
2590 
2591 	status = class_register(&spi_master_class);
2592 	if (status < 0)
2593 		goto err2;
2594 
2595 	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
2596 		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
2597 
2598 	return 0;
2599 
2600 err2:
2601 	bus_unregister(&spi_bus_type);
2602 err1:
2603 	kfree(buf);
2604 	buf = NULL;
2605 err0:
2606 	return status;
2607 }
2608 
2609 /* board_info is normally registered in arch_initcall(),
2610  * but even essential drivers wait till later.
2611  *
2612  * REVISIT only boardinfo really needs static linking. The rest (device and
2613  * driver registration) _could_ be dynamically linked (modular) ... costs
2614  * include needing to have boardinfo data structures be much more public.
2615  */
2616 postcore_initcall(spi_init);
2617 
2618
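/*
 * Example: using the spi_write_then_read() convenience helper defined
 * above to read back a single register.  This is an illustrative sketch
 * only; the command value and register layout are hypothetical.  The
 * buffers may live on the caller's stack because the helper copies
 * through its own DMA-safe bounce buffer.
 *
 *	u8 cmd = 0x05;		(a hypothetical read-status opcode)
 *	u8 reg;
 *	int err;
 *
 *	err = spi_write_then_read(spi, &cmd, 1, &reg, 1);
 *	if (err)
 *		return err;
 */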