/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_master_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_master *master = container_of(dev,			\
						 struct spi_master, dev); \
	return spi_statistics_##field##_show(&master->statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_master_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_master_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = container_of(dev,			\
					      struct spi_device, dev);	\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)

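/* Generate a show() routine and the per-master and per-device sysfs
 * attributes for each statistics counter listed below.
 */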
SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name = "statistics",
	.attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_master_statistics_attrs[] = {
	&dev_attr_spi_master_messages.attr,
	&dev_attr_spi_master_transfers.attr,
	&dev_attr_spi_master_errors.attr,
	&dev_attr_spi_master_timedout.attr,
	&dev_attr_spi_master_spi_sync.attr,
	&dev_attr_spi_master_spi_sync_immediate.attr,
	&dev_attr_spi_master_spi_async.attr,
	&dev_attr_spi_master_bytes.attr,
	&dev_attr_spi_master_bytes_rx.attr,
	&dev_attr_spi_master_bytes_tx.attr,
	NULL,
};

static const struct attribute_group spi_master_statistics_group = {
	.name = "statistics",
	.attrs = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_master_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != master->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != master->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(to_spi_device(dev));
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * spi_register_driver - register a SPI driver
 * @sdrv: the driver to register
 * Context: can sleep
 */
int spi_register_driver(struct spi_driver *sdrv)
{
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(spi_register_driver);

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into a board file like
 * arch/.../mach.../board-YYY.c with other readonly (flashable) information
 * about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Returns a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device *spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Returns 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
			spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Returns the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device *proxy;
	int status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
					  struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.
 * We keep this table of devices forever, so that reloading a controller
 * driver will not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return -EINVAL;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpio >= 0)
		gpio_set_value(spi->cs_gpio, !enable);
	else if (spi->master->set_cs)
		spi->master->set_cs(spi, !enable);
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	int desc_len;
	int sgs;
	struct page *vm_page;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf) {
		desc_len = PAGE_SIZE;
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else {
		desc_len = master->max_dma_len;
		sgs = DIV_ROUND_UP(len, desc_len);
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf) {
			min = min_t(size_t,
				    len, desc_len - offset_in_page(buf));
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(&sgt->sgl[i], vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(&sgt->sgl[i], sg_buf, min);
		}

		buf += min;
		len -= min;
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = &master->dev;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = &master->dev;

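	/* Map the tx and rx buffers of every transfer the controller can DMA */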
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || !master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = &master->dev;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = &master->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_master *master,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == master->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == master->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(master, msg);
}

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	return __spi_map_msg(master, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long ms = 1;
	struct spi_statistics *statm = &master->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, master);
		spi_statistics_add_transfer_stats(stats, xfer, master);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&master->xfer_completion);

			ret = master->transfer_one(master, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = xfer->len * 8 * 1000 / xfer->speed_hz;
				ms += ms + 100; /* some tolerance */

				ms = wait_for_completion_timeout(&master->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       timedout);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       timedout);
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && master->handle_err)
		master->handle_err(master, msg);

	spi_finalize_current_message(master);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);

/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&master->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (master->idling) {
		queue_kthread_work(&master->kworker, &master->pump_messages);
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			queue_kthread_work(&master->kworker,
					   &master->pump_messages);
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		master->busy = false;
		master->idling = true;
		spin_unlock_irqrestore(&master->queue_lock, flags);

		kfree(master->dummy_rx);
		master->dummy_rx = NULL;
		kfree(master->dummy_tx);
		master->dummy_tx = NULL;
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);

		spin_lock_irqsave(&master->queue_lock, flags);
		master->idling = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message,
				 queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
			return;
		}
	}

	trace_spi_message_start(master->cur_msg);

	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			return;
		}
		master->cur_msg_prepared = true;
	}

	ret = spi_map_msg(master, master->cur_msg);
	if (ret) {
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
		return;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue\n");
		return;
	}
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);

	__spi_pump_messages(master, true);
}

static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	master->running = false;
	master->busy = false;

	init_kthread_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return PTR_ERR(master->kworker_task);
	}
	init_kthread_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread.  Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			 "will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&master->queue_lock, flags);
	next = list_first_entry_or_null(&master->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	spi_unmap_msg(master, mesg);

	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}

	spin_lock_irqsave(&master->queue_lock, flags);
	master->cur_msg = NULL;
	master->cur_msg_prepared = false;
	queue_kthread_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	queue_kthread_work(&master->kworker, &master->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message.  Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret) {
		dev_warn(&master->dev,
			 "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
	int ret;

	ret = spi_stop_queue(master);

	/*
	 * flush_kthread_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&master->dev, "problem destroying queue\n");
		return ret;
	}

	flush_kthread_worker(&master->kworker);
	kthread_stop(master->kworker_task);

	return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	if (!master->busy && need_pump)
		queue_kthread_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to be handled; it is queued to the driver queue
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	master->queued = true;
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(master);
err_init_queue:
	return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;
	u32 value;

	/* Alloc an spi_device */
	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "spi_device alloc error for %s\n",
			nc->full_name);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
			      sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&master->dev, "cannot find modalias for %s\n",
			nc->full_name);
		goto err_out;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->chip_select = value;

	/* Mode (clock phase/polarity/etc.) */
	if (of_find_property(nc, "spi-cpha", NULL))
		spi->mode |= SPI_CPHA;
	if (of_find_property(nc, "spi-cpol", NULL))
		spi->mode |= SPI_CPOL;
	if (of_find_property(nc, "spi-cs-high", NULL))
		spi->mode |= SPI_CS_HIGH;
	if (of_find_property(nc, "spi-3wire", NULL))
		spi->mode |= SPI_3WIRE;
	if (of_find_property(nc, "spi-lsb-first", NULL))
		spi->mode |= SPI_LSB_FIRST;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				 "spi-tx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				 "spi-rx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->max_speed_hz = value;

	/* IRQ */
	spi->irq = irq_of_parse_and_map(nc, 0);

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&master->dev, "spi_device register error %s\n",
			nc->full_name);
		goto err_out;
	}

	return spi;

err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}

/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		spi = of_register_spi_device(master, nc);
		if (IS_ERR(spi))
			dev_warn(&master->dev, "Failed to create SPI device for %s\n",
				 nc->full_name);
	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif

#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			spi->chip_select = sb->device_selection;
			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct list_head resource_list;
	struct acpi_device *adev;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	adev->power.flags.ignore_parent = true;
	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */

static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

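/* sysfs class that all spi_master devices belong to; a released master is
 * freed via spi_master_release().
 */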
static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
	.dev_groups	= spi_master_groups,
};


/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.  It returns the SPI
 * master structure on success, else NULL.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() to prevent a memory leak.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master *master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = get_device(dev);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);

#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max_t(int, nb, master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif

/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.
 * It returns zero on success, else a negative error code (dropping the
 * master's refcount).  After a successful return, the caller is responsible
 * for calling spi_unregister_master().
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device *dev = master->dev.parent;
	struct boardinfo *bi;
	int status = -ENODEV;
	int dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention:  dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);
	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}
	/* add statistics */
	spin_lock_init(&master->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);

static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev:    device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register a SPI master as with spi_register_master(), which will
 * automatically be unregistered when @dev is unbound.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
	struct spi_master **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (!ret) {
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);

static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);

static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device *dev;
	struct spi_master *master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);


/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
{
	if (master->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(master->bits_per_word_mask &
		      SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned bad_bits, ugly_bits;
	int status = 0;

	/* check mode to prevent DUAL and QUAD from being set at the same time
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
	    ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
			"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* in SPI_3WIRE mode, DUAL and QUAD are forbidden
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	ugly_bits = bad_bits &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	if (__spi_validate_bits_per_word(spi->master, spi->bits_per_word))
		return -EINVAL;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->master->max_speed_hz;

	spi_set_cs(spi, false);

	if (spi->master->setup)
		status = spi->master->setup(spi);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
		(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
		(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
		(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
		(spi->mode & SPI_3WIRE) ? "3wire, " : "",
		(spi->mode & SPI_LOOP) ? "loopback, " : "",
		spi->bits_per_word, spi->max_speed_hz,
		status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
2046 */ 2047 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2048 message->frame_length += xfer->len; 2049 if (!xfer->bits_per_word) 2050 xfer->bits_per_word = spi->bits_per_word; 2051 2052 if (!xfer->speed_hz) 2053 xfer->speed_hz = spi->max_speed_hz; 2054 if (!xfer->speed_hz) 2055 xfer->speed_hz = master->max_speed_hz; 2056 2057 if (master->max_speed_hz && 2058 xfer->speed_hz > master->max_speed_hz) 2059 xfer->speed_hz = master->max_speed_hz; 2060 2061 if (__spi_validate_bits_per_word(master, xfer->bits_per_word)) 2062 return -EINVAL; 2063 2064 /* 2065 * SPI transfer length should be multiple of SPI word size 2066 * where SPI word size should be power-of-two multiple 2067 */ 2068 if (xfer->bits_per_word <= 8) 2069 w_size = 1; 2070 else if (xfer->bits_per_word <= 16) 2071 w_size = 2; 2072 else 2073 w_size = 4; 2074 2075 /* No partial transfers accepted */ 2076 if (xfer->len % w_size) 2077 return -EINVAL; 2078 2079 if (xfer->speed_hz && master->min_speed_hz && 2080 xfer->speed_hz < master->min_speed_hz) 2081 return -EINVAL; 2082 2083 if (xfer->tx_buf && !xfer->tx_nbits) 2084 xfer->tx_nbits = SPI_NBITS_SINGLE; 2085 if (xfer->rx_buf && !xfer->rx_nbits) 2086 xfer->rx_nbits = SPI_NBITS_SINGLE; 2087 /* check transfer tx/rx_nbits: 2088 * 1. check the value matches one of single, dual and quad 2089 * 2. check tx/rx_nbits match the mode in spi_device 2090 */ 2091 if (xfer->tx_buf) { 2092 if (xfer->tx_nbits != SPI_NBITS_SINGLE && 2093 xfer->tx_nbits != SPI_NBITS_DUAL && 2094 xfer->tx_nbits != SPI_NBITS_QUAD) 2095 return -EINVAL; 2096 if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 2097 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 2098 return -EINVAL; 2099 if ((xfer->tx_nbits == SPI_NBITS_QUAD) && 2100 !(spi->mode & SPI_TX_QUAD)) 2101 return -EINVAL; 2102 } 2103 /* check transfer rx_nbits */ 2104 if (xfer->rx_buf) { 2105 if (xfer->rx_nbits != SPI_NBITS_SINGLE && 2106 xfer->rx_nbits != SPI_NBITS_DUAL && 2107 xfer->rx_nbits != SPI_NBITS_QUAD) 2108 return -EINVAL; 2109 if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 2110 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 2111 return -EINVAL; 2112 if ((xfer->rx_nbits == SPI_NBITS_QUAD) && 2113 !(spi->mode & SPI_RX_QUAD)) 2114 return -EINVAL; 2115 } 2116 } 2117 2118 message->status = -EINPROGRESS; 2119 2120 return 0; 2121 } 2122 2123 static int __spi_async(struct spi_device *spi, struct spi_message *message) 2124 { 2125 struct spi_master *master = spi->master; 2126 2127 message->spi = spi; 2128 2129 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async); 2130 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); 2131 2132 trace_spi_message_submit(message); 2133 2134 return master->transfer(spi, message); 2135 } 2136 2137 /** 2138 * spi_async - asynchronous SPI transfer 2139 * @spi: device with which data will be exchanged 2140 * @message: describes the data transfers, including completion callback 2141 * Context: any (irqs may be blocked, etc) 2142 * 2143 * This call may be used in_irq and other contexts which can't sleep, 2144 * as well as from task contexts which can sleep. 2145 * 2146 * The completion callback is invoked in a context which can't sleep. 2147 * Before that invocation, the value of message->status is undefined. 2148 * When the callback is issued, message->status holds either zero (to 2149 * indicate complete success) or a negative error code. 
After that 2150 * callback returns, the driver which issued the transfer request may 2151 * deallocate the associated memory; it's no longer in use by any SPI 2152 * core or controller driver code. 2153 * 2154 * Note that although all messages to a spi_device are handled in 2155 * FIFO order, messages may go to different devices in other orders. 2156 * Some device might be higher priority, or have various "hard" access 2157 * time requirements, for example. 2158 * 2159 * On detection of any fault during the transfer, processing of 2160 * the entire message is aborted, and the device is deselected. 2161 * Until returning from the associated message completion callback, 2162 * no other spi_message queued to that device will be processed. 2163 * (This rule applies equally to all the synchronous transfer calls, 2164 * which are wrappers around this core asynchronous primitive.) 2165 */ 2166 int spi_async(struct spi_device *spi, struct spi_message *message) 2167 { 2168 struct spi_master *master = spi->master; 2169 int ret; 2170 unsigned long flags; 2171 2172 ret = __spi_validate(spi, message); 2173 if (ret != 0) 2174 return ret; 2175 2176 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2177 2178 if (master->bus_lock_flag) 2179 ret = -EBUSY; 2180 else 2181 ret = __spi_async(spi, message); 2182 2183 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2184 2185 return ret; 2186 } 2187 EXPORT_SYMBOL_GPL(spi_async); 2188 2189 /** 2190 * spi_async_locked - version of spi_async with exclusive bus usage 2191 * @spi: device with which data will be exchanged 2192 * @message: describes the data transfers, including completion callback 2193 * Context: any (irqs may be blocked, etc) 2194 * 2195 * This call may be used in_irq and other contexts which can't sleep, 2196 * as well as from task contexts which can sleep. 2197 * 2198 * The completion callback is invoked in a context which can't sleep. 2199 * Before that invocation, the value of message->status is undefined. 2200 * When the callback is issued, message->status holds either zero (to 2201 * indicate complete success) or a negative error code. After that 2202 * callback returns, the driver which issued the transfer request may 2203 * deallocate the associated memory; it's no longer in use by any SPI 2204 * core or controller driver code. 2205 * 2206 * Note that although all messages to a spi_device are handled in 2207 * FIFO order, messages may go to different devices in other orders. 2208 * Some device might be higher priority, or have various "hard" access 2209 * time requirements, for example. 2210 * 2211 * On detection of any fault during the transfer, processing of 2212 * the entire message is aborted, and the device is deselected. 2213 * Until returning from the associated message completion callback, 2214 * no other spi_message queued to that device will be processed. 2215 * (This rule applies equally to all the synchronous transfer calls, 2216 * which are wrappers around this core asynchronous primitive.) 
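 *
 * As an illustrative sketch only (the function and variable names here are
 * assumptions, not part of this documentation), a caller of spi_async() or
 * of this locked variant typically supplies a completion callback and
 * inspects message->status from it:
 *
 *	static void my_xfer_complete(void *context)
 *	{
 *		struct spi_message *m = context;
 *
 *		if (m->status)
 *			pr_err("spi transfer failed: %d\n", m->status);
 *	}
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	msg.complete = my_xfer_complete;
 *	msg.context = &msg;
 *	status = spi_async(spi, &msg);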
2217 */ 2218 int spi_async_locked(struct spi_device *spi, struct spi_message *message) 2219 { 2220 struct spi_master *master = spi->master; 2221 int ret; 2222 unsigned long flags; 2223 2224 ret = __spi_validate(spi, message); 2225 if (ret != 0) 2226 return ret; 2227 2228 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2229 2230 ret = __spi_async(spi, message); 2231 2232 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2233 2234 return ret; 2235 2236 } 2237 EXPORT_SYMBOL_GPL(spi_async_locked); 2238 2239 2240 /*-------------------------------------------------------------------------*/ 2241 2242 /* Utility methods for SPI master protocol drivers, layered on 2243 * top of the core. Some other utility methods are defined as 2244 * inline functions. 2245 */ 2246 2247 static void spi_complete(void *arg) 2248 { 2249 complete(arg); 2250 } 2251 2252 static int __spi_sync(struct spi_device *spi, struct spi_message *message, 2253 int bus_locked) 2254 { 2255 DECLARE_COMPLETION_ONSTACK(done); 2256 int status; 2257 struct spi_master *master = spi->master; 2258 unsigned long flags; 2259 2260 status = __spi_validate(spi, message); 2261 if (status != 0) 2262 return status; 2263 2264 message->complete = spi_complete; 2265 message->context = &done; 2266 message->spi = spi; 2267 2268 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync); 2269 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync); 2270 2271 if (!bus_locked) 2272 mutex_lock(&master->bus_lock_mutex); 2273 2274 /* If we're not using the legacy transfer method then we will 2275 * try to transfer in the calling context so special case. 2276 * This code would be less tricky if we could remove the 2277 * support for driver implemented message queues. 2278 */ 2279 if (master->transfer == spi_queued_transfer) { 2280 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2281 2282 trace_spi_message_submit(message); 2283 2284 status = __spi_queued_transfer(spi, message, false); 2285 2286 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2287 } else { 2288 status = spi_async_locked(spi, message); 2289 } 2290 2291 if (!bus_locked) 2292 mutex_unlock(&master->bus_lock_mutex); 2293 2294 if (status == 0) { 2295 /* Push out the messages in the calling context if we 2296 * can. 2297 */ 2298 if (master->transfer == spi_queued_transfer) { 2299 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, 2300 spi_sync_immediate); 2301 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, 2302 spi_sync_immediate); 2303 __spi_pump_messages(master, false); 2304 } 2305 2306 wait_for_completion(&done); 2307 status = message->status; 2308 } 2309 message->context = NULL; 2310 return status; 2311 } 2312 2313 /** 2314 * spi_sync - blocking/synchronous SPI data transfers 2315 * @spi: device with which data will be exchanged 2316 * @message: describes the data transfers 2317 * Context: can sleep 2318 * 2319 * This call may only be used from a context that may sleep. The sleep 2320 * is non-interruptible, and has no timeout. Low-overhead controller 2321 * drivers may DMA directly into and out of the message buffers. 2322 * 2323 * Note that the SPI device's chip select is active during the message, 2324 * and then is normally disabled between messages. Drivers for some 2325 * frequently-used devices may want to minimize costs of selecting a chip, 2326 * by leaving it selected in anticipation that the next message will go 2327 * to the same chip. (That may increase power usage.) 
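 *
 * A minimal usage sketch (the tx/rx buffer and length names are assumptions,
 * not taken from this documentation):
 *
 *	struct spi_transfer t = {
 *		.tx_buf	= tx,
 *		.rx_buf	= rx,
 *		.len	= len,
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	status = spi_sync(spi, &m);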
2328 * 2329 * Also, the caller is guaranteeing that the memory associated with the 2330 * message will not be freed before this call returns. 2331 * 2332 * It returns zero on success, else a negative error code. 2333 */ 2334 int spi_sync(struct spi_device *spi, struct spi_message *message) 2335 { 2336 return __spi_sync(spi, message, 0); 2337 } 2338 EXPORT_SYMBOL_GPL(spi_sync); 2339 2340 /** 2341 * spi_sync_locked - version of spi_sync with exclusive bus usage 2342 * @spi: device with which data will be exchanged 2343 * @message: describes the data transfers 2344 * Context: can sleep 2345 * 2346 * This call may only be used from a context that may sleep. The sleep 2347 * is non-interruptible, and has no timeout. Low-overhead controller 2348 * drivers may DMA directly into and out of the message buffers. 2349 * 2350 * This call should be used by drivers that require exclusive access to the 2351 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must 2352 * be released by a spi_bus_unlock call when the exclusive access is over. 2353 * 2354 * It returns zero on success, else a negative error code. 2355 */ 2356 int spi_sync_locked(struct spi_device *spi, struct spi_message *message) 2357 { 2358 return __spi_sync(spi, message, 1); 2359 } 2360 EXPORT_SYMBOL_GPL(spi_sync_locked); 2361 2362 /** 2363 * spi_bus_lock - obtain a lock for exclusive SPI bus usage 2364 * @master: SPI bus master that should be locked for exclusive bus access 2365 * Context: can sleep 2366 * 2367 * This call may only be used from a context that may sleep. The sleep 2368 * is non-interruptible, and has no timeout. 2369 * 2370 * This call should be used by drivers that require exclusive access to the 2371 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the 2372 * exclusive access is over. Data transfer must be done by spi_sync_locked 2373 * and spi_async_locked calls when the SPI bus lock is held. 2374 * 2375 * It returns zero on success, else a negative error code. 2376 */ 2377 int spi_bus_lock(struct spi_master *master) 2378 { 2379 unsigned long flags; 2380 2381 mutex_lock(&master->bus_lock_mutex); 2382 2383 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2384 master->bus_lock_flag = 1; 2385 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2386 2387 /* mutex remains locked until spi_bus_unlock is called */ 2388 2389 return 0; 2390 } 2391 EXPORT_SYMBOL_GPL(spi_bus_lock); 2392 2393 /** 2394 * spi_bus_unlock - release the lock for exclusive SPI bus usage 2395 * @master: SPI bus master that was locked for exclusive bus access 2396 * Context: can sleep 2397 * 2398 * This call may only be used from a context that may sleep. The sleep 2399 * is non-interruptible, and has no timeout. 2400 * 2401 * This call releases an SPI bus lock previously obtained by an spi_bus_lock 2402 * call. 2403 * 2404 * It returns zero on success, else a negative error code. 
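 *
 * As an illustration only (the message names are assumptions), a driver
 * that must keep a group of transfers from being interleaved with traffic
 * to other devices on the same bus would bracket them like this:
 *
 *	spi_bus_lock(spi->master);
 *	status = spi_sync_locked(spi, &first_msg);
 *	if (!status)
 *		status = spi_sync_locked(spi, &second_msg);
 *	spi_bus_unlock(spi->master);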
2405 */ 2406 int spi_bus_unlock(struct spi_master *master) 2407 { 2408 master->bus_lock_flag = 0; 2409 2410 mutex_unlock(&master->bus_lock_mutex); 2411 2412 return 0; 2413 } 2414 EXPORT_SYMBOL_GPL(spi_bus_unlock); 2415 2416 /* portable code must never pass more than 32 bytes */ 2417 #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) 2418 2419 static u8 *buf; 2420 2421 /** 2422 * spi_write_then_read - SPI synchronous write followed by read 2423 * @spi: device with which data will be exchanged 2424 * @txbuf: data to be written (need not be dma-safe) 2425 * @n_tx: size of txbuf, in bytes 2426 * @rxbuf: buffer into which data will be read (need not be dma-safe) 2427 * @n_rx: size of rxbuf, in bytes 2428 * Context: can sleep 2429 * 2430 * This performs a half duplex MicroWire style transaction with the 2431 * device, sending txbuf and then reading rxbuf. The return value 2432 * is zero for success, else a negative errno status code. 2433 * This call may only be used from a context that may sleep. 2434 * 2435 * Parameters to this routine are always copied using a small buffer; 2436 * portable code should never use this for more than 32 bytes. 2437 * Performance-sensitive or bulk transfer code should instead use 2438 * spi_{async,sync}() calls with dma-safe buffers. 2439 */ 2440 int spi_write_then_read(struct spi_device *spi, 2441 const void *txbuf, unsigned n_tx, 2442 void *rxbuf, unsigned n_rx) 2443 { 2444 static DEFINE_MUTEX(lock); 2445 2446 int status; 2447 struct spi_message message; 2448 struct spi_transfer x[2]; 2449 u8 *local_buf; 2450 2451 /* Use preallocated DMA-safe buffer if we can. We can't avoid 2452 * copying here, (as a pure convenience thing), but we can 2453 * keep heap costs out of the hot path unless someone else is 2454 * using the pre-allocated buffer or the transfer is too large. 2455 */ 2456 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { 2457 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), 2458 GFP_KERNEL | GFP_DMA); 2459 if (!local_buf) 2460 return -ENOMEM; 2461 } else { 2462 local_buf = buf; 2463 } 2464 2465 spi_message_init(&message); 2466 memset(x, 0, sizeof(x)); 2467 if (n_tx) { 2468 x[0].len = n_tx; 2469 spi_message_add_tail(&x[0], &message); 2470 } 2471 if (n_rx) { 2472 x[1].len = n_rx; 2473 spi_message_add_tail(&x[1], &message); 2474 } 2475 2476 memcpy(local_buf, txbuf, n_tx); 2477 x[0].tx_buf = local_buf; 2478 x[1].rx_buf = local_buf + n_tx; 2479 2480 /* do the i/o */ 2481 status = spi_sync(spi, &message); 2482 if (status == 0) 2483 memcpy(rxbuf, x[1].rx_buf, n_rx); 2484 2485 if (x[0].tx_buf == buf) 2486 mutex_unlock(&lock); 2487 else 2488 kfree(local_buf); 2489 2490 return status; 2491 } 2492 EXPORT_SYMBOL_GPL(spi_write_then_read); 2493 2494 /*-------------------------------------------------------------------------*/ 2495 2496 #if IS_ENABLED(CONFIG_OF_DYNAMIC) 2497 static int __spi_of_device_match(struct device *dev, void *data) 2498 { 2499 return dev->of_node == data; 2500 } 2501 2502 /* must call put_device() when done with returned spi_device device */ 2503 static struct spi_device *of_find_spi_device_by_node(struct device_node *node) 2504 { 2505 struct device *dev = bus_find_device(&spi_bus_type, NULL, node, 2506 __spi_of_device_match); 2507 return dev ? 
to_spi_device(dev) : NULL;
2508 }
2509 
2510 static int __spi_of_master_match(struct device *dev, const void *data)
2511 {
2512 return dev->of_node == data;
2513 }
2514 
2515 /* the spi masters are not using spi_bus, so we must find them another way */
2516 static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
2517 {
2518 struct device *dev;
2519 
2520 dev = class_find_device(&spi_master_class, NULL, node,
2521 __spi_of_master_match);
2522 if (!dev)
2523 return NULL;
2524 
2525 /* reference got in class_find_device */
2526 return container_of(dev, struct spi_master, dev);
2527 }
2528 
2529 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
2530 void *arg)
2531 {
2532 struct of_reconfig_data *rd = arg;
2533 struct spi_master *master;
2534 struct spi_device *spi;
2535 
2536 switch (of_reconfig_get_state_change(action, arg)) {
2537 case OF_RECONFIG_CHANGE_ADD:
2538 master = of_find_spi_master_by_node(rd->dn->parent);
2539 if (master == NULL)
2540 return NOTIFY_OK; /* not for us */
2541 
2542 spi = of_register_spi_device(master, rd->dn);
2543 put_device(&master->dev);
2544 
2545 if (IS_ERR(spi)) {
2546 pr_err("%s: failed to create for '%s'\n",
2547 __func__, rd->dn->full_name);
2548 return notifier_from_errno(PTR_ERR(spi));
2549 }
2550 break;
2551 
2552 case OF_RECONFIG_CHANGE_REMOVE:
2553 /* find our device by node */
2554 spi = of_find_spi_device_by_node(rd->dn);
2555 if (spi == NULL)
2556 return NOTIFY_OK; /* not meant for us */
2557 
2558 /* unregister takes one ref away */
2559 spi_unregister_device(spi);
2560 
2561 /* and drop the reference taken by the find above */
2562 put_device(&spi->dev);
2563 break;
2564 }
2565 
2566 return NOTIFY_OK;
2567 }
2568 
2569 static struct notifier_block spi_of_notifier = {
2570 .notifier_call = of_spi_notify,
2571 };
2572 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
2573 extern struct notifier_block spi_of_notifier;
2574 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
2575 
2576 static int __init spi_init(void)
2577 {
2578 int status;
2579 
2580 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
2581 if (!buf) {
2582 status = -ENOMEM;
2583 goto err0;
2584 }
2585 
2586 status = bus_register(&spi_bus_type);
2587 if (status < 0)
2588 goto err1;
2589 
2590 status = class_register(&spi_master_class);
2591 if (status < 0)
2592 goto err2;
2593 
2594 if (IS_ENABLED(CONFIG_OF_DYNAMIC))
2595 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
2596 
2597 return 0;
2598 
2599 err2:
2600 bus_unregister(&spi_bus_type);
2601 err1:
2602 kfree(buf);
2603 buf = NULL;
2604 err0:
2605 return status;
2606 }
2607 
2608 /* board_info is normally registered in arch_initcall(),
2609 * but even essential drivers wait till later
2610 *
2611 * REVISIT only boardinfo really needs static linking. The rest (device and
2612 * driver registration) _could_ be dynamically linked (modular) ... costs
2613 * include needing to have boardinfo data structures be much more public.
2614 */
2615 postcore_initcall(spi_init);
2616 
2617
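/* As an illustration of the boardinfo path mentioned above (all values and
 * the "example-chip" modalias are assumptions, not taken from this file), a
 * board file would typically declare its SPI devices statically and register
 * them from arch_initcall() context:
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "example-chip",
 *			.max_speed_hz	= 1000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */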