1 /* 2 * SPI init/core code 3 * 4 * Copyright (C) 2005 David Brownell 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 */ 20 21 #include <linux/kernel.h> 22 #include <linux/device.h> 23 #include <linux/init.h> 24 #include <linux/cache.h> 25 #include <linux/mutex.h> 26 #include <linux/of_device.h> 27 #include <linux/slab.h> 28 #include <linux/mod_devicetable.h> 29 #include <linux/spi/spi.h> 30 #include <linux/of_spi.h> 31 #include <linux/pm_runtime.h> 32 33 static void spidev_release(struct device *dev) 34 { 35 struct spi_device *spi = to_spi_device(dev); 36 37 /* spi masters may cleanup for released devices */ 38 if (spi->master->cleanup) 39 spi->master->cleanup(spi); 40 41 spi_master_put(spi->master); 42 kfree(spi); 43 } 44 45 static ssize_t 46 modalias_show(struct device *dev, struct device_attribute *a, char *buf) 47 { 48 const struct spi_device *spi = to_spi_device(dev); 49 50 return sprintf(buf, "%s\n", spi->modalias); 51 } 52 53 static struct device_attribute spi_dev_attrs[] = { 54 __ATTR_RO(modalias), 55 __ATTR_NULL, 56 }; 57 58 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work, 59 * and the sysfs version makes coldplug work too. 60 */ 61 62 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, 63 const struct spi_device *sdev) 64 { 65 while (id->name[0]) { 66 if (!strcmp(sdev->modalias, id->name)) 67 return id; 68 id++; 69 } 70 return NULL; 71 } 72 73 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev) 74 { 75 const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver); 76 77 return spi_match_id(sdrv->id_table, sdev); 78 } 79 EXPORT_SYMBOL_GPL(spi_get_device_id); 80 81 static int spi_match_device(struct device *dev, struct device_driver *drv) 82 { 83 const struct spi_device *spi = to_spi_device(dev); 84 const struct spi_driver *sdrv = to_spi_driver(drv); 85 86 /* Attempt an OF style match */ 87 if (of_driver_match_device(dev, drv)) 88 return 1; 89 90 if (sdrv->id_table) 91 return !!spi_match_id(sdrv->id_table, spi); 92 93 return strcmp(spi->modalias, drv->name) == 0; 94 } 95 96 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env) 97 { 98 const struct spi_device *spi = to_spi_device(dev); 99 100 add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias); 101 return 0; 102 } 103 104 #ifdef CONFIG_PM_SLEEP 105 static int spi_legacy_suspend(struct device *dev, pm_message_t message) 106 { 107 int value = 0; 108 struct spi_driver *drv = to_spi_driver(dev->driver); 109 110 /* suspend will stop irqs and dma; no more i/o */ 111 if (drv) { 112 if (drv->suspend) 113 value = drv->suspend(to_spi_device(dev), message); 114 else 115 dev_dbg(dev, "... can't suspend\n"); 116 } 117 return value; 118 } 119 120 static int spi_legacy_resume(struct device *dev) 121 { 122 int value = 0; 123 struct spi_driver *drv = to_spi_driver(dev->driver); 124 125 /* resume may restart the i/o queue */ 126 if (drv) { 127 if (drv->resume) 128 value = drv->resume(to_spi_device(dev)); 129 else 130 dev_dbg(dev, "... can't resume\n"); 131 } 132 return value; 133 } 134 135 static int spi_pm_suspend(struct device *dev) 136 { 137 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 138 139 if (pm) 140 return pm_generic_suspend(dev); 141 else 142 return spi_legacy_suspend(dev, PMSG_SUSPEND); 143 } 144 145 static int spi_pm_resume(struct device *dev) 146 { 147 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 148 149 if (pm) 150 return pm_generic_resume(dev); 151 else 152 return spi_legacy_resume(dev); 153 } 154 155 static int spi_pm_freeze(struct device *dev) 156 { 157 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 158 159 if (pm) 160 return pm_generic_freeze(dev); 161 else 162 return spi_legacy_suspend(dev, PMSG_FREEZE); 163 } 164 165 static int spi_pm_thaw(struct device *dev) 166 { 167 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 168 169 if (pm) 170 return pm_generic_thaw(dev); 171 else 172 return spi_legacy_resume(dev); 173 } 174 175 static int spi_pm_poweroff(struct device *dev) 176 { 177 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 178 179 if (pm) 180 return pm_generic_poweroff(dev); 181 else 182 return spi_legacy_suspend(dev, PMSG_HIBERNATE); 183 } 184 185 static int spi_pm_restore(struct device *dev) 186 { 187 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 188 189 if (pm) 190 return pm_generic_restore(dev); 191 else 192 return spi_legacy_resume(dev); 193 } 194 #else 195 #define spi_pm_suspend NULL 196 #define spi_pm_resume NULL 197 #define spi_pm_freeze NULL 198 #define spi_pm_thaw NULL 199 #define spi_pm_poweroff NULL 200 #define spi_pm_restore NULL 201 #endif 202 203 static const struct dev_pm_ops spi_pm = { 204 .suspend = spi_pm_suspend, 205 .resume = spi_pm_resume, 206 .freeze = spi_pm_freeze, 207 .thaw = spi_pm_thaw, 208 .poweroff = spi_pm_poweroff, 209 .restore = spi_pm_restore, 210 SET_RUNTIME_PM_OPS( 211 pm_generic_runtime_suspend, 212 pm_generic_runtime_resume, 213 pm_generic_runtime_idle 214 ) 215 }; 216 217 struct bus_type spi_bus_type = { 218 .name = "spi", 219 .dev_attrs = spi_dev_attrs, 220 .match = spi_match_device, 221 .uevent = spi_uevent, 222 .pm = &spi_pm, 223 }; 224 EXPORT_SYMBOL_GPL(spi_bus_type); 225 226 227 static int spi_drv_probe(struct device *dev) 228 { 229 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 230 231 return sdrv->probe(to_spi_device(dev)); 232 } 233 234 static int spi_drv_remove(struct device *dev) 235 { 236 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 237 238 return sdrv->remove(to_spi_device(dev)); 239 } 240 241 static void spi_drv_shutdown(struct device *dev) 242 { 243 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 244 245 sdrv->shutdown(to_spi_device(dev)); 246 } 247 248 /** 249 * spi_register_driver - register a SPI driver 250 * @sdrv: the driver to register 251 * Context: can sleep 252 */ 253 int spi_register_driver(struct spi_driver *sdrv) 254 { 255 sdrv->driver.bus = &spi_bus_type; 256 if (sdrv->probe) 257 sdrv->driver.probe = spi_drv_probe; 258 if (sdrv->remove) 259 sdrv->driver.remove = spi_drv_remove; 260 if (sdrv->shutdown) 261 sdrv->driver.shutdown = spi_drv_shutdown; 262 return driver_register(&sdrv->driver); 263 } 264 EXPORT_SYMBOL_GPL(spi_register_driver); 265 266 /*-------------------------------------------------------------------------*/ 267 268 /* SPI devices should normally not be created by SPI device drivers; that 269 * would make them board-specific. Similarly with SPI master drivers. 270 * Device registration normally goes into like arch/.../mach.../board-YYY.c 271 * with other readonly (flashable) information about mainboard devices. 272 */ 273 274 struct boardinfo { 275 struct list_head list; 276 struct spi_board_info board_info; 277 }; 278 279 static LIST_HEAD(board_list); 280 static LIST_HEAD(spi_master_list); 281 282 /* 283 * Used to protect add/del opertion for board_info list and 284 * spi_master list, and their matching process 285 */ 286 static DEFINE_MUTEX(board_lock); 287 288 /** 289 * spi_alloc_device - Allocate a new SPI device 290 * @master: Controller to which device is connected 291 * Context: can sleep 292 * 293 * Allows a driver to allocate and initialize a spi_device without 294 * registering it immediately. This allows a driver to directly 295 * fill the spi_device with device parameters before calling 296 * spi_add_device() on it. 297 * 298 * Caller is responsible to call spi_add_device() on the returned 299 * spi_device structure to add it to the SPI master. If the caller 300 * needs to discard the spi_device without adding it, then it should 301 * call spi_dev_put() on it. 302 * 303 * Returns a pointer to the new device, or NULL. 304 */ 305 struct spi_device *spi_alloc_device(struct spi_master *master) 306 { 307 struct spi_device *spi; 308 struct device *dev = master->dev.parent; 309 310 if (!spi_master_get(master)) 311 return NULL; 312 313 spi = kzalloc(sizeof *spi, GFP_KERNEL); 314 if (!spi) { 315 dev_err(dev, "cannot alloc spi_device\n"); 316 spi_master_put(master); 317 return NULL; 318 } 319 320 spi->master = master; 321 spi->dev.parent = dev; 322 spi->dev.bus = &spi_bus_type; 323 spi->dev.release = spidev_release; 324 device_initialize(&spi->dev); 325 return spi; 326 } 327 EXPORT_SYMBOL_GPL(spi_alloc_device); 328 329 /** 330 * spi_add_device - Add spi_device allocated with spi_alloc_device 331 * @spi: spi_device to register 332 * 333 * Companion function to spi_alloc_device. Devices allocated with 334 * spi_alloc_device can be added onto the spi bus with this function. 335 * 336 * Returns 0 on success; negative errno on failure 337 */ 338 int spi_add_device(struct spi_device *spi) 339 { 340 static DEFINE_MUTEX(spi_add_lock); 341 struct device *dev = spi->master->dev.parent; 342 struct device *d; 343 int status; 344 345 /* Chipselects are numbered 0..max; validate. */ 346 if (spi->chip_select >= spi->master->num_chipselect) { 347 dev_err(dev, "cs%d >= max %d\n", 348 spi->chip_select, 349 spi->master->num_chipselect); 350 return -EINVAL; 351 } 352 353 /* Set the bus ID string */ 354 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev), 355 spi->chip_select); 356 357 358 /* We need to make sure there's no other device with this 359 * chipselect **BEFORE** we call setup(), else we'll trash 360 * its configuration. Lock against concurrent add() calls. 361 */ 362 mutex_lock(&spi_add_lock); 363 364 d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev)); 365 if (d != NULL) { 366 dev_err(dev, "chipselect %d already in use\n", 367 spi->chip_select); 368 put_device(d); 369 status = -EBUSY; 370 goto done; 371 } 372 373 /* Drivers may modify this initial i/o setup, but will 374 * normally rely on the device being setup. Devices 375 * using SPI_CS_HIGH can't coexist well otherwise... 376 */ 377 status = spi_setup(spi); 378 if (status < 0) { 379 dev_err(dev, "can't setup %s, status %d\n", 380 dev_name(&spi->dev), status); 381 goto done; 382 } 383 384 /* Device may be bound to an active driver when this returns */ 385 status = device_add(&spi->dev); 386 if (status < 0) 387 dev_err(dev, "can't add %s, status %d\n", 388 dev_name(&spi->dev), status); 389 else 390 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev)); 391 392 done: 393 mutex_unlock(&spi_add_lock); 394 return status; 395 } 396 EXPORT_SYMBOL_GPL(spi_add_device); 397 398 /** 399 * spi_new_device - instantiate one new SPI device 400 * @master: Controller to which device is connected 401 * @chip: Describes the SPI device 402 * Context: can sleep 403 * 404 * On typical mainboards, this is purely internal; and it's not needed 405 * after board init creates the hard-wired devices. Some development 406 * platforms may not be able to use spi_register_board_info though, and 407 * this is exported so that for example a USB or parport based adapter 408 * driver could add devices (which it would learn about out-of-band). 409 * 410 * Returns the new device, or NULL. 411 */ 412 struct spi_device *spi_new_device(struct spi_master *master, 413 struct spi_board_info *chip) 414 { 415 struct spi_device *proxy; 416 int status; 417 418 /* NOTE: caller did any chip->bus_num checks necessary. 419 * 420 * Also, unless we change the return value convention to use 421 * error-or-pointer (not NULL-or-pointer), troubleshootability 422 * suggests syslogged diagnostics are best here (ugh). 423 */ 424 425 proxy = spi_alloc_device(master); 426 if (!proxy) 427 return NULL; 428 429 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); 430 431 proxy->chip_select = chip->chip_select; 432 proxy->max_speed_hz = chip->max_speed_hz; 433 proxy->mode = chip->mode; 434 proxy->irq = chip->irq; 435 strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias)); 436 proxy->dev.platform_data = (void *) chip->platform_data; 437 proxy->controller_data = chip->controller_data; 438 proxy->controller_state = NULL; 439 440 status = spi_add_device(proxy); 441 if (status < 0) { 442 spi_dev_put(proxy); 443 return NULL; 444 } 445 446 return proxy; 447 } 448 EXPORT_SYMBOL_GPL(spi_new_device); 449 450 static void spi_match_master_to_boardinfo(struct spi_master *master, 451 struct spi_board_info *bi) 452 { 453 struct spi_device *dev; 454 455 if (master->bus_num != bi->bus_num) 456 return; 457 458 dev = spi_new_device(master, bi); 459 if (!dev) 460 dev_err(master->dev.parent, "can't create new device for %s\n", 461 bi->modalias); 462 } 463 464 /** 465 * spi_register_board_info - register SPI devices for a given board 466 * @info: array of chip descriptors 467 * @n: how many descriptors are provided 468 * Context: can sleep 469 * 470 * Board-specific early init code calls this (probably during arch_initcall) 471 * with segments of the SPI device table. Any device nodes are created later, 472 * after the relevant parent SPI controller (bus_num) is defined. We keep 473 * this table of devices forever, so that reloading a controller driver will 474 * not make Linux forget about these hard-wired devices. 475 * 476 * Other code can also call this, e.g. a particular add-on board might provide 477 * SPI devices through its expansion connector, so code initializing that board 478 * would naturally declare its SPI devices. 479 * 480 * The board info passed can safely be __initdata ... but be careful of 481 * any embedded pointers (platform_data, etc), they're copied as-is. 482 */ 483 int __init 484 spi_register_board_info(struct spi_board_info const *info, unsigned n) 485 { 486 struct boardinfo *bi; 487 int i; 488 489 bi = kzalloc(n * sizeof(*bi), GFP_KERNEL); 490 if (!bi) 491 return -ENOMEM; 492 493 for (i = 0; i < n; i++, bi++, info++) { 494 struct spi_master *master; 495 496 memcpy(&bi->board_info, info, sizeof(*info)); 497 mutex_lock(&board_lock); 498 list_add_tail(&bi->list, &board_list); 499 list_for_each_entry(master, &spi_master_list, list) 500 spi_match_master_to_boardinfo(master, &bi->board_info); 501 mutex_unlock(&board_lock); 502 } 503 504 return 0; 505 } 506 507 /*-------------------------------------------------------------------------*/ 508 509 static void spi_master_release(struct device *dev) 510 { 511 struct spi_master *master; 512 513 master = container_of(dev, struct spi_master, dev); 514 kfree(master); 515 } 516 517 static struct class spi_master_class = { 518 .name = "spi_master", 519 .owner = THIS_MODULE, 520 .dev_release = spi_master_release, 521 }; 522 523 524 /** 525 * spi_alloc_master - allocate SPI master controller 526 * @dev: the controller, possibly using the platform_bus 527 * @size: how much zeroed driver-private data to allocate; the pointer to this 528 * memory is in the driver_data field of the returned device, 529 * accessible with spi_master_get_devdata(). 530 * Context: can sleep 531 * 532 * This call is used only by SPI master controller drivers, which are the 533 * only ones directly touching chip registers. It's how they allocate 534 * an spi_master structure, prior to calling spi_register_master(). 535 * 536 * This must be called from context that can sleep. It returns the SPI 537 * master structure on success, else NULL. 538 * 539 * The caller is responsible for assigning the bus number and initializing 540 * the master's methods before calling spi_register_master(); and (after errors 541 * adding the device) calling spi_master_put() to prevent a memory leak. 542 */ 543 struct spi_master *spi_alloc_master(struct device *dev, unsigned size) 544 { 545 struct spi_master *master; 546 547 if (!dev) 548 return NULL; 549 550 master = kzalloc(size + sizeof *master, GFP_KERNEL); 551 if (!master) 552 return NULL; 553 554 device_initialize(&master->dev); 555 master->dev.class = &spi_master_class; 556 master->dev.parent = get_device(dev); 557 spi_master_set_devdata(master, &master[1]); 558 559 return master; 560 } 561 EXPORT_SYMBOL_GPL(spi_alloc_master); 562 563 /** 564 * spi_register_master - register SPI master controller 565 * @master: initialized master, originally from spi_alloc_master() 566 * Context: can sleep 567 * 568 * SPI master controllers connect to their drivers using some non-SPI bus, 569 * such as the platform bus. The final stage of probe() in that code 570 * includes calling spi_register_master() to hook up to this SPI bus glue. 571 * 572 * SPI controllers use board specific (often SOC specific) bus numbers, 573 * and board-specific addressing for SPI devices combines those numbers 574 * with chip select numbers. Since SPI does not directly support dynamic 575 * device identification, boards need configuration tables telling which 576 * chip is at which address. 577 * 578 * This must be called from context that can sleep. It returns zero on 579 * success, else a negative error code (dropping the master's refcount). 580 * After a successful return, the caller is responsible for calling 581 * spi_unregister_master(). 582 */ 583 int spi_register_master(struct spi_master *master) 584 { 585 static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1); 586 struct device *dev = master->dev.parent; 587 struct boardinfo *bi; 588 int status = -ENODEV; 589 int dynamic = 0; 590 591 if (!dev) 592 return -ENODEV; 593 594 /* even if it's just one always-selected device, there must 595 * be at least one chipselect 596 */ 597 if (master->num_chipselect == 0) 598 return -EINVAL; 599 600 /* convention: dynamically assigned bus IDs count down from the max */ 601 if (master->bus_num < 0) { 602 /* FIXME switch to an IDR based scheme, something like 603 * I2C now uses, so we can't run out of "dynamic" IDs 604 */ 605 master->bus_num = atomic_dec_return(&dyn_bus_id); 606 dynamic = 1; 607 } 608 609 spin_lock_init(&master->bus_lock_spinlock); 610 mutex_init(&master->bus_lock_mutex); 611 master->bus_lock_flag = 0; 612 613 /* register the device, then userspace will see it. 614 * registration fails if the bus ID is in use. 615 */ 616 dev_set_name(&master->dev, "spi%u", master->bus_num); 617 status = device_add(&master->dev); 618 if (status < 0) 619 goto done; 620 dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev), 621 dynamic ? " (dynamic)" : ""); 622 623 mutex_lock(&board_lock); 624 list_add_tail(&master->list, &spi_master_list); 625 list_for_each_entry(bi, &board_list, list) 626 spi_match_master_to_boardinfo(master, &bi->board_info); 627 mutex_unlock(&board_lock); 628 629 status = 0; 630 631 /* Register devices from the device tree */ 632 of_register_spi_devices(master); 633 done: 634 return status; 635 } 636 EXPORT_SYMBOL_GPL(spi_register_master); 637 638 639 static int __unregister(struct device *dev, void *null) 640 { 641 spi_unregister_device(to_spi_device(dev)); 642 return 0; 643 } 644 645 /** 646 * spi_unregister_master - unregister SPI master controller 647 * @master: the master being unregistered 648 * Context: can sleep 649 * 650 * This call is used only by SPI master controller drivers, which are the 651 * only ones directly touching chip registers. 652 * 653 * This must be called from context that can sleep. 654 */ 655 void spi_unregister_master(struct spi_master *master) 656 { 657 int dummy; 658 659 mutex_lock(&board_lock); 660 list_del(&master->list); 661 mutex_unlock(&board_lock); 662 663 dummy = device_for_each_child(&master->dev, NULL, __unregister); 664 device_unregister(&master->dev); 665 } 666 EXPORT_SYMBOL_GPL(spi_unregister_master); 667 668 static int __spi_master_match(struct device *dev, void *data) 669 { 670 struct spi_master *m; 671 u16 *bus_num = data; 672 673 m = container_of(dev, struct spi_master, dev); 674 return m->bus_num == *bus_num; 675 } 676 677 /** 678 * spi_busnum_to_master - look up master associated with bus_num 679 * @bus_num: the master's bus number 680 * Context: can sleep 681 * 682 * This call may be used with devices that are registered after 683 * arch init time. It returns a refcounted pointer to the relevant 684 * spi_master (which the caller must release), or NULL if there is 685 * no such master registered. 686 */ 687 struct spi_master *spi_busnum_to_master(u16 bus_num) 688 { 689 struct device *dev; 690 struct spi_master *master = NULL; 691 692 dev = class_find_device(&spi_master_class, NULL, &bus_num, 693 __spi_master_match); 694 if (dev) 695 master = container_of(dev, struct spi_master, dev); 696 /* reference got in class_find_device */ 697 return master; 698 } 699 EXPORT_SYMBOL_GPL(spi_busnum_to_master); 700 701 702 /*-------------------------------------------------------------------------*/ 703 704 /* Core methods for SPI master protocol drivers. Some of the 705 * other core methods are currently defined as inline functions. 706 */ 707 708 /** 709 * spi_setup - setup SPI mode and clock rate 710 * @spi: the device whose settings are being modified 711 * Context: can sleep, and no requests are queued to the device 712 * 713 * SPI protocol drivers may need to update the transfer mode if the 714 * device doesn't work with its default. They may likewise need 715 * to update clock rates or word sizes from initial values. This function 716 * changes those settings, and must be called from a context that can sleep. 717 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take 718 * effect the next time the device is selected and data is transferred to 719 * or from it. When this function returns, the spi device is deselected. 720 * 721 * Note that this call will fail if the protocol driver specifies an option 722 * that the underlying controller or its driver does not support. For 723 * example, not all hardware supports wire transfers using nine bit words, 724 * LSB-first wire encoding, or active-high chipselects. 725 */ 726 int spi_setup(struct spi_device *spi) 727 { 728 unsigned bad_bits; 729 int status; 730 731 /* help drivers fail *cleanly* when they need options 732 * that aren't supported with their current master 733 */ 734 bad_bits = spi->mode & ~spi->master->mode_bits; 735 if (bad_bits) { 736 dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 737 bad_bits); 738 return -EINVAL; 739 } 740 741 if (!spi->bits_per_word) 742 spi->bits_per_word = 8; 743 744 status = spi->master->setup(spi); 745 746 dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s" 747 "%u bits/w, %u Hz max --> %d\n", 748 (int) (spi->mode & (SPI_CPOL | SPI_CPHA)), 749 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 750 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 751 (spi->mode & SPI_3WIRE) ? "3wire, " : "", 752 (spi->mode & SPI_LOOP) ? "loopback, " : "", 753 spi->bits_per_word, spi->max_speed_hz, 754 status); 755 756 return status; 757 } 758 EXPORT_SYMBOL_GPL(spi_setup); 759 760 static int __spi_async(struct spi_device *spi, struct spi_message *message) 761 { 762 struct spi_master *master = spi->master; 763 764 /* Half-duplex links include original MicroWire, and ones with 765 * only one data pin like SPI_3WIRE (switches direction) or where 766 * either MOSI or MISO is missing. They can also be caused by 767 * software limitations. 768 */ 769 if ((master->flags & SPI_MASTER_HALF_DUPLEX) 770 || (spi->mode & SPI_3WIRE)) { 771 struct spi_transfer *xfer; 772 unsigned flags = master->flags; 773 774 list_for_each_entry(xfer, &message->transfers, transfer_list) { 775 if (xfer->rx_buf && xfer->tx_buf) 776 return -EINVAL; 777 if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) 778 return -EINVAL; 779 if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) 780 return -EINVAL; 781 } 782 } 783 784 message->spi = spi; 785 message->status = -EINPROGRESS; 786 return master->transfer(spi, message); 787 } 788 789 /** 790 * spi_async - asynchronous SPI transfer 791 * @spi: device with which data will be exchanged 792 * @message: describes the data transfers, including completion callback 793 * Context: any (irqs may be blocked, etc) 794 * 795 * This call may be used in_irq and other contexts which can't sleep, 796 * as well as from task contexts which can sleep. 797 * 798 * The completion callback is invoked in a context which can't sleep. 799 * Before that invocation, the value of message->status is undefined. 800 * When the callback is issued, message->status holds either zero (to 801 * indicate complete success) or a negative error code. After that 802 * callback returns, the driver which issued the transfer request may 803 * deallocate the associated memory; it's no longer in use by any SPI 804 * core or controller driver code. 805 * 806 * Note that although all messages to a spi_device are handled in 807 * FIFO order, messages may go to different devices in other orders. 808 * Some device might be higher priority, or have various "hard" access 809 * time requirements, for example. 810 * 811 * On detection of any fault during the transfer, processing of 812 * the entire message is aborted, and the device is deselected. 813 * Until returning from the associated message completion callback, 814 * no other spi_message queued to that device will be processed. 815 * (This rule applies equally to all the synchronous transfer calls, 816 * which are wrappers around this core asynchronous primitive.) 817 */ 818 int spi_async(struct spi_device *spi, struct spi_message *message) 819 { 820 struct spi_master *master = spi->master; 821 int ret; 822 unsigned long flags; 823 824 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 825 826 if (master->bus_lock_flag) 827 ret = -EBUSY; 828 else 829 ret = __spi_async(spi, message); 830 831 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 832 833 return ret; 834 } 835 EXPORT_SYMBOL_GPL(spi_async); 836 837 /** 838 * spi_async_locked - version of spi_async with exclusive bus usage 839 * @spi: device with which data will be exchanged 840 * @message: describes the data transfers, including completion callback 841 * Context: any (irqs may be blocked, etc) 842 * 843 * This call may be used in_irq and other contexts which can't sleep, 844 * as well as from task contexts which can sleep. 845 * 846 * The completion callback is invoked in a context which can't sleep. 847 * Before that invocation, the value of message->status is undefined. 848 * When the callback is issued, message->status holds either zero (to 849 * indicate complete success) or a negative error code. After that 850 * callback returns, the driver which issued the transfer request may 851 * deallocate the associated memory; it's no longer in use by any SPI 852 * core or controller driver code. 853 * 854 * Note that although all messages to a spi_device are handled in 855 * FIFO order, messages may go to different devices in other orders. 856 * Some device might be higher priority, or have various "hard" access 857 * time requirements, for example. 858 * 859 * On detection of any fault during the transfer, processing of 860 * the entire message is aborted, and the device is deselected. 861 * Until returning from the associated message completion callback, 862 * no other spi_message queued to that device will be processed. 863 * (This rule applies equally to all the synchronous transfer calls, 864 * which are wrappers around this core asynchronous primitive.) 865 */ 866 int spi_async_locked(struct spi_device *spi, struct spi_message *message) 867 { 868 struct spi_master *master = spi->master; 869 int ret; 870 unsigned long flags; 871 872 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 873 874 ret = __spi_async(spi, message); 875 876 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 877 878 return ret; 879 880 } 881 EXPORT_SYMBOL_GPL(spi_async_locked); 882 883 884 /*-------------------------------------------------------------------------*/ 885 886 /* Utility methods for SPI master protocol drivers, layered on 887 * top of the core. Some other utility methods are defined as 888 * inline functions. 889 */ 890 891 static void spi_complete(void *arg) 892 { 893 complete(arg); 894 } 895 896 static int __spi_sync(struct spi_device *spi, struct spi_message *message, 897 int bus_locked) 898 { 899 DECLARE_COMPLETION_ONSTACK(done); 900 int status; 901 struct spi_master *master = spi->master; 902 903 message->complete = spi_complete; 904 message->context = &done; 905 906 if (!bus_locked) 907 mutex_lock(&master->bus_lock_mutex); 908 909 status = spi_async_locked(spi, message); 910 911 if (!bus_locked) 912 mutex_unlock(&master->bus_lock_mutex); 913 914 if (status == 0) { 915 wait_for_completion(&done); 916 status = message->status; 917 } 918 message->context = NULL; 919 return status; 920 } 921 922 /** 923 * spi_sync - blocking/synchronous SPI data transfers 924 * @spi: device with which data will be exchanged 925 * @message: describes the data transfers 926 * Context: can sleep 927 * 928 * This call may only be used from a context that may sleep. The sleep 929 * is non-interruptible, and has no timeout. Low-overhead controller 930 * drivers may DMA directly into and out of the message buffers. 931 * 932 * Note that the SPI device's chip select is active during the message, 933 * and then is normally disabled between messages. Drivers for some 934 * frequently-used devices may want to minimize costs of selecting a chip, 935 * by leaving it selected in anticipation that the next message will go 936 * to the same chip. (That may increase power usage.) 937 * 938 * Also, the caller is guaranteeing that the memory associated with the 939 * message will not be freed before this call returns. 940 * 941 * It returns zero on success, else a negative error code. 942 */ 943 int spi_sync(struct spi_device *spi, struct spi_message *message) 944 { 945 return __spi_sync(spi, message, 0); 946 } 947 EXPORT_SYMBOL_GPL(spi_sync); 948 949 /** 950 * spi_sync_locked - version of spi_sync with exclusive bus usage 951 * @spi: device with which data will be exchanged 952 * @message: describes the data transfers 953 * Context: can sleep 954 * 955 * This call may only be used from a context that may sleep. The sleep 956 * is non-interruptible, and has no timeout. Low-overhead controller 957 * drivers may DMA directly into and out of the message buffers. 958 * 959 * This call should be used by drivers that require exclusive access to the 960 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must 961 * be released by a spi_bus_unlock call when the exclusive access is over. 962 * 963 * It returns zero on success, else a negative error code. 964 */ 965 int spi_sync_locked(struct spi_device *spi, struct spi_message *message) 966 { 967 return __spi_sync(spi, message, 1); 968 } 969 EXPORT_SYMBOL_GPL(spi_sync_locked); 970 971 /** 972 * spi_bus_lock - obtain a lock for exclusive SPI bus usage 973 * @master: SPI bus master that should be locked for exclusive bus access 974 * Context: can sleep 975 * 976 * This call may only be used from a context that may sleep. The sleep 977 * is non-interruptible, and has no timeout. 978 * 979 * This call should be used by drivers that require exclusive access to the 980 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the 981 * exclusive access is over. Data transfer must be done by spi_sync_locked 982 * and spi_async_locked calls when the SPI bus lock is held. 983 * 984 * It returns zero on success, else a negative error code. 985 */ 986 int spi_bus_lock(struct spi_master *master) 987 { 988 unsigned long flags; 989 990 mutex_lock(&master->bus_lock_mutex); 991 992 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 993 master->bus_lock_flag = 1; 994 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 995 996 /* mutex remains locked until spi_bus_unlock is called */ 997 998 return 0; 999 } 1000 EXPORT_SYMBOL_GPL(spi_bus_lock); 1001 1002 /** 1003 * spi_bus_unlock - release the lock for exclusive SPI bus usage 1004 * @master: SPI bus master that was locked for exclusive bus access 1005 * Context: can sleep 1006 * 1007 * This call may only be used from a context that may sleep. The sleep 1008 * is non-interruptible, and has no timeout. 1009 * 1010 * This call releases an SPI bus lock previously obtained by an spi_bus_lock 1011 * call. 1012 * 1013 * It returns zero on success, else a negative error code. 1014 */ 1015 int spi_bus_unlock(struct spi_master *master) 1016 { 1017 master->bus_lock_flag = 0; 1018 1019 mutex_unlock(&master->bus_lock_mutex); 1020 1021 return 0; 1022 } 1023 EXPORT_SYMBOL_GPL(spi_bus_unlock); 1024 1025 /* portable code must never pass more than 32 bytes */ 1026 #define SPI_BUFSIZ max(32,SMP_CACHE_BYTES) 1027 1028 static u8 *buf; 1029 1030 /** 1031 * spi_write_then_read - SPI synchronous write followed by read 1032 * @spi: device with which data will be exchanged 1033 * @txbuf: data to be written (need not be dma-safe) 1034 * @n_tx: size of txbuf, in bytes 1035 * @rxbuf: buffer into which data will be read (need not be dma-safe) 1036 * @n_rx: size of rxbuf, in bytes 1037 * Context: can sleep 1038 * 1039 * This performs a half duplex MicroWire style transaction with the 1040 * device, sending txbuf and then reading rxbuf. The return value 1041 * is zero for success, else a negative errno status code. 1042 * This call may only be used from a context that may sleep. 1043 * 1044 * Parameters to this routine are always copied using a small buffer; 1045 * portable code should never use this for more than 32 bytes. 1046 * Performance-sensitive or bulk transfer code should instead use 1047 * spi_{async,sync}() calls with dma-safe buffers. 1048 */ 1049 int spi_write_then_read(struct spi_device *spi, 1050 const void *txbuf, unsigned n_tx, 1051 void *rxbuf, unsigned n_rx) 1052 { 1053 static DEFINE_MUTEX(lock); 1054 1055 int status; 1056 struct spi_message message; 1057 struct spi_transfer x[2]; 1058 u8 *local_buf; 1059 1060 /* Use preallocated DMA-safe buffer. We can't avoid copying here, 1061 * (as a pure convenience thing), but we can keep heap costs 1062 * out of the hot path ... 1063 */ 1064 if ((n_tx + n_rx) > SPI_BUFSIZ) 1065 return -EINVAL; 1066 1067 spi_message_init(&message); 1068 memset(x, 0, sizeof x); 1069 if (n_tx) { 1070 x[0].len = n_tx; 1071 spi_message_add_tail(&x[0], &message); 1072 } 1073 if (n_rx) { 1074 x[1].len = n_rx; 1075 spi_message_add_tail(&x[1], &message); 1076 } 1077 1078 /* ... unless someone else is using the pre-allocated buffer */ 1079 if (!mutex_trylock(&lock)) { 1080 local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 1081 if (!local_buf) 1082 return -ENOMEM; 1083 } else 1084 local_buf = buf; 1085 1086 memcpy(local_buf, txbuf, n_tx); 1087 x[0].tx_buf = local_buf; 1088 x[1].rx_buf = local_buf + n_tx; 1089 1090 /* do the i/o */ 1091 status = spi_sync(spi, &message); 1092 if (status == 0) 1093 memcpy(rxbuf, x[1].rx_buf, n_rx); 1094 1095 if (x[0].tx_buf == buf) 1096 mutex_unlock(&lock); 1097 else 1098 kfree(local_buf); 1099 1100 return status; 1101 } 1102 EXPORT_SYMBOL_GPL(spi_write_then_read); 1103 1104 /*-------------------------------------------------------------------------*/ 1105 1106 static int __init spi_init(void) 1107 { 1108 int status; 1109 1110 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 1111 if (!buf) { 1112 status = -ENOMEM; 1113 goto err0; 1114 } 1115 1116 status = bus_register(&spi_bus_type); 1117 if (status < 0) 1118 goto err1; 1119 1120 status = class_register(&spi_master_class); 1121 if (status < 0) 1122 goto err2; 1123 return 0; 1124 1125 err2: 1126 bus_unregister(&spi_bus_type); 1127 err1: 1128 kfree(buf); 1129 buf = NULL; 1130 err0: 1131 return status; 1132 } 1133 1134 /* board_info is normally registered in arch_initcall(), 1135 * but even essential drivers wait till later 1136 * 1137 * REVISIT only boardinfo really needs static linking. the rest (device and 1138 * driver registration) _could_ be dynamically linked (modular) ... costs 1139 * include needing to have boardinfo data structures be much more public. 1140 */ 1141 postcore_initcall(spi_init); 1142 1143