1 /* 2 * SPI init/core code 3 * 4 * Copyright (C) 2005 David Brownell 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software 18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 */ 20 21 #include <linux/kernel.h> 22 #include <linux/device.h> 23 #include <linux/init.h> 24 #include <linux/cache.h> 25 #include <linux/mutex.h> 26 #include <linux/of_device.h> 27 #include <linux/slab.h> 28 #include <linux/mod_devicetable.h> 29 #include <linux/spi/spi.h> 30 #include <linux/of_spi.h> 31 #include <linux/pm_runtime.h> 32 #include <linux/export.h> 33 #include <linux/sched.h> 34 #include <linux/delay.h> 35 #include <linux/kthread.h> 36 37 static void spidev_release(struct device *dev) 38 { 39 struct spi_device *spi = to_spi_device(dev); 40 41 /* spi masters may cleanup for released devices */ 42 if (spi->master->cleanup) 43 spi->master->cleanup(spi); 44 45 spi_master_put(spi->master); 46 kfree(spi); 47 } 48 49 static ssize_t 50 modalias_show(struct device *dev, struct device_attribute *a, char *buf) 51 { 52 const struct spi_device *spi = to_spi_device(dev); 53 54 return sprintf(buf, "%s\n", spi->modalias); 55 } 56 57 static struct device_attribute spi_dev_attrs[] = { 58 __ATTR_RO(modalias), 59 __ATTR_NULL, 60 }; 61 62 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work, 63 * and the sysfs version makes coldplug work too. 64 */ 65 66 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, 67 const struct spi_device *sdev) 68 { 69 while (id->name[0]) { 70 if (!strcmp(sdev->modalias, id->name)) 71 return id; 72 id++; 73 } 74 return NULL; 75 } 76 77 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev) 78 { 79 const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver); 80 81 return spi_match_id(sdrv->id_table, sdev); 82 } 83 EXPORT_SYMBOL_GPL(spi_get_device_id); 84 85 static int spi_match_device(struct device *dev, struct device_driver *drv) 86 { 87 const struct spi_device *spi = to_spi_device(dev); 88 const struct spi_driver *sdrv = to_spi_driver(drv); 89 90 /* Attempt an OF style match */ 91 if (of_driver_match_device(dev, drv)) 92 return 1; 93 94 if (sdrv->id_table) 95 return !!spi_match_id(sdrv->id_table, spi); 96 97 return strcmp(spi->modalias, drv->name) == 0; 98 } 99 100 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env) 101 { 102 const struct spi_device *spi = to_spi_device(dev); 103 104 add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias); 105 return 0; 106 } 107 108 #ifdef CONFIG_PM_SLEEP 109 static int spi_legacy_suspend(struct device *dev, pm_message_t message) 110 { 111 int value = 0; 112 struct spi_driver *drv = to_spi_driver(dev->driver); 113 114 /* suspend will stop irqs and dma; no more i/o */ 115 if (drv) { 116 if (drv->suspend) 117 value = drv->suspend(to_spi_device(dev), message); 118 else 119 dev_dbg(dev, "... can't suspend\n"); 120 } 121 return value; 122 } 123 124 static int spi_legacy_resume(struct device *dev) 125 { 126 int value = 0; 127 struct spi_driver *drv = to_spi_driver(dev->driver); 128 129 /* resume may restart the i/o queue */ 130 if (drv) { 131 if (drv->resume) 132 value = drv->resume(to_spi_device(dev)); 133 else 134 dev_dbg(dev, "... can't resume\n"); 135 } 136 return value; 137 } 138 139 static int spi_pm_suspend(struct device *dev) 140 { 141 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 142 143 if (pm) 144 return pm_generic_suspend(dev); 145 else 146 return spi_legacy_suspend(dev, PMSG_SUSPEND); 147 } 148 149 static int spi_pm_resume(struct device *dev) 150 { 151 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 152 153 if (pm) 154 return pm_generic_resume(dev); 155 else 156 return spi_legacy_resume(dev); 157 } 158 159 static int spi_pm_freeze(struct device *dev) 160 { 161 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 162 163 if (pm) 164 return pm_generic_freeze(dev); 165 else 166 return spi_legacy_suspend(dev, PMSG_FREEZE); 167 } 168 169 static int spi_pm_thaw(struct device *dev) 170 { 171 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 172 173 if (pm) 174 return pm_generic_thaw(dev); 175 else 176 return spi_legacy_resume(dev); 177 } 178 179 static int spi_pm_poweroff(struct device *dev) 180 { 181 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 182 183 if (pm) 184 return pm_generic_poweroff(dev); 185 else 186 return spi_legacy_suspend(dev, PMSG_HIBERNATE); 187 } 188 189 static int spi_pm_restore(struct device *dev) 190 { 191 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 192 193 if (pm) 194 return pm_generic_restore(dev); 195 else 196 return spi_legacy_resume(dev); 197 } 198 #else 199 #define spi_pm_suspend NULL 200 #define spi_pm_resume NULL 201 #define spi_pm_freeze NULL 202 #define spi_pm_thaw NULL 203 #define spi_pm_poweroff NULL 204 #define spi_pm_restore NULL 205 #endif 206 207 static const struct dev_pm_ops spi_pm = { 208 .suspend = spi_pm_suspend, 209 .resume = spi_pm_resume, 210 .freeze = spi_pm_freeze, 211 .thaw = spi_pm_thaw, 212 .poweroff = spi_pm_poweroff, 213 .restore = spi_pm_restore, 214 SET_RUNTIME_PM_OPS( 215 pm_generic_runtime_suspend, 216 pm_generic_runtime_resume, 217 pm_generic_runtime_idle 218 ) 219 }; 220 221 struct bus_type spi_bus_type = { 222 .name = "spi", 223 .dev_attrs = spi_dev_attrs, 224 .match = spi_match_device, 225 .uevent = spi_uevent, 226 .pm = &spi_pm, 227 }; 228 EXPORT_SYMBOL_GPL(spi_bus_type); 229 230 231 static int spi_drv_probe(struct device *dev) 232 { 233 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 234 235 return sdrv->probe(to_spi_device(dev)); 236 } 237 238 static int spi_drv_remove(struct device *dev) 239 { 240 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 241 242 return sdrv->remove(to_spi_device(dev)); 243 } 244 245 static void spi_drv_shutdown(struct device *dev) 246 { 247 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 248 249 sdrv->shutdown(to_spi_device(dev)); 250 } 251 252 /** 253 * spi_register_driver - register a SPI driver 254 * @sdrv: the driver to register 255 * Context: can sleep 256 */ 257 int spi_register_driver(struct spi_driver *sdrv) 258 { 259 sdrv->driver.bus = &spi_bus_type; 260 if (sdrv->probe) 261 sdrv->driver.probe = spi_drv_probe; 262 if (sdrv->remove) 263 sdrv->driver.remove = spi_drv_remove; 264 if (sdrv->shutdown) 265 sdrv->driver.shutdown = spi_drv_shutdown; 266 return driver_register(&sdrv->driver); 267 } 268 EXPORT_SYMBOL_GPL(spi_register_driver); 269 270 /*-------------------------------------------------------------------------*/ 271 272 /* SPI devices should normally not be created by SPI device drivers; that 273 * would make them board-specific. Similarly with SPI master drivers. 274 * Device registration normally goes into like arch/.../mach.../board-YYY.c 275 * with other readonly (flashable) information about mainboard devices. 276 */ 277 278 struct boardinfo { 279 struct list_head list; 280 struct spi_board_info board_info; 281 }; 282 283 static LIST_HEAD(board_list); 284 static LIST_HEAD(spi_master_list); 285 286 /* 287 * Used to protect add/del opertion for board_info list and 288 * spi_master list, and their matching process 289 */ 290 static DEFINE_MUTEX(board_lock); 291 292 /** 293 * spi_alloc_device - Allocate a new SPI device 294 * @master: Controller to which device is connected 295 * Context: can sleep 296 * 297 * Allows a driver to allocate and initialize a spi_device without 298 * registering it immediately. This allows a driver to directly 299 * fill the spi_device with device parameters before calling 300 * spi_add_device() on it. 301 * 302 * Caller is responsible to call spi_add_device() on the returned 303 * spi_device structure to add it to the SPI master. If the caller 304 * needs to discard the spi_device without adding it, then it should 305 * call spi_dev_put() on it. 306 * 307 * Returns a pointer to the new device, or NULL. 308 */ 309 struct spi_device *spi_alloc_device(struct spi_master *master) 310 { 311 struct spi_device *spi; 312 struct device *dev = master->dev.parent; 313 314 if (!spi_master_get(master)) 315 return NULL; 316 317 spi = kzalloc(sizeof *spi, GFP_KERNEL); 318 if (!spi) { 319 dev_err(dev, "cannot alloc spi_device\n"); 320 spi_master_put(master); 321 return NULL; 322 } 323 324 spi->master = master; 325 spi->dev.parent = &master->dev; 326 spi->dev.bus = &spi_bus_type; 327 spi->dev.release = spidev_release; 328 device_initialize(&spi->dev); 329 return spi; 330 } 331 EXPORT_SYMBOL_GPL(spi_alloc_device); 332 333 /** 334 * spi_add_device - Add spi_device allocated with spi_alloc_device 335 * @spi: spi_device to register 336 * 337 * Companion function to spi_alloc_device. Devices allocated with 338 * spi_alloc_device can be added onto the spi bus with this function. 339 * 340 * Returns 0 on success; negative errno on failure 341 */ 342 int spi_add_device(struct spi_device *spi) 343 { 344 static DEFINE_MUTEX(spi_add_lock); 345 struct device *dev = spi->master->dev.parent; 346 struct device *d; 347 int status; 348 349 /* Chipselects are numbered 0..max; validate. */ 350 if (spi->chip_select >= spi->master->num_chipselect) { 351 dev_err(dev, "cs%d >= max %d\n", 352 spi->chip_select, 353 spi->master->num_chipselect); 354 return -EINVAL; 355 } 356 357 /* Set the bus ID string */ 358 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev), 359 spi->chip_select); 360 361 362 /* We need to make sure there's no other device with this 363 * chipselect **BEFORE** we call setup(), else we'll trash 364 * its configuration. Lock against concurrent add() calls. 365 */ 366 mutex_lock(&spi_add_lock); 367 368 d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev)); 369 if (d != NULL) { 370 dev_err(dev, "chipselect %d already in use\n", 371 spi->chip_select); 372 put_device(d); 373 status = -EBUSY; 374 goto done; 375 } 376 377 /* Drivers may modify this initial i/o setup, but will 378 * normally rely on the device being setup. Devices 379 * using SPI_CS_HIGH can't coexist well otherwise... 380 */ 381 status = spi_setup(spi); 382 if (status < 0) { 383 dev_err(dev, "can't setup %s, status %d\n", 384 dev_name(&spi->dev), status); 385 goto done; 386 } 387 388 /* Device may be bound to an active driver when this returns */ 389 status = device_add(&spi->dev); 390 if (status < 0) 391 dev_err(dev, "can't add %s, status %d\n", 392 dev_name(&spi->dev), status); 393 else 394 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev)); 395 396 done: 397 mutex_unlock(&spi_add_lock); 398 return status; 399 } 400 EXPORT_SYMBOL_GPL(spi_add_device); 401 402 /** 403 * spi_new_device - instantiate one new SPI device 404 * @master: Controller to which device is connected 405 * @chip: Describes the SPI device 406 * Context: can sleep 407 * 408 * On typical mainboards, this is purely internal; and it's not needed 409 * after board init creates the hard-wired devices. Some development 410 * platforms may not be able to use spi_register_board_info though, and 411 * this is exported so that for example a USB or parport based adapter 412 * driver could add devices (which it would learn about out-of-band). 413 * 414 * Returns the new device, or NULL. 415 */ 416 struct spi_device *spi_new_device(struct spi_master *master, 417 struct spi_board_info *chip) 418 { 419 struct spi_device *proxy; 420 int status; 421 422 /* NOTE: caller did any chip->bus_num checks necessary. 423 * 424 * Also, unless we change the return value convention to use 425 * error-or-pointer (not NULL-or-pointer), troubleshootability 426 * suggests syslogged diagnostics are best here (ugh). 427 */ 428 429 proxy = spi_alloc_device(master); 430 if (!proxy) 431 return NULL; 432 433 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); 434 435 proxy->chip_select = chip->chip_select; 436 proxy->max_speed_hz = chip->max_speed_hz; 437 proxy->mode = chip->mode; 438 proxy->irq = chip->irq; 439 strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias)); 440 proxy->dev.platform_data = (void *) chip->platform_data; 441 proxy->controller_data = chip->controller_data; 442 proxy->controller_state = NULL; 443 444 status = spi_add_device(proxy); 445 if (status < 0) { 446 spi_dev_put(proxy); 447 return NULL; 448 } 449 450 return proxy; 451 } 452 EXPORT_SYMBOL_GPL(spi_new_device); 453 454 static void spi_match_master_to_boardinfo(struct spi_master *master, 455 struct spi_board_info *bi) 456 { 457 struct spi_device *dev; 458 459 if (master->bus_num != bi->bus_num) 460 return; 461 462 dev = spi_new_device(master, bi); 463 if (!dev) 464 dev_err(master->dev.parent, "can't create new device for %s\n", 465 bi->modalias); 466 } 467 468 /** 469 * spi_register_board_info - register SPI devices for a given board 470 * @info: array of chip descriptors 471 * @n: how many descriptors are provided 472 * Context: can sleep 473 * 474 * Board-specific early init code calls this (probably during arch_initcall) 475 * with segments of the SPI device table. Any device nodes are created later, 476 * after the relevant parent SPI controller (bus_num) is defined. We keep 477 * this table of devices forever, so that reloading a controller driver will 478 * not make Linux forget about these hard-wired devices. 479 * 480 * Other code can also call this, e.g. a particular add-on board might provide 481 * SPI devices through its expansion connector, so code initializing that board 482 * would naturally declare its SPI devices. 483 * 484 * The board info passed can safely be __initdata ... but be careful of 485 * any embedded pointers (platform_data, etc), they're copied as-is. 486 */ 487 int __devinit 488 spi_register_board_info(struct spi_board_info const *info, unsigned n) 489 { 490 struct boardinfo *bi; 491 int i; 492 493 bi = kzalloc(n * sizeof(*bi), GFP_KERNEL); 494 if (!bi) 495 return -ENOMEM; 496 497 for (i = 0; i < n; i++, bi++, info++) { 498 struct spi_master *master; 499 500 memcpy(&bi->board_info, info, sizeof(*info)); 501 mutex_lock(&board_lock); 502 list_add_tail(&bi->list, &board_list); 503 list_for_each_entry(master, &spi_master_list, list) 504 spi_match_master_to_boardinfo(master, &bi->board_info); 505 mutex_unlock(&board_lock); 506 } 507 508 return 0; 509 } 510 511 /*-------------------------------------------------------------------------*/ 512 513 /** 514 * spi_pump_messages - kthread work function which processes spi message queue 515 * @work: pointer to kthread work struct contained in the master struct 516 * 517 * This function checks if there is any spi message in the queue that 518 * needs processing and if so call out to the driver to initialize hardware 519 * and transfer each message. 520 * 521 */ 522 static void spi_pump_messages(struct kthread_work *work) 523 { 524 struct spi_master *master = 525 container_of(work, struct spi_master, pump_messages); 526 unsigned long flags; 527 bool was_busy = false; 528 int ret; 529 530 /* Lock queue and check for queue work */ 531 spin_lock_irqsave(&master->queue_lock, flags); 532 if (list_empty(&master->queue) || !master->running) { 533 if (master->busy) { 534 ret = master->unprepare_transfer_hardware(master); 535 if (ret) { 536 spin_unlock_irqrestore(&master->queue_lock, flags); 537 dev_err(&master->dev, 538 "failed to unprepare transfer hardware\n"); 539 return; 540 } 541 } 542 master->busy = false; 543 spin_unlock_irqrestore(&master->queue_lock, flags); 544 return; 545 } 546 547 /* Make sure we are not already running a message */ 548 if (master->cur_msg) { 549 spin_unlock_irqrestore(&master->queue_lock, flags); 550 return; 551 } 552 /* Extract head of queue */ 553 master->cur_msg = 554 list_entry(master->queue.next, struct spi_message, queue); 555 556 list_del_init(&master->cur_msg->queue); 557 if (master->busy) 558 was_busy = true; 559 else 560 master->busy = true; 561 spin_unlock_irqrestore(&master->queue_lock, flags); 562 563 if (!was_busy) { 564 ret = master->prepare_transfer_hardware(master); 565 if (ret) { 566 dev_err(&master->dev, 567 "failed to prepare transfer hardware\n"); 568 return; 569 } 570 } 571 572 ret = master->transfer_one_message(master, master->cur_msg); 573 if (ret) { 574 dev_err(&master->dev, 575 "failed to transfer one message from queue\n"); 576 return; 577 } 578 } 579 580 static int spi_init_queue(struct spi_master *master) 581 { 582 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 583 584 INIT_LIST_HEAD(&master->queue); 585 spin_lock_init(&master->queue_lock); 586 587 master->running = false; 588 master->busy = false; 589 590 init_kthread_worker(&master->kworker); 591 master->kworker_task = kthread_run(kthread_worker_fn, 592 &master->kworker, 593 dev_name(&master->dev)); 594 if (IS_ERR(master->kworker_task)) { 595 dev_err(&master->dev, "failed to create message pump task\n"); 596 return -ENOMEM; 597 } 598 init_kthread_work(&master->pump_messages, spi_pump_messages); 599 600 /* 601 * Master config will indicate if this controller should run the 602 * message pump with high (realtime) priority to reduce the transfer 603 * latency on the bus by minimising the delay between a transfer 604 * request and the scheduling of the message pump thread. Without this 605 * setting the message pump thread will remain at default priority. 606 */ 607 if (master->rt) { 608 dev_info(&master->dev, 609 "will run message pump with realtime priority\n"); 610 sched_setscheduler(master->kworker_task, SCHED_FIFO, ¶m); 611 } 612 613 return 0; 614 } 615 616 /** 617 * spi_get_next_queued_message() - called by driver to check for queued 618 * messages 619 * @master: the master to check for queued messages 620 * 621 * If there are more messages in the queue, the next message is returned from 622 * this call. 623 */ 624 struct spi_message *spi_get_next_queued_message(struct spi_master *master) 625 { 626 struct spi_message *next; 627 unsigned long flags; 628 629 /* get a pointer to the next message, if any */ 630 spin_lock_irqsave(&master->queue_lock, flags); 631 if (list_empty(&master->queue)) 632 next = NULL; 633 else 634 next = list_entry(master->queue.next, 635 struct spi_message, queue); 636 spin_unlock_irqrestore(&master->queue_lock, flags); 637 638 return next; 639 } 640 EXPORT_SYMBOL_GPL(spi_get_next_queued_message); 641 642 /** 643 * spi_finalize_current_message() - the current message is complete 644 * @master: the master to return the message to 645 * 646 * Called by the driver to notify the core that the message in the front of the 647 * queue is complete and can be removed from the queue. 648 */ 649 void spi_finalize_current_message(struct spi_master *master) 650 { 651 struct spi_message *mesg; 652 unsigned long flags; 653 654 spin_lock_irqsave(&master->queue_lock, flags); 655 mesg = master->cur_msg; 656 master->cur_msg = NULL; 657 658 queue_kthread_work(&master->kworker, &master->pump_messages); 659 spin_unlock_irqrestore(&master->queue_lock, flags); 660 661 mesg->state = NULL; 662 if (mesg->complete) 663 mesg->complete(mesg->context); 664 } 665 EXPORT_SYMBOL_GPL(spi_finalize_current_message); 666 667 static int spi_start_queue(struct spi_master *master) 668 { 669 unsigned long flags; 670 671 spin_lock_irqsave(&master->queue_lock, flags); 672 673 if (master->running || master->busy) { 674 spin_unlock_irqrestore(&master->queue_lock, flags); 675 return -EBUSY; 676 } 677 678 master->running = true; 679 master->cur_msg = NULL; 680 spin_unlock_irqrestore(&master->queue_lock, flags); 681 682 queue_kthread_work(&master->kworker, &master->pump_messages); 683 684 return 0; 685 } 686 687 static int spi_stop_queue(struct spi_master *master) 688 { 689 unsigned long flags; 690 unsigned limit = 500; 691 int ret = 0; 692 693 spin_lock_irqsave(&master->queue_lock, flags); 694 695 /* 696 * This is a bit lame, but is optimized for the common execution path. 697 * A wait_queue on the master->busy could be used, but then the common 698 * execution path (pump_messages) would be required to call wake_up or 699 * friends on every SPI message. Do this instead. 700 */ 701 while ((!list_empty(&master->queue) || master->busy) && limit--) { 702 spin_unlock_irqrestore(&master->queue_lock, flags); 703 msleep(10); 704 spin_lock_irqsave(&master->queue_lock, flags); 705 } 706 707 if (!list_empty(&master->queue) || master->busy) 708 ret = -EBUSY; 709 else 710 master->running = false; 711 712 spin_unlock_irqrestore(&master->queue_lock, flags); 713 714 if (ret) { 715 dev_warn(&master->dev, 716 "could not stop message queue\n"); 717 return ret; 718 } 719 return ret; 720 } 721 722 static int spi_destroy_queue(struct spi_master *master) 723 { 724 int ret; 725 726 ret = spi_stop_queue(master); 727 728 /* 729 * flush_kthread_worker will block until all work is done. 730 * If the reason that stop_queue timed out is that the work will never 731 * finish, then it does no good to call flush/stop thread, so 732 * return anyway. 733 */ 734 if (ret) { 735 dev_err(&master->dev, "problem destroying queue\n"); 736 return ret; 737 } 738 739 flush_kthread_worker(&master->kworker); 740 kthread_stop(master->kworker_task); 741 742 return 0; 743 } 744 745 /** 746 * spi_queued_transfer - transfer function for queued transfers 747 * @spi: spi device which is requesting transfer 748 * @msg: spi message which is to handled is queued to driver queue 749 */ 750 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) 751 { 752 struct spi_master *master = spi->master; 753 unsigned long flags; 754 755 spin_lock_irqsave(&master->queue_lock, flags); 756 757 if (!master->running) { 758 spin_unlock_irqrestore(&master->queue_lock, flags); 759 return -ESHUTDOWN; 760 } 761 msg->actual_length = 0; 762 msg->status = -EINPROGRESS; 763 764 list_add_tail(&msg->queue, &master->queue); 765 if (master->running && !master->busy) 766 queue_kthread_work(&master->kworker, &master->pump_messages); 767 768 spin_unlock_irqrestore(&master->queue_lock, flags); 769 return 0; 770 } 771 772 static int spi_master_initialize_queue(struct spi_master *master) 773 { 774 int ret; 775 776 master->queued = true; 777 master->transfer = spi_queued_transfer; 778 779 /* Initialize and start queue */ 780 ret = spi_init_queue(master); 781 if (ret) { 782 dev_err(&master->dev, "problem initializing queue\n"); 783 goto err_init_queue; 784 } 785 ret = spi_start_queue(master); 786 if (ret) { 787 dev_err(&master->dev, "problem starting queue\n"); 788 goto err_start_queue; 789 } 790 791 return 0; 792 793 err_start_queue: 794 err_init_queue: 795 spi_destroy_queue(master); 796 return ret; 797 } 798 799 /*-------------------------------------------------------------------------*/ 800 801 static void spi_master_release(struct device *dev) 802 { 803 struct spi_master *master; 804 805 master = container_of(dev, struct spi_master, dev); 806 kfree(master); 807 } 808 809 static struct class spi_master_class = { 810 .name = "spi_master", 811 .owner = THIS_MODULE, 812 .dev_release = spi_master_release, 813 }; 814 815 816 817 /** 818 * spi_alloc_master - allocate SPI master controller 819 * @dev: the controller, possibly using the platform_bus 820 * @size: how much zeroed driver-private data to allocate; the pointer to this 821 * memory is in the driver_data field of the returned device, 822 * accessible with spi_master_get_devdata(). 823 * Context: can sleep 824 * 825 * This call is used only by SPI master controller drivers, which are the 826 * only ones directly touching chip registers. It's how they allocate 827 * an spi_master structure, prior to calling spi_register_master(). 828 * 829 * This must be called from context that can sleep. It returns the SPI 830 * master structure on success, else NULL. 831 * 832 * The caller is responsible for assigning the bus number and initializing 833 * the master's methods before calling spi_register_master(); and (after errors 834 * adding the device) calling spi_master_put() and kfree() to prevent a memory 835 * leak. 836 */ 837 struct spi_master *spi_alloc_master(struct device *dev, unsigned size) 838 { 839 struct spi_master *master; 840 841 if (!dev) 842 return NULL; 843 844 master = kzalloc(size + sizeof *master, GFP_KERNEL); 845 if (!master) 846 return NULL; 847 848 device_initialize(&master->dev); 849 master->dev.class = &spi_master_class; 850 master->dev.parent = get_device(dev); 851 spi_master_set_devdata(master, &master[1]); 852 853 return master; 854 } 855 EXPORT_SYMBOL_GPL(spi_alloc_master); 856 857 /** 858 * spi_register_master - register SPI master controller 859 * @master: initialized master, originally from spi_alloc_master() 860 * Context: can sleep 861 * 862 * SPI master controllers connect to their drivers using some non-SPI bus, 863 * such as the platform bus. The final stage of probe() in that code 864 * includes calling spi_register_master() to hook up to this SPI bus glue. 865 * 866 * SPI controllers use board specific (often SOC specific) bus numbers, 867 * and board-specific addressing for SPI devices combines those numbers 868 * with chip select numbers. Since SPI does not directly support dynamic 869 * device identification, boards need configuration tables telling which 870 * chip is at which address. 871 * 872 * This must be called from context that can sleep. It returns zero on 873 * success, else a negative error code (dropping the master's refcount). 874 * After a successful return, the caller is responsible for calling 875 * spi_unregister_master(). 876 */ 877 int spi_register_master(struct spi_master *master) 878 { 879 static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1); 880 struct device *dev = master->dev.parent; 881 struct boardinfo *bi; 882 int status = -ENODEV; 883 int dynamic = 0; 884 885 if (!dev) 886 return -ENODEV; 887 888 /* even if it's just one always-selected device, there must 889 * be at least one chipselect 890 */ 891 if (master->num_chipselect == 0) 892 return -EINVAL; 893 894 /* convention: dynamically assigned bus IDs count down from the max */ 895 if (master->bus_num < 0) { 896 /* FIXME switch to an IDR based scheme, something like 897 * I2C now uses, so we can't run out of "dynamic" IDs 898 */ 899 master->bus_num = atomic_dec_return(&dyn_bus_id); 900 dynamic = 1; 901 } 902 903 spin_lock_init(&master->bus_lock_spinlock); 904 mutex_init(&master->bus_lock_mutex); 905 master->bus_lock_flag = 0; 906 907 /* register the device, then userspace will see it. 908 * registration fails if the bus ID is in use. 909 */ 910 dev_set_name(&master->dev, "spi%u", master->bus_num); 911 status = device_add(&master->dev); 912 if (status < 0) 913 goto done; 914 dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev), 915 dynamic ? " (dynamic)" : ""); 916 917 /* If we're using a queued driver, start the queue */ 918 if (master->transfer) 919 dev_info(dev, "master is unqueued, this is deprecated\n"); 920 else { 921 status = spi_master_initialize_queue(master); 922 if (status) { 923 device_unregister(&master->dev); 924 goto done; 925 } 926 } 927 928 mutex_lock(&board_lock); 929 list_add_tail(&master->list, &spi_master_list); 930 list_for_each_entry(bi, &board_list, list) 931 spi_match_master_to_boardinfo(master, &bi->board_info); 932 mutex_unlock(&board_lock); 933 934 /* Register devices from the device tree */ 935 of_register_spi_devices(master); 936 done: 937 return status; 938 } 939 EXPORT_SYMBOL_GPL(spi_register_master); 940 941 static int __unregister(struct device *dev, void *null) 942 { 943 spi_unregister_device(to_spi_device(dev)); 944 return 0; 945 } 946 947 /** 948 * spi_unregister_master - unregister SPI master controller 949 * @master: the master being unregistered 950 * Context: can sleep 951 * 952 * This call is used only by SPI master controller drivers, which are the 953 * only ones directly touching chip registers. 954 * 955 * This must be called from context that can sleep. 956 */ 957 void spi_unregister_master(struct spi_master *master) 958 { 959 int dummy; 960 961 if (master->queued) { 962 if (spi_destroy_queue(master)) 963 dev_err(&master->dev, "queue remove failed\n"); 964 } 965 966 mutex_lock(&board_lock); 967 list_del(&master->list); 968 mutex_unlock(&board_lock); 969 970 dummy = device_for_each_child(&master->dev, NULL, __unregister); 971 device_unregister(&master->dev); 972 } 973 EXPORT_SYMBOL_GPL(spi_unregister_master); 974 975 int spi_master_suspend(struct spi_master *master) 976 { 977 int ret; 978 979 /* Basically no-ops for non-queued masters */ 980 if (!master->queued) 981 return 0; 982 983 ret = spi_stop_queue(master); 984 if (ret) 985 dev_err(&master->dev, "queue stop failed\n"); 986 987 return ret; 988 } 989 EXPORT_SYMBOL_GPL(spi_master_suspend); 990 991 int spi_master_resume(struct spi_master *master) 992 { 993 int ret; 994 995 if (!master->queued) 996 return 0; 997 998 ret = spi_start_queue(master); 999 if (ret) 1000 dev_err(&master->dev, "queue restart failed\n"); 1001 1002 return ret; 1003 } 1004 EXPORT_SYMBOL_GPL(spi_master_resume); 1005 1006 static int __spi_master_match(struct device *dev, void *data) 1007 { 1008 struct spi_master *m; 1009 u16 *bus_num = data; 1010 1011 m = container_of(dev, struct spi_master, dev); 1012 return m->bus_num == *bus_num; 1013 } 1014 1015 /** 1016 * spi_busnum_to_master - look up master associated with bus_num 1017 * @bus_num: the master's bus number 1018 * Context: can sleep 1019 * 1020 * This call may be used with devices that are registered after 1021 * arch init time. It returns a refcounted pointer to the relevant 1022 * spi_master (which the caller must release), or NULL if there is 1023 * no such master registered. 1024 */ 1025 struct spi_master *spi_busnum_to_master(u16 bus_num) 1026 { 1027 struct device *dev; 1028 struct spi_master *master = NULL; 1029 1030 dev = class_find_device(&spi_master_class, NULL, &bus_num, 1031 __spi_master_match); 1032 if (dev) 1033 master = container_of(dev, struct spi_master, dev); 1034 /* reference got in class_find_device */ 1035 return master; 1036 } 1037 EXPORT_SYMBOL_GPL(spi_busnum_to_master); 1038 1039 1040 /*-------------------------------------------------------------------------*/ 1041 1042 /* Core methods for SPI master protocol drivers. Some of the 1043 * other core methods are currently defined as inline functions. 1044 */ 1045 1046 /** 1047 * spi_setup - setup SPI mode and clock rate 1048 * @spi: the device whose settings are being modified 1049 * Context: can sleep, and no requests are queued to the device 1050 * 1051 * SPI protocol drivers may need to update the transfer mode if the 1052 * device doesn't work with its default. They may likewise need 1053 * to update clock rates or word sizes from initial values. This function 1054 * changes those settings, and must be called from a context that can sleep. 1055 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take 1056 * effect the next time the device is selected and data is transferred to 1057 * or from it. When this function returns, the spi device is deselected. 1058 * 1059 * Note that this call will fail if the protocol driver specifies an option 1060 * that the underlying controller or its driver does not support. For 1061 * example, not all hardware supports wire transfers using nine bit words, 1062 * LSB-first wire encoding, or active-high chipselects. 1063 */ 1064 int spi_setup(struct spi_device *spi) 1065 { 1066 unsigned bad_bits; 1067 int status; 1068 1069 /* help drivers fail *cleanly* when they need options 1070 * that aren't supported with their current master 1071 */ 1072 bad_bits = spi->mode & ~spi->master->mode_bits; 1073 if (bad_bits) { 1074 dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 1075 bad_bits); 1076 return -EINVAL; 1077 } 1078 1079 if (!spi->bits_per_word) 1080 spi->bits_per_word = 8; 1081 1082 status = spi->master->setup(spi); 1083 1084 dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s" 1085 "%u bits/w, %u Hz max --> %d\n", 1086 (int) (spi->mode & (SPI_CPOL | SPI_CPHA)), 1087 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 1088 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 1089 (spi->mode & SPI_3WIRE) ? "3wire, " : "", 1090 (spi->mode & SPI_LOOP) ? "loopback, " : "", 1091 spi->bits_per_word, spi->max_speed_hz, 1092 status); 1093 1094 return status; 1095 } 1096 EXPORT_SYMBOL_GPL(spi_setup); 1097 1098 static int __spi_async(struct spi_device *spi, struct spi_message *message) 1099 { 1100 struct spi_master *master = spi->master; 1101 1102 /* Half-duplex links include original MicroWire, and ones with 1103 * only one data pin like SPI_3WIRE (switches direction) or where 1104 * either MOSI or MISO is missing. They can also be caused by 1105 * software limitations. 1106 */ 1107 if ((master->flags & SPI_MASTER_HALF_DUPLEX) 1108 || (spi->mode & SPI_3WIRE)) { 1109 struct spi_transfer *xfer; 1110 unsigned flags = master->flags; 1111 1112 list_for_each_entry(xfer, &message->transfers, transfer_list) { 1113 if (xfer->rx_buf && xfer->tx_buf) 1114 return -EINVAL; 1115 if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) 1116 return -EINVAL; 1117 if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) 1118 return -EINVAL; 1119 } 1120 } 1121 1122 message->spi = spi; 1123 message->status = -EINPROGRESS; 1124 return master->transfer(spi, message); 1125 } 1126 1127 /** 1128 * spi_async - asynchronous SPI transfer 1129 * @spi: device with which data will be exchanged 1130 * @message: describes the data transfers, including completion callback 1131 * Context: any (irqs may be blocked, etc) 1132 * 1133 * This call may be used in_irq and other contexts which can't sleep, 1134 * as well as from task contexts which can sleep. 1135 * 1136 * The completion callback is invoked in a context which can't sleep. 1137 * Before that invocation, the value of message->status is undefined. 1138 * When the callback is issued, message->status holds either zero (to 1139 * indicate complete success) or a negative error code. After that 1140 * callback returns, the driver which issued the transfer request may 1141 * deallocate the associated memory; it's no longer in use by any SPI 1142 * core or controller driver code. 1143 * 1144 * Note that although all messages to a spi_device are handled in 1145 * FIFO order, messages may go to different devices in other orders. 1146 * Some device might be higher priority, or have various "hard" access 1147 * time requirements, for example. 1148 * 1149 * On detection of any fault during the transfer, processing of 1150 * the entire message is aborted, and the device is deselected. 1151 * Until returning from the associated message completion callback, 1152 * no other spi_message queued to that device will be processed. 1153 * (This rule applies equally to all the synchronous transfer calls, 1154 * which are wrappers around this core asynchronous primitive.) 1155 */ 1156 int spi_async(struct spi_device *spi, struct spi_message *message) 1157 { 1158 struct spi_master *master = spi->master; 1159 int ret; 1160 unsigned long flags; 1161 1162 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 1163 1164 if (master->bus_lock_flag) 1165 ret = -EBUSY; 1166 else 1167 ret = __spi_async(spi, message); 1168 1169 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 1170 1171 return ret; 1172 } 1173 EXPORT_SYMBOL_GPL(spi_async); 1174 1175 /** 1176 * spi_async_locked - version of spi_async with exclusive bus usage 1177 * @spi: device with which data will be exchanged 1178 * @message: describes the data transfers, including completion callback 1179 * Context: any (irqs may be blocked, etc) 1180 * 1181 * This call may be used in_irq and other contexts which can't sleep, 1182 * as well as from task contexts which can sleep. 1183 * 1184 * The completion callback is invoked in a context which can't sleep. 1185 * Before that invocation, the value of message->status is undefined. 1186 * When the callback is issued, message->status holds either zero (to 1187 * indicate complete success) or a negative error code. After that 1188 * callback returns, the driver which issued the transfer request may 1189 * deallocate the associated memory; it's no longer in use by any SPI 1190 * core or controller driver code. 1191 * 1192 * Note that although all messages to a spi_device are handled in 1193 * FIFO order, messages may go to different devices in other orders. 1194 * Some device might be higher priority, or have various "hard" access 1195 * time requirements, for example. 1196 * 1197 * On detection of any fault during the transfer, processing of 1198 * the entire message is aborted, and the device is deselected. 1199 * Until returning from the associated message completion callback, 1200 * no other spi_message queued to that device will be processed. 1201 * (This rule applies equally to all the synchronous transfer calls, 1202 * which are wrappers around this core asynchronous primitive.) 1203 */ 1204 int spi_async_locked(struct spi_device *spi, struct spi_message *message) 1205 { 1206 struct spi_master *master = spi->master; 1207 int ret; 1208 unsigned long flags; 1209 1210 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 1211 1212 ret = __spi_async(spi, message); 1213 1214 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 1215 1216 return ret; 1217 1218 } 1219 EXPORT_SYMBOL_GPL(spi_async_locked); 1220 1221 1222 /*-------------------------------------------------------------------------*/ 1223 1224 /* Utility methods for SPI master protocol drivers, layered on 1225 * top of the core. Some other utility methods are defined as 1226 * inline functions. 1227 */ 1228 1229 static void spi_complete(void *arg) 1230 { 1231 complete(arg); 1232 } 1233 1234 static int __spi_sync(struct spi_device *spi, struct spi_message *message, 1235 int bus_locked) 1236 { 1237 DECLARE_COMPLETION_ONSTACK(done); 1238 int status; 1239 struct spi_master *master = spi->master; 1240 1241 message->complete = spi_complete; 1242 message->context = &done; 1243 1244 if (!bus_locked) 1245 mutex_lock(&master->bus_lock_mutex); 1246 1247 status = spi_async_locked(spi, message); 1248 1249 if (!bus_locked) 1250 mutex_unlock(&master->bus_lock_mutex); 1251 1252 if (status == 0) { 1253 wait_for_completion(&done); 1254 status = message->status; 1255 } 1256 message->context = NULL; 1257 return status; 1258 } 1259 1260 /** 1261 * spi_sync - blocking/synchronous SPI data transfers 1262 * @spi: device with which data will be exchanged 1263 * @message: describes the data transfers 1264 * Context: can sleep 1265 * 1266 * This call may only be used from a context that may sleep. The sleep 1267 * is non-interruptible, and has no timeout. Low-overhead controller 1268 * drivers may DMA directly into and out of the message buffers. 1269 * 1270 * Note that the SPI device's chip select is active during the message, 1271 * and then is normally disabled between messages. Drivers for some 1272 * frequently-used devices may want to minimize costs of selecting a chip, 1273 * by leaving it selected in anticipation that the next message will go 1274 * to the same chip. (That may increase power usage.) 1275 * 1276 * Also, the caller is guaranteeing that the memory associated with the 1277 * message will not be freed before this call returns. 1278 * 1279 * It returns zero on success, else a negative error code. 1280 */ 1281 int spi_sync(struct spi_device *spi, struct spi_message *message) 1282 { 1283 return __spi_sync(spi, message, 0); 1284 } 1285 EXPORT_SYMBOL_GPL(spi_sync); 1286 1287 /** 1288 * spi_sync_locked - version of spi_sync with exclusive bus usage 1289 * @spi: device with which data will be exchanged 1290 * @message: describes the data transfers 1291 * Context: can sleep 1292 * 1293 * This call may only be used from a context that may sleep. The sleep 1294 * is non-interruptible, and has no timeout. Low-overhead controller 1295 * drivers may DMA directly into and out of the message buffers. 1296 * 1297 * This call should be used by drivers that require exclusive access to the 1298 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must 1299 * be released by a spi_bus_unlock call when the exclusive access is over. 1300 * 1301 * It returns zero on success, else a negative error code. 1302 */ 1303 int spi_sync_locked(struct spi_device *spi, struct spi_message *message) 1304 { 1305 return __spi_sync(spi, message, 1); 1306 } 1307 EXPORT_SYMBOL_GPL(spi_sync_locked); 1308 1309 /** 1310 * spi_bus_lock - obtain a lock for exclusive SPI bus usage 1311 * @master: SPI bus master that should be locked for exclusive bus access 1312 * Context: can sleep 1313 * 1314 * This call may only be used from a context that may sleep. The sleep 1315 * is non-interruptible, and has no timeout. 1316 * 1317 * This call should be used by drivers that require exclusive access to the 1318 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the 1319 * exclusive access is over. Data transfer must be done by spi_sync_locked 1320 * and spi_async_locked calls when the SPI bus lock is held. 1321 * 1322 * It returns zero on success, else a negative error code. 1323 */ 1324 int spi_bus_lock(struct spi_master *master) 1325 { 1326 unsigned long flags; 1327 1328 mutex_lock(&master->bus_lock_mutex); 1329 1330 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 1331 master->bus_lock_flag = 1; 1332 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 1333 1334 /* mutex remains locked until spi_bus_unlock is called */ 1335 1336 return 0; 1337 } 1338 EXPORT_SYMBOL_GPL(spi_bus_lock); 1339 1340 /** 1341 * spi_bus_unlock - release the lock for exclusive SPI bus usage 1342 * @master: SPI bus master that was locked for exclusive bus access 1343 * Context: can sleep 1344 * 1345 * This call may only be used from a context that may sleep. The sleep 1346 * is non-interruptible, and has no timeout. 1347 * 1348 * This call releases an SPI bus lock previously obtained by an spi_bus_lock 1349 * call. 1350 * 1351 * It returns zero on success, else a negative error code. 1352 */ 1353 int spi_bus_unlock(struct spi_master *master) 1354 { 1355 master->bus_lock_flag = 0; 1356 1357 mutex_unlock(&master->bus_lock_mutex); 1358 1359 return 0; 1360 } 1361 EXPORT_SYMBOL_GPL(spi_bus_unlock); 1362 1363 /* portable code must never pass more than 32 bytes */ 1364 #define SPI_BUFSIZ max(32,SMP_CACHE_BYTES) 1365 1366 static u8 *buf; 1367 1368 /** 1369 * spi_write_then_read - SPI synchronous write followed by read 1370 * @spi: device with which data will be exchanged 1371 * @txbuf: data to be written (need not be dma-safe) 1372 * @n_tx: size of txbuf, in bytes 1373 * @rxbuf: buffer into which data will be read (need not be dma-safe) 1374 * @n_rx: size of rxbuf, in bytes 1375 * Context: can sleep 1376 * 1377 * This performs a half duplex MicroWire style transaction with the 1378 * device, sending txbuf and then reading rxbuf. The return value 1379 * is zero for success, else a negative errno status code. 1380 * This call may only be used from a context that may sleep. 1381 * 1382 * Parameters to this routine are always copied using a small buffer; 1383 * portable code should never use this for more than 32 bytes. 1384 * Performance-sensitive or bulk transfer code should instead use 1385 * spi_{async,sync}() calls with dma-safe buffers. 1386 */ 1387 int spi_write_then_read(struct spi_device *spi, 1388 const void *txbuf, unsigned n_tx, 1389 void *rxbuf, unsigned n_rx) 1390 { 1391 static DEFINE_MUTEX(lock); 1392 1393 int status; 1394 struct spi_message message; 1395 struct spi_transfer x[2]; 1396 u8 *local_buf; 1397 1398 /* Use preallocated DMA-safe buffer. We can't avoid copying here, 1399 * (as a pure convenience thing), but we can keep heap costs 1400 * out of the hot path ... 1401 */ 1402 if ((n_tx + n_rx) > SPI_BUFSIZ) 1403 return -EINVAL; 1404 1405 spi_message_init(&message); 1406 memset(x, 0, sizeof x); 1407 if (n_tx) { 1408 x[0].len = n_tx; 1409 spi_message_add_tail(&x[0], &message); 1410 } 1411 if (n_rx) { 1412 x[1].len = n_rx; 1413 spi_message_add_tail(&x[1], &message); 1414 } 1415 1416 /* ... unless someone else is using the pre-allocated buffer */ 1417 if (!mutex_trylock(&lock)) { 1418 local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 1419 if (!local_buf) 1420 return -ENOMEM; 1421 } else 1422 local_buf = buf; 1423 1424 memcpy(local_buf, txbuf, n_tx); 1425 x[0].tx_buf = local_buf; 1426 x[1].rx_buf = local_buf + n_tx; 1427 1428 /* do the i/o */ 1429 status = spi_sync(spi, &message); 1430 if (status == 0) 1431 memcpy(rxbuf, x[1].rx_buf, n_rx); 1432 1433 if (x[0].tx_buf == buf) 1434 mutex_unlock(&lock); 1435 else 1436 kfree(local_buf); 1437 1438 return status; 1439 } 1440 EXPORT_SYMBOL_GPL(spi_write_then_read); 1441 1442 /*-------------------------------------------------------------------------*/ 1443 1444 static int __init spi_init(void) 1445 { 1446 int status; 1447 1448 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 1449 if (!buf) { 1450 status = -ENOMEM; 1451 goto err0; 1452 } 1453 1454 status = bus_register(&spi_bus_type); 1455 if (status < 0) 1456 goto err1; 1457 1458 status = class_register(&spi_master_class); 1459 if (status < 0) 1460 goto err2; 1461 return 0; 1462 1463 err2: 1464 bus_unregister(&spi_bus_type); 1465 err1: 1466 kfree(buf); 1467 buf = NULL; 1468 err0: 1469 return status; 1470 } 1471 1472 /* board_info is normally registered in arch_initcall(), 1473 * but even essential drivers wait till later 1474 * 1475 * REVISIT only boardinfo really needs static linking. the rest (device and 1476 * driver registration) _could_ be dynamically linked (modular) ... costs 1477 * include needing to have boardinfo data structures be much more public. 1478 */ 1479 postcore_initcall(spi_init); 1480 1481