1 /* 2 * SPI init/core code 3 * 4 * Copyright (C) 2005 David Brownell 5 * Copyright (C) 2008 Secret Lab Technologies Ltd. 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by 9 * the Free Software Foundation; either version 2 of the License, or 10 * (at your option) any later version. 11 * 12 * This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details. 16 */ 17 18 #include <linux/kernel.h> 19 #include <linux/device.h> 20 #include <linux/init.h> 21 #include <linux/cache.h> 22 #include <linux/dma-mapping.h> 23 #include <linux/dmaengine.h> 24 #include <linux/mutex.h> 25 #include <linux/of_device.h> 26 #include <linux/of_irq.h> 27 #include <linux/clk/clk-conf.h> 28 #include <linux/slab.h> 29 #include <linux/mod_devicetable.h> 30 #include <linux/spi/spi.h> 31 #include <linux/of_gpio.h> 32 #include <linux/pm_runtime.h> 33 #include <linux/pm_domain.h> 34 #include <linux/export.h> 35 #include <linux/sched/rt.h> 36 #include <linux/delay.h> 37 #include <linux/kthread.h> 38 #include <linux/ioport.h> 39 #include <linux/acpi.h> 40 #include <linux/highmem.h> 41 42 #define CREATE_TRACE_POINTS 43 #include <trace/events/spi.h> 44 45 static void spidev_release(struct device *dev) 46 { 47 struct spi_device *spi = to_spi_device(dev); 48 49 /* spi masters may cleanup for released devices */ 50 if (spi->master->cleanup) 51 spi->master->cleanup(spi); 52 53 spi_master_put(spi->master); 54 kfree(spi); 55 } 56 57 static ssize_t 58 modalias_show(struct device *dev, struct device_attribute *a, char *buf) 59 { 60 const struct spi_device *spi = to_spi_device(dev); 61 int len; 62 63 len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); 64 if (len != -ENODEV) 65 return len; 66 67 return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias); 68 } 69 static DEVICE_ATTR_RO(modalias); 70 71 #define SPI_STATISTICS_ATTRS(field, file) \ 72 static ssize_t spi_master_##field##_show(struct device *dev, \ 73 struct device_attribute *attr, \ 74 char *buf) \ 75 { \ 76 struct spi_master *master = container_of(dev, \ 77 struct spi_master, dev); \ 78 return spi_statistics_##field##_show(&master->statistics, buf); \ 79 } \ 80 static struct device_attribute dev_attr_spi_master_##field = { \ 81 .attr = { .name = file, .mode = S_IRUGO }, \ 82 .show = spi_master_##field##_show, \ 83 }; \ 84 static ssize_t spi_device_##field##_show(struct device *dev, \ 85 struct device_attribute *attr, \ 86 char *buf) \ 87 { \ 88 struct spi_device *spi = to_spi_device(dev); \ 89 return spi_statistics_##field##_show(&spi->statistics, buf); \ 90 } \ 91 static struct device_attribute dev_attr_spi_device_##field = { \ 92 .attr = { .name = file, .mode = S_IRUGO }, \ 93 .show = spi_device_##field##_show, \ 94 } 95 96 #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \ 97 static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \ 98 char *buf) \ 99 { \ 100 unsigned long flags; \ 101 ssize_t len; \ 102 spin_lock_irqsave(&stat->lock, flags); \ 103 len = sprintf(buf, format_string, stat->field); \ 104 spin_unlock_irqrestore(&stat->lock, flags); \ 105 return len; \ 106 } \ 107 SPI_STATISTICS_ATTRS(name, file) 108 109 #define SPI_STATISTICS_SHOW(field, format_string) \ 110 SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \ 111 field, format_string) 112 113 SPI_STATISTICS_SHOW(messages, "%lu"); 114 SPI_STATISTICS_SHOW(transfers, "%lu"); 115 SPI_STATISTICS_SHOW(errors, "%lu"); 116 SPI_STATISTICS_SHOW(timedout, "%lu"); 117 118 SPI_STATISTICS_SHOW(spi_sync, "%lu"); 119 SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu"); 120 SPI_STATISTICS_SHOW(spi_async, "%lu"); 121 122 SPI_STATISTICS_SHOW(bytes, "%llu"); 123 SPI_STATISTICS_SHOW(bytes_rx, "%llu"); 124 SPI_STATISTICS_SHOW(bytes_tx, "%llu"); 125 126 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \ 127 SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \ 128 "transfer_bytes_histo_" number, \ 129 transfer_bytes_histo[index], "%lu") 130 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1"); 131 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3"); 132 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7"); 133 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15"); 134 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31"); 135 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63"); 136 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127"); 137 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255"); 138 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511"); 139 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023"); 140 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047"); 141 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095"); 142 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191"); 143 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383"); 144 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767"); 145 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535"); 146 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+"); 147 148 SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu"); 149 150 static struct attribute *spi_dev_attrs[] = { 151 &dev_attr_modalias.attr, 152 NULL, 153 }; 154 155 static const struct attribute_group spi_dev_group = { 156 .attrs = spi_dev_attrs, 157 }; 158 159 static struct attribute *spi_device_statistics_attrs[] = { 160 &dev_attr_spi_device_messages.attr, 161 &dev_attr_spi_device_transfers.attr, 162 &dev_attr_spi_device_errors.attr, 163 &dev_attr_spi_device_timedout.attr, 164 &dev_attr_spi_device_spi_sync.attr, 165 &dev_attr_spi_device_spi_sync_immediate.attr, 166 &dev_attr_spi_device_spi_async.attr, 167 &dev_attr_spi_device_bytes.attr, 168 &dev_attr_spi_device_bytes_rx.attr, 169 &dev_attr_spi_device_bytes_tx.attr, 170 &dev_attr_spi_device_transfer_bytes_histo0.attr, 171 &dev_attr_spi_device_transfer_bytes_histo1.attr, 172 &dev_attr_spi_device_transfer_bytes_histo2.attr, 173 &dev_attr_spi_device_transfer_bytes_histo3.attr, 174 &dev_attr_spi_device_transfer_bytes_histo4.attr, 175 &dev_attr_spi_device_transfer_bytes_histo5.attr, 176 &dev_attr_spi_device_transfer_bytes_histo6.attr, 177 &dev_attr_spi_device_transfer_bytes_histo7.attr, 178 &dev_attr_spi_device_transfer_bytes_histo8.attr, 179 &dev_attr_spi_device_transfer_bytes_histo9.attr, 180 &dev_attr_spi_device_transfer_bytes_histo10.attr, 181 &dev_attr_spi_device_transfer_bytes_histo11.attr, 182 &dev_attr_spi_device_transfer_bytes_histo12.attr, 183 &dev_attr_spi_device_transfer_bytes_histo13.attr, 184 &dev_attr_spi_device_transfer_bytes_histo14.attr, 185 &dev_attr_spi_device_transfer_bytes_histo15.attr, 186 &dev_attr_spi_device_transfer_bytes_histo16.attr, 187 &dev_attr_spi_device_transfers_split_maxsize.attr, 188 NULL, 189 }; 190 191 static const struct attribute_group spi_device_statistics_group = { 192 .name = "statistics", 193 .attrs = spi_device_statistics_attrs, 194 }; 195 196 static const struct attribute_group *spi_dev_groups[] = { 197 &spi_dev_group, 198 &spi_device_statistics_group, 199 NULL, 200 }; 201 202 static struct attribute *spi_master_statistics_attrs[] = { 203 &dev_attr_spi_master_messages.attr, 204 &dev_attr_spi_master_transfers.attr, 205 &dev_attr_spi_master_errors.attr, 206 &dev_attr_spi_master_timedout.attr, 207 &dev_attr_spi_master_spi_sync.attr, 208 &dev_attr_spi_master_spi_sync_immediate.attr, 209 &dev_attr_spi_master_spi_async.attr, 210 &dev_attr_spi_master_bytes.attr, 211 &dev_attr_spi_master_bytes_rx.attr, 212 &dev_attr_spi_master_bytes_tx.attr, 213 &dev_attr_spi_master_transfer_bytes_histo0.attr, 214 &dev_attr_spi_master_transfer_bytes_histo1.attr, 215 &dev_attr_spi_master_transfer_bytes_histo2.attr, 216 &dev_attr_spi_master_transfer_bytes_histo3.attr, 217 &dev_attr_spi_master_transfer_bytes_histo4.attr, 218 &dev_attr_spi_master_transfer_bytes_histo5.attr, 219 &dev_attr_spi_master_transfer_bytes_histo6.attr, 220 &dev_attr_spi_master_transfer_bytes_histo7.attr, 221 &dev_attr_spi_master_transfer_bytes_histo8.attr, 222 &dev_attr_spi_master_transfer_bytes_histo9.attr, 223 &dev_attr_spi_master_transfer_bytes_histo10.attr, 224 &dev_attr_spi_master_transfer_bytes_histo11.attr, 225 &dev_attr_spi_master_transfer_bytes_histo12.attr, 226 &dev_attr_spi_master_transfer_bytes_histo13.attr, 227 &dev_attr_spi_master_transfer_bytes_histo14.attr, 228 &dev_attr_spi_master_transfer_bytes_histo15.attr, 229 &dev_attr_spi_master_transfer_bytes_histo16.attr, 230 &dev_attr_spi_master_transfers_split_maxsize.attr, 231 NULL, 232 }; 233 234 static const struct attribute_group spi_master_statistics_group = { 235 .name = "statistics", 236 .attrs = spi_master_statistics_attrs, 237 }; 238 239 static const struct attribute_group *spi_master_groups[] = { 240 &spi_master_statistics_group, 241 NULL, 242 }; 243 244 void spi_statistics_add_transfer_stats(struct spi_statistics *stats, 245 struct spi_transfer *xfer, 246 struct spi_master *master) 247 { 248 unsigned long flags; 249 int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1; 250 251 if (l2len < 0) 252 l2len = 0; 253 254 spin_lock_irqsave(&stats->lock, flags); 255 256 stats->transfers++; 257 stats->transfer_bytes_histo[l2len]++; 258 259 stats->bytes += xfer->len; 260 if ((xfer->tx_buf) && 261 (xfer->tx_buf != master->dummy_tx)) 262 stats->bytes_tx += xfer->len; 263 if ((xfer->rx_buf) && 264 (xfer->rx_buf != master->dummy_rx)) 265 stats->bytes_rx += xfer->len; 266 267 spin_unlock_irqrestore(&stats->lock, flags); 268 } 269 EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats); 270 271 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work, 272 * and the sysfs version makes coldplug work too. 273 */ 274 275 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, 276 const struct spi_device *sdev) 277 { 278 while (id->name[0]) { 279 if (!strcmp(sdev->modalias, id->name)) 280 return id; 281 id++; 282 } 283 return NULL; 284 } 285 286 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev) 287 { 288 const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver); 289 290 return spi_match_id(sdrv->id_table, sdev); 291 } 292 EXPORT_SYMBOL_GPL(spi_get_device_id); 293 294 static int spi_match_device(struct device *dev, struct device_driver *drv) 295 { 296 const struct spi_device *spi = to_spi_device(dev); 297 const struct spi_driver *sdrv = to_spi_driver(drv); 298 299 /* Attempt an OF style match */ 300 if (of_driver_match_device(dev, drv)) 301 return 1; 302 303 /* Then try ACPI */ 304 if (acpi_driver_match_device(dev, drv)) 305 return 1; 306 307 if (sdrv->id_table) 308 return !!spi_match_id(sdrv->id_table, spi); 309 310 return strcmp(spi->modalias, drv->name) == 0; 311 } 312 313 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env) 314 { 315 const struct spi_device *spi = to_spi_device(dev); 316 int rc; 317 318 rc = acpi_device_uevent_modalias(dev, env); 319 if (rc != -ENODEV) 320 return rc; 321 322 add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias); 323 return 0; 324 } 325 326 struct bus_type spi_bus_type = { 327 .name = "spi", 328 .dev_groups = spi_dev_groups, 329 .match = spi_match_device, 330 .uevent = spi_uevent, 331 }; 332 EXPORT_SYMBOL_GPL(spi_bus_type); 333 334 335 static int spi_drv_probe(struct device *dev) 336 { 337 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 338 struct spi_device *spi = to_spi_device(dev); 339 int ret; 340 341 ret = of_clk_set_defaults(dev->of_node, false); 342 if (ret) 343 return ret; 344 345 if (dev->of_node) { 346 spi->irq = of_irq_get(dev->of_node, 0); 347 if (spi->irq == -EPROBE_DEFER) 348 return -EPROBE_DEFER; 349 if (spi->irq < 0) 350 spi->irq = 0; 351 } 352 353 ret = dev_pm_domain_attach(dev, true); 354 if (ret != -EPROBE_DEFER) { 355 ret = sdrv->probe(spi); 356 if (ret) 357 dev_pm_domain_detach(dev, true); 358 } 359 360 return ret; 361 } 362 363 static int spi_drv_remove(struct device *dev) 364 { 365 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 366 int ret; 367 368 ret = sdrv->remove(to_spi_device(dev)); 369 dev_pm_domain_detach(dev, true); 370 371 return ret; 372 } 373 374 static void spi_drv_shutdown(struct device *dev) 375 { 376 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 377 378 sdrv->shutdown(to_spi_device(dev)); 379 } 380 381 /** 382 * __spi_register_driver - register a SPI driver 383 * @owner: owner module of the driver to register 384 * @sdrv: the driver to register 385 * Context: can sleep 386 * 387 * Return: zero on success, else a negative error code. 388 */ 389 int __spi_register_driver(struct module *owner, struct spi_driver *sdrv) 390 { 391 sdrv->driver.owner = owner; 392 sdrv->driver.bus = &spi_bus_type; 393 if (sdrv->probe) 394 sdrv->driver.probe = spi_drv_probe; 395 if (sdrv->remove) 396 sdrv->driver.remove = spi_drv_remove; 397 if (sdrv->shutdown) 398 sdrv->driver.shutdown = spi_drv_shutdown; 399 return driver_register(&sdrv->driver); 400 } 401 EXPORT_SYMBOL_GPL(__spi_register_driver); 402 403 /*-------------------------------------------------------------------------*/ 404 405 /* SPI devices should normally not be created by SPI device drivers; that 406 * would make them board-specific. Similarly with SPI master drivers. 407 * Device registration normally goes into like arch/.../mach.../board-YYY.c 408 * with other readonly (flashable) information about mainboard devices. 409 */ 410 411 struct boardinfo { 412 struct list_head list; 413 struct spi_board_info board_info; 414 }; 415 416 static LIST_HEAD(board_list); 417 static LIST_HEAD(spi_master_list); 418 419 /* 420 * Used to protect add/del opertion for board_info list and 421 * spi_master list, and their matching process 422 */ 423 static DEFINE_MUTEX(board_lock); 424 425 /** 426 * spi_alloc_device - Allocate a new SPI device 427 * @master: Controller to which device is connected 428 * Context: can sleep 429 * 430 * Allows a driver to allocate and initialize a spi_device without 431 * registering it immediately. This allows a driver to directly 432 * fill the spi_device with device parameters before calling 433 * spi_add_device() on it. 434 * 435 * Caller is responsible to call spi_add_device() on the returned 436 * spi_device structure to add it to the SPI master. If the caller 437 * needs to discard the spi_device without adding it, then it should 438 * call spi_dev_put() on it. 439 * 440 * Return: a pointer to the new device, or NULL. 441 */ 442 struct spi_device *spi_alloc_device(struct spi_master *master) 443 { 444 struct spi_device *spi; 445 446 if (!spi_master_get(master)) 447 return NULL; 448 449 spi = kzalloc(sizeof(*spi), GFP_KERNEL); 450 if (!spi) { 451 spi_master_put(master); 452 return NULL; 453 } 454 455 spi->master = master; 456 spi->dev.parent = &master->dev; 457 spi->dev.bus = &spi_bus_type; 458 spi->dev.release = spidev_release; 459 spi->cs_gpio = -ENOENT; 460 461 spin_lock_init(&spi->statistics.lock); 462 463 device_initialize(&spi->dev); 464 return spi; 465 } 466 EXPORT_SYMBOL_GPL(spi_alloc_device); 467 468 static void spi_dev_set_name(struct spi_device *spi) 469 { 470 struct acpi_device *adev = ACPI_COMPANION(&spi->dev); 471 472 if (adev) { 473 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev)); 474 return; 475 } 476 477 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev), 478 spi->chip_select); 479 } 480 481 static int spi_dev_check(struct device *dev, void *data) 482 { 483 struct spi_device *spi = to_spi_device(dev); 484 struct spi_device *new_spi = data; 485 486 if (spi->master == new_spi->master && 487 spi->chip_select == new_spi->chip_select) 488 return -EBUSY; 489 return 0; 490 } 491 492 /** 493 * spi_add_device - Add spi_device allocated with spi_alloc_device 494 * @spi: spi_device to register 495 * 496 * Companion function to spi_alloc_device. Devices allocated with 497 * spi_alloc_device can be added onto the spi bus with this function. 498 * 499 * Return: 0 on success; negative errno on failure 500 */ 501 int spi_add_device(struct spi_device *spi) 502 { 503 static DEFINE_MUTEX(spi_add_lock); 504 struct spi_master *master = spi->master; 505 struct device *dev = master->dev.parent; 506 int status; 507 508 /* Chipselects are numbered 0..max; validate. */ 509 if (spi->chip_select >= master->num_chipselect) { 510 dev_err(dev, "cs%d >= max %d\n", 511 spi->chip_select, 512 master->num_chipselect); 513 return -EINVAL; 514 } 515 516 /* Set the bus ID string */ 517 spi_dev_set_name(spi); 518 519 /* We need to make sure there's no other device with this 520 * chipselect **BEFORE** we call setup(), else we'll trash 521 * its configuration. Lock against concurrent add() calls. 522 */ 523 mutex_lock(&spi_add_lock); 524 525 status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check); 526 if (status) { 527 dev_err(dev, "chipselect %d already in use\n", 528 spi->chip_select); 529 goto done; 530 } 531 532 if (master->cs_gpios) 533 spi->cs_gpio = master->cs_gpios[spi->chip_select]; 534 535 /* Drivers may modify this initial i/o setup, but will 536 * normally rely on the device being setup. Devices 537 * using SPI_CS_HIGH can't coexist well otherwise... 538 */ 539 status = spi_setup(spi); 540 if (status < 0) { 541 dev_err(dev, "can't setup %s, status %d\n", 542 dev_name(&spi->dev), status); 543 goto done; 544 } 545 546 /* Device may be bound to an active driver when this returns */ 547 status = device_add(&spi->dev); 548 if (status < 0) 549 dev_err(dev, "can't add %s, status %d\n", 550 dev_name(&spi->dev), status); 551 else 552 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev)); 553 554 done: 555 mutex_unlock(&spi_add_lock); 556 return status; 557 } 558 EXPORT_SYMBOL_GPL(spi_add_device); 559 560 /** 561 * spi_new_device - instantiate one new SPI device 562 * @master: Controller to which device is connected 563 * @chip: Describes the SPI device 564 * Context: can sleep 565 * 566 * On typical mainboards, this is purely internal; and it's not needed 567 * after board init creates the hard-wired devices. Some development 568 * platforms may not be able to use spi_register_board_info though, and 569 * this is exported so that for example a USB or parport based adapter 570 * driver could add devices (which it would learn about out-of-band). 571 * 572 * Return: the new device, or NULL. 573 */ 574 struct spi_device *spi_new_device(struct spi_master *master, 575 struct spi_board_info *chip) 576 { 577 struct spi_device *proxy; 578 int status; 579 580 /* NOTE: caller did any chip->bus_num checks necessary. 581 * 582 * Also, unless we change the return value convention to use 583 * error-or-pointer (not NULL-or-pointer), troubleshootability 584 * suggests syslogged diagnostics are best here (ugh). 585 */ 586 587 proxy = spi_alloc_device(master); 588 if (!proxy) 589 return NULL; 590 591 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); 592 593 proxy->chip_select = chip->chip_select; 594 proxy->max_speed_hz = chip->max_speed_hz; 595 proxy->mode = chip->mode; 596 proxy->irq = chip->irq; 597 strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias)); 598 proxy->dev.platform_data = (void *) chip->platform_data; 599 proxy->controller_data = chip->controller_data; 600 proxy->controller_state = NULL; 601 602 status = spi_add_device(proxy); 603 if (status < 0) { 604 spi_dev_put(proxy); 605 return NULL; 606 } 607 608 return proxy; 609 } 610 EXPORT_SYMBOL_GPL(spi_new_device); 611 612 /** 613 * spi_unregister_device - unregister a single SPI device 614 * @spi: spi_device to unregister 615 * 616 * Start making the passed SPI device vanish. Normally this would be handled 617 * by spi_unregister_master(). 618 */ 619 void spi_unregister_device(struct spi_device *spi) 620 { 621 if (!spi) 622 return; 623 624 if (spi->dev.of_node) { 625 of_node_clear_flag(spi->dev.of_node, OF_POPULATED); 626 of_node_put(spi->dev.of_node); 627 } 628 if (ACPI_COMPANION(&spi->dev)) 629 acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev)); 630 device_unregister(&spi->dev); 631 } 632 EXPORT_SYMBOL_GPL(spi_unregister_device); 633 634 static void spi_match_master_to_boardinfo(struct spi_master *master, 635 struct spi_board_info *bi) 636 { 637 struct spi_device *dev; 638 639 if (master->bus_num != bi->bus_num) 640 return; 641 642 dev = spi_new_device(master, bi); 643 if (!dev) 644 dev_err(master->dev.parent, "can't create new device for %s\n", 645 bi->modalias); 646 } 647 648 /** 649 * spi_register_board_info - register SPI devices for a given board 650 * @info: array of chip descriptors 651 * @n: how many descriptors are provided 652 * Context: can sleep 653 * 654 * Board-specific early init code calls this (probably during arch_initcall) 655 * with segments of the SPI device table. Any device nodes are created later, 656 * after the relevant parent SPI controller (bus_num) is defined. We keep 657 * this table of devices forever, so that reloading a controller driver will 658 * not make Linux forget about these hard-wired devices. 659 * 660 * Other code can also call this, e.g. a particular add-on board might provide 661 * SPI devices through its expansion connector, so code initializing that board 662 * would naturally declare its SPI devices. 663 * 664 * The board info passed can safely be __initdata ... but be careful of 665 * any embedded pointers (platform_data, etc), they're copied as-is. 666 * 667 * Return: zero on success, else a negative error code. 668 */ 669 int spi_register_board_info(struct spi_board_info const *info, unsigned n) 670 { 671 struct boardinfo *bi; 672 int i; 673 674 if (!n) 675 return -EINVAL; 676 677 bi = kcalloc(n, sizeof(*bi), GFP_KERNEL); 678 if (!bi) 679 return -ENOMEM; 680 681 for (i = 0; i < n; i++, bi++, info++) { 682 struct spi_master *master; 683 684 memcpy(&bi->board_info, info, sizeof(*info)); 685 mutex_lock(&board_lock); 686 list_add_tail(&bi->list, &board_list); 687 list_for_each_entry(master, &spi_master_list, list) 688 spi_match_master_to_boardinfo(master, &bi->board_info); 689 mutex_unlock(&board_lock); 690 } 691 692 return 0; 693 } 694 695 /*-------------------------------------------------------------------------*/ 696 697 static void spi_set_cs(struct spi_device *spi, bool enable) 698 { 699 if (spi->mode & SPI_CS_HIGH) 700 enable = !enable; 701 702 if (gpio_is_valid(spi->cs_gpio)) { 703 gpio_set_value(spi->cs_gpio, !enable); 704 /* Some SPI masters need both GPIO CS & slave_select */ 705 if ((spi->master->flags & SPI_MASTER_GPIO_SS) && 706 spi->master->set_cs) 707 spi->master->set_cs(spi, !enable); 708 } else if (spi->master->set_cs) { 709 spi->master->set_cs(spi, !enable); 710 } 711 } 712 713 #ifdef CONFIG_HAS_DMA 714 static int spi_map_buf(struct spi_master *master, struct device *dev, 715 struct sg_table *sgt, void *buf, size_t len, 716 enum dma_data_direction dir) 717 { 718 const bool vmalloced_buf = is_vmalloc_addr(buf); 719 unsigned int max_seg_size = dma_get_max_seg_size(dev); 720 #ifdef CONFIG_HIGHMEM 721 const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE && 722 (unsigned long)buf < (PKMAP_BASE + 723 (LAST_PKMAP * PAGE_SIZE))); 724 #else 725 const bool kmap_buf = false; 726 #endif 727 int desc_len; 728 int sgs; 729 struct page *vm_page; 730 struct scatterlist *sg; 731 void *sg_buf; 732 size_t min; 733 int i, ret; 734 735 if (vmalloced_buf || kmap_buf) { 736 desc_len = min_t(int, max_seg_size, PAGE_SIZE); 737 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); 738 } else if (virt_addr_valid(buf)) { 739 desc_len = min_t(int, max_seg_size, master->max_dma_len); 740 sgs = DIV_ROUND_UP(len, desc_len); 741 } else { 742 return -EINVAL; 743 } 744 745 ret = sg_alloc_table(sgt, sgs, GFP_KERNEL); 746 if (ret != 0) 747 return ret; 748 749 sg = &sgt->sgl[0]; 750 for (i = 0; i < sgs; i++) { 751 752 if (vmalloced_buf || kmap_buf) { 753 min = min_t(size_t, 754 len, desc_len - offset_in_page(buf)); 755 if (vmalloced_buf) 756 vm_page = vmalloc_to_page(buf); 757 else 758 vm_page = kmap_to_page(buf); 759 if (!vm_page) { 760 sg_free_table(sgt); 761 return -ENOMEM; 762 } 763 sg_set_page(sg, vm_page, 764 min, offset_in_page(buf)); 765 } else { 766 min = min_t(size_t, len, desc_len); 767 sg_buf = buf; 768 sg_set_buf(sg, sg_buf, min); 769 } 770 771 buf += min; 772 len -= min; 773 sg = sg_next(sg); 774 } 775 776 ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir); 777 if (!ret) 778 ret = -ENOMEM; 779 if (ret < 0) { 780 sg_free_table(sgt); 781 return ret; 782 } 783 784 sgt->nents = ret; 785 786 return 0; 787 } 788 789 static void spi_unmap_buf(struct spi_master *master, struct device *dev, 790 struct sg_table *sgt, enum dma_data_direction dir) 791 { 792 if (sgt->orig_nents) { 793 dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir); 794 sg_free_table(sgt); 795 } 796 } 797 798 static int __spi_map_msg(struct spi_master *master, struct spi_message *msg) 799 { 800 struct device *tx_dev, *rx_dev; 801 struct spi_transfer *xfer; 802 int ret; 803 804 if (!master->can_dma) 805 return 0; 806 807 if (master->dma_tx) 808 tx_dev = master->dma_tx->device->dev; 809 else 810 tx_dev = master->dev.parent; 811 812 if (master->dma_rx) 813 rx_dev = master->dma_rx->device->dev; 814 else 815 rx_dev = master->dev.parent; 816 817 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 818 if (!master->can_dma(master, msg->spi, xfer)) 819 continue; 820 821 if (xfer->tx_buf != NULL) { 822 ret = spi_map_buf(master, tx_dev, &xfer->tx_sg, 823 (void *)xfer->tx_buf, xfer->len, 824 DMA_TO_DEVICE); 825 if (ret != 0) 826 return ret; 827 } 828 829 if (xfer->rx_buf != NULL) { 830 ret = spi_map_buf(master, rx_dev, &xfer->rx_sg, 831 xfer->rx_buf, xfer->len, 832 DMA_FROM_DEVICE); 833 if (ret != 0) { 834 spi_unmap_buf(master, tx_dev, &xfer->tx_sg, 835 DMA_TO_DEVICE); 836 return ret; 837 } 838 } 839 } 840 841 master->cur_msg_mapped = true; 842 843 return 0; 844 } 845 846 static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg) 847 { 848 struct spi_transfer *xfer; 849 struct device *tx_dev, *rx_dev; 850 851 if (!master->cur_msg_mapped || !master->can_dma) 852 return 0; 853 854 if (master->dma_tx) 855 tx_dev = master->dma_tx->device->dev; 856 else 857 tx_dev = master->dev.parent; 858 859 if (master->dma_rx) 860 rx_dev = master->dma_rx->device->dev; 861 else 862 rx_dev = master->dev.parent; 863 864 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 865 if (!master->can_dma(master, msg->spi, xfer)) 866 continue; 867 868 spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); 869 spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); 870 } 871 872 return 0; 873 } 874 #else /* !CONFIG_HAS_DMA */ 875 static inline int spi_map_buf(struct spi_master *master, 876 struct device *dev, struct sg_table *sgt, 877 void *buf, size_t len, 878 enum dma_data_direction dir) 879 { 880 return -EINVAL; 881 } 882 883 static inline void spi_unmap_buf(struct spi_master *master, 884 struct device *dev, struct sg_table *sgt, 885 enum dma_data_direction dir) 886 { 887 } 888 889 static inline int __spi_map_msg(struct spi_master *master, 890 struct spi_message *msg) 891 { 892 return 0; 893 } 894 895 static inline int __spi_unmap_msg(struct spi_master *master, 896 struct spi_message *msg) 897 { 898 return 0; 899 } 900 #endif /* !CONFIG_HAS_DMA */ 901 902 static inline int spi_unmap_msg(struct spi_master *master, 903 struct spi_message *msg) 904 { 905 struct spi_transfer *xfer; 906 907 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 908 /* 909 * Restore the original value of tx_buf or rx_buf if they are 910 * NULL. 911 */ 912 if (xfer->tx_buf == master->dummy_tx) 913 xfer->tx_buf = NULL; 914 if (xfer->rx_buf == master->dummy_rx) 915 xfer->rx_buf = NULL; 916 } 917 918 return __spi_unmap_msg(master, msg); 919 } 920 921 static int spi_map_msg(struct spi_master *master, struct spi_message *msg) 922 { 923 struct spi_transfer *xfer; 924 void *tmp; 925 unsigned int max_tx, max_rx; 926 927 if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { 928 max_tx = 0; 929 max_rx = 0; 930 931 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 932 if ((master->flags & SPI_MASTER_MUST_TX) && 933 !xfer->tx_buf) 934 max_tx = max(xfer->len, max_tx); 935 if ((master->flags & SPI_MASTER_MUST_RX) && 936 !xfer->rx_buf) 937 max_rx = max(xfer->len, max_rx); 938 } 939 940 if (max_tx) { 941 tmp = krealloc(master->dummy_tx, max_tx, 942 GFP_KERNEL | GFP_DMA); 943 if (!tmp) 944 return -ENOMEM; 945 master->dummy_tx = tmp; 946 memset(tmp, 0, max_tx); 947 } 948 949 if (max_rx) { 950 tmp = krealloc(master->dummy_rx, max_rx, 951 GFP_KERNEL | GFP_DMA); 952 if (!tmp) 953 return -ENOMEM; 954 master->dummy_rx = tmp; 955 } 956 957 if (max_tx || max_rx) { 958 list_for_each_entry(xfer, &msg->transfers, 959 transfer_list) { 960 if (!xfer->tx_buf) 961 xfer->tx_buf = master->dummy_tx; 962 if (!xfer->rx_buf) 963 xfer->rx_buf = master->dummy_rx; 964 } 965 } 966 } 967 968 return __spi_map_msg(master, msg); 969 } 970 971 /* 972 * spi_transfer_one_message - Default implementation of transfer_one_message() 973 * 974 * This is a standard implementation of transfer_one_message() for 975 * drivers which implement a transfer_one() operation. It provides 976 * standard handling of delays and chip select management. 977 */ 978 static int spi_transfer_one_message(struct spi_master *master, 979 struct spi_message *msg) 980 { 981 struct spi_transfer *xfer; 982 bool keep_cs = false; 983 int ret = 0; 984 unsigned long long ms = 1; 985 struct spi_statistics *statm = &master->statistics; 986 struct spi_statistics *stats = &msg->spi->statistics; 987 988 spi_set_cs(msg->spi, true); 989 990 SPI_STATISTICS_INCREMENT_FIELD(statm, messages); 991 SPI_STATISTICS_INCREMENT_FIELD(stats, messages); 992 993 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 994 trace_spi_transfer_start(msg, xfer); 995 996 spi_statistics_add_transfer_stats(statm, xfer, master); 997 spi_statistics_add_transfer_stats(stats, xfer, master); 998 999 if (xfer->tx_buf || xfer->rx_buf) { 1000 reinit_completion(&master->xfer_completion); 1001 1002 ret = master->transfer_one(master, msg->spi, xfer); 1003 if (ret < 0) { 1004 SPI_STATISTICS_INCREMENT_FIELD(statm, 1005 errors); 1006 SPI_STATISTICS_INCREMENT_FIELD(stats, 1007 errors); 1008 dev_err(&msg->spi->dev, 1009 "SPI transfer failed: %d\n", ret); 1010 goto out; 1011 } 1012 1013 if (ret > 0) { 1014 ret = 0; 1015 ms = 8LL * 1000LL * xfer->len; 1016 do_div(ms, xfer->speed_hz); 1017 ms += ms + 100; /* some tolerance */ 1018 1019 if (ms > UINT_MAX) 1020 ms = UINT_MAX; 1021 1022 ms = wait_for_completion_timeout(&master->xfer_completion, 1023 msecs_to_jiffies(ms)); 1024 } 1025 1026 if (ms == 0) { 1027 SPI_STATISTICS_INCREMENT_FIELD(statm, 1028 timedout); 1029 SPI_STATISTICS_INCREMENT_FIELD(stats, 1030 timedout); 1031 dev_err(&msg->spi->dev, 1032 "SPI transfer timed out\n"); 1033 msg->status = -ETIMEDOUT; 1034 } 1035 } else { 1036 if (xfer->len) 1037 dev_err(&msg->spi->dev, 1038 "Bufferless transfer has length %u\n", 1039 xfer->len); 1040 } 1041 1042 trace_spi_transfer_stop(msg, xfer); 1043 1044 if (msg->status != -EINPROGRESS) 1045 goto out; 1046 1047 if (xfer->delay_usecs) { 1048 u16 us = xfer->delay_usecs; 1049 1050 if (us <= 10) 1051 udelay(us); 1052 else 1053 usleep_range(us, us + DIV_ROUND_UP(us, 10)); 1054 } 1055 1056 if (xfer->cs_change) { 1057 if (list_is_last(&xfer->transfer_list, 1058 &msg->transfers)) { 1059 keep_cs = true; 1060 } else { 1061 spi_set_cs(msg->spi, false); 1062 udelay(10); 1063 spi_set_cs(msg->spi, true); 1064 } 1065 } 1066 1067 msg->actual_length += xfer->len; 1068 } 1069 1070 out: 1071 if (ret != 0 || !keep_cs) 1072 spi_set_cs(msg->spi, false); 1073 1074 if (msg->status == -EINPROGRESS) 1075 msg->status = ret; 1076 1077 if (msg->status && master->handle_err) 1078 master->handle_err(master, msg); 1079 1080 spi_res_release(master, msg); 1081 1082 spi_finalize_current_message(master); 1083 1084 return ret; 1085 } 1086 1087 /** 1088 * spi_finalize_current_transfer - report completion of a transfer 1089 * @master: the master reporting completion 1090 * 1091 * Called by SPI drivers using the core transfer_one_message() 1092 * implementation to notify it that the current interrupt driven 1093 * transfer has finished and the next one may be scheduled. 1094 */ 1095 void spi_finalize_current_transfer(struct spi_master *master) 1096 { 1097 complete(&master->xfer_completion); 1098 } 1099 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); 1100 1101 /** 1102 * __spi_pump_messages - function which processes spi message queue 1103 * @master: master to process queue for 1104 * @in_kthread: true if we are in the context of the message pump thread 1105 * 1106 * This function checks if there is any spi message in the queue that 1107 * needs processing and if so call out to the driver to initialize hardware 1108 * and transfer each message. 1109 * 1110 * Note that it is called both from the kthread itself and also from 1111 * inside spi_sync(); the queue extraction handling at the top of the 1112 * function should deal with this safely. 1113 */ 1114 static void __spi_pump_messages(struct spi_master *master, bool in_kthread) 1115 { 1116 unsigned long flags; 1117 bool was_busy = false; 1118 int ret; 1119 1120 /* Lock queue */ 1121 spin_lock_irqsave(&master->queue_lock, flags); 1122 1123 /* Make sure we are not already running a message */ 1124 if (master->cur_msg) { 1125 spin_unlock_irqrestore(&master->queue_lock, flags); 1126 return; 1127 } 1128 1129 /* If another context is idling the device then defer */ 1130 if (master->idling) { 1131 kthread_queue_work(&master->kworker, &master->pump_messages); 1132 spin_unlock_irqrestore(&master->queue_lock, flags); 1133 return; 1134 } 1135 1136 /* Check if the queue is idle */ 1137 if (list_empty(&master->queue) || !master->running) { 1138 if (!master->busy) { 1139 spin_unlock_irqrestore(&master->queue_lock, flags); 1140 return; 1141 } 1142 1143 /* Only do teardown in the thread */ 1144 if (!in_kthread) { 1145 kthread_queue_work(&master->kworker, 1146 &master->pump_messages); 1147 spin_unlock_irqrestore(&master->queue_lock, flags); 1148 return; 1149 } 1150 1151 master->busy = false; 1152 master->idling = true; 1153 spin_unlock_irqrestore(&master->queue_lock, flags); 1154 1155 kfree(master->dummy_rx); 1156 master->dummy_rx = NULL; 1157 kfree(master->dummy_tx); 1158 master->dummy_tx = NULL; 1159 if (master->unprepare_transfer_hardware && 1160 master->unprepare_transfer_hardware(master)) 1161 dev_err(&master->dev, 1162 "failed to unprepare transfer hardware\n"); 1163 if (master->auto_runtime_pm) { 1164 pm_runtime_mark_last_busy(master->dev.parent); 1165 pm_runtime_put_autosuspend(master->dev.parent); 1166 } 1167 trace_spi_master_idle(master); 1168 1169 spin_lock_irqsave(&master->queue_lock, flags); 1170 master->idling = false; 1171 spin_unlock_irqrestore(&master->queue_lock, flags); 1172 return; 1173 } 1174 1175 /* Extract head of queue */ 1176 master->cur_msg = 1177 list_first_entry(&master->queue, struct spi_message, queue); 1178 1179 list_del_init(&master->cur_msg->queue); 1180 if (master->busy) 1181 was_busy = true; 1182 else 1183 master->busy = true; 1184 spin_unlock_irqrestore(&master->queue_lock, flags); 1185 1186 mutex_lock(&master->io_mutex); 1187 1188 if (!was_busy && master->auto_runtime_pm) { 1189 ret = pm_runtime_get_sync(master->dev.parent); 1190 if (ret < 0) { 1191 dev_err(&master->dev, "Failed to power device: %d\n", 1192 ret); 1193 mutex_unlock(&master->io_mutex); 1194 return; 1195 } 1196 } 1197 1198 if (!was_busy) 1199 trace_spi_master_busy(master); 1200 1201 if (!was_busy && master->prepare_transfer_hardware) { 1202 ret = master->prepare_transfer_hardware(master); 1203 if (ret) { 1204 dev_err(&master->dev, 1205 "failed to prepare transfer hardware\n"); 1206 1207 if (master->auto_runtime_pm) 1208 pm_runtime_put(master->dev.parent); 1209 mutex_unlock(&master->io_mutex); 1210 return; 1211 } 1212 } 1213 1214 trace_spi_message_start(master->cur_msg); 1215 1216 if (master->prepare_message) { 1217 ret = master->prepare_message(master, master->cur_msg); 1218 if (ret) { 1219 dev_err(&master->dev, 1220 "failed to prepare message: %d\n", ret); 1221 master->cur_msg->status = ret; 1222 spi_finalize_current_message(master); 1223 goto out; 1224 } 1225 master->cur_msg_prepared = true; 1226 } 1227 1228 ret = spi_map_msg(master, master->cur_msg); 1229 if (ret) { 1230 master->cur_msg->status = ret; 1231 spi_finalize_current_message(master); 1232 goto out; 1233 } 1234 1235 ret = master->transfer_one_message(master, master->cur_msg); 1236 if (ret) { 1237 dev_err(&master->dev, 1238 "failed to transfer one message from queue\n"); 1239 goto out; 1240 } 1241 1242 out: 1243 mutex_unlock(&master->io_mutex); 1244 1245 /* Prod the scheduler in case transfer_one() was busy waiting */ 1246 if (!ret) 1247 cond_resched(); 1248 } 1249 1250 /** 1251 * spi_pump_messages - kthread work function which processes spi message queue 1252 * @work: pointer to kthread work struct contained in the master struct 1253 */ 1254 static void spi_pump_messages(struct kthread_work *work) 1255 { 1256 struct spi_master *master = 1257 container_of(work, struct spi_master, pump_messages); 1258 1259 __spi_pump_messages(master, true); 1260 } 1261 1262 static int spi_init_queue(struct spi_master *master) 1263 { 1264 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 1265 1266 master->running = false; 1267 master->busy = false; 1268 1269 kthread_init_worker(&master->kworker); 1270 master->kworker_task = kthread_run(kthread_worker_fn, 1271 &master->kworker, "%s", 1272 dev_name(&master->dev)); 1273 if (IS_ERR(master->kworker_task)) { 1274 dev_err(&master->dev, "failed to create message pump task\n"); 1275 return PTR_ERR(master->kworker_task); 1276 } 1277 kthread_init_work(&master->pump_messages, spi_pump_messages); 1278 1279 /* 1280 * Master config will indicate if this controller should run the 1281 * message pump with high (realtime) priority to reduce the transfer 1282 * latency on the bus by minimising the delay between a transfer 1283 * request and the scheduling of the message pump thread. Without this 1284 * setting the message pump thread will remain at default priority. 1285 */ 1286 if (master->rt) { 1287 dev_info(&master->dev, 1288 "will run message pump with realtime priority\n"); 1289 sched_setscheduler(master->kworker_task, SCHED_FIFO, ¶m); 1290 } 1291 1292 return 0; 1293 } 1294 1295 /** 1296 * spi_get_next_queued_message() - called by driver to check for queued 1297 * messages 1298 * @master: the master to check for queued messages 1299 * 1300 * If there are more messages in the queue, the next message is returned from 1301 * this call. 1302 * 1303 * Return: the next message in the queue, else NULL if the queue is empty. 1304 */ 1305 struct spi_message *spi_get_next_queued_message(struct spi_master *master) 1306 { 1307 struct spi_message *next; 1308 unsigned long flags; 1309 1310 /* get a pointer to the next message, if any */ 1311 spin_lock_irqsave(&master->queue_lock, flags); 1312 next = list_first_entry_or_null(&master->queue, struct spi_message, 1313 queue); 1314 spin_unlock_irqrestore(&master->queue_lock, flags); 1315 1316 return next; 1317 } 1318 EXPORT_SYMBOL_GPL(spi_get_next_queued_message); 1319 1320 /** 1321 * spi_finalize_current_message() - the current message is complete 1322 * @master: the master to return the message to 1323 * 1324 * Called by the driver to notify the core that the message in the front of the 1325 * queue is complete and can be removed from the queue. 1326 */ 1327 void spi_finalize_current_message(struct spi_master *master) 1328 { 1329 struct spi_message *mesg; 1330 unsigned long flags; 1331 int ret; 1332 1333 spin_lock_irqsave(&master->queue_lock, flags); 1334 mesg = master->cur_msg; 1335 spin_unlock_irqrestore(&master->queue_lock, flags); 1336 1337 spi_unmap_msg(master, mesg); 1338 1339 if (master->cur_msg_prepared && master->unprepare_message) { 1340 ret = master->unprepare_message(master, mesg); 1341 if (ret) { 1342 dev_err(&master->dev, 1343 "failed to unprepare message: %d\n", ret); 1344 } 1345 } 1346 1347 spin_lock_irqsave(&master->queue_lock, flags); 1348 master->cur_msg = NULL; 1349 master->cur_msg_prepared = false; 1350 kthread_queue_work(&master->kworker, &master->pump_messages); 1351 spin_unlock_irqrestore(&master->queue_lock, flags); 1352 1353 trace_spi_message_done(mesg); 1354 1355 mesg->state = NULL; 1356 if (mesg->complete) 1357 mesg->complete(mesg->context); 1358 } 1359 EXPORT_SYMBOL_GPL(spi_finalize_current_message); 1360 1361 static int spi_start_queue(struct spi_master *master) 1362 { 1363 unsigned long flags; 1364 1365 spin_lock_irqsave(&master->queue_lock, flags); 1366 1367 if (master->running || master->busy) { 1368 spin_unlock_irqrestore(&master->queue_lock, flags); 1369 return -EBUSY; 1370 } 1371 1372 master->running = true; 1373 master->cur_msg = NULL; 1374 spin_unlock_irqrestore(&master->queue_lock, flags); 1375 1376 kthread_queue_work(&master->kworker, &master->pump_messages); 1377 1378 return 0; 1379 } 1380 1381 static int spi_stop_queue(struct spi_master *master) 1382 { 1383 unsigned long flags; 1384 unsigned limit = 500; 1385 int ret = 0; 1386 1387 spin_lock_irqsave(&master->queue_lock, flags); 1388 1389 /* 1390 * This is a bit lame, but is optimized for the common execution path. 1391 * A wait_queue on the master->busy could be used, but then the common 1392 * execution path (pump_messages) would be required to call wake_up or 1393 * friends on every SPI message. Do this instead. 1394 */ 1395 while ((!list_empty(&master->queue) || master->busy) && limit--) { 1396 spin_unlock_irqrestore(&master->queue_lock, flags); 1397 usleep_range(10000, 11000); 1398 spin_lock_irqsave(&master->queue_lock, flags); 1399 } 1400 1401 if (!list_empty(&master->queue) || master->busy) 1402 ret = -EBUSY; 1403 else 1404 master->running = false; 1405 1406 spin_unlock_irqrestore(&master->queue_lock, flags); 1407 1408 if (ret) { 1409 dev_warn(&master->dev, 1410 "could not stop message queue\n"); 1411 return ret; 1412 } 1413 return ret; 1414 } 1415 1416 static int spi_destroy_queue(struct spi_master *master) 1417 { 1418 int ret; 1419 1420 ret = spi_stop_queue(master); 1421 1422 /* 1423 * kthread_flush_worker will block until all work is done. 1424 * If the reason that stop_queue timed out is that the work will never 1425 * finish, then it does no good to call flush/stop thread, so 1426 * return anyway. 1427 */ 1428 if (ret) { 1429 dev_err(&master->dev, "problem destroying queue\n"); 1430 return ret; 1431 } 1432 1433 kthread_flush_worker(&master->kworker); 1434 kthread_stop(master->kworker_task); 1435 1436 return 0; 1437 } 1438 1439 static int __spi_queued_transfer(struct spi_device *spi, 1440 struct spi_message *msg, 1441 bool need_pump) 1442 { 1443 struct spi_master *master = spi->master; 1444 unsigned long flags; 1445 1446 spin_lock_irqsave(&master->queue_lock, flags); 1447 1448 if (!master->running) { 1449 spin_unlock_irqrestore(&master->queue_lock, flags); 1450 return -ESHUTDOWN; 1451 } 1452 msg->actual_length = 0; 1453 msg->status = -EINPROGRESS; 1454 1455 list_add_tail(&msg->queue, &master->queue); 1456 if (!master->busy && need_pump) 1457 kthread_queue_work(&master->kworker, &master->pump_messages); 1458 1459 spin_unlock_irqrestore(&master->queue_lock, flags); 1460 return 0; 1461 } 1462 1463 /** 1464 * spi_queued_transfer - transfer function for queued transfers 1465 * @spi: spi device which is requesting transfer 1466 * @msg: spi message which is to handled is queued to driver queue 1467 * 1468 * Return: zero on success, else a negative error code. 1469 */ 1470 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) 1471 { 1472 return __spi_queued_transfer(spi, msg, true); 1473 } 1474 1475 static int spi_master_initialize_queue(struct spi_master *master) 1476 { 1477 int ret; 1478 1479 master->transfer = spi_queued_transfer; 1480 if (!master->transfer_one_message) 1481 master->transfer_one_message = spi_transfer_one_message; 1482 1483 /* Initialize and start queue */ 1484 ret = spi_init_queue(master); 1485 if (ret) { 1486 dev_err(&master->dev, "problem initializing queue\n"); 1487 goto err_init_queue; 1488 } 1489 master->queued = true; 1490 ret = spi_start_queue(master); 1491 if (ret) { 1492 dev_err(&master->dev, "problem starting queue\n"); 1493 goto err_start_queue; 1494 } 1495 1496 return 0; 1497 1498 err_start_queue: 1499 spi_destroy_queue(master); 1500 err_init_queue: 1501 return ret; 1502 } 1503 1504 /*-------------------------------------------------------------------------*/ 1505 1506 #if defined(CONFIG_OF) 1507 static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi, 1508 struct device_node *nc) 1509 { 1510 u32 value; 1511 int rc; 1512 1513 /* Device address */ 1514 rc = of_property_read_u32(nc, "reg", &value); 1515 if (rc) { 1516 dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n", 1517 nc->full_name, rc); 1518 return rc; 1519 } 1520 spi->chip_select = value; 1521 1522 /* Mode (clock phase/polarity/etc.) */ 1523 if (of_find_property(nc, "spi-cpha", NULL)) 1524 spi->mode |= SPI_CPHA; 1525 if (of_find_property(nc, "spi-cpol", NULL)) 1526 spi->mode |= SPI_CPOL; 1527 if (of_find_property(nc, "spi-cs-high", NULL)) 1528 spi->mode |= SPI_CS_HIGH; 1529 if (of_find_property(nc, "spi-3wire", NULL)) 1530 spi->mode |= SPI_3WIRE; 1531 if (of_find_property(nc, "spi-lsb-first", NULL)) 1532 spi->mode |= SPI_LSB_FIRST; 1533 1534 /* Device DUAL/QUAD mode */ 1535 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 1536 switch (value) { 1537 case 1: 1538 break; 1539 case 2: 1540 spi->mode |= SPI_TX_DUAL; 1541 break; 1542 case 4: 1543 spi->mode |= SPI_TX_QUAD; 1544 break; 1545 default: 1546 dev_warn(&master->dev, 1547 "spi-tx-bus-width %d not supported\n", 1548 value); 1549 break; 1550 } 1551 } 1552 1553 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 1554 switch (value) { 1555 case 1: 1556 break; 1557 case 2: 1558 spi->mode |= SPI_RX_DUAL; 1559 break; 1560 case 4: 1561 spi->mode |= SPI_RX_QUAD; 1562 break; 1563 default: 1564 dev_warn(&master->dev, 1565 "spi-rx-bus-width %d not supported\n", 1566 value); 1567 break; 1568 } 1569 } 1570 1571 /* Device speed */ 1572 rc = of_property_read_u32(nc, "spi-max-frequency", &value); 1573 if (rc) { 1574 dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n", 1575 nc->full_name, rc); 1576 return rc; 1577 } 1578 spi->max_speed_hz = value; 1579 1580 return 0; 1581 } 1582 1583 static struct spi_device * 1584 of_register_spi_device(struct spi_master *master, struct device_node *nc) 1585 { 1586 struct spi_device *spi; 1587 int rc; 1588 1589 /* Alloc an spi_device */ 1590 spi = spi_alloc_device(master); 1591 if (!spi) { 1592 dev_err(&master->dev, "spi_device alloc error for %s\n", 1593 nc->full_name); 1594 rc = -ENOMEM; 1595 goto err_out; 1596 } 1597 1598 /* Select device driver */ 1599 rc = of_modalias_node(nc, spi->modalias, 1600 sizeof(spi->modalias)); 1601 if (rc < 0) { 1602 dev_err(&master->dev, "cannot find modalias for %s\n", 1603 nc->full_name); 1604 goto err_out; 1605 } 1606 1607 rc = of_spi_parse_dt(master, spi, nc); 1608 if (rc) 1609 goto err_out; 1610 1611 /* Store a pointer to the node in the device structure */ 1612 of_node_get(nc); 1613 spi->dev.of_node = nc; 1614 1615 /* Register the new device */ 1616 rc = spi_add_device(spi); 1617 if (rc) { 1618 dev_err(&master->dev, "spi_device register error %s\n", 1619 nc->full_name); 1620 goto err_of_node_put; 1621 } 1622 1623 return spi; 1624 1625 err_of_node_put: 1626 of_node_put(nc); 1627 err_out: 1628 spi_dev_put(spi); 1629 return ERR_PTR(rc); 1630 } 1631 1632 /** 1633 * of_register_spi_devices() - Register child devices onto the SPI bus 1634 * @master: Pointer to spi_master device 1635 * 1636 * Registers an spi_device for each child node of master node which has a 'reg' 1637 * property. 1638 */ 1639 static void of_register_spi_devices(struct spi_master *master) 1640 { 1641 struct spi_device *spi; 1642 struct device_node *nc; 1643 1644 if (!master->dev.of_node) 1645 return; 1646 1647 for_each_available_child_of_node(master->dev.of_node, nc) { 1648 if (of_node_test_and_set_flag(nc, OF_POPULATED)) 1649 continue; 1650 spi = of_register_spi_device(master, nc); 1651 if (IS_ERR(spi)) { 1652 dev_warn(&master->dev, "Failed to create SPI device for %s\n", 1653 nc->full_name); 1654 of_node_clear_flag(nc, OF_POPULATED); 1655 } 1656 } 1657 } 1658 #else 1659 static void of_register_spi_devices(struct spi_master *master) { } 1660 #endif 1661 1662 #ifdef CONFIG_ACPI 1663 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 1664 { 1665 struct spi_device *spi = data; 1666 struct spi_master *master = spi->master; 1667 1668 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 1669 struct acpi_resource_spi_serialbus *sb; 1670 1671 sb = &ares->data.spi_serial_bus; 1672 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 1673 /* 1674 * ACPI DeviceSelection numbering is handled by the 1675 * host controller driver in Windows and can vary 1676 * from driver to driver. In Linux we always expect 1677 * 0 .. max - 1 so we need to ask the driver to 1678 * translate between the two schemes. 1679 */ 1680 if (master->fw_translate_cs) { 1681 int cs = master->fw_translate_cs(master, 1682 sb->device_selection); 1683 if (cs < 0) 1684 return cs; 1685 spi->chip_select = cs; 1686 } else { 1687 spi->chip_select = sb->device_selection; 1688 } 1689 1690 spi->max_speed_hz = sb->connection_speed; 1691 1692 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) 1693 spi->mode |= SPI_CPHA; 1694 if (sb->clock_polarity == ACPI_SPI_START_HIGH) 1695 spi->mode |= SPI_CPOL; 1696 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) 1697 spi->mode |= SPI_CS_HIGH; 1698 } 1699 } else if (spi->irq < 0) { 1700 struct resource r; 1701 1702 if (acpi_dev_resource_interrupt(ares, 0, &r)) 1703 spi->irq = r.start; 1704 } 1705 1706 /* Always tell the ACPI core to skip this resource */ 1707 return 1; 1708 } 1709 1710 static acpi_status acpi_register_spi_device(struct spi_master *master, 1711 struct acpi_device *adev) 1712 { 1713 struct list_head resource_list; 1714 struct spi_device *spi; 1715 int ret; 1716 1717 if (acpi_bus_get_status(adev) || !adev->status.present || 1718 acpi_device_enumerated(adev)) 1719 return AE_OK; 1720 1721 spi = spi_alloc_device(master); 1722 if (!spi) { 1723 dev_err(&master->dev, "failed to allocate SPI device for %s\n", 1724 dev_name(&adev->dev)); 1725 return AE_NO_MEMORY; 1726 } 1727 1728 ACPI_COMPANION_SET(&spi->dev, adev); 1729 spi->irq = -1; 1730 1731 INIT_LIST_HEAD(&resource_list); 1732 ret = acpi_dev_get_resources(adev, &resource_list, 1733 acpi_spi_add_resource, spi); 1734 acpi_dev_free_resource_list(&resource_list); 1735 1736 if (ret < 0 || !spi->max_speed_hz) { 1737 spi_dev_put(spi); 1738 return AE_OK; 1739 } 1740 1741 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, 1742 sizeof(spi->modalias)); 1743 1744 if (spi->irq < 0) 1745 spi->irq = acpi_dev_gpio_irq_get(adev, 0); 1746 1747 acpi_device_set_enumerated(adev); 1748 1749 adev->power.flags.ignore_parent = true; 1750 if (spi_add_device(spi)) { 1751 adev->power.flags.ignore_parent = false; 1752 dev_err(&master->dev, "failed to add SPI device %s from ACPI\n", 1753 dev_name(&adev->dev)); 1754 spi_dev_put(spi); 1755 } 1756 1757 return AE_OK; 1758 } 1759 1760 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, 1761 void *data, void **return_value) 1762 { 1763 struct spi_master *master = data; 1764 struct acpi_device *adev; 1765 1766 if (acpi_bus_get_device(handle, &adev)) 1767 return AE_OK; 1768 1769 return acpi_register_spi_device(master, adev); 1770 } 1771 1772 static void acpi_register_spi_devices(struct spi_master *master) 1773 { 1774 acpi_status status; 1775 acpi_handle handle; 1776 1777 handle = ACPI_HANDLE(master->dev.parent); 1778 if (!handle) 1779 return; 1780 1781 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, 1782 acpi_spi_add_device, NULL, 1783 master, NULL); 1784 if (ACPI_FAILURE(status)) 1785 dev_warn(&master->dev, "failed to enumerate SPI slaves\n"); 1786 } 1787 #else 1788 static inline void acpi_register_spi_devices(struct spi_master *master) {} 1789 #endif /* CONFIG_ACPI */ 1790 1791 static void spi_master_release(struct device *dev) 1792 { 1793 struct spi_master *master; 1794 1795 master = container_of(dev, struct spi_master, dev); 1796 kfree(master); 1797 } 1798 1799 static struct class spi_master_class = { 1800 .name = "spi_master", 1801 .owner = THIS_MODULE, 1802 .dev_release = spi_master_release, 1803 .dev_groups = spi_master_groups, 1804 }; 1805 1806 1807 /** 1808 * spi_alloc_master - allocate SPI master controller 1809 * @dev: the controller, possibly using the platform_bus 1810 * @size: how much zeroed driver-private data to allocate; the pointer to this 1811 * memory is in the driver_data field of the returned device, 1812 * accessible with spi_master_get_devdata(). 1813 * Context: can sleep 1814 * 1815 * This call is used only by SPI master controller drivers, which are the 1816 * only ones directly touching chip registers. It's how they allocate 1817 * an spi_master structure, prior to calling spi_register_master(). 1818 * 1819 * This must be called from context that can sleep. 1820 * 1821 * The caller is responsible for assigning the bus number and initializing 1822 * the master's methods before calling spi_register_master(); and (after errors 1823 * adding the device) calling spi_master_put() to prevent a memory leak. 1824 * 1825 * Return: the SPI master structure on success, else NULL. 1826 */ 1827 struct spi_master *spi_alloc_master(struct device *dev, unsigned size) 1828 { 1829 struct spi_master *master; 1830 1831 if (!dev) 1832 return NULL; 1833 1834 master = kzalloc(size + sizeof(*master), GFP_KERNEL); 1835 if (!master) 1836 return NULL; 1837 1838 device_initialize(&master->dev); 1839 master->bus_num = -1; 1840 master->num_chipselect = 1; 1841 master->dev.class = &spi_master_class; 1842 master->dev.parent = dev; 1843 pm_suspend_ignore_children(&master->dev, true); 1844 spi_master_set_devdata(master, &master[1]); 1845 1846 return master; 1847 } 1848 EXPORT_SYMBOL_GPL(spi_alloc_master); 1849 1850 #ifdef CONFIG_OF 1851 static int of_spi_register_master(struct spi_master *master) 1852 { 1853 int nb, i, *cs; 1854 struct device_node *np = master->dev.of_node; 1855 1856 if (!np) 1857 return 0; 1858 1859 nb = of_gpio_named_count(np, "cs-gpios"); 1860 master->num_chipselect = max_t(int, nb, master->num_chipselect); 1861 1862 /* Return error only for an incorrectly formed cs-gpios property */ 1863 if (nb == 0 || nb == -ENOENT) 1864 return 0; 1865 else if (nb < 0) 1866 return nb; 1867 1868 cs = devm_kzalloc(&master->dev, 1869 sizeof(int) * master->num_chipselect, 1870 GFP_KERNEL); 1871 master->cs_gpios = cs; 1872 1873 if (!master->cs_gpios) 1874 return -ENOMEM; 1875 1876 for (i = 0; i < master->num_chipselect; i++) 1877 cs[i] = -ENOENT; 1878 1879 for (i = 0; i < nb; i++) 1880 cs[i] = of_get_named_gpio(np, "cs-gpios", i); 1881 1882 return 0; 1883 } 1884 #else 1885 static int of_spi_register_master(struct spi_master *master) 1886 { 1887 return 0; 1888 } 1889 #endif 1890 1891 /** 1892 * spi_register_master - register SPI master controller 1893 * @master: initialized master, originally from spi_alloc_master() 1894 * Context: can sleep 1895 * 1896 * SPI master controllers connect to their drivers using some non-SPI bus, 1897 * such as the platform bus. The final stage of probe() in that code 1898 * includes calling spi_register_master() to hook up to this SPI bus glue. 1899 * 1900 * SPI controllers use board specific (often SOC specific) bus numbers, 1901 * and board-specific addressing for SPI devices combines those numbers 1902 * with chip select numbers. Since SPI does not directly support dynamic 1903 * device identification, boards need configuration tables telling which 1904 * chip is at which address. 1905 * 1906 * This must be called from context that can sleep. It returns zero on 1907 * success, else a negative error code (dropping the master's refcount). 1908 * After a successful return, the caller is responsible for calling 1909 * spi_unregister_master(). 1910 * 1911 * Return: zero on success, else a negative error code. 1912 */ 1913 int spi_register_master(struct spi_master *master) 1914 { 1915 static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1); 1916 struct device *dev = master->dev.parent; 1917 struct boardinfo *bi; 1918 int status = -ENODEV; 1919 int dynamic = 0; 1920 1921 if (!dev) 1922 return -ENODEV; 1923 1924 status = of_spi_register_master(master); 1925 if (status) 1926 return status; 1927 1928 /* even if it's just one always-selected device, there must 1929 * be at least one chipselect 1930 */ 1931 if (master->num_chipselect == 0) 1932 return -EINVAL; 1933 1934 if ((master->bus_num < 0) && master->dev.of_node) 1935 master->bus_num = of_alias_get_id(master->dev.of_node, "spi"); 1936 1937 /* convention: dynamically assigned bus IDs count down from the max */ 1938 if (master->bus_num < 0) { 1939 /* FIXME switch to an IDR based scheme, something like 1940 * I2C now uses, so we can't run out of "dynamic" IDs 1941 */ 1942 master->bus_num = atomic_dec_return(&dyn_bus_id); 1943 dynamic = 1; 1944 } 1945 1946 INIT_LIST_HEAD(&master->queue); 1947 spin_lock_init(&master->queue_lock); 1948 spin_lock_init(&master->bus_lock_spinlock); 1949 mutex_init(&master->bus_lock_mutex); 1950 mutex_init(&master->io_mutex); 1951 master->bus_lock_flag = 0; 1952 init_completion(&master->xfer_completion); 1953 if (!master->max_dma_len) 1954 master->max_dma_len = INT_MAX; 1955 1956 /* register the device, then userspace will see it. 1957 * registration fails if the bus ID is in use. 1958 */ 1959 dev_set_name(&master->dev, "spi%u", master->bus_num); 1960 status = device_add(&master->dev); 1961 if (status < 0) 1962 goto done; 1963 dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev), 1964 dynamic ? " (dynamic)" : ""); 1965 1966 /* If we're using a queued driver, start the queue */ 1967 if (master->transfer) 1968 dev_info(dev, "master is unqueued, this is deprecated\n"); 1969 else { 1970 status = spi_master_initialize_queue(master); 1971 if (status) { 1972 device_del(&master->dev); 1973 goto done; 1974 } 1975 } 1976 /* add statistics */ 1977 spin_lock_init(&master->statistics.lock); 1978 1979 mutex_lock(&board_lock); 1980 list_add_tail(&master->list, &spi_master_list); 1981 list_for_each_entry(bi, &board_list, list) 1982 spi_match_master_to_boardinfo(master, &bi->board_info); 1983 mutex_unlock(&board_lock); 1984 1985 /* Register devices from the device tree and ACPI */ 1986 of_register_spi_devices(master); 1987 acpi_register_spi_devices(master); 1988 done: 1989 return status; 1990 } 1991 EXPORT_SYMBOL_GPL(spi_register_master); 1992 1993 static void devm_spi_unregister(struct device *dev, void *res) 1994 { 1995 spi_unregister_master(*(struct spi_master **)res); 1996 } 1997 1998 /** 1999 * dev_spi_register_master - register managed SPI master controller 2000 * @dev: device managing SPI master 2001 * @master: initialized master, originally from spi_alloc_master() 2002 * Context: can sleep 2003 * 2004 * Register a SPI device as with spi_register_master() which will 2005 * automatically be unregister 2006 * 2007 * Return: zero on success, else a negative error code. 2008 */ 2009 int devm_spi_register_master(struct device *dev, struct spi_master *master) 2010 { 2011 struct spi_master **ptr; 2012 int ret; 2013 2014 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); 2015 if (!ptr) 2016 return -ENOMEM; 2017 2018 ret = spi_register_master(master); 2019 if (!ret) { 2020 *ptr = master; 2021 devres_add(dev, ptr); 2022 } else { 2023 devres_free(ptr); 2024 } 2025 2026 return ret; 2027 } 2028 EXPORT_SYMBOL_GPL(devm_spi_register_master); 2029 2030 static int __unregister(struct device *dev, void *null) 2031 { 2032 spi_unregister_device(to_spi_device(dev)); 2033 return 0; 2034 } 2035 2036 /** 2037 * spi_unregister_master - unregister SPI master controller 2038 * @master: the master being unregistered 2039 * Context: can sleep 2040 * 2041 * This call is used only by SPI master controller drivers, which are the 2042 * only ones directly touching chip registers. 2043 * 2044 * This must be called from context that can sleep. 2045 */ 2046 void spi_unregister_master(struct spi_master *master) 2047 { 2048 int dummy; 2049 2050 if (master->queued) { 2051 if (spi_destroy_queue(master)) 2052 dev_err(&master->dev, "queue remove failed\n"); 2053 } 2054 2055 mutex_lock(&board_lock); 2056 list_del(&master->list); 2057 mutex_unlock(&board_lock); 2058 2059 dummy = device_for_each_child(&master->dev, NULL, __unregister); 2060 device_unregister(&master->dev); 2061 } 2062 EXPORT_SYMBOL_GPL(spi_unregister_master); 2063 2064 int spi_master_suspend(struct spi_master *master) 2065 { 2066 int ret; 2067 2068 /* Basically no-ops for non-queued masters */ 2069 if (!master->queued) 2070 return 0; 2071 2072 ret = spi_stop_queue(master); 2073 if (ret) 2074 dev_err(&master->dev, "queue stop failed\n"); 2075 2076 return ret; 2077 } 2078 EXPORT_SYMBOL_GPL(spi_master_suspend); 2079 2080 int spi_master_resume(struct spi_master *master) 2081 { 2082 int ret; 2083 2084 if (!master->queued) 2085 return 0; 2086 2087 ret = spi_start_queue(master); 2088 if (ret) 2089 dev_err(&master->dev, "queue restart failed\n"); 2090 2091 return ret; 2092 } 2093 EXPORT_SYMBOL_GPL(spi_master_resume); 2094 2095 static int __spi_master_match(struct device *dev, const void *data) 2096 { 2097 struct spi_master *m; 2098 const u16 *bus_num = data; 2099 2100 m = container_of(dev, struct spi_master, dev); 2101 return m->bus_num == *bus_num; 2102 } 2103 2104 /** 2105 * spi_busnum_to_master - look up master associated with bus_num 2106 * @bus_num: the master's bus number 2107 * Context: can sleep 2108 * 2109 * This call may be used with devices that are registered after 2110 * arch init time. It returns a refcounted pointer to the relevant 2111 * spi_master (which the caller must release), or NULL if there is 2112 * no such master registered. 2113 * 2114 * Return: the SPI master structure on success, else NULL. 2115 */ 2116 struct spi_master *spi_busnum_to_master(u16 bus_num) 2117 { 2118 struct device *dev; 2119 struct spi_master *master = NULL; 2120 2121 dev = class_find_device(&spi_master_class, NULL, &bus_num, 2122 __spi_master_match); 2123 if (dev) 2124 master = container_of(dev, struct spi_master, dev); 2125 /* reference got in class_find_device */ 2126 return master; 2127 } 2128 EXPORT_SYMBOL_GPL(spi_busnum_to_master); 2129 2130 /*-------------------------------------------------------------------------*/ 2131 2132 /* Core methods for SPI resource management */ 2133 2134 /** 2135 * spi_res_alloc - allocate a spi resource that is life-cycle managed 2136 * during the processing of a spi_message while using 2137 * spi_transfer_one 2138 * @spi: the spi device for which we allocate memory 2139 * @release: the release code to execute for this resource 2140 * @size: size to alloc and return 2141 * @gfp: GFP allocation flags 2142 * 2143 * Return: the pointer to the allocated data 2144 * 2145 * This may get enhanced in the future to allocate from a memory pool 2146 * of the @spi_device or @spi_master to avoid repeated allocations. 2147 */ 2148 void *spi_res_alloc(struct spi_device *spi, 2149 spi_res_release_t release, 2150 size_t size, gfp_t gfp) 2151 { 2152 struct spi_res *sres; 2153 2154 sres = kzalloc(sizeof(*sres) + size, gfp); 2155 if (!sres) 2156 return NULL; 2157 2158 INIT_LIST_HEAD(&sres->entry); 2159 sres->release = release; 2160 2161 return sres->data; 2162 } 2163 EXPORT_SYMBOL_GPL(spi_res_alloc); 2164 2165 /** 2166 * spi_res_free - free an spi resource 2167 * @res: pointer to the custom data of a resource 2168 * 2169 */ 2170 void spi_res_free(void *res) 2171 { 2172 struct spi_res *sres = container_of(res, struct spi_res, data); 2173 2174 if (!res) 2175 return; 2176 2177 WARN_ON(!list_empty(&sres->entry)); 2178 kfree(sres); 2179 } 2180 EXPORT_SYMBOL_GPL(spi_res_free); 2181 2182 /** 2183 * spi_res_add - add a spi_res to the spi_message 2184 * @message: the spi message 2185 * @res: the spi_resource 2186 */ 2187 void spi_res_add(struct spi_message *message, void *res) 2188 { 2189 struct spi_res *sres = container_of(res, struct spi_res, data); 2190 2191 WARN_ON(!list_empty(&sres->entry)); 2192 list_add_tail(&sres->entry, &message->resources); 2193 } 2194 EXPORT_SYMBOL_GPL(spi_res_add); 2195 2196 /** 2197 * spi_res_release - release all spi resources for this message 2198 * @master: the @spi_master 2199 * @message: the @spi_message 2200 */ 2201 void spi_res_release(struct spi_master *master, 2202 struct spi_message *message) 2203 { 2204 struct spi_res *res; 2205 2206 while (!list_empty(&message->resources)) { 2207 res = list_last_entry(&message->resources, 2208 struct spi_res, entry); 2209 2210 if (res->release) 2211 res->release(master, message, res->data); 2212 2213 list_del(&res->entry); 2214 2215 kfree(res); 2216 } 2217 } 2218 EXPORT_SYMBOL_GPL(spi_res_release); 2219 2220 /*-------------------------------------------------------------------------*/ 2221 2222 /* Core methods for spi_message alterations */ 2223 2224 static void __spi_replace_transfers_release(struct spi_master *master, 2225 struct spi_message *msg, 2226 void *res) 2227 { 2228 struct spi_replaced_transfers *rxfer = res; 2229 size_t i; 2230 2231 /* call extra callback if requested */ 2232 if (rxfer->release) 2233 rxfer->release(master, msg, res); 2234 2235 /* insert replaced transfers back into the message */ 2236 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); 2237 2238 /* remove the formerly inserted entries */ 2239 for (i = 0; i < rxfer->inserted; i++) 2240 list_del(&rxfer->inserted_transfers[i].transfer_list); 2241 } 2242 2243 /** 2244 * spi_replace_transfers - replace transfers with several transfers 2245 * and register change with spi_message.resources 2246 * @msg: the spi_message we work upon 2247 * @xfer_first: the first spi_transfer we want to replace 2248 * @remove: number of transfers to remove 2249 * @insert: the number of transfers we want to insert instead 2250 * @release: extra release code necessary in some circumstances 2251 * @extradatasize: extra data to allocate (with alignment guarantees 2252 * of struct @spi_transfer) 2253 * @gfp: gfp flags 2254 * 2255 * Returns: pointer to @spi_replaced_transfers, 2256 * PTR_ERR(...) in case of errors. 2257 */ 2258 struct spi_replaced_transfers *spi_replace_transfers( 2259 struct spi_message *msg, 2260 struct spi_transfer *xfer_first, 2261 size_t remove, 2262 size_t insert, 2263 spi_replaced_release_t release, 2264 size_t extradatasize, 2265 gfp_t gfp) 2266 { 2267 struct spi_replaced_transfers *rxfer; 2268 struct spi_transfer *xfer; 2269 size_t i; 2270 2271 /* allocate the structure using spi_res */ 2272 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, 2273 insert * sizeof(struct spi_transfer) 2274 + sizeof(struct spi_replaced_transfers) 2275 + extradatasize, 2276 gfp); 2277 if (!rxfer) 2278 return ERR_PTR(-ENOMEM); 2279 2280 /* the release code to invoke before running the generic release */ 2281 rxfer->release = release; 2282 2283 /* assign extradata */ 2284 if (extradatasize) 2285 rxfer->extradata = 2286 &rxfer->inserted_transfers[insert]; 2287 2288 /* init the replaced_transfers list */ 2289 INIT_LIST_HEAD(&rxfer->replaced_transfers); 2290 2291 /* assign the list_entry after which we should reinsert 2292 * the @replaced_transfers - it may be spi_message.messages! 2293 */ 2294 rxfer->replaced_after = xfer_first->transfer_list.prev; 2295 2296 /* remove the requested number of transfers */ 2297 for (i = 0; i < remove; i++) { 2298 /* if the entry after replaced_after it is msg->transfers 2299 * then we have been requested to remove more transfers 2300 * than are in the list 2301 */ 2302 if (rxfer->replaced_after->next == &msg->transfers) { 2303 dev_err(&msg->spi->dev, 2304 "requested to remove more spi_transfers than are available\n"); 2305 /* insert replaced transfers back into the message */ 2306 list_splice(&rxfer->replaced_transfers, 2307 rxfer->replaced_after); 2308 2309 /* free the spi_replace_transfer structure */ 2310 spi_res_free(rxfer); 2311 2312 /* and return with an error */ 2313 return ERR_PTR(-EINVAL); 2314 } 2315 2316 /* remove the entry after replaced_after from list of 2317 * transfers and add it to list of replaced_transfers 2318 */ 2319 list_move_tail(rxfer->replaced_after->next, 2320 &rxfer->replaced_transfers); 2321 } 2322 2323 /* create copy of the given xfer with identical settings 2324 * based on the first transfer to get removed 2325 */ 2326 for (i = 0; i < insert; i++) { 2327 /* we need to run in reverse order */ 2328 xfer = &rxfer->inserted_transfers[insert - 1 - i]; 2329 2330 /* copy all spi_transfer data */ 2331 memcpy(xfer, xfer_first, sizeof(*xfer)); 2332 2333 /* add to list */ 2334 list_add(&xfer->transfer_list, rxfer->replaced_after); 2335 2336 /* clear cs_change and delay_usecs for all but the last */ 2337 if (i) { 2338 xfer->cs_change = false; 2339 xfer->delay_usecs = 0; 2340 } 2341 } 2342 2343 /* set up inserted */ 2344 rxfer->inserted = insert; 2345 2346 /* and register it with spi_res/spi_message */ 2347 spi_res_add(msg, rxfer); 2348 2349 return rxfer; 2350 } 2351 EXPORT_SYMBOL_GPL(spi_replace_transfers); 2352 2353 static int __spi_split_transfer_maxsize(struct spi_master *master, 2354 struct spi_message *msg, 2355 struct spi_transfer **xferp, 2356 size_t maxsize, 2357 gfp_t gfp) 2358 { 2359 struct spi_transfer *xfer = *xferp, *xfers; 2360 struct spi_replaced_transfers *srt; 2361 size_t offset; 2362 size_t count, i; 2363 2364 /* warn once about this fact that we are splitting a transfer */ 2365 dev_warn_once(&msg->spi->dev, 2366 "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n", 2367 xfer->len, maxsize); 2368 2369 /* calculate how many we have to replace */ 2370 count = DIV_ROUND_UP(xfer->len, maxsize); 2371 2372 /* create replacement */ 2373 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp); 2374 if (IS_ERR(srt)) 2375 return PTR_ERR(srt); 2376 xfers = srt->inserted_transfers; 2377 2378 /* now handle each of those newly inserted spi_transfers 2379 * note that the replacements spi_transfers all are preset 2380 * to the same values as *xferp, so tx_buf, rx_buf and len 2381 * are all identical (as well as most others) 2382 * so we just have to fix up len and the pointers. 2383 * 2384 * this also includes support for the depreciated 2385 * spi_message.is_dma_mapped interface 2386 */ 2387 2388 /* the first transfer just needs the length modified, so we 2389 * run it outside the loop 2390 */ 2391 xfers[0].len = min_t(size_t, maxsize, xfer[0].len); 2392 2393 /* all the others need rx_buf/tx_buf also set */ 2394 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) { 2395 /* update rx_buf, tx_buf and dma */ 2396 if (xfers[i].rx_buf) 2397 xfers[i].rx_buf += offset; 2398 if (xfers[i].rx_dma) 2399 xfers[i].rx_dma += offset; 2400 if (xfers[i].tx_buf) 2401 xfers[i].tx_buf += offset; 2402 if (xfers[i].tx_dma) 2403 xfers[i].tx_dma += offset; 2404 2405 /* update length */ 2406 xfers[i].len = min(maxsize, xfers[i].len - offset); 2407 } 2408 2409 /* we set up xferp to the last entry we have inserted, 2410 * so that we skip those already split transfers 2411 */ 2412 *xferp = &xfers[count - 1]; 2413 2414 /* increment statistics counters */ 2415 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, 2416 transfers_split_maxsize); 2417 SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics, 2418 transfers_split_maxsize); 2419 2420 return 0; 2421 } 2422 2423 /** 2424 * spi_split_tranfers_maxsize - split spi transfers into multiple transfers 2425 * when an individual transfer exceeds a 2426 * certain size 2427 * @master: the @spi_master for this transfer 2428 * @msg: the @spi_message to transform 2429 * @maxsize: the maximum when to apply this 2430 * @gfp: GFP allocation flags 2431 * 2432 * Return: status of transformation 2433 */ 2434 int spi_split_transfers_maxsize(struct spi_master *master, 2435 struct spi_message *msg, 2436 size_t maxsize, 2437 gfp_t gfp) 2438 { 2439 struct spi_transfer *xfer; 2440 int ret; 2441 2442 /* iterate over the transfer_list, 2443 * but note that xfer is advanced to the last transfer inserted 2444 * to avoid checking sizes again unnecessarily (also xfer does 2445 * potentiall belong to a different list by the time the 2446 * replacement has happened 2447 */ 2448 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 2449 if (xfer->len > maxsize) { 2450 ret = __spi_split_transfer_maxsize( 2451 master, msg, &xfer, maxsize, gfp); 2452 if (ret) 2453 return ret; 2454 } 2455 } 2456 2457 return 0; 2458 } 2459 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize); 2460 2461 /*-------------------------------------------------------------------------*/ 2462 2463 /* Core methods for SPI master protocol drivers. Some of the 2464 * other core methods are currently defined as inline functions. 2465 */ 2466 2467 static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word) 2468 { 2469 if (master->bits_per_word_mask) { 2470 /* Only 32 bits fit in the mask */ 2471 if (bits_per_word > 32) 2472 return -EINVAL; 2473 if (!(master->bits_per_word_mask & 2474 SPI_BPW_MASK(bits_per_word))) 2475 return -EINVAL; 2476 } 2477 2478 return 0; 2479 } 2480 2481 /** 2482 * spi_setup - setup SPI mode and clock rate 2483 * @spi: the device whose settings are being modified 2484 * Context: can sleep, and no requests are queued to the device 2485 * 2486 * SPI protocol drivers may need to update the transfer mode if the 2487 * device doesn't work with its default. They may likewise need 2488 * to update clock rates or word sizes from initial values. This function 2489 * changes those settings, and must be called from a context that can sleep. 2490 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take 2491 * effect the next time the device is selected and data is transferred to 2492 * or from it. When this function returns, the spi device is deselected. 2493 * 2494 * Note that this call will fail if the protocol driver specifies an option 2495 * that the underlying controller or its driver does not support. For 2496 * example, not all hardware supports wire transfers using nine bit words, 2497 * LSB-first wire encoding, or active-high chipselects. 2498 * 2499 * Return: zero on success, else a negative error code. 2500 */ 2501 int spi_setup(struct spi_device *spi) 2502 { 2503 unsigned bad_bits, ugly_bits; 2504 int status; 2505 2506 /* check mode to prevent that DUAL and QUAD set at the same time 2507 */ 2508 if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) || 2509 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) { 2510 dev_err(&spi->dev, 2511 "setup: can not select dual and quad at the same time\n"); 2512 return -EINVAL; 2513 } 2514 /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden 2515 */ 2516 if ((spi->mode & SPI_3WIRE) && (spi->mode & 2517 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD))) 2518 return -EINVAL; 2519 /* help drivers fail *cleanly* when they need options 2520 * that aren't supported with their current master 2521 */ 2522 bad_bits = spi->mode & ~spi->master->mode_bits; 2523 ugly_bits = bad_bits & 2524 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD); 2525 if (ugly_bits) { 2526 dev_warn(&spi->dev, 2527 "setup: ignoring unsupported mode bits %x\n", 2528 ugly_bits); 2529 spi->mode &= ~ugly_bits; 2530 bad_bits &= ~ugly_bits; 2531 } 2532 if (bad_bits) { 2533 dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 2534 bad_bits); 2535 return -EINVAL; 2536 } 2537 2538 if (!spi->bits_per_word) 2539 spi->bits_per_word = 8; 2540 2541 status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word); 2542 if (status) 2543 return status; 2544 2545 if (!spi->max_speed_hz) 2546 spi->max_speed_hz = spi->master->max_speed_hz; 2547 2548 if (spi->master->setup) 2549 status = spi->master->setup(spi); 2550 2551 spi_set_cs(spi, false); 2552 2553 dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n", 2554 (int) (spi->mode & (SPI_CPOL | SPI_CPHA)), 2555 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 2556 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 2557 (spi->mode & SPI_3WIRE) ? "3wire, " : "", 2558 (spi->mode & SPI_LOOP) ? "loopback, " : "", 2559 spi->bits_per_word, spi->max_speed_hz, 2560 status); 2561 2562 return status; 2563 } 2564 EXPORT_SYMBOL_GPL(spi_setup); 2565 2566 static int __spi_validate(struct spi_device *spi, struct spi_message *message) 2567 { 2568 struct spi_master *master = spi->master; 2569 struct spi_transfer *xfer; 2570 int w_size; 2571 2572 if (list_empty(&message->transfers)) 2573 return -EINVAL; 2574 2575 /* Half-duplex links include original MicroWire, and ones with 2576 * only one data pin like SPI_3WIRE (switches direction) or where 2577 * either MOSI or MISO is missing. They can also be caused by 2578 * software limitations. 2579 */ 2580 if ((master->flags & SPI_MASTER_HALF_DUPLEX) 2581 || (spi->mode & SPI_3WIRE)) { 2582 unsigned flags = master->flags; 2583 2584 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2585 if (xfer->rx_buf && xfer->tx_buf) 2586 return -EINVAL; 2587 if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) 2588 return -EINVAL; 2589 if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) 2590 return -EINVAL; 2591 } 2592 } 2593 2594 /** 2595 * Set transfer bits_per_word and max speed as spi device default if 2596 * it is not set for this transfer. 2597 * Set transfer tx_nbits and rx_nbits as single transfer default 2598 * (SPI_NBITS_SINGLE) if it is not set for this transfer. 2599 */ 2600 message->frame_length = 0; 2601 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2602 message->frame_length += xfer->len; 2603 if (!xfer->bits_per_word) 2604 xfer->bits_per_word = spi->bits_per_word; 2605 2606 if (!xfer->speed_hz) 2607 xfer->speed_hz = spi->max_speed_hz; 2608 if (!xfer->speed_hz) 2609 xfer->speed_hz = master->max_speed_hz; 2610 2611 if (master->max_speed_hz && 2612 xfer->speed_hz > master->max_speed_hz) 2613 xfer->speed_hz = master->max_speed_hz; 2614 2615 if (__spi_validate_bits_per_word(master, xfer->bits_per_word)) 2616 return -EINVAL; 2617 2618 /* 2619 * SPI transfer length should be multiple of SPI word size 2620 * where SPI word size should be power-of-two multiple 2621 */ 2622 if (xfer->bits_per_word <= 8) 2623 w_size = 1; 2624 else if (xfer->bits_per_word <= 16) 2625 w_size = 2; 2626 else 2627 w_size = 4; 2628 2629 /* No partial transfers accepted */ 2630 if (xfer->len % w_size) 2631 return -EINVAL; 2632 2633 if (xfer->speed_hz && master->min_speed_hz && 2634 xfer->speed_hz < master->min_speed_hz) 2635 return -EINVAL; 2636 2637 if (xfer->tx_buf && !xfer->tx_nbits) 2638 xfer->tx_nbits = SPI_NBITS_SINGLE; 2639 if (xfer->rx_buf && !xfer->rx_nbits) 2640 xfer->rx_nbits = SPI_NBITS_SINGLE; 2641 /* check transfer tx/rx_nbits: 2642 * 1. check the value matches one of single, dual and quad 2643 * 2. check tx/rx_nbits match the mode in spi_device 2644 */ 2645 if (xfer->tx_buf) { 2646 if (xfer->tx_nbits != SPI_NBITS_SINGLE && 2647 xfer->tx_nbits != SPI_NBITS_DUAL && 2648 xfer->tx_nbits != SPI_NBITS_QUAD) 2649 return -EINVAL; 2650 if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 2651 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 2652 return -EINVAL; 2653 if ((xfer->tx_nbits == SPI_NBITS_QUAD) && 2654 !(spi->mode & SPI_TX_QUAD)) 2655 return -EINVAL; 2656 } 2657 /* check transfer rx_nbits */ 2658 if (xfer->rx_buf) { 2659 if (xfer->rx_nbits != SPI_NBITS_SINGLE && 2660 xfer->rx_nbits != SPI_NBITS_DUAL && 2661 xfer->rx_nbits != SPI_NBITS_QUAD) 2662 return -EINVAL; 2663 if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 2664 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 2665 return -EINVAL; 2666 if ((xfer->rx_nbits == SPI_NBITS_QUAD) && 2667 !(spi->mode & SPI_RX_QUAD)) 2668 return -EINVAL; 2669 } 2670 } 2671 2672 message->status = -EINPROGRESS; 2673 2674 return 0; 2675 } 2676 2677 static int __spi_async(struct spi_device *spi, struct spi_message *message) 2678 { 2679 struct spi_master *master = spi->master; 2680 2681 message->spi = spi; 2682 2683 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async); 2684 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); 2685 2686 trace_spi_message_submit(message); 2687 2688 return master->transfer(spi, message); 2689 } 2690 2691 /** 2692 * spi_async - asynchronous SPI transfer 2693 * @spi: device with which data will be exchanged 2694 * @message: describes the data transfers, including completion callback 2695 * Context: any (irqs may be blocked, etc) 2696 * 2697 * This call may be used in_irq and other contexts which can't sleep, 2698 * as well as from task contexts which can sleep. 2699 * 2700 * The completion callback is invoked in a context which can't sleep. 2701 * Before that invocation, the value of message->status is undefined. 2702 * When the callback is issued, message->status holds either zero (to 2703 * indicate complete success) or a negative error code. After that 2704 * callback returns, the driver which issued the transfer request may 2705 * deallocate the associated memory; it's no longer in use by any SPI 2706 * core or controller driver code. 2707 * 2708 * Note that although all messages to a spi_device are handled in 2709 * FIFO order, messages may go to different devices in other orders. 2710 * Some device might be higher priority, or have various "hard" access 2711 * time requirements, for example. 2712 * 2713 * On detection of any fault during the transfer, processing of 2714 * the entire message is aborted, and the device is deselected. 2715 * Until returning from the associated message completion callback, 2716 * no other spi_message queued to that device will be processed. 2717 * (This rule applies equally to all the synchronous transfer calls, 2718 * which are wrappers around this core asynchronous primitive.) 2719 * 2720 * Return: zero on success, else a negative error code. 2721 */ 2722 int spi_async(struct spi_device *spi, struct spi_message *message) 2723 { 2724 struct spi_master *master = spi->master; 2725 int ret; 2726 unsigned long flags; 2727 2728 ret = __spi_validate(spi, message); 2729 if (ret != 0) 2730 return ret; 2731 2732 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2733 2734 if (master->bus_lock_flag) 2735 ret = -EBUSY; 2736 else 2737 ret = __spi_async(spi, message); 2738 2739 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2740 2741 return ret; 2742 } 2743 EXPORT_SYMBOL_GPL(spi_async); 2744 2745 /** 2746 * spi_async_locked - version of spi_async with exclusive bus usage 2747 * @spi: device with which data will be exchanged 2748 * @message: describes the data transfers, including completion callback 2749 * Context: any (irqs may be blocked, etc) 2750 * 2751 * This call may be used in_irq and other contexts which can't sleep, 2752 * as well as from task contexts which can sleep. 2753 * 2754 * The completion callback is invoked in a context which can't sleep. 2755 * Before that invocation, the value of message->status is undefined. 2756 * When the callback is issued, message->status holds either zero (to 2757 * indicate complete success) or a negative error code. After that 2758 * callback returns, the driver which issued the transfer request may 2759 * deallocate the associated memory; it's no longer in use by any SPI 2760 * core or controller driver code. 2761 * 2762 * Note that although all messages to a spi_device are handled in 2763 * FIFO order, messages may go to different devices in other orders. 2764 * Some device might be higher priority, or have various "hard" access 2765 * time requirements, for example. 2766 * 2767 * On detection of any fault during the transfer, processing of 2768 * the entire message is aborted, and the device is deselected. 2769 * Until returning from the associated message completion callback, 2770 * no other spi_message queued to that device will be processed. 2771 * (This rule applies equally to all the synchronous transfer calls, 2772 * which are wrappers around this core asynchronous primitive.) 2773 * 2774 * Return: zero on success, else a negative error code. 2775 */ 2776 int spi_async_locked(struct spi_device *spi, struct spi_message *message) 2777 { 2778 struct spi_master *master = spi->master; 2779 int ret; 2780 unsigned long flags; 2781 2782 ret = __spi_validate(spi, message); 2783 if (ret != 0) 2784 return ret; 2785 2786 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2787 2788 ret = __spi_async(spi, message); 2789 2790 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2791 2792 return ret; 2793 2794 } 2795 EXPORT_SYMBOL_GPL(spi_async_locked); 2796 2797 2798 int spi_flash_read(struct spi_device *spi, 2799 struct spi_flash_read_message *msg) 2800 2801 { 2802 struct spi_master *master = spi->master; 2803 struct device *rx_dev = NULL; 2804 int ret; 2805 2806 if ((msg->opcode_nbits == SPI_NBITS_DUAL || 2807 msg->addr_nbits == SPI_NBITS_DUAL) && 2808 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 2809 return -EINVAL; 2810 if ((msg->opcode_nbits == SPI_NBITS_QUAD || 2811 msg->addr_nbits == SPI_NBITS_QUAD) && 2812 !(spi->mode & SPI_TX_QUAD)) 2813 return -EINVAL; 2814 if (msg->data_nbits == SPI_NBITS_DUAL && 2815 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 2816 return -EINVAL; 2817 if (msg->data_nbits == SPI_NBITS_QUAD && 2818 !(spi->mode & SPI_RX_QUAD)) 2819 return -EINVAL; 2820 2821 if (master->auto_runtime_pm) { 2822 ret = pm_runtime_get_sync(master->dev.parent); 2823 if (ret < 0) { 2824 dev_err(&master->dev, "Failed to power device: %d\n", 2825 ret); 2826 return ret; 2827 } 2828 } 2829 2830 mutex_lock(&master->bus_lock_mutex); 2831 mutex_lock(&master->io_mutex); 2832 if (master->dma_rx) { 2833 rx_dev = master->dma_rx->device->dev; 2834 ret = spi_map_buf(master, rx_dev, &msg->rx_sg, 2835 msg->buf, msg->len, 2836 DMA_FROM_DEVICE); 2837 if (!ret) 2838 msg->cur_msg_mapped = true; 2839 } 2840 ret = master->spi_flash_read(spi, msg); 2841 if (msg->cur_msg_mapped) 2842 spi_unmap_buf(master, rx_dev, &msg->rx_sg, 2843 DMA_FROM_DEVICE); 2844 mutex_unlock(&master->io_mutex); 2845 mutex_unlock(&master->bus_lock_mutex); 2846 2847 if (master->auto_runtime_pm) 2848 pm_runtime_put(master->dev.parent); 2849 2850 return ret; 2851 } 2852 EXPORT_SYMBOL_GPL(spi_flash_read); 2853 2854 /*-------------------------------------------------------------------------*/ 2855 2856 /* Utility methods for SPI master protocol drivers, layered on 2857 * top of the core. Some other utility methods are defined as 2858 * inline functions. 2859 */ 2860 2861 static void spi_complete(void *arg) 2862 { 2863 complete(arg); 2864 } 2865 2866 static int __spi_sync(struct spi_device *spi, struct spi_message *message) 2867 { 2868 DECLARE_COMPLETION_ONSTACK(done); 2869 int status; 2870 struct spi_master *master = spi->master; 2871 unsigned long flags; 2872 2873 status = __spi_validate(spi, message); 2874 if (status != 0) 2875 return status; 2876 2877 message->complete = spi_complete; 2878 message->context = &done; 2879 message->spi = spi; 2880 2881 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync); 2882 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync); 2883 2884 /* If we're not using the legacy transfer method then we will 2885 * try to transfer in the calling context so special case. 2886 * This code would be less tricky if we could remove the 2887 * support for driver implemented message queues. 2888 */ 2889 if (master->transfer == spi_queued_transfer) { 2890 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2891 2892 trace_spi_message_submit(message); 2893 2894 status = __spi_queued_transfer(spi, message, false); 2895 2896 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2897 } else { 2898 status = spi_async_locked(spi, message); 2899 } 2900 2901 if (status == 0) { 2902 /* Push out the messages in the calling context if we 2903 * can. 2904 */ 2905 if (master->transfer == spi_queued_transfer) { 2906 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, 2907 spi_sync_immediate); 2908 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, 2909 spi_sync_immediate); 2910 __spi_pump_messages(master, false); 2911 } 2912 2913 wait_for_completion(&done); 2914 status = message->status; 2915 } 2916 message->context = NULL; 2917 return status; 2918 } 2919 2920 /** 2921 * spi_sync - blocking/synchronous SPI data transfers 2922 * @spi: device with which data will be exchanged 2923 * @message: describes the data transfers 2924 * Context: can sleep 2925 * 2926 * This call may only be used from a context that may sleep. The sleep 2927 * is non-interruptible, and has no timeout. Low-overhead controller 2928 * drivers may DMA directly into and out of the message buffers. 2929 * 2930 * Note that the SPI device's chip select is active during the message, 2931 * and then is normally disabled between messages. Drivers for some 2932 * frequently-used devices may want to minimize costs of selecting a chip, 2933 * by leaving it selected in anticipation that the next message will go 2934 * to the same chip. (That may increase power usage.) 2935 * 2936 * Also, the caller is guaranteeing that the memory associated with the 2937 * message will not be freed before this call returns. 2938 * 2939 * Return: zero on success, else a negative error code. 2940 */ 2941 int spi_sync(struct spi_device *spi, struct spi_message *message) 2942 { 2943 int ret; 2944 2945 mutex_lock(&spi->master->bus_lock_mutex); 2946 ret = __spi_sync(spi, message); 2947 mutex_unlock(&spi->master->bus_lock_mutex); 2948 2949 return ret; 2950 } 2951 EXPORT_SYMBOL_GPL(spi_sync); 2952 2953 /** 2954 * spi_sync_locked - version of spi_sync with exclusive bus usage 2955 * @spi: device with which data will be exchanged 2956 * @message: describes the data transfers 2957 * Context: can sleep 2958 * 2959 * This call may only be used from a context that may sleep. The sleep 2960 * is non-interruptible, and has no timeout. Low-overhead controller 2961 * drivers may DMA directly into and out of the message buffers. 2962 * 2963 * This call should be used by drivers that require exclusive access to the 2964 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must 2965 * be released by a spi_bus_unlock call when the exclusive access is over. 2966 * 2967 * Return: zero on success, else a negative error code. 2968 */ 2969 int spi_sync_locked(struct spi_device *spi, struct spi_message *message) 2970 { 2971 return __spi_sync(spi, message); 2972 } 2973 EXPORT_SYMBOL_GPL(spi_sync_locked); 2974 2975 /** 2976 * spi_bus_lock - obtain a lock for exclusive SPI bus usage 2977 * @master: SPI bus master that should be locked for exclusive bus access 2978 * Context: can sleep 2979 * 2980 * This call may only be used from a context that may sleep. The sleep 2981 * is non-interruptible, and has no timeout. 2982 * 2983 * This call should be used by drivers that require exclusive access to the 2984 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the 2985 * exclusive access is over. Data transfer must be done by spi_sync_locked 2986 * and spi_async_locked calls when the SPI bus lock is held. 2987 * 2988 * Return: always zero. 2989 */ 2990 int spi_bus_lock(struct spi_master *master) 2991 { 2992 unsigned long flags; 2993 2994 mutex_lock(&master->bus_lock_mutex); 2995 2996 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2997 master->bus_lock_flag = 1; 2998 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2999 3000 /* mutex remains locked until spi_bus_unlock is called */ 3001 3002 return 0; 3003 } 3004 EXPORT_SYMBOL_GPL(spi_bus_lock); 3005 3006 /** 3007 * spi_bus_unlock - release the lock for exclusive SPI bus usage 3008 * @master: SPI bus master that was locked for exclusive bus access 3009 * Context: can sleep 3010 * 3011 * This call may only be used from a context that may sleep. The sleep 3012 * is non-interruptible, and has no timeout. 3013 * 3014 * This call releases an SPI bus lock previously obtained by an spi_bus_lock 3015 * call. 3016 * 3017 * Return: always zero. 3018 */ 3019 int spi_bus_unlock(struct spi_master *master) 3020 { 3021 master->bus_lock_flag = 0; 3022 3023 mutex_unlock(&master->bus_lock_mutex); 3024 3025 return 0; 3026 } 3027 EXPORT_SYMBOL_GPL(spi_bus_unlock); 3028 3029 /* portable code must never pass more than 32 bytes */ 3030 #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) 3031 3032 static u8 *buf; 3033 3034 /** 3035 * spi_write_then_read - SPI synchronous write followed by read 3036 * @spi: device with which data will be exchanged 3037 * @txbuf: data to be written (need not be dma-safe) 3038 * @n_tx: size of txbuf, in bytes 3039 * @rxbuf: buffer into which data will be read (need not be dma-safe) 3040 * @n_rx: size of rxbuf, in bytes 3041 * Context: can sleep 3042 * 3043 * This performs a half duplex MicroWire style transaction with the 3044 * device, sending txbuf and then reading rxbuf. The return value 3045 * is zero for success, else a negative errno status code. 3046 * This call may only be used from a context that may sleep. 3047 * 3048 * Parameters to this routine are always copied using a small buffer; 3049 * portable code should never use this for more than 32 bytes. 3050 * Performance-sensitive or bulk transfer code should instead use 3051 * spi_{async,sync}() calls with dma-safe buffers. 3052 * 3053 * Return: zero on success, else a negative error code. 3054 */ 3055 int spi_write_then_read(struct spi_device *spi, 3056 const void *txbuf, unsigned n_tx, 3057 void *rxbuf, unsigned n_rx) 3058 { 3059 static DEFINE_MUTEX(lock); 3060 3061 int status; 3062 struct spi_message message; 3063 struct spi_transfer x[2]; 3064 u8 *local_buf; 3065 3066 /* Use preallocated DMA-safe buffer if we can. We can't avoid 3067 * copying here, (as a pure convenience thing), but we can 3068 * keep heap costs out of the hot path unless someone else is 3069 * using the pre-allocated buffer or the transfer is too large. 3070 */ 3071 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { 3072 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), 3073 GFP_KERNEL | GFP_DMA); 3074 if (!local_buf) 3075 return -ENOMEM; 3076 } else { 3077 local_buf = buf; 3078 } 3079 3080 spi_message_init(&message); 3081 memset(x, 0, sizeof(x)); 3082 if (n_tx) { 3083 x[0].len = n_tx; 3084 spi_message_add_tail(&x[0], &message); 3085 } 3086 if (n_rx) { 3087 x[1].len = n_rx; 3088 spi_message_add_tail(&x[1], &message); 3089 } 3090 3091 memcpy(local_buf, txbuf, n_tx); 3092 x[0].tx_buf = local_buf; 3093 x[1].rx_buf = local_buf + n_tx; 3094 3095 /* do the i/o */ 3096 status = spi_sync(spi, &message); 3097 if (status == 0) 3098 memcpy(rxbuf, x[1].rx_buf, n_rx); 3099 3100 if (x[0].tx_buf == buf) 3101 mutex_unlock(&lock); 3102 else 3103 kfree(local_buf); 3104 3105 return status; 3106 } 3107 EXPORT_SYMBOL_GPL(spi_write_then_read); 3108 3109 /*-------------------------------------------------------------------------*/ 3110 3111 #if IS_ENABLED(CONFIG_OF_DYNAMIC) 3112 static int __spi_of_device_match(struct device *dev, void *data) 3113 { 3114 return dev->of_node == data; 3115 } 3116 3117 /* must call put_device() when done with returned spi_device device */ 3118 static struct spi_device *of_find_spi_device_by_node(struct device_node *node) 3119 { 3120 struct device *dev = bus_find_device(&spi_bus_type, NULL, node, 3121 __spi_of_device_match); 3122 return dev ? to_spi_device(dev) : NULL; 3123 } 3124 3125 static int __spi_of_master_match(struct device *dev, const void *data) 3126 { 3127 return dev->of_node == data; 3128 } 3129 3130 /* the spi masters are not using spi_bus, so we find it with another way */ 3131 static struct spi_master *of_find_spi_master_by_node(struct device_node *node) 3132 { 3133 struct device *dev; 3134 3135 dev = class_find_device(&spi_master_class, NULL, node, 3136 __spi_of_master_match); 3137 if (!dev) 3138 return NULL; 3139 3140 /* reference got in class_find_device */ 3141 return container_of(dev, struct spi_master, dev); 3142 } 3143 3144 static int of_spi_notify(struct notifier_block *nb, unsigned long action, 3145 void *arg) 3146 { 3147 struct of_reconfig_data *rd = arg; 3148 struct spi_master *master; 3149 struct spi_device *spi; 3150 3151 switch (of_reconfig_get_state_change(action, arg)) { 3152 case OF_RECONFIG_CHANGE_ADD: 3153 master = of_find_spi_master_by_node(rd->dn->parent); 3154 if (master == NULL) 3155 return NOTIFY_OK; /* not for us */ 3156 3157 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { 3158 put_device(&master->dev); 3159 return NOTIFY_OK; 3160 } 3161 3162 spi = of_register_spi_device(master, rd->dn); 3163 put_device(&master->dev); 3164 3165 if (IS_ERR(spi)) { 3166 pr_err("%s: failed to create for '%s'\n", 3167 __func__, rd->dn->full_name); 3168 of_node_clear_flag(rd->dn, OF_POPULATED); 3169 return notifier_from_errno(PTR_ERR(spi)); 3170 } 3171 break; 3172 3173 case OF_RECONFIG_CHANGE_REMOVE: 3174 /* already depopulated? */ 3175 if (!of_node_check_flag(rd->dn, OF_POPULATED)) 3176 return NOTIFY_OK; 3177 3178 /* find our device by node */ 3179 spi = of_find_spi_device_by_node(rd->dn); 3180 if (spi == NULL) 3181 return NOTIFY_OK; /* no? not meant for us */ 3182 3183 /* unregister takes one ref away */ 3184 spi_unregister_device(spi); 3185 3186 /* and put the reference of the find */ 3187 put_device(&spi->dev); 3188 break; 3189 } 3190 3191 return NOTIFY_OK; 3192 } 3193 3194 static struct notifier_block spi_of_notifier = { 3195 .notifier_call = of_spi_notify, 3196 }; 3197 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 3198 extern struct notifier_block spi_of_notifier; 3199 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 3200 3201 #if IS_ENABLED(CONFIG_ACPI) 3202 static int spi_acpi_master_match(struct device *dev, const void *data) 3203 { 3204 return ACPI_COMPANION(dev->parent) == data; 3205 } 3206 3207 static int spi_acpi_device_match(struct device *dev, void *data) 3208 { 3209 return ACPI_COMPANION(dev) == data; 3210 } 3211 3212 static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev) 3213 { 3214 struct device *dev; 3215 3216 dev = class_find_device(&spi_master_class, NULL, adev, 3217 spi_acpi_master_match); 3218 if (!dev) 3219 return NULL; 3220 3221 return container_of(dev, struct spi_master, dev); 3222 } 3223 3224 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) 3225 { 3226 struct device *dev; 3227 3228 dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match); 3229 3230 return dev ? to_spi_device(dev) : NULL; 3231 } 3232 3233 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value, 3234 void *arg) 3235 { 3236 struct acpi_device *adev = arg; 3237 struct spi_master *master; 3238 struct spi_device *spi; 3239 3240 switch (value) { 3241 case ACPI_RECONFIG_DEVICE_ADD: 3242 master = acpi_spi_find_master_by_adev(adev->parent); 3243 if (!master) 3244 break; 3245 3246 acpi_register_spi_device(master, adev); 3247 put_device(&master->dev); 3248 break; 3249 case ACPI_RECONFIG_DEVICE_REMOVE: 3250 if (!acpi_device_enumerated(adev)) 3251 break; 3252 3253 spi = acpi_spi_find_device_by_adev(adev); 3254 if (!spi) 3255 break; 3256 3257 spi_unregister_device(spi); 3258 put_device(&spi->dev); 3259 break; 3260 } 3261 3262 return NOTIFY_OK; 3263 } 3264 3265 static struct notifier_block spi_acpi_notifier = { 3266 .notifier_call = acpi_spi_notify, 3267 }; 3268 #else 3269 extern struct notifier_block spi_acpi_notifier; 3270 #endif 3271 3272 static int __init spi_init(void) 3273 { 3274 int status; 3275 3276 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 3277 if (!buf) { 3278 status = -ENOMEM; 3279 goto err0; 3280 } 3281 3282 status = bus_register(&spi_bus_type); 3283 if (status < 0) 3284 goto err1; 3285 3286 status = class_register(&spi_master_class); 3287 if (status < 0) 3288 goto err2; 3289 3290 if (IS_ENABLED(CONFIG_OF_DYNAMIC)) 3291 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); 3292 if (IS_ENABLED(CONFIG_ACPI)) 3293 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier)); 3294 3295 return 0; 3296 3297 err2: 3298 bus_unregister(&spi_bus_type); 3299 err1: 3300 kfree(buf); 3301 buf = NULL; 3302 err0: 3303 return status; 3304 } 3305 3306 /* board_info is normally registered in arch_initcall(), 3307 * but even essential drivers wait till later 3308 * 3309 * REVISIT only boardinfo really needs static linking. the rest (device and 3310 * driver registration) _could_ be dynamically linked (modular) ... costs 3311 * include needing to have boardinfo data structures be much more public. 3312 */ 3313 postcore_initcall(spi_init); 3314 3315