/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_master_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_master *master = container_of(dev,			\
						 struct spi_master, dev); \
	return spi_statistics_##field##_show(&master->statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_master_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_master_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)
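/*
 * For orientation (not compiled separately): a single invocation like
 * SPI_STATISTICS_SHOW(messages, "%lu") below expands, roughly, to a
 * locked formatter
 *
 *	static ssize_t spi_statistics_messages_show(
 *			struct spi_statistics *stat, char *buf)
 *	{ take stat->lock; len = sprintf(buf, "%lu", stat->messages); ... }
 *
 * plus, via SPI_STATISTICS_ATTRS(), the two sysfs wrappers
 * spi_master_messages_show() and spi_device_messages_show() together
 * with their struct device_attribute instances,
 * dev_attr_spi_master_messages and dev_attr_spi_device_messages, which
 * the attribute arrays further down collect under the "statistics"
 * sysfs group.
 */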
SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_master_statistics_attrs[] = {
	&dev_attr_spi_master_messages.attr,
	&dev_attr_spi_master_transfers.attr,
	&dev_attr_spi_master_errors.attr,
	&dev_attr_spi_master_timedout.attr,
	&dev_attr_spi_master_spi_sync.attr,
	&dev_attr_spi_master_spi_sync_immediate.attr,
	&dev_attr_spi_master_spi_async.attr,
	&dev_attr_spi_master_bytes.attr,
	&dev_attr_spi_master_bytes_rx.attr,
	&dev_attr_spi_master_bytes_tx.attr,
	&dev_attr_spi_master_transfer_bytes_histo0.attr,
	&dev_attr_spi_master_transfer_bytes_histo1.attr,
	&dev_attr_spi_master_transfer_bytes_histo2.attr,
	&dev_attr_spi_master_transfer_bytes_histo3.attr,
	&dev_attr_spi_master_transfer_bytes_histo4.attr,
	&dev_attr_spi_master_transfer_bytes_histo5.attr,
	&dev_attr_spi_master_transfer_bytes_histo6.attr,
	&dev_attr_spi_master_transfer_bytes_histo7.attr,
	&dev_attr_spi_master_transfer_bytes_histo8.attr,
	&dev_attr_spi_master_transfer_bytes_histo9.attr,
	&dev_attr_spi_master_transfer_bytes_histo10.attr,
	&dev_attr_spi_master_transfer_bytes_histo11.attr,
	&dev_attr_spi_master_transfer_bytes_histo12.attr,
	&dev_attr_spi_master_transfer_bytes_histo13.attr,
	&dev_attr_spi_master_transfer_bytes_histo14.attr,
	&dev_attr_spi_master_transfer_bytes_histo15.attr,
	&dev_attr_spi_master_transfer_bytes_histo16.attr,
	&dev_attr_spi_master_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_master_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_master_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_master *master)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != master->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != master->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
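/*
 * Bucketing examples for the histogram update above: l2len is the bit
 * length of xfer->len, clamped to the table size.  A 1-byte transfer
 * has fls(1) = 1, so l2len = 0 and it lands in "0-1"; a 200-byte
 * transfer has fls(200) = 8, so l2len = 7 and it lands in "128-255";
 * anything of 65536 bytes or more is clamped into the final "65536+"
 * bucket, and a zero-length transfer is clamped up into bucket 0.
 */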
/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);

/*-------------------------------------------------------------------------*/
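/*
 * A minimal client-driver sketch (hypothetical names, not part of this
 * file) showing how the registration above is normally reached:
 *
 *	static int mydev_probe(struct spi_device *spi)
 *	{
 *		...
 *		return 0;
 *	}
 *
 *	static struct spi_driver mydev_driver = {
 *		.driver = { .name = "mydev" },
 *		.probe	= mydev_probe,
 *	};
 *	module_spi_driver(mydev_driver);
 *
 * module_spi_driver() supplies module_init()/module_exit() bodies that
 * call spi_register_driver()/spi_unregister_driver(), which end up in
 * __spi_register_driver() above.
 */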
/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into files like
 * arch/.../mach.../board-YYY.c with other readonly (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operations for the board_info list and the
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
			spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);
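/*
 * Usage sketch (hypothetical adapter driver): the manual two-step
 * alternative to spi_new_device(), when fields must be set directly:
 *
 *	struct spi_device *spi = spi_alloc_device(master);
 *	if (!spi)
 *		return -ENOMEM;
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	strlcpy(spi->modalias, "mydev", sizeof(spi->modalias));
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);	// discard, never registered
 *		return -ENODEV;
 *	}
 */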
/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish.  Normally this would be handled
 * by spi_unregister_master().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node)
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
					  struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return -EINVAL;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master,
						      &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
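/*
 * Typical board-file usage (sketch; the device and values are
 * illustrative only):
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "m25p80",
 *			.max_speed_hz	= 20000000,
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */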
/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (gpio_is_valid(spi->cs_gpio)) {
		gpio_set_value(spi->cs_gpio, !enable);
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->master->flags & SPI_MASTER_GPIO_SS) &&
		    spi->master->set_cs)
			spi->master->set_cs(spi, !enable);
	} else if (spi->master->set_cs) {
		spi->master->set_cs(spi, !enable);
	}
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(int, max_seg_size, master->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			min = min_t(size_t,
				    len, desc_len - offset_in_page(buf));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = &master->dev;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = &master->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || !master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = &master->dev;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = &master->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int spi_map_buf(struct spi_master *master,
			      struct device *dev, struct sg_table *sgt,
			      void *buf, size_t len,
			      enum dma_data_direction dir)
{
	return -EINVAL;
}
static inline void spi_unmap_buf(struct spi_master *master,
				 struct device *dev, struct sg_table *sgt,
				 enum dma_data_direction dir)
{
}

static inline int __spi_map_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_master *master,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore tx_buf and rx_buf to NULL if they were pointed
		 * at the shared dummy buffers (i.e. were originally NULL).
		 */
		if (xfer->tx_buf == master->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == master->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(master, msg);
}

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	return __spi_map_msg(master, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long long ms = 1;
	struct spi_statistics *statm = &master->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, master);
		spi_statistics_add_transfer_stats(stats, xfer, master);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&master->xfer_completion);

			ret = master->transfer_one(master, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = 8LL * 1000LL * xfer->len;
				do_div(ms, xfer->speed_hz);
				ms += ms + 100; /* some tolerance */

				if (ms > UINT_MAX)
					ms = UINT_MAX;

				ms = wait_for_completion_timeout(&master->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       timedout);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       timedout);
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs) {
			u16 us = xfer->delay_usecs;

			if (us <= 10)
				udelay(us);
			else
				usleep_range(us, us + DIV_ROUND_UP(us, 10));
		}

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && master->handle_err)
		master->handle_err(master, msg);

	spi_res_release(master, msg);

	spi_finalize_current_message(master);

	return ret;
}
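/*
 * Worked example for the timeout math above: a 4096-byte transfer at
 * 1 MHz nominally shifts 8 * 1000 * 4096 / 1000000 = 32 ms worth of
 * bits, so the code waits 32 + 32 + 100 = 164 ms before declaring
 * -ETIMEDOUT.  A transfer_one() return value > 0 is what opts a driver
 * into this wait; returning 0 means the transfer already finished.
 */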
/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
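/*
 * Driver-side sketch (hypothetical names): a controller's DMA-done or
 * FIFO-empty interrupt handler only needs to kick the core:
 *
 *	static irqreturn_t mydrv_irq(int irq, void *dev_id)
 *	{
 *		struct spi_master *master = dev_id;
 *
 *		mydrv_clear_irq(master);	// hypothetical helper
 *		spi_finalize_current_transfer(master);
 *		return IRQ_HANDLED;
 *	}
 */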
/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&master->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (master->idling) {
		kthread_queue_work(&master->kworker, &master->pump_messages);
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(&master->kworker,
					   &master->pump_messages);
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		master->busy = false;
		master->idling = true;
		spin_unlock_irqrestore(&master->queue_lock, flags);

		kfree(master->dummy_rx);
		master->dummy_rx = NULL;
		kfree(master->dummy_tx);
		master->dummy_tx = NULL;
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);

		spin_lock_irqsave(&master->queue_lock, flags);
		master->idling = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	mutex_lock(&master->io_mutex);

	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&master->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
			mutex_unlock(&master->io_mutex);
			return;
		}
	}

	trace_spi_message_start(master->cur_msg);

	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			goto out;
		}
		master->cur_msg_prepared = true;
	}

	ret = spi_map_msg(master, master->cur_msg);
	if (ret) {
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
		goto out;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&master->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);

	__spi_pump_messages(master, true);
}

static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	master->running = false;
	master->busy = false;

	kthread_init_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return PTR_ERR(master->kworker_task);
	}
	kthread_init_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread.  Without
	 * this setting the message pump thread will remain at default
	 * priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			 "will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&master->queue_lock, flags);
	next = list_first_entry_or_null(&master->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	spi_unmap_msg(master, mesg);

	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}

	spin_lock_irqsave(&master->queue_lock, flags);
	master->cur_msg = NULL;
	master->cur_msg_prepared = false;
	kthread_queue_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	kthread_queue_work(&master->kworker, &master->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message.  Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret) {
		dev_warn(&master->dev,
			 "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
	int ret;

	ret = spi_stop_queue(master);

	/*
	 * kthread_flush_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&master->dev, "problem destroying queue\n");
		return ret;
	}

	kthread_flush_worker(&master->kworker);
	kthread_stop(master->kworker_task);

	return 0;
}

static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	if (!master->busy && need_pump)
		kthread_queue_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to be handled and queued onto the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	master->queued = true;
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(master);
err_init_queue:
	return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;
	u32 value;

	/* Alloc an spi_device */
	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "spi_device alloc error for %s\n",
			nc->full_name);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
			      sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&master->dev, "cannot find modalias for %s\n",
			nc->full_name);
		goto err_out;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->chip_select = value;

	/* Mode (clock phase/polarity/etc.) */
	if (of_find_property(nc, "spi-cpha", NULL))
		spi->mode |= SPI_CPHA;
	if (of_find_property(nc, "spi-cpol", NULL))
		spi->mode |= SPI_CPOL;
	if (of_find_property(nc, "spi-cs-high", NULL))
		spi->mode |= SPI_CS_HIGH;
	if (of_find_property(nc, "spi-3wire", NULL))
		spi->mode |= SPI_3WIRE;
	if (of_find_property(nc, "spi-lsb-first", NULL))
		spi->mode |= SPI_LSB_FIRST;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				 "spi-tx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				 "spi-rx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->max_speed_hz = value;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&master->dev, "spi_device register error %s\n",
			nc->full_name);
		goto err_out;
	}

	return spi;

err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}

/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(master, nc);
		if (IS_ERR(spi)) {
			dev_warn(&master->dev, "Failed to create SPI device for %s\n",
				 nc->full_name);
			of_node_clear_flag(nc, OF_POPULATED);
		}
	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif
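/*
 * Device-tree sketch of a slave node consumed by
 * of_register_spi_device() above (illustrative only):
 *
 *	&spi1 {
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;			// chip select
 *			spi-max-frequency = <20000000>;
 *			spi-cpha;
 *		};
 *	};
 */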
#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;
	struct spi_master *master = spi->master;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			/*
			 * ACPI DeviceSelection numbering is handled by the
			 * host controller driver in Windows and can vary
			 * from driver to driver.  In Linux we always expect
			 * 0 .. max - 1 so we need to ask the driver to
			 * translate between the two schemes.
			 */
			if (master->fw_translate_cs) {
				int cs = master->fw_translate_cs(master,
						sb->device_selection);
				if (cs < 0)
					return cs;
				spi->chip_select = cs;
			} else {
				spi->chip_select = sb->device_selection;
			}

			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_register_spi_device(struct spi_master *master,
					    struct acpi_device *adev)
{
	struct list_head resource_list;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_status(adev) || !adev->status.present ||
	    acpi_device_enumerated(adev))
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	acpi_device_set_enumerated(adev);

	adev->power.flags.ignore_parent = true;
	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;

	return acpi_register_spi_device(master, adev);
}

static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */

static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
	.dev_groups	= spi_master_groups,
};

/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after
 * errors adding the device) calling spi_master_put() to prevent a memory
 * leak.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master	*master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = dev;
	pm_suspend_ignore_children(&master->dev, true);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);
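/*
 * Controller-driver probe sketch (hypothetical platform driver; struct
 * mydrv and mydrv_transfer_one are assumed): allocate, fill in the
 * methods, then register:
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(struct mydrv));
 *	if (!master)
 *		return -ENOMEM;
 *	priv = spi_master_get_devdata(master);
 *	master->num_chipselect = 4;
 *	master->transfer_one = mydrv_transfer_one;
 *	ret = devm_spi_register_master(&pdev->dev, master);
 *	if (ret)
 *		spi_master_put(master);	// drop the allocation reference
 *	return ret;
 */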
#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max_t(int, nb, master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif

/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention:  dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);
	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	mutex_init(&master->io_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
		dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}
	/* add statistics */
	spin_lock_init(&master->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);

static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev: device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register an SPI master as with spi_register_master(); the master will
 * automatically be unregistered when @dev is unbound.
 *
 * Return: zero on success, else a negative error code.
1990 */ 1991 int devm_spi_register_master(struct device *dev, struct spi_master *master) 1992 { 1993 struct spi_master **ptr; 1994 int ret; 1995 1996 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); 1997 if (!ptr) 1998 return -ENOMEM; 1999 2000 ret = spi_register_master(master); 2001 if (!ret) { 2002 *ptr = master; 2003 devres_add(dev, ptr); 2004 } else { 2005 devres_free(ptr); 2006 } 2007 2008 return ret; 2009 } 2010 EXPORT_SYMBOL_GPL(devm_spi_register_master); 2011 2012 static int __unregister(struct device *dev, void *null) 2013 { 2014 spi_unregister_device(to_spi_device(dev)); 2015 return 0; 2016 } 2017 2018 /** 2019 * spi_unregister_master - unregister SPI master controller 2020 * @master: the master being unregistered 2021 * Context: can sleep 2022 * 2023 * This call is used only by SPI master controller drivers, which are the 2024 * only ones directly touching chip registers. 2025 * 2026 * This must be called from context that can sleep. 2027 */ 2028 void spi_unregister_master(struct spi_master *master) 2029 { 2030 int dummy; 2031 2032 if (master->queued) { 2033 if (spi_destroy_queue(master)) 2034 dev_err(&master->dev, "queue remove failed\n"); 2035 } 2036 2037 mutex_lock(&board_lock); 2038 list_del(&master->list); 2039 mutex_unlock(&board_lock); 2040 2041 dummy = device_for_each_child(&master->dev, NULL, __unregister); 2042 device_unregister(&master->dev); 2043 } 2044 EXPORT_SYMBOL_GPL(spi_unregister_master); 2045 2046 int spi_master_suspend(struct spi_master *master) 2047 { 2048 int ret; 2049 2050 /* Basically no-ops for non-queued masters */ 2051 if (!master->queued) 2052 return 0; 2053 2054 ret = spi_stop_queue(master); 2055 if (ret) 2056 dev_err(&master->dev, "queue stop failed\n"); 2057 2058 return ret; 2059 } 2060 EXPORT_SYMBOL_GPL(spi_master_suspend); 2061 2062 int spi_master_resume(struct spi_master *master) 2063 { 2064 int ret; 2065 2066 if (!master->queued) 2067 return 0; 2068 2069 ret = spi_start_queue(master); 2070 if (ret) 2071 dev_err(&master->dev, "queue restart failed\n"); 2072 2073 return ret; 2074 } 2075 EXPORT_SYMBOL_GPL(spi_master_resume); 2076 2077 static int __spi_master_match(struct device *dev, const void *data) 2078 { 2079 struct spi_master *m; 2080 const u16 *bus_num = data; 2081 2082 m = container_of(dev, struct spi_master, dev); 2083 return m->bus_num == *bus_num; 2084 } 2085 2086 /** 2087 * spi_busnum_to_master - look up master associated with bus_num 2088 * @bus_num: the master's bus number 2089 * Context: can sleep 2090 * 2091 * This call may be used with devices that are registered after 2092 * arch init time. It returns a refcounted pointer to the relevant 2093 * spi_master (which the caller must release), or NULL if there is 2094 * no such master registered. 2095 * 2096 * Return: the SPI master structure on success, else NULL. 
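 *
 * An illustrative lookup/release pairing (sketch only; bus 0 is an
 * arbitrary example):
 *
 *	struct spi_master *master = spi_busnum_to_master(0);
 *
 *	if (master) {
 *		dev_info(&master->dev, "resolved bus %d\n", master->bus_num);
 *		spi_master_put(master);
 *	}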
2097 */ 2098 struct spi_master *spi_busnum_to_master(u16 bus_num) 2099 { 2100 struct device *dev; 2101 struct spi_master *master = NULL; 2102 2103 dev = class_find_device(&spi_master_class, NULL, &bus_num, 2104 __spi_master_match); 2105 if (dev) 2106 master = container_of(dev, struct spi_master, dev); 2107 /* reference got in class_find_device */ 2108 return master; 2109 } 2110 EXPORT_SYMBOL_GPL(spi_busnum_to_master); 2111 2112 /*-------------------------------------------------------------------------*/ 2113 2114 /* Core methods for SPI resource management */ 2115 2116 /** 2117 * spi_res_alloc - allocate a spi resource that is life-cycle managed 2118 * during the processing of a spi_message while using 2119 * spi_transfer_one 2120 * @spi: the spi device for which we allocate memory 2121 * @release: the release code to execute for this resource 2122 * @size: size to alloc and return 2123 * @gfp: GFP allocation flags 2124 * 2125 * Return: the pointer to the allocated data 2126 * 2127 * This may get enhanced in the future to allocate from a memory pool 2128 * of the @spi_device or @spi_master to avoid repeated allocations. 2129 */ 2130 void *spi_res_alloc(struct spi_device *spi, 2131 spi_res_release_t release, 2132 size_t size, gfp_t gfp) 2133 { 2134 struct spi_res *sres; 2135 2136 sres = kzalloc(sizeof(*sres) + size, gfp); 2137 if (!sres) 2138 return NULL; 2139 2140 INIT_LIST_HEAD(&sres->entry); 2141 sres->release = release; 2142 2143 return sres->data; 2144 } 2145 EXPORT_SYMBOL_GPL(spi_res_alloc); 2146 2147 /** 2148 * spi_res_free - free an spi resource 2149 * @res: pointer to the custom data of a resource 2150 * 2151 */ 2152 void spi_res_free(void *res) 2153 { 2154 struct spi_res *sres = container_of(res, struct spi_res, data); 2155 2156 if (!res) 2157 return; 2158 2159 WARN_ON(!list_empty(&sres->entry)); 2160 kfree(sres); 2161 } 2162 EXPORT_SYMBOL_GPL(spi_res_free); 2163 2164 /** 2165 * spi_res_add - add a spi_res to the spi_message 2166 * @message: the spi message 2167 * @res: the spi_resource 2168 */ 2169 void spi_res_add(struct spi_message *message, void *res) 2170 { 2171 struct spi_res *sres = container_of(res, struct spi_res, data); 2172 2173 WARN_ON(!list_empty(&sres->entry)); 2174 list_add_tail(&sres->entry, &message->resources); 2175 } 2176 EXPORT_SYMBOL_GPL(spi_res_add); 2177 2178 /** 2179 * spi_res_release - release all spi resources for this message 2180 * @master: the @spi_master 2181 * @message: the @spi_message 2182 */ 2183 void spi_res_release(struct spi_master *master, 2184 struct spi_message *message) 2185 { 2186 struct spi_res *res; 2187 2188 while (!list_empty(&message->resources)) { 2189 res = list_last_entry(&message->resources, 2190 struct spi_res, entry); 2191 2192 if (res->release) 2193 res->release(master, message, res->data); 2194 2195 list_del(&res->entry); 2196 2197 kfree(res); 2198 } 2199 } 2200 EXPORT_SYMBOL_GPL(spi_res_release); 2201 2202 /*-------------------------------------------------------------------------*/ 2203 2204 /* Core methods for spi_message alterations */ 2205 2206 static void __spi_replace_transfers_release(struct spi_master *master, 2207 struct spi_message *msg, 2208 void *res) 2209 { 2210 struct spi_replaced_transfers *rxfer = res; 2211 size_t i; 2212 2213 /* call extra callback if requested */ 2214 if (rxfer->release) 2215 rxfer->release(master, msg, res); 2216 2217 /* insert replaced transfers back into the message */ 2218 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); 2219 2220 /* remove the formerly inserted entries 
*/ 2221 for (i = 0; i < rxfer->inserted; i++) 2222 list_del(&rxfer->inserted_transfers[i].transfer_list); 2223 } 2224 2225 /** 2226 * spi_replace_transfers - replace transfers with several transfers 2227 * and register the change with spi_message.resources 2228 * @msg: the spi_message we work upon 2229 * @xfer_first: the first spi_transfer we want to replace 2230 * @remove: number of transfers to remove 2231 * @insert: the number of transfers we want to insert instead 2232 * @release: extra release code necessary in some circumstances 2233 * @extradatasize: extra data to allocate (with alignment guarantees 2234 * of struct @spi_transfer) 2235 * @gfp: gfp flags 2236 * 2237 * Return: pointer to @spi_replaced_transfers, 2238 * PTR_ERR(...) in case of errors. 2239 */ 2240 struct spi_replaced_transfers *spi_replace_transfers( 2241 struct spi_message *msg, 2242 struct spi_transfer *xfer_first, 2243 size_t remove, 2244 size_t insert, 2245 spi_replaced_release_t release, 2246 size_t extradatasize, 2247 gfp_t gfp) 2248 { 2249 struct spi_replaced_transfers *rxfer; 2250 struct spi_transfer *xfer; 2251 size_t i; 2252 2253 /* allocate the structure using spi_res */ 2254 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, 2255 insert * sizeof(struct spi_transfer) 2256 + sizeof(struct spi_replaced_transfers) 2257 + extradatasize, 2258 gfp); 2259 if (!rxfer) 2260 return ERR_PTR(-ENOMEM); 2261 2262 /* the release code to invoke before running the generic release */ 2263 rxfer->release = release; 2264 2265 /* assign extradata */ 2266 if (extradatasize) 2267 rxfer->extradata = 2268 &rxfer->inserted_transfers[insert]; 2269 2270 /* init the replaced_transfers list */ 2271 INIT_LIST_HEAD(&rxfer->replaced_transfers); 2272 2273 /* assign the list_entry after which we should reinsert 2274 * the @replaced_transfers - it may be spi_message.messages!
2275 */ 2276 rxfer->replaced_after = xfer_first->transfer_list.prev; 2277 2278 /* remove the requested number of transfers */ 2279 for (i = 0; i < remove; i++) { 2280 /* if the entry after replaced_after it is msg->transfers 2281 * then we have been requested to remove more transfers 2282 * than are in the list 2283 */ 2284 if (rxfer->replaced_after->next == &msg->transfers) { 2285 dev_err(&msg->spi->dev, 2286 "requested to remove more spi_transfers than are available\n"); 2287 /* insert replaced transfers back into the message */ 2288 list_splice(&rxfer->replaced_transfers, 2289 rxfer->replaced_after); 2290 2291 /* free the spi_replace_transfer structure */ 2292 spi_res_free(rxfer); 2293 2294 /* and return with an error */ 2295 return ERR_PTR(-EINVAL); 2296 } 2297 2298 /* remove the entry after replaced_after from list of 2299 * transfers and add it to list of replaced_transfers 2300 */ 2301 list_move_tail(rxfer->replaced_after->next, 2302 &rxfer->replaced_transfers); 2303 } 2304 2305 /* create copy of the given xfer with identical settings 2306 * based on the first transfer to get removed 2307 */ 2308 for (i = 0; i < insert; i++) { 2309 /* we need to run in reverse order */ 2310 xfer = &rxfer->inserted_transfers[insert - 1 - i]; 2311 2312 /* copy all spi_transfer data */ 2313 memcpy(xfer, xfer_first, sizeof(*xfer)); 2314 2315 /* add to list */ 2316 list_add(&xfer->transfer_list, rxfer->replaced_after); 2317 2318 /* clear cs_change and delay_usecs for all but the last */ 2319 if (i) { 2320 xfer->cs_change = false; 2321 xfer->delay_usecs = 0; 2322 } 2323 } 2324 2325 /* set up inserted */ 2326 rxfer->inserted = insert; 2327 2328 /* and register it with spi_res/spi_message */ 2329 spi_res_add(msg, rxfer); 2330 2331 return rxfer; 2332 } 2333 EXPORT_SYMBOL_GPL(spi_replace_transfers); 2334 2335 static int __spi_split_transfer_maxsize(struct spi_master *master, 2336 struct spi_message *msg, 2337 struct spi_transfer **xferp, 2338 size_t maxsize, 2339 gfp_t gfp) 2340 { 2341 struct spi_transfer *xfer = *xferp, *xfers; 2342 struct spi_replaced_transfers *srt; 2343 size_t offset; 2344 size_t count, i; 2345 2346 /* warn once about this fact that we are splitting a transfer */ 2347 dev_warn_once(&msg->spi->dev, 2348 "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n", 2349 xfer->len, maxsize); 2350 2351 /* calculate how many we have to replace */ 2352 count = DIV_ROUND_UP(xfer->len, maxsize); 2353 2354 /* create replacement */ 2355 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp); 2356 if (IS_ERR(srt)) 2357 return PTR_ERR(srt); 2358 xfers = srt->inserted_transfers; 2359 2360 /* now handle each of those newly inserted spi_transfers 2361 * note that the replacements spi_transfers all are preset 2362 * to the same values as *xferp, so tx_buf, rx_buf and len 2363 * are all identical (as well as most others) 2364 * so we just have to fix up len and the pointers. 
2365 * 2366 * this also includes support for the depreciated 2367 * spi_message.is_dma_mapped interface 2368 */ 2369 2370 /* the first transfer just needs the length modified, so we 2371 * run it outside the loop 2372 */ 2373 xfers[0].len = min_t(size_t, maxsize, xfer[0].len); 2374 2375 /* all the others need rx_buf/tx_buf also set */ 2376 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) { 2377 /* update rx_buf, tx_buf and dma */ 2378 if (xfers[i].rx_buf) 2379 xfers[i].rx_buf += offset; 2380 if (xfers[i].rx_dma) 2381 xfers[i].rx_dma += offset; 2382 if (xfers[i].tx_buf) 2383 xfers[i].tx_buf += offset; 2384 if (xfers[i].tx_dma) 2385 xfers[i].tx_dma += offset; 2386 2387 /* update length */ 2388 xfers[i].len = min(maxsize, xfers[i].len - offset); 2389 } 2390 2391 /* we set up xferp to the last entry we have inserted, 2392 * so that we skip those already split transfers 2393 */ 2394 *xferp = &xfers[count - 1]; 2395 2396 /* increment statistics counters */ 2397 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, 2398 transfers_split_maxsize); 2399 SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics, 2400 transfers_split_maxsize); 2401 2402 return 0; 2403 } 2404 2405 /** 2406 * spi_split_tranfers_maxsize - split spi transfers into multiple transfers 2407 * when an individual transfer exceeds a 2408 * certain size 2409 * @master: the @spi_master for this transfer 2410 * @msg: the @spi_message to transform 2411 * @maxsize: the maximum when to apply this 2412 * @gfp: GFP allocation flags 2413 * 2414 * Return: status of transformation 2415 */ 2416 int spi_split_transfers_maxsize(struct spi_master *master, 2417 struct spi_message *msg, 2418 size_t maxsize, 2419 gfp_t gfp) 2420 { 2421 struct spi_transfer *xfer; 2422 int ret; 2423 2424 /* iterate over the transfer_list, 2425 * but note that xfer is advanced to the last transfer inserted 2426 * to avoid checking sizes again unnecessarily (also xfer does 2427 * potentiall belong to a different list by the time the 2428 * replacement has happened 2429 */ 2430 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 2431 if (xfer->len > maxsize) { 2432 ret = __spi_split_transfer_maxsize( 2433 master, msg, &xfer, maxsize, gfp); 2434 if (ret) 2435 return ret; 2436 } 2437 } 2438 2439 return 0; 2440 } 2441 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize); 2442 2443 /*-------------------------------------------------------------------------*/ 2444 2445 /* Core methods for SPI master protocol drivers. Some of the 2446 * other core methods are currently defined as inline functions. 2447 */ 2448 2449 static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word) 2450 { 2451 if (master->bits_per_word_mask) { 2452 /* Only 32 bits fit in the mask */ 2453 if (bits_per_word > 32) 2454 return -EINVAL; 2455 if (!(master->bits_per_word_mask & 2456 SPI_BPW_MASK(bits_per_word))) 2457 return -EINVAL; 2458 } 2459 2460 return 0; 2461 } 2462 2463 /** 2464 * spi_setup - setup SPI mode and clock rate 2465 * @spi: the device whose settings are being modified 2466 * Context: can sleep, and no requests are queued to the device 2467 * 2468 * SPI protocol drivers may need to update the transfer mode if the 2469 * device doesn't work with its default. They may likewise need 2470 * to update clock rates or word sizes from initial values. This function 2471 * changes those settings, and must be called from a context that can sleep. 
2472 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take 2473 * effect the next time the device is selected and data is transferred to 2474 * or from it. When this function returns, the spi device is deselected. 2475 * 2476 * Note that this call will fail if the protocol driver specifies an option 2477 * that the underlying controller or its driver does not support. For 2478 * example, not all hardware supports wire transfers using nine bit words, 2479 * LSB-first wire encoding, or active-high chipselects. 2480 * 2481 * Return: zero on success, else a negative error code. 2482 */ 2483 int spi_setup(struct spi_device *spi) 2484 { 2485 unsigned bad_bits, ugly_bits; 2486 int status; 2487 2488 /* check mode to prevent that DUAL and QUAD set at the same time 2489 */ 2490 if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) || 2491 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) { 2492 dev_err(&spi->dev, 2493 "setup: can not select dual and quad at the same time\n"); 2494 return -EINVAL; 2495 } 2496 /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden 2497 */ 2498 if ((spi->mode & SPI_3WIRE) && (spi->mode & 2499 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD))) 2500 return -EINVAL; 2501 /* help drivers fail *cleanly* when they need options 2502 * that aren't supported with their current master 2503 */ 2504 bad_bits = spi->mode & ~spi->master->mode_bits; 2505 ugly_bits = bad_bits & 2506 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD); 2507 if (ugly_bits) { 2508 dev_warn(&spi->dev, 2509 "setup: ignoring unsupported mode bits %x\n", 2510 ugly_bits); 2511 spi->mode &= ~ugly_bits; 2512 bad_bits &= ~ugly_bits; 2513 } 2514 if (bad_bits) { 2515 dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 2516 bad_bits); 2517 return -EINVAL; 2518 } 2519 2520 if (!spi->bits_per_word) 2521 spi->bits_per_word = 8; 2522 2523 status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word); 2524 if (status) 2525 return status; 2526 2527 if (!spi->max_speed_hz) 2528 spi->max_speed_hz = spi->master->max_speed_hz; 2529 2530 if (spi->master->setup) 2531 status = spi->master->setup(spi); 2532 2533 spi_set_cs(spi, false); 2534 2535 dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n", 2536 (int) (spi->mode & (SPI_CPOL | SPI_CPHA)), 2537 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 2538 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 2539 (spi->mode & SPI_3WIRE) ? "3wire, " : "", 2540 (spi->mode & SPI_LOOP) ? "loopback, " : "", 2541 spi->bits_per_word, spi->max_speed_hz, 2542 status); 2543 2544 return status; 2545 } 2546 EXPORT_SYMBOL_GPL(spi_setup); 2547 2548 static int __spi_validate(struct spi_device *spi, struct spi_message *message) 2549 { 2550 struct spi_master *master = spi->master; 2551 struct spi_transfer *xfer; 2552 int w_size; 2553 2554 if (list_empty(&message->transfers)) 2555 return -EINVAL; 2556 2557 /* Half-duplex links include original MicroWire, and ones with 2558 * only one data pin like SPI_3WIRE (switches direction) or where 2559 * either MOSI or MISO is missing. They can also be caused by 2560 * software limitations. 
2561 */ 2562 if ((master->flags & SPI_MASTER_HALF_DUPLEX) 2563 || (spi->mode & SPI_3WIRE)) { 2564 unsigned flags = master->flags; 2565 2566 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2567 if (xfer->rx_buf && xfer->tx_buf) 2568 return -EINVAL; 2569 if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) 2570 return -EINVAL; 2571 if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) 2572 return -EINVAL; 2573 } 2574 } 2575 2576 /** 2577 * Set transfer bits_per_word and max speed as spi device default if 2578 * it is not set for this transfer. 2579 * Set transfer tx_nbits and rx_nbits as single transfer default 2580 * (SPI_NBITS_SINGLE) if it is not set for this transfer. 2581 */ 2582 message->frame_length = 0; 2583 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2584 message->frame_length += xfer->len; 2585 if (!xfer->bits_per_word) 2586 xfer->bits_per_word = spi->bits_per_word; 2587 2588 if (!xfer->speed_hz) 2589 xfer->speed_hz = spi->max_speed_hz; 2590 if (!xfer->speed_hz) 2591 xfer->speed_hz = master->max_speed_hz; 2592 2593 if (master->max_speed_hz && 2594 xfer->speed_hz > master->max_speed_hz) 2595 xfer->speed_hz = master->max_speed_hz; 2596 2597 if (__spi_validate_bits_per_word(master, xfer->bits_per_word)) 2598 return -EINVAL; 2599 2600 /* 2601 * SPI transfer length should be multiple of SPI word size 2602 * where SPI word size should be power-of-two multiple 2603 */ 2604 if (xfer->bits_per_word <= 8) 2605 w_size = 1; 2606 else if (xfer->bits_per_word <= 16) 2607 w_size = 2; 2608 else 2609 w_size = 4; 2610 2611 /* No partial transfers accepted */ 2612 if (xfer->len % w_size) 2613 return -EINVAL; 2614 2615 if (xfer->speed_hz && master->min_speed_hz && 2616 xfer->speed_hz < master->min_speed_hz) 2617 return -EINVAL; 2618 2619 if (xfer->tx_buf && !xfer->tx_nbits) 2620 xfer->tx_nbits = SPI_NBITS_SINGLE; 2621 if (xfer->rx_buf && !xfer->rx_nbits) 2622 xfer->rx_nbits = SPI_NBITS_SINGLE; 2623 /* check transfer tx/rx_nbits: 2624 * 1. check the value matches one of single, dual and quad 2625 * 2. 
check tx/rx_nbits match the mode in spi_device 2626 */ 2627 if (xfer->tx_buf) { 2628 if (xfer->tx_nbits != SPI_NBITS_SINGLE && 2629 xfer->tx_nbits != SPI_NBITS_DUAL && 2630 xfer->tx_nbits != SPI_NBITS_QUAD) 2631 return -EINVAL; 2632 if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 2633 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 2634 return -EINVAL; 2635 if ((xfer->tx_nbits == SPI_NBITS_QUAD) && 2636 !(spi->mode & SPI_TX_QUAD)) 2637 return -EINVAL; 2638 } 2639 /* check transfer rx_nbits */ 2640 if (xfer->rx_buf) { 2641 if (xfer->rx_nbits != SPI_NBITS_SINGLE && 2642 xfer->rx_nbits != SPI_NBITS_DUAL && 2643 xfer->rx_nbits != SPI_NBITS_QUAD) 2644 return -EINVAL; 2645 if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 2646 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 2647 return -EINVAL; 2648 if ((xfer->rx_nbits == SPI_NBITS_QUAD) && 2649 !(spi->mode & SPI_RX_QUAD)) 2650 return -EINVAL; 2651 } 2652 } 2653 2654 message->status = -EINPROGRESS; 2655 2656 return 0; 2657 } 2658 2659 static int __spi_async(struct spi_device *spi, struct spi_message *message) 2660 { 2661 struct spi_master *master = spi->master; 2662 2663 message->spi = spi; 2664 2665 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async); 2666 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); 2667 2668 trace_spi_message_submit(message); 2669 2670 return master->transfer(spi, message); 2671 } 2672 2673 /** 2674 * spi_async - asynchronous SPI transfer 2675 * @spi: device with which data will be exchanged 2676 * @message: describes the data transfers, including completion callback 2677 * Context: any (irqs may be blocked, etc) 2678 * 2679 * This call may be used in_irq and other contexts which can't sleep, 2680 * as well as from task contexts which can sleep. 2681 * 2682 * The completion callback is invoked in a context which can't sleep. 2683 * Before that invocation, the value of message->status is undefined. 2684 * When the callback is issued, message->status holds either zero (to 2685 * indicate complete success) or a negative error code. After that 2686 * callback returns, the driver which issued the transfer request may 2687 * deallocate the associated memory; it's no longer in use by any SPI 2688 * core or controller driver code. 2689 * 2690 * Note that although all messages to a spi_device are handled in 2691 * FIFO order, messages may go to different devices in other orders. 2692 * Some device might be higher priority, or have various "hard" access 2693 * time requirements, for example. 2694 * 2695 * On detection of any fault during the transfer, processing of 2696 * the entire message is aborted, and the device is deselected. 2697 * Until returning from the associated message completion callback, 2698 * no other spi_message queued to that device will be processed. 2699 * (This rule applies equally to all the synchronous transfer calls, 2700 * which are wrappers around this core asynchronous primitive.) 2701 * 2702 * Return: zero on success, else a negative error code. 
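 *
 * A minimal submission sketch (illustrative only; foo_complete, foo_ctx
 * and the caller-owned txbuf are hypothetical, and txbuf must remain
 * valid until the completion callback has run):
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf = txbuf,
 *		.len = sizeof(txbuf),
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	msg.complete = foo_complete;
 *	msg.context = &foo_ctx;
 *	status = spi_async(spi, &msg);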
2703 */ 2704 int spi_async(struct spi_device *spi, struct spi_message *message) 2705 { 2706 struct spi_master *master = spi->master; 2707 int ret; 2708 unsigned long flags; 2709 2710 ret = __spi_validate(spi, message); 2711 if (ret != 0) 2712 return ret; 2713 2714 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2715 2716 if (master->bus_lock_flag) 2717 ret = -EBUSY; 2718 else 2719 ret = __spi_async(spi, message); 2720 2721 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2722 2723 return ret; 2724 } 2725 EXPORT_SYMBOL_GPL(spi_async); 2726 2727 /** 2728 * spi_async_locked - version of spi_async with exclusive bus usage 2729 * @spi: device with which data will be exchanged 2730 * @message: describes the data transfers, including completion callback 2731 * Context: any (irqs may be blocked, etc) 2732 * 2733 * This call may be used in_irq and other contexts which can't sleep, 2734 * as well as from task contexts which can sleep. 2735 * 2736 * The completion callback is invoked in a context which can't sleep. 2737 * Before that invocation, the value of message->status is undefined. 2738 * When the callback is issued, message->status holds either zero (to 2739 * indicate complete success) or a negative error code. After that 2740 * callback returns, the driver which issued the transfer request may 2741 * deallocate the associated memory; it's no longer in use by any SPI 2742 * core or controller driver code. 2743 * 2744 * Note that although all messages to a spi_device are handled in 2745 * FIFO order, messages may go to different devices in other orders. 2746 * Some device might be higher priority, or have various "hard" access 2747 * time requirements, for example. 2748 * 2749 * On detection of any fault during the transfer, processing of 2750 * the entire message is aborted, and the device is deselected. 2751 * Until returning from the associated message completion callback, 2752 * no other spi_message queued to that device will be processed. 2753 * (This rule applies equally to all the synchronous transfer calls, 2754 * which are wrappers around this core asynchronous primitive.) 2755 * 2756 * Return: zero on success, else a negative error code. 
2757 */ 2758 int spi_async_locked(struct spi_device *spi, struct spi_message *message) 2759 { 2760 struct spi_master *master = spi->master; 2761 int ret; 2762 unsigned long flags; 2763 2764 ret = __spi_validate(spi, message); 2765 if (ret != 0) 2766 return ret; 2767 2768 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2769 2770 ret = __spi_async(spi, message); 2771 2772 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2773 2774 return ret; 2775 2776 } 2777 EXPORT_SYMBOL_GPL(spi_async_locked); 2778 2779 2780 int spi_flash_read(struct spi_device *spi, 2781 struct spi_flash_read_message *msg) 2782 2783 { 2784 struct spi_master *master = spi->master; 2785 struct device *rx_dev = NULL; 2786 int ret; 2787 2788 if ((msg->opcode_nbits == SPI_NBITS_DUAL || 2789 msg->addr_nbits == SPI_NBITS_DUAL) && 2790 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 2791 return -EINVAL; 2792 if ((msg->opcode_nbits == SPI_NBITS_QUAD || 2793 msg->addr_nbits == SPI_NBITS_QUAD) && 2794 !(spi->mode & SPI_TX_QUAD)) 2795 return -EINVAL; 2796 if (msg->data_nbits == SPI_NBITS_DUAL && 2797 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 2798 return -EINVAL; 2799 if (msg->data_nbits == SPI_NBITS_QUAD && 2800 !(spi->mode & SPI_RX_QUAD)) 2801 return -EINVAL; 2802 2803 if (master->auto_runtime_pm) { 2804 ret = pm_runtime_get_sync(master->dev.parent); 2805 if (ret < 0) { 2806 dev_err(&master->dev, "Failed to power device: %d\n", 2807 ret); 2808 return ret; 2809 } 2810 } 2811 2812 mutex_lock(&master->bus_lock_mutex); 2813 mutex_lock(&master->io_mutex); 2814 if (master->dma_rx) { 2815 rx_dev = master->dma_rx->device->dev; 2816 ret = spi_map_buf(master, rx_dev, &msg->rx_sg, 2817 msg->buf, msg->len, 2818 DMA_FROM_DEVICE); 2819 if (!ret) 2820 msg->cur_msg_mapped = true; 2821 } 2822 ret = master->spi_flash_read(spi, msg); 2823 if (msg->cur_msg_mapped) 2824 spi_unmap_buf(master, rx_dev, &msg->rx_sg, 2825 DMA_FROM_DEVICE); 2826 mutex_unlock(&master->io_mutex); 2827 mutex_unlock(&master->bus_lock_mutex); 2828 2829 if (master->auto_runtime_pm) 2830 pm_runtime_put(master->dev.parent); 2831 2832 return ret; 2833 } 2834 EXPORT_SYMBOL_GPL(spi_flash_read); 2835 2836 /*-------------------------------------------------------------------------*/ 2837 2838 /* Utility methods for SPI master protocol drivers, layered on 2839 * top of the core. Some other utility methods are defined as 2840 * inline functions. 2841 */ 2842 2843 static void spi_complete(void *arg) 2844 { 2845 complete(arg); 2846 } 2847 2848 static int __spi_sync(struct spi_device *spi, struct spi_message *message) 2849 { 2850 DECLARE_COMPLETION_ONSTACK(done); 2851 int status; 2852 struct spi_master *master = spi->master; 2853 unsigned long flags; 2854 2855 status = __spi_validate(spi, message); 2856 if (status != 0) 2857 return status; 2858 2859 message->complete = spi_complete; 2860 message->context = &done; 2861 message->spi = spi; 2862 2863 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync); 2864 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync); 2865 2866 /* If we're not using the legacy transfer method then we will 2867 * try to transfer in the calling context so special case. 2868 * This code would be less tricky if we could remove the 2869 * support for driver implemented message queues. 
2870 */ 2871 if (master->transfer == spi_queued_transfer) { 2872 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2873 2874 trace_spi_message_submit(message); 2875 2876 status = __spi_queued_transfer(spi, message, false); 2877 2878 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2879 } else { 2880 status = spi_async_locked(spi, message); 2881 } 2882 2883 if (status == 0) { 2884 /* Push out the messages in the calling context if we 2885 * can. 2886 */ 2887 if (master->transfer == spi_queued_transfer) { 2888 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, 2889 spi_sync_immediate); 2890 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, 2891 spi_sync_immediate); 2892 __spi_pump_messages(master, false); 2893 } 2894 2895 wait_for_completion(&done); 2896 status = message->status; 2897 } 2898 message->context = NULL; 2899 return status; 2900 } 2901 2902 /** 2903 * spi_sync - blocking/synchronous SPI data transfers 2904 * @spi: device with which data will be exchanged 2905 * @message: describes the data transfers 2906 * Context: can sleep 2907 * 2908 * This call may only be used from a context that may sleep. The sleep 2909 * is non-interruptible, and has no timeout. Low-overhead controller 2910 * drivers may DMA directly into and out of the message buffers. 2911 * 2912 * Note that the SPI device's chip select is active during the message, 2913 * and then is normally disabled between messages. Drivers for some 2914 * frequently-used devices may want to minimize costs of selecting a chip, 2915 * by leaving it selected in anticipation that the next message will go 2916 * to the same chip. (That may increase power usage.) 2917 * 2918 * Also, the caller is guaranteeing that the memory associated with the 2919 * message will not be freed before this call returns. 2920 * 2921 * Return: zero on success, else a negative error code. 2922 */ 2923 int spi_sync(struct spi_device *spi, struct spi_message *message) 2924 { 2925 int ret; 2926 2927 mutex_lock(&spi->master->bus_lock_mutex); 2928 ret = __spi_sync(spi, message); 2929 mutex_unlock(&spi->master->bus_lock_mutex); 2930 2931 return ret; 2932 } 2933 EXPORT_SYMBOL_GPL(spi_sync); 2934 2935 /** 2936 * spi_sync_locked - version of spi_sync with exclusive bus usage 2937 * @spi: device with which data will be exchanged 2938 * @message: describes the data transfers 2939 * Context: can sleep 2940 * 2941 * This call may only be used from a context that may sleep. The sleep 2942 * is non-interruptible, and has no timeout. Low-overhead controller 2943 * drivers may DMA directly into and out of the message buffers. 2944 * 2945 * This call should be used by drivers that require exclusive access to the 2946 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must 2947 * be released by a spi_bus_unlock call when the exclusive access is over. 2948 * 2949 * Return: zero on success, else a negative error code. 2950 */ 2951 int spi_sync_locked(struct spi_device *spi, struct spi_message *message) 2952 { 2953 return __spi_sync(spi, message); 2954 } 2955 EXPORT_SYMBOL_GPL(spi_sync_locked); 2956 2957 /** 2958 * spi_bus_lock - obtain a lock for exclusive SPI bus usage 2959 * @master: SPI bus master that should be locked for exclusive bus access 2960 * Context: can sleep 2961 * 2962 * This call may only be used from a context that may sleep. The sleep 2963 * is non-interruptible, and has no timeout. 2964 * 2965 * This call should be used by drivers that require exclusive access to the 2966 * SPI bus. 
The SPI bus must be released by a spi_bus_unlock call when the 2967 * exclusive access is over. Data transfer must be done by spi_sync_locked 2968 * and spi_async_locked calls when the SPI bus lock is held. 2969 * 2970 * Return: always zero. 2971 */ 2972 int spi_bus_lock(struct spi_master *master) 2973 { 2974 unsigned long flags; 2975 2976 mutex_lock(&master->bus_lock_mutex); 2977 2978 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2979 master->bus_lock_flag = 1; 2980 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2981 2982 /* mutex remains locked until spi_bus_unlock is called */ 2983 2984 return 0; 2985 } 2986 EXPORT_SYMBOL_GPL(spi_bus_lock); 2987 2988 /** 2989 * spi_bus_unlock - release the lock for exclusive SPI bus usage 2990 * @master: SPI bus master that was locked for exclusive bus access 2991 * Context: can sleep 2992 * 2993 * This call may only be used from a context that may sleep. The sleep 2994 * is non-interruptible, and has no timeout. 2995 * 2996 * This call releases an SPI bus lock previously obtained by an spi_bus_lock 2997 * call. 2998 * 2999 * Return: always zero. 3000 */ 3001 int spi_bus_unlock(struct spi_master *master) 3002 { 3003 master->bus_lock_flag = 0; 3004 3005 mutex_unlock(&master->bus_lock_mutex); 3006 3007 return 0; 3008 } 3009 EXPORT_SYMBOL_GPL(spi_bus_unlock); 3010 3011 /* portable code must never pass more than 32 bytes */ 3012 #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) 3013 3014 static u8 *buf; 3015 3016 /** 3017 * spi_write_then_read - SPI synchronous write followed by read 3018 * @spi: device with which data will be exchanged 3019 * @txbuf: data to be written (need not be dma-safe) 3020 * @n_tx: size of txbuf, in bytes 3021 * @rxbuf: buffer into which data will be read (need not be dma-safe) 3022 * @n_rx: size of rxbuf, in bytes 3023 * Context: can sleep 3024 * 3025 * This performs a half duplex MicroWire style transaction with the 3026 * device, sending txbuf and then reading rxbuf. The return value 3027 * is zero for success, else a negative errno status code. 3028 * This call may only be used from a context that may sleep. 3029 * 3030 * Parameters to this routine are always copied using a small buffer; 3031 * portable code should never use this for more than 32 bytes. 3032 * Performance-sensitive or bulk transfer code should instead use 3033 * spi_{async,sync}() calls with dma-safe buffers. 3034 * 3035 * Return: zero on success, else a negative error code. 3036 */ 3037 int spi_write_then_read(struct spi_device *spi, 3038 const void *txbuf, unsigned n_tx, 3039 void *rxbuf, unsigned n_rx) 3040 { 3041 static DEFINE_MUTEX(lock); 3042 3043 int status; 3044 struct spi_message message; 3045 struct spi_transfer x[2]; 3046 u8 *local_buf; 3047 3048 /* Use preallocated DMA-safe buffer if we can. We can't avoid 3049 * copying here, (as a pure convenience thing), but we can 3050 * keep heap costs out of the hot path unless someone else is 3051 * using the pre-allocated buffer or the transfer is too large. 
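 * For scale, a typical register-read caller (the 0x80 read flag is a
 * device-specific convention, shown only for illustration) looks like:
 *
 *	u8 cmd = 0x80 | reg;
 *	u8 val;
 *	status = spi_write_then_read(spi, &cmd, 1, &val, 1);
 *
 * where n_tx + n_rx == 2 is far below SPI_BUFSIZ, so the preallocated
 * buffer is used whenever the trylock succeeds.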
3052 */ 3053 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { 3054 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), 3055 GFP_KERNEL | GFP_DMA); 3056 if (!local_buf) 3057 return -ENOMEM; 3058 } else { 3059 local_buf = buf; 3060 } 3061 3062 spi_message_init(&message); 3063 memset(x, 0, sizeof(x)); 3064 if (n_tx) { 3065 x[0].len = n_tx; 3066 spi_message_add_tail(&x[0], &message); 3067 } 3068 if (n_rx) { 3069 x[1].len = n_rx; 3070 spi_message_add_tail(&x[1], &message); 3071 } 3072 3073 memcpy(local_buf, txbuf, n_tx); 3074 x[0].tx_buf = local_buf; 3075 x[1].rx_buf = local_buf + n_tx; 3076 3077 /* do the i/o */ 3078 status = spi_sync(spi, &message); 3079 if (status == 0) 3080 memcpy(rxbuf, x[1].rx_buf, n_rx); 3081 3082 if (x[0].tx_buf == buf) 3083 mutex_unlock(&lock); 3084 else 3085 kfree(local_buf); 3086 3087 return status; 3088 } 3089 EXPORT_SYMBOL_GPL(spi_write_then_read); 3090 3091 /*-------------------------------------------------------------------------*/ 3092 3093 #if IS_ENABLED(CONFIG_OF_DYNAMIC) 3094 static int __spi_of_device_match(struct device *dev, void *data) 3095 { 3096 return dev->of_node == data; 3097 } 3098 3099 /* must call put_device() when done with returned spi_device device */ 3100 static struct spi_device *of_find_spi_device_by_node(struct device_node *node) 3101 { 3102 struct device *dev = bus_find_device(&spi_bus_type, NULL, node, 3103 __spi_of_device_match); 3104 return dev ? to_spi_device(dev) : NULL; 3105 } 3106 3107 static int __spi_of_master_match(struct device *dev, const void *data) 3108 { 3109 return dev->of_node == data; 3110 } 3111 3112 /* the spi masters are not using spi_bus, so we find it with another way */ 3113 static struct spi_master *of_find_spi_master_by_node(struct device_node *node) 3114 { 3115 struct device *dev; 3116 3117 dev = class_find_device(&spi_master_class, NULL, node, 3118 __spi_of_master_match); 3119 if (!dev) 3120 return NULL; 3121 3122 /* reference got in class_find_device */ 3123 return container_of(dev, struct spi_master, dev); 3124 } 3125 3126 static int of_spi_notify(struct notifier_block *nb, unsigned long action, 3127 void *arg) 3128 { 3129 struct of_reconfig_data *rd = arg; 3130 struct spi_master *master; 3131 struct spi_device *spi; 3132 3133 switch (of_reconfig_get_state_change(action, arg)) { 3134 case OF_RECONFIG_CHANGE_ADD: 3135 master = of_find_spi_master_by_node(rd->dn->parent); 3136 if (master == NULL) 3137 return NOTIFY_OK; /* not for us */ 3138 3139 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { 3140 put_device(&master->dev); 3141 return NOTIFY_OK; 3142 } 3143 3144 spi = of_register_spi_device(master, rd->dn); 3145 put_device(&master->dev); 3146 3147 if (IS_ERR(spi)) { 3148 pr_err("%s: failed to create for '%s'\n", 3149 __func__, rd->dn->full_name); 3150 of_node_clear_flag(rd->dn, OF_POPULATED); 3151 return notifier_from_errno(PTR_ERR(spi)); 3152 } 3153 break; 3154 3155 case OF_RECONFIG_CHANGE_REMOVE: 3156 /* already depopulated? */ 3157 if (!of_node_check_flag(rd->dn, OF_POPULATED)) 3158 return NOTIFY_OK; 3159 3160 /* find our device by node */ 3161 spi = of_find_spi_device_by_node(rd->dn); 3162 if (spi == NULL) 3163 return NOTIFY_OK; /* no? 
not meant for us */ 3164 3165 /* unregister takes one ref away */ 3166 spi_unregister_device(spi); 3167 3168 /* and put the reference of the find */ 3169 put_device(&spi->dev); 3170 break; 3171 } 3172 3173 return NOTIFY_OK; 3174 } 3175 3176 static struct notifier_block spi_of_notifier = { 3177 .notifier_call = of_spi_notify, 3178 }; 3179 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 3180 extern struct notifier_block spi_of_notifier; 3181 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 3182 3183 #if IS_ENABLED(CONFIG_ACPI) 3184 static int spi_acpi_master_match(struct device *dev, const void *data) 3185 { 3186 return ACPI_COMPANION(dev->parent) == data; 3187 } 3188 3189 static int spi_acpi_device_match(struct device *dev, void *data) 3190 { 3191 return ACPI_COMPANION(dev) == data; 3192 } 3193 3194 static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev) 3195 { 3196 struct device *dev; 3197 3198 dev = class_find_device(&spi_master_class, NULL, adev, 3199 spi_acpi_master_match); 3200 if (!dev) 3201 return NULL; 3202 3203 return container_of(dev, struct spi_master, dev); 3204 } 3205 3206 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) 3207 { 3208 struct device *dev; 3209 3210 dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match); 3211 3212 return dev ? to_spi_device(dev) : NULL; 3213 } 3214 3215 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value, 3216 void *arg) 3217 { 3218 struct acpi_device *adev = arg; 3219 struct spi_master *master; 3220 struct spi_device *spi; 3221 3222 switch (value) { 3223 case ACPI_RECONFIG_DEVICE_ADD: 3224 master = acpi_spi_find_master_by_adev(adev->parent); 3225 if (!master) 3226 break; 3227 3228 acpi_register_spi_device(master, adev); 3229 put_device(&master->dev); 3230 break; 3231 case ACPI_RECONFIG_DEVICE_REMOVE: 3232 if (!acpi_device_enumerated(adev)) 3233 break; 3234 3235 spi = acpi_spi_find_device_by_adev(adev); 3236 if (!spi) 3237 break; 3238 3239 spi_unregister_device(spi); 3240 put_device(&spi->dev); 3241 break; 3242 } 3243 3244 return NOTIFY_OK; 3245 } 3246 3247 static struct notifier_block spi_acpi_notifier = { 3248 .notifier_call = acpi_spi_notify, 3249 }; 3250 #else 3251 extern struct notifier_block spi_acpi_notifier; 3252 #endif 3253 3254 static int __init spi_init(void) 3255 { 3256 int status; 3257 3258 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 3259 if (!buf) { 3260 status = -ENOMEM; 3261 goto err0; 3262 } 3263 3264 status = bus_register(&spi_bus_type); 3265 if (status < 0) 3266 goto err1; 3267 3268 status = class_register(&spi_master_class); 3269 if (status < 0) 3270 goto err2; 3271 3272 if (IS_ENABLED(CONFIG_OF_DYNAMIC)) 3273 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); 3274 if (IS_ENABLED(CONFIG_ACPI)) 3275 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier)); 3276 3277 return 0; 3278 3279 err2: 3280 bus_unregister(&spi_bus_type); 3281 err1: 3282 kfree(buf); 3283 buf = NULL; 3284 err0: 3285 return status; 3286 } 3287 3288 /* board_info is normally registered in arch_initcall(), 3289 * but even essential drivers wait till later 3290 * 3291 * REVISIT only boardinfo really needs static linking. the rest (device and 3292 * driver registration) _could_ be dynamically linked (modular) ... costs 3293 * include needing to have boardinfo data structures be much more public. 3294 */ 3295 postcore_initcall(spi_init); 3296 3297