1 /* 2 * SPI init/core code 3 * 4 * Copyright (C) 2005 David Brownell 5 * Copyright (C) 2008 Secret Lab Technologies Ltd. 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by 9 * the Free Software Foundation; either version 2 of the License, or 10 * (at your option) any later version. 11 * 12 * This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details. 16 */ 17 18 #include <linux/kernel.h> 19 #include <linux/device.h> 20 #include <linux/init.h> 21 #include <linux/cache.h> 22 #include <linux/dma-mapping.h> 23 #include <linux/dmaengine.h> 24 #include <linux/mutex.h> 25 #include <linux/of_device.h> 26 #include <linux/of_irq.h> 27 #include <linux/clk/clk-conf.h> 28 #include <linux/slab.h> 29 #include <linux/mod_devicetable.h> 30 #include <linux/spi/spi.h> 31 #include <linux/of_gpio.h> 32 #include <linux/pm_runtime.h> 33 #include <linux/pm_domain.h> 34 #include <linux/export.h> 35 #include <linux/sched/rt.h> 36 #include <uapi/linux/sched/types.h> 37 #include <linux/delay.h> 38 #include <linux/kthread.h> 39 #include <linux/ioport.h> 40 #include <linux/acpi.h> 41 #include <linux/highmem.h> 42 43 #define CREATE_TRACE_POINTS 44 #include <trace/events/spi.h> 45 46 static void spidev_release(struct device *dev) 47 { 48 struct spi_device *spi = to_spi_device(dev); 49 50 /* spi masters may cleanup for released devices */ 51 if (spi->master->cleanup) 52 spi->master->cleanup(spi); 53 54 spi_master_put(spi->master); 55 kfree(spi); 56 } 57 58 static ssize_t 59 modalias_show(struct device *dev, struct device_attribute *a, char *buf) 60 { 61 const struct spi_device *spi = to_spi_device(dev); 62 int len; 63 64 len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); 65 if (len != -ENODEV) 66 return len; 67 68 return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias); 69 } 70 static DEVICE_ATTR_RO(modalias); 71 72 #define SPI_STATISTICS_ATTRS(field, file) \ 73 static ssize_t spi_master_##field##_show(struct device *dev, \ 74 struct device_attribute *attr, \ 75 char *buf) \ 76 { \ 77 struct spi_master *master = container_of(dev, \ 78 struct spi_master, dev); \ 79 return spi_statistics_##field##_show(&master->statistics, buf); \ 80 } \ 81 static struct device_attribute dev_attr_spi_master_##field = { \ 82 .attr = { .name = file, .mode = S_IRUGO }, \ 83 .show = spi_master_##field##_show, \ 84 }; \ 85 static ssize_t spi_device_##field##_show(struct device *dev, \ 86 struct device_attribute *attr, \ 87 char *buf) \ 88 { \ 89 struct spi_device *spi = to_spi_device(dev); \ 90 return spi_statistics_##field##_show(&spi->statistics, buf); \ 91 } \ 92 static struct device_attribute dev_attr_spi_device_##field = { \ 93 .attr = { .name = file, .mode = S_IRUGO }, \ 94 .show = spi_device_##field##_show, \ 95 } 96 97 #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \ 98 static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \ 99 char *buf) \ 100 { \ 101 unsigned long flags; \ 102 ssize_t len; \ 103 spin_lock_irqsave(&stat->lock, flags); \ 104 len = sprintf(buf, format_string, stat->field); \ 105 spin_unlock_irqrestore(&stat->lock, flags); \ 106 return len; \ 107 } \ 108 SPI_STATISTICS_ATTRS(name, file) 109 110 #define SPI_STATISTICS_SHOW(field, format_string) \ 111 SPI_STATISTICS_SHOW_NAME(field, 
__stringify(field), \ 112 field, format_string) 113 114 SPI_STATISTICS_SHOW(messages, "%lu"); 115 SPI_STATISTICS_SHOW(transfers, "%lu"); 116 SPI_STATISTICS_SHOW(errors, "%lu"); 117 SPI_STATISTICS_SHOW(timedout, "%lu"); 118 119 SPI_STATISTICS_SHOW(spi_sync, "%lu"); 120 SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu"); 121 SPI_STATISTICS_SHOW(spi_async, "%lu"); 122 123 SPI_STATISTICS_SHOW(bytes, "%llu"); 124 SPI_STATISTICS_SHOW(bytes_rx, "%llu"); 125 SPI_STATISTICS_SHOW(bytes_tx, "%llu"); 126 127 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \ 128 SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \ 129 "transfer_bytes_histo_" number, \ 130 transfer_bytes_histo[index], "%lu") 131 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1"); 132 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3"); 133 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7"); 134 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15"); 135 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31"); 136 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63"); 137 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127"); 138 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255"); 139 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511"); 140 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023"); 141 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047"); 142 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095"); 143 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191"); 144 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383"); 145 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767"); 146 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535"); 147 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+"); 148 149 SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu"); 150 151 static struct attribute *spi_dev_attrs[] = { 152 &dev_attr_modalias.attr, 153 NULL, 154 }; 155 156 static const struct attribute_group spi_dev_group = { 157 .attrs = spi_dev_attrs, 158 }; 159 160 static struct attribute *spi_device_statistics_attrs[] = { 161 &dev_attr_spi_device_messages.attr, 162 &dev_attr_spi_device_transfers.attr, 163 &dev_attr_spi_device_errors.attr, 164 &dev_attr_spi_device_timedout.attr, 165 &dev_attr_spi_device_spi_sync.attr, 166 &dev_attr_spi_device_spi_sync_immediate.attr, 167 &dev_attr_spi_device_spi_async.attr, 168 &dev_attr_spi_device_bytes.attr, 169 &dev_attr_spi_device_bytes_rx.attr, 170 &dev_attr_spi_device_bytes_tx.attr, 171 &dev_attr_spi_device_transfer_bytes_histo0.attr, 172 &dev_attr_spi_device_transfer_bytes_histo1.attr, 173 &dev_attr_spi_device_transfer_bytes_histo2.attr, 174 &dev_attr_spi_device_transfer_bytes_histo3.attr, 175 &dev_attr_spi_device_transfer_bytes_histo4.attr, 176 &dev_attr_spi_device_transfer_bytes_histo5.attr, 177 &dev_attr_spi_device_transfer_bytes_histo6.attr, 178 &dev_attr_spi_device_transfer_bytes_histo7.attr, 179 &dev_attr_spi_device_transfer_bytes_histo8.attr, 180 &dev_attr_spi_device_transfer_bytes_histo9.attr, 181 &dev_attr_spi_device_transfer_bytes_histo10.attr, 182 &dev_attr_spi_device_transfer_bytes_histo11.attr, 183 &dev_attr_spi_device_transfer_bytes_histo12.attr, 184 &dev_attr_spi_device_transfer_bytes_histo13.attr, 185 &dev_attr_spi_device_transfer_bytes_histo14.attr, 186 &dev_attr_spi_device_transfer_bytes_histo15.attr, 187 &dev_attr_spi_device_transfer_bytes_histo16.attr, 188 &dev_attr_spi_device_transfers_split_maxsize.attr, 189 NULL, 190 }; 191 192 static const struct attribute_group spi_device_statistics_group = { 193 .name = "statistics", 194 .attrs = spi_device_statistics_attrs, 195 }; 196 197 static const struct attribute_group 
*spi_dev_groups[] = { 198 &spi_dev_group, 199 &spi_device_statistics_group, 200 NULL, 201 }; 202 203 static struct attribute *spi_master_statistics_attrs[] = { 204 &dev_attr_spi_master_messages.attr, 205 &dev_attr_spi_master_transfers.attr, 206 &dev_attr_spi_master_errors.attr, 207 &dev_attr_spi_master_timedout.attr, 208 &dev_attr_spi_master_spi_sync.attr, 209 &dev_attr_spi_master_spi_sync_immediate.attr, 210 &dev_attr_spi_master_spi_async.attr, 211 &dev_attr_spi_master_bytes.attr, 212 &dev_attr_spi_master_bytes_rx.attr, 213 &dev_attr_spi_master_bytes_tx.attr, 214 &dev_attr_spi_master_transfer_bytes_histo0.attr, 215 &dev_attr_spi_master_transfer_bytes_histo1.attr, 216 &dev_attr_spi_master_transfer_bytes_histo2.attr, 217 &dev_attr_spi_master_transfer_bytes_histo3.attr, 218 &dev_attr_spi_master_transfer_bytes_histo4.attr, 219 &dev_attr_spi_master_transfer_bytes_histo5.attr, 220 &dev_attr_spi_master_transfer_bytes_histo6.attr, 221 &dev_attr_spi_master_transfer_bytes_histo7.attr, 222 &dev_attr_spi_master_transfer_bytes_histo8.attr, 223 &dev_attr_spi_master_transfer_bytes_histo9.attr, 224 &dev_attr_spi_master_transfer_bytes_histo10.attr, 225 &dev_attr_spi_master_transfer_bytes_histo11.attr, 226 &dev_attr_spi_master_transfer_bytes_histo12.attr, 227 &dev_attr_spi_master_transfer_bytes_histo13.attr, 228 &dev_attr_spi_master_transfer_bytes_histo14.attr, 229 &dev_attr_spi_master_transfer_bytes_histo15.attr, 230 &dev_attr_spi_master_transfer_bytes_histo16.attr, 231 &dev_attr_spi_master_transfers_split_maxsize.attr, 232 NULL, 233 }; 234 235 static const struct attribute_group spi_master_statistics_group = { 236 .name = "statistics", 237 .attrs = spi_master_statistics_attrs, 238 }; 239 240 static const struct attribute_group *spi_master_groups[] = { 241 &spi_master_statistics_group, 242 NULL, 243 }; 244 245 void spi_statistics_add_transfer_stats(struct spi_statistics *stats, 246 struct spi_transfer *xfer, 247 struct spi_master *master) 248 { 249 unsigned long flags; 250 int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1; 251 252 if (l2len < 0) 253 l2len = 0; 254 255 spin_lock_irqsave(&stats->lock, flags); 256 257 stats->transfers++; 258 stats->transfer_bytes_histo[l2len]++; 259 260 stats->bytes += xfer->len; 261 if ((xfer->tx_buf) && 262 (xfer->tx_buf != master->dummy_tx)) 263 stats->bytes_tx += xfer->len; 264 if ((xfer->rx_buf) && 265 (xfer->rx_buf != master->dummy_rx)) 266 stats->bytes_rx += xfer->len; 267 268 spin_unlock_irqrestore(&stats->lock, flags); 269 } 270 EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats); 271 272 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work, 273 * and the sysfs version makes coldplug work too. 
274 */ 275 276 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, 277 const struct spi_device *sdev) 278 { 279 while (id->name[0]) { 280 if (!strcmp(sdev->modalias, id->name)) 281 return id; 282 id++; 283 } 284 return NULL; 285 } 286 287 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev) 288 { 289 const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver); 290 291 return spi_match_id(sdrv->id_table, sdev); 292 } 293 EXPORT_SYMBOL_GPL(spi_get_device_id); 294 295 static int spi_match_device(struct device *dev, struct device_driver *drv) 296 { 297 const struct spi_device *spi = to_spi_device(dev); 298 const struct spi_driver *sdrv = to_spi_driver(drv); 299 300 /* Attempt an OF style match */ 301 if (of_driver_match_device(dev, drv)) 302 return 1; 303 304 /* Then try ACPI */ 305 if (acpi_driver_match_device(dev, drv)) 306 return 1; 307 308 if (sdrv->id_table) 309 return !!spi_match_id(sdrv->id_table, spi); 310 311 return strcmp(spi->modalias, drv->name) == 0; 312 } 313 314 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env) 315 { 316 const struct spi_device *spi = to_spi_device(dev); 317 int rc; 318 319 rc = acpi_device_uevent_modalias(dev, env); 320 if (rc != -ENODEV) 321 return rc; 322 323 add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias); 324 return 0; 325 } 326 327 struct bus_type spi_bus_type = { 328 .name = "spi", 329 .dev_groups = spi_dev_groups, 330 .match = spi_match_device, 331 .uevent = spi_uevent, 332 }; 333 EXPORT_SYMBOL_GPL(spi_bus_type); 334 335 336 static int spi_drv_probe(struct device *dev) 337 { 338 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 339 struct spi_device *spi = to_spi_device(dev); 340 int ret; 341 342 ret = of_clk_set_defaults(dev->of_node, false); 343 if (ret) 344 return ret; 345 346 if (dev->of_node) { 347 spi->irq = of_irq_get(dev->of_node, 0); 348 if (spi->irq == -EPROBE_DEFER) 349 return -EPROBE_DEFER; 350 if (spi->irq < 0) 351 spi->irq = 0; 352 } 353 354 ret = dev_pm_domain_attach(dev, true); 355 if (ret != -EPROBE_DEFER) { 356 ret = sdrv->probe(spi); 357 if (ret) 358 dev_pm_domain_detach(dev, true); 359 } 360 361 return ret; 362 } 363 364 static int spi_drv_remove(struct device *dev) 365 { 366 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 367 int ret; 368 369 ret = sdrv->remove(to_spi_device(dev)); 370 dev_pm_domain_detach(dev, true); 371 372 return ret; 373 } 374 375 static void spi_drv_shutdown(struct device *dev) 376 { 377 const struct spi_driver *sdrv = to_spi_driver(dev->driver); 378 379 sdrv->shutdown(to_spi_device(dev)); 380 } 381 382 /** 383 * __spi_register_driver - register a SPI driver 384 * @owner: owner module of the driver to register 385 * @sdrv: the driver to register 386 * Context: can sleep 387 * 388 * Return: zero on success, else a negative error code. 389 */ 390 int __spi_register_driver(struct module *owner, struct spi_driver *sdrv) 391 { 392 sdrv->driver.owner = owner; 393 sdrv->driver.bus = &spi_bus_type; 394 if (sdrv->probe) 395 sdrv->driver.probe = spi_drv_probe; 396 if (sdrv->remove) 397 sdrv->driver.remove = spi_drv_remove; 398 if (sdrv->shutdown) 399 sdrv->driver.shutdown = spi_drv_shutdown; 400 return driver_register(&sdrv->driver); 401 } 402 EXPORT_SYMBOL_GPL(__spi_register_driver); 403 404 /*-------------------------------------------------------------------------*/ 405 406 /* SPI devices should normally not be created by SPI device drivers; that 407 * would make them board-specific. 
 * Similarly with SPI master drivers.
 * Device registration normally goes into arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operations for the board_info list and the
 * spi_master list, and their matching process.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
523 */ 524 mutex_lock(&spi_add_lock); 525 526 status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check); 527 if (status) { 528 dev_err(dev, "chipselect %d already in use\n", 529 spi->chip_select); 530 goto done; 531 } 532 533 if (master->cs_gpios) 534 spi->cs_gpio = master->cs_gpios[spi->chip_select]; 535 536 /* Drivers may modify this initial i/o setup, but will 537 * normally rely on the device being setup. Devices 538 * using SPI_CS_HIGH can't coexist well otherwise... 539 */ 540 status = spi_setup(spi); 541 if (status < 0) { 542 dev_err(dev, "can't setup %s, status %d\n", 543 dev_name(&spi->dev), status); 544 goto done; 545 } 546 547 /* Device may be bound to an active driver when this returns */ 548 status = device_add(&spi->dev); 549 if (status < 0) 550 dev_err(dev, "can't add %s, status %d\n", 551 dev_name(&spi->dev), status); 552 else 553 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev)); 554 555 done: 556 mutex_unlock(&spi_add_lock); 557 return status; 558 } 559 EXPORT_SYMBOL_GPL(spi_add_device); 560 561 /** 562 * spi_new_device - instantiate one new SPI device 563 * @master: Controller to which device is connected 564 * @chip: Describes the SPI device 565 * Context: can sleep 566 * 567 * On typical mainboards, this is purely internal; and it's not needed 568 * after board init creates the hard-wired devices. Some development 569 * platforms may not be able to use spi_register_board_info though, and 570 * this is exported so that for example a USB or parport based adapter 571 * driver could add devices (which it would learn about out-of-band). 572 * 573 * Return: the new device, or NULL. 574 */ 575 struct spi_device *spi_new_device(struct spi_master *master, 576 struct spi_board_info *chip) 577 { 578 struct spi_device *proxy; 579 int status; 580 581 /* NOTE: caller did any chip->bus_num checks necessary. 582 * 583 * Also, unless we change the return value convention to use 584 * error-or-pointer (not NULL-or-pointer), troubleshootability 585 * suggests syslogged diagnostics are best here (ugh). 586 */ 587 588 proxy = spi_alloc_device(master); 589 if (!proxy) 590 return NULL; 591 592 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); 593 594 proxy->chip_select = chip->chip_select; 595 proxy->max_speed_hz = chip->max_speed_hz; 596 proxy->mode = chip->mode; 597 proxy->irq = chip->irq; 598 strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias)); 599 proxy->dev.platform_data = (void *) chip->platform_data; 600 proxy->controller_data = chip->controller_data; 601 proxy->controller_state = NULL; 602 603 status = spi_add_device(proxy); 604 if (status < 0) { 605 spi_dev_put(proxy); 606 return NULL; 607 } 608 609 return proxy; 610 } 611 EXPORT_SYMBOL_GPL(spi_new_device); 612 613 /** 614 * spi_unregister_device - unregister a single SPI device 615 * @spi: spi_device to unregister 616 * 617 * Start making the passed SPI device vanish. Normally this would be handled 618 * by spi_unregister_master(). 
619 */ 620 void spi_unregister_device(struct spi_device *spi) 621 { 622 if (!spi) 623 return; 624 625 if (spi->dev.of_node) { 626 of_node_clear_flag(spi->dev.of_node, OF_POPULATED); 627 of_node_put(spi->dev.of_node); 628 } 629 if (ACPI_COMPANION(&spi->dev)) 630 acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev)); 631 device_unregister(&spi->dev); 632 } 633 EXPORT_SYMBOL_GPL(spi_unregister_device); 634 635 static void spi_match_master_to_boardinfo(struct spi_master *master, 636 struct spi_board_info *bi) 637 { 638 struct spi_device *dev; 639 640 if (master->bus_num != bi->bus_num) 641 return; 642 643 dev = spi_new_device(master, bi); 644 if (!dev) 645 dev_err(master->dev.parent, "can't create new device for %s\n", 646 bi->modalias); 647 } 648 649 /** 650 * spi_register_board_info - register SPI devices for a given board 651 * @info: array of chip descriptors 652 * @n: how many descriptors are provided 653 * Context: can sleep 654 * 655 * Board-specific early init code calls this (probably during arch_initcall) 656 * with segments of the SPI device table. Any device nodes are created later, 657 * after the relevant parent SPI controller (bus_num) is defined. We keep 658 * this table of devices forever, so that reloading a controller driver will 659 * not make Linux forget about these hard-wired devices. 660 * 661 * Other code can also call this, e.g. a particular add-on board might provide 662 * SPI devices through its expansion connector, so code initializing that board 663 * would naturally declare its SPI devices. 664 * 665 * The board info passed can safely be __initdata ... but be careful of 666 * any embedded pointers (platform_data, etc), they're copied as-is. 667 * 668 * Return: zero on success, else a negative error code. 669 */ 670 int spi_register_board_info(struct spi_board_info const *info, unsigned n) 671 { 672 struct boardinfo *bi; 673 int i; 674 675 if (!n) 676 return -EINVAL; 677 678 bi = kcalloc(n, sizeof(*bi), GFP_KERNEL); 679 if (!bi) 680 return -ENOMEM; 681 682 for (i = 0; i < n; i++, bi++, info++) { 683 struct spi_master *master; 684 685 memcpy(&bi->board_info, info, sizeof(*info)); 686 mutex_lock(&board_lock); 687 list_add_tail(&bi->list, &board_list); 688 list_for_each_entry(master, &spi_master_list, list) 689 spi_match_master_to_boardinfo(master, &bi->board_info); 690 mutex_unlock(&board_lock); 691 } 692 693 return 0; 694 } 695 696 /*-------------------------------------------------------------------------*/ 697 698 static void spi_set_cs(struct spi_device *spi, bool enable) 699 { 700 if (spi->mode & SPI_CS_HIGH) 701 enable = !enable; 702 703 if (gpio_is_valid(spi->cs_gpio)) { 704 gpio_set_value(spi->cs_gpio, !enable); 705 /* Some SPI masters need both GPIO CS & slave_select */ 706 if ((spi->master->flags & SPI_MASTER_GPIO_SS) && 707 spi->master->set_cs) 708 spi->master->set_cs(spi, !enable); 709 } else if (spi->master->set_cs) { 710 spi->master->set_cs(spi, !enable); 711 } 712 } 713 714 #ifdef CONFIG_HAS_DMA 715 static int spi_map_buf(struct spi_master *master, struct device *dev, 716 struct sg_table *sgt, void *buf, size_t len, 717 enum dma_data_direction dir) 718 { 719 const bool vmalloced_buf = is_vmalloc_addr(buf); 720 unsigned int max_seg_size = dma_get_max_seg_size(dev); 721 #ifdef CONFIG_HIGHMEM 722 const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE && 723 (unsigned long)buf < (PKMAP_BASE + 724 (LAST_PKMAP * PAGE_SIZE))); 725 #else 726 const bool kmap_buf = false; 727 #endif 728 int desc_len; 729 int sgs; 730 struct page *vm_page; 731 struct 
scatterlist *sg; 732 void *sg_buf; 733 size_t min; 734 int i, ret; 735 736 if (vmalloced_buf || kmap_buf) { 737 desc_len = min_t(int, max_seg_size, PAGE_SIZE); 738 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); 739 } else if (virt_addr_valid(buf)) { 740 desc_len = min_t(int, max_seg_size, master->max_dma_len); 741 sgs = DIV_ROUND_UP(len, desc_len); 742 } else { 743 return -EINVAL; 744 } 745 746 ret = sg_alloc_table(sgt, sgs, GFP_KERNEL); 747 if (ret != 0) 748 return ret; 749 750 sg = &sgt->sgl[0]; 751 for (i = 0; i < sgs; i++) { 752 753 if (vmalloced_buf || kmap_buf) { 754 min = min_t(size_t, 755 len, desc_len - offset_in_page(buf)); 756 if (vmalloced_buf) 757 vm_page = vmalloc_to_page(buf); 758 else 759 vm_page = kmap_to_page(buf); 760 if (!vm_page) { 761 sg_free_table(sgt); 762 return -ENOMEM; 763 } 764 sg_set_page(sg, vm_page, 765 min, offset_in_page(buf)); 766 } else { 767 min = min_t(size_t, len, desc_len); 768 sg_buf = buf; 769 sg_set_buf(sg, sg_buf, min); 770 } 771 772 buf += min; 773 len -= min; 774 sg = sg_next(sg); 775 } 776 777 ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir); 778 if (!ret) 779 ret = -ENOMEM; 780 if (ret < 0) { 781 sg_free_table(sgt); 782 return ret; 783 } 784 785 sgt->nents = ret; 786 787 return 0; 788 } 789 790 static void spi_unmap_buf(struct spi_master *master, struct device *dev, 791 struct sg_table *sgt, enum dma_data_direction dir) 792 { 793 if (sgt->orig_nents) { 794 dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir); 795 sg_free_table(sgt); 796 } 797 } 798 799 static int __spi_map_msg(struct spi_master *master, struct spi_message *msg) 800 { 801 struct device *tx_dev, *rx_dev; 802 struct spi_transfer *xfer; 803 int ret; 804 805 if (!master->can_dma) 806 return 0; 807 808 if (master->dma_tx) 809 tx_dev = master->dma_tx->device->dev; 810 else 811 tx_dev = master->dev.parent; 812 813 if (master->dma_rx) 814 rx_dev = master->dma_rx->device->dev; 815 else 816 rx_dev = master->dev.parent; 817 818 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 819 if (!master->can_dma(master, msg->spi, xfer)) 820 continue; 821 822 if (xfer->tx_buf != NULL) { 823 ret = spi_map_buf(master, tx_dev, &xfer->tx_sg, 824 (void *)xfer->tx_buf, xfer->len, 825 DMA_TO_DEVICE); 826 if (ret != 0) 827 return ret; 828 } 829 830 if (xfer->rx_buf != NULL) { 831 ret = spi_map_buf(master, rx_dev, &xfer->rx_sg, 832 xfer->rx_buf, xfer->len, 833 DMA_FROM_DEVICE); 834 if (ret != 0) { 835 spi_unmap_buf(master, tx_dev, &xfer->tx_sg, 836 DMA_TO_DEVICE); 837 return ret; 838 } 839 } 840 } 841 842 master->cur_msg_mapped = true; 843 844 return 0; 845 } 846 847 static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg) 848 { 849 struct spi_transfer *xfer; 850 struct device *tx_dev, *rx_dev; 851 852 if (!master->cur_msg_mapped || !master->can_dma) 853 return 0; 854 855 if (master->dma_tx) 856 tx_dev = master->dma_tx->device->dev; 857 else 858 tx_dev = master->dev.parent; 859 860 if (master->dma_rx) 861 rx_dev = master->dma_rx->device->dev; 862 else 863 rx_dev = master->dev.parent; 864 865 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 866 if (!master->can_dma(master, msg->spi, xfer)) 867 continue; 868 869 spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); 870 spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); 871 } 872 873 return 0; 874 } 875 #else /* !CONFIG_HAS_DMA */ 876 static inline int spi_map_buf(struct spi_master *master, 877 struct device *dev, struct sg_table *sgt, 878 void *buf, size_t len, 879 enum dma_data_direction dir) 
880 { 881 return -EINVAL; 882 } 883 884 static inline void spi_unmap_buf(struct spi_master *master, 885 struct device *dev, struct sg_table *sgt, 886 enum dma_data_direction dir) 887 { 888 } 889 890 static inline int __spi_map_msg(struct spi_master *master, 891 struct spi_message *msg) 892 { 893 return 0; 894 } 895 896 static inline int __spi_unmap_msg(struct spi_master *master, 897 struct spi_message *msg) 898 { 899 return 0; 900 } 901 #endif /* !CONFIG_HAS_DMA */ 902 903 static inline int spi_unmap_msg(struct spi_master *master, 904 struct spi_message *msg) 905 { 906 struct spi_transfer *xfer; 907 908 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 909 /* 910 * Restore the original value of tx_buf or rx_buf if they are 911 * NULL. 912 */ 913 if (xfer->tx_buf == master->dummy_tx) 914 xfer->tx_buf = NULL; 915 if (xfer->rx_buf == master->dummy_rx) 916 xfer->rx_buf = NULL; 917 } 918 919 return __spi_unmap_msg(master, msg); 920 } 921 922 static int spi_map_msg(struct spi_master *master, struct spi_message *msg) 923 { 924 struct spi_transfer *xfer; 925 void *tmp; 926 unsigned int max_tx, max_rx; 927 928 if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { 929 max_tx = 0; 930 max_rx = 0; 931 932 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 933 if ((master->flags & SPI_MASTER_MUST_TX) && 934 !xfer->tx_buf) 935 max_tx = max(xfer->len, max_tx); 936 if ((master->flags & SPI_MASTER_MUST_RX) && 937 !xfer->rx_buf) 938 max_rx = max(xfer->len, max_rx); 939 } 940 941 if (max_tx) { 942 tmp = krealloc(master->dummy_tx, max_tx, 943 GFP_KERNEL | GFP_DMA); 944 if (!tmp) 945 return -ENOMEM; 946 master->dummy_tx = tmp; 947 memset(tmp, 0, max_tx); 948 } 949 950 if (max_rx) { 951 tmp = krealloc(master->dummy_rx, max_rx, 952 GFP_KERNEL | GFP_DMA); 953 if (!tmp) 954 return -ENOMEM; 955 master->dummy_rx = tmp; 956 } 957 958 if (max_tx || max_rx) { 959 list_for_each_entry(xfer, &msg->transfers, 960 transfer_list) { 961 if (!xfer->tx_buf) 962 xfer->tx_buf = master->dummy_tx; 963 if (!xfer->rx_buf) 964 xfer->rx_buf = master->dummy_rx; 965 } 966 } 967 } 968 969 return __spi_map_msg(master, msg); 970 } 971 972 /* 973 * spi_transfer_one_message - Default implementation of transfer_one_message() 974 * 975 * This is a standard implementation of transfer_one_message() for 976 * drivers which implement a transfer_one() operation. It provides 977 * standard handling of delays and chip select management. 
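 *
 * As a rough sketch (the foo_* names are hypothetical, not part of this
 * file), a controller driver that relies on this default path only provides
 * transfer_one() and, usually, set_cs().  Returning a positive value from
 * transfer_one() means the transfer is still in flight and the core will
 * wait for spi_finalize_current_transfer(); returning 0 means it already
 * completed:
 *
 *	static int foo_transfer_one(struct spi_master *master,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_start_dma(master, xfer);	// hypothetical helper
 *		return 1;			// completion signalled later
 *	}
 *
 *	master->set_cs = foo_set_cs;
 *	master->transfer_one = foo_transfer_one;
 *	// transfer_one_message is left NULL so that
 *	// spi_master_initialize_queue() installs this default.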
978 */ 979 static int spi_transfer_one_message(struct spi_master *master, 980 struct spi_message *msg) 981 { 982 struct spi_transfer *xfer; 983 bool keep_cs = false; 984 int ret = 0; 985 unsigned long long ms = 1; 986 struct spi_statistics *statm = &master->statistics; 987 struct spi_statistics *stats = &msg->spi->statistics; 988 989 spi_set_cs(msg->spi, true); 990 991 SPI_STATISTICS_INCREMENT_FIELD(statm, messages); 992 SPI_STATISTICS_INCREMENT_FIELD(stats, messages); 993 994 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 995 trace_spi_transfer_start(msg, xfer); 996 997 spi_statistics_add_transfer_stats(statm, xfer, master); 998 spi_statistics_add_transfer_stats(stats, xfer, master); 999 1000 if (xfer->tx_buf || xfer->rx_buf) { 1001 reinit_completion(&master->xfer_completion); 1002 1003 ret = master->transfer_one(master, msg->spi, xfer); 1004 if (ret < 0) { 1005 SPI_STATISTICS_INCREMENT_FIELD(statm, 1006 errors); 1007 SPI_STATISTICS_INCREMENT_FIELD(stats, 1008 errors); 1009 dev_err(&msg->spi->dev, 1010 "SPI transfer failed: %d\n", ret); 1011 goto out; 1012 } 1013 1014 if (ret > 0) { 1015 ret = 0; 1016 ms = 8LL * 1000LL * xfer->len; 1017 do_div(ms, xfer->speed_hz); 1018 ms += ms + 100; /* some tolerance */ 1019 1020 if (ms > UINT_MAX) 1021 ms = UINT_MAX; 1022 1023 ms = wait_for_completion_timeout(&master->xfer_completion, 1024 msecs_to_jiffies(ms)); 1025 } 1026 1027 if (ms == 0) { 1028 SPI_STATISTICS_INCREMENT_FIELD(statm, 1029 timedout); 1030 SPI_STATISTICS_INCREMENT_FIELD(stats, 1031 timedout); 1032 dev_err(&msg->spi->dev, 1033 "SPI transfer timed out\n"); 1034 msg->status = -ETIMEDOUT; 1035 } 1036 } else { 1037 if (xfer->len) 1038 dev_err(&msg->spi->dev, 1039 "Bufferless transfer has length %u\n", 1040 xfer->len); 1041 } 1042 1043 trace_spi_transfer_stop(msg, xfer); 1044 1045 if (msg->status != -EINPROGRESS) 1046 goto out; 1047 1048 if (xfer->delay_usecs) { 1049 u16 us = xfer->delay_usecs; 1050 1051 if (us <= 10) 1052 udelay(us); 1053 else 1054 usleep_range(us, us + DIV_ROUND_UP(us, 10)); 1055 } 1056 1057 if (xfer->cs_change) { 1058 if (list_is_last(&xfer->transfer_list, 1059 &msg->transfers)) { 1060 keep_cs = true; 1061 } else { 1062 spi_set_cs(msg->spi, false); 1063 udelay(10); 1064 spi_set_cs(msg->spi, true); 1065 } 1066 } 1067 1068 msg->actual_length += xfer->len; 1069 } 1070 1071 out: 1072 if (ret != 0 || !keep_cs) 1073 spi_set_cs(msg->spi, false); 1074 1075 if (msg->status == -EINPROGRESS) 1076 msg->status = ret; 1077 1078 if (msg->status && master->handle_err) 1079 master->handle_err(master, msg); 1080 1081 spi_res_release(master, msg); 1082 1083 spi_finalize_current_message(master); 1084 1085 return ret; 1086 } 1087 1088 /** 1089 * spi_finalize_current_transfer - report completion of a transfer 1090 * @master: the master reporting completion 1091 * 1092 * Called by SPI drivers using the core transfer_one_message() 1093 * implementation to notify it that the current interrupt driven 1094 * transfer has finished and the next one may be scheduled. 
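 *
 * A minimal sketch (handler name hypothetical), e.g. from the controller's
 * "transfer done" interrupt:
 *
 *	static irqreturn_t foo_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_master *master = dev_id;
 *
 *		// ... acknowledge / clear the hardware interrupt ...
 *		spi_finalize_current_transfer(master);
 *		return IRQ_HANDLED;
 *	}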
1095 */ 1096 void spi_finalize_current_transfer(struct spi_master *master) 1097 { 1098 complete(&master->xfer_completion); 1099 } 1100 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); 1101 1102 /** 1103 * __spi_pump_messages - function which processes spi message queue 1104 * @master: master to process queue for 1105 * @in_kthread: true if we are in the context of the message pump thread 1106 * 1107 * This function checks if there is any spi message in the queue that 1108 * needs processing and if so call out to the driver to initialize hardware 1109 * and transfer each message. 1110 * 1111 * Note that it is called both from the kthread itself and also from 1112 * inside spi_sync(); the queue extraction handling at the top of the 1113 * function should deal with this safely. 1114 */ 1115 static void __spi_pump_messages(struct spi_master *master, bool in_kthread) 1116 { 1117 unsigned long flags; 1118 bool was_busy = false; 1119 int ret; 1120 1121 /* Lock queue */ 1122 spin_lock_irqsave(&master->queue_lock, flags); 1123 1124 /* Make sure we are not already running a message */ 1125 if (master->cur_msg) { 1126 spin_unlock_irqrestore(&master->queue_lock, flags); 1127 return; 1128 } 1129 1130 /* If another context is idling the device then defer */ 1131 if (master->idling) { 1132 kthread_queue_work(&master->kworker, &master->pump_messages); 1133 spin_unlock_irqrestore(&master->queue_lock, flags); 1134 return; 1135 } 1136 1137 /* Check if the queue is idle */ 1138 if (list_empty(&master->queue) || !master->running) { 1139 if (!master->busy) { 1140 spin_unlock_irqrestore(&master->queue_lock, flags); 1141 return; 1142 } 1143 1144 /* Only do teardown in the thread */ 1145 if (!in_kthread) { 1146 kthread_queue_work(&master->kworker, 1147 &master->pump_messages); 1148 spin_unlock_irqrestore(&master->queue_lock, flags); 1149 return; 1150 } 1151 1152 master->busy = false; 1153 master->idling = true; 1154 spin_unlock_irqrestore(&master->queue_lock, flags); 1155 1156 kfree(master->dummy_rx); 1157 master->dummy_rx = NULL; 1158 kfree(master->dummy_tx); 1159 master->dummy_tx = NULL; 1160 if (master->unprepare_transfer_hardware && 1161 master->unprepare_transfer_hardware(master)) 1162 dev_err(&master->dev, 1163 "failed to unprepare transfer hardware\n"); 1164 if (master->auto_runtime_pm) { 1165 pm_runtime_mark_last_busy(master->dev.parent); 1166 pm_runtime_put_autosuspend(master->dev.parent); 1167 } 1168 trace_spi_master_idle(master); 1169 1170 spin_lock_irqsave(&master->queue_lock, flags); 1171 master->idling = false; 1172 spin_unlock_irqrestore(&master->queue_lock, flags); 1173 return; 1174 } 1175 1176 /* Extract head of queue */ 1177 master->cur_msg = 1178 list_first_entry(&master->queue, struct spi_message, queue); 1179 1180 list_del_init(&master->cur_msg->queue); 1181 if (master->busy) 1182 was_busy = true; 1183 else 1184 master->busy = true; 1185 spin_unlock_irqrestore(&master->queue_lock, flags); 1186 1187 mutex_lock(&master->io_mutex); 1188 1189 if (!was_busy && master->auto_runtime_pm) { 1190 ret = pm_runtime_get_sync(master->dev.parent); 1191 if (ret < 0) { 1192 dev_err(&master->dev, "Failed to power device: %d\n", 1193 ret); 1194 mutex_unlock(&master->io_mutex); 1195 return; 1196 } 1197 } 1198 1199 if (!was_busy) 1200 trace_spi_master_busy(master); 1201 1202 if (!was_busy && master->prepare_transfer_hardware) { 1203 ret = master->prepare_transfer_hardware(master); 1204 if (ret) { 1205 dev_err(&master->dev, 1206 "failed to prepare transfer hardware\n"); 1207 1208 if (master->auto_runtime_pm) 1209 
pm_runtime_put(master->dev.parent); 1210 mutex_unlock(&master->io_mutex); 1211 return; 1212 } 1213 } 1214 1215 trace_spi_message_start(master->cur_msg); 1216 1217 if (master->prepare_message) { 1218 ret = master->prepare_message(master, master->cur_msg); 1219 if (ret) { 1220 dev_err(&master->dev, 1221 "failed to prepare message: %d\n", ret); 1222 master->cur_msg->status = ret; 1223 spi_finalize_current_message(master); 1224 goto out; 1225 } 1226 master->cur_msg_prepared = true; 1227 } 1228 1229 ret = spi_map_msg(master, master->cur_msg); 1230 if (ret) { 1231 master->cur_msg->status = ret; 1232 spi_finalize_current_message(master); 1233 goto out; 1234 } 1235 1236 ret = master->transfer_one_message(master, master->cur_msg); 1237 if (ret) { 1238 dev_err(&master->dev, 1239 "failed to transfer one message from queue\n"); 1240 goto out; 1241 } 1242 1243 out: 1244 mutex_unlock(&master->io_mutex); 1245 1246 /* Prod the scheduler in case transfer_one() was busy waiting */ 1247 if (!ret) 1248 cond_resched(); 1249 } 1250 1251 /** 1252 * spi_pump_messages - kthread work function which processes spi message queue 1253 * @work: pointer to kthread work struct contained in the master struct 1254 */ 1255 static void spi_pump_messages(struct kthread_work *work) 1256 { 1257 struct spi_master *master = 1258 container_of(work, struct spi_master, pump_messages); 1259 1260 __spi_pump_messages(master, true); 1261 } 1262 1263 static int spi_init_queue(struct spi_master *master) 1264 { 1265 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; 1266 1267 master->running = false; 1268 master->busy = false; 1269 1270 kthread_init_worker(&master->kworker); 1271 master->kworker_task = kthread_run(kthread_worker_fn, 1272 &master->kworker, "%s", 1273 dev_name(&master->dev)); 1274 if (IS_ERR(master->kworker_task)) { 1275 dev_err(&master->dev, "failed to create message pump task\n"); 1276 return PTR_ERR(master->kworker_task); 1277 } 1278 kthread_init_work(&master->pump_messages, spi_pump_messages); 1279 1280 /* 1281 * Master config will indicate if this controller should run the 1282 * message pump with high (realtime) priority to reduce the transfer 1283 * latency on the bus by minimising the delay between a transfer 1284 * request and the scheduling of the message pump thread. Without this 1285 * setting the message pump thread will remain at default priority. 1286 */ 1287 if (master->rt) { 1288 dev_info(&master->dev, 1289 "will run message pump with realtime priority\n"); 1290 sched_setscheduler(master->kworker_task, SCHED_FIFO, ¶m); 1291 } 1292 1293 return 0; 1294 } 1295 1296 /** 1297 * spi_get_next_queued_message() - called by driver to check for queued 1298 * messages 1299 * @master: the master to check for queued messages 1300 * 1301 * If there are more messages in the queue, the next message is returned from 1302 * this call. 1303 * 1304 * Return: the next message in the queue, else NULL if the queue is empty. 
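 *
 * For example (sketch only), a driver's completion path may peek at the
 * queue before releasing controller resources:
 *
 *	if (!spi_get_next_queued_message(master))
 *		foo_quiesce_hw(master);		// hypothetical helper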
1305 */ 1306 struct spi_message *spi_get_next_queued_message(struct spi_master *master) 1307 { 1308 struct spi_message *next; 1309 unsigned long flags; 1310 1311 /* get a pointer to the next message, if any */ 1312 spin_lock_irqsave(&master->queue_lock, flags); 1313 next = list_first_entry_or_null(&master->queue, struct spi_message, 1314 queue); 1315 spin_unlock_irqrestore(&master->queue_lock, flags); 1316 1317 return next; 1318 } 1319 EXPORT_SYMBOL_GPL(spi_get_next_queued_message); 1320 1321 /** 1322 * spi_finalize_current_message() - the current message is complete 1323 * @master: the master to return the message to 1324 * 1325 * Called by the driver to notify the core that the message in the front of the 1326 * queue is complete and can be removed from the queue. 1327 */ 1328 void spi_finalize_current_message(struct spi_master *master) 1329 { 1330 struct spi_message *mesg; 1331 unsigned long flags; 1332 int ret; 1333 1334 spin_lock_irqsave(&master->queue_lock, flags); 1335 mesg = master->cur_msg; 1336 spin_unlock_irqrestore(&master->queue_lock, flags); 1337 1338 spi_unmap_msg(master, mesg); 1339 1340 if (master->cur_msg_prepared && master->unprepare_message) { 1341 ret = master->unprepare_message(master, mesg); 1342 if (ret) { 1343 dev_err(&master->dev, 1344 "failed to unprepare message: %d\n", ret); 1345 } 1346 } 1347 1348 spin_lock_irqsave(&master->queue_lock, flags); 1349 master->cur_msg = NULL; 1350 master->cur_msg_prepared = false; 1351 kthread_queue_work(&master->kworker, &master->pump_messages); 1352 spin_unlock_irqrestore(&master->queue_lock, flags); 1353 1354 trace_spi_message_done(mesg); 1355 1356 mesg->state = NULL; 1357 if (mesg->complete) 1358 mesg->complete(mesg->context); 1359 } 1360 EXPORT_SYMBOL_GPL(spi_finalize_current_message); 1361 1362 static int spi_start_queue(struct spi_master *master) 1363 { 1364 unsigned long flags; 1365 1366 spin_lock_irqsave(&master->queue_lock, flags); 1367 1368 if (master->running || master->busy) { 1369 spin_unlock_irqrestore(&master->queue_lock, flags); 1370 return -EBUSY; 1371 } 1372 1373 master->running = true; 1374 master->cur_msg = NULL; 1375 spin_unlock_irqrestore(&master->queue_lock, flags); 1376 1377 kthread_queue_work(&master->kworker, &master->pump_messages); 1378 1379 return 0; 1380 } 1381 1382 static int spi_stop_queue(struct spi_master *master) 1383 { 1384 unsigned long flags; 1385 unsigned limit = 500; 1386 int ret = 0; 1387 1388 spin_lock_irqsave(&master->queue_lock, flags); 1389 1390 /* 1391 * This is a bit lame, but is optimized for the common execution path. 1392 * A wait_queue on the master->busy could be used, but then the common 1393 * execution path (pump_messages) would be required to call wake_up or 1394 * friends on every SPI message. Do this instead. 1395 */ 1396 while ((!list_empty(&master->queue) || master->busy) && limit--) { 1397 spin_unlock_irqrestore(&master->queue_lock, flags); 1398 usleep_range(10000, 11000); 1399 spin_lock_irqsave(&master->queue_lock, flags); 1400 } 1401 1402 if (!list_empty(&master->queue) || master->busy) 1403 ret = -EBUSY; 1404 else 1405 master->running = false; 1406 1407 spin_unlock_irqrestore(&master->queue_lock, flags); 1408 1409 if (ret) { 1410 dev_warn(&master->dev, 1411 "could not stop message queue\n"); 1412 return ret; 1413 } 1414 return ret; 1415 } 1416 1417 static int spi_destroy_queue(struct spi_master *master) 1418 { 1419 int ret; 1420 1421 ret = spi_stop_queue(master); 1422 1423 /* 1424 * kthread_flush_worker will block until all work is done. 
1425 * If the reason that stop_queue timed out is that the work will never 1426 * finish, then it does no good to call flush/stop thread, so 1427 * return anyway. 1428 */ 1429 if (ret) { 1430 dev_err(&master->dev, "problem destroying queue\n"); 1431 return ret; 1432 } 1433 1434 kthread_flush_worker(&master->kworker); 1435 kthread_stop(master->kworker_task); 1436 1437 return 0; 1438 } 1439 1440 static int __spi_queued_transfer(struct spi_device *spi, 1441 struct spi_message *msg, 1442 bool need_pump) 1443 { 1444 struct spi_master *master = spi->master; 1445 unsigned long flags; 1446 1447 spin_lock_irqsave(&master->queue_lock, flags); 1448 1449 if (!master->running) { 1450 spin_unlock_irqrestore(&master->queue_lock, flags); 1451 return -ESHUTDOWN; 1452 } 1453 msg->actual_length = 0; 1454 msg->status = -EINPROGRESS; 1455 1456 list_add_tail(&msg->queue, &master->queue); 1457 if (!master->busy && need_pump) 1458 kthread_queue_work(&master->kworker, &master->pump_messages); 1459 1460 spin_unlock_irqrestore(&master->queue_lock, flags); 1461 return 0; 1462 } 1463 1464 /** 1465 * spi_queued_transfer - transfer function for queued transfers 1466 * @spi: spi device which is requesting transfer 1467 * @msg: spi message which is to handled is queued to driver queue 1468 * 1469 * Return: zero on success, else a negative error code. 1470 */ 1471 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) 1472 { 1473 return __spi_queued_transfer(spi, msg, true); 1474 } 1475 1476 static int spi_master_initialize_queue(struct spi_master *master) 1477 { 1478 int ret; 1479 1480 master->transfer = spi_queued_transfer; 1481 if (!master->transfer_one_message) 1482 master->transfer_one_message = spi_transfer_one_message; 1483 1484 /* Initialize and start queue */ 1485 ret = spi_init_queue(master); 1486 if (ret) { 1487 dev_err(&master->dev, "problem initializing queue\n"); 1488 goto err_init_queue; 1489 } 1490 master->queued = true; 1491 ret = spi_start_queue(master); 1492 if (ret) { 1493 dev_err(&master->dev, "problem starting queue\n"); 1494 goto err_start_queue; 1495 } 1496 1497 return 0; 1498 1499 err_start_queue: 1500 spi_destroy_queue(master); 1501 err_init_queue: 1502 return ret; 1503 } 1504 1505 /*-------------------------------------------------------------------------*/ 1506 1507 #if defined(CONFIG_OF) 1508 static int of_spi_parse_dt(struct spi_master *master, struct spi_device *spi, 1509 struct device_node *nc) 1510 { 1511 u32 value; 1512 int rc; 1513 1514 /* Device address */ 1515 rc = of_property_read_u32(nc, "reg", &value); 1516 if (rc) { 1517 dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n", 1518 nc->full_name, rc); 1519 return rc; 1520 } 1521 spi->chip_select = value; 1522 1523 /* Mode (clock phase/polarity/etc.) 
*/ 1524 if (of_find_property(nc, "spi-cpha", NULL)) 1525 spi->mode |= SPI_CPHA; 1526 if (of_find_property(nc, "spi-cpol", NULL)) 1527 spi->mode |= SPI_CPOL; 1528 if (of_find_property(nc, "spi-cs-high", NULL)) 1529 spi->mode |= SPI_CS_HIGH; 1530 if (of_find_property(nc, "spi-3wire", NULL)) 1531 spi->mode |= SPI_3WIRE; 1532 if (of_find_property(nc, "spi-lsb-first", NULL)) 1533 spi->mode |= SPI_LSB_FIRST; 1534 1535 /* Device DUAL/QUAD mode */ 1536 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 1537 switch (value) { 1538 case 1: 1539 break; 1540 case 2: 1541 spi->mode |= SPI_TX_DUAL; 1542 break; 1543 case 4: 1544 spi->mode |= SPI_TX_QUAD; 1545 break; 1546 default: 1547 dev_warn(&master->dev, 1548 "spi-tx-bus-width %d not supported\n", 1549 value); 1550 break; 1551 } 1552 } 1553 1554 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { 1555 switch (value) { 1556 case 1: 1557 break; 1558 case 2: 1559 spi->mode |= SPI_RX_DUAL; 1560 break; 1561 case 4: 1562 spi->mode |= SPI_RX_QUAD; 1563 break; 1564 default: 1565 dev_warn(&master->dev, 1566 "spi-rx-bus-width %d not supported\n", 1567 value); 1568 break; 1569 } 1570 } 1571 1572 /* Device speed */ 1573 rc = of_property_read_u32(nc, "spi-max-frequency", &value); 1574 if (rc) { 1575 dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n", 1576 nc->full_name, rc); 1577 return rc; 1578 } 1579 spi->max_speed_hz = value; 1580 1581 return 0; 1582 } 1583 1584 static struct spi_device * 1585 of_register_spi_device(struct spi_master *master, struct device_node *nc) 1586 { 1587 struct spi_device *spi; 1588 int rc; 1589 1590 /* Alloc an spi_device */ 1591 spi = spi_alloc_device(master); 1592 if (!spi) { 1593 dev_err(&master->dev, "spi_device alloc error for %s\n", 1594 nc->full_name); 1595 rc = -ENOMEM; 1596 goto err_out; 1597 } 1598 1599 /* Select device driver */ 1600 rc = of_modalias_node(nc, spi->modalias, 1601 sizeof(spi->modalias)); 1602 if (rc < 0) { 1603 dev_err(&master->dev, "cannot find modalias for %s\n", 1604 nc->full_name); 1605 goto err_out; 1606 } 1607 1608 rc = of_spi_parse_dt(master, spi, nc); 1609 if (rc) 1610 goto err_out; 1611 1612 /* Store a pointer to the node in the device structure */ 1613 of_node_get(nc); 1614 spi->dev.of_node = nc; 1615 1616 /* Register the new device */ 1617 rc = spi_add_device(spi); 1618 if (rc) { 1619 dev_err(&master->dev, "spi_device register error %s\n", 1620 nc->full_name); 1621 goto err_of_node_put; 1622 } 1623 1624 return spi; 1625 1626 err_of_node_put: 1627 of_node_put(nc); 1628 err_out: 1629 spi_dev_put(spi); 1630 return ERR_PTR(rc); 1631 } 1632 1633 /** 1634 * of_register_spi_devices() - Register child devices onto the SPI bus 1635 * @master: Pointer to spi_master device 1636 * 1637 * Registers an spi_device for each child node of master node which has a 'reg' 1638 * property. 
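 *
 * For illustration only, a child node such as the following (properties as
 * parsed by of_spi_parse_dt() above) becomes an spi_device with
 * chip_select 0, SPI_CPHA set and max_speed_hz of 10000000:
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;
 *		spi-max-frequency = <10000000>;
 *		spi-cpha;
 *	};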
1639 */ 1640 static void of_register_spi_devices(struct spi_master *master) 1641 { 1642 struct spi_device *spi; 1643 struct device_node *nc; 1644 1645 if (!master->dev.of_node) 1646 return; 1647 1648 for_each_available_child_of_node(master->dev.of_node, nc) { 1649 if (of_node_test_and_set_flag(nc, OF_POPULATED)) 1650 continue; 1651 spi = of_register_spi_device(master, nc); 1652 if (IS_ERR(spi)) { 1653 dev_warn(&master->dev, "Failed to create SPI device for %s\n", 1654 nc->full_name); 1655 of_node_clear_flag(nc, OF_POPULATED); 1656 } 1657 } 1658 } 1659 #else 1660 static void of_register_spi_devices(struct spi_master *master) { } 1661 #endif 1662 1663 #ifdef CONFIG_ACPI 1664 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 1665 { 1666 struct spi_device *spi = data; 1667 struct spi_master *master = spi->master; 1668 1669 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 1670 struct acpi_resource_spi_serialbus *sb; 1671 1672 sb = &ares->data.spi_serial_bus; 1673 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 1674 /* 1675 * ACPI DeviceSelection numbering is handled by the 1676 * host controller driver in Windows and can vary 1677 * from driver to driver. In Linux we always expect 1678 * 0 .. max - 1 so we need to ask the driver to 1679 * translate between the two schemes. 1680 */ 1681 if (master->fw_translate_cs) { 1682 int cs = master->fw_translate_cs(master, 1683 sb->device_selection); 1684 if (cs < 0) 1685 return cs; 1686 spi->chip_select = cs; 1687 } else { 1688 spi->chip_select = sb->device_selection; 1689 } 1690 1691 spi->max_speed_hz = sb->connection_speed; 1692 1693 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) 1694 spi->mode |= SPI_CPHA; 1695 if (sb->clock_polarity == ACPI_SPI_START_HIGH) 1696 spi->mode |= SPI_CPOL; 1697 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) 1698 spi->mode |= SPI_CS_HIGH; 1699 } 1700 } else if (spi->irq < 0) { 1701 struct resource r; 1702 1703 if (acpi_dev_resource_interrupt(ares, 0, &r)) 1704 spi->irq = r.start; 1705 } 1706 1707 /* Always tell the ACPI core to skip this resource */ 1708 return 1; 1709 } 1710 1711 static acpi_status acpi_register_spi_device(struct spi_master *master, 1712 struct acpi_device *adev) 1713 { 1714 struct list_head resource_list; 1715 struct spi_device *spi; 1716 int ret; 1717 1718 if (acpi_bus_get_status(adev) || !adev->status.present || 1719 acpi_device_enumerated(adev)) 1720 return AE_OK; 1721 1722 spi = spi_alloc_device(master); 1723 if (!spi) { 1724 dev_err(&master->dev, "failed to allocate SPI device for %s\n", 1725 dev_name(&adev->dev)); 1726 return AE_NO_MEMORY; 1727 } 1728 1729 ACPI_COMPANION_SET(&spi->dev, adev); 1730 spi->irq = -1; 1731 1732 INIT_LIST_HEAD(&resource_list); 1733 ret = acpi_dev_get_resources(adev, &resource_list, 1734 acpi_spi_add_resource, spi); 1735 acpi_dev_free_resource_list(&resource_list); 1736 1737 if (ret < 0 || !spi->max_speed_hz) { 1738 spi_dev_put(spi); 1739 return AE_OK; 1740 } 1741 1742 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, 1743 sizeof(spi->modalias)); 1744 1745 if (spi->irq < 0) 1746 spi->irq = acpi_dev_gpio_irq_get(adev, 0); 1747 1748 acpi_device_set_enumerated(adev); 1749 1750 adev->power.flags.ignore_parent = true; 1751 if (spi_add_device(spi)) { 1752 adev->power.flags.ignore_parent = false; 1753 dev_err(&master->dev, "failed to add SPI device %s from ACPI\n", 1754 dev_name(&adev->dev)); 1755 spi_dev_put(spi); 1756 } 1757 1758 return AE_OK; 1759 } 1760 1761 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, 1762 void 
*data, void **return_value) 1763 { 1764 struct spi_master *master = data; 1765 struct acpi_device *adev; 1766 1767 if (acpi_bus_get_device(handle, &adev)) 1768 return AE_OK; 1769 1770 return acpi_register_spi_device(master, adev); 1771 } 1772 1773 static void acpi_register_spi_devices(struct spi_master *master) 1774 { 1775 acpi_status status; 1776 acpi_handle handle; 1777 1778 handle = ACPI_HANDLE(master->dev.parent); 1779 if (!handle) 1780 return; 1781 1782 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, 1783 acpi_spi_add_device, NULL, 1784 master, NULL); 1785 if (ACPI_FAILURE(status)) 1786 dev_warn(&master->dev, "failed to enumerate SPI slaves\n"); 1787 } 1788 #else 1789 static inline void acpi_register_spi_devices(struct spi_master *master) {} 1790 #endif /* CONFIG_ACPI */ 1791 1792 static void spi_master_release(struct device *dev) 1793 { 1794 struct spi_master *master; 1795 1796 master = container_of(dev, struct spi_master, dev); 1797 kfree(master); 1798 } 1799 1800 static struct class spi_master_class = { 1801 .name = "spi_master", 1802 .owner = THIS_MODULE, 1803 .dev_release = spi_master_release, 1804 .dev_groups = spi_master_groups, 1805 }; 1806 1807 1808 /** 1809 * spi_alloc_master - allocate SPI master controller 1810 * @dev: the controller, possibly using the platform_bus 1811 * @size: how much zeroed driver-private data to allocate; the pointer to this 1812 * memory is in the driver_data field of the returned device, 1813 * accessible with spi_master_get_devdata(). 1814 * Context: can sleep 1815 * 1816 * This call is used only by SPI master controller drivers, which are the 1817 * only ones directly touching chip registers. It's how they allocate 1818 * an spi_master structure, prior to calling spi_register_master(). 1819 * 1820 * This must be called from context that can sleep. 1821 * 1822 * The caller is responsible for assigning the bus number and initializing 1823 * the master's methods before calling spi_register_master(); and (after errors 1824 * adding the device) calling spi_master_put() to prevent a memory leak. 1825 * 1826 * Return: the SPI master structure on success, else NULL. 
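 *
 * A minimal allocation sketch (the foo_spi type is hypothetical):
 *
 *	struct foo_spi *foo;
 *	struct spi_master *master;
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(*foo));
 *	if (!master)
 *		return -ENOMEM;
 *	foo = spi_master_get_devdata(master);	// the zeroed private area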
1827 */ 1828 struct spi_master *spi_alloc_master(struct device *dev, unsigned size) 1829 { 1830 struct spi_master *master; 1831 1832 if (!dev) 1833 return NULL; 1834 1835 master = kzalloc(size + sizeof(*master), GFP_KERNEL); 1836 if (!master) 1837 return NULL; 1838 1839 device_initialize(&master->dev); 1840 master->bus_num = -1; 1841 master->num_chipselect = 1; 1842 master->dev.class = &spi_master_class; 1843 master->dev.parent = dev; 1844 pm_suspend_ignore_children(&master->dev, true); 1845 spi_master_set_devdata(master, &master[1]); 1846 1847 return master; 1848 } 1849 EXPORT_SYMBOL_GPL(spi_alloc_master); 1850 1851 #ifdef CONFIG_OF 1852 static int of_spi_register_master(struct spi_master *master) 1853 { 1854 int nb, i, *cs; 1855 struct device_node *np = master->dev.of_node; 1856 1857 if (!np) 1858 return 0; 1859 1860 nb = of_gpio_named_count(np, "cs-gpios"); 1861 master->num_chipselect = max_t(int, nb, master->num_chipselect); 1862 1863 /* Return error only for an incorrectly formed cs-gpios property */ 1864 if (nb == 0 || nb == -ENOENT) 1865 return 0; 1866 else if (nb < 0) 1867 return nb; 1868 1869 cs = devm_kzalloc(&master->dev, 1870 sizeof(int) * master->num_chipselect, 1871 GFP_KERNEL); 1872 master->cs_gpios = cs; 1873 1874 if (!master->cs_gpios) 1875 return -ENOMEM; 1876 1877 for (i = 0; i < master->num_chipselect; i++) 1878 cs[i] = -ENOENT; 1879 1880 for (i = 0; i < nb; i++) 1881 cs[i] = of_get_named_gpio(np, "cs-gpios", i); 1882 1883 return 0; 1884 } 1885 #else 1886 static int of_spi_register_master(struct spi_master *master) 1887 { 1888 return 0; 1889 } 1890 #endif 1891 1892 /** 1893 * spi_register_master - register SPI master controller 1894 * @master: initialized master, originally from spi_alloc_master() 1895 * Context: can sleep 1896 * 1897 * SPI master controllers connect to their drivers using some non-SPI bus, 1898 * such as the platform bus. The final stage of probe() in that code 1899 * includes calling spi_register_master() to hook up to this SPI bus glue. 1900 * 1901 * SPI controllers use board specific (often SOC specific) bus numbers, 1902 * and board-specific addressing for SPI devices combines those numbers 1903 * with chip select numbers. Since SPI does not directly support dynamic 1904 * device identification, boards need configuration tables telling which 1905 * chip is at which address. 1906 * 1907 * This must be called from context that can sleep. It returns zero on 1908 * success, else a negative error code (dropping the master's refcount). 1909 * After a successful return, the caller is responsible for calling 1910 * spi_unregister_master(). 1911 * 1912 * Return: zero on success, else a negative error code. 
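 *
 * A rough sketch of the call site in a controller driver's probe()
 * (names hypothetical, error handling elided):
 *
 *	master->bus_num = -1;			// ask for a dynamic bus number
 *	master->num_chipselect = 4;
 *	master->set_cs = foo_set_cs;
 *	master->transfer_one = foo_transfer_one;
 *	status = spi_register_master(master);
 *	...
 *	// and in remove():
 *	spi_unregister_master(master);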
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention: dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);
	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	mutex_init(&master->io_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}
	/* add statistics */
	spin_lock_init(&master->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);

static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev: device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register a SPI master as with spi_register_master(); the master is
 * automatically unregistered when @dev is unbound.
 *
 * Return: zero on success, else a negative error code.
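 *
 * With the managed variant the explicit spi_unregister_master() call in
 * remove() can be dropped, e.g. (sketch):
 *
 *	status = devm_spi_register_master(&pdev->dev, master);
 *	if (status)
 *		return status;
 *	// unregistered automatically when &pdev->dev is unbound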
2009 */ 2010 int devm_spi_register_master(struct device *dev, struct spi_master *master) 2011 { 2012 struct spi_master **ptr; 2013 int ret; 2014 2015 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); 2016 if (!ptr) 2017 return -ENOMEM; 2018 2019 ret = spi_register_master(master); 2020 if (!ret) { 2021 *ptr = master; 2022 devres_add(dev, ptr); 2023 } else { 2024 devres_free(ptr); 2025 } 2026 2027 return ret; 2028 } 2029 EXPORT_SYMBOL_GPL(devm_spi_register_master); 2030 2031 static int __unregister(struct device *dev, void *null) 2032 { 2033 spi_unregister_device(to_spi_device(dev)); 2034 return 0; 2035 } 2036 2037 /** 2038 * spi_unregister_master - unregister SPI master controller 2039 * @master: the master being unregistered 2040 * Context: can sleep 2041 * 2042 * This call is used only by SPI master controller drivers, which are the 2043 * only ones directly touching chip registers. 2044 * 2045 * This must be called from context that can sleep. 2046 */ 2047 void spi_unregister_master(struct spi_master *master) 2048 { 2049 int dummy; 2050 2051 if (master->queued) { 2052 if (spi_destroy_queue(master)) 2053 dev_err(&master->dev, "queue remove failed\n"); 2054 } 2055 2056 mutex_lock(&board_lock); 2057 list_del(&master->list); 2058 mutex_unlock(&board_lock); 2059 2060 dummy = device_for_each_child(&master->dev, NULL, __unregister); 2061 device_unregister(&master->dev); 2062 } 2063 EXPORT_SYMBOL_GPL(spi_unregister_master); 2064 2065 int spi_master_suspend(struct spi_master *master) 2066 { 2067 int ret; 2068 2069 /* Basically no-ops for non-queued masters */ 2070 if (!master->queued) 2071 return 0; 2072 2073 ret = spi_stop_queue(master); 2074 if (ret) 2075 dev_err(&master->dev, "queue stop failed\n"); 2076 2077 return ret; 2078 } 2079 EXPORT_SYMBOL_GPL(spi_master_suspend); 2080 2081 int spi_master_resume(struct spi_master *master) 2082 { 2083 int ret; 2084 2085 if (!master->queued) 2086 return 0; 2087 2088 ret = spi_start_queue(master); 2089 if (ret) 2090 dev_err(&master->dev, "queue restart failed\n"); 2091 2092 return ret; 2093 } 2094 EXPORT_SYMBOL_GPL(spi_master_resume); 2095 2096 static int __spi_master_match(struct device *dev, const void *data) 2097 { 2098 struct spi_master *m; 2099 const u16 *bus_num = data; 2100 2101 m = container_of(dev, struct spi_master, dev); 2102 return m->bus_num == *bus_num; 2103 } 2104 2105 /** 2106 * spi_busnum_to_master - look up master associated with bus_num 2107 * @bus_num: the master's bus number 2108 * Context: can sleep 2109 * 2110 * This call may be used with devices that are registered after 2111 * arch init time. It returns a refcounted pointer to the relevant 2112 * spi_master (which the caller must release), or NULL if there is 2113 * no such master registered. 2114 * 2115 * Return: the SPI master structure on success, else NULL. 
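 *
 * Illustrative use (bus number 1 is an arbitrary example value); the caller
 * must drop the reference with spi_master_put() when done:
 *
 *	master = spi_busnum_to_master(1);
 *	if (master) {
 *		...
 *		spi_master_put(master);
 *	}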
2116 */ 2117 struct spi_master *spi_busnum_to_master(u16 bus_num) 2118 { 2119 struct device *dev; 2120 struct spi_master *master = NULL; 2121 2122 dev = class_find_device(&spi_master_class, NULL, &bus_num, 2123 __spi_master_match); 2124 if (dev) 2125 master = container_of(dev, struct spi_master, dev); 2126 /* reference got in class_find_device */ 2127 return master; 2128 } 2129 EXPORT_SYMBOL_GPL(spi_busnum_to_master); 2130 2131 /*-------------------------------------------------------------------------*/ 2132 2133 /* Core methods for SPI resource management */ 2134 2135 /** 2136 * spi_res_alloc - allocate a spi resource that is life-cycle managed 2137 * during the processing of a spi_message while using 2138 * spi_transfer_one 2139 * @spi: the spi device for which we allocate memory 2140 * @release: the release code to execute for this resource 2141 * @size: size to alloc and return 2142 * @gfp: GFP allocation flags 2143 * 2144 * Return: the pointer to the allocated data 2145 * 2146 * This may get enhanced in the future to allocate from a memory pool 2147 * of the @spi_device or @spi_master to avoid repeated allocations. 2148 */ 2149 void *spi_res_alloc(struct spi_device *spi, 2150 spi_res_release_t release, 2151 size_t size, gfp_t gfp) 2152 { 2153 struct spi_res *sres; 2154 2155 sres = kzalloc(sizeof(*sres) + size, gfp); 2156 if (!sres) 2157 return NULL; 2158 2159 INIT_LIST_HEAD(&sres->entry); 2160 sres->release = release; 2161 2162 return sres->data; 2163 } 2164 EXPORT_SYMBOL_GPL(spi_res_alloc); 2165 2166 /** 2167 * spi_res_free - free an spi resource 2168 * @res: pointer to the custom data of a resource 2169 * 2170 */ 2171 void spi_res_free(void *res) 2172 { 2173 struct spi_res *sres = container_of(res, struct spi_res, data); 2174 2175 if (!res) 2176 return; 2177 2178 WARN_ON(!list_empty(&sres->entry)); 2179 kfree(sres); 2180 } 2181 EXPORT_SYMBOL_GPL(spi_res_free); 2182 2183 /** 2184 * spi_res_add - add a spi_res to the spi_message 2185 * @message: the spi message 2186 * @res: the spi_resource 2187 */ 2188 void spi_res_add(struct spi_message *message, void *res) 2189 { 2190 struct spi_res *sres = container_of(res, struct spi_res, data); 2191 2192 WARN_ON(!list_empty(&sres->entry)); 2193 list_add_tail(&sres->entry, &message->resources); 2194 } 2195 EXPORT_SYMBOL_GPL(spi_res_add); 2196 2197 /** 2198 * spi_res_release - release all spi resources for this message 2199 * @master: the @spi_master 2200 * @message: the @spi_message 2201 */ 2202 void spi_res_release(struct spi_master *master, 2203 struct spi_message *message) 2204 { 2205 struct spi_res *res; 2206 2207 while (!list_empty(&message->resources)) { 2208 res = list_last_entry(&message->resources, 2209 struct spi_res, entry); 2210 2211 if (res->release) 2212 res->release(master, message, res->data); 2213 2214 list_del(&res->entry); 2215 2216 kfree(res); 2217 } 2218 } 2219 EXPORT_SYMBOL_GPL(spi_res_release); 2220 2221 /*-------------------------------------------------------------------------*/ 2222 2223 /* Core methods for spi_message alterations */ 2224 2225 static void __spi_replace_transfers_release(struct spi_master *master, 2226 struct spi_message *msg, 2227 void *res) 2228 { 2229 struct spi_replaced_transfers *rxfer = res; 2230 size_t i; 2231 2232 /* call extra callback if requested */ 2233 if (rxfer->release) 2234 rxfer->release(master, msg, res); 2235 2236 /* insert replaced transfers back into the message */ 2237 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); 2238 2239 /* remove the formerly inserted entries 
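 * (the inserted transfers are embedded in the spi_res allocation, so
 * dropping them from the transfer list here is enough; their memory is
 * freed together with the resource itself)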
 */
	for (i = 0; i < rxfer->inserted; i++)
		list_del(&rxfer->inserted_transfers[i].transfer_list);
}

/**
 * spi_replace_transfers - replace transfers with several transfers
 *                         and register change with spi_message.resources
 * @msg: the spi_message we work upon
 * @xfer_first: the first spi_transfer we want to replace
 * @remove: number of transfers to remove
 * @insert: the number of transfers we want to insert instead
 * @release: extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with alignment guarantees
 *                 of struct @spi_transfer)
 * @gfp: gfp flags
 *
 * Return: pointer to @spi_replaced_transfers,
 *         or PTR_ERR(...) in case of errors.
 */
struct spi_replaced_transfers *spi_replace_transfers(
	struct spi_message *msg,
	struct spi_transfer *xfer_first,
	size_t remove,
	size_t insert,
	spi_replaced_release_t release,
	size_t extradatasize,
	gfp_t gfp)
{
	struct spi_replaced_transfers *rxfer;
	struct spi_transfer *xfer;
	size_t i;

	/* allocate the structure using spi_res */
	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
			      insert * sizeof(struct spi_transfer)
			      + sizeof(struct spi_replaced_transfers)
			      + extradatasize,
			      gfp);
	if (!rxfer)
		return ERR_PTR(-ENOMEM);

	/* the release code to invoke before running the generic release */
	rxfer->release = release;

	/* assign extradata */
	if (extradatasize)
		rxfer->extradata =
			&rxfer->inserted_transfers[insert];

	/* init the replaced_transfers list */
	INIT_LIST_HEAD(&rxfer->replaced_transfers);

	/* assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.transfers!
2294 */ 2295 rxfer->replaced_after = xfer_first->transfer_list.prev; 2296 2297 /* remove the requested number of transfers */ 2298 for (i = 0; i < remove; i++) { 2299 /* if the entry after replaced_after it is msg->transfers 2300 * then we have been requested to remove more transfers 2301 * than are in the list 2302 */ 2303 if (rxfer->replaced_after->next == &msg->transfers) { 2304 dev_err(&msg->spi->dev, 2305 "requested to remove more spi_transfers than are available\n"); 2306 /* insert replaced transfers back into the message */ 2307 list_splice(&rxfer->replaced_transfers, 2308 rxfer->replaced_after); 2309 2310 /* free the spi_replace_transfer structure */ 2311 spi_res_free(rxfer); 2312 2313 /* and return with an error */ 2314 return ERR_PTR(-EINVAL); 2315 } 2316 2317 /* remove the entry after replaced_after from list of 2318 * transfers and add it to list of replaced_transfers 2319 */ 2320 list_move_tail(rxfer->replaced_after->next, 2321 &rxfer->replaced_transfers); 2322 } 2323 2324 /* create copy of the given xfer with identical settings 2325 * based on the first transfer to get removed 2326 */ 2327 for (i = 0; i < insert; i++) { 2328 /* we need to run in reverse order */ 2329 xfer = &rxfer->inserted_transfers[insert - 1 - i]; 2330 2331 /* copy all spi_transfer data */ 2332 memcpy(xfer, xfer_first, sizeof(*xfer)); 2333 2334 /* add to list */ 2335 list_add(&xfer->transfer_list, rxfer->replaced_after); 2336 2337 /* clear cs_change and delay_usecs for all but the last */ 2338 if (i) { 2339 xfer->cs_change = false; 2340 xfer->delay_usecs = 0; 2341 } 2342 } 2343 2344 /* set up inserted */ 2345 rxfer->inserted = insert; 2346 2347 /* and register it with spi_res/spi_message */ 2348 spi_res_add(msg, rxfer); 2349 2350 return rxfer; 2351 } 2352 EXPORT_SYMBOL_GPL(spi_replace_transfers); 2353 2354 static int __spi_split_transfer_maxsize(struct spi_master *master, 2355 struct spi_message *msg, 2356 struct spi_transfer **xferp, 2357 size_t maxsize, 2358 gfp_t gfp) 2359 { 2360 struct spi_transfer *xfer = *xferp, *xfers; 2361 struct spi_replaced_transfers *srt; 2362 size_t offset; 2363 size_t count, i; 2364 2365 /* warn once about this fact that we are splitting a transfer */ 2366 dev_warn_once(&msg->spi->dev, 2367 "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n", 2368 xfer->len, maxsize); 2369 2370 /* calculate how many we have to replace */ 2371 count = DIV_ROUND_UP(xfer->len, maxsize); 2372 2373 /* create replacement */ 2374 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp); 2375 if (IS_ERR(srt)) 2376 return PTR_ERR(srt); 2377 xfers = srt->inserted_transfers; 2378 2379 /* now handle each of those newly inserted spi_transfers 2380 * note that the replacements spi_transfers all are preset 2381 * to the same values as *xferp, so tx_buf, rx_buf and len 2382 * are all identical (as well as most others) 2383 * so we just have to fix up len and the pointers. 
2384 * 2385 * this also includes support for the depreciated 2386 * spi_message.is_dma_mapped interface 2387 */ 2388 2389 /* the first transfer just needs the length modified, so we 2390 * run it outside the loop 2391 */ 2392 xfers[0].len = min_t(size_t, maxsize, xfer[0].len); 2393 2394 /* all the others need rx_buf/tx_buf also set */ 2395 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) { 2396 /* update rx_buf, tx_buf and dma */ 2397 if (xfers[i].rx_buf) 2398 xfers[i].rx_buf += offset; 2399 if (xfers[i].rx_dma) 2400 xfers[i].rx_dma += offset; 2401 if (xfers[i].tx_buf) 2402 xfers[i].tx_buf += offset; 2403 if (xfers[i].tx_dma) 2404 xfers[i].tx_dma += offset; 2405 2406 /* update length */ 2407 xfers[i].len = min(maxsize, xfers[i].len - offset); 2408 } 2409 2410 /* we set up xferp to the last entry we have inserted, 2411 * so that we skip those already split transfers 2412 */ 2413 *xferp = &xfers[count - 1]; 2414 2415 /* increment statistics counters */ 2416 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, 2417 transfers_split_maxsize); 2418 SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics, 2419 transfers_split_maxsize); 2420 2421 return 0; 2422 } 2423 2424 /** 2425 * spi_split_tranfers_maxsize - split spi transfers into multiple transfers 2426 * when an individual transfer exceeds a 2427 * certain size 2428 * @master: the @spi_master for this transfer 2429 * @msg: the @spi_message to transform 2430 * @maxsize: the maximum when to apply this 2431 * @gfp: GFP allocation flags 2432 * 2433 * Return: status of transformation 2434 */ 2435 int spi_split_transfers_maxsize(struct spi_master *master, 2436 struct spi_message *msg, 2437 size_t maxsize, 2438 gfp_t gfp) 2439 { 2440 struct spi_transfer *xfer; 2441 int ret; 2442 2443 /* iterate over the transfer_list, 2444 * but note that xfer is advanced to the last transfer inserted 2445 * to avoid checking sizes again unnecessarily (also xfer does 2446 * potentiall belong to a different list by the time the 2447 * replacement has happened 2448 */ 2449 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 2450 if (xfer->len > maxsize) { 2451 ret = __spi_split_transfer_maxsize( 2452 master, msg, &xfer, maxsize, gfp); 2453 if (ret) 2454 return ret; 2455 } 2456 } 2457 2458 return 0; 2459 } 2460 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize); 2461 2462 /*-------------------------------------------------------------------------*/ 2463 2464 /* Core methods for SPI master protocol drivers. Some of the 2465 * other core methods are currently defined as inline functions. 2466 */ 2467 2468 static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word) 2469 { 2470 if (master->bits_per_word_mask) { 2471 /* Only 32 bits fit in the mask */ 2472 if (bits_per_word > 32) 2473 return -EINVAL; 2474 if (!(master->bits_per_word_mask & 2475 SPI_BPW_MASK(bits_per_word))) 2476 return -EINVAL; 2477 } 2478 2479 return 0; 2480 } 2481 2482 /** 2483 * spi_setup - setup SPI mode and clock rate 2484 * @spi: the device whose settings are being modified 2485 * Context: can sleep, and no requests are queued to the device 2486 * 2487 * SPI protocol drivers may need to update the transfer mode if the 2488 * device doesn't work with its default. They may likewise need 2489 * to update clock rates or word sizes from initial values. This function 2490 * changes those settings, and must be called from a context that can sleep. 
2491 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take 2492 * effect the next time the device is selected and data is transferred to 2493 * or from it. When this function returns, the spi device is deselected. 2494 * 2495 * Note that this call will fail if the protocol driver specifies an option 2496 * that the underlying controller or its driver does not support. For 2497 * example, not all hardware supports wire transfers using nine bit words, 2498 * LSB-first wire encoding, or active-high chipselects. 2499 * 2500 * Return: zero on success, else a negative error code. 2501 */ 2502 int spi_setup(struct spi_device *spi) 2503 { 2504 unsigned bad_bits, ugly_bits; 2505 int status; 2506 2507 /* check mode to prevent that DUAL and QUAD set at the same time 2508 */ 2509 if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) || 2510 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) { 2511 dev_err(&spi->dev, 2512 "setup: can not select dual and quad at the same time\n"); 2513 return -EINVAL; 2514 } 2515 /* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden 2516 */ 2517 if ((spi->mode & SPI_3WIRE) && (spi->mode & 2518 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD))) 2519 return -EINVAL; 2520 /* help drivers fail *cleanly* when they need options 2521 * that aren't supported with their current master 2522 */ 2523 bad_bits = spi->mode & ~spi->master->mode_bits; 2524 ugly_bits = bad_bits & 2525 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD); 2526 if (ugly_bits) { 2527 dev_warn(&spi->dev, 2528 "setup: ignoring unsupported mode bits %x\n", 2529 ugly_bits); 2530 spi->mode &= ~ugly_bits; 2531 bad_bits &= ~ugly_bits; 2532 } 2533 if (bad_bits) { 2534 dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 2535 bad_bits); 2536 return -EINVAL; 2537 } 2538 2539 if (!spi->bits_per_word) 2540 spi->bits_per_word = 8; 2541 2542 status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word); 2543 if (status) 2544 return status; 2545 2546 if (!spi->max_speed_hz) 2547 spi->max_speed_hz = spi->master->max_speed_hz; 2548 2549 if (spi->master->setup) 2550 status = spi->master->setup(spi); 2551 2552 spi_set_cs(spi, false); 2553 2554 dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n", 2555 (int) (spi->mode & (SPI_CPOL | SPI_CPHA)), 2556 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", 2557 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", 2558 (spi->mode & SPI_3WIRE) ? "3wire, " : "", 2559 (spi->mode & SPI_LOOP) ? "loopback, " : "", 2560 spi->bits_per_word, spi->max_speed_hz, 2561 status); 2562 2563 return status; 2564 } 2565 EXPORT_SYMBOL_GPL(spi_setup); 2566 2567 static int __spi_validate(struct spi_device *spi, struct spi_message *message) 2568 { 2569 struct spi_master *master = spi->master; 2570 struct spi_transfer *xfer; 2571 int w_size; 2572 2573 if (list_empty(&message->transfers)) 2574 return -EINVAL; 2575 2576 /* Half-duplex links include original MicroWire, and ones with 2577 * only one data pin like SPI_3WIRE (switches direction) or where 2578 * either MOSI or MISO is missing. They can also be caused by 2579 * software limitations. 
2580 */ 2581 if ((master->flags & SPI_MASTER_HALF_DUPLEX) 2582 || (spi->mode & SPI_3WIRE)) { 2583 unsigned flags = master->flags; 2584 2585 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2586 if (xfer->rx_buf && xfer->tx_buf) 2587 return -EINVAL; 2588 if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf) 2589 return -EINVAL; 2590 if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf) 2591 return -EINVAL; 2592 } 2593 } 2594 2595 /** 2596 * Set transfer bits_per_word and max speed as spi device default if 2597 * it is not set for this transfer. 2598 * Set transfer tx_nbits and rx_nbits as single transfer default 2599 * (SPI_NBITS_SINGLE) if it is not set for this transfer. 2600 */ 2601 message->frame_length = 0; 2602 list_for_each_entry(xfer, &message->transfers, transfer_list) { 2603 message->frame_length += xfer->len; 2604 if (!xfer->bits_per_word) 2605 xfer->bits_per_word = spi->bits_per_word; 2606 2607 if (!xfer->speed_hz) 2608 xfer->speed_hz = spi->max_speed_hz; 2609 if (!xfer->speed_hz) 2610 xfer->speed_hz = master->max_speed_hz; 2611 2612 if (master->max_speed_hz && 2613 xfer->speed_hz > master->max_speed_hz) 2614 xfer->speed_hz = master->max_speed_hz; 2615 2616 if (__spi_validate_bits_per_word(master, xfer->bits_per_word)) 2617 return -EINVAL; 2618 2619 /* 2620 * SPI transfer length should be multiple of SPI word size 2621 * where SPI word size should be power-of-two multiple 2622 */ 2623 if (xfer->bits_per_word <= 8) 2624 w_size = 1; 2625 else if (xfer->bits_per_word <= 16) 2626 w_size = 2; 2627 else 2628 w_size = 4; 2629 2630 /* No partial transfers accepted */ 2631 if (xfer->len % w_size) 2632 return -EINVAL; 2633 2634 if (xfer->speed_hz && master->min_speed_hz && 2635 xfer->speed_hz < master->min_speed_hz) 2636 return -EINVAL; 2637 2638 if (xfer->tx_buf && !xfer->tx_nbits) 2639 xfer->tx_nbits = SPI_NBITS_SINGLE; 2640 if (xfer->rx_buf && !xfer->rx_nbits) 2641 xfer->rx_nbits = SPI_NBITS_SINGLE; 2642 /* check transfer tx/rx_nbits: 2643 * 1. check the value matches one of single, dual and quad 2644 * 2. 
check tx/rx_nbits match the mode in spi_device 2645 */ 2646 if (xfer->tx_buf) { 2647 if (xfer->tx_nbits != SPI_NBITS_SINGLE && 2648 xfer->tx_nbits != SPI_NBITS_DUAL && 2649 xfer->tx_nbits != SPI_NBITS_QUAD) 2650 return -EINVAL; 2651 if ((xfer->tx_nbits == SPI_NBITS_DUAL) && 2652 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 2653 return -EINVAL; 2654 if ((xfer->tx_nbits == SPI_NBITS_QUAD) && 2655 !(spi->mode & SPI_TX_QUAD)) 2656 return -EINVAL; 2657 } 2658 /* check transfer rx_nbits */ 2659 if (xfer->rx_buf) { 2660 if (xfer->rx_nbits != SPI_NBITS_SINGLE && 2661 xfer->rx_nbits != SPI_NBITS_DUAL && 2662 xfer->rx_nbits != SPI_NBITS_QUAD) 2663 return -EINVAL; 2664 if ((xfer->rx_nbits == SPI_NBITS_DUAL) && 2665 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 2666 return -EINVAL; 2667 if ((xfer->rx_nbits == SPI_NBITS_QUAD) && 2668 !(spi->mode & SPI_RX_QUAD)) 2669 return -EINVAL; 2670 } 2671 } 2672 2673 message->status = -EINPROGRESS; 2674 2675 return 0; 2676 } 2677 2678 static int __spi_async(struct spi_device *spi, struct spi_message *message) 2679 { 2680 struct spi_master *master = spi->master; 2681 2682 message->spi = spi; 2683 2684 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async); 2685 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async); 2686 2687 trace_spi_message_submit(message); 2688 2689 return master->transfer(spi, message); 2690 } 2691 2692 /** 2693 * spi_async - asynchronous SPI transfer 2694 * @spi: device with which data will be exchanged 2695 * @message: describes the data transfers, including completion callback 2696 * Context: any (irqs may be blocked, etc) 2697 * 2698 * This call may be used in_irq and other contexts which can't sleep, 2699 * as well as from task contexts which can sleep. 2700 * 2701 * The completion callback is invoked in a context which can't sleep. 2702 * Before that invocation, the value of message->status is undefined. 2703 * When the callback is issued, message->status holds either zero (to 2704 * indicate complete success) or a negative error code. After that 2705 * callback returns, the driver which issued the transfer request may 2706 * deallocate the associated memory; it's no longer in use by any SPI 2707 * core or controller driver code. 2708 * 2709 * Note that although all messages to a spi_device are handled in 2710 * FIFO order, messages may go to different devices in other orders. 2711 * Some device might be higher priority, or have various "hard" access 2712 * time requirements, for example. 2713 * 2714 * On detection of any fault during the transfer, processing of 2715 * the entire message is aborted, and the device is deselected. 2716 * Until returning from the associated message completion callback, 2717 * no other spi_message queued to that device will be processed. 2718 * (This rule applies equally to all the synchronous transfer calls, 2719 * which are wrappers around this core asynchronous primitive.) 2720 * 2721 * Return: zero on success, else a negative error code. 
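 *
 * A minimal sketch (the foo_* names are hypothetical) of an asynchronous
 * submission; the completion callback runs in a context that must not sleep:
 *
 *	static void foo_msg_complete(void *context)
 *	{
 *		struct foo_device *foo = context;
 *
 *		complete(&foo->xfer_done);
 *	}
 *
 *	spi_message_init(&foo->msg);
 *	foo->xfer.tx_buf = foo->cmd;
 *	foo->xfer.len = sizeof(foo->cmd);
 *	spi_message_add_tail(&foo->xfer, &foo->msg);
 *	foo->msg.complete = foo_msg_complete;
 *	foo->msg.context = foo;
 *	ret = spi_async(foo->spi, &foo->msg);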
2722 */ 2723 int spi_async(struct spi_device *spi, struct spi_message *message) 2724 { 2725 struct spi_master *master = spi->master; 2726 int ret; 2727 unsigned long flags; 2728 2729 ret = __spi_validate(spi, message); 2730 if (ret != 0) 2731 return ret; 2732 2733 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2734 2735 if (master->bus_lock_flag) 2736 ret = -EBUSY; 2737 else 2738 ret = __spi_async(spi, message); 2739 2740 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2741 2742 return ret; 2743 } 2744 EXPORT_SYMBOL_GPL(spi_async); 2745 2746 /** 2747 * spi_async_locked - version of spi_async with exclusive bus usage 2748 * @spi: device with which data will be exchanged 2749 * @message: describes the data transfers, including completion callback 2750 * Context: any (irqs may be blocked, etc) 2751 * 2752 * This call may be used in_irq and other contexts which can't sleep, 2753 * as well as from task contexts which can sleep. 2754 * 2755 * The completion callback is invoked in a context which can't sleep. 2756 * Before that invocation, the value of message->status is undefined. 2757 * When the callback is issued, message->status holds either zero (to 2758 * indicate complete success) or a negative error code. After that 2759 * callback returns, the driver which issued the transfer request may 2760 * deallocate the associated memory; it's no longer in use by any SPI 2761 * core or controller driver code. 2762 * 2763 * Note that although all messages to a spi_device are handled in 2764 * FIFO order, messages may go to different devices in other orders. 2765 * Some device might be higher priority, or have various "hard" access 2766 * time requirements, for example. 2767 * 2768 * On detection of any fault during the transfer, processing of 2769 * the entire message is aborted, and the device is deselected. 2770 * Until returning from the associated message completion callback, 2771 * no other spi_message queued to that device will be processed. 2772 * (This rule applies equally to all the synchronous transfer calls, 2773 * which are wrappers around this core asynchronous primitive.) 2774 * 2775 * Return: zero on success, else a negative error code. 
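 *
 * A sketch of the intended usage pattern (setup_msg and data_msg are
 * hypothetical messages): the bus is reserved with spi_bus_lock(), the
 * atomic sequence of transfers is issued with the _locked calls, and the
 * bus is released again with spi_bus_unlock(), both documented below:
 *
 *	spi_bus_lock(spi->master);
 *	ret = spi_async_locked(spi, &setup_msg);
 *	...
 *	ret = spi_sync_locked(spi, &data_msg);
 *	spi_bus_unlock(spi->master);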
2776 */ 2777 int spi_async_locked(struct spi_device *spi, struct spi_message *message) 2778 { 2779 struct spi_master *master = spi->master; 2780 int ret; 2781 unsigned long flags; 2782 2783 ret = __spi_validate(spi, message); 2784 if (ret != 0) 2785 return ret; 2786 2787 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2788 2789 ret = __spi_async(spi, message); 2790 2791 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2792 2793 return ret; 2794 2795 } 2796 EXPORT_SYMBOL_GPL(spi_async_locked); 2797 2798 2799 int spi_flash_read(struct spi_device *spi, 2800 struct spi_flash_read_message *msg) 2801 2802 { 2803 struct spi_master *master = spi->master; 2804 struct device *rx_dev = NULL; 2805 int ret; 2806 2807 if ((msg->opcode_nbits == SPI_NBITS_DUAL || 2808 msg->addr_nbits == SPI_NBITS_DUAL) && 2809 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) 2810 return -EINVAL; 2811 if ((msg->opcode_nbits == SPI_NBITS_QUAD || 2812 msg->addr_nbits == SPI_NBITS_QUAD) && 2813 !(spi->mode & SPI_TX_QUAD)) 2814 return -EINVAL; 2815 if (msg->data_nbits == SPI_NBITS_DUAL && 2816 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) 2817 return -EINVAL; 2818 if (msg->data_nbits == SPI_NBITS_QUAD && 2819 !(spi->mode & SPI_RX_QUAD)) 2820 return -EINVAL; 2821 2822 if (master->auto_runtime_pm) { 2823 ret = pm_runtime_get_sync(master->dev.parent); 2824 if (ret < 0) { 2825 dev_err(&master->dev, "Failed to power device: %d\n", 2826 ret); 2827 return ret; 2828 } 2829 } 2830 2831 mutex_lock(&master->bus_lock_mutex); 2832 mutex_lock(&master->io_mutex); 2833 if (master->dma_rx) { 2834 rx_dev = master->dma_rx->device->dev; 2835 ret = spi_map_buf(master, rx_dev, &msg->rx_sg, 2836 msg->buf, msg->len, 2837 DMA_FROM_DEVICE); 2838 if (!ret) 2839 msg->cur_msg_mapped = true; 2840 } 2841 ret = master->spi_flash_read(spi, msg); 2842 if (msg->cur_msg_mapped) 2843 spi_unmap_buf(master, rx_dev, &msg->rx_sg, 2844 DMA_FROM_DEVICE); 2845 mutex_unlock(&master->io_mutex); 2846 mutex_unlock(&master->bus_lock_mutex); 2847 2848 if (master->auto_runtime_pm) 2849 pm_runtime_put(master->dev.parent); 2850 2851 return ret; 2852 } 2853 EXPORT_SYMBOL_GPL(spi_flash_read); 2854 2855 /*-------------------------------------------------------------------------*/ 2856 2857 /* Utility methods for SPI master protocol drivers, layered on 2858 * top of the core. Some other utility methods are defined as 2859 * inline functions. 2860 */ 2861 2862 static void spi_complete(void *arg) 2863 { 2864 complete(arg); 2865 } 2866 2867 static int __spi_sync(struct spi_device *spi, struct spi_message *message) 2868 { 2869 DECLARE_COMPLETION_ONSTACK(done); 2870 int status; 2871 struct spi_master *master = spi->master; 2872 unsigned long flags; 2873 2874 status = __spi_validate(spi, message); 2875 if (status != 0) 2876 return status; 2877 2878 message->complete = spi_complete; 2879 message->context = &done; 2880 message->spi = spi; 2881 2882 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync); 2883 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync); 2884 2885 /* If we're not using the legacy transfer method then we will 2886 * try to transfer in the calling context so special case. 2887 * This code would be less tricky if we could remove the 2888 * support for driver implemented message queues. 
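 * (i.e. masters that still provide their own ->transfer method instead
 * of using the core message queue)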
2889 */ 2890 if (master->transfer == spi_queued_transfer) { 2891 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2892 2893 trace_spi_message_submit(message); 2894 2895 status = __spi_queued_transfer(spi, message, false); 2896 2897 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 2898 } else { 2899 status = spi_async_locked(spi, message); 2900 } 2901 2902 if (status == 0) { 2903 /* Push out the messages in the calling context if we 2904 * can. 2905 */ 2906 if (master->transfer == spi_queued_transfer) { 2907 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, 2908 spi_sync_immediate); 2909 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, 2910 spi_sync_immediate); 2911 __spi_pump_messages(master, false); 2912 } 2913 2914 wait_for_completion(&done); 2915 status = message->status; 2916 } 2917 message->context = NULL; 2918 return status; 2919 } 2920 2921 /** 2922 * spi_sync - blocking/synchronous SPI data transfers 2923 * @spi: device with which data will be exchanged 2924 * @message: describes the data transfers 2925 * Context: can sleep 2926 * 2927 * This call may only be used from a context that may sleep. The sleep 2928 * is non-interruptible, and has no timeout. Low-overhead controller 2929 * drivers may DMA directly into and out of the message buffers. 2930 * 2931 * Note that the SPI device's chip select is active during the message, 2932 * and then is normally disabled between messages. Drivers for some 2933 * frequently-used devices may want to minimize costs of selecting a chip, 2934 * by leaving it selected in anticipation that the next message will go 2935 * to the same chip. (That may increase power usage.) 2936 * 2937 * Also, the caller is guaranteeing that the memory associated with the 2938 * message will not be freed before this call returns. 2939 * 2940 * Return: zero on success, else a negative error code. 2941 */ 2942 int spi_sync(struct spi_device *spi, struct spi_message *message) 2943 { 2944 int ret; 2945 2946 mutex_lock(&spi->master->bus_lock_mutex); 2947 ret = __spi_sync(spi, message); 2948 mutex_unlock(&spi->master->bus_lock_mutex); 2949 2950 return ret; 2951 } 2952 EXPORT_SYMBOL_GPL(spi_sync); 2953 2954 /** 2955 * spi_sync_locked - version of spi_sync with exclusive bus usage 2956 * @spi: device with which data will be exchanged 2957 * @message: describes the data transfers 2958 * Context: can sleep 2959 * 2960 * This call may only be used from a context that may sleep. The sleep 2961 * is non-interruptible, and has no timeout. Low-overhead controller 2962 * drivers may DMA directly into and out of the message buffers. 2963 * 2964 * This call should be used by drivers that require exclusive access to the 2965 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must 2966 * be released by a spi_bus_unlock call when the exclusive access is over. 2967 * 2968 * Return: zero on success, else a negative error code. 2969 */ 2970 int spi_sync_locked(struct spi_device *spi, struct spi_message *message) 2971 { 2972 return __spi_sync(spi, message); 2973 } 2974 EXPORT_SYMBOL_GPL(spi_sync_locked); 2975 2976 /** 2977 * spi_bus_lock - obtain a lock for exclusive SPI bus usage 2978 * @master: SPI bus master that should be locked for exclusive bus access 2979 * Context: can sleep 2980 * 2981 * This call may only be used from a context that may sleep. The sleep 2982 * is non-interruptible, and has no timeout. 2983 * 2984 * This call should be used by drivers that require exclusive access to the 2985 * SPI bus. 
The SPI bus must be released by a spi_bus_unlock call when the 2986 * exclusive access is over. Data transfer must be done by spi_sync_locked 2987 * and spi_async_locked calls when the SPI bus lock is held. 2988 * 2989 * Return: always zero. 2990 */ 2991 int spi_bus_lock(struct spi_master *master) 2992 { 2993 unsigned long flags; 2994 2995 mutex_lock(&master->bus_lock_mutex); 2996 2997 spin_lock_irqsave(&master->bus_lock_spinlock, flags); 2998 master->bus_lock_flag = 1; 2999 spin_unlock_irqrestore(&master->bus_lock_spinlock, flags); 3000 3001 /* mutex remains locked until spi_bus_unlock is called */ 3002 3003 return 0; 3004 } 3005 EXPORT_SYMBOL_GPL(spi_bus_lock); 3006 3007 /** 3008 * spi_bus_unlock - release the lock for exclusive SPI bus usage 3009 * @master: SPI bus master that was locked for exclusive bus access 3010 * Context: can sleep 3011 * 3012 * This call may only be used from a context that may sleep. The sleep 3013 * is non-interruptible, and has no timeout. 3014 * 3015 * This call releases an SPI bus lock previously obtained by an spi_bus_lock 3016 * call. 3017 * 3018 * Return: always zero. 3019 */ 3020 int spi_bus_unlock(struct spi_master *master) 3021 { 3022 master->bus_lock_flag = 0; 3023 3024 mutex_unlock(&master->bus_lock_mutex); 3025 3026 return 0; 3027 } 3028 EXPORT_SYMBOL_GPL(spi_bus_unlock); 3029 3030 /* portable code must never pass more than 32 bytes */ 3031 #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) 3032 3033 static u8 *buf; 3034 3035 /** 3036 * spi_write_then_read - SPI synchronous write followed by read 3037 * @spi: device with which data will be exchanged 3038 * @txbuf: data to be written (need not be dma-safe) 3039 * @n_tx: size of txbuf, in bytes 3040 * @rxbuf: buffer into which data will be read (need not be dma-safe) 3041 * @n_rx: size of rxbuf, in bytes 3042 * Context: can sleep 3043 * 3044 * This performs a half duplex MicroWire style transaction with the 3045 * device, sending txbuf and then reading rxbuf. The return value 3046 * is zero for success, else a negative errno status code. 3047 * This call may only be used from a context that may sleep. 3048 * 3049 * Parameters to this routine are always copied using a small buffer; 3050 * portable code should never use this for more than 32 bytes. 3051 * Performance-sensitive or bulk transfer code should instead use 3052 * spi_{async,sync}() calls with dma-safe buffers. 3053 * 3054 * Return: zero on success, else a negative error code. 3055 */ 3056 int spi_write_then_read(struct spi_device *spi, 3057 const void *txbuf, unsigned n_tx, 3058 void *rxbuf, unsigned n_rx) 3059 { 3060 static DEFINE_MUTEX(lock); 3061 3062 int status; 3063 struct spi_message message; 3064 struct spi_transfer x[2]; 3065 u8 *local_buf; 3066 3067 /* Use preallocated DMA-safe buffer if we can. We can't avoid 3068 * copying here, (as a pure convenience thing), but we can 3069 * keep heap costs out of the hot path unless someone else is 3070 * using the pre-allocated buffer or the transfer is too large. 
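 * (mutex_trylock() is used so a contended buffer never blocks a caller;
 * we simply fall back to a temporary heap allocation instead)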
3071 */ 3072 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { 3073 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), 3074 GFP_KERNEL | GFP_DMA); 3075 if (!local_buf) 3076 return -ENOMEM; 3077 } else { 3078 local_buf = buf; 3079 } 3080 3081 spi_message_init(&message); 3082 memset(x, 0, sizeof(x)); 3083 if (n_tx) { 3084 x[0].len = n_tx; 3085 spi_message_add_tail(&x[0], &message); 3086 } 3087 if (n_rx) { 3088 x[1].len = n_rx; 3089 spi_message_add_tail(&x[1], &message); 3090 } 3091 3092 memcpy(local_buf, txbuf, n_tx); 3093 x[0].tx_buf = local_buf; 3094 x[1].rx_buf = local_buf + n_tx; 3095 3096 /* do the i/o */ 3097 status = spi_sync(spi, &message); 3098 if (status == 0) 3099 memcpy(rxbuf, x[1].rx_buf, n_rx); 3100 3101 if (x[0].tx_buf == buf) 3102 mutex_unlock(&lock); 3103 else 3104 kfree(local_buf); 3105 3106 return status; 3107 } 3108 EXPORT_SYMBOL_GPL(spi_write_then_read); 3109 3110 /*-------------------------------------------------------------------------*/ 3111 3112 #if IS_ENABLED(CONFIG_OF_DYNAMIC) 3113 static int __spi_of_device_match(struct device *dev, void *data) 3114 { 3115 return dev->of_node == data; 3116 } 3117 3118 /* must call put_device() when done with returned spi_device device */ 3119 static struct spi_device *of_find_spi_device_by_node(struct device_node *node) 3120 { 3121 struct device *dev = bus_find_device(&spi_bus_type, NULL, node, 3122 __spi_of_device_match); 3123 return dev ? to_spi_device(dev) : NULL; 3124 } 3125 3126 static int __spi_of_master_match(struct device *dev, const void *data) 3127 { 3128 return dev->of_node == data; 3129 } 3130 3131 /* the spi masters are not using spi_bus, so we find it with another way */ 3132 static struct spi_master *of_find_spi_master_by_node(struct device_node *node) 3133 { 3134 struct device *dev; 3135 3136 dev = class_find_device(&spi_master_class, NULL, node, 3137 __spi_of_master_match); 3138 if (!dev) 3139 return NULL; 3140 3141 /* reference got in class_find_device */ 3142 return container_of(dev, struct spi_master, dev); 3143 } 3144 3145 static int of_spi_notify(struct notifier_block *nb, unsigned long action, 3146 void *arg) 3147 { 3148 struct of_reconfig_data *rd = arg; 3149 struct spi_master *master; 3150 struct spi_device *spi; 3151 3152 switch (of_reconfig_get_state_change(action, arg)) { 3153 case OF_RECONFIG_CHANGE_ADD: 3154 master = of_find_spi_master_by_node(rd->dn->parent); 3155 if (master == NULL) 3156 return NOTIFY_OK; /* not for us */ 3157 3158 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { 3159 put_device(&master->dev); 3160 return NOTIFY_OK; 3161 } 3162 3163 spi = of_register_spi_device(master, rd->dn); 3164 put_device(&master->dev); 3165 3166 if (IS_ERR(spi)) { 3167 pr_err("%s: failed to create for '%s'\n", 3168 __func__, rd->dn->full_name); 3169 of_node_clear_flag(rd->dn, OF_POPULATED); 3170 return notifier_from_errno(PTR_ERR(spi)); 3171 } 3172 break; 3173 3174 case OF_RECONFIG_CHANGE_REMOVE: 3175 /* already depopulated? */ 3176 if (!of_node_check_flag(rd->dn, OF_POPULATED)) 3177 return NOTIFY_OK; 3178 3179 /* find our device by node */ 3180 spi = of_find_spi_device_by_node(rd->dn); 3181 if (spi == NULL) 3182 return NOTIFY_OK; /* no? 
not meant for us */ 3183 3184 /* unregister takes one ref away */ 3185 spi_unregister_device(spi); 3186 3187 /* and put the reference of the find */ 3188 put_device(&spi->dev); 3189 break; 3190 } 3191 3192 return NOTIFY_OK; 3193 } 3194 3195 static struct notifier_block spi_of_notifier = { 3196 .notifier_call = of_spi_notify, 3197 }; 3198 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 3199 extern struct notifier_block spi_of_notifier; 3200 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ 3201 3202 #if IS_ENABLED(CONFIG_ACPI) 3203 static int spi_acpi_master_match(struct device *dev, const void *data) 3204 { 3205 return ACPI_COMPANION(dev->parent) == data; 3206 } 3207 3208 static int spi_acpi_device_match(struct device *dev, void *data) 3209 { 3210 return ACPI_COMPANION(dev) == data; 3211 } 3212 3213 static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev) 3214 { 3215 struct device *dev; 3216 3217 dev = class_find_device(&spi_master_class, NULL, adev, 3218 spi_acpi_master_match); 3219 if (!dev) 3220 return NULL; 3221 3222 return container_of(dev, struct spi_master, dev); 3223 } 3224 3225 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) 3226 { 3227 struct device *dev; 3228 3229 dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match); 3230 3231 return dev ? to_spi_device(dev) : NULL; 3232 } 3233 3234 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value, 3235 void *arg) 3236 { 3237 struct acpi_device *adev = arg; 3238 struct spi_master *master; 3239 struct spi_device *spi; 3240 3241 switch (value) { 3242 case ACPI_RECONFIG_DEVICE_ADD: 3243 master = acpi_spi_find_master_by_adev(adev->parent); 3244 if (!master) 3245 break; 3246 3247 acpi_register_spi_device(master, adev); 3248 put_device(&master->dev); 3249 break; 3250 case ACPI_RECONFIG_DEVICE_REMOVE: 3251 if (!acpi_device_enumerated(adev)) 3252 break; 3253 3254 spi = acpi_spi_find_device_by_adev(adev); 3255 if (!spi) 3256 break; 3257 3258 spi_unregister_device(spi); 3259 put_device(&spi->dev); 3260 break; 3261 } 3262 3263 return NOTIFY_OK; 3264 } 3265 3266 static struct notifier_block spi_acpi_notifier = { 3267 .notifier_call = acpi_spi_notify, 3268 }; 3269 #else 3270 extern struct notifier_block spi_acpi_notifier; 3271 #endif 3272 3273 static int __init spi_init(void) 3274 { 3275 int status; 3276 3277 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); 3278 if (!buf) { 3279 status = -ENOMEM; 3280 goto err0; 3281 } 3282 3283 status = bus_register(&spi_bus_type); 3284 if (status < 0) 3285 goto err1; 3286 3287 status = class_register(&spi_master_class); 3288 if (status < 0) 3289 goto err2; 3290 3291 if (IS_ENABLED(CONFIG_OF_DYNAMIC)) 3292 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); 3293 if (IS_ENABLED(CONFIG_ACPI)) 3294 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier)); 3295 3296 return 0; 3297 3298 err2: 3299 bus_unregister(&spi_bus_type); 3300 err1: 3301 kfree(buf); 3302 buf = NULL; 3303 err0: 3304 return status; 3305 } 3306 3307 /* board_info is normally registered in arch_initcall(), 3308 * but even essential drivers wait till later 3309 * 3310 * REVISIT only boardinfo really needs static linking. the rest (device and 3311 * driver registration) _could_ be dynamically linked (modular) ... costs 3312 * include needing to have boardinfo data structures be much more public. 3313 */ 3314 postcore_initcall(spi_init); 3315 3316
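/*
 * An illustrative sketch (not part of this file) of the boardinfo
 * registration mentioned above, as a board file might do it around
 * arch_initcall() time; the "foo" modalias and the numeric values are
 * hypothetical:
 *
 *	static struct spi_board_info foo_board_info[] __initdata = {
 *		{
 *			.modalias	= "foo",
 *			.max_speed_hz	= 1000000,
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	static int __init foo_board_init(void)
 *	{
 *		return spi_register_board_info(foo_board_info,
 *					       ARRAY_SIZE(foo_board_info));
 *	}
 *	arch_initcall(foo_board_init);
 */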