// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/acpi.h>
#include <linux/cache.h>
#include <linux/clk/clk-conf.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <uapi/linux/sched/types.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	free_percpu(spi->pcpu_statistics);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);
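
/*
 * For example (illustrative device): for a chip bound with modalias
 * "at25", userspace reading the attribute above would typically see
 *
 *	$ cat /sys/bus/spi/devices/spi0.0/modalias
 *	spi:at25
 *
 * which udev/modprobe can map back to a driver via its spi: alias table.
 */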

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = driver_set_override(dev, &spi->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
	struct spi_statistics __percpu *pcpu_stats;

	if (dev)
		pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
	else
		pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);

	if (pcpu_stats) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct spi_statistics *stat;

			stat = per_cpu_ptr(pcpu_stats, cpu);
			u64_stats_init(&stat->syncp);
		}
	}
	return pcpu_stats;
}

static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat,
				   char *buf, size_t offset)
{
	u64 val = 0;
	int i;

	for_each_possible_cpu(i) {
		const struct spi_statistics *pcpu_stats;
		u64_stats_t *field;
		unsigned int start;
		u64 inc;

		pcpu_stats = per_cpu_ptr(stat, i);
		field = (void *)pcpu_stats + offset;
		do {
			start = u64_stats_fetch_begin(&pcpu_stats->syncp);
			inc = u64_stats_read(field);
		} while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
		val += inc;
	}
	return sysfs_emit(buf, "%llu\n", val);
}

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr, \
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field)			\
static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
					    char *buf)			\
{									\
	return spi_emit_pcpu_stats(stat, buf,				\
				   offsetof(struct spi_statistics, field)); \
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field)					\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field)
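
/*
 * For reference, SPI_STATISTICS_SHOW(messages) below expands (via
 * SPI_STATISTICS_SHOW_NAME and SPI_STATISTICS_ATTRS) into
 * spi_statistics_messages_show(), spi_controller_messages_show(),
 * spi_device_messages_show() and the matching read-only attributes
 * dev_attr_spi_controller_messages / dev_attr_spi_device_messages,
 * all backed by a sysfs file named "messages".
 */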

SPI_STATISTICS_SHOW(messages);
SPI_STATISTICS_SHOW(transfers);
SPI_STATISTICS_SHOW(errors);
SPI_STATISTICS_SHOW(timedout);

SPI_STATISTICS_SHOW(spi_sync);
SPI_STATISTICS_SHOW(spi_sync_immediate);
SPI_STATISTICS_SHOW(spi_async);

SPI_STATISTICS_SHOW(bytes);
SPI_STATISTICS_SHOW(bytes_rx);
SPI_STATISTICS_SHOW(bytes_tx);

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name = "statistics",
	.attrs = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name = "statistics",
	.attrs = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};
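
/*
 * These groups surface in sysfs as, for example (illustrative names),
 *
 *	/sys/class/spi_master/spi0/statistics/messages
 *
 * for a controller and
 *
 *	/sys/bus/spi/devices/spi0.0/statistics/bytes_tx
 *
 * for an individual device.
 */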

static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
					      struct spi_transfer *xfer,
					      struct spi_controller *ctlr)
{
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
	struct spi_statistics *stats;

	if (l2len < 0)
		l2len = 0;

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	u64_stats_inc(&stats->transfers);
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	u64_stats_add(&stats->bytes, xfer->len);
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		u64_stats_add(&stats->bytes_tx, xfer->len);
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		u64_stats_add(&stats->bytes_rx, xfer->len);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}

/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
	while (id->name[0]) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

const void *spi_get_device_match_data(const struct spi_device *sdev)
{
	const void *match;

	match = device_get_match_data(&sdev->dev);
	if (match)
		return match;

	return (const void *)spi_get_device_id(sdev)->driver_data;
}
EXPORT_SYMBOL_GPL(spi_get_device_match_data);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device *spi = to_spi_device(dev);
	const struct spi_driver *sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi->modalias);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device *spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	if (has_acpi_companion(dev) && spi->irq < 0) {
		struct acpi_device *adev = to_acpi_device_node(dev->fwnode);

		spi->irq = acpi_dev_gpio_irq_get(adev, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver *sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove)
		sdrv->remove(to_spi_device(dev));

	dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver *sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases, not of:
	 * modaliases, for DT; as a consequence, module autoloading won't
	 * work if we don't have a spi_device_id as well as a compatible
	 * string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				spi_id = spi_match_id(sdrv->id_table, of_name);
				if (spi_id)
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
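
/*
 * Example (hypothetical "foo" driver; all names are illustrative):
 * pairing an OF match table with a spi_device_id table, so the warning
 * above never fires and module autoloading works for DT-described chips:
 *
 *	static const struct spi_device_id foo_spi_ids[] = {
 *		{ "foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_spi_ids);
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "acme,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *		.id_table	= foo_spi_ids,
 *		.probe		= foo_probe,
 *	};
 *	module_spi_driver(foo_driver);
 */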

/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific. Similarly with SPI controller drivers.
 * Device registration normally belongs in board-specific init code, such
 * as arch/.../mach-.../board-YYY.c, along with other read-only (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process; also used to
 * protect objects of type struct idr.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately. This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * The caller is responsible for calling spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller. If the caller
 * needs to discard the spi_device without adding it, it should call
 * spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
	if (!spi->pcpu_statistics) {
		kfree(spi);
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->mode = ctlr->buswidth_override_bits;

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
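
/*
 * A minimal usage sketch (hypothetical caller; modalias, chip select
 * and speed are illustrative). If registration fails, the device must
 * be discarded with spi_dev_put():
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	strscpy(spi->modalias, "foo", sizeof(spi->modalias));
 *	spi_set_chipselect(spi, 0, 3);
 *	spi->max_speed_hz = 1000000;
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);
 *		return -ENODEV;
 *	}
 */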

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi_get_chipselect(spi, 0));
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->controller == new_spi->controller &&
	    spi_get_chipselect(spi, 0) == spi_get_chipselect(new_spi, 0))
		return -EBUSY;
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
			ctlr->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.
	 */
	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
			spi_get_chipselect(spi, 0));
		return status;
	}

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	if (ctlr->cs_gpiods)
		spi_set_csgpiod(spi, 0, ctlr->cs_gpiods[spi_get_chipselect(spi, 0)]);

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup. Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
			dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
			dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device. Devices allocated with
 * spi_alloc_device can be added onto the SPI bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	int status;

	mutex_lock(&ctlr->add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal, and it's not needed
 * after board init creates the hard-wired devices. Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device *proxy;
	int status;

	/*
	 * NOTE: caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	spi_set_chipselect(proxy, 0, chip->chip_select);
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table. Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined. We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
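
/*
 * Example (illustrative board file; modalias, bus number and speed are
 * hypothetical):
 *
 *	static struct spi_board_info board_spi_devs[] __initdata = {
 *		{
 *			.modalias	= "foo",
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *			.max_speed_hz	= 2000000,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 * and, from the board's early init code:
 *
 *	spi_register_board_info(board_spi_devs, ARRAY_SIZE(board_spi_devs));
 */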

/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi: the SPI device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size: size to alloc and return
 * @gfp: GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
			   size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}

/**
 * spi_res_free - free an SPI resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the SPI message
 * @res: the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all SPI resources for this message
 * @ctlr: the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}
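
/*
 * Sketch of the spi_res life cycle under the helpers above (my_release
 * and struct my_state are hypothetical; the transfer-splitting code
 * elsewhere in this file is a real user):
 *
 *	struct my_state *st = spi_res_alloc(spi, my_release,
 *					    sizeof(*st), GFP_KERNEL);
 *
 *	if (!st)
 *		return -ENOMEM;
 *	spi_res_add(msg, st);
 *
 * Once added, the resource is freed (and my_release() invoked) by
 * spi_res_release() when the message is finalized.
 */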

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && ((enable && spi->controller->last_cs == spi_get_chipselect(spi, 0)) ||
		       (!enable && spi->controller->last_cs != spi_get_chipselect(spi, 0))) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs = enable ? spi_get_chipselect(spi, 0) : -1;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if ((spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) && !activate)
		spi_delay_exec(&spi->cs_hold, NULL);

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi_get_csgpiod(spi, 0)) {
		if (!(spi->mode & SPI_NO_CS)) {
			/*
			 * Historically ACPI has had no means of expressing the
			 * GPIO polarity, so the SPISerialBus() resource defines
			 * it on a per-chip basis. In order to avoid a chain of
			 * negations, the GPIO polarity is considered to be
			 * Active High. Even for the cases when _DSD() is
			 * involved (in the updated versions of ACPI) the GPIO
			 * CS polarity must be defined Active High to avoid
			 * ambiguity. That's why we use enable, which takes
			 * SPI_CS_HIGH into account.
			 */
			if (has_acpi_companion(&spi->dev))
				gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), !enable);
			else
				/* Polarity handled by GPIO library */
				gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), activate);
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
			     struct sg_table *sgt, void *buf, size_t len,
			     enum dma_data_direction dir, unsigned long attrs)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
			       (unsigned long)buf < (PKMAP_BASE +
						     (LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sgtable(dev, sgt, dir, attrs);
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	return 0;
}

int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
}

static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
				struct device *dev, struct sg_table *sgt,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	if (sgt->orig_nents) {
		dma_unmap_sgtable(dev, sgt, dir, attrs);
		sg_free_table(sgt);
		sgt->orig_nents = 0;
		sgt->nents = 0;
	}
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	ret = -ENOMSG;
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync is done before each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
						(void *)xfer->tx_buf,
						xfer->len, DMA_TO_DEVICE,
						attrs);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
						xfer->rx_buf, xfer->len,
						DMA_FROM_DEVICE, attrs);
			if (ret != 0) {
				spi_unmap_buf_attrs(ctlr, tx_dev,
						    &xfer->tx_sg, DMA_TO_DEVICE,
						    attrs);

				return ret;
			}
		}
	}
	/* No transfer has been mapped, bail out with success */
	if (ret)
		return 0;

	ctlr->cur_rx_dma_dev = rx_dev;
	ctlr->cur_tx_dma_dev = tx_dev;
	ctlr->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;
	struct spi_transfer *xfer;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync has already been done after each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
				    DMA_FROM_DEVICE, attrs);
		spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
				    DMA_TO_DEVICE, attrs);
	}

	ctlr->cur_msg_mapped = false;

	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctlr,
				    struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (!ctlr->cur_msg_mapped)
		return;

	if (xfer->tx_sg.orig_nents)
		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	if (xfer->rx_sg.orig_nents)
		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
				 struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (!ctlr->cur_msg_mapped)
		return;

	if (xfer->rx_sg.orig_nents)
		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
	if (xfer->tx_sg.orig_nents)
		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctrl,
				    struct spi_transfer *xfer)
{
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
				 struct spi_transfer *xfer)
{
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original (NULL) value of tx_buf or rx_buf if
		 * they were replaced by the dummy buffers in spi_map_msg().
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * use respective multiplier, but before the division,
		 * otherwise we may get 0 for short transfers.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);
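
		/*
		 * For example (illustrative numbers): a 4096-byte transfer
		 * at 1 MHz yields 8 * 1000 * 4096 / 1000000 ~= 32 ms here,
		 * before the tolerance below is applied.
		 */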

		/*
		 * Double that and add 200 ms of tolerance; in case of
		 * overflow, fall back to the predefined maximum.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* Clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If the effective speed is unknown, approximate it by
		 * underestimating: use half of the requested Hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);
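
/*
 * Worked example (illustrative numbers): a delay of { .value = 4,
 * .unit = SPI_DELAY_UNIT_SCK } on a transfer running at an effective
 * 10 MHz converts to 4 * DIV_ROUND_UP(1000000000, 10000000) = 400 ns.
 */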

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* Return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}

void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
				       struct spi_transfer *xfer)
{
	_spi_transfer_cs_change_delay(msg, xfer);
}
EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);
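
/*
 * Usage sketch (field values illustrative): a client that needs 50 us of
 * chip-select de-assertion between two transfers in a message would set
 *
 *	xfer->cs_change = 1;
 *	xfer->cs_change_delay.value = 50;
 *	xfer->cs_change_delay.unit = SPI_DELAY_UNIT_USECS;
 *
 * and the core applies the delay between de-asserting and re-asserting
 * the chip select (see spi_transfer_one_message() below).
 */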

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation. It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;

	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_set_cs(msg->spi, !xfer->cs_off, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			spi_dma_sync_for_device(ctlr, xfer);
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				spi_dma_sync_for_cpu(ctlr, xfer);

				if (ctlr->cur_msg_mapped &&
				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}

			spi_dma_sync_for_cpu(ctlr, xfer);
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_set_cs(msg->spi, true, false);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			spi_set_cs(msg->spi, xfer->cs_off, false);
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
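
/*
 * A minimal sketch of the intended use from a controller driver's
 * completion interrupt (handler name hypothetical):
 *
 *	static irqreturn_t foo_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 *
 * A transfer_one() that returns a positive value tells the core to wait
 * for exactly this completion (see spi_transfer_one_message() above).
 */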

static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
	if (ctlr->auto_runtime_pm) {
		pm_runtime_mark_last_busy(ctlr->dev.parent);
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
}

static int __spi_pump_transfer_message(struct spi_controller *ctlr,
				       struct spi_message *msg, bool was_busy)
{
	struct spi_transfer *xfer;
	int ret;

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			return ret;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			return ret;
		}
	}

	trace_spi_message_start(msg);

	ret = spi_split_transfers_maxsize(ctlr, msg,
					  spi_max_transfer_size(msg->spi),
					  GFP_KERNEL | GFP_DMA);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		return ret;
	}

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			return ret;
		}
		msg->prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		return ret;
	}

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	/*
	 * A driver's implementation of transfer_one_message() must arrange
	 * for spi_finalize_current_message() to get called. Most drivers
	 * will do this in the calling context, but some don't. For those
	 * cases, a completion is used to guarantee that this function does
	 * not return until spi_finalize_current_message() is done accessing
	 * ctlr->cur_msg.
	 * The following two flags make it possible to opportunistically
	 * skip using the completion, since taking it involves expensive
	 * spin locks. In case of a race with the context that calls
	 * spi_finalize_current_message() the completion will always be
	 * used, due to strict ordering of these flags using barriers.
	 */
	WRITE_ONCE(ctlr->cur_msg_incomplete, true);
	WRITE_ONCE(ctlr->cur_msg_need_completion, false);
	reinit_completion(&ctlr->cur_msg_completion);
	smp_wmb(); /* Make these available to spi_finalize_current_message() */

	ret = ctlr->transfer_one_message(ctlr, msg);
	if (ret) {
		dev_err(&ctlr->dev,
			"failed to transfer one message from queue\n");
		return ret;
	}

	WRITE_ONCE(ctlr->cur_msg_need_completion, true);
	smp_mb(); /* See spi_finalize_current_message()... */
	if (READ_ONCE(ctlr->cur_msg_incomplete))
		wait_for_completion(&ctlr->cur_msg_completion);

	return 0;
}

/**
 * __spi_pump_messages - function which processes SPI message queue
 * @ctlr: controller to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any SPI message in the queue that
 * needs processing and, if so, calls out to the driver to initialize
 * hardware and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
__spi_pump_messages(struct spi_controller * ctlr,bool in_kthread)1752 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1753 {
1754 struct spi_message *msg;
1755 bool was_busy = false;
1756 unsigned long flags;
1757 int ret;
1758
1759 /* Take the I/O mutex */
1760 mutex_lock(&ctlr->io_mutex);
1761
1762 /* Lock queue */
1763 spin_lock_irqsave(&ctlr->queue_lock, flags);
1764
1765 /* Make sure we are not already running a message */
1766 if (ctlr->cur_msg)
1767 goto out_unlock;
1768
1769 /* Check if the queue is idle */
1770 if (list_empty(&ctlr->queue) || !ctlr->running) {
1771 if (!ctlr->busy)
1772 goto out_unlock;
1773
1774 /* Defer any non-atomic teardown to the thread */
1775 if (!in_kthread) {
1776 if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1777 !ctlr->unprepare_transfer_hardware) {
1778 spi_idle_runtime_pm(ctlr);
1779 ctlr->busy = false;
1780 ctlr->queue_empty = true;
1781 trace_spi_controller_idle(ctlr);
1782 } else {
1783 kthread_queue_work(ctlr->kworker,
1784 &ctlr->pump_messages);
1785 }
1786 goto out_unlock;
1787 }
1788
1789 ctlr->busy = false;
1790 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1791
1792 kfree(ctlr->dummy_rx);
1793 ctlr->dummy_rx = NULL;
1794 kfree(ctlr->dummy_tx);
1795 ctlr->dummy_tx = NULL;
1796 if (ctlr->unprepare_transfer_hardware &&
1797 ctlr->unprepare_transfer_hardware(ctlr))
1798 dev_err(&ctlr->dev,
1799 "failed to unprepare transfer hardware\n");
1800 spi_idle_runtime_pm(ctlr);
1801 trace_spi_controller_idle(ctlr);
1802
1803 spin_lock_irqsave(&ctlr->queue_lock, flags);
1804 ctlr->queue_empty = true;
1805 goto out_unlock;
1806 }
1807
1808 /* Extract head of queue */
1809 msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1810 ctlr->cur_msg = msg;
1811
1812 list_del_init(&msg->queue);
1813 if (ctlr->busy)
1814 was_busy = true;
1815 else
1816 ctlr->busy = true;
1817 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1818
1819 ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
1820 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1821
1822 ctlr->cur_msg = NULL;
1823 ctlr->fallback = false;
1824
1825 mutex_unlock(&ctlr->io_mutex);
1826
1827 /* Prod the scheduler in case transfer_one() was busy waiting */
1828 if (!ret)
1829 cond_resched();
1830 return;
1831
1832 out_unlock:
1833 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1834 mutex_unlock(&ctlr->io_mutex);
1835 }
1836
1837 /**
1838 * spi_pump_messages - kthread work function which processes spi message queue
1839 * @work: pointer to kthread work struct contained in the controller struct
1840 */
1841 static void spi_pump_messages(struct kthread_work *work)
1842 {
1843 struct spi_controller *ctlr =
1844 container_of(work, struct spi_controller, pump_messages);
1845
1846 __spi_pump_messages(ctlr, true);
1847 }
1848
1849 /**
1850 * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1851 * @ctlr: Pointer to the spi_controller structure of the driver
1852 * @xfer: Pointer to the transfer being timestamped
1853 * @progress: How many words (not bytes) have been transferred so far
1854 * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1855 * transfer, for less jitter in time measurement. Only compatible
1856  * with PIO drivers. If true, must be followed up with a call to
1857  * spi_take_timestamp_post() or the system will crash.
1858 * WARNING: for fully predictable results, the CPU frequency must
1859 * also be under control (governor).
1860 *
1861 * This is a helper for drivers to collect the beginning of the TX timestamp
1862 * for the requested byte from the SPI transfer. The frequency with which this
1863 * function must be called (once per word, once for the whole transfer, once
1864 * per batch of words etc) is arbitrary as long as the @tx buffer offset is
1865 * greater than or equal to the requested byte at the time of the call. The
1866 * timestamp is only taken once, at the first such call. It is assumed that
1867 * the driver advances its @tx buffer pointer monotonically.
1868 */
1869 void spi_take_timestamp_pre(struct spi_controller *ctlr,
1870 struct spi_transfer *xfer,
1871 size_t progress, bool irqs_off)
1872 {
1873 if (!xfer->ptp_sts)
1874 return;
1875
1876 if (xfer->timestamped)
1877 return;
1878
1879 if (progress > xfer->ptp_sts_word_pre)
1880 return;
1881
1882 /* Capture the resolution of the timestamp */
1883 xfer->ptp_sts_word_pre = progress;
1884
1885 if (irqs_off) {
1886 local_irq_save(ctlr->irq_flags);
1887 preempt_disable();
1888 }
1889
1890 ptp_read_system_prets(xfer->ptp_sts);
1891 }
1892 EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1893
1894 /**
1895 * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1896 * @ctlr: Pointer to the spi_controller structure of the driver
1897 * @xfer: Pointer to the transfer being timestamped
1898 * @progress: How many words (not bytes) have been transferred so far
1899 * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1900 *
1901 * This is a helper for drivers to collect the end of the TX timestamp for
1902 * the requested byte from the SPI transfer. Can be called with an arbitrary
1903 * frequency: only the first call where @tx exceeds or is equal to the
1904 * requested word will be timestamped.
1905 */
1906 void spi_take_timestamp_post(struct spi_controller *ctlr,
1907 struct spi_transfer *xfer,
1908 size_t progress, bool irqs_off)
1909 {
1910 if (!xfer->ptp_sts)
1911 return;
1912
1913 if (xfer->timestamped)
1914 return;
1915
1916 if (progress < xfer->ptp_sts_word_post)
1917 return;
1918
1919 ptp_read_system_postts(xfer->ptp_sts);
1920
1921 if (irqs_off) {
1922 local_irq_restore(ctlr->irq_flags);
1923 preempt_enable();
1924 }
1925
1926 /* Capture the resolution of the timestamp */
1927 xfer->ptp_sts_word_post = progress;
1928
1929 xfer->timestamped = 1;
1930 }
1931 EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
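
/*
 * Example: how a PIO driver's TX loop might use the two helpers above. An
 * illustrative sketch only; the priv layout, FIFO register and word
 * accessor are hypothetical and not part of this file.
 *
 *	for (i = 0; i < xfer->len / priv->bytes_per_word; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		writel(example_get_tx_word(priv, xfer, i),
 *		       priv->base + EXAMPLE_TX_FIFO);
 *		spi_take_timestamp_post(ctlr, xfer, i, false);
 *	}
 *
 * Only the calls that bracket the word requested via ptp_sts_word_pre and
 * ptp_sts_word_post actually record timestamps; the helpers return early
 * for all other iterations thanks to the checks at their top.
 */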
1932
1933 /**
1934 * spi_set_thread_rt - set the controller to pump at realtime priority
1935 * @ctlr: controller to boost priority of
1936 *
1937 * This can be called because the controller requested realtime priority
1938 * (by setting the ->rt value before calling spi_register_controller()) or
1939 * because a device on the bus said that its transfers needed realtime
1940 * priority.
1941 *
1942 * NOTE: at the moment if any device on a bus says it needs realtime then
1943 * the thread will be at realtime priority for all transfers on that
1944 * controller. If this eventually becomes a problem we may see if we can
1945 * find a way to boost the priority only temporarily during relevant
1946 * transfers.
1947 */
1948 static void spi_set_thread_rt(struct spi_controller *ctlr)
1949 {
1950 dev_info(&ctlr->dev,
1951 "will run message pump with realtime priority\n");
1952 sched_set_fifo(ctlr->kworker->task);
1953 }
1954
1955 static int spi_init_queue(struct spi_controller *ctlr)
1956 {
1957 ctlr->running = false;
1958 ctlr->busy = false;
1959 ctlr->queue_empty = true;
1960
1961 ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
1962 if (IS_ERR(ctlr->kworker)) {
1963 dev_err(&ctlr->dev, "failed to create message pump kworker\n");
1964 return PTR_ERR(ctlr->kworker);
1965 }
1966
1967 kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1968
1969 /*
1970 * Controller config will indicate if this controller should run the
1971 * message pump with high (realtime) priority to reduce the transfer
1972 * latency on the bus by minimising the delay between a transfer
1973 * request and the scheduling of the message pump thread. Without this
1974 * setting the message pump thread will remain at default priority.
1975 */
1976 if (ctlr->rt)
1977 spi_set_thread_rt(ctlr);
1978
1979 return 0;
1980 }
1981
1982 /**
1983 * spi_get_next_queued_message() - called by driver to check for queued
1984 * messages
1985 * @ctlr: the controller to check for queued messages
1986 *
1987 * If there are more messages in the queue, the next message is returned from
1988 * this call.
1989 *
1990 * Return: the next message in the queue, else NULL if the queue is empty.
1991 */
1992 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1993 {
1994 struct spi_message *next;
1995 unsigned long flags;
1996
1997 /* Get a pointer to the next message, if any */
1998 spin_lock_irqsave(&ctlr->queue_lock, flags);
1999 next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
2000 queue);
2001 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2002
2003 return next;
2004 }
2005 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
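
/*
 * Example: a driver that completes messages from interrupt context could
 * peek at the queue to decide whether to keep its hardware powered up
 * between messages. A sketch only; the helpers on priv are hypothetical.
 *
 *	if (spi_get_next_queued_message(ctlr))
 *		example_keep_hw_hot(priv);
 *	else
 *		example_relax_hw(priv);
 */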
2006
2007 /**
2008 * spi_finalize_current_message() - the current message is complete
2009 * @ctlr: the controller to return the message to
2010 *
2011 * Called by the driver to notify the core that the message in the front of the
2012 * queue is complete and can be removed from the queue.
2013 */
2014 void spi_finalize_current_message(struct spi_controller *ctlr)
2015 {
2016 struct spi_transfer *xfer;
2017 struct spi_message *mesg;
2018 int ret;
2019
2020 mesg = ctlr->cur_msg;
2021
2022 if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2023 list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
2024 ptp_read_system_postts(xfer->ptp_sts);
2025 xfer->ptp_sts_word_post = xfer->len;
2026 }
2027 }
2028
2029 if (unlikely(ctlr->ptp_sts_supported))
2030 list_for_each_entry(xfer, &mesg->transfers, transfer_list)
2031 WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
2032
2033 spi_unmap_msg(ctlr, mesg);
2034
2035 /*
2036 	 * In the prepare_message callback the SPI bus has the opportunity
2037 	 * to split a transfer into smaller chunks.
2038 *
2039 * Release the split transfers here since spi_map_msg() is done on
2040 * the split transfers.
2041 */
2042 spi_res_release(ctlr, mesg);
2043
2044 if (mesg->prepared && ctlr->unprepare_message) {
2045 ret = ctlr->unprepare_message(ctlr, mesg);
2046 if (ret) {
2047 dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2048 ret);
2049 }
2050 }
2051
2052 mesg->prepared = false;
2053
2054 WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2055 smp_mb(); /* See __spi_pump_transfer_message()... */
2056 if (READ_ONCE(ctlr->cur_msg_need_completion))
2057 complete(&ctlr->cur_msg_completion);
2058
2059 trace_spi_message_done(mesg);
2060
2061 mesg->state = NULL;
2062 if (mesg->complete)
2063 mesg->complete(mesg->context);
2064 }
2065 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
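
/*
 * Example: a driver whose transfer_one_message() kicks the hardware and
 * returns immediately can finalize from its completion interrupt instead.
 * A sketch only; the handler name is hypothetical.
 *
 *	static irqreturn_t example_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		(drain FIFOs, update ctlr->cur_msg->actual_length, then:)
 *		spi_finalize_current_message(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */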
2066
2067 static int spi_start_queue(struct spi_controller *ctlr)
2068 {
2069 unsigned long flags;
2070
2071 spin_lock_irqsave(&ctlr->queue_lock, flags);
2072
2073 if (ctlr->running || ctlr->busy) {
2074 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2075 return -EBUSY;
2076 }
2077
2078 ctlr->running = true;
2079 ctlr->cur_msg = NULL;
2080 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2081
2082 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2083
2084 return 0;
2085 }
2086
2087 static int spi_stop_queue(struct spi_controller *ctlr)
2088 {
2089 unsigned long flags;
2090 unsigned limit = 500;
2091 int ret = 0;
2092
2093 spin_lock_irqsave(&ctlr->queue_lock, flags);
2094
2095 /*
2096 * This is a bit lame, but is optimized for the common execution path.
2097 * A wait_queue on the ctlr->busy could be used, but then the common
2098 * execution path (pump_messages) would be required to call wake_up or
2099 * friends on every SPI message. Do this instead.
2100 */
2101 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
2102 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2103 usleep_range(10000, 11000);
2104 spin_lock_irqsave(&ctlr->queue_lock, flags);
2105 }
2106
2107 if (!list_empty(&ctlr->queue) || ctlr->busy)
2108 ret = -EBUSY;
2109 else
2110 ctlr->running = false;
2111
2112 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2113
2114 if (ret) {
2115 dev_warn(&ctlr->dev, "could not stop message queue\n");
2116 return ret;
2117 }
2118 return ret;
2119 }
2120
2121 static int spi_destroy_queue(struct spi_controller *ctlr)
2122 {
2123 int ret;
2124
2125 ret = spi_stop_queue(ctlr);
2126
2127 /*
2128 * kthread_flush_worker will block until all work is done.
2129 * If the reason that stop_queue timed out is that the work will never
2130 	 * finish, then it does no good to call flush/stop the thread, so
2131 	 * just return the error.
2132 */
2133 if (ret) {
2134 dev_err(&ctlr->dev, "problem destroying queue\n");
2135 return ret;
2136 }
2137
2138 kthread_destroy_worker(ctlr->kworker);
2139
2140 return 0;
2141 }
2142
2143 static int __spi_queued_transfer(struct spi_device *spi,
2144 struct spi_message *msg,
2145 bool need_pump)
2146 {
2147 struct spi_controller *ctlr = spi->controller;
2148 unsigned long flags;
2149
2150 spin_lock_irqsave(&ctlr->queue_lock, flags);
2151
2152 if (!ctlr->running) {
2153 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2154 return -ESHUTDOWN;
2155 }
2156 msg->actual_length = 0;
2157 msg->status = -EINPROGRESS;
2158
2159 list_add_tail(&msg->queue, &ctlr->queue);
2160 ctlr->queue_empty = false;
2161 if (!ctlr->busy && need_pump)
2162 kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2163
2164 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2165 return 0;
2166 }
2167
2168 /**
2169 * spi_queued_transfer - transfer function for queued transfers
2170 * @spi: SPI device which is requesting transfer
2171  * @msg: SPI message which is to be handled; it is queued onto the driver queue
2172 *
2173 * Return: zero on success, else a negative error code.
2174 */
2175 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2176 {
2177 return __spi_queued_transfer(spi, msg, true);
2178 }
2179
2180 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2181 {
2182 int ret;
2183
2184 ctlr->transfer = spi_queued_transfer;
2185 if (!ctlr->transfer_one_message)
2186 ctlr->transfer_one_message = spi_transfer_one_message;
2187
2188 /* Initialize and start queue */
2189 ret = spi_init_queue(ctlr);
2190 if (ret) {
2191 dev_err(&ctlr->dev, "problem initializing queue\n");
2192 goto err_init_queue;
2193 }
2194 ctlr->queued = true;
2195 ret = spi_start_queue(ctlr);
2196 if (ret) {
2197 dev_err(&ctlr->dev, "problem starting queue\n");
2198 goto err_start_queue;
2199 }
2200
2201 return 0;
2202
2203 err_start_queue:
2204 spi_destroy_queue(ctlr);
2205 err_init_queue:
2206 return ret;
2207 }
2208
2209 /**
2210  * spi_flush_queue - Send all pending messages in the queue from the caller's
2211 * context
2212 * @ctlr: controller to process queue for
2213 *
2214  * This should be used when one wants to ensure all pending messages have been
2215  * sent before doing something else. It is used by the spi-mem code to make
2216  * sure SPI memory operations do not preempt regular SPI transfers that have
2217  * been queued before the spi-mem operation.
2218 */
2219 void spi_flush_queue(struct spi_controller *ctlr)
2220 {
2221 if (ctlr->transfer == spi_queued_transfer)
2222 __spi_pump_messages(ctlr, false);
2223 }
2224
2225 /*-------------------------------------------------------------------------*/
2226
2227 #if defined(CONFIG_OF)
2228 static void of_spi_parse_dt_cs_delay(struct device_node *nc,
2229 struct spi_delay *delay, const char *prop)
2230 {
2231 u32 value;
2232
2233 if (!of_property_read_u32(nc, prop, &value)) {
2234 if (value > U16_MAX) {
2235 delay->value = DIV_ROUND_UP(value, 1000);
2236 delay->unit = SPI_DELAY_UNIT_USECS;
2237 } else {
2238 delay->value = value;
2239 delay->unit = SPI_DELAY_UNIT_NSECS;
2240 }
2241 }
2242 }
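
/*
 * Example: given the device-tree properties below, the 100 ns setup delay
 * fits in a u16 and is stored in SPI_DELAY_UNIT_NSECS, while the
 * 100000000 ns (100 ms) hold delay exceeds U16_MAX and is therefore
 * rounded up to 100000 and stored in SPI_DELAY_UNIT_USECS.
 *
 *	spi-cs-setup-delay-ns = <100>;
 *	spi-cs-hold-delay-ns = <100000000>;
 */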
2243
2244 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2245 struct device_node *nc)
2246 {
2247 u32 value;
2248 int rc;
2249
2250 /* Mode (clock phase/polarity/etc.) */
2251 if (of_property_read_bool(nc, "spi-cpha"))
2252 spi->mode |= SPI_CPHA;
2253 if (of_property_read_bool(nc, "spi-cpol"))
2254 spi->mode |= SPI_CPOL;
2255 if (of_property_read_bool(nc, "spi-3wire"))
2256 spi->mode |= SPI_3WIRE;
2257 if (of_property_read_bool(nc, "spi-lsb-first"))
2258 spi->mode |= SPI_LSB_FIRST;
2259 if (of_property_read_bool(nc, "spi-cs-high"))
2260 spi->mode |= SPI_CS_HIGH;
2261
2262 /* Device DUAL/QUAD mode */
2263 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2264 switch (value) {
2265 case 0:
2266 spi->mode |= SPI_NO_TX;
2267 break;
2268 case 1:
2269 break;
2270 case 2:
2271 spi->mode |= SPI_TX_DUAL;
2272 break;
2273 case 4:
2274 spi->mode |= SPI_TX_QUAD;
2275 break;
2276 case 8:
2277 spi->mode |= SPI_TX_OCTAL;
2278 break;
2279 default:
2280 dev_warn(&ctlr->dev,
2281 "spi-tx-bus-width %d not supported\n",
2282 value);
2283 break;
2284 }
2285 }
2286
2287 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2288 switch (value) {
2289 case 0:
2290 spi->mode |= SPI_NO_RX;
2291 break;
2292 case 1:
2293 break;
2294 case 2:
2295 spi->mode |= SPI_RX_DUAL;
2296 break;
2297 case 4:
2298 spi->mode |= SPI_RX_QUAD;
2299 break;
2300 case 8:
2301 spi->mode |= SPI_RX_OCTAL;
2302 break;
2303 default:
2304 dev_warn(&ctlr->dev,
2305 "spi-rx-bus-width %d not supported\n",
2306 value);
2307 break;
2308 }
2309 }
2310
2311 if (spi_controller_is_slave(ctlr)) {
2312 if (!of_node_name_eq(nc, "slave")) {
2313 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2314 nc);
2315 return -EINVAL;
2316 }
2317 return 0;
2318 }
2319
2320 /* Device address */
2321 rc = of_property_read_u32(nc, "reg", &value);
2322 if (rc) {
2323 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2324 nc, rc);
2325 return rc;
2326 }
2327 spi_set_chipselect(spi, 0, value);
2328
2329 /* Device speed */
2330 if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2331 spi->max_speed_hz = value;
2332
2333 /* Device CS delays */
2334 of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
2335 of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
2336 of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
2337
2338 return 0;
2339 }
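
/*
 * Example: a child node that this parser accepts. Illustrative only; the
 * compatible string and numbers are made up.
 *
 *	flash@0 {
 *		compatible = "example,spi-flash";
 *		reg = <0>;			// chip select 0
 *		spi-max-frequency = <20000000>;
 *		spi-cpha;			// clock phase -> SPI mode 1
 *		spi-tx-bus-width = <4>;		// sets SPI_TX_QUAD
 *	};
 */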
2340
2341 static struct spi_device *
2342 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2343 {
2344 struct spi_device *spi;
2345 int rc;
2346
2347 /* Alloc an spi_device */
2348 spi = spi_alloc_device(ctlr);
2349 if (!spi) {
2350 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2351 rc = -ENOMEM;
2352 goto err_out;
2353 }
2354
2355 /* Select device driver */
2356 rc = of_alias_from_compatible(nc, spi->modalias,
2357 sizeof(spi->modalias));
2358 if (rc < 0) {
2359 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2360 goto err_out;
2361 }
2362
2363 rc = of_spi_parse_dt(ctlr, spi, nc);
2364 if (rc)
2365 goto err_out;
2366
2367 /* Store a pointer to the node in the device structure */
2368 of_node_get(nc);
2369
2370 device_set_node(&spi->dev, of_fwnode_handle(nc));
2371
2372 /* Register the new device */
2373 rc = spi_add_device(spi);
2374 if (rc) {
2375 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2376 goto err_of_node_put;
2377 }
2378
2379 return spi;
2380
2381 err_of_node_put:
2382 of_node_put(nc);
2383 err_out:
2384 spi_dev_put(spi);
2385 return ERR_PTR(rc);
2386 }
2387
2388 /**
2389 * of_register_spi_devices() - Register child devices onto the SPI bus
2390 * @ctlr: Pointer to spi_controller device
2391 *
2392 * Registers an spi_device for each child node of controller node which
2393 * represents a valid SPI slave.
2394 */
2395 static void of_register_spi_devices(struct spi_controller *ctlr)
2396 {
2397 struct spi_device *spi;
2398 struct device_node *nc;
2399
2400 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2401 if (of_node_test_and_set_flag(nc, OF_POPULATED))
2402 continue;
2403 spi = of_register_spi_device(ctlr, nc);
2404 if (IS_ERR(spi)) {
2405 dev_warn(&ctlr->dev,
2406 "Failed to create SPI device for %pOF\n", nc);
2407 of_node_clear_flag(nc, OF_POPULATED);
2408 }
2409 }
2410 }
2411 #else
2412 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2413 #endif
2414
2415 /**
2416 * spi_new_ancillary_device() - Register ancillary SPI device
2417 * @spi: Pointer to the main SPI device registering the ancillary device
2418 * @chip_select: Chip Select of the ancillary device
2419 *
2420 * Register an ancillary SPI device; for example some chips have a chip-select
2421 * for normal device usage and another one for setup/firmware upload.
2422 *
2423  * This may only be called from the main SPI device's probe routine.
2424 *
2425  * Return: a pointer to the new device, or ERR_PTR() on error
2426 */
2427 struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2428 u8 chip_select)
2429 {
2430 struct spi_controller *ctlr = spi->controller;
2431 struct spi_device *ancillary;
2432 int rc = 0;
2433
2434 /* Alloc an spi_device */
2435 ancillary = spi_alloc_device(ctlr);
2436 if (!ancillary) {
2437 rc = -ENOMEM;
2438 goto err_out;
2439 }
2440
2441 strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2442
2443 /* Use provided chip-select for ancillary device */
2444 spi_set_chipselect(ancillary, 0, chip_select);
2445
2446 /* Take over SPI mode/speed from SPI main device */
2447 ancillary->max_speed_hz = spi->max_speed_hz;
2448 ancillary->mode = spi->mode;
2449
2450 WARN_ON(!mutex_is_locked(&ctlr->add_lock));
2451
2452 /* Register the new device */
2453 rc = __spi_add_device(ancillary);
2454 if (rc) {
2455 dev_err(&spi->dev, "failed to register ancillary device\n");
2456 goto err_out;
2457 }
2458
2459 return ancillary;
2460
2461 err_out:
2462 spi_dev_put(ancillary);
2463 return ERR_PTR(rc);
2464 }
2465 EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
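
/*
 * Example use from a main device's probe routine; a sketch only, with priv
 * being a hypothetical driver-private structure.
 *
 *	priv->cfg_spi = spi_new_ancillary_device(spi, 1);
 *	if (IS_ERR(priv->cfg_spi))
 *		return PTR_ERR(priv->cfg_spi);
 */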
2466
2467 #ifdef CONFIG_ACPI
2468 struct acpi_spi_lookup {
2469 struct spi_controller *ctlr;
2470 u32 max_speed_hz;
2471 u32 mode;
2472 int irq;
2473 u8 bits_per_word;
2474 u8 chip_select;
2475 int n;
2476 int index;
2477 };
2478
2479 static int acpi_spi_count(struct acpi_resource *ares, void *data)
2480 {
2481 struct acpi_resource_spi_serialbus *sb;
2482 int *count = data;
2483
2484 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2485 return 1;
2486
2487 sb = &ares->data.spi_serial_bus;
2488 if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2489 return 1;
2490
2491 *count = *count + 1;
2492
2493 return 1;
2494 }
2495
2496 /**
2497 * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2498 * @adev: ACPI device
2499 *
2500  * Return: the number of SpiSerialBus resources in the ACPI device's
2501  * resource list; or a negative error code.
2502 */
2503 int acpi_spi_count_resources(struct acpi_device *adev)
2504 {
2505 LIST_HEAD(r);
2506 int count = 0;
2507 int ret;
2508
2509 ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
2510 if (ret < 0)
2511 return ret;
2512
2513 acpi_dev_free_resource_list(&r);
2514
2515 return count;
2516 }
2517 EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
2518
2519 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2520 struct acpi_spi_lookup *lookup)
2521 {
2522 const union acpi_object *obj;
2523
2524 if (!x86_apple_machine)
2525 return;
2526
2527 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2528 && obj->buffer.length >= 4)
2529 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2530
2531 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2532 && obj->buffer.length == 8)
2533 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2534
2535 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2536 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2537 lookup->mode |= SPI_LSB_FIRST;
2538
2539 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2540 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2541 lookup->mode |= SPI_CPOL;
2542
2543 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2544 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
2545 lookup->mode |= SPI_CPHA;
2546 }
2547
2548 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
2549
2550 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2551 {
2552 struct acpi_spi_lookup *lookup = data;
2553 struct spi_controller *ctlr = lookup->ctlr;
2554
2555 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2556 struct acpi_resource_spi_serialbus *sb;
2557 acpi_handle parent_handle;
2558 acpi_status status;
2559
2560 sb = &ares->data.spi_serial_bus;
2561 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2562
2563 if (lookup->index != -1 && lookup->n++ != lookup->index)
2564 return 1;
2565
2566 status = acpi_get_handle(NULL,
2567 sb->resource_source.string_ptr,
2568 &parent_handle);
2569
2570 if (ACPI_FAILURE(status))
2571 return -ENODEV;
2572
2573 if (ctlr) {
2574 if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2575 return -ENODEV;
2576 } else {
2577 struct acpi_device *adev;
2578
2579 adev = acpi_fetch_acpi_dev(parent_handle);
2580 if (!adev)
2581 return -ENODEV;
2582
2583 ctlr = acpi_spi_find_controller_by_adev(adev);
2584 if (!ctlr)
2585 return -EPROBE_DEFER;
2586
2587 lookup->ctlr = ctlr;
2588 }
2589
2590 /*
2591 * ACPI DeviceSelection numbering is handled by the
2592 * host controller driver in Windows and can vary
2593 * from driver to driver. In Linux we always expect
2594 * 0 .. max - 1 so we need to ask the driver to
2595 * translate between the two schemes.
2596 */
2597 if (ctlr->fw_translate_cs) {
2598 int cs = ctlr->fw_translate_cs(ctlr,
2599 sb->device_selection);
2600 if (cs < 0)
2601 return cs;
2602 lookup->chip_select = cs;
2603 } else {
2604 lookup->chip_select = sb->device_selection;
2605 }
2606
2607 lookup->max_speed_hz = sb->connection_speed;
2608 lookup->bits_per_word = sb->data_bit_length;
2609
2610 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2611 lookup->mode |= SPI_CPHA;
2612 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2613 lookup->mode |= SPI_CPOL;
2614 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2615 lookup->mode |= SPI_CS_HIGH;
2616 }
2617 } else if (lookup->irq < 0) {
2618 struct resource r;
2619
2620 if (acpi_dev_resource_interrupt(ares, 0, &r))
2621 lookup->irq = r.start;
2622 }
2623
2624 /* Always tell the ACPI core to skip this resource */
2625 return 1;
2626 }
2627
2628 /**
2629 * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2630 * @ctlr: controller to which the spi device belongs
2631 * @adev: ACPI Device for the spi device
2632 * @index: Index of the spi resource inside the ACPI Node
2633 *
2634  * This should be used to allocate a new SPI device from an ACPI Device node.
2635 * The caller is responsible for calling spi_add_device to register the SPI device.
2636 *
2637 * If ctlr is set to NULL, the Controller for the SPI device will be looked up
2638 * using the resource.
2639 * If index is set to -1, index is not used.
2640 * Note: If index is -1, ctlr must be set.
2641 *
2642 * Return: a pointer to the new device, or ERR_PTR on error.
2643 */
2644 struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2645 struct acpi_device *adev,
2646 int index)
2647 {
2648 acpi_handle parent_handle = NULL;
2649 struct list_head resource_list;
2650 struct acpi_spi_lookup lookup = {};
2651 struct spi_device *spi;
2652 int ret;
2653
2654 if (!ctlr && index == -1)
2655 return ERR_PTR(-EINVAL);
2656
2657 lookup.ctlr = ctlr;
2658 lookup.irq = -1;
2659 lookup.index = index;
2660 lookup.n = 0;
2661
2662 INIT_LIST_HEAD(&resource_list);
2663 ret = acpi_dev_get_resources(adev, &resource_list,
2664 acpi_spi_add_resource, &lookup);
2665 acpi_dev_free_resource_list(&resource_list);
2666
2667 if (ret < 0)
2668 /* Found SPI in _CRS but it points to another controller */
2669 return ERR_PTR(ret);
2670
2671 if (!lookup.max_speed_hz &&
2672 ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2673 ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
2674 /* Apple does not use _CRS but nested devices for SPI slaves */
2675 acpi_spi_parse_apple_properties(adev, &lookup);
2676 }
2677
2678 if (!lookup.max_speed_hz)
2679 return ERR_PTR(-ENODEV);
2680
2681 spi = spi_alloc_device(lookup.ctlr);
2682 if (!spi) {
2683 dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2684 dev_name(&adev->dev));
2685 return ERR_PTR(-ENOMEM);
2686 }
2687
2688 ACPI_COMPANION_SET(&spi->dev, adev);
2689 spi->max_speed_hz = lookup.max_speed_hz;
2690 spi->mode |= lookup.mode;
2691 spi->irq = lookup.irq;
2692 spi->bits_per_word = lookup.bits_per_word;
2693 spi_set_chipselect(spi, 0, lookup.chip_select);
2694
2695 return spi;
2696 }
2697 EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
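
/*
 * Example: instantiating a device for every SpiSerialBus resource of an
 * ACPI node, letting the core look the controller up (ctlr == NULL, so a
 * non-negative index is mandatory). A sketch only; cleanup on
 * spi_add_device() failure is omitted for brevity.
 *
 *	int i, n = acpi_spi_count_resources(adev);
 *
 *	for (i = 0; i < n; i++) {
 *		struct spi_device *spi = acpi_spi_device_alloc(NULL, adev, i);
 *
 *		if (!IS_ERR(spi))
 *			spi_add_device(spi);
 *	}
 */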
2698
2699 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2700 struct acpi_device *adev)
2701 {
2702 struct spi_device *spi;
2703
2704 if (acpi_bus_get_status(adev) || !adev->status.present ||
2705 acpi_device_enumerated(adev))
2706 return AE_OK;
2707
2708 spi = acpi_spi_device_alloc(ctlr, adev, -1);
2709 if (IS_ERR(spi)) {
2710 if (PTR_ERR(spi) == -ENOMEM)
2711 return AE_NO_MEMORY;
2712 else
2713 return AE_OK;
2714 }
2715
2716 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2717 sizeof(spi->modalias));
2718
2719 acpi_device_set_enumerated(adev);
2720
2721 adev->power.flags.ignore_parent = true;
2722 if (spi_add_device(spi)) {
2723 adev->power.flags.ignore_parent = false;
2724 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2725 dev_name(&adev->dev));
2726 spi_dev_put(spi);
2727 }
2728
2729 return AE_OK;
2730 }
2731
2732 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2733 void *data, void **return_value)
2734 {
2735 struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
2736 struct spi_controller *ctlr = data;
2737
2738 if (!adev)
2739 return AE_OK;
2740
2741 return acpi_register_spi_device(ctlr, adev);
2742 }
2743
2744 #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
2745
2746 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2747 {
2748 acpi_status status;
2749 acpi_handle handle;
2750
2751 handle = ACPI_HANDLE(ctlr->dev.parent);
2752 if (!handle)
2753 return;
2754
2755 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2756 SPI_ACPI_ENUMERATE_MAX_DEPTH,
2757 acpi_spi_add_device, NULL, ctlr, NULL);
2758 if (ACPI_FAILURE(status))
2759 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2760 }
2761 #else
2762 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2763 #endif /* CONFIG_ACPI */
2764
2765 static void spi_controller_release(struct device *dev)
2766 {
2767 struct spi_controller *ctlr;
2768
2769 ctlr = container_of(dev, struct spi_controller, dev);
2770 kfree(ctlr);
2771 }
2772
2773 static struct class spi_master_class = {
2774 .name = "spi_master",
2775 .dev_release = spi_controller_release,
2776 .dev_groups = spi_master_groups,
2777 };
2778
2779 #ifdef CONFIG_SPI_SLAVE
2780 /**
2781 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2782 * controller
2783 * @spi: device used for the current transfer
2784 */
2785 int spi_slave_abort(struct spi_device *spi)
2786 {
2787 struct spi_controller *ctlr = spi->controller;
2788
2789 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2790 return ctlr->slave_abort(ctlr);
2791
2792 return -ENOTSUPP;
2793 }
2794 EXPORT_SYMBOL_GPL(spi_slave_abort);
2795
2796 int spi_target_abort(struct spi_device *spi)
2797 {
2798 struct spi_controller *ctlr = spi->controller;
2799
2800 if (spi_controller_is_target(ctlr) && ctlr->target_abort)
2801 return ctlr->target_abort(ctlr);
2802
2803 return -ENOTSUPP;
2804 }
2805 EXPORT_SYMBOL_GPL(spi_target_abort);
2806
2807 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2808 char *buf)
2809 {
2810 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2811 dev);
2812 struct device *child;
2813
2814 child = device_find_any_child(&ctlr->dev);
2815 return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
2816 }
2817
2818 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2819 const char *buf, size_t count)
2820 {
2821 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2822 dev);
2823 struct spi_device *spi;
2824 struct device *child;
2825 char name[32];
2826 int rc;
2827
2828 rc = sscanf(buf, "%31s", name);
2829 if (rc != 1 || !name[0])
2830 return -EINVAL;
2831
2832 child = device_find_any_child(&ctlr->dev);
2833 if (child) {
2834 /* Remove registered slave */
2835 device_unregister(child);
2836 put_device(child);
2837 }
2838
2839 if (strcmp(name, "(null)")) {
2840 /* Register new slave */
2841 spi = spi_alloc_device(ctlr);
2842 if (!spi)
2843 return -ENOMEM;
2844
2845 strscpy(spi->modalias, name, sizeof(spi->modalias));
2846
2847 rc = spi_add_device(spi);
2848 if (rc) {
2849 spi_dev_put(spi);
2850 return rc;
2851 }
2852 }
2853
2854 return count;
2855 }
2856
2857 static DEVICE_ATTR_RW(slave);
2858
2859 static struct attribute *spi_slave_attrs[] = {
2860 &dev_attr_slave.attr,
2861 NULL,
2862 };
2863
2864 static const struct attribute_group spi_slave_group = {
2865 .attrs = spi_slave_attrs,
2866 };
2867
2868 static const struct attribute_group *spi_slave_groups[] = {
2869 &spi_controller_statistics_group,
2870 &spi_slave_group,
2871 NULL,
2872 };
2873
2874 static struct class spi_slave_class = {
2875 .name = "spi_slave",
2876 .dev_release = spi_controller_release,
2877 .dev_groups = spi_slave_groups,
2878 };
2879 #else
2880 extern struct class spi_slave_class; /* dummy */
2881 #endif
2882
2883 /**
2884 * __spi_alloc_controller - allocate an SPI master or slave controller
2885 * @dev: the controller, possibly using the platform_bus
2886 * @size: how much zeroed driver-private data to allocate; the pointer to this
2887 * memory is in the driver_data field of the returned device, accessible
2888 * with spi_controller_get_devdata(); the memory is cacheline aligned;
2889 * drivers granting DMA access to portions of their private data need to
2890 * round up @size using ALIGN(size, dma_get_cache_alignment()).
2891 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2892 * slave (true) controller
2893 * Context: can sleep
2894 *
2895 * This call is used only by SPI controller drivers, which are the
2896 * only ones directly touching chip registers. It's how they allocate
2897 * an spi_controller structure, prior to calling spi_register_controller().
2898 *
2899 * This must be called from context that can sleep.
2900 *
2901 * The caller is responsible for assigning the bus number and initializing the
2902 * controller's methods before calling spi_register_controller(); and (after
2903 * errors adding the device) calling spi_controller_put() to prevent a memory
2904 * leak.
2905 *
2906 * Return: the SPI controller structure on success, else NULL.
2907 */
2908 struct spi_controller *__spi_alloc_controller(struct device *dev,
2909 unsigned int size, bool slave)
2910 {
2911 struct spi_controller *ctlr;
2912 size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
2913
2914 if (!dev)
2915 return NULL;
2916
2917 ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
2918 if (!ctlr)
2919 return NULL;
2920
2921 device_initialize(&ctlr->dev);
2922 INIT_LIST_HEAD(&ctlr->queue);
2923 spin_lock_init(&ctlr->queue_lock);
2924 spin_lock_init(&ctlr->bus_lock_spinlock);
2925 mutex_init(&ctlr->bus_lock_mutex);
2926 mutex_init(&ctlr->io_mutex);
2927 mutex_init(&ctlr->add_lock);
2928 ctlr->bus_num = -1;
2929 ctlr->num_chipselect = 1;
2930 ctlr->slave = slave;
2931 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2932 ctlr->dev.class = &spi_slave_class;
2933 else
2934 ctlr->dev.class = &spi_master_class;
2935 ctlr->dev.parent = dev;
2936 pm_suspend_ignore_children(&ctlr->dev, true);
2937 spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
2938
2939 return ctlr;
2940 }
2941 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
2942
2943 static void devm_spi_release_controller(struct device *dev, void *ctlr)
2944 {
2945 spi_controller_put(*(struct spi_controller **)ctlr);
2946 }
2947
2948 /**
2949 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
2950 * @dev: physical device of SPI controller
2951 * @size: how much zeroed driver-private data to allocate
2952 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
2953 * Context: can sleep
2954 *
2955 * Allocate an SPI controller and automatically release a reference on it
2956 * when @dev is unbound from its driver. Drivers are thus relieved from
2957 * having to call spi_controller_put().
2958 *
2959 * The arguments to this function are identical to __spi_alloc_controller().
2960 *
2961 * Return: the SPI controller structure on success, else NULL.
2962 */
2963 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
2964 unsigned int size,
2965 bool slave)
2966 {
2967 struct spi_controller **ptr, *ctlr;
2968
2969 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
2970 GFP_KERNEL);
2971 if (!ptr)
2972 return NULL;
2973
2974 ctlr = __spi_alloc_controller(dev, size, slave);
2975 if (ctlr) {
2976 ctlr->devm_allocated = true;
2977 *ptr = ctlr;
2978 devres_add(dev, ptr);
2979 } else {
2980 devres_free(ptr);
2981 }
2982
2983 return ctlr;
2984 }
2985 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
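
/*
 * Example: the usual pattern in a controller driver's probe(), via the
 * devm_spi_alloc_master() wrapper around this function. A sketch only;
 * the example_spi structure and callback are hypothetical.
 *
 *	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct example_spi));
 *	if (!ctlr)
 *		return -ENOMEM;
 *
 *	priv = spi_controller_get_devdata(ctlr);
 *	ctlr->transfer_one = example_spi_transfer_one;
 *	ctlr->num_chipselect = 4;
 *
 *	return devm_spi_register_controller(&pdev->dev, ctlr);
 */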
2986
2987 /**
2988 * spi_get_gpio_descs() - grab chip select GPIOs for the master
2989 * @ctlr: The SPI master to grab GPIO descriptors for
2990 */
2991 static int spi_get_gpio_descs(struct spi_controller *ctlr)
2992 {
2993 int nb, i;
2994 struct gpio_desc **cs;
2995 struct device *dev = &ctlr->dev;
2996 unsigned long native_cs_mask = 0;
2997 unsigned int num_cs_gpios = 0;
2998
2999 nb = gpiod_count(dev, "cs");
3000 if (nb < 0) {
3001 /* No GPIOs at all is fine, else return the error */
3002 if (nb == -ENOENT)
3003 return 0;
3004 return nb;
3005 }
3006
3007 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
3008
3009 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
3010 GFP_KERNEL);
3011 if (!cs)
3012 return -ENOMEM;
3013 ctlr->cs_gpiods = cs;
3014
3015 for (i = 0; i < nb; i++) {
3016 /*
3017 * Most chipselects are active low, the inverted
3018 * semantics are handled by special quirks in gpiolib,
3019 		 * so initializing them to GPIOD_OUT_LOW here means
3020 		 * "unasserted"; in most cases this will drive the physical
3021 * line high.
3022 */
3023 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
3024 GPIOD_OUT_LOW);
3025 if (IS_ERR(cs[i]))
3026 return PTR_ERR(cs[i]);
3027
3028 if (cs[i]) {
3029 /*
3030 * If we find a CS GPIO, name it after the device and
3031 * chip select line.
3032 */
3033 char *gpioname;
3034
3035 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
3036 dev_name(dev), i);
3037 if (!gpioname)
3038 return -ENOMEM;
3039 gpiod_set_consumer_name(cs[i], gpioname);
3040 num_cs_gpios++;
3041 continue;
3042 }
3043
3044 if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3045 dev_err(dev, "Invalid native chip select %d\n", i);
3046 return -EINVAL;
3047 }
3048 native_cs_mask |= BIT(i);
3049 }
3050
3051 ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3052
3053 if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
3054 ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3055 dev_err(dev, "No unused native chip select available\n");
3056 return -EINVAL;
3057 }
3058
3059 return 0;
3060 }
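
/*
 * Example: a cs-gpios property as consumed above. Entry 1 is the empty
 * specifier <0>, so devm_gpiod_get_index_optional() returns NULL for it
 * and chip select 1 stays on the native chip select, while lines 0 and 2
 * are driven as GPIOs. Illustrative only.
 *
 *	cs-gpios = <&gpioa 0 GPIO_ACTIVE_LOW>,
 *		   <0>,
 *		   <&gpioa 2 GPIO_ACTIVE_LOW>;
 */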
3061
3062 static int spi_controller_check_ops(struct spi_controller *ctlr)
3063 {
3064 /*
3065 	 * The controller may implement only the high-level SPI-memory-like
3066 	 * operations if it does not support regular SPI transfers, and this is
3067 	 * a valid use case.
3068 	 * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
3069 	 * one of the ->transfer_xxx() methods be implemented.
3070 */
3071 if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
3072 if (!ctlr->transfer && !ctlr->transfer_one &&
3073 !ctlr->transfer_one_message) {
3074 return -EINVAL;
3075 }
3076 }
3077
3078 return 0;
3079 }
3080
3081 /* Allocate dynamic bus number using Linux idr */
3082 static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
3083 {
3084 int id;
3085
3086 mutex_lock(&board_lock);
3087 id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL);
3088 mutex_unlock(&board_lock);
3089 if (WARN(id < 0, "couldn't get idr"))
3090 return id == -ENOSPC ? -EBUSY : id;
3091 ctlr->bus_num = id;
3092 return 0;
3093 }
3094
3095 /**
3096 * spi_register_controller - register SPI master or slave controller
3097 * @ctlr: initialized master, originally from spi_alloc_master() or
3098 * spi_alloc_slave()
3099 * Context: can sleep
3100 *
3101 * SPI controllers connect to their drivers using some non-SPI bus,
3102 * such as the platform bus. The final stage of probe() in that code
3103 * includes calling spi_register_controller() to hook up to this SPI bus glue.
3104 *
3105 * SPI controllers use board specific (often SOC specific) bus numbers,
3106 * and board-specific addressing for SPI devices combines those numbers
3107 * with chip select numbers. Since SPI does not directly support dynamic
3108 * device identification, boards need configuration tables telling which
3109 * chip is at which address.
3110 *
3111 * This must be called from context that can sleep. It returns zero on
3112 * success, else a negative error code (dropping the controller's refcount).
3113 * After a successful return, the caller is responsible for calling
3114 * spi_unregister_controller().
3115 *
3116 * Return: zero on success, else a negative error code.
3117 */
3118 int spi_register_controller(struct spi_controller *ctlr)
3119 {
3120 struct device *dev = ctlr->dev.parent;
3121 struct boardinfo *bi;
3122 int first_dynamic;
3123 int status;
3124
3125 if (!dev)
3126 return -ENODEV;
3127
3128 /*
3129 * Make sure all necessary hooks are implemented before registering
3130 * the SPI controller.
3131 */
3132 status = spi_controller_check_ops(ctlr);
3133 if (status)
3134 return status;
3135
3136 if (ctlr->bus_num < 0)
3137 ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
3138 if (ctlr->bus_num >= 0) {
3139 /* Devices with a fixed bus num must check-in with the num */
3140 status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
3141 if (status)
3142 return status;
3143 }
3144 if (ctlr->bus_num < 0) {
3145 first_dynamic = of_alias_get_highest_id("spi");
3146 if (first_dynamic < 0)
3147 first_dynamic = 0;
3148 else
3149 first_dynamic++;
3150
3151 status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
3152 if (status)
3153 return status;
3154 }
3155 ctlr->bus_lock_flag = 0;
3156 init_completion(&ctlr->xfer_completion);
3157 init_completion(&ctlr->cur_msg_completion);
3158 if (!ctlr->max_dma_len)
3159 ctlr->max_dma_len = INT_MAX;
3160
3161 /*
3162 * Register the device, then userspace will see it.
3163 * Registration fails if the bus ID is in use.
3164 */
3165 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3166
3167 if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
3168 status = spi_get_gpio_descs(ctlr);
3169 if (status)
3170 goto free_bus_id;
3171 /*
3172 * A controller using GPIO descriptors always
3173 * supports SPI_CS_HIGH if need be.
3174 */
3175 ctlr->mode_bits |= SPI_CS_HIGH;
3176 }
3177
3178 /*
3179 * Even if it's just one always-selected device, there must
3180 * be at least one chipselect.
3181 */
3182 if (!ctlr->num_chipselect) {
3183 status = -EINVAL;
3184 goto free_bus_id;
3185 }
3186
3187 /* Setting last_cs to -1 means no chip selected */
3188 ctlr->last_cs = -1;
3189
3190 status = device_add(&ctlr->dev);
3191 if (status < 0)
3192 goto free_bus_id;
3193 dev_dbg(dev, "registered %s %s\n",
3194 spi_controller_is_slave(ctlr) ? "slave" : "master",
3195 dev_name(&ctlr->dev));
3196
3197 /*
3198 * If we're using a queued driver, start the queue. Note that we don't
3199 * need the queueing logic if the driver is only supporting high-level
3200 * memory operations.
3201 */
3202 if (ctlr->transfer) {
3203 dev_info(dev, "controller is unqueued, this is deprecated\n");
3204 } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3205 status = spi_controller_initialize_queue(ctlr);
3206 if (status) {
3207 device_del(&ctlr->dev);
3208 goto free_bus_id;
3209 }
3210 }
3211 /* Add statistics */
3212 ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3213 if (!ctlr->pcpu_statistics) {
3214 dev_err(dev, "Error allocating per-cpu statistics\n");
3215 status = -ENOMEM;
3216 goto destroy_queue;
3217 }
3218
3219 mutex_lock(&board_lock);
3220 list_add_tail(&ctlr->list, &spi_controller_list);
3221 list_for_each_entry(bi, &board_list, list)
3222 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3223 mutex_unlock(&board_lock);
3224
3225 /* Register devices from the device tree and ACPI */
3226 of_register_spi_devices(ctlr);
3227 acpi_register_spi_devices(ctlr);
3228 return status;
3229
3230 destroy_queue:
3231 spi_destroy_queue(ctlr);
3232 free_bus_id:
3233 mutex_lock(&board_lock);
3234 idr_remove(&spi_master_idr, ctlr->bus_num);
3235 mutex_unlock(&board_lock);
3236 return status;
3237 }
3238 EXPORT_SYMBOL_GPL(spi_register_controller);
3239
3240 static void devm_spi_unregister(struct device *dev, void *res)
3241 {
3242 spi_unregister_controller(*(struct spi_controller **)res);
3243 }
3244
3245 /**
3246 * devm_spi_register_controller - register managed SPI master or slave
3247 * controller
3248 * @dev: device managing SPI controller
3249 * @ctlr: initialized controller, originally from spi_alloc_master() or
3250 * spi_alloc_slave()
3251 * Context: can sleep
3252 *
3253  * Register an SPI controller as with spi_register_controller(); it will
3254  * automatically be unregistered and freed when @dev is unbound.
3255 *
3256 * Return: zero on success, else a negative error code.
3257 */
3258 int devm_spi_register_controller(struct device *dev,
3259 struct spi_controller *ctlr)
3260 {
3261 struct spi_controller **ptr;
3262 int ret;
3263
3264 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
3265 if (!ptr)
3266 return -ENOMEM;
3267
3268 ret = spi_register_controller(ctlr);
3269 if (!ret) {
3270 *ptr = ctlr;
3271 devres_add(dev, ptr);
3272 } else {
3273 devres_free(ptr);
3274 }
3275
3276 return ret;
3277 }
3278 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
3279
3280 static int __unregister(struct device *dev, void *null)
3281 {
3282 spi_unregister_device(to_spi_device(dev));
3283 return 0;
3284 }
3285
3286 /**
3287 * spi_unregister_controller - unregister SPI master or slave controller
3288 * @ctlr: the controller being unregistered
3289 * Context: can sleep
3290 *
3291 * This call is used only by SPI controller drivers, which are the
3292 * only ones directly touching chip registers.
3293 *
3294 * This must be called from context that can sleep.
3295 *
3296 * Note that this function also drops a reference to the controller.
3297 */
3298 void spi_unregister_controller(struct spi_controller *ctlr)
3299 {
3300 struct spi_controller *found;
3301 int id = ctlr->bus_num;
3302
3303 /* Prevent addition of new devices, unregister existing ones */
3304 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3305 mutex_lock(&ctlr->add_lock);
3306
3307 device_for_each_child(&ctlr->dev, NULL, __unregister);
3308
3309 /* First make sure that this controller was ever added */
3310 mutex_lock(&board_lock);
3311 found = idr_find(&spi_master_idr, id);
3312 mutex_unlock(&board_lock);
3313 if (ctlr->queued) {
3314 if (spi_destroy_queue(ctlr))
3315 dev_err(&ctlr->dev, "queue remove failed\n");
3316 }
3317 mutex_lock(&board_lock);
3318 list_del(&ctlr->list);
3319 mutex_unlock(&board_lock);
3320
3321 device_del(&ctlr->dev);
3322
3323 /* Free bus id */
3324 mutex_lock(&board_lock);
3325 if (found == ctlr)
3326 idr_remove(&spi_master_idr, id);
3327 mutex_unlock(&board_lock);
3328
3329 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3330 mutex_unlock(&ctlr->add_lock);
3331
3332 /*
3333 * Release the last reference on the controller if its driver
3334 * has not yet been converted to devm_spi_alloc_master/slave().
3335 */
3336 if (!ctlr->devm_allocated)
3337 put_device(&ctlr->dev);
3338 }
3339 EXPORT_SYMBOL_GPL(spi_unregister_controller);
3340
3341 static inline int __spi_check_suspended(const struct spi_controller *ctlr)
3342 {
3343 return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
3344 }
3345
3346 static inline void __spi_mark_suspended(struct spi_controller *ctlr)
3347 {
3348 mutex_lock(&ctlr->bus_lock_mutex);
3349 ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
3350 mutex_unlock(&ctlr->bus_lock_mutex);
3351 }
3352
3353 static inline void __spi_mark_resumed(struct spi_controller *ctlr)
3354 {
3355 mutex_lock(&ctlr->bus_lock_mutex);
3356 ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
3357 mutex_unlock(&ctlr->bus_lock_mutex);
3358 }
3359
3360 int spi_controller_suspend(struct spi_controller *ctlr)
3361 {
3362 int ret = 0;
3363
3364 /* Basically no-ops for non-queued controllers */
3365 if (ctlr->queued) {
3366 ret = spi_stop_queue(ctlr);
3367 if (ret)
3368 dev_err(&ctlr->dev, "queue stop failed\n");
3369 }
3370
3371 __spi_mark_suspended(ctlr);
3372 return ret;
3373 }
3374 EXPORT_SYMBOL_GPL(spi_controller_suspend);
3375
3376 int spi_controller_resume(struct spi_controller *ctlr)
3377 {
3378 int ret = 0;
3379
3380 __spi_mark_resumed(ctlr);
3381
3382 if (ctlr->queued) {
3383 ret = spi_start_queue(ctlr);
3384 if (ret)
3385 dev_err(&ctlr->dev, "queue restart failed\n");
3386 }
3387 return ret;
3388 }
3389 EXPORT_SYMBOL_GPL(spi_controller_resume);
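
/*
 * Example: system-sleep hooks of a controller driver wrapping the two
 * helpers above, assuming the driver stored the controller as drvdata.
 * A sketch only; real drivers typically also gate clocks and save or
 * restore registers around these calls.
 *
 *	static int example_spi_suspend(struct device *dev)
 *	{
 *		return spi_controller_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int example_spi_resume(struct device *dev)
 *	{
 *		return spi_controller_resume(dev_get_drvdata(dev));
 *	}
 */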
3390
3391 /*-------------------------------------------------------------------------*/
3392
3393 /* Core methods for spi_message alterations */
3394
3395 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3396 struct spi_message *msg,
3397 void *res)
3398 {
3399 struct spi_replaced_transfers *rxfer = res;
3400 size_t i;
3401
3402 /* Call extra callback if requested */
3403 if (rxfer->release)
3404 rxfer->release(ctlr, msg, res);
3405
3406 /* Insert replaced transfers back into the message */
3407 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3408
3409 /* Remove the formerly inserted entries */
3410 for (i = 0; i < rxfer->inserted; i++)
3411 list_del(&rxfer->inserted_transfers[i].transfer_list);
3412 }
3413
3414 /**
3415 * spi_replace_transfers - replace transfers with several transfers
3416 * and register change with spi_message.resources
3417 * @msg: the spi_message we work upon
3418 * @xfer_first: the first spi_transfer we want to replace
3419 * @remove: number of transfers to remove
3420 * @insert: the number of transfers we want to insert instead
3421 * @release: extra release code necessary in some circumstances
3422 * @extradatasize: extra data to allocate (with alignment guarantees
3423 * of struct @spi_transfer)
3424 * @gfp: gfp flags
3425 *
3426 * Returns: pointer to @spi_replaced_transfers,
3427 * PTR_ERR(...) in case of errors.
3428 */
3429 static struct spi_replaced_transfers *spi_replace_transfers(
3430 struct spi_message *msg,
3431 struct spi_transfer *xfer_first,
3432 size_t remove,
3433 size_t insert,
3434 spi_replaced_release_t release,
3435 size_t extradatasize,
3436 gfp_t gfp)
3437 {
3438 struct spi_replaced_transfers *rxfer;
3439 struct spi_transfer *xfer;
3440 size_t i;
3441
3442 /* Allocate the structure using spi_res */
3443 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3444 struct_size(rxfer, inserted_transfers, insert)
3445 + extradatasize,
3446 gfp);
3447 if (!rxfer)
3448 return ERR_PTR(-ENOMEM);
3449
3450 /* The release code to invoke before running the generic release */
3451 rxfer->release = release;
3452
3453 /* Assign extradata */
3454 if (extradatasize)
3455 rxfer->extradata =
3456 &rxfer->inserted_transfers[insert];
3457
3458 /* Init the replaced_transfers list */
3459 INIT_LIST_HEAD(&rxfer->replaced_transfers);
3460
3461 /*
3462 * Assign the list_entry after which we should reinsert
3463 * the @replaced_transfers - it may be spi_message.messages!
3464 */
3465 rxfer->replaced_after = xfer_first->transfer_list.prev;
3466
3467 /* Remove the requested number of transfers */
3468 for (i = 0; i < remove; i++) {
3469 /*
3470 		 * If the entry after replaced_after is msg->transfers
3471 * then we have been requested to remove more transfers
3472 * than are in the list.
3473 */
3474 if (rxfer->replaced_after->next == &msg->transfers) {
3475 dev_err(&msg->spi->dev,
3476 "requested to remove more spi_transfers than are available\n");
3477 /* Insert replaced transfers back into the message */
3478 list_splice(&rxfer->replaced_transfers,
3479 rxfer->replaced_after);
3480
3481 /* Free the spi_replace_transfer structure... */
3482 spi_res_free(rxfer);
3483
3484 /* ...and return with an error */
3485 return ERR_PTR(-EINVAL);
3486 }
3487
3488 /*
3489 * Remove the entry after replaced_after from list of
3490 * transfers and add it to list of replaced_transfers.
3491 */
3492 list_move_tail(rxfer->replaced_after->next,
3493 &rxfer->replaced_transfers);
3494 }
3495
3496 /*
3497 * Create copy of the given xfer with identical settings
3498 * based on the first transfer to get removed.
3499 */
3500 for (i = 0; i < insert; i++) {
3501 /* We need to run in reverse order */
3502 xfer = &rxfer->inserted_transfers[insert - 1 - i];
3503
3504 /* Copy all spi_transfer data */
3505 memcpy(xfer, xfer_first, sizeof(*xfer));
3506
3507 /* Add to list */
3508 list_add(&xfer->transfer_list, rxfer->replaced_after);
3509
3510 /* Clear cs_change and delay for all but the last */
3511 if (i) {
3512 xfer->cs_change = false;
3513 xfer->delay.value = 0;
3514 }
3515 }
3516
3517 /* Set up inserted... */
3518 rxfer->inserted = insert;
3519
3520 /* ...and register it with spi_res/spi_message */
3521 spi_res_add(msg, rxfer);
3522
3523 return rxfer;
3524 }
3525
3526 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3527 struct spi_message *msg,
3528 struct spi_transfer **xferp,
3529 size_t maxsize,
3530 gfp_t gfp)
3531 {
3532 struct spi_transfer *xfer = *xferp, *xfers;
3533 struct spi_replaced_transfers *srt;
3534 size_t offset;
3535 size_t count, i;
3536
3537 /* Calculate how many we have to replace */
3538 count = DIV_ROUND_UP(xfer->len, maxsize);
3539
3540 /* Create replacement */
3541 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
3542 if (IS_ERR(srt))
3543 return PTR_ERR(srt);
3544 xfers = srt->inserted_transfers;
3545
3546 /*
3547 * Now handle each of those newly inserted spi_transfers.
3548 	 * Note that the replacement spi_transfers are all preset
3549 * to the same values as *xferp, so tx_buf, rx_buf and len
3550 * are all identical (as well as most others)
3551 * so we just have to fix up len and the pointers.
3552 *
3553 * This also includes support for the depreciated
3554 * spi_message.is_dma_mapped interface.
3555 */
3556
3557 /*
3558 * The first transfer just needs the length modified, so we
3559 * run it outside the loop.
3560 */
3561 xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3562
3563 /* All the others need rx_buf/tx_buf also set */
3564 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3565 /* Update rx_buf, tx_buf and DMA */
3566 if (xfers[i].rx_buf)
3567 xfers[i].rx_buf += offset;
3568 if (xfers[i].rx_dma)
3569 xfers[i].rx_dma += offset;
3570 if (xfers[i].tx_buf)
3571 xfers[i].tx_buf += offset;
3572 if (xfers[i].tx_dma)
3573 xfers[i].tx_dma += offset;
3574
3575 /* Update length */
3576 xfers[i].len = min(maxsize, xfers[i].len - offset);
3577 }
3578
3579 /*
3580 * Set xferp to the last entry we have inserted,
3581 * so that we skip those already-split transfers.
3582 */
3583 *xferp = &xfers[count - 1];
3584
3585 /* Increment statistics counters */
3586 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3587 transfers_split_maxsize);
3588 SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3589 transfers_split_maxsize);
3590
3591 return 0;
3592 }
3593
3594 /**
3595 * spi_split_transfers_maxsize - split SPI transfers into multiple transfers
3596 * when an individual transfer exceeds a
3597 * certain size
3598 * @ctlr: the @spi_controller for this transfer
3599 * @msg: the @spi_message to transform
3600 * @maxsize: the maximum length, in bytes, beyond which a transfer is split
3601 * @gfp: GFP allocation flags
3602 *
3603 * Return: zero on success, else a negative error code
3604 */
spi_split_transfers_maxsize(struct spi_controller * ctlr,struct spi_message * msg,size_t maxsize,gfp_t gfp)3605 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3606 struct spi_message *msg,
3607 size_t maxsize,
3608 gfp_t gfp)
3609 {
3610 struct spi_transfer *xfer;
3611 int ret;
3612
3613 /*
3614 * Iterate over the transfer_list,
3615 * but note that xfer is advanced to the last transfer inserted
3616 * to avoid checking sizes again unnecessarily (also, xfer may
3617 * belong to a different list by the time the replacement
3618 * has happened).
3619 */
3620 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3621 if (xfer->len > maxsize) {
3622 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3623 maxsize, gfp);
3624 if (ret)
3625 return ret;
3626 }
3627 }
3628
3629 return 0;
3630 }
3631 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
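
/*
 * Example (not part of the core): a minimal sketch of how a controller
 * driver with a small hardware FIFO might call
 * spi_split_transfers_maxsize() from its prepare_message hook. The
 * "foo_" names and the 256-byte limit are hypothetical.
 */
#if 0	/* illustration only */
static int foo_prepare_message(struct spi_controller *ctlr,
			       struct spi_message *msg)
{
	/* No single transfer may exceed this controller's 256-byte FIFO */
	return spi_split_transfers_maxsize(ctlr, msg, 256, GFP_KERNEL);
}
#endif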
3632
3633
3634 /**
3635 * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
3636 * when an individual transfer exceeds a
3637 * certain number of SPI words
3638 * @ctlr: the @spi_controller for this transfer
3639 * @msg: the @spi_message to transform
3640 * @maxwords: the number of words to limit each transfer to
3641 * @gfp: GFP allocation flags
3642 *
3643 * Return: zero on success, else a negative error code
3644 */
3645 int spi_split_transfers_maxwords(struct spi_controller *ctlr,
3646 struct spi_message *msg,
3647 size_t maxwords,
3648 gfp_t gfp)
3649 {
3650 struct spi_transfer *xfer;
3651
3652 /*
3653 * Iterate over the transfer_list,
3654 * but note that xfer is advanced to the last transfer inserted
3655 * to avoid checking sizes again unnecessarily (also, xfer may
3656 * belong to a different list by the time the replacement
3657 * has happened).
3658 */
3659 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3660 size_t maxsize;
3661 int ret;
3662
3663 maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word));
3664 if (xfer->len > maxsize) {
3665 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3666 maxsize, gfp);
3667 if (ret)
3668 return ret;
3669 }
3670 }
3671
3672 return 0;
3673 }
3674 EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords);
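
/*
 * Example (not part of the core): the byte limit above is derived by
 * rounding the word size up to a power of two, so for a hypothetical
 * 24-bit device BITS_TO_BYTES(24) = 3 is rounded up to 4 bytes per word,
 * and limiting to 16 words caps each transfer at 64 bytes. A minimal
 * sketch with hypothetical names:
 */
#if 0	/* illustration only */
static int foo_limit_to_16_words(struct spi_controller *ctlr,
				 struct spi_message *msg)
{
	return spi_split_transfers_maxwords(ctlr, msg, 16, GFP_KERNEL);
}
#endif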
3675
3676 /*-------------------------------------------------------------------------*/
3677
3678 /*
3679 * Core methods for SPI controller protocol drivers. Some of the
3680 * other core methods are currently defined as inline functions.
3681 */
3682
3683 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3684 u8 bits_per_word)
3685 {
3686 if (ctlr->bits_per_word_mask) {
3687 /* Only 32 bits fit in the mask */
3688 if (bits_per_word > 32)
3689 return -EINVAL;
3690 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3691 return -EINVAL;
3692 }
3693
3694 return 0;
3695 }
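
/*
 * Example (not part of the core): the mask checked above is declared by
 * controller drivers with the SPI_BPW_MASK()/SPI_BPW_RANGE_MASK() helpers
 * from <linux/spi/spi.h>. A minimal sketch; the hardware limits are
 * hypothetical.
 */
#if 0	/* illustration only */
static void foo_set_word_sizes(struct spi_controller *ctlr)
{
	/* This hardware handles 8- and 16-bit words only */
	ctlr->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);

	/* A contiguous range would be: SPI_BPW_RANGE_MASK(4, 32) */
}
#endif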
3696
3697 /**
3698 * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3699 * @spi: the device that requires specific CS timing configuration
3700 *
3701 * Return: zero on success, else a negative error code.
3702 */
3703 static int spi_set_cs_timing(struct spi_device *spi)
3704 {
3705 struct device *parent = spi->controller->dev.parent;
3706 int status = 0;
3707
3708 if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
3709 if (spi->controller->auto_runtime_pm) {
3710 status = pm_runtime_get_sync(parent);
3711 if (status < 0) {
3712 pm_runtime_put_noidle(parent);
3713 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3714 status);
3715 return status;
3716 }
3717
3718 status = spi->controller->set_cs_timing(spi);
3719 pm_runtime_mark_last_busy(parent);
3720 pm_runtime_put_autosuspend(parent);
3721 } else {
3722 status = spi->controller->set_cs_timing(spi);
3723 }
3724 }
3725 return status;
3726 }
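
/*
 * Example (not part of the core): a peripheral driver that needs extra
 * chip-select timing can fill in the spi_device delays before calling
 * spi_setup(), which invokes spi_set_cs_timing() above. A minimal
 * sketch; the 50/20 ns figures are hypothetical.
 */
#if 0	/* illustration only */
static int foo_configure_cs(struct spi_device *spi)
{
	spi->cs_setup.value = 50;
	spi->cs_setup.unit = SPI_DELAY_UNIT_NSECS;
	spi->cs_hold.value = 20;
	spi->cs_hold.unit = SPI_DELAY_UNIT_NSECS;

	return spi_setup(spi);
}
#endif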
3727
3728 /**
3729 * spi_setup - setup SPI mode and clock rate
3730 * @spi: the device whose settings are being modified
3731 * Context: can sleep, and no requests are queued to the device
3732 *
3733 * SPI protocol drivers may need to update the transfer mode if the
3734 * device doesn't work with its default. They may likewise need
3735 * to update clock rates or word sizes from initial values. This function
3736 * changes those settings, and must be called from a context that can sleep.
3737 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3738 * effect the next time the device is selected and data is transferred to
3739 * or from it. When this function returns, the SPI device is deselected.
3740 *
3741 * Note that this call will fail if the protocol driver specifies an option
3742 * that the underlying controller or its driver does not support. For
3743 * example, not all hardware supports wire transfers using nine bit words,
3744 * LSB-first wire encoding, or active-high chipselects.
3745 *
3746 * Return: zero on success, else a negative error code.
3747 */
3748 int spi_setup(struct spi_device *spi)
3749 {
3750 unsigned bad_bits, ugly_bits;
3751 int status = 0;
3752
3753 /*
3754 * Check the mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO
3755 * from being set at the same time.
3756 */
3757 if ((hweight_long(spi->mode &
3758 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3759 (hweight_long(spi->mode &
3760 (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3761 dev_err(&spi->dev,
3762 "setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
3763 return -EINVAL;
3764 }
3765 /* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
3766 if ((spi->mode & SPI_3WIRE) && (spi->mode &
3767 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3768 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3769 return -EINVAL;
3770 /*
3771 * Help drivers fail *cleanly* when they need options
3772 * that aren't supported with their current controller.
3773 * SPI_CS_WORD has a fallback software implementation,
3774 * so it is ignored here.
3775 */
3776 bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3777 SPI_NO_TX | SPI_NO_RX);
3778 ugly_bits = bad_bits &
3779 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3780 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3781 if (ugly_bits) {
3782 dev_warn(&spi->dev,
3783 "setup: ignoring unsupported mode bits %x\n",
3784 ugly_bits);
3785 spi->mode &= ~ugly_bits;
3786 bad_bits &= ~ugly_bits;
3787 }
3788 if (bad_bits) {
3789 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3790 bad_bits);
3791 return -EINVAL;
3792 }
3793
3794 if (!spi->bits_per_word) {
3795 spi->bits_per_word = 8;
3796 } else {
3797 /*
3798 * Some controllers may not support the default 8 bits-per-word
3799 * so only perform the check when this is explicitly provided.
3800 */
3801 status = __spi_validate_bits_per_word(spi->controller,
3802 spi->bits_per_word);
3803 if (status)
3804 return status;
3805 }
3806
3807 if (spi->controller->max_speed_hz &&
3808 (!spi->max_speed_hz ||
3809 spi->max_speed_hz > spi->controller->max_speed_hz))
3810 spi->max_speed_hz = spi->controller->max_speed_hz;
3811
3812 mutex_lock(&spi->controller->io_mutex);
3813
3814 if (spi->controller->setup) {
3815 status = spi->controller->setup(spi);
3816 if (status) {
3817 mutex_unlock(&spi->controller->io_mutex);
3818 dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3819 status);
3820 return status;
3821 }
3822 }
3823
3824 status = spi_set_cs_timing(spi);
3825 if (status) {
3826 mutex_unlock(&spi->controller->io_mutex);
3827 return status;
3828 }
3829
3830 if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3831 status = pm_runtime_resume_and_get(spi->controller->dev.parent);
3832 if (status < 0) {
3833 mutex_unlock(&spi->controller->io_mutex);
3834 dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3835 status);
3836 return status;
3837 }
3838
3839 /*
3840 * We do not want to return positive value from pm_runtime_get,
3841 * there are many instances of devices calling spi_setup() and
3842 * checking for a non-zero return value instead of a negative
3843 * return value.
3844 */
3845 status = 0;
3846
3847 spi_set_cs(spi, false, true);
3848 pm_runtime_mark_last_busy(spi->controller->dev.parent);
3849 pm_runtime_put_autosuspend(spi->controller->dev.parent);
3850 } else {
3851 spi_set_cs(spi, false, true);
3852 }
3853
3854 mutex_unlock(&spi->controller->io_mutex);
3855
3856 if (spi->rt && !spi->controller->rt) {
3857 spi->controller->rt = true;
3858 spi_set_thread_rt(spi->controller);
3859 }
3860
3861 trace_spi_setup(spi, status);
3862
3863 dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
3864 spi->mode & SPI_MODE_X_MASK,
3865 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
3866 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
3867 (spi->mode & SPI_3WIRE) ? "3wire, " : "",
3868 (spi->mode & SPI_LOOP) ? "loopback, " : "",
3869 spi->bits_per_word, spi->max_speed_hz,
3870 status);
3871
3872 return status;
3873 }
3874 EXPORT_SYMBOL_GPL(spi_setup);
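
/*
 * Example (not part of the core): a typical protocol driver overrides the
 * defaults from probe() and then calls spi_setup(). A minimal sketch; the
 * mode, word size and clock cap are hypothetical.
 */
#if 0	/* illustration only */
static int foo_probe(struct spi_device *spi)
{
	spi->mode = SPI_MODE_3;		/* CPOL = 1, CPHA = 1 */
	spi->bits_per_word = 16;
	spi->max_speed_hz = 1000000;	/* cap the clock at 1 MHz */

	return spi_setup(spi);
}
#endif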
3875
3876 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
3877 struct spi_device *spi)
3878 {
3879 int delay1, delay2;
3880
3881 delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
3882 if (delay1 < 0)
3883 return delay1;
3884
3885 delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
3886 if (delay2 < 0)
3887 return delay2;
3888
3889 if (delay1 < delay2)
3890 memcpy(&xfer->word_delay, &spi->word_delay,
3891 sizeof(xfer->word_delay));
3892
3893 return 0;
3894 }
3895
3896 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3897 {
3898 struct spi_controller *ctlr = spi->controller;
3899 struct spi_transfer *xfer;
3900 int w_size;
3901
3902 if (list_empty(&message->transfers))
3903 return -EINVAL;
3904
3905 /*
3906 * If an SPI controller does not support toggling the CS line on each
3907 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
3908 * for the CS line, we can emulate the CS-per-word hardware function by
3909 * splitting transfers into one-word transfers and ensuring that
3910 * cs_change is set for each transfer.
3911 */
3912 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3913 spi_get_csgpiod(spi, 0))) {
3914 size_t maxsize = BITS_TO_BYTES(spi->bits_per_word);
3915 int ret;
3916
3917 /* spi_split_transfers_maxsize() requires message->spi */
3918 message->spi = spi;
3919
3920 ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3921 GFP_KERNEL);
3922 if (ret)
3923 return ret;
3924
3925 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3926 /* Don't change cs_change on the last entry in the list */
3927 if (list_is_last(&xfer->transfer_list, &message->transfers))
3928 break;
3929 xfer->cs_change = 1;
3930 }
3931 }
3932
3933 /*
3934 * Half-duplex links include original MicroWire, and ones with
3935 * only one data pin like SPI_3WIRE (switches direction) or where
3936 * either MOSI or MISO is missing. They can also be caused by
3937 * software limitations.
3938 */
3939 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
3940 (spi->mode & SPI_3WIRE)) {
3941 unsigned flags = ctlr->flags;
3942
3943 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3944 if (xfer->rx_buf && xfer->tx_buf)
3945 return -EINVAL;
3946 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
3947 return -EINVAL;
3948 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
3949 return -EINVAL;
3950 }
3951 }
3952
3953 /*
3954 * Set transfer bits_per_word and max speed as spi device default if
3955 * it is not set for this transfer.
3956 * Set transfer tx_nbits and rx_nbits as single transfer default
3957 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
3958 * Ensure transfer word_delay is at least as long as that required by
3959 * device itself.
3960 */
3961 message->frame_length = 0;
3962 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3963 xfer->effective_speed_hz = 0;
3964 message->frame_length += xfer->len;
3965 if (!xfer->bits_per_word)
3966 xfer->bits_per_word = spi->bits_per_word;
3967
3968 if (!xfer->speed_hz)
3969 xfer->speed_hz = spi->max_speed_hz;
3970
3971 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
3972 xfer->speed_hz = ctlr->max_speed_hz;
3973
3974 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3975 return -EINVAL;
3976
3977 /*
3978 * SPI transfer length should be multiple of SPI word size
3979 * where SPI word size should be power-of-two multiple.
3980 */
3981 if (xfer->bits_per_word <= 8)
3982 w_size = 1;
3983 else if (xfer->bits_per_word <= 16)
3984 w_size = 2;
3985 else
3986 w_size = 4;
3987
3988 /* No partial transfers accepted */
3989 if (xfer->len % w_size)
3990 return -EINVAL;
3991
3992 if (xfer->speed_hz && ctlr->min_speed_hz &&
3993 xfer->speed_hz < ctlr->min_speed_hz)
3994 return -EINVAL;
3995
3996 if (xfer->tx_buf && !xfer->tx_nbits)
3997 xfer->tx_nbits = SPI_NBITS_SINGLE;
3998 if (xfer->rx_buf && !xfer->rx_nbits)
3999 xfer->rx_nbits = SPI_NBITS_SINGLE;
4000 /*
4001 * Check transfer tx/rx_nbits:
4002 * 1. check the value matches one of single, dual and quad
4003 * 2. check tx/rx_nbits match the mode in spi_device
4004 */
4005 if (xfer->tx_buf) {
4006 if (spi->mode & SPI_NO_TX)
4007 return -EINVAL;
4008 if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
4009 xfer->tx_nbits != SPI_NBITS_DUAL &&
4010 xfer->tx_nbits != SPI_NBITS_QUAD &&
4011 xfer->tx_nbits != SPI_NBITS_OCTAL)
4012 return -EINVAL;
4013 if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
4014 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
4015 return -EINVAL;
4016 if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
4017 !(spi->mode & SPI_TX_QUAD))
4018 return -EINVAL;
4019 }
4020 /* Check transfer rx_nbits */
4021 if (xfer->rx_buf) {
4022 if (spi->mode & SPI_NO_RX)
4023 return -EINVAL;
4024 if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
4025 xfer->rx_nbits != SPI_NBITS_DUAL &&
4026 xfer->rx_nbits != SPI_NBITS_QUAD &&
4027 xfer->rx_nbits != SPI_NBITS_OCTAL)
4028 return -EINVAL;
4029 if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
4030 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
4031 return -EINVAL;
4032 if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
4033 !(spi->mode & SPI_RX_QUAD))
4034 return -EINVAL;
4035 }
4036
4037 if (_spi_xfer_word_delay_update(xfer, spi))
4038 return -EINVAL;
4039 }
4040
4041 message->status = -EINPROGRESS;
4042
4043 return 0;
4044 }
4045
4046 static int __spi_async(struct spi_device *spi, struct spi_message *message)
4047 {
4048 struct spi_controller *ctlr = spi->controller;
4049 struct spi_transfer *xfer;
4050
4051 /*
4052 * Some controllers do not support doing regular SPI transfers. Return
4053 * ENOTSUPP when this is the case.
4054 */
4055 if (!ctlr->transfer)
4056 return -ENOTSUPP;
4057
4058 message->spi = spi;
4059
4060 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
4061 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
4062
4063 trace_spi_message_submit(message);
4064
4065 if (!ctlr->ptp_sts_supported) {
4066 list_for_each_entry(xfer, &message->transfers, transfer_list) {
4067 xfer->ptp_sts_word_pre = 0;
4068 ptp_read_system_prets(xfer->ptp_sts);
4069 }
4070 }
4071
4072 return ctlr->transfer(spi, message);
4073 }
4074
4075 /**
4076 * spi_async - asynchronous SPI transfer
4077 * @spi: device with which data will be exchanged
4078 * @message: describes the data transfers, including completion callback
4079 * Context: any (IRQs may be blocked, etc)
4080 *
4081 * This call may be used in IRQ context and other contexts which can't sleep,
4082 * as well as from task contexts which can sleep.
4083 *
4084 * The completion callback is invoked in a context which can't sleep.
4085 * Before that invocation, the value of message->status is undefined.
4086 * When the callback is issued, message->status holds either zero (to
4087 * indicate complete success) or a negative error code. After that
4088 * callback returns, the driver which issued the transfer request may
4089 * deallocate the associated memory; it's no longer in use by any SPI
4090 * core or controller driver code.
4091 *
4092 * Note that although all messages to a spi_device are handled in
4093 * FIFO order, messages may go to different devices in other orders.
4094 * Some device might be higher priority, or have various "hard" access
4095 * time requirements, for example.
4096 *
4097 * On detection of any fault during the transfer, processing of
4098 * the entire message is aborted, and the device is deselected.
4099 * Until returning from the associated message completion callback,
4100 * no other spi_message queued to that device will be processed.
4101 * (This rule applies equally to all the synchronous transfer calls,
4102 * which are wrappers around this core asynchronous primitive.)
4103 *
4104 * Return: zero on success, else a negative error code.
4105 */
4106 int spi_async(struct spi_device *spi, struct spi_message *message)
4107 {
4108 struct spi_controller *ctlr = spi->controller;
4109 int ret;
4110 unsigned long flags;
4111
4112 ret = __spi_validate(spi, message);
4113 if (ret != 0)
4114 return ret;
4115
4116 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4117
4118 if (ctlr->bus_lock_flag)
4119 ret = -EBUSY;
4120 else
4121 ret = __spi_async(spi, message);
4122
4123 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4124
4125 return ret;
4126 }
4127 EXPORT_SYMBOL_GPL(spi_async);
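
/*
 * Example (not part of the core): a minimal sketch of asynchronous use.
 * The message and buffers must stay valid until the completion callback,
 * which runs in a context that can't sleep, has been invoked. All "foo_"
 * names are hypothetical.
 */
#if 0	/* illustration only */
struct foo_req {
	struct spi_message msg;
	struct spi_transfer xfer;
	u8 buf[4];
};

static void foo_complete(void *context)
{
	struct foo_req *req = context;

	/* Atomic context; req->msg.status holds the result */
	kfree(req);
}

static int foo_submit(struct spi_device *spi)
{
	struct foo_req *req = kzalloc(sizeof(*req), GFP_ATOMIC);

	if (!req)
		return -ENOMEM;

	req->xfer.rx_buf = req->buf;
	req->xfer.len = sizeof(req->buf);
	spi_message_init_with_transfers(&req->msg, &req->xfer, 1);
	req->msg.complete = foo_complete;
	req->msg.context = req;

	return spi_async(spi, &req->msg);
}
#endif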
4128
4129 /**
4130 * spi_async_locked - version of spi_async with exclusive bus usage
4131 * @spi: device with which data will be exchanged
4132 * @message: describes the data transfers, including completion callback
4133 * Context: any (IRQs may be blocked, etc)
4134 *
4135 * This call may be used in IRQ context and other contexts which can't sleep,
4136 * as well as from task contexts which can sleep.
4137 *
4138 * The completion callback is invoked in a context which can't sleep.
4139 * Before that invocation, the value of message->status is undefined.
4140 * When the callback is issued, message->status holds either zero (to
4141 * indicate complete success) or a negative error code. After that
4142 * callback returns, the driver which issued the transfer request may
4143 * deallocate the associated memory; it's no longer in use by any SPI
4144 * core or controller driver code.
4145 *
4146 * Note that although all messages to a spi_device are handled in
4147 * FIFO order, messages may go to different devices in other orders.
4148 * Some device might be higher priority, or have various "hard" access
4149 * time requirements, for example.
4150 *
4151 * On detection of any fault during the transfer, processing of
4152 * the entire message is aborted, and the device is deselected.
4153 * Until returning from the associated message completion callback,
4154 * no other spi_message queued to that device will be processed.
4155 * (This rule applies equally to all the synchronous transfer calls,
4156 * which are wrappers around this core asynchronous primitive.)
4157 *
4158 * Return: zero on success, else a negative error code.
4159 */
4160 static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
4161 {
4162 struct spi_controller *ctlr = spi->controller;
4163 int ret;
4164 unsigned long flags;
4165
4166 ret = __spi_validate(spi, message);
4167 if (ret != 0)
4168 return ret;
4169
4170 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4171
4172 ret = __spi_async(spi, message);
4173
4174 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4175
4176 return ret;
4178 }
4179
4180 static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
4181 {
4182 bool was_busy;
4183 int ret;
4184
4185 mutex_lock(&ctlr->io_mutex);
4186
4187 was_busy = ctlr->busy;
4188
4189 ctlr->cur_msg = msg;
4190 ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
4191 if (ret)
4192 dev_err(&ctlr->dev, "noqueue transfer failed\n");
4193 ctlr->cur_msg = NULL;
4194 ctlr->fallback = false;
4195
4196 if (!was_busy) {
4197 kfree(ctlr->dummy_rx);
4198 ctlr->dummy_rx = NULL;
4199 kfree(ctlr->dummy_tx);
4200 ctlr->dummy_tx = NULL;
4201 if (ctlr->unprepare_transfer_hardware &&
4202 ctlr->unprepare_transfer_hardware(ctlr))
4203 dev_err(&ctlr->dev,
4204 "failed to unprepare transfer hardware\n");
4205 spi_idle_runtime_pm(ctlr);
4206 }
4207
4208 mutex_unlock(&ctlr->io_mutex);
4209 }
4210
4211 /*-------------------------------------------------------------------------*/
4212
4213 /*
4214 * Utility methods for SPI protocol drivers, layered on
4215 * top of the core. Some other utility methods are defined as
4216 * inline functions.
4217 */
4218
4219 static void spi_complete(void *arg)
4220 {
4221 complete(arg);
4222 }
4223
4224 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
4225 {
4226 DECLARE_COMPLETION_ONSTACK(done);
4227 int status;
4228 struct spi_controller *ctlr = spi->controller;
4229
4230 if (__spi_check_suspended(ctlr)) {
4231 dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
4232 return -ESHUTDOWN;
4233 }
4234
4235 status = __spi_validate(spi, message);
4236 if (status != 0)
4237 return status;
4238
4239 message->spi = spi;
4240
4241 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
4242 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
4243
4244 /*
4245 * Checking queue_empty here only guarantees async/sync message
4246 * ordering when coming from the same context. It does not need to
4247 * guard against reentrancy from a different context. The io_mutex
4248 * will catch those cases.
4249 */
4250 if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4251 message->actual_length = 0;
4252 message->status = -EINPROGRESS;
4253
4254 trace_spi_message_submit(message);
4255
4256 SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4257 SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
4258
4259 __spi_transfer_message_noqueue(ctlr, message);
4260
4261 return message->status;
4262 }
4263
4264 /*
4265 * There are messages in the async queue that could have originated
4266 * from the same context, so we need to preserve ordering.
4267 * Therefore we send the message to the async queue and wait until it
4268 * has completed.
4269 */
4270 message->complete = spi_complete;
4271 message->context = &done;
4272 status = spi_async_locked(spi, message);
4273 if (status == 0) {
4274 wait_for_completion(&done);
4275 status = message->status;
4276 }
4277 message->complete = NULL;
4278 message->context = NULL;
4279
4280 return status;
4281 }
4282
4283 /**
4284 * spi_sync - blocking/synchronous SPI data transfers
4285 * @spi: device with which data will be exchanged
4286 * @message: describes the data transfers
4287 * Context: can sleep
4288 *
4289 * This call may only be used from a context that may sleep. The sleep
4290 * is non-interruptible, and has no timeout. Low-overhead controller
4291 * drivers may DMA directly into and out of the message buffers.
4292 *
4293 * Note that the SPI device's chip select is active during the message,
4294 * and then is normally disabled between messages. Drivers for some
4295 * frequently-used devices may want to minimize costs of selecting a chip,
4296 * by leaving it selected in anticipation that the next message will go
4297 * to the same chip. (That may increase power usage.)
4298 *
4299 * Also, the caller is guaranteeing that the memory associated with the
4300 * message will not be freed before this call returns.
4301 *
4302 * Return: zero on success, else a negative error code.
4303 */
4304 int spi_sync(struct spi_device *spi, struct spi_message *message)
4305 {
4306 int ret;
4307
4308 mutex_lock(&spi->controller->bus_lock_mutex);
4309 ret = __spi_sync(spi, message);
4310 mutex_unlock(&spi->controller->bus_lock_mutex);
4311
4312 return ret;
4313 }
4314 EXPORT_SYMBOL_GPL(spi_sync);
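
/*
 * Example (not part of the core): a minimal sketch of a synchronous
 * command/response exchange built from two transfers. The "foo_" names
 * are hypothetical; unlike spi_write_then_read() below, the buffers
 * passed here must be DMA-safe (e.g. kmalloc'ed).
 */
#if 0	/* illustration only */
static int foo_read_id(struct spi_device *spi, u8 *cmd, u8 *id, size_t len)
{
	struct spi_transfer xfers[2] = {
		{ .tx_buf = cmd, .len = 1 },
		{ .rx_buf = id, .len = len },
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));

	return spi_sync(spi, &msg);
}
#endif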
4315
4316 /**
4317 * spi_sync_locked - version of spi_sync with exclusive bus usage
4318 * @spi: device with which data will be exchanged
4319 * @message: describes the data transfers
4320 * Context: can sleep
4321 *
4322 * This call may only be used from a context that may sleep. The sleep
4323 * is non-interruptible, and has no timeout. Low-overhead controller
4324 * drivers may DMA directly into and out of the message buffers.
4325 *
4326 * This call should be used by drivers that require exclusive access to the
4327 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4328 * be released by a spi_bus_unlock call when the exclusive access is over.
4329 *
4330 * Return: zero on success, else a negative error code.
4331 */
4332 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4333 {
4334 return __spi_sync(spi, message);
4335 }
4336 EXPORT_SYMBOL_GPL(spi_sync_locked);
4337
4338 /**
4339 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4340 * @ctlr: SPI bus master that should be locked for exclusive bus access
4341 * Context: can sleep
4342 *
4343 * This call may only be used from a context that may sleep. The sleep
4344 * is non-interruptible, and has no timeout.
4345 *
4346 * This call should be used by drivers that require exclusive access to the
4347 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4348 * exclusive access is over. Data transfer must be done by spi_sync_locked
4349 * and spi_async_locked calls when the SPI bus lock is held.
4350 *
4351 * Return: always zero.
4352 */
4353 int spi_bus_lock(struct spi_controller *ctlr)
4354 {
4355 unsigned long flags;
4356
4357 mutex_lock(&ctlr->bus_lock_mutex);
4358
4359 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4360 ctlr->bus_lock_flag = 1;
4361 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4362
4363 /* Mutex remains locked until spi_bus_unlock() is called */
4364
4365 return 0;
4366 }
4367 EXPORT_SYMBOL_GPL(spi_bus_lock);
4368
4369 /**
4370 * spi_bus_unlock - release the lock for exclusive SPI bus usage
4371 * @ctlr: SPI bus master that was locked for exclusive bus access
4372 * Context: can sleep
4373 *
4374 * This call may only be used from a context that may sleep. The sleep
4375 * is non-interruptible, and has no timeout.
4376 *
4377 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4378 * call.
4379 *
4380 * Return: always zero.
4381 */
4382 int spi_bus_unlock(struct spi_controller *ctlr)
4383 {
4384 ctlr->bus_lock_flag = 0;
4385
4386 mutex_unlock(&ctlr->bus_lock_mutex);
4387
4388 return 0;
4389 }
4390 EXPORT_SYMBOL_GPL(spi_bus_unlock);
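
/*
 * Example (not part of the core): the intended pairing of spi_bus_lock(),
 * spi_sync_locked() and spi_bus_unlock() when two messages must not be
 * interleaved with traffic from other devices on the same bus. A minimal
 * sketch with hypothetical messages:
 */
#if 0	/* illustration only */
static int foo_paired_messages(struct spi_device *spi,
			       struct spi_message *first,
			       struct spi_message *second)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;

	spi_bus_lock(ctlr);
	ret = spi_sync_locked(spi, first);
	if (!ret)
		ret = spi_sync_locked(spi, second);
	spi_bus_unlock(ctlr);

	return ret;
}
#endif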
4391
4392 /* Portable code must never pass more than 32 bytes */
4393 #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
4394
4395 static u8 *buf;
4396
4397 /**
4398 * spi_write_then_read - SPI synchronous write followed by read
4399 * @spi: device with which data will be exchanged
4400 * @txbuf: data to be written (need not be DMA-safe)
4401 * @n_tx: size of txbuf, in bytes
4402 * @rxbuf: buffer into which data will be read (need not be DMA-safe)
4403 * @n_rx: size of rxbuf, in bytes
4404 * Context: can sleep
4405 *
4406 * This performs a half-duplex MicroWire-style transaction with the
4407 * device, sending txbuf and then reading rxbuf. The return value
4408 * is zero for success, else a negative errno status code.
4409 * This call may only be used from a context that may sleep.
4410 *
4411 * Parameters to this routine are always copied using a small buffer.
4412 * Performance-sensitive or bulk transfer code should instead use
4413 * spi_{async,sync}() calls with DMA-safe buffers.
4414 *
4415 * Return: zero on success, else a negative error code.
4416 */
4417 int spi_write_then_read(struct spi_device *spi,
4418 const void *txbuf, unsigned n_tx,
4419 void *rxbuf, unsigned n_rx)
4420 {
4421 static DEFINE_MUTEX(lock);
4422
4423 int status;
4424 struct spi_message message;
4425 struct spi_transfer x[2];
4426 u8 *local_buf;
4427
4428 /*
4429 * Use preallocated DMA-safe buffer if we can. We can't avoid
4430 * copying here (as a pure convenience), but we can
4431 * keep heap costs out of the hot path unless someone else is
4432 * using the pre-allocated buffer or the transfer is too large.
4433 */
4434 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4435 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4436 GFP_KERNEL | GFP_DMA);
4437 if (!local_buf)
4438 return -ENOMEM;
4439 } else {
4440 local_buf = buf;
4441 }
4442
4443 spi_message_init(&message);
4444 memset(x, 0, sizeof(x));
4445 if (n_tx) {
4446 x[0].len = n_tx;
4447 spi_message_add_tail(&x[0], &message);
4448 }
4449 if (n_rx) {
4450 x[1].len = n_rx;
4451 spi_message_add_tail(&x[1], &message);
4452 }
4453
4454 memcpy(local_buf, txbuf, n_tx);
4455 x[0].tx_buf = local_buf;
4456 x[1].rx_buf = local_buf + n_tx;
4457
4458 /* Do the I/O */
4459 status = spi_sync(spi, &message);
4460 if (status == 0)
4461 memcpy(rxbuf, x[1].rx_buf, n_rx);
4462
4463 if (x[0].tx_buf == buf)
4464 mutex_unlock(&lock);
4465 else
4466 kfree(local_buf);
4467
4468 return status;
4469 }
4470 EXPORT_SYMBOL_GPL(spi_write_then_read);
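
/*
 * Example (not part of the core): because spi_write_then_read() copies
 * through a bounce buffer, stack variables are fine here. A minimal
 * sketch of a register read; the 0x80 "read" opcode is hypothetical.
 */
#if 0	/* illustration only */
static int foo_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
	u8 cmd = 0x80 | reg;

	return spi_write_then_read(spi, &cmd, 1, val, 1);
}
#endif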
4471
4472 /*-------------------------------------------------------------------------*/
4473
4474 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
4475 /* Must call put_device() when done with returned spi_device device */
4476 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4477 {
4478 struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4479
4480 return dev ? to_spi_device(dev) : NULL;
4481 }
4482
4483 /* The SPI controllers are not on spi_bus, so we find them another way */
4484 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4485 {
4486 struct device *dev;
4487
4488 dev = class_find_device_by_of_node(&spi_master_class, node);
4489 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4490 dev = class_find_device_by_of_node(&spi_slave_class, node);
4491 if (!dev)
4492 return NULL;
4493
4494 /* Reference acquired in class_find_device() */
4495 return container_of(dev, struct spi_controller, dev);
4496 }
4497
4498 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4499 void *arg)
4500 {
4501 struct of_reconfig_data *rd = arg;
4502 struct spi_controller *ctlr;
4503 struct spi_device *spi;
4504
4505 switch (of_reconfig_get_state_change(action, arg)) {
4506 case OF_RECONFIG_CHANGE_ADD:
4507 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4508 if (ctlr == NULL)
4509 return NOTIFY_OK; /* Not for us */
4510
4511 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4512 put_device(&ctlr->dev);
4513 return NOTIFY_OK;
4514 }
4515
4516 /*
4517 * Clear the flag before adding the device so that fw_devlink
4518 * doesn't skip adding consumers to this device.
4519 */
4520 rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
4521 spi = of_register_spi_device(ctlr, rd->dn);
4522 put_device(&ctlr->dev);
4523
4524 if (IS_ERR(spi)) {
4525 pr_err("%s: failed to create for '%pOF'\n",
4526 __func__, rd->dn);
4527 of_node_clear_flag(rd->dn, OF_POPULATED);
4528 return notifier_from_errno(PTR_ERR(spi));
4529 }
4530 break;
4531
4532 case OF_RECONFIG_CHANGE_REMOVE:
4533 /* Already depopulated? */
4534 if (!of_node_check_flag(rd->dn, OF_POPULATED))
4535 return NOTIFY_OK;
4536
4537 /* Find our device by node */
4538 spi = of_find_spi_device_by_node(rd->dn);
4539 if (spi == NULL)
4540 return NOTIFY_OK; /* Not meant for us */
4541
4542 /* Unregister takes one ref away */
4543 spi_unregister_device(spi);
4544
4545 /* And put the reference of the find */
4546 put_device(&spi->dev);
4547 break;
4548 }
4549
4550 return NOTIFY_OK;
4551 }
4552
4553 static struct notifier_block spi_of_notifier = {
4554 .notifier_call = of_spi_notify,
4555 };
4556 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4557 extern struct notifier_block spi_of_notifier;
4558 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4559
4560 #if IS_ENABLED(CONFIG_ACPI)
4561 static int spi_acpi_controller_match(struct device *dev, const void *data)
4562 {
4563 return ACPI_COMPANION(dev->parent) == data;
4564 }
4565
4566 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4567 {
4568 struct device *dev;
4569
4570 dev = class_find_device(&spi_master_class, NULL, adev,
4571 spi_acpi_controller_match);
4572 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4573 dev = class_find_device(&spi_slave_class, NULL, adev,
4574 spi_acpi_controller_match);
4575 if (!dev)
4576 return NULL;
4577
4578 return container_of(dev, struct spi_controller, dev);
4579 }
4580
4581 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4582 {
4583 struct device *dev;
4584
4585 dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4586 return to_spi_device(dev);
4587 }
4588
4589 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4590 void *arg)
4591 {
4592 struct acpi_device *adev = arg;
4593 struct spi_controller *ctlr;
4594 struct spi_device *spi;
4595
4596 switch (value) {
4597 case ACPI_RECONFIG_DEVICE_ADD:
4598 ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
4599 if (!ctlr)
4600 break;
4601
4602 acpi_register_spi_device(ctlr, adev);
4603 put_device(&ctlr->dev);
4604 break;
4605 case ACPI_RECONFIG_DEVICE_REMOVE:
4606 if (!acpi_device_enumerated(adev))
4607 break;
4608
4609 spi = acpi_spi_find_device_by_adev(adev);
4610 if (!spi)
4611 break;
4612
4613 spi_unregister_device(spi);
4614 put_device(&spi->dev);
4615 break;
4616 }
4617
4618 return NOTIFY_OK;
4619 }
4620
4621 static struct notifier_block spi_acpi_notifier = {
4622 .notifier_call = acpi_spi_notify,
4623 };
4624 #else
4625 extern struct notifier_block spi_acpi_notifier;
4626 #endif
4627
4628 static int __init spi_init(void)
4629 {
4630 int status;
4631
4632 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4633 if (!buf) {
4634 status = -ENOMEM;
4635 goto err0;
4636 }
4637
4638 status = bus_register(&spi_bus_type);
4639 if (status < 0)
4640 goto err1;
4641
4642 status = class_register(&spi_master_class);
4643 if (status < 0)
4644 goto err2;
4645
4646 if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4647 status = class_register(&spi_slave_class);
4648 if (status < 0)
4649 goto err3;
4650 }
4651
4652 if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4653 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4654 if (IS_ENABLED(CONFIG_ACPI))
4655 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4656
4657 return 0;
4658
4659 err3:
4660 class_unregister(&spi_master_class);
4661 err2:
4662 bus_unregister(&spi_bus_type);
4663 err1:
4664 kfree(buf);
4665 buf = NULL;
4666 err0:
4667 return status;
4668 }
4669
4670 /*
4671 * A board_info is normally registered in arch_initcall(),
4672 * but even essential drivers wait till later.
4673 *
4674 * REVISIT only boardinfo really needs static linking. The rest (device and
4675 * driver registration) _could_ be dynamically linked (modular) ... Costs
4676 * include needing to have boardinfo data structures be much more public.
4677 */
4678 postcore_initcall(spi_init);
4679