xref: /openbmc/linux/drivers/spi/spi.c (revision e23feb16)
1 /*
2  * SPI init/core code
3  *
4  * Copyright (C) 2005 David Brownell
5  * Copyright (C) 2008 Secret Lab Technologies Ltd.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  */
21 
22 #include <linux/kernel.h>
23 #include <linux/kmod.h>
24 #include <linux/device.h>
25 #include <linux/init.h>
26 #include <linux/cache.h>
27 #include <linux/mutex.h>
28 #include <linux/of_device.h>
29 #include <linux/of_irq.h>
30 #include <linux/slab.h>
31 #include <linux/mod_devicetable.h>
32 #include <linux/spi/spi.h>
33 #include <linux/of_gpio.h>
34 #include <linux/pm_runtime.h>
35 #include <linux/export.h>
36 #include <linux/sched/rt.h>
37 #include <linux/delay.h>
38 #include <linux/kthread.h>
39 #include <linux/ioport.h>
40 #include <linux/acpi.h>
41 
42 static void spidev_release(struct device *dev)
43 {
44 	struct spi_device	*spi = to_spi_device(dev);
45 
46 	/* spi masters may clean up for released devices */
47 	if (spi->master->cleanup)
48 		spi->master->cleanup(spi);
49 
50 	spi_master_put(spi->master);
51 	kfree(spi);
52 }
53 
54 static ssize_t
55 modalias_show(struct device *dev, struct device_attribute *a, char *buf)
56 {
57 	const struct spi_device	*spi = to_spi_device(dev);
58 
59 	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
60 }
61 
62 static struct device_attribute spi_dev_attrs[] = {
63 	__ATTR_RO(modalias),
64 	__ATTR_NULL,
65 };
66 
67 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
68  * and the sysfs version makes coldplug work too.
69  */
70 
71 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
72 						const struct spi_device *sdev)
73 {
74 	while (id->name[0]) {
75 		if (!strcmp(sdev->modalias, id->name))
76 			return id;
77 		id++;
78 	}
79 	return NULL;
80 }
81 
82 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
83 {
84 	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
85 
86 	return spi_match_id(sdrv->id_table, sdev);
87 }
88 EXPORT_SYMBOL_GPL(spi_get_device_id);
89 
90 static int spi_match_device(struct device *dev, struct device_driver *drv)
91 {
92 	const struct spi_device	*spi = to_spi_device(dev);
93 	const struct spi_driver	*sdrv = to_spi_driver(drv);
94 
95 	/* Attempt an OF style match */
96 	if (of_driver_match_device(dev, drv))
97 		return 1;
98 
99 	/* Then try ACPI */
100 	if (acpi_driver_match_device(dev, drv))
101 		return 1;
102 
103 	if (sdrv->id_table)
104 		return !!spi_match_id(sdrv->id_table, spi);
105 
106 	return strcmp(spi->modalias, drv->name) == 0;
107 }
108 
109 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
110 {
111 	const struct spi_device		*spi = to_spi_device(dev);
112 
113 	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
114 	return 0;
115 }
116 
117 #ifdef CONFIG_PM_SLEEP
118 static int spi_legacy_suspend(struct device *dev, pm_message_t message)
119 {
120 	int			value = 0;
121 	struct spi_driver	*drv = to_spi_driver(dev->driver);
122 
123 	/* suspend will stop irqs and dma; no more i/o */
124 	if (drv) {
125 		if (drv->suspend)
126 			value = drv->suspend(to_spi_device(dev), message);
127 		else
128 			dev_dbg(dev, "... can't suspend\n");
129 	}
130 	return value;
131 }
132 
133 static int spi_legacy_resume(struct device *dev)
134 {
135 	int			value = 0;
136 	struct spi_driver	*drv = to_spi_driver(dev->driver);
137 
138 	/* resume may restart the i/o queue */
139 	if (drv) {
140 		if (drv->resume)
141 			value = drv->resume(to_spi_device(dev));
142 		else
143 			dev_dbg(dev, "... can't resume\n");
144 	}
145 	return value;
146 }
147 
148 static int spi_pm_suspend(struct device *dev)
149 {
150 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
151 
152 	if (pm)
153 		return pm_generic_suspend(dev);
154 	else
155 		return spi_legacy_suspend(dev, PMSG_SUSPEND);
156 }
157 
158 static int spi_pm_resume(struct device *dev)
159 {
160 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
161 
162 	if (pm)
163 		return pm_generic_resume(dev);
164 	else
165 		return spi_legacy_resume(dev);
166 }
167 
168 static int spi_pm_freeze(struct device *dev)
169 {
170 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
171 
172 	if (pm)
173 		return pm_generic_freeze(dev);
174 	else
175 		return spi_legacy_suspend(dev, PMSG_FREEZE);
176 }
177 
178 static int spi_pm_thaw(struct device *dev)
179 {
180 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
181 
182 	if (pm)
183 		return pm_generic_thaw(dev);
184 	else
185 		return spi_legacy_resume(dev);
186 }
187 
188 static int spi_pm_poweroff(struct device *dev)
189 {
190 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
191 
192 	if (pm)
193 		return pm_generic_poweroff(dev);
194 	else
195 		return spi_legacy_suspend(dev, PMSG_HIBERNATE);
196 }
197 
198 static int spi_pm_restore(struct device *dev)
199 {
200 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
201 
202 	if (pm)
203 		return pm_generic_restore(dev);
204 	else
205 		return spi_legacy_resume(dev);
206 }
207 #else
208 #define spi_pm_suspend	NULL
209 #define spi_pm_resume	NULL
210 #define spi_pm_freeze	NULL
211 #define spi_pm_thaw	NULL
212 #define spi_pm_poweroff	NULL
213 #define spi_pm_restore	NULL
214 #endif
215 
216 static const struct dev_pm_ops spi_pm = {
217 	.suspend = spi_pm_suspend,
218 	.resume = spi_pm_resume,
219 	.freeze = spi_pm_freeze,
220 	.thaw = spi_pm_thaw,
221 	.poweroff = spi_pm_poweroff,
222 	.restore = spi_pm_restore,
223 	SET_RUNTIME_PM_OPS(
224 		pm_generic_runtime_suspend,
225 		pm_generic_runtime_resume,
226 		NULL
227 	)
228 };
229 
230 struct bus_type spi_bus_type = {
231 	.name		= "spi",
232 	.dev_attrs	= spi_dev_attrs,
233 	.match		= spi_match_device,
234 	.uevent		= spi_uevent,
235 	.pm		= &spi_pm,
236 };
237 EXPORT_SYMBOL_GPL(spi_bus_type);
238 
239 
240 static int spi_drv_probe(struct device *dev)
241 {
242 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
243 
244 	return sdrv->probe(to_spi_device(dev));
245 }
246 
247 static int spi_drv_remove(struct device *dev)
248 {
249 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
250 
251 	return sdrv->remove(to_spi_device(dev));
252 }
253 
254 static void spi_drv_shutdown(struct device *dev)
255 {
256 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
257 
258 	sdrv->shutdown(to_spi_device(dev));
259 }
260 
261 /**
262  * spi_register_driver - register a SPI driver
263  * @sdrv: the driver to register
264  * Context: can sleep
265  */
266 int spi_register_driver(struct spi_driver *sdrv)
267 {
268 	sdrv->driver.bus = &spi_bus_type;
269 	if (sdrv->probe)
270 		sdrv->driver.probe = spi_drv_probe;
271 	if (sdrv->remove)
272 		sdrv->driver.remove = spi_drv_remove;
273 	if (sdrv->shutdown)
274 		sdrv->driver.shutdown = spi_drv_shutdown;
275 	return driver_register(&sdrv->driver);
276 }
277 EXPORT_SYMBOL_GPL(spi_register_driver);
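/*
 * A minimal protocol-driver sketch showing how spi_register_driver() is
 * normally reached via module_spi_driver(); the foo_* names below are
 * hypothetical placeholders, not drivers in this tree:
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		u8 cmd = 0x9f;		// made-up "read id" opcode
 *		u8 id[3];
 *
 *		return spi_write_then_read(spi, &cmd, 1, id, sizeof(id));
 *	}
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *			.owner	= THIS_MODULE,
 *		},
 *		.probe	= foo_probe,
 *	};
 *	module_spi_driver(foo_driver);
 */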
278 
279 /*-------------------------------------------------------------------------*/
280 
281 /* SPI devices should normally not be created by SPI device drivers; that
282  * would make them board-specific.  Similarly with SPI master drivers.
283  * Device registration normally goes into files like arch/.../mach.../board-YYY.c
284  * with other readonly (flashable) information about mainboard devices.
285  */
286 
287 struct boardinfo {
288 	struct list_head	list;
289 	struct spi_board_info	board_info;
290 };
291 
292 static LIST_HEAD(board_list);
293 static LIST_HEAD(spi_master_list);
294 
295 /*
296  * Used to protect add/del operations on the board_info list and the
297  * spi_master list, and their matching process
298  */
299 static DEFINE_MUTEX(board_lock);
300 
301 /**
302  * spi_alloc_device - Allocate a new SPI device
303  * @master: Controller to which device is connected
304  * Context: can sleep
305  *
306  * Allows a driver to allocate and initialize a spi_device without
307  * registering it immediately.  This allows a driver to directly
308  * fill the spi_device with device parameters before calling
309  * spi_add_device() on it.
310  *
311  * Caller is responsible to call spi_add_device() on the returned
312  * spi_device structure to add it to the SPI master.  If the caller
313  * needs to discard the spi_device without adding it, then it should
314  * call spi_dev_put() on it.
315  *
316  * Returns a pointer to the new device, or NULL.
317  */
318 struct spi_device *spi_alloc_device(struct spi_master *master)
319 {
320 	struct spi_device	*spi;
321 	struct device		*dev = master->dev.parent;
322 
323 	if (!spi_master_get(master))
324 		return NULL;
325 
326 	spi = kzalloc(sizeof *spi, GFP_KERNEL);
327 	if (!spi) {
328 		dev_err(dev, "cannot alloc spi_device\n");
329 		spi_master_put(master);
330 		return NULL;
331 	}
332 
333 	spi->master = master;
334 	spi->dev.parent = &master->dev;
335 	spi->dev.bus = &spi_bus_type;
336 	spi->dev.release = spidev_release;
337 	spi->cs_gpio = -ENOENT;
338 	device_initialize(&spi->dev);
339 	return spi;
340 }
341 EXPORT_SYMBOL_GPL(spi_alloc_device);
342 
343 /**
344  * spi_add_device - Add spi_device allocated with spi_alloc_device
345  * @spi: spi_device to register
346  *
347  * Companion function to spi_alloc_device.  Devices allocated with
348  * spi_alloc_device can be added onto the spi bus with this function.
349  *
350  * Returns 0 on success; negative errno on failure
351  */
352 int spi_add_device(struct spi_device *spi)
353 {
354 	static DEFINE_MUTEX(spi_add_lock);
355 	struct spi_master *master = spi->master;
356 	struct device *dev = master->dev.parent;
357 	struct device *d;
358 	int status;
359 
360 	/* Chipselects are numbered 0..max; validate. */
361 	if (spi->chip_select >= master->num_chipselect) {
362 		dev_err(dev, "cs%d >= max %d\n",
363 			spi->chip_select,
364 			master->num_chipselect);
365 		return -EINVAL;
366 	}
367 
368 	/* Set the bus ID string */
369 	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
370 			spi->chip_select);
371 
372 
373 	/* We need to make sure there's no other device with this
374 	 * chipselect **BEFORE** we call setup(), else we'll trash
375 	 * its configuration.  Lock against concurrent add() calls.
376 	 */
377 	mutex_lock(&spi_add_lock);
378 
379 	d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev));
380 	if (d != NULL) {
381 		dev_err(dev, "chipselect %d already in use\n",
382 				spi->chip_select);
383 		put_device(d);
384 		status = -EBUSY;
385 		goto done;
386 	}
387 
388 	if (master->cs_gpios)
389 		spi->cs_gpio = master->cs_gpios[spi->chip_select];
390 
391 	/* Drivers may modify this initial i/o setup, but will
392 	 * normally rely on the device being set up.  Devices
393 	 * using SPI_CS_HIGH can't coexist well otherwise...
394 	 */
395 	status = spi_setup(spi);
396 	if (status < 0) {
397 		dev_err(dev, "can't setup %s, status %d\n",
398 				dev_name(&spi->dev), status);
399 		goto done;
400 	}
401 
402 	/* Device may be bound to an active driver when this returns */
403 	status = device_add(&spi->dev);
404 	if (status < 0)
405 		dev_err(dev, "can't add %s, status %d\n",
406 				dev_name(&spi->dev), status);
407 	else
408 		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
409 
410 done:
411 	mutex_unlock(&spi_add_lock);
412 	return status;
413 }
414 EXPORT_SYMBOL_GPL(spi_add_device);
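/*
 * A hedged usage sketch for the alloc/add pair above; "adapter_master"
 * and the chip parameters are illustrative only:
 *
 *	struct spi_device *spi;
 *
 *	spi = spi_alloc_device(adapter_master);
 *	if (!spi)
 *		return -ENOMEM;
 *
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	spi->mode = SPI_MODE_3;
 *	strlcpy(spi->modalias, "foo", sizeof(spi->modalias));
 *
 *	status = spi_add_device(spi);
 *	if (status < 0)
 *		spi_dev_put(spi);	// discard the device we never added
 */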
415 
416 /**
417  * spi_new_device - instantiate one new SPI device
418  * @master: Controller to which device is connected
419  * @chip: Describes the SPI device
420  * Context: can sleep
421  *
422  * On typical mainboards, this is purely internal; and it's not needed
423  * after board init creates the hard-wired devices.  Some development
424  * platforms may not be able to use spi_register_board_info though, and
425  * this is exported so that for example a USB or parport based adapter
426  * driver could add devices (which it would learn about out-of-band).
427  *
428  * Returns the new device, or NULL.
429  */
430 struct spi_device *spi_new_device(struct spi_master *master,
431 				  struct spi_board_info *chip)
432 {
433 	struct spi_device	*proxy;
434 	int			status;
435 
436 	/* NOTE:  caller did any chip->bus_num checks necessary.
437 	 *
438 	 * Also, unless we change the return value convention to use
439 	 * error-or-pointer (not NULL-or-pointer), troubleshootability
440 	 * suggests syslogged diagnostics are best here (ugh).
441 	 */
442 
443 	proxy = spi_alloc_device(master);
444 	if (!proxy)
445 		return NULL;
446 
447 	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
448 
449 	proxy->chip_select = chip->chip_select;
450 	proxy->max_speed_hz = chip->max_speed_hz;
451 	proxy->mode = chip->mode;
452 	proxy->irq = chip->irq;
453 	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
454 	proxy->dev.platform_data = (void *) chip->platform_data;
455 	proxy->controller_data = chip->controller_data;
456 	proxy->controller_state = NULL;
457 
458 	status = spi_add_device(proxy);
459 	if (status < 0) {
460 		spi_dev_put(proxy);
461 		return NULL;
462 	}
463 
464 	return proxy;
465 }
466 EXPORT_SYMBOL_GPL(spi_new_device);
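/*
 * Sketch of how a hotplugged adapter driver might use spi_new_device();
 * the board_info contents here are invented for illustration:
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "foo",
 *		.max_speed_hz	= 2000000,
 *		.chip_select	= 1,
 *		.mode		= SPI_MODE_0,
 *	};
 *	struct spi_device *dev;
 *
 *	dev = spi_new_device(master, &chip);
 *	if (!dev)
 *		return -ENODEV;	// NULL-or-pointer convention, see above
 */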
467 
468 static void spi_match_master_to_boardinfo(struct spi_master *master,
469 				struct spi_board_info *bi)
470 {
471 	struct spi_device *dev;
472 
473 	if (master->bus_num != bi->bus_num)
474 		return;
475 
476 	dev = spi_new_device(master, bi);
477 	if (!dev)
478 		dev_err(master->dev.parent, "can't create new device for %s\n",
479 			bi->modalias);
480 }
481 
482 /**
483  * spi_register_board_info - register SPI devices for a given board
484  * @info: array of chip descriptors
485  * @n: how many descriptors are provided
486  * Context: can sleep
487  *
488  * Board-specific early init code calls this (probably during arch_initcall)
489  * with segments of the SPI device table.  Any device nodes are created later,
490  * after the relevant parent SPI controller (bus_num) is defined.  We keep
491  * this table of devices forever, so that reloading a controller driver will
492  * not make Linux forget about these hard-wired devices.
493  *
494  * Other code can also call this, e.g. a particular add-on board might provide
495  * SPI devices through its expansion connector, so code initializing that board
496  * would naturally declare its SPI devices.
497  *
498  * The board info passed can safely be __initdata ... but be careful of
499  * any embedded pointers (platform_data, etc), they're copied as-is.
500  */
501 int spi_register_board_info(struct spi_board_info const *info, unsigned n)
502 {
503 	struct boardinfo *bi;
504 	int i;
505 
506 	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
507 	if (!bi)
508 		return -ENOMEM;
509 
510 	for (i = 0; i < n; i++, bi++, info++) {
511 		struct spi_master *master;
512 
513 		memcpy(&bi->board_info, info, sizeof(*info));
514 		mutex_lock(&board_lock);
515 		list_add_tail(&bi->list, &board_list);
516 		list_for_each_entry(master, &spi_master_list, list)
517 			spi_match_master_to_boardinfo(master, &bi->board_info);
518 		mutex_unlock(&board_lock);
519 	}
520 
521 	return 0;
522 }
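/*
 * Typical board-file usage, sketched with invented table contents; the
 * call normally happens from the board's arch_initcall()-level code:
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo",
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *			.max_speed_hz	= 10000000,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */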
523 
524 /*-------------------------------------------------------------------------*/
525 
526 /**
527  * spi_pump_messages - kthread work function which processes spi message queue
528  * @work: pointer to kthread work struct contained in the master struct
529  *
530  * This function checks if there is any spi message in the queue that
531  * needs processing and if so calls out to the driver to initialize hardware
532  * and transfer each message.
533  *
534  */
535 static void spi_pump_messages(struct kthread_work *work)
536 {
537 	struct spi_master *master =
538 		container_of(work, struct spi_master, pump_messages);
539 	unsigned long flags;
540 	bool was_busy = false;
541 	int ret;
542 
543 	/* Lock queue and check for queue work */
544 	spin_lock_irqsave(&master->queue_lock, flags);
545 	if (list_empty(&master->queue) || !master->running) {
546 		if (!master->busy) {
547 			spin_unlock_irqrestore(&master->queue_lock, flags);
548 			return;
549 		}
550 		master->busy = false;
551 		spin_unlock_irqrestore(&master->queue_lock, flags);
552 		if (master->unprepare_transfer_hardware &&
553 		    master->unprepare_transfer_hardware(master))
554 			dev_err(&master->dev,
555 				"failed to unprepare transfer hardware\n");
556 		if (master->auto_runtime_pm) {
557 			pm_runtime_mark_last_busy(master->dev.parent);
558 			pm_runtime_put_autosuspend(master->dev.parent);
559 		}
560 		return;
561 	}
562 
563 	/* Make sure we are not already running a message */
564 	if (master->cur_msg) {
565 		spin_unlock_irqrestore(&master->queue_lock, flags);
566 		return;
567 	}
568 	/* Extract head of queue */
569 	master->cur_msg =
570 	    list_entry(master->queue.next, struct spi_message, queue);
571 
572 	list_del_init(&master->cur_msg->queue);
573 	if (master->busy)
574 		was_busy = true;
575 	else
576 		master->busy = true;
577 	spin_unlock_irqrestore(&master->queue_lock, flags);
578 
579 	if (!was_busy && master->auto_runtime_pm) {
580 		ret = pm_runtime_get_sync(master->dev.parent);
581 		if (ret < 0) {
582 			dev_err(&master->dev, "Failed to power device: %d\n",
583 				ret);
584 			return;
585 		}
586 	}
587 
588 	if (!was_busy && master->prepare_transfer_hardware) {
589 		ret = master->prepare_transfer_hardware(master);
590 		if (ret) {
591 			dev_err(&master->dev,
592 				"failed to prepare transfer hardware\n");
593 
594 			if (master->auto_runtime_pm)
595 				pm_runtime_put(master->dev.parent);
596 			return;
597 		}
598 	}
599 
600 	ret = master->transfer_one_message(master, master->cur_msg);
601 	if (ret) {
602 		dev_err(&master->dev,
603 			"failed to transfer one message from queue\n");
604 		return;
605 	}
606 }
607 
608 static int spi_init_queue(struct spi_master *master)
609 {
610 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
611 
612 	INIT_LIST_HEAD(&master->queue);
613 	spin_lock_init(&master->queue_lock);
614 
615 	master->running = false;
616 	master->busy = false;
617 
618 	init_kthread_worker(&master->kworker);
619 	master->kworker_task = kthread_run(kthread_worker_fn,
620 					   &master->kworker, "%s",
621 					   dev_name(&master->dev));
622 	if (IS_ERR(master->kworker_task)) {
623 		dev_err(&master->dev, "failed to create message pump task\n");
624 		return -ENOMEM;
625 	}
626 	init_kthread_work(&master->pump_messages, spi_pump_messages);
627 
628 	/*
629 	 * Master config will indicate if this controller should run the
630 	 * message pump with high (realtime) priority to reduce the transfer
631 	 * latency on the bus by minimising the delay between a transfer
632 	 * request and the scheduling of the message pump thread. Without this
633 	 * setting the message pump thread will remain at default priority.
634 	 */
635 	if (master->rt) {
636 		dev_info(&master->dev,
637 			"will run message pump with realtime priority\n");
638 		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
639 	}
640 
641 	return 0;
642 }
643 
644 /**
645  * spi_get_next_queued_message() - called by driver to check for queued
646  * messages
647  * @master: the master to check for queued messages
648  *
649  * If there are more messages in the queue, the next message is returned from
650  * this call.
651  */
652 struct spi_message *spi_get_next_queued_message(struct spi_master *master)
653 {
654 	struct spi_message *next;
655 	unsigned long flags;
656 
657 	/* get a pointer to the next message, if any */
658 	spin_lock_irqsave(&master->queue_lock, flags);
659 	if (list_empty(&master->queue))
660 		next = NULL;
661 	else
662 		next = list_entry(master->queue.next,
663 				  struct spi_message, queue);
664 	spin_unlock_irqrestore(&master->queue_lock, flags);
665 
666 	return next;
667 }
668 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
669 
670 /**
671  * spi_finalize_current_message() - the current message is complete
672  * @master: the master to return the message to
673  *
674  * Called by the driver to notify the core that the message in the front of the
675  * queue is complete and can be removed from the queue.
676  */
677 void spi_finalize_current_message(struct spi_master *master)
678 {
679 	struct spi_message *mesg;
680 	unsigned long flags;
681 
682 	spin_lock_irqsave(&master->queue_lock, flags);
683 	mesg = master->cur_msg;
684 	master->cur_msg = NULL;
685 
686 	queue_kthread_work(&master->kworker, &master->pump_messages);
687 	spin_unlock_irqrestore(&master->queue_lock, flags);
688 
689 	mesg->state = NULL;
690 	if (mesg->complete)
691 		mesg->complete(mesg->context);
692 }
693 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
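/*
 * Hedged sketch of how a queued controller driver pairs the two helpers
 * above inside its ->transfer_one_message() callback; the bar_* name and
 * the hardware access are placeholders:
 *
 *	static int bar_transfer_one_message(struct spi_master *master,
 *					    struct spi_message *msg)
 *	{
 *		struct spi_transfer *xfer;
 *
 *		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 *			// ... clock xfer->tx_buf out / xfer->rx_buf in ...
 *			msg->actual_length += xfer->len;
 *		}
 *
 *		msg->status = 0;
 *		spi_finalize_current_message(master);
 *		return 0;
 *	}
 */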
694 
695 static int spi_start_queue(struct spi_master *master)
696 {
697 	unsigned long flags;
698 
699 	spin_lock_irqsave(&master->queue_lock, flags);
700 
701 	if (master->running || master->busy) {
702 		spin_unlock_irqrestore(&master->queue_lock, flags);
703 		return -EBUSY;
704 	}
705 
706 	master->running = true;
707 	master->cur_msg = NULL;
708 	spin_unlock_irqrestore(&master->queue_lock, flags);
709 
710 	queue_kthread_work(&master->kworker, &master->pump_messages);
711 
712 	return 0;
713 }
714 
715 static int spi_stop_queue(struct spi_master *master)
716 {
717 	unsigned long flags;
718 	unsigned limit = 500;
719 	int ret = 0;
720 
721 	spin_lock_irqsave(&master->queue_lock, flags);
722 
723 	/*
724 	 * This is a bit lame, but is optimized for the common execution path.
725 	 * A wait_queue on the master->busy could be used, but then the common
726 	 * execution path (pump_messages) would be required to call wake_up or
727 	 * friends on every SPI message. Do this instead.
728 	 */
729 	while ((!list_empty(&master->queue) || master->busy) && limit--) {
730 		spin_unlock_irqrestore(&master->queue_lock, flags);
731 		msleep(10);
732 		spin_lock_irqsave(&master->queue_lock, flags);
733 	}
734 
735 	if (!list_empty(&master->queue) || master->busy)
736 		ret = -EBUSY;
737 	else
738 		master->running = false;
739 
740 	spin_unlock_irqrestore(&master->queue_lock, flags);
741 
742 	if (ret) {
743 		dev_warn(&master->dev,
744 			 "could not stop message queue\n");
745 		return ret;
746 	}
747 	return ret;
748 }
749 
750 static int spi_destroy_queue(struct spi_master *master)
751 {
752 	int ret;
753 
754 	ret = spi_stop_queue(master);
755 
756 	/*
757 	 * flush_kthread_worker will block until all work is done.
758 	 * If the reason that stop_queue timed out is that the work will never
759 	 * finish, then it does no good to call flush/stop thread, so
760 	 * return anyway.
761 	 */
762 	if (ret) {
763 		dev_err(&master->dev, "problem destroying queue\n");
764 		return ret;
765 	}
766 
767 	flush_kthread_worker(&master->kworker);
768 	kthread_stop(master->kworker_task);
769 
770 	return 0;
771 }
772 
773 /**
774  * spi_queued_transfer - transfer function for queued transfers
775  * @spi: spi device which is requesting transfer
776  * @msg: spi message which is to be queued to the driver's message queue
777  */
778 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
779 {
780 	struct spi_master *master = spi->master;
781 	unsigned long flags;
782 
783 	spin_lock_irqsave(&master->queue_lock, flags);
784 
785 	if (!master->running) {
786 		spin_unlock_irqrestore(&master->queue_lock, flags);
787 		return -ESHUTDOWN;
788 	}
789 	msg->actual_length = 0;
790 	msg->status = -EINPROGRESS;
791 
792 	list_add_tail(&msg->queue, &master->queue);
793 	if (!master->busy)
794 		queue_kthread_work(&master->kworker, &master->pump_messages);
795 
796 	spin_unlock_irqrestore(&master->queue_lock, flags);
797 	return 0;
798 }
799 
800 static int spi_master_initialize_queue(struct spi_master *master)
801 {
802 	int ret;
803 
804 	master->queued = true;
805 	master->transfer = spi_queued_transfer;
806 
807 	/* Initialize and start queue */
808 	ret = spi_init_queue(master);
809 	if (ret) {
810 		dev_err(&master->dev, "problem initializing queue\n");
811 		goto err_init_queue;
812 	}
813 	ret = spi_start_queue(master);
814 	if (ret) {
815 		dev_err(&master->dev, "problem starting queue\n");
816 		goto err_start_queue;
817 	}
818 
819 	return 0;
820 
821 err_start_queue:
822 err_init_queue:
823 	spi_destroy_queue(master);
824 	return ret;
825 }
826 
827 /*-------------------------------------------------------------------------*/
828 
829 #if defined(CONFIG_OF)
830 /**
831  * of_register_spi_devices() - Register child devices onto the SPI bus
832  * @master:	Pointer to spi_master device
833  *
834  * Registers an spi_device for each child node of master node which has a 'reg'
835  * property.
836  */
837 static void of_register_spi_devices(struct spi_master *master)
838 {
839 	struct spi_device *spi;
840 	struct device_node *nc;
841 	const __be32 *prop;
842 	char modalias[SPI_NAME_SIZE + 4];
843 	int rc;
844 	int len;
845 
846 	if (!master->dev.of_node)
847 		return;
848 
849 	for_each_available_child_of_node(master->dev.of_node, nc) {
850 		/* Alloc an spi_device */
851 		spi = spi_alloc_device(master);
852 		if (!spi) {
853 			dev_err(&master->dev, "spi_device alloc error for %s\n",
854 				nc->full_name);
855 			spi_dev_put(spi);
856 			continue;
857 		}
858 
859 		/* Select device driver */
860 		if (of_modalias_node(nc, spi->modalias,
861 				     sizeof(spi->modalias)) < 0) {
862 			dev_err(&master->dev, "cannot find modalias for %s\n",
863 				nc->full_name);
864 			spi_dev_put(spi);
865 			continue;
866 		}
867 
868 		/* Device address */
869 		prop = of_get_property(nc, "reg", &len);
870 		if (!prop || len < sizeof(*prop)) {
871 			dev_err(&master->dev, "%s has no 'reg' property\n",
872 				nc->full_name);
873 			spi_dev_put(spi);
874 			continue;
875 		}
876 		spi->chip_select = be32_to_cpup(prop);
877 
878 		/* Mode (clock phase/polarity/etc.) */
879 		if (of_find_property(nc, "spi-cpha", NULL))
880 			spi->mode |= SPI_CPHA;
881 		if (of_find_property(nc, "spi-cpol", NULL))
882 			spi->mode |= SPI_CPOL;
883 		if (of_find_property(nc, "spi-cs-high", NULL))
884 			spi->mode |= SPI_CS_HIGH;
885 		if (of_find_property(nc, "spi-3wire", NULL))
886 			spi->mode |= SPI_3WIRE;
887 
888 		/* Device DUAL/QUAD mode */
889 		prop = of_get_property(nc, "spi-tx-bus-width", &len);
890 		if (prop && len == sizeof(*prop)) {
891 			switch (be32_to_cpup(prop)) {
892 			case SPI_NBITS_SINGLE:
893 				break;
894 			case SPI_NBITS_DUAL:
895 				spi->mode |= SPI_TX_DUAL;
896 				break;
897 			case SPI_NBITS_QUAD:
898 				spi->mode |= SPI_TX_QUAD;
899 				break;
900 			default:
901 				dev_err(&master->dev,
902 					"spi-tx-bus-width %d not supported\n",
903 					be32_to_cpup(prop));
904 				spi_dev_put(spi);
905 				continue;
906 			}
907 		}
908 
909 		prop = of_get_property(nc, "spi-rx-bus-width", &len);
910 		if (prop && len == sizeof(*prop)) {
911 			switch (be32_to_cpup(prop)) {
912 			case SPI_NBITS_SINGLE:
913 				break;
914 			case SPI_NBITS_DUAL:
915 				spi->mode |= SPI_RX_DUAL;
916 				break;
917 			case SPI_NBITS_QUAD:
918 				spi->mode |= SPI_RX_QUAD;
919 				break;
920 			default:
921 				dev_err(&master->dev,
922 					"spi-rx-bus-width %d not supported\n",
923 					be32_to_cpup(prop));
924 				spi_dev_put(spi);
925 				continue;
926 			}
927 		}
928 
929 		/* Device speed */
930 		prop = of_get_property(nc, "spi-max-frequency", &len);
931 		if (!prop || len < sizeof(*prop)) {
932 			dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n",
933 				nc->full_name);
934 			spi_dev_put(spi);
935 			continue;
936 		}
937 		spi->max_speed_hz = be32_to_cpup(prop);
938 
939 		/* IRQ */
940 		spi->irq = irq_of_parse_and_map(nc, 0);
941 
942 		/* Store a pointer to the node in the device structure */
943 		of_node_get(nc);
944 		spi->dev.of_node = nc;
945 
946 		/* Register the new device */
947 		snprintf(modalias, sizeof(modalias), "%s%s", SPI_MODULE_PREFIX,
948 			 spi->modalias);
949 		request_module(modalias);
950 		rc = spi_add_device(spi);
951 		if (rc) {
952 			dev_err(&master->dev, "spi_device register error %s\n",
953 				nc->full_name);
954 			spi_dev_put(spi);
955 		}
956 
957 	}
958 }
959 #else
960 static void of_register_spi_devices(struct spi_master *master) { }
961 #endif
962 
963 #ifdef CONFIG_ACPI
964 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
965 {
966 	struct spi_device *spi = data;
967 
968 	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
969 		struct acpi_resource_spi_serialbus *sb;
970 
971 		sb = &ares->data.spi_serial_bus;
972 		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
973 			spi->chip_select = sb->device_selection;
974 			spi->max_speed_hz = sb->connection_speed;
975 
976 			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
977 				spi->mode |= SPI_CPHA;
978 			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
979 				spi->mode |= SPI_CPOL;
980 			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
981 				spi->mode |= SPI_CS_HIGH;
982 		}
983 	} else if (spi->irq < 0) {
984 		struct resource r;
985 
986 		if (acpi_dev_resource_interrupt(ares, 0, &r))
987 			spi->irq = r.start;
988 	}
989 
990 	/* Always tell the ACPI core to skip this resource */
991 	return 1;
992 }
993 
994 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
995 				       void *data, void **return_value)
996 {
997 	struct spi_master *master = data;
998 	struct list_head resource_list;
999 	struct acpi_device *adev;
1000 	struct spi_device *spi;
1001 	int ret;
1002 
1003 	if (acpi_bus_get_device(handle, &adev))
1004 		return AE_OK;
1005 	if (acpi_bus_get_status(adev) || !adev->status.present)
1006 		return AE_OK;
1007 
1008 	spi = spi_alloc_device(master);
1009 	if (!spi) {
1010 		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
1011 			dev_name(&adev->dev));
1012 		return AE_NO_MEMORY;
1013 	}
1014 
1015 	ACPI_HANDLE_SET(&spi->dev, handle);
1016 	spi->irq = -1;
1017 
1018 	INIT_LIST_HEAD(&resource_list);
1019 	ret = acpi_dev_get_resources(adev, &resource_list,
1020 				     acpi_spi_add_resource, spi);
1021 	acpi_dev_free_resource_list(&resource_list);
1022 
1023 	if (ret < 0 || !spi->max_speed_hz) {
1024 		spi_dev_put(spi);
1025 		return AE_OK;
1026 	}
1027 
1028 	strlcpy(spi->modalias, dev_name(&adev->dev), sizeof(spi->modalias));
1029 	if (spi_add_device(spi)) {
1030 		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
1031 			dev_name(&adev->dev));
1032 		spi_dev_put(spi);
1033 	}
1034 
1035 	return AE_OK;
1036 }
1037 
1038 static void acpi_register_spi_devices(struct spi_master *master)
1039 {
1040 	acpi_status status;
1041 	acpi_handle handle;
1042 
1043 	handle = ACPI_HANDLE(master->dev.parent);
1044 	if (!handle)
1045 		return;
1046 
1047 	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1048 				     acpi_spi_add_device, NULL,
1049 				     master, NULL);
1050 	if (ACPI_FAILURE(status))
1051 		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
1052 }
1053 #else
1054 static inline void acpi_register_spi_devices(struct spi_master *master) {}
1055 #endif /* CONFIG_ACPI */
1056 
1057 static void spi_master_release(struct device *dev)
1058 {
1059 	struct spi_master *master;
1060 
1061 	master = container_of(dev, struct spi_master, dev);
1062 	kfree(master);
1063 }
1064 
1065 static struct class spi_master_class = {
1066 	.name		= "spi_master",
1067 	.owner		= THIS_MODULE,
1068 	.dev_release	= spi_master_release,
1069 };
1070 
1071 
1072 
1073 /**
1074  * spi_alloc_master - allocate SPI master controller
1075  * @dev: the controller, possibly using the platform_bus
1076  * @size: how much zeroed driver-private data to allocate; the pointer to this
1077  *	memory is in the driver_data field of the returned device,
1078  *	accessible with spi_master_get_devdata().
1079  * Context: can sleep
1080  *
1081  * This call is used only by SPI master controller drivers, which are the
1082  * only ones directly touching chip registers.  It's how they allocate
1083  * an spi_master structure, prior to calling spi_register_master().
1084  *
1085  * This must be called from context that can sleep.  It returns the SPI
1086  * master structure on success, else NULL.
1087  *
1088  * The caller is responsible for assigning the bus number and initializing
1089  * the master's methods before calling spi_register_master(); and (after errors
1090  * adding the device) calling spi_master_put() and kfree() to prevent a memory
1091  * leak.
1092  */
1093 struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
1094 {
1095 	struct spi_master	*master;
1096 
1097 	if (!dev)
1098 		return NULL;
1099 
1100 	master = kzalloc(size + sizeof *master, GFP_KERNEL);
1101 	if (!master)
1102 		return NULL;
1103 
1104 	device_initialize(&master->dev);
1105 	master->bus_num = -1;
1106 	master->num_chipselect = 1;
1107 	master->dev.class = &spi_master_class;
1108 	master->dev.parent = get_device(dev);
1109 	spi_master_set_devdata(master, &master[1]);
1110 
1111 	return master;
1112 }
1113 EXPORT_SYMBOL_GPL(spi_alloc_master);
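/*
 * Illustrative allocation in a controller driver's probe(); the baz_priv
 * type stands in for whatever private state the driver keeps:
 *
 *	struct spi_master *master;
 *	struct baz_priv *priv;
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!master)
 *		return -ENOMEM;
 *	priv = spi_master_get_devdata(master);
 *	// ... initialize priv and the master's methods, then register ...
 */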
1114 
1115 #ifdef CONFIG_OF
1116 static int of_spi_register_master(struct spi_master *master)
1117 {
1118 	int nb, i, *cs;
1119 	struct device_node *np = master->dev.of_node;
1120 
1121 	if (!np)
1122 		return 0;
1123 
1124 	nb = of_gpio_named_count(np, "cs-gpios");
1125 	master->num_chipselect = max(nb, (int)master->num_chipselect);
1126 
1127 	/* Return error only for an incorrectly formed cs-gpios property */
1128 	if (nb == 0 || nb == -ENOENT)
1129 		return 0;
1130 	else if (nb < 0)
1131 		return nb;
1132 
1133 	cs = devm_kzalloc(&master->dev,
1134 			  sizeof(int) * master->num_chipselect,
1135 			  GFP_KERNEL);
1136 	master->cs_gpios = cs;
1137 
1138 	if (!master->cs_gpios)
1139 		return -ENOMEM;
1140 
1141 	for (i = 0; i < master->num_chipselect; i++)
1142 		cs[i] = -ENOENT;
1143 
1144 	for (i = 0; i < nb; i++)
1145 		cs[i] = of_get_named_gpio(np, "cs-gpios", i);
1146 
1147 	return 0;
1148 }
1149 #else
1150 static int of_spi_register_master(struct spi_master *master)
1151 {
1152 	return 0;
1153 }
1154 #endif
1155 
1156 /**
1157  * spi_register_master - register SPI master controller
1158  * @master: initialized master, originally from spi_alloc_master()
1159  * Context: can sleep
1160  *
1161  * SPI master controllers connect to their drivers using some non-SPI bus,
1162  * such as the platform bus.  The final stage of probe() in that code
1163  * includes calling spi_register_master() to hook up to this SPI bus glue.
1164  *
1165  * SPI controllers use board specific (often SOC specific) bus numbers,
1166  * and board-specific addressing for SPI devices combines those numbers
1167  * with chip select numbers.  Since SPI does not directly support dynamic
1168  * device identification, boards need configuration tables telling which
1169  * chip is at which address.
1170  *
1171  * This must be called from context that can sleep.  It returns zero on
1172  * success, else a negative error code (dropping the master's refcount).
1173  * After a successful return, the caller is responsible for calling
1174  * spi_unregister_master().
1175  */
1176 int spi_register_master(struct spi_master *master)
1177 {
1178 	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
1179 	struct device		*dev = master->dev.parent;
1180 	struct boardinfo	*bi;
1181 	int			status = -ENODEV;
1182 	int			dynamic = 0;
1183 
1184 	if (!dev)
1185 		return -ENODEV;
1186 
1187 	status = of_spi_register_master(master);
1188 	if (status)
1189 		return status;
1190 
1191 	/* even if it's just one always-selected device, there must
1192 	 * be at least one chipselect
1193 	 */
1194 	if (master->num_chipselect == 0)
1195 		return -EINVAL;
1196 
1197 	if ((master->bus_num < 0) && master->dev.of_node)
1198 		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
1199 
1200 	/* convention:  dynamically assigned bus IDs count down from the max */
1201 	if (master->bus_num < 0) {
1202 		/* FIXME switch to an IDR based scheme, something like
1203 		 * I2C now uses, so we can't run out of "dynamic" IDs
1204 		 */
1205 		master->bus_num = atomic_dec_return(&dyn_bus_id);
1206 		dynamic = 1;
1207 	}
1208 
1209 	spin_lock_init(&master->bus_lock_spinlock);
1210 	mutex_init(&master->bus_lock_mutex);
1211 	master->bus_lock_flag = 0;
1212 
1213 	/* register the device, then userspace will see it.
1214 	 * registration fails if the bus ID is in use.
1215 	 */
1216 	dev_set_name(&master->dev, "spi%u", master->bus_num);
1217 	status = device_add(&master->dev);
1218 	if (status < 0)
1219 		goto done;
1220 	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
1221 			dynamic ? " (dynamic)" : "");
1222 
1223 	/* If we're using a queued driver, start the queue */
1224 	if (master->transfer)
1225 		dev_info(dev, "master is unqueued, this is deprecated\n");
1226 	else {
1227 		status = spi_master_initialize_queue(master);
1228 		if (status) {
1229 			device_del(&master->dev);
1230 			goto done;
1231 		}
1232 	}
1233 
1234 	mutex_lock(&board_lock);
1235 	list_add_tail(&master->list, &spi_master_list);
1236 	list_for_each_entry(bi, &board_list, list)
1237 		spi_match_master_to_boardinfo(master, &bi->board_info);
1238 	mutex_unlock(&board_lock);
1239 
1240 	/* Register devices from the device tree and ACPI */
1241 	of_register_spi_devices(master);
1242 	acpi_register_spi_devices(master);
1243 done:
1244 	return status;
1245 }
1246 EXPORT_SYMBOL_GPL(spi_register_master);
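/*
 * Sketch of the registration step at the end of a controller probe();
 * the field values, the baz_* callbacks and the error handling are
 * illustrative only (see the spi_alloc_master() notes about dropping
 * the reference on failure):
 *
 *	master->bus_num = pdev->id;
 *	master->num_chipselect = 4;
 *	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *	master->setup = baz_setup;
 *	master->transfer_one_message = baz_transfer_one_message;
 *	master->dev.of_node = pdev->dev.of_node;
 *
 *	status = spi_register_master(master);
 *	if (status < 0)
 *		spi_master_put(master);
 *	return status;
 *
 * The matching remove() would call spi_unregister_master(master).
 */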
1247 
1248 static int __unregister(struct device *dev, void *null)
1249 {
1250 	spi_unregister_device(to_spi_device(dev));
1251 	return 0;
1252 }
1253 
1254 /**
1255  * spi_unregister_master - unregister SPI master controller
1256  * @master: the master being unregistered
1257  * Context: can sleep
1258  *
1259  * This call is used only by SPI master controller drivers, which are the
1260  * only ones directly touching chip registers.
1261  *
1262  * This must be called from context that can sleep.
1263  */
1264 void spi_unregister_master(struct spi_master *master)
1265 {
1266 	int dummy;
1267 
1268 	if (master->queued) {
1269 		if (spi_destroy_queue(master))
1270 			dev_err(&master->dev, "queue remove failed\n");
1271 	}
1272 
1273 	mutex_lock(&board_lock);
1274 	list_del(&master->list);
1275 	mutex_unlock(&board_lock);
1276 
1277 	dummy = device_for_each_child(&master->dev, NULL, __unregister);
1278 	device_unregister(&master->dev);
1279 }
1280 EXPORT_SYMBOL_GPL(spi_unregister_master);
1281 
1282 int spi_master_suspend(struct spi_master *master)
1283 {
1284 	int ret;
1285 
1286 	/* Basically no-ops for non-queued masters */
1287 	if (!master->queued)
1288 		return 0;
1289 
1290 	ret = spi_stop_queue(master);
1291 	if (ret)
1292 		dev_err(&master->dev, "queue stop failed\n");
1293 
1294 	return ret;
1295 }
1296 EXPORT_SYMBOL_GPL(spi_master_suspend);
1297 
1298 int spi_master_resume(struct spi_master *master)
1299 {
1300 	int ret;
1301 
1302 	if (!master->queued)
1303 		return 0;
1304 
1305 	ret = spi_start_queue(master);
1306 	if (ret)
1307 		dev_err(&master->dev, "queue restart failed\n");
1308 
1309 	return ret;
1310 }
1311 EXPORT_SYMBOL_GPL(spi_master_resume);
1312 
1313 static int __spi_master_match(struct device *dev, const void *data)
1314 {
1315 	struct spi_master *m;
1316 	const u16 *bus_num = data;
1317 
1318 	m = container_of(dev, struct spi_master, dev);
1319 	return m->bus_num == *bus_num;
1320 }
1321 
1322 /**
1323  * spi_busnum_to_master - look up master associated with bus_num
1324  * @bus_num: the master's bus number
1325  * Context: can sleep
1326  *
1327  * This call may be used with devices that are registered after
1328  * arch init time.  It returns a refcounted pointer to the relevant
1329  * spi_master (which the caller must release), or NULL if there is
1330  * no such master registered.
1331  */
1332 struct spi_master *spi_busnum_to_master(u16 bus_num)
1333 {
1334 	struct device		*dev;
1335 	struct spi_master	*master = NULL;
1336 
1337 	dev = class_find_device(&spi_master_class, NULL, &bus_num,
1338 				__spi_master_match);
1339 	if (dev)
1340 		master = container_of(dev, struct spi_master, dev);
1341 	/* reference obtained in class_find_device */
1342 	return master;
1343 }
1344 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
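/*
 * Lookup sketch (bus number 2 is arbitrary); the returned master holds a
 * reference which the caller must drop when done:
 *
 *	struct spi_master *master = spi_busnum_to_master(2);
 *
 *	if (master) {
 *		// ... e.g. hand it to spi_new_device() ...
 *		spi_master_put(master);
 *	}
 */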
1345 
1346 
1347 /*-------------------------------------------------------------------------*/
1348 
1349 /* Core methods for SPI master protocol drivers.  Some of the
1350  * other core methods are currently defined as inline functions.
1351  */
1352 
1353 /**
1354  * spi_setup - setup SPI mode and clock rate
1355  * @spi: the device whose settings are being modified
1356  * Context: can sleep, and no requests are queued to the device
1357  *
1358  * SPI protocol drivers may need to update the transfer mode if the
1359  * device doesn't work with its default.  They may likewise need
1360  * to update clock rates or word sizes from initial values.  This function
1361  * changes those settings, and must be called from a context that can sleep.
1362  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
1363  * effect the next time the device is selected and data is transferred to
1364  * or from it.  When this function returns, the spi device is deselected.
1365  *
1366  * Note that this call will fail if the protocol driver specifies an option
1367  * that the underlying controller or its driver does not support.  For
1368  * example, not all hardware supports wire transfers using nine bit words,
1369  * LSB-first wire encoding, or active-high chipselects.
1370  */
1371 int spi_setup(struct spi_device *spi)
1372 {
1373 	unsigned	bad_bits;
1374 	int		status = 0;
1375 
1376 	/* check mode to prevent that DUAL and QUAD set at the same time
1377 	 */
1378 	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
1379 		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
1380 		dev_err(&spi->dev,
1381 		"setup: can not select dual and quad at the same time\n");
1382 		return -EINVAL;
1383 	}
1384 	/* in SPI_3WIRE mode, DUAL and QUAD are forbidden
1385 	 */
1386 	if ((spi->mode & SPI_3WIRE) && (spi->mode &
1387 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
1388 		return -EINVAL;
1389 	/* help drivers fail *cleanly* when they need options
1390 	 * that aren't supported with their current master
1391 	 */
1392 	bad_bits = spi->mode & ~spi->master->mode_bits;
1393 	if (bad_bits) {
1394 		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
1395 			bad_bits);
1396 		return -EINVAL;
1397 	}
1398 
1399 	if (!spi->bits_per_word)
1400 		spi->bits_per_word = 8;
1401 
1402 	if (spi->master->setup)
1403 		status = spi->master->setup(spi);
1404 
1405 	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s"
1406 				"%u bits/w, %u Hz max --> %d\n",
1407 			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
1408 			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
1409 			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
1410 			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
1411 			(spi->mode & SPI_LOOP) ? "loopback, " : "",
1412 			spi->bits_per_word, spi->max_speed_hz,
1413 			status);
1414 
1415 	return status;
1416 }
1417 EXPORT_SYMBOL_GPL(spi_setup);
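/*
 * Typical protocol-driver usage, with illustrative values; the call fails
 * cleanly if the master can't provide what is asked for:
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 5000000;
 *
 *	status = spi_setup(spi);
 *	if (status < 0)
 *		return status;
 */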
1418 
1419 static int __spi_async(struct spi_device *spi, struct spi_message *message)
1420 {
1421 	struct spi_master *master = spi->master;
1422 	struct spi_transfer *xfer;
1423 
1424 	if (list_empty(&message->transfers))
1425 		return -EINVAL;
1426 	if (!message->complete)
1427 		return -EINVAL;
1428 
1429 	/* Half-duplex links include original MicroWire, and ones with
1430 	 * only one data pin like SPI_3WIRE (switches direction) or where
1431 	 * either MOSI or MISO is missing.  They can also be caused by
1432 	 * software limitations.
1433 	 */
1434 	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
1435 			|| (spi->mode & SPI_3WIRE)) {
1436 		unsigned flags = master->flags;
1437 
1438 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
1439 			if (xfer->rx_buf && xfer->tx_buf)
1440 				return -EINVAL;
1441 			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
1442 				return -EINVAL;
1443 			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
1444 				return -EINVAL;
1445 		}
1446 	}
1447 
1448 	/*
1449 	 * Set transfer bits_per_word and max speed as spi device default if
1450 	 * it is not set for this transfer.
1451 	 * Set transfer tx_nbits and rx_nbits as single transfer default
1452 	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
1453 	 */
1454 	list_for_each_entry(xfer, &message->transfers, transfer_list) {
1455 		message->frame_length += xfer->len;
1456 		if (!xfer->bits_per_word)
1457 			xfer->bits_per_word = spi->bits_per_word;
1458 		if (!xfer->speed_hz) {
1459 			xfer->speed_hz = spi->max_speed_hz;
1460 			if (master->max_speed_hz &&
1461 			    xfer->speed_hz > master->max_speed_hz)
1462 				xfer->speed_hz = master->max_speed_hz;
1463 		}
1464 
1465 		if (master->bits_per_word_mask) {
1466 			/* Only 32 bits fit in the mask */
1467 			if (xfer->bits_per_word > 32)
1468 				return -EINVAL;
1469 			if (!(master->bits_per_word_mask &
1470 					BIT(xfer->bits_per_word - 1)))
1471 				return -EINVAL;
1472 		}
1473 
1474 		if (xfer->speed_hz && master->min_speed_hz &&
1475 		    xfer->speed_hz < master->min_speed_hz)
1476 			return -EINVAL;
1477 		if (xfer->speed_hz && master->max_speed_hz &&
1478 		    xfer->speed_hz > master->max_speed_hz)
1479 			return -EINVAL;
1480 
1481 		if (xfer->tx_buf && !xfer->tx_nbits)
1482 			xfer->tx_nbits = SPI_NBITS_SINGLE;
1483 		if (xfer->rx_buf && !xfer->rx_nbits)
1484 			xfer->rx_nbits = SPI_NBITS_SINGLE;
1485 		/* check transfer tx/rx_nbits:
1486 		 * 1. check the value matches one of single, dual and quad
1487 		 * 2. check tx/rx_nbits match the mode in spi_device
1488 		 * 3. if SPI_3WIRE, tx/rx_nbits should be single
1489 		 */
1490 		if (xfer->tx_buf) {
1491 			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
1492 				xfer->tx_nbits != SPI_NBITS_DUAL &&
1493 				xfer->tx_nbits != SPI_NBITS_QUAD)
1494 				return -EINVAL;
1495 			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
1496 				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
1497 				return -EINVAL;
1498 			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
1499 				!(spi->mode & SPI_TX_QUAD))
1500 				return -EINVAL;
1501 			if ((spi->mode & SPI_3WIRE) &&
1502 				(xfer->tx_nbits != SPI_NBITS_SINGLE))
1503 				return -EINVAL;
1504 		}
1505 		/* check transfer rx_nbits */
1506 		if (xfer->rx_buf) {
1507 			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
1508 				xfer->rx_nbits != SPI_NBITS_DUAL &&
1509 				xfer->rx_nbits != SPI_NBITS_QUAD)
1510 				return -EINVAL;
1511 			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
1512 				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
1513 				return -EINVAL;
1514 			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
1515 				!(spi->mode & SPI_RX_QUAD))
1516 				return -EINVAL;
1517 			if ((spi->mode & SPI_3WIRE) &&
1518 				(xfer->rx_nbits != SPI_NBITS_SINGLE))
1519 				return -EINVAL;
1520 		}
1521 	}
1522 
1523 	message->spi = spi;
1524 	message->status = -EINPROGRESS;
1525 	return master->transfer(spi, message);
1526 }
1527 
1528 /**
1529  * spi_async - asynchronous SPI transfer
1530  * @spi: device with which data will be exchanged
1531  * @message: describes the data transfers, including completion callback
1532  * Context: any (irqs may be blocked, etc)
1533  *
1534  * This call may be used in_irq and other contexts which can't sleep,
1535  * as well as from task contexts which can sleep.
1536  *
1537  * The completion callback is invoked in a context which can't sleep.
1538  * Before that invocation, the value of message->status is undefined.
1539  * When the callback is issued, message->status holds either zero (to
1540  * indicate complete success) or a negative error code.  After that
1541  * callback returns, the driver which issued the transfer request may
1542  * deallocate the associated memory; it's no longer in use by any SPI
1543  * core or controller driver code.
1544  *
1545  * Note that although all messages to a spi_device are handled in
1546  * FIFO order, messages may go to different devices in other orders.
1547  * Some device might be higher priority, or have various "hard" access
1548  * time requirements, for example.
1549  *
1550  * On detection of any fault during the transfer, processing of
1551  * the entire message is aborted, and the device is deselected.
1552  * Until returning from the associated message completion callback,
1553  * no other spi_message queued to that device will be processed.
1554  * (This rule applies equally to all the synchronous transfer calls,
1555  * which are wrappers around this core asynchronous primitive.)
1556  */
1557 int spi_async(struct spi_device *spi, struct spi_message *message)
1558 {
1559 	struct spi_master *master = spi->master;
1560 	int ret;
1561 	unsigned long flags;
1562 
1563 	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
1564 
1565 	if (master->bus_lock_flag)
1566 		ret = -EBUSY;
1567 	else
1568 		ret = __spi_async(spi, message);
1569 
1570 	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
1571 
1572 	return ret;
1573 }
1574 EXPORT_SYMBOL_GPL(spi_async);
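/*
 * Hedged async sketch: the message, transfer and context must stay
 * allocated until the completion callback has run; the qux_* names are
 * invented:
 *
 *	static void qux_complete(void *context)
 *	{
 *		struct qux_state *st = context;
 *
 *		// msg->status is valid here; can't sleep in this context
 *		complete(&st->done);
 *	}
 *
 *	...
 *	spi_message_init(&st->msg);
 *	spi_message_add_tail(&st->xfer, &st->msg);
 *	st->msg.complete = qux_complete;
 *	st->msg.context = st;
 *	ret = spi_async(spi, &st->msg);
 */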
1575 
1576 /**
1577  * spi_async_locked - version of spi_async with exclusive bus usage
1578  * @spi: device with which data will be exchanged
1579  * @message: describes the data transfers, including completion callback
1580  * Context: any (irqs may be blocked, etc)
1581  *
1582  * This call may be used in_irq and other contexts which can't sleep,
1583  * as well as from task contexts which can sleep.
1584  *
1585  * The completion callback is invoked in a context which can't sleep.
1586  * Before that invocation, the value of message->status is undefined.
1587  * When the callback is issued, message->status holds either zero (to
1588  * indicate complete success) or a negative error code.  After that
1589  * callback returns, the driver which issued the transfer request may
1590  * deallocate the associated memory; it's no longer in use by any SPI
1591  * core or controller driver code.
1592  *
1593  * Note that although all messages to a spi_device are handled in
1594  * FIFO order, messages may go to different devices in other orders.
1595  * Some device might be higher priority, or have various "hard" access
1596  * time requirements, for example.
1597  *
1598  * On detection of any fault during the transfer, processing of
1599  * the entire message is aborted, and the device is deselected.
1600  * Until returning from the associated message completion callback,
1601  * no other spi_message queued to that device will be processed.
1602  * (This rule applies equally to all the synchronous transfer calls,
1603  * which are wrappers around this core asynchronous primitive.)
1604  */
1605 int spi_async_locked(struct spi_device *spi, struct spi_message *message)
1606 {
1607 	struct spi_master *master = spi->master;
1608 	int ret;
1609 	unsigned long flags;
1610 
1611 	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
1612 
1613 	ret = __spi_async(spi, message);
1614 
1615 	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
1616 
1617 	return ret;
1618 
1619 }
1620 EXPORT_SYMBOL_GPL(spi_async_locked);
1621 
1622 
1623 /*-------------------------------------------------------------------------*/
1624 
1625 /* Utility methods for SPI master protocol drivers, layered on
1626  * top of the core.  Some other utility methods are defined as
1627  * inline functions.
1628  */
1629 
1630 static void spi_complete(void *arg)
1631 {
1632 	complete(arg);
1633 }
1634 
1635 static int __spi_sync(struct spi_device *spi, struct spi_message *message,
1636 		      int bus_locked)
1637 {
1638 	DECLARE_COMPLETION_ONSTACK(done);
1639 	int status;
1640 	struct spi_master *master = spi->master;
1641 
1642 	message->complete = spi_complete;
1643 	message->context = &done;
1644 
1645 	if (!bus_locked)
1646 		mutex_lock(&master->bus_lock_mutex);
1647 
1648 	status = spi_async_locked(spi, message);
1649 
1650 	if (!bus_locked)
1651 		mutex_unlock(&master->bus_lock_mutex);
1652 
1653 	if (status == 0) {
1654 		wait_for_completion(&done);
1655 		status = message->status;
1656 	}
1657 	message->context = NULL;
1658 	return status;
1659 }
1660 
1661 /**
1662  * spi_sync - blocking/synchronous SPI data transfers
1663  * @spi: device with which data will be exchanged
1664  * @message: describes the data transfers
1665  * Context: can sleep
1666  *
1667  * This call may only be used from a context that may sleep.  The sleep
1668  * is non-interruptible, and has no timeout.  Low-overhead controller
1669  * drivers may DMA directly into and out of the message buffers.
1670  *
1671  * Note that the SPI device's chip select is active during the message,
1672  * and then is normally disabled between messages.  Drivers for some
1673  * frequently-used devices may want to minimize costs of selecting a chip,
1674  * by leaving it selected in anticipation that the next message will go
1675  * to the same chip.  (That may increase power usage.)
1676  *
1677  * Also, the caller is guaranteeing that the memory associated with the
1678  * message will not be freed before this call returns.
1679  *
1680  * It returns zero on success, else a negative error code.
1681  */
1682 int spi_sync(struct spi_device *spi, struct spi_message *message)
1683 {
1684 	return __spi_sync(spi, message, 0);
1685 }
1686 EXPORT_SYMBOL_GPL(spi_sync);
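/*
 * Minimal synchronous transfer sketch; tx/rx are assumed to be DMA-safe
 * (e.g. kmalloc'd) buffers of len bytes:
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf	= tx,
 *		.rx_buf	= rx,
 *		.len	= len,
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	status = spi_sync(spi, &msg);
 */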
1687 
1688 /**
1689  * spi_sync_locked - version of spi_sync with exclusive bus usage
1690  * @spi: device with which data will be exchanged
1691  * @message: describes the data transfers
1692  * Context: can sleep
1693  *
1694  * This call may only be used from a context that may sleep.  The sleep
1695  * is non-interruptible, and has no timeout.  Low-overhead controller
1696  * drivers may DMA directly into and out of the message buffers.
1697  *
1698  * This call should be used by drivers that require exclusive access to the
1699  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
1700  * be released by a spi_bus_unlock call when the exclusive access is over.
1701  *
1702  * It returns zero on success, else a negative error code.
1703  */
1704 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
1705 {
1706 	return __spi_sync(spi, message, 1);
1707 }
1708 EXPORT_SYMBOL_GPL(spi_sync_locked);
1709 
1710 /**
1711  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
1712  * @master: SPI bus master that should be locked for exclusive bus access
1713  * Context: can sleep
1714  *
1715  * This call may only be used from a context that may sleep.  The sleep
1716  * is non-interruptible, and has no timeout.
1717  *
1718  * This call should be used by drivers that require exclusive access to the
1719  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
1720  * exclusive access is over. Data transfer must be done by spi_sync_locked
1721  * and spi_async_locked calls when the SPI bus lock is held.
1722  *
1723  * It returns zero on success, else a negative error code.
1724  */
1725 int spi_bus_lock(struct spi_master *master)
1726 {
1727 	unsigned long flags;
1728 
1729 	mutex_lock(&master->bus_lock_mutex);
1730 
1731 	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
1732 	master->bus_lock_flag = 1;
1733 	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
1734 
1735 	/* mutex remains locked until spi_bus_unlock is called */
1736 
1737 	return 0;
1738 }
1739 EXPORT_SYMBOL_GPL(spi_bus_lock);
1740 
1741 /**
1742  * spi_bus_unlock - release the lock for exclusive SPI bus usage
1743  * @master: SPI bus master that was locked for exclusive bus access
1744  * Context: can sleep
1745  *
1746  * This call may only be used from a context that may sleep.  The sleep
1747  * is non-interruptible, and has no timeout.
1748  *
1749  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
1750  * call.
1751  *
1752  * It returns zero on success, else a negative error code.
1753  */
1754 int spi_bus_unlock(struct spi_master *master)
1755 {
1756 	master->bus_lock_flag = 0;
1757 
1758 	mutex_unlock(&master->bus_lock_mutex);
1759 
1760 	return 0;
1761 }
1762 EXPORT_SYMBOL_GPL(spi_bus_unlock);
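/*
 * Sketch of an exclusive-access sequence using the lock API above; the
 * two messages are placeholders for whatever must not be interleaved
 * with traffic from other devices on the bus:
 *
 *	spi_bus_lock(spi->master);
 *	ret = spi_sync_locked(spi, &msg_a);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg_b);
 *	spi_bus_unlock(spi->master);
 */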
1763 
1764 /* portable code must never pass more than 32 bytes */
1765 #define	SPI_BUFSIZ	max(32,SMP_CACHE_BYTES)
1766 
1767 static u8	*buf;
1768 
1769 /**
1770  * spi_write_then_read - SPI synchronous write followed by read
1771  * @spi: device with which data will be exchanged
1772  * @txbuf: data to be written (need not be dma-safe)
1773  * @n_tx: size of txbuf, in bytes
1774  * @rxbuf: buffer into which data will be read (need not be dma-safe)
1775  * @n_rx: size of rxbuf, in bytes
1776  * Context: can sleep
1777  *
1778  * This performs a half duplex MicroWire style transaction with the
1779  * device, sending txbuf and then reading rxbuf.  The return value
1780  * is zero for success, else a negative errno status code.
1781  * This call may only be used from a context that may sleep.
1782  *
1783  * Parameters to this routine are always copied using a small buffer;
1784  * portable code should never use this for more than 32 bytes.
1785  * Performance-sensitive or bulk transfer code should instead use
1786  * spi_{async,sync}() calls with dma-safe buffers.
1787  */
1788 int spi_write_then_read(struct spi_device *spi,
1789 		const void *txbuf, unsigned n_tx,
1790 		void *rxbuf, unsigned n_rx)
1791 {
1792 	static DEFINE_MUTEX(lock);
1793 
1794 	int			status;
1795 	struct spi_message	message;
1796 	struct spi_transfer	x[2];
1797 	u8			*local_buf;
1798 
1799 	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
1800 	 * copying here (as a pure convenience thing), but we can
1801 	 * keep heap costs out of the hot path unless someone else is
1802 	 * using the pre-allocated buffer or the transfer is too large.
1803 	 */
1804 	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
1805 		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
1806 				    GFP_KERNEL | GFP_DMA);
1807 		if (!local_buf)
1808 			return -ENOMEM;
1809 	} else {
1810 		local_buf = buf;
1811 	}
1812 
1813 	spi_message_init(&message);
1814 	memset(x, 0, sizeof x);
1815 	if (n_tx) {
1816 		x[0].len = n_tx;
1817 		spi_message_add_tail(&x[0], &message);
1818 	}
1819 	if (n_rx) {
1820 		x[1].len = n_rx;
1821 		spi_message_add_tail(&x[1], &message);
1822 	}
1823 
1824 	memcpy(local_buf, txbuf, n_tx);
1825 	x[0].tx_buf = local_buf;
1826 	x[1].rx_buf = local_buf + n_tx;
1827 
1828 	/* do the i/o */
1829 	status = spi_sync(spi, &message);
1830 	if (status == 0)
1831 		memcpy(rxbuf, x[1].rx_buf, n_rx);
1832 
1833 	if (x[0].tx_buf == buf)
1834 		mutex_unlock(&lock);
1835 	else
1836 		kfree(local_buf);
1837 
1838 	return status;
1839 }
1840 EXPORT_SYMBOL_GPL(spi_write_then_read);
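/*
 * Register-read sketch using the helper above; the 0x0f command byte is
 * made up, and small stack buffers are fine because the helper copies
 * them into its own DMA-safe buffer:
 *
 *	u8 cmd = 0x0f;
 *	u8 val;
 *	int err;
 *
 *	err = spi_write_then_read(spi, &cmd, 1, &val, 1);
 */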
1841 
1842 /*-------------------------------------------------------------------------*/
1843 
1844 static int __init spi_init(void)
1845 {
1846 	int	status;
1847 
1848 	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
1849 	if (!buf) {
1850 		status = -ENOMEM;
1851 		goto err0;
1852 	}
1853 
1854 	status = bus_register(&spi_bus_type);
1855 	if (status < 0)
1856 		goto err1;
1857 
1858 	status = class_register(&spi_master_class);
1859 	if (status < 0)
1860 		goto err2;
1861 	return 0;
1862 
1863 err2:
1864 	bus_unregister(&spi_bus_type);
1865 err1:
1866 	kfree(buf);
1867 	buf = NULL;
1868 err0:
1869 	return status;
1870 }
1871 
1872 /* board_info is normally registered in arch_initcall(),
1873  * but even essential drivers wait till later
1874  *
1875  * REVISIT only boardinfo really needs static linking. the rest (device and
1876  * driver registration) _could_ be dynamically linked (modular) ... costs
1877  * include needing to have boardinfo data structures be much more public.
1878  */
1879 postcore_initcall(spi_init);
1880 
1881