// SPDX-License-Identifier: GPL-2.0-only
/*
 * SPI-Engine SPI controller driver
 * Copyright 2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/clk.h>
#include <linux/fpga/adi-axi-common.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

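/*
 * SPI Engine register offsets, relative to the core's AXI base address.
 * The ADI AXI version register checked in probe comes from adi-axi-common.h.
 */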
#define SPI_ENGINE_REG_RESET			0x40

#define SPI_ENGINE_REG_INT_ENABLE		0x80
#define SPI_ENGINE_REG_INT_PENDING		0x84
#define SPI_ENGINE_REG_INT_SOURCE		0x88

#define SPI_ENGINE_REG_SYNC_ID			0xc0

#define SPI_ENGINE_REG_CMD_FIFO_ROOM		0xd0
#define SPI_ENGINE_REG_SDO_FIFO_ROOM		0xd4
#define SPI_ENGINE_REG_SDI_FIFO_LEVEL		0xd8

#define SPI_ENGINE_REG_CMD_FIFO			0xe0
#define SPI_ENGINE_REG_SDO_DATA_FIFO		0xe4
#define SPI_ENGINE_REG_SDI_DATA_FIFO		0xe8
#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK	0xec

#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY		BIT(0)
#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY		BIT(1)
#define SPI_ENGINE_INT_SDI_ALMOST_FULL		BIT(2)
#define SPI_ENGINE_INT_SYNC			BIT(3)

#define SPI_ENGINE_CONFIG_CPHA			BIT(0)
#define SPI_ENGINE_CONFIG_CPOL			BIT(1)
#define SPI_ENGINE_CONFIG_3WIRE			BIT(2)

#define SPI_ENGINE_INST_TRANSFER		0x0
#define SPI_ENGINE_INST_ASSERT			0x1
#define SPI_ENGINE_INST_WRITE			0x2
#define SPI_ENGINE_INST_MISC			0x3

#define SPI_ENGINE_CMD_REG_CLK_DIV		0x0
#define SPI_ENGINE_CMD_REG_CONFIG		0x1

#define SPI_ENGINE_MISC_SYNC			0x0
#define SPI_ENGINE_MISC_SLEEP			0x1

#define SPI_ENGINE_TRANSFER_WRITE		0x1
#define SPI_ENGINE_TRANSFER_READ		0x2

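/*
 * SPI Engine instructions are 16-bit words: the instruction type is placed
 * at bit 12, the first argument at bit 8 and the second argument in the low
 * byte, as encoded by SPI_ENGINE_CMD() below.
 */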
#define SPI_ENGINE_CMD(inst, arg1, arg2) \
	(((inst) << 12) | ((arg1) << 8) | (arg2))

#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
#define SPI_ENGINE_CMD_WRITE(reg, val) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
#define SPI_ENGINE_CMD_SLEEP(delay) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
#define SPI_ENGINE_CMD_SYNC(id) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))

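/**
 * struct spi_engine_program - compiled instruction list for one message
 * @length: number of instructions stored in @instructions
 * @instructions: instruction words, executed in order by the engine
 */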
struct spi_engine_program {
	unsigned int length;
	uint16_t instructions[];
};

/**
 * struct spi_engine_message_state - SPI engine per-message state
 */
struct spi_engine_message_state {
	/** Instructions for executing this message. */
	struct spi_engine_program *p;
	/** Number of elements in cmd_buf array. */
	unsigned int cmd_length;
	/** Array of commands not yet written to CMD FIFO. */
	const uint16_t *cmd_buf;
	/** Next xfer with tx_buf not yet fully written to TX FIFO. */
	struct spi_transfer *tx_xfer;
	/** Size of tx_buf in bytes. */
	unsigned int tx_length;
	/** Bytes not yet written to TX FIFO. */
	const uint8_t *tx_buf;
	/** Next xfer with rx_buf not yet fully read from RX FIFO. */
	struct spi_transfer *rx_xfer;
	/** Size of rx_buf in bytes. */
	unsigned int rx_length;
	/** Bytes not yet read from the RX FIFO. */
	uint8_t *rx_buf;
	/** ID to correlate SYNC interrupts with this message. */
	u8 sync_id;
};

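/**
 * struct spi_engine - per-controller driver state
 * @clk: AXI interface clock ("s_axi_aclk")
 * @ref_clk: SPI reference clock ("spi_clk") that SCLK is derived from
 * @lock: protects the FIFO/interrupt state shared with the IRQ handler
 * @base: memory-mapped registers
 * @msg: message currently being transferred, or NULL
 * @sync_ida: allocator for per-message SYNC IDs
 * @completed_id: SYNC ID most recently reported by the hardware
 * @int_enable: cached value of the INT_ENABLE register
 */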
struct spi_engine {
	struct clk *clk;
	struct clk *ref_clk;

	spinlock_t lock;

	void __iomem *base;

	struct spi_message *msg;
	struct ida sync_ida;
	unsigned int completed_id;

	unsigned int int_enable;
};

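/*
 * Append one instruction to the program. In dry-run mode (@dry == true)
 * nothing is stored and only the length is counted, which lets the caller
 * size the program buffer before the real compilation pass.
 */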
static void spi_engine_program_add_cmd(struct spi_engine_program *p,
	bool dry, uint16_t cmd)
{
	if (!dry)
		p->instructions[p->length] = cmd;
	p->length++;
}

static unsigned int spi_engine_get_config(struct spi_device *spi)
{
	unsigned int config = 0;

	if (spi->mode & SPI_CPOL)
		config |= SPI_ENGINE_CONFIG_CPOL;
	if (spi->mode & SPI_CPHA)
		config |= SPI_ENGINE_CONFIG_CPHA;
	if (spi->mode & SPI_3WIRE)
		config |= SPI_ENGINE_CONFIG_3WIRE;

	return config;
}

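/*
 * The resulting SCLK rate is effectively ref_clk / ((clk_div + 1) * 2), so
 * the divider is computed by rounding up and then subtracting one. The
 * divider value travels in the 8-bit instruction argument, hence the clamp
 * to 255 (the slowest SCLK the engine can produce).
 */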
static unsigned int spi_engine_get_clk_div(struct spi_engine *spi_engine,
	struct spi_device *spi, struct spi_transfer *xfer)
{
	unsigned int clk_div;

	clk_div = DIV_ROUND_UP(clk_get_rate(spi_engine->ref_clk),
		xfer->speed_hz * 2);
	if (clk_div > 255)
		clk_div = 255;
	else if (clk_div > 0)
		clk_div -= 1;

	return clk_div;
}

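/*
 * Emit TRANSFER instructions for one spi_transfer. The length argument of a
 * TRANSFER instruction is a single byte holding "number of bytes - 1", so
 * transfers longer than 256 bytes are split into multiple instructions.
 */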
static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
	struct spi_transfer *xfer)
{
	unsigned int len = xfer->len;

	while (len) {
		unsigned int n = min(len, 256U);
		unsigned int flags = 0;

		if (xfer->tx_buf)
			flags |= SPI_ENGINE_TRANSFER_WRITE;
		if (xfer->rx_buf)
			flags |= SPI_ENGINE_TRANSFER_READ;

		spi_engine_program_add_cmd(p, dry,
			SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
		len -= n;
	}
}

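/*
 * Convert the post-transfer delay of @xfer into SLEEP instructions. Like
 * TRANSFER, a SLEEP instruction carries an 8-bit "count - 1" argument, so
 * longer delays are emitted as a sequence of instructions covering at most
 * 256 time units each.
 */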
static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
	struct spi_engine *spi_engine, unsigned int clk_div,
	struct spi_transfer *xfer)
{
	unsigned int spi_clk = clk_get_rate(spi_engine->ref_clk);
	unsigned int t;
	int delay;

	delay = spi_delay_to_ns(&xfer->delay, xfer);
	if (delay < 0)
		return;
	delay /= 1000;

	if (delay == 0)
		return;

	t = DIV_ROUND_UP(delay * spi_clk, (clk_div + 1) * 2);
	while (t) {
		unsigned int n = min(t, 256U);

		spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
		t -= n;
	}
}

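/*
 * Emit a chip select ASSERT instruction. The second argument is a bitmask
 * covering all CS lines where a cleared bit means "asserted": asserting a
 * device's CS clears exactly its bit, de-asserting restores 0xff.
 */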
static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
		struct spi_device *spi, bool assert)
{
	unsigned int mask = 0xff;

	if (assert)
		mask ^= BIT(spi_get_chipselect(spi, 0));

	spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(1, mask));
}

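/*
 * Translate a struct spi_message into SPI Engine instructions. With
 * @dry == true only p->length is updated so that the caller can size the
 * program buffer before the real pass.
 *
 * As a rough sketch (arguments simplified), a message with a single 4-byte
 * TX-only transfer on CS 0 compiles to something like:
 *
 *   WRITE(CONFIG, mode bits)      set CPOL/CPHA/3WIRE
 *   WRITE(CLK_DIV, divider)       program the SCLK divider
 *   ASSERT(1, 0xfe)               assert CS 0 (cleared bit = asserted)
 *   TRANSFER(WRITE, 3)            shift out 4 bytes (argument is n - 1)
 *   ASSERT(1, 0xff)               de-assert all CS lines
 *
 * The caller later appends a SYNC(id) instruction so that completion of the
 * whole message can be detected from the SYNC interrupt.
 */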
static int spi_engine_compile_message(struct spi_engine *spi_engine,
	struct spi_message *msg, bool dry, struct spi_engine_program *p)
{
	struct spi_device *spi = msg->spi;
	struct spi_transfer *xfer;
	int clk_div, new_clk_div;
	bool cs_change = true;

	clk_div = -1;

	spi_engine_program_add_cmd(p, dry,
		SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
			spi_engine_get_config(spi)));

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		new_clk_div = spi_engine_get_clk_div(spi_engine, spi, xfer);
		if (new_clk_div != clk_div) {
			clk_div = new_clk_div;
			spi_engine_program_add_cmd(p, dry,
				SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
					clk_div));
		}

		if (cs_change)
			spi_engine_gen_cs(p, dry, spi, true);

		spi_engine_gen_xfer(p, dry, xfer);
		spi_engine_gen_sleep(p, dry, spi_engine, clk_div, xfer);

		cs_change = xfer->cs_change;
		if (list_is_last(&xfer->transfer_list, &msg->transfers))
			cs_change = !cs_change;

		if (cs_change)
			spi_engine_gen_cs(p, dry, spi, false);
	}

	return 0;
}

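/*
 * Advance *_xfer to the next transfer of the current message, starting from
 * the first transfer when *_xfer is NULL and returning NULL past the last.
 */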
static void spi_engine_xfer_next(struct spi_engine *spi_engine,
	struct spi_transfer **_xfer)
{
	struct spi_message *msg = spi_engine->msg;
	struct spi_transfer *xfer = *_xfer;

	if (!xfer) {
		xfer = list_first_entry(&msg->transfers,
			struct spi_transfer, transfer_list);
	} else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
		xfer = NULL;
	} else {
		xfer = list_next_entry(xfer, transfer_list);
	}

	*_xfer = xfer;
}

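/*
 * Move the message state to the next transfer that has a tx_buf and latch
 * its buffer and length; spi_engine_rx_next() below does the same for
 * rx_buf. A NULL buffer marks the end of the message for that direction.
 */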
static void spi_engine_tx_next(struct spi_engine *spi_engine)
{
	struct spi_engine_message_state *st = spi_engine->msg->state;
	struct spi_transfer *xfer = st->tx_xfer;

	do {
		spi_engine_xfer_next(spi_engine, &xfer);
	} while (xfer && !xfer->tx_buf);

	st->tx_xfer = xfer;
	if (xfer) {
		st->tx_length = xfer->len;
		st->tx_buf = xfer->tx_buf;
	} else {
		st->tx_buf = NULL;
	}
}

static void spi_engine_rx_next(struct spi_engine *spi_engine)
{
	struct spi_engine_message_state *st = spi_engine->msg->state;
	struct spi_transfer *xfer = st->rx_xfer;

	do {
		spi_engine_xfer_next(spi_engine, &xfer);
	} while (xfer && !xfer->rx_buf);

	st->rx_xfer = xfer;
	if (xfer) {
		st->rx_length = xfer->len;
		st->rx_buf = xfer->rx_buf;
	} else {
		st->rx_buf = NULL;
	}
}

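/*
 * Write pending instructions to the command FIFO, limited by the room the
 * hardware reports. Returns true if instructions remain, i.e. the
 * CMD_ALMOST_EMPTY interrupt must stay enabled so the FIFO can be refilled
 * later.
 */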
static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
	struct spi_engine_message_state *st = spi_engine->msg->state;
	unsigned int n, m, i;
	const uint16_t *buf;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
	while (n && st->cmd_length) {
		m = min(n, st->cmd_length);
		buf = st->cmd_buf;
		for (i = 0; i < m; i++)
			writel_relaxed(buf[i], addr);
		st->cmd_buf += m;
		st->cmd_length -= m;
		n -= m;
	}

	return st->cmd_length != 0;
}

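/*
 * Write TX data to the SDO FIFO, one byte per FIFO word, advancing to the
 * next transfer with a tx_buf as each one is drained. Returns true if TX
 * data is still pending.
 */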
static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
	struct spi_engine_message_state *st = spi_engine->msg->state;
	unsigned int n, m, i;
	const uint8_t *buf;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
	while (n && st->tx_length) {
		m = min(n, st->tx_length);
		buf = st->tx_buf;
		for (i = 0; i < m; i++)
			writel_relaxed(buf[i], addr);
		st->tx_buf += m;
		st->tx_length -= m;
		n -= m;
		if (st->tx_length == 0)
			spi_engine_tx_next(spi_engine);
	}

	return st->tx_length != 0;
}

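/*
 * Drain the SDI FIFO into the current rx_buf, using the fill level the
 * hardware reports, and advance to the next transfer with an rx_buf as each
 * one fills up. Returns true if RX data is still expected.
 */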
static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine)
{
	void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
	struct spi_engine_message_state *st = spi_engine->msg->state;
	unsigned int n, m, i;
	uint8_t *buf;

	n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
	while (n && st->rx_length) {
		m = min(n, st->rx_length);
		buf = st->rx_buf;
		for (i = 0; i < m; i++)
			buf[i] = readl_relaxed(addr);
		st->rx_buf += m;
		st->rx_length -= m;
		n -= m;
		if (st->rx_length == 0)
			spi_engine_rx_next(spi_engine);
	}

	return st->rx_length != 0;
}

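/*
 * Interrupt handler: refill the command and SDO FIFOs, drain the SDI FIFO,
 * and, when a SYNC interrupt reports the ID that was appended to the current
 * message, finalize that message. Interrupt sources that have no work left
 * are masked until the next message is started.
 */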
static irqreturn_t spi_engine_irq(int irq, void *devid)
{
	struct spi_controller *host = devid;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	unsigned int disable_int = 0;
	unsigned int pending;

	pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);

	if (pending & SPI_ENGINE_INT_SYNC) {
		writel_relaxed(SPI_ENGINE_INT_SYNC,
			spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
		spi_engine->completed_id = readl_relaxed(
			spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
	}

	spin_lock(&spi_engine->lock);

	if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
		if (!spi_engine_write_cmd_fifo(spi_engine))
			disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
	}

	if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
		if (!spi_engine_write_tx_fifo(spi_engine))
			disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
	}

	if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
		if (!spi_engine_read_rx_fifo(spi_engine))
			disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
	}

	if (pending & SPI_ENGINE_INT_SYNC && spi_engine->msg) {
		struct spi_engine_message_state *st = spi_engine->msg->state;

		if (spi_engine->completed_id == st->sync_id) {
			struct spi_message *msg = spi_engine->msg;

			ida_free(&spi_engine->sync_ida, st->sync_id);
			kfree(st->p);
			kfree(st);
			msg->status = 0;
			msg->actual_length = msg->frame_length;
			spi_engine->msg = NULL;
			spi_finalize_current_message(host);
			disable_int |= SPI_ENGINE_INT_SYNC;
		}
	}

	if (disable_int) {
		spi_engine->int_enable &= ~disable_int;
		writel_relaxed(spi_engine->int_enable,
			spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	}

	spin_unlock(&spi_engine->lock);

	return IRQ_HANDLED;
}

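/*
 * transfer_one_message() callback: compile the message twice (a dry pass to
 * size the buffer, then the real pass), append a SYNC instruction tagged
 * with a freshly allocated ID, prime the FIFOs and enable only the
 * interrupts that still have work left to do. Completion is reported from
 * the interrupt handler.
 */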
static int spi_engine_transfer_one_message(struct spi_controller *host,
	struct spi_message *msg)
{
	struct spi_engine_program p_dry, *p;
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	struct spi_engine_message_state *st;
	unsigned int int_enable = 0;
	unsigned long flags;
	size_t size;
	int ret;

	st = kzalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	p_dry.length = 0;
	spi_engine_compile_message(spi_engine, msg, true, &p_dry);

	size = sizeof(*p->instructions) * (p_dry.length + 1);
	p = kzalloc(sizeof(*p) + size, GFP_KERNEL);
	if (!p) {
		kfree(st);
		return -ENOMEM;
	}

	ret = ida_alloc_range(&spi_engine->sync_ida, 0, U8_MAX, GFP_KERNEL);
	if (ret < 0) {
		kfree(p);
		kfree(st);
		return ret;
	}

	st->sync_id = ret;

	spi_engine_compile_message(spi_engine, msg, false, p);

	spin_lock_irqsave(&spi_engine->lock, flags);
	spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(st->sync_id));

	msg->state = st;
	spi_engine->msg = msg;
	st->p = p;

	st->cmd_buf = p->instructions;
	st->cmd_length = p->length;
	if (spi_engine_write_cmd_fifo(spi_engine))
		int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;

	spi_engine_tx_next(spi_engine);
	if (spi_engine_write_tx_fifo(spi_engine))
		int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;

	spi_engine_rx_next(spi_engine);
	if (st->rx_length != 0)
		int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;

	int_enable |= SPI_ENGINE_INT_SYNC;

	writel_relaxed(int_enable,
		spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	spi_engine->int_enable = int_enable;
	spin_unlock_irqrestore(&spi_engine->lock, flags);

	return 0;
}

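/*
 * Probe: map the core, check the AXI IP version, take the engine out of
 * reset with all interrupts cleared and masked, then register the SPI
 * controller. The IRQ is requested only after the interrupt state is known
 * to be clean.
 */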
static int spi_engine_probe(struct platform_device *pdev)
{
	struct spi_engine *spi_engine;
	struct spi_controller *host;
	unsigned int version;
	int irq;
	int ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	host = devm_spi_alloc_host(&pdev->dev, sizeof(*spi_engine));
	if (!host)
		return -ENOMEM;

	spi_engine = spi_controller_get_devdata(host);

	spin_lock_init(&spi_engine->lock);
	ida_init(&spi_engine->sync_ida);

	spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(spi_engine->clk))
		return PTR_ERR(spi_engine->clk);

	spi_engine->ref_clk = devm_clk_get_enabled(&pdev->dev, "spi_clk");
	if (IS_ERR(spi_engine->ref_clk))
		return PTR_ERR(spi_engine->ref_clk);

	spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(spi_engine->base))
		return PTR_ERR(spi_engine->base);

	version = readl(spi_engine->base + ADI_AXI_REG_VERSION);
	if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) {
		dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
			ADI_AXI_PCORE_VER_MAJOR(version),
			ADI_AXI_PCORE_VER_MINOR(version),
			ADI_AXI_PCORE_VER_PATCH(version));
		return -ENODEV;
	}

	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);

	ret = request_irq(irq, spi_engine_irq, 0, pdev->name, host);
	if (ret)
		return ret;

	host->dev.of_node = pdev->dev.of_node;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
	host->bits_per_word_mask = SPI_BPW_MASK(8);
	host->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
	host->transfer_one_message = spi_engine_transfer_one_message;
	host->num_chipselect = 8;

	ret = spi_register_controller(host);
	if (ret)
		goto err_free_irq;

	platform_set_drvdata(pdev, host);

	return 0;
err_free_irq:
	free_irq(irq, host);
	return ret;
}

static void spi_engine_remove(struct platform_device *pdev)
{
	struct spi_controller *host = platform_get_drvdata(pdev);
	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
	int irq = platform_get_irq(pdev, 0);

	spi_unregister_controller(host);

	free_irq(irq, host);

	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
	writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
}

static const struct of_device_id spi_engine_match_table[] = {
	{ .compatible = "adi,axi-spi-engine-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, spi_engine_match_table);

static struct platform_driver spi_engine_driver = {
	.probe = spi_engine_probe,
	.remove_new = spi_engine_remove,
	.driver = {
		.name = "spi-engine",
		.of_match_table = spi_engine_match_table,
	},
};
module_platform_driver(spi_engine_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
MODULE_LICENSE("GPL");