xref: /openbmc/linux/drivers/spi/spi-ti-qspi.c (revision e23feb16)
1 /*
2  * TI QSPI driver
3  *
4  * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
5  * Author: Sourav Poddar <sourav.poddar@ti.com>
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GPLv2.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  */
15 
16 #include <linux/kernel.h>
17 #include <linux/init.h>
18 #include <linux/interrupt.h>
19 #include <linux/module.h>
20 #include <linux/device.h>
21 #include <linux/delay.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/dmaengine.h>
24 #include <linux/omap-dma.h>
25 #include <linux/platform_device.h>
26 #include <linux/err.h>
27 #include <linux/clk.h>
28 #include <linux/io.h>
29 #include <linux/slab.h>
30 #include <linux/pm_runtime.h>
31 #include <linux/of.h>
32 #include <linux/of_device.h>
33 #include <linux/pinctrl/consumer.h>
34 
35 #include <linux/spi/spi.h>
36 
37 struct ti_qspi_regs {
38 	u32 clkctrl;
39 };
40 
41 struct ti_qspi {
42 	struct completion       transfer_complete;
43 
44 	/* IRQ synchronization */
45 	spinlock_t              lock;
46 
47 	/* list synchronization */
48 	struct mutex            list_lock;
49 
50 	struct spi_master	*master;
51 	void __iomem            *base;
52 	struct clk		*fclk;
53 	struct device           *dev;
54 
55 	struct ti_qspi_regs     ctx_reg;
56 
57 	u32 spi_max_frequency;
58 	u32 cmd;
59 	u32 dc;
60 	u32 stat;
61 };
62 
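/*
 * Memory-mapped register offsets, relative to the QSPI module base that is
 * ioremapped in ti_qspi_probe() and accessed through the ti_qspi_read()/
 * ti_qspi_write() helpers below.
 */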
63 #define QSPI_PID			(0x0)
64 #define QSPI_SYSCONFIG			(0x10)
65 #define QSPI_INTR_STATUS_RAW_SET	(0x20)
66 #define QSPI_INTR_STATUS_ENABLED_CLEAR	(0x24)
67 #define QSPI_INTR_ENABLE_SET_REG	(0x28)
68 #define QSPI_INTR_ENABLE_CLEAR_REG	(0x2c)
69 #define QSPI_SPI_CLOCK_CNTRL_REG	(0x40)
70 #define QSPI_SPI_DC_REG			(0x44)
71 #define QSPI_SPI_CMD_REG		(0x48)
72 #define QSPI_SPI_STATUS_REG		(0x4c)
73 #define QSPI_SPI_DATA_REG		(0x50)
74 #define QSPI_SPI_SETUP0_REG		(0x54)
75 #define QSPI_SPI_SWITCH_REG		(0x64)
76 #define QSPI_SPI_SETUP1_REG		(0x58)
77 #define QSPI_SPI_SETUP2_REG		(0x5c)
78 #define QSPI_SPI_SETUP3_REG		(0x60)
79 #define QSPI_SPI_DATA_REG_1		(0x68)
80 #define QSPI_SPI_DATA_REG_2		(0x6c)
81 #define QSPI_SPI_DATA_REG_3		(0x70)
82 
83 #define QSPI_COMPLETION_TIMEOUT		msecs_to_jiffies(2000)
84 
85 #define QSPI_FCLK			192000000
86 
87 /* Clock Control */
88 #define QSPI_CLK_EN			(1 << 31)
89 #define QSPI_CLK_DIV_MAX		0xffff
90 
91 /* Command */
92 #define QSPI_EN_CS(n)			(n << 28)
93 #define QSPI_WLEN(n)			((n - 1) << 19)
94 #define QSPI_3_PIN			(1 << 18)
95 #define QSPI_RD_SNGL			(1 << 16)
96 #define QSPI_WR_SNGL			(2 << 16)
97 #define QSPI_RD_DUAL			(3 << 16)
98 #define QSPI_RD_QUAD			(7 << 16)
99 #define QSPI_INVAL			(4 << 16)
100 #define QSPI_WC_CMD_INT_EN			(1 << 14)
101 #define QSPI_FLEN(n)			((n - 1) << 0)
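/*
 * The command word is assembled from the fields above.  For example, an
 * 8-bit single-I/O write of a one-word frame on chip-select 0 is built as
 * QSPI_EN_CS(0) | QSPI_FLEN(1) | QSPI_WC_CMD_INT_EN | QSPI_WLEN(8) |
 * QSPI_WR_SNGL (see ti_qspi_start_transfer_one() and qspi_write_msg()).
 */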
102 
103 /* STATUS REGISTER */
104 #define WC				0x02
105 
106 /* INTERRUPT REGISTER */
107 #define QSPI_WC_INT_EN				(1 << 1)
108 #define QSPI_WC_INT_DISABLE			(1 << 1)
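/*
 * Enable and disable share the same bit position: the value is written to
 * QSPI_INTR_ENABLE_SET_REG to unmask the word-complete interrupt and to
 * QSPI_INTR_ENABLE_CLEAR_REG / QSPI_INTR_STATUS_ENABLED_CLEAR to mask or
 * acknowledge it (see ti_qspi_isr()).
 */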
109 
110 /* Device Control */
111 #define QSPI_DD(m, n)			(m << (3 + n * 8))
112 #define QSPI_CKPHA(n)			(1 << (2 + n * 8))
113 #define QSPI_CSPOL(n)			(1 << (1 + n * 8))
114 #define QSPI_CKPOL(n)			(1 << (n * 8))
115 
116 #define	QSPI_FRAME			4096
117 
118 #define QSPI_AUTOSUSPEND_TIMEOUT         2000
119 
120 static inline unsigned long ti_qspi_read(struct ti_qspi *qspi,
121 		unsigned long reg)
122 {
123 	return readl(qspi->base + reg);
124 }
125 
126 static inline void ti_qspi_write(struct ti_qspi *qspi,
127 		unsigned long val, unsigned long reg)
128 {
129 	writel(val, qspi->base + reg);
130 }
131 
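/*
 * Per-device setup: derive a serial-clock divider from the functional
 * clock rate and the controller's spi-max-frequency, then program (and
 * cache) the clock-control register with the divider and the enable bit.
 */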
132 static int ti_qspi_setup(struct spi_device *spi)
133 {
134 	struct ti_qspi	*qspi = spi_master_get_devdata(spi->master);
135 	struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
136 	int clk_div = 0, ret;
137 	u32 clk_ctrl_reg, clk_rate, clk_mask;
138 
139 	if (spi->master->busy) {
140 		dev_dbg(qspi->dev, "master busy doing other transfers\n");
141 		return -EBUSY;
142 	}
143 
144 	if (!qspi->spi_max_frequency) {
145 		dev_err(qspi->dev, "spi max frequency not defined\n");
146 		return -EINVAL;
147 	}
148 
149 	clk_rate = clk_get_rate(qspi->fclk);
150 
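	/*
	 * Worked example (values assumed, not taken from any board file):
	 * with fclk at 192 MHz and spi-max-frequency = 48000000,
	 * DIV_ROUND_UP(192000000, 48000000) - 1 = 3, i.e. a /4 divider,
	 * consistent with the "/1" and "/(MAX + 1)" wording in the
	 * messages below.
	 */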
151 	clk_div = DIV_ROUND_UP(clk_rate, qspi->spi_max_frequency) - 1;
152 
153 	if (clk_div < 0) {
154 		dev_dbg(qspi->dev, "clock divider < 0, using /1 divider\n");
155 		return -EINVAL;
156 	}
157 
158 	if (clk_div > QSPI_CLK_DIV_MAX) {
159 		dev_dbg(qspi->dev, "clock divider >%d, using /%d divider\n",
160 				QSPI_CLK_DIV_MAX, QSPI_CLK_DIV_MAX + 1);
161 		return -EINVAL;
162 	}
163 
164 	dev_dbg(qspi->dev, "hz: %d, clock divider %d\n",
165 			qspi->spi_max_frequency, clk_div);
166 
167 	ret = pm_runtime_get_sync(qspi->dev);
168 	if (ret < 0) {
169 		dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
170 		return ret;
171 	}
172 
173 	clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG);
174 
175 	clk_ctrl_reg &= ~QSPI_CLK_EN;
176 
177 	/* disable SCLK */
178 	ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG);
179 
180 	/* enable SCLK */
181 	clk_mask = QSPI_CLK_EN | clk_div;
182 	ti_qspi_write(qspi, clk_mask, QSPI_SPI_CLOCK_CNTRL_REG);
183 	ctx_reg->clkctrl = clk_mask;
184 
185 	pm_runtime_mark_last_busy(qspi->dev);
186 	ret = pm_runtime_put_autosuspend(qspi->dev);
187 	if (ret < 0) {
188 		dev_err(qspi->dev, "pm_runtime_put_autosuspend() failed\n");
189 		return ret;
190 	}
191 
192 	return 0;
193 }
194 
195 static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
196 {
197 	struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
198 
199 	ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG);
200 }
201 
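/*
 * PIO write path: each word is placed in the data register, a single-I/O
 * write command is issued through the command register, and the handler
 * signals transfer_complete on the word-complete interrupt before the
 * next word is queued.
 */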
202 static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
203 {
204 	int wlen, count, ret;
205 	unsigned int cmd;
206 	const u8 *txbuf;
207 
208 	txbuf = t->tx_buf;
209 	cmd = qspi->cmd | QSPI_WR_SNGL;
210 	count = t->len;
211 	wlen = t->bits_per_word;
212 
213 	while (count) {
214 		switch (wlen) {
215 		case 8:
216 			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
217 					cmd, qspi->dc, *txbuf);
218 			writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG);
219 			ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
220 			ret = wait_for_completion_timeout(&qspi->transfer_complete,
221 					QSPI_COMPLETION_TIMEOUT);
222 			if (ret == 0) {
223 				dev_err(qspi->dev, "write timed out\n");
224 				return -ETIMEDOUT;
225 			}
226 			txbuf += 1;
227 			count -= 1;
228 			break;
229 		case 16:
230 			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %04x\n",
231 					cmd, qspi->dc, *txbuf);
232 			writew(*((u16 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
233 			ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
234 			ret = wait_for_completion_timeout(&qspi->transfer_complete,
235 				QSPI_COMPLETION_TIMEOUT);
236 			if (ret == 0) {
237 				dev_err(qspi->dev, "write timed out\n");
238 				return -ETIMEDOUT;
239 			}
240 			txbuf += 2;
241 			count -= 2;
242 			break;
243 		case 32:
244 			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %08x\n",
245 					cmd, qspi->dc, *txbuf);
246 			writel(*((u32 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
247 			ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
248 			ret = wait_for_completion_timeout(&qspi->transfer_complete,
249 				QSPI_COMPLETION_TIMEOUT);
250 			if (ret == 0) {
251 				dev_err(qspi->dev, "write timed out\n");
252 				return -ETIMEDOUT;
253 			}
254 			txbuf += 4;
255 			count -= 4;
256 			break;
257 		}
258 	}
259 
260 	return 0;
261 }
262 
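/*
 * PIO read path: the command word selects single, dual or quad read
 * according to t->rx_nbits; each word is read back from the data register
 * after the word-complete interrupt has signalled transfer_complete.
 */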
263 static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
264 {
265 	int wlen, count, ret;
266 	unsigned int cmd;
267 	u8 *rxbuf;
268 
269 	rxbuf = t->rx_buf;
270 	cmd = qspi->cmd;
271 	switch (t->rx_nbits) {
272 	case SPI_NBITS_DUAL:
273 		cmd |= QSPI_RD_DUAL;
274 		break;
275 	case SPI_NBITS_QUAD:
276 		cmd |= QSPI_RD_QUAD;
277 		break;
278 	default:
279 		cmd |= QSPI_RD_SNGL;
280 		break;
281 	}
282 	count = t->len;
283 	wlen = t->bits_per_word;
284 
285 	while (count) {
286 		dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
287 		ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
288 		ret = wait_for_completion_timeout(&qspi->transfer_complete,
289 				QSPI_COMPLETION_TIMEOUT);
290 		if (ret == 0) {
291 			dev_err(qspi->dev, "read timed out\n");
292 			return -ETIMEDOUT;
293 		}
294 		switch (wlen) {
295 		case 8:
296 			*rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG);
297 			rxbuf += 1;
298 			count -= 1;
299 			break;
300 		case 16:
301 			*((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
302 			rxbuf += 2;
303 			count -= 2;
304 			break;
305 		case 32:
306 			*((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
307 			rxbuf += 4;
308 			count -= 4;
309 			break;
310 		}
311 	}
312 
313 	return 0;
314 }
315 
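/*
 * Dispatch one spi_transfer: write first if a tx buffer is present, then
 * read if an rx buffer is present (the controller is registered as
 * half-duplex in ti_qspi_probe()).
 */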
316 static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
317 {
318 	int ret;
319 
320 	if (t->tx_buf) {
321 		ret = qspi_write_msg(qspi, t);
322 		if (ret) {
323 			dev_dbg(qspi->dev, "Error while writing\n");
324 			return ret;
325 		}
326 	}
327 
328 	if (t->rx_buf) {
329 		ret = qspi_read_msg(qspi, t);
330 		if (ret) {
331 			dev_dbg(qspi->dev, "Error while reading\n");
332 			return ret;
333 		}
334 	}
335 
336 	return 0;
337 }
338 
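/*
 * Handle one spi_message: build the device-control word from the SPI mode
 * bits, size the frame from the total message length, then walk the
 * transfer list under list_lock.  The command word is invalidated with
 * QSPI_INVAL once the message has been finalized.
 */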
339 static int ti_qspi_start_transfer_one(struct spi_master *master,
340 		struct spi_message *m)
341 {
342 	struct ti_qspi *qspi = spi_master_get_devdata(master);
343 	struct spi_device *spi = m->spi;
344 	struct spi_transfer *t;
345 	int status = 0, ret;
346 	int frame_length;
347 
348 	/* setup device control reg */
349 	qspi->dc = 0;
350 
351 	if (spi->mode & SPI_CPHA)
352 		qspi->dc |= QSPI_CKPHA(spi->chip_select);
353 	if (spi->mode & SPI_CPOL)
354 		qspi->dc |= QSPI_CKPOL(spi->chip_select);
355 	if (spi->mode & SPI_CS_HIGH)
356 		qspi->dc |= QSPI_CSPOL(spi->chip_select);
357 
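	/*
	 * m->frame_length is the message length in bytes; shifting by 3
	 * converts it to bits, and dividing by bits_per_word gives the
	 * number of words in the frame (e.g. a 256-byte message at 8 bits
	 * per word is a 256-word frame), capped at the 4096-word maximum.
	 */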
358 	frame_length = (m->frame_length << 3) / spi->bits_per_word;
359 
360 	frame_length = clamp(frame_length, 0, QSPI_FRAME);
361 
362 	/* setup command reg */
363 	qspi->cmd = 0;
364 	qspi->cmd |= QSPI_EN_CS(spi->chip_select);
365 	qspi->cmd |= QSPI_FLEN(frame_length);
366 	qspi->cmd |= QSPI_WC_CMD_INT_EN;
367 
368 	ti_qspi_write(qspi, QSPI_WC_INT_EN, QSPI_INTR_ENABLE_SET_REG);
369 	ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
370 
371 	mutex_lock(&qspi->list_lock);
372 
373 	list_for_each_entry(t, &m->transfers, transfer_list) {
374 		qspi->cmd |= QSPI_WLEN(t->bits_per_word);
375 
376 		ret = qspi_transfer_msg(qspi, t);
377 		if (ret) {
378 			dev_dbg(qspi->dev, "transfer message failed\n");
379 			mutex_unlock(&qspi->list_lock);
380 			return -EINVAL;
381 		}
382 
383 		m->actual_length += t->len;
384 	}
385 
386 	mutex_unlock(&qspi->list_lock);
387 
388 	m->status = status;
389 	spi_finalize_current_message(master);
390 
391 	ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);
392 
393 	return status;
394 }
395 
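/*
 * Hard-IRQ half: latch the enabled-interrupt status and the controller
 * status, then mask and acknowledge the word-complete interrupt and defer
 * the completion handling to the threaded handler.
 */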
396 static irqreturn_t ti_qspi_isr(int irq, void *dev_id)
397 {
398 	struct ti_qspi *qspi = dev_id;
399 	u16 int_stat;
400 
401 	irqreturn_t ret = IRQ_HANDLED;
402 
403 	spin_lock(&qspi->lock);
404 
405 	int_stat = ti_qspi_read(qspi, QSPI_INTR_STATUS_ENABLED_CLEAR);
406 	qspi->stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
407 
408 	if (!int_stat) {
409 		dev_dbg(qspi->dev, "No IRQ triggered\n");
410 		ret = IRQ_NONE;
411 		goto out;
412 	}
413 
414 	ret = IRQ_WAKE_THREAD;
415 
416 	ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG);
417 	ti_qspi_write(qspi, QSPI_WC_INT_DISABLE,
418 				QSPI_INTR_STATUS_ENABLED_CLEAR);
419 
420 out:
421 	spin_unlock(&qspi->lock);
422 
423 	return ret;
424 }
425 
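/*
 * Threaded half: if the status captured above shows word-complete (WC),
 * wake the waiter in qspi_read_msg()/qspi_write_msg(), then re-enable the
 * word-complete interrupt for the next word.
 */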
426 static irqreturn_t ti_qspi_threaded_isr(int this_irq, void *dev_id)
427 {
428 	struct ti_qspi *qspi = dev_id;
429 	unsigned long flags;
430 
431 	spin_lock_irqsave(&qspi->lock, flags);
432 
433 	if (qspi->stat & WC)
434 		complete(&qspi->transfer_complete);
435 
436 	spin_unlock_irqrestore(&qspi->lock, flags);
437 
438 	ti_qspi_write(qspi, QSPI_WC_INT_EN, QSPI_INTR_ENABLE_SET_REG);
439 
440 	return IRQ_HANDLED;
441 }
442 
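/*
 * Runtime resume: the clock-control register contents may not survive a
 * runtime suspend, so rewrite the value cached in ti_qspi_setup().
 */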
443 static int ti_qspi_runtime_resume(struct device *dev)
444 {
445 	struct ti_qspi      *qspi;
446 	struct spi_master       *master;
447 
448 	master = dev_get_drvdata(dev);
449 	qspi = spi_master_get_devdata(master);
450 	ti_qspi_restore_ctx(qspi);
451 
452 	return 0;
453 }
454 
455 static const struct of_device_id ti_qspi_match[] = {
456 	{.compatible = "ti,dra7xxx-qspi" },
457 	{.compatible = "ti,am4372-qspi" },
458 	{},
459 };
460 MODULE_DEVICE_TABLE(of, ti_qspi_match);
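/*
 * A minimal sketch of a matching device-tree node.  The unit address, reg
 * range and interrupt specifier below are placeholders and board/SoC
 * specific; only "num-cs" and "spi-max-frequency" are read directly by
 * ti_qspi_probe():
 *
 *	qspi: spi@0 {
 *		compatible = "ti,dra7xxx-qspi";
 *		reg = <0x0 0x100>;		// placeholder
 *		interrupts = <0>;		// placeholder
 *		num-cs = <4>;
 *		spi-max-frequency = <48000000>;
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *	};
 */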
461 
462 static int ti_qspi_probe(struct platform_device *pdev)
463 {
464 	struct  ti_qspi *qspi;
465 	struct spi_master *master;
466 	struct resource         *r;
467 	struct device_node *np = pdev->dev.of_node;
468 	u32 max_freq;
469 	int ret = 0, num_cs, irq;
470 
471 	master = spi_alloc_master(&pdev->dev, sizeof(*qspi));
472 	if (!master)
473 		return -ENOMEM;
474 
475 	master->mode_bits = SPI_CPOL | SPI_CPHA;
476 
477 	master->bus_num = -1;
478 	master->flags = SPI_MASTER_HALF_DUPLEX;
479 	master->setup = ti_qspi_setup;
480 	master->auto_runtime_pm = true;
481 	master->transfer_one_message = ti_qspi_start_transfer_one;
482 	master->dev.of_node = pdev->dev.of_node;
483 	master->bits_per_word_mask = BIT(32 - 1) | BIT(16 - 1) | BIT(8 - 1);
484 
485 	if (!of_property_read_u32(np, "num-cs", &num_cs))
486 		master->num_chipselect = num_cs;
487 
488 	platform_set_drvdata(pdev, master);
489 
490 	qspi = spi_master_get_devdata(master);
491 	qspi->master = master;
492 	qspi->dev = &pdev->dev;
493 
494 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
495 
496 	irq = platform_get_irq(pdev, 0);
497 	if (irq < 0) {
498 		dev_err(&pdev->dev, "no irq resource?\n");
499 		ret = irq;
		goto free_master;
500 	}
501 
502 	spin_lock_init(&qspi->lock);
503 	mutex_init(&qspi->list_lock);
504 
505 	qspi->base = devm_ioremap_resource(&pdev->dev, r);
506 	if (IS_ERR(qspi->base)) {
507 		ret = PTR_ERR(qspi->base);
508 		goto free_master;
509 	}
510 
511 	ret = devm_request_threaded_irq(&pdev->dev, irq, ti_qspi_isr,
512 			ti_qspi_threaded_isr, 0,
513 			dev_name(&pdev->dev), qspi);
514 	if (ret < 0) {
515 		dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
516 				irq);
517 		goto free_master;
518 	}
519 
520 	qspi->fclk = devm_clk_get(&pdev->dev, "fck");
521 	if (IS_ERR(qspi->fclk)) {
522 		ret = PTR_ERR(qspi->fclk);
523 		dev_err(&pdev->dev, "could not get clk: %d\n", ret);
524 	}
525 
526 	init_completion(&qspi->transfer_complete);
527 
528 	pm_runtime_use_autosuspend(&pdev->dev);
529 	pm_runtime_set_autosuspend_delay(&pdev->dev, QSPI_AUTOSUSPEND_TIMEOUT);
530 	pm_runtime_enable(&pdev->dev);
531 
532 	if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
533 		qspi->spi_max_frequency = max_freq;
534 
535 	ret = spi_register_master(master);
536 	if (ret)
537 		goto free_master;
538 
539 	return 0;
540 
541 free_master:
542 	spi_master_put(master);
543 	return ret;
544 }
545 
546 static int ti_qspi_remove(struct platform_device *pdev)
547 {
548 	struct	ti_qspi *qspi = platform_get_drvdata(pdev);
549 
550 	spi_unregister_master(qspi->master);
551 
552 	return 0;
553 }
554 
555 static const struct dev_pm_ops ti_qspi_pm_ops = {
556 	.runtime_resume = ti_qspi_runtime_resume,
557 };
558 
559 static struct platform_driver ti_qspi_driver = {
560 	.probe	= ti_qspi_probe,
561 	.remove	= ti_qspi_remove,
562 	.driver = {
563 		.name	= "ti,dra7xxx-qspi",
564 		.owner	= THIS_MODULE,
565 		.pm =   &ti_qspi_pm_ops,
566 		.of_match_table = ti_qspi_match,
567 	}
568 };
569 
570 module_platform_driver(ti_qspi_driver);
571 
572 MODULE_AUTHOR("Sourav Poddar <sourav.poddar@ti.com>");
573 MODULE_LICENSE("GPL v2");
574 MODULE_DESCRIPTION("TI QSPI controller driver");
575