/*
 * Copyright (C) 2003-2015 Broadcom Corporation
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 (GPL v2)
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
#include <linux/interrupt.h>

/* SPI Configuration Register */
#define XLP_SPI_CONFIG			0x00
#define XLP_SPI_CPHA			BIT(0)
#define XLP_SPI_CPOL			BIT(1)
#define XLP_SPI_CS_POL			BIT(2)
#define XLP_SPI_TXMISO_EN		BIT(3)
#define XLP_SPI_TXMOSI_EN		BIT(4)
#define XLP_SPI_RXMISO_EN		BIT(5)
#define XLP_SPI_CS_LSBFE		BIT(10)
#define XLP_SPI_RXCAP_EN		BIT(11)

/* SPI Frequency Divider Register */
#define XLP_SPI_FDIV			0x04

/* SPI Command Register */
#define XLP_SPI_CMD			0x08
#define XLP_SPI_CMD_IDLE_MASK		0x0
#define XLP_SPI_CMD_TX_MASK		0x1
#define XLP_SPI_CMD_RX_MASK		0x2
#define XLP_SPI_CMD_TXRX_MASK		0x3
#define XLP_SPI_CMD_CONT		BIT(4)
#define XLP_SPI_XFR_BITCNT_SHIFT	16

/* SPI Status Register */
#define XLP_SPI_STATUS			0x0c
#define XLP_SPI_XFR_PENDING		BIT(0)
#define XLP_SPI_XFR_DONE		BIT(1)
#define XLP_SPI_TX_INT			BIT(2)
#define XLP_SPI_RX_INT			BIT(3)
#define XLP_SPI_TX_UF			BIT(4)
#define XLP_SPI_RX_OF			BIT(5)
#define XLP_SPI_STAT_MASK		0x3f

/* SPI Interrupt Enable Register */
#define XLP_SPI_INTR_EN			0x10
#define XLP_SPI_INTR_DONE		BIT(0)
#define XLP_SPI_INTR_TXTH		BIT(1)
#define XLP_SPI_INTR_RXTH		BIT(2)
#define XLP_SPI_INTR_TXUF		BIT(3)
#define XLP_SPI_INTR_RXOF		BIT(4)

/* SPI FIFO Threshold Register */
#define XLP_SPI_FIFO_THRESH		0x14

/* SPI FIFO Word Count Register */
#define XLP_SPI_FIFO_WCNT		0x18
#define XLP_SPI_RXFIFO_WCNT_MASK	0xf
#define XLP_SPI_TXFIFO_WCNT_MASK	0xf0
#define XLP_SPI_TXFIFO_WCNT_SHIFT	4

/* SPI Transmit Data FIFO Register */
#define XLP_SPI_TXDATA_FIFO		0x1c

/* SPI Receive Data FIFO Register */
#define XLP_SPI_RXDATA_FIFO		0x20

/* SPI System Control Register */
#define XLP_SPI_SYSCTRL			0x100
#define XLP_SPI_SYS_RESET		BIT(0)
#define XLP_SPI_SYS_CLKDIS		BIT(1)
#define XLP_SPI_SYS_PMEN		BIT(8)

#define SPI_CS_OFFSET			0x40
#define XLP_SPI_TXRXTH			0x80
#define XLP_SPI_FIFO_SIZE		8
#define XLP_SPI_MAX_CS			4
#define XLP_SPI_DEFAULT_FREQ		133333333
#define XLP_SPI_FDIV_MIN		4
#define XLP_SPI_FDIV_MAX		65535
/*
 * The controller can only transfer 28 bytes reliably in one command, so
 * split larger transfers into chunks of at most 28 bytes.
 */
#define XLP_SPI_XFER_SIZE		28

struct xlp_spi_priv {
	struct device		dev;		/* device structure */
	void __iomem		*base;		/* spi registers base address */
	const u8		*tx_buf;	/* tx data buffer */
	u8			*rx_buf;	/* rx data buffer */
	int			tx_len;		/* tx xfer length */
	int			rx_len;		/* rx xfer length */
	int			txerrors;	/* TXFIFO underflow count */
	int			rxerrors;	/* RXFIFO overflow count */
	int			cs;		/* slave device chip select */
	u32			spi_clk;	/* spi clock frequency */
	bool			cmd_cont;	/* cs active */
	struct completion	done;		/* completion notification */
};

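/*
 * Register accessors: each chip select has its own copy of the per-channel
 * registers, banked at SPI_CS_OFFSET intervals from the controller base.
 * The SYSCTRL register is global and is addressed from the base directly.
 */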
static inline u32 xlp_spi_reg_read(struct xlp_spi_priv *priv,
				int cs, int regoff)
{
	return readl(priv->base + regoff + cs * SPI_CS_OFFSET);
}

static inline void xlp_spi_reg_write(struct xlp_spi_priv *priv, int cs,
				int regoff, u32 val)
{
	writel(val, priv->base + regoff + cs * SPI_CS_OFFSET);
}

static inline void xlp_spi_sysctl_write(struct xlp_spi_priv *priv,
				int regoff, u32 val)
{
	writel(val, priv->base + regoff);
}

/*
 * Set up the global SPI_SYSCTRL register for all SPI channels: reset each
 * chip-select channel in turn, then set the XLP_SPI_SYS_PMEN bit.
 */
static void xlp_spi_sysctl_setup(struct xlp_spi_priv *xspi)
{
	int cs;

	for (cs = 0; cs < XLP_SPI_MAX_CS; cs++)
		xlp_spi_sysctl_write(xspi, XLP_SPI_SYSCTRL,
				XLP_SPI_SYS_RESET << cs);
	xlp_spi_sysctl_write(xspi, XLP_SPI_SYSCTRL, XLP_SPI_SYS_PMEN);
}

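/*
 * Per-device setup: derive the clock divider from the requested speed
 * (clamped to the 4..65535 range accepted by the hardware), program the
 * FIFO threshold, and translate the SPI mode flags (CPHA, CPOL, chip
 * select polarity, LSB-first) into the per-chip-select CONFIG register.
 * The MOSI transmit and MISO receive paths are always enabled; RX capture
 * is additionally enabled at the minimum divider, presumably to adjust
 * the sampling point at the fastest supported clock.
 */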
static int xlp_spi_setup(struct spi_device *spi)
{
	struct xlp_spi_priv *xspi;
	u32 fdiv, cfg;
	int cs;

	xspi = spi_master_get_devdata(spi->master);
	cs = spi->chip_select;
	/*
	 * The value of fdiv must be between 4 and 65535.
	 */
	fdiv = DIV_ROUND_UP(xspi->spi_clk, spi->max_speed_hz);
	if (fdiv > XLP_SPI_FDIV_MAX)
		fdiv = XLP_SPI_FDIV_MAX;
	else if (fdiv < XLP_SPI_FDIV_MIN)
		fdiv = XLP_SPI_FDIV_MIN;

	xlp_spi_reg_write(xspi, cs, XLP_SPI_FDIV, fdiv);
	xlp_spi_reg_write(xspi, cs, XLP_SPI_FIFO_THRESH, XLP_SPI_TXRXTH);
	cfg = xlp_spi_reg_read(xspi, cs, XLP_SPI_CONFIG);
	if (spi->mode & SPI_CPHA)
		cfg |= XLP_SPI_CPHA;
	else
		cfg &= ~XLP_SPI_CPHA;
	if (spi->mode & SPI_CPOL)
		cfg |= XLP_SPI_CPOL;
	else
		cfg &= ~XLP_SPI_CPOL;
	if (!(spi->mode & SPI_CS_HIGH))
		cfg |= XLP_SPI_CS_POL;
	else
		cfg &= ~XLP_SPI_CS_POL;
	if (spi->mode & SPI_LSB_FIRST)
		cfg |= XLP_SPI_CS_LSBFE;
	else
		cfg &= ~XLP_SPI_CS_LSBFE;

	cfg |= XLP_SPI_TXMOSI_EN | XLP_SPI_RXMISO_EN;
	if (fdiv == 4)
		cfg |= XLP_SPI_RXCAP_EN;
	xlp_spi_reg_write(xspi, cs, XLP_SPI_CONFIG, cfg);

	return 0;
}

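/*
 * Drain the RX FIFO. Each FIFO word carries up to four bytes; the word is
 * unpacked into rx_buf starting from its most significant byte, matching
 * the byte order used when filling the TX FIFO below.
 */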
static void xlp_spi_read_rxfifo(struct xlp_spi_priv *xspi)
{
	u32 rx_data, rxfifo_cnt;
	int i, j, nbytes;

	rxfifo_cnt = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_FIFO_WCNT);
	rxfifo_cnt &= XLP_SPI_RXFIFO_WCNT_MASK;
	while (rxfifo_cnt) {
		rx_data = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_RXDATA_FIFO);
		j = 0;
		nbytes = min(xspi->rx_len, 4);
		for (i = nbytes - 1; i >= 0; i--, j++)
			xspi->rx_buf[i] = (rx_data >> (j * 8)) & 0xff;

		xspi->rx_len -= nbytes;
		xspi->rx_buf += nbytes;
		rxfifo_cnt--;
	}
}

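/*
 * Fill the TX FIFO with as much of the remaining TX data as fits. Up to
 * four bytes are packed per FIFO word, first byte in the most significant
 * position, and words are pushed until the FIFO word count reaches
 * XLP_SPI_FIFO_SIZE or the data runs out.
 */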
static void xlp_spi_fill_txfifo(struct xlp_spi_priv *xspi)
{
	u32 tx_data, txfifo_cnt;
	int i, j, nbytes;

	txfifo_cnt = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_FIFO_WCNT);
	txfifo_cnt &= XLP_SPI_TXFIFO_WCNT_MASK;
	txfifo_cnt >>= XLP_SPI_TXFIFO_WCNT_SHIFT;
	while (xspi->tx_len && (txfifo_cnt < XLP_SPI_FIFO_SIZE)) {
		j = 0;
		tx_data = 0;
		nbytes = min(xspi->tx_len, 4);
		for (i = nbytes - 1; i >= 0; i--, j++)
			tx_data |= xspi->tx_buf[i] << (j * 8);

		xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_TXDATA_FIFO, tx_data);
		xspi->tx_len -= nbytes;
		xspi->tx_buf += nbytes;
		txfifo_cnt++;
	}
}

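/*
 * Interrupt handler: on a TX threshold interrupt refill the TX FIFO, on an
 * RX threshold interrupt drain the RX FIFO, and count any underflow or
 * overflow errors. The status bits are written back to acknowledge the
 * interrupt, and the waiter is completed once XLP_SPI_XFR_DONE is set.
 */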
static irqreturn_t xlp_spi_interrupt(int irq, void *dev_id)
{
	struct xlp_spi_priv *xspi = dev_id;
	u32 stat;

	stat = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_STATUS) &
		XLP_SPI_STAT_MASK;
	if (!stat)
		return IRQ_NONE;

	if (stat & XLP_SPI_TX_INT) {
		if (xspi->tx_len)
			xlp_spi_fill_txfifo(xspi);
		if (stat & XLP_SPI_TX_UF)
			xspi->txerrors++;
	}

	if (stat & XLP_SPI_RX_INT) {
		if (xspi->rx_len)
			xlp_spi_read_rxfifo(xspi);
		if (stat & XLP_SPI_RX_OF)
			xspi->rxerrors++;
	}

	/* write status back to clear interrupts */
	xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_STATUS, stat);
	if (stat & XLP_SPI_XFR_DONE)
		complete(&xspi->done);

	return IRQ_HANDLED;
}

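/*
 * Start a transfer by writing the command register: select the TX and/or
 * RX direction, optionally keep the chip select asserted after the block
 * (XLP_SPI_CMD_CONT), and encode the transfer length in bits minus one in
 * the BITCNT field.
 */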
static void xlp_spi_send_cmd(struct xlp_spi_priv *xspi, int xfer_len,
			int cmd_cont)
{
	u32 cmd = 0;

	if (xspi->tx_buf)
		cmd |= XLP_SPI_CMD_TX_MASK;
	if (xspi->rx_buf)
		cmd |= XLP_SPI_CMD_RX_MASK;
	if (cmd_cont)
		cmd |= XLP_SPI_CMD_CONT;
	cmd |= ((xfer_len * 8 - 1) << XLP_SPI_XFR_BITCNT_SHIFT);
	xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_CMD, cmd);
}

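/*
 * Transfer a single block of at most XLP_SPI_XFER_SIZE bytes: prime the TX
 * FIFO, issue the command, enable the relevant interrupts and wait up to a
 * second for the completion signalled from the interrupt handler. Returns
 * the number of bytes transferred, or -ETIMEDOUT.
 */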
static int xlp_spi_xfer_block(struct xlp_spi_priv *xs,
		const unsigned char *tx_buf,
		unsigned char *rx_buf, int xfer_len, int cmd_cont)
{
	int timeout;
	u32 intr_mask = 0;

	xs->tx_buf = tx_buf;
	xs->rx_buf = rx_buf;
	xs->tx_len = (xs->tx_buf == NULL) ? 0 : xfer_len;
	xs->rx_len = (xs->rx_buf == NULL) ? 0 : xfer_len;
	xs->txerrors = xs->rxerrors = 0;

	/* fill TXDATA_FIFO, then send the CMD */
	if (xs->tx_len)
		xlp_spi_fill_txfifo(xs);

	xlp_spi_send_cmd(xs, xfer_len, cmd_cont);

	/*
	 * Spurious TX interrupts have been observed, so only enable the TX
	 * interrupts when there is data to transmit; for RX-only transfers
	 * enable just the RX interrupts.
	 */
	if (xs->tx_len)
		intr_mask |= XLP_SPI_INTR_TXTH | XLP_SPI_INTR_TXUF |
				XLP_SPI_INTR_RXTH | XLP_SPI_INTR_RXOF;
	else
		intr_mask |= XLP_SPI_INTR_RXTH | XLP_SPI_INTR_RXOF;

	intr_mask |= XLP_SPI_INTR_DONE;
	xlp_spi_reg_write(xs, xs->cs, XLP_SPI_INTR_EN, intr_mask);

	timeout = wait_for_completion_timeout(&xs->done,
				msecs_to_jiffies(1000));
	/* Disable interrupts */
	xlp_spi_reg_write(xs, xs->cs, XLP_SPI_INTR_EN, 0x0);
	if (!timeout) {
		dev_err(&xs->dev, "xfer timed out!\n");
		goto out;
	}
	if (xs->txerrors || xs->rxerrors)
		dev_err(&xs->dev, "Over/Underflow rx %d tx %d xfer %d!\n",
				xs->rxerrors, xs->txerrors, xfer_len);

	return xfer_len;
out:
	return -ETIMEDOUT;
}

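/*
 * Carry out one spi_transfer by splitting it into XLP_SPI_XFER_SIZE sized
 * blocks. Every block except the last keeps the chip select asserted; the
 * last block uses xspi->cmd_cont, which transfer_one sets according to
 * whether this is the final transfer of the message. Returns 0 on success.
 */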
static int xlp_spi_txrx_bufs(struct xlp_spi_priv *xs, struct spi_transfer *t)
{
	int bytesleft, sz;
	unsigned char *rx_buf;
	const unsigned char *tx_buf;

	tx_buf = t->tx_buf;
	rx_buf = t->rx_buf;
	bytesleft = t->len;
	while (bytesleft) {
		if (bytesleft > XLP_SPI_XFER_SIZE)
			sz = xlp_spi_xfer_block(xs, tx_buf, rx_buf,
					XLP_SPI_XFER_SIZE, 1);
		else
			sz = xlp_spi_xfer_block(xs, tx_buf, rx_buf,
					bytesleft, xs->cmd_cont);
		if (sz < 0)
			return sz;
		bytesleft -= sz;
		if (tx_buf)
			tx_buf += sz;
		if (rx_buf)
			rx_buf += sz;
	}
	return bytesleft;
}

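/*
 * transfer_one callback: record the chip select for this transfer, keep
 * the chip select asserted unless this is the last transfer in the
 * message, then run the transfer and finalize it.
 */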
static int xlp_spi_transfer_one(struct spi_master *master,
					struct spi_device *spi,
					struct spi_transfer *t)
{
	struct xlp_spi_priv *xspi = spi_master_get_devdata(master);
	int ret = 0;

	xspi->cs = spi->chip_select;
	xspi->dev = spi->dev;

	if (spi_transfer_is_last(master, t))
		xspi->cmd_cont = 0;
	else
		xspi->cmd_cont = 1;

	if (xlp_spi_txrx_bufs(xspi, t))
		ret = -EIO;

	spi_finalize_current_transfer(master);
	return ret;
}

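/*
 * Probe: map the controller registers, request the interrupt, read the
 * input clock rate, then allocate the SPI master, perform the one-time
 * SYSCTRL setup shared by all chip selects, and register the controller.
 */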
static int xlp_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct xlp_spi_priv *xspi;
	struct resource *res;
	struct clk *clk;
	int irq, err;

	xspi = devm_kzalloc(&pdev->dev, sizeof(*xspi), GFP_KERNEL);
	if (!xspi)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xspi->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xspi->base))
		return PTR_ERR(xspi->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource found: %d\n", irq);
		return irq;
	}
	err = devm_request_irq(&pdev->dev, irq, xlp_spi_interrupt, 0,
			pdev->name, xspi);
	if (err) {
		dev_err(&pdev->dev, "unable to request irq %d\n", irq);
		return err;
	}

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "could not get spi clock\n");
		return PTR_ERR(clk);
	}

	xspi->spi_clk = clk_get_rate(clk);

	master = spi_alloc_master(&pdev->dev, 0);
	if (!master) {
		dev_err(&pdev->dev, "could not alloc master\n");
		return -ENOMEM;
	}

	master->bus_num = 0;
	master->num_chipselect = XLP_SPI_MAX_CS;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->setup = xlp_spi_setup;
	master->transfer_one = xlp_spi_transfer_one;
	master->dev.of_node = pdev->dev.of_node;

	init_completion(&xspi->done);
	spi_master_set_devdata(master, xspi);
	xlp_spi_sysctl_setup(xspi);

	/* register spi controller */
	err = devm_spi_register_master(&pdev->dev, master);
	if (err) {
		dev_err(&pdev->dev, "spi register master failed!\n");
		spi_master_put(master);
		return err;
	}

	return 0;
}

#ifdef CONFIG_ACPI
static const struct acpi_device_id xlp_spi_acpi_match[] = {
	{ "BRCM900D", 0 },
	{ "CAV900D",  0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, xlp_spi_acpi_match);
#endif

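/*
 * Illustrative device tree node for this controller. The compatible string
 * matches the table below; the unit address, register window and interrupt
 * are placeholders and must be taken from the SoC's actual memory map:
 *
 *	spi@... {
 *		compatible = "netlogic,xlp832-spi";
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *		reg = <...>;
 *		interrupts = <...>;
 *	};
 */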
static const struct of_device_id xlp_spi_dt_id[] = {
	{ .compatible = "netlogic,xlp832-spi" },
	{ },
};
MODULE_DEVICE_TABLE(of, xlp_spi_dt_id);

static struct platform_driver xlp_spi_driver = {
	.probe	= xlp_spi_probe,
	.driver = {
		.name	= "xlp-spi",
		.of_match_table = xlp_spi_dt_id,
		.acpi_match_table = ACPI_PTR(xlp_spi_acpi_match),
	},
};
module_platform_driver(xlp_spi_driver);

MODULE_AUTHOR("Kamlakant Patel <kamlakant.patel@broadcom.com>");
MODULE_DESCRIPTION("Netlogic XLP SPI controller driver");
MODULE_LICENSE("GPL v2");