xref: /openbmc/linux/drivers/spi/spi-dw-core.c (revision 002dff36)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * DesignWare SPI core controller driver (refer to pxa2xx_spi.c)
 *
 * Copyright (c) 2009, Intel Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

#include "spi-dw.h"

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif

/* Slave spi_dev related configuration */
struct chip_data {
	u8 tmode;		/* TR/TO/RO/EEPROM */
	u8 type;		/* SPI/SSP/MicroWire */

	u16 clk_div;		/* baud rate divider */
	u32 speed_hz;		/* baud rate */
};

#ifdef CONFIG_DEBUG_FS

#define DW_SPI_DBGFS_REG(_name, _off)	\
{					\
	.name = _name,			\
	.offset = _off,			\
}

static const struct debugfs_reg32 dw_spi_dbgfs_regs[] = {
	DW_SPI_DBGFS_REG("CTRLR0", DW_SPI_CTRLR0),
	DW_SPI_DBGFS_REG("CTRLR1", DW_SPI_CTRLR1),
	DW_SPI_DBGFS_REG("SSIENR", DW_SPI_SSIENR),
	DW_SPI_DBGFS_REG("SER", DW_SPI_SER),
	DW_SPI_DBGFS_REG("BAUDR", DW_SPI_BAUDR),
	DW_SPI_DBGFS_REG("TXFTLR", DW_SPI_TXFTLR),
	DW_SPI_DBGFS_REG("RXFTLR", DW_SPI_RXFTLR),
	DW_SPI_DBGFS_REG("TXFLR", DW_SPI_TXFLR),
	DW_SPI_DBGFS_REG("RXFLR", DW_SPI_RXFLR),
	DW_SPI_DBGFS_REG("SR", DW_SPI_SR),
	DW_SPI_DBGFS_REG("IMR", DW_SPI_IMR),
	DW_SPI_DBGFS_REG("ISR", DW_SPI_ISR),
	DW_SPI_DBGFS_REG("DMACR", DW_SPI_DMACR),
	DW_SPI_DBGFS_REG("DMATDLR", DW_SPI_DMATDLR),
	DW_SPI_DBGFS_REG("DMARDLR", DW_SPI_DMARDLR),
};

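/*
 * Expose the controller registers via debugfs: create a "dw_spi<bus_num>"
 * directory holding a read-only "registers" regset built from the table
 * above, so register state can be inspected at runtime.
 */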
static int dw_spi_debugfs_init(struct dw_spi *dws)
{
	char name[32];

	snprintf(name, 32, "dw_spi%d", dws->master->bus_num);
	dws->debugfs = debugfs_create_dir(name, NULL);
	if (!dws->debugfs)
		return -ENOMEM;

	dws->regset.regs = dw_spi_dbgfs_regs;
	dws->regset.nregs = ARRAY_SIZE(dw_spi_dbgfs_regs);
	dws->regset.base = dws->regs;
	debugfs_create_regset32("registers", 0400, dws->debugfs, &dws->regset);

	return 0;
}

static void dw_spi_debugfs_remove(struct dw_spi *dws)
{
	debugfs_remove_recursive(dws->debugfs);
}

#else
static inline int dw_spi_debugfs_init(struct dw_spi *dws)
{
	return 0;
}

static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */

void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
	bool cs_high = !!(spi->mode & SPI_CS_HIGH);

	/*
	 * The DW SPI controller requires a native CS bit to be set before it
	 * proceeds with a data transfer. So to start SPI communication we
	 * must set the corresponding bit in the Slave Enable register, no
	 * matter whether the SPI core is configured for an active-high or
	 * active-low CS level.
	 */
	if (cs_high == enable)
		dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
	else if (dws->cs_override)
		dw_writel(dws, DW_SPI_SER, 0);
}
EXPORT_SYMBOL_GPL(dw_spi_set_cs);

/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
{
	u32 tx_left, tx_room, rxtx_gap;

	tx_left = (dws->tx_end - dws->tx) / dws->n_bytes;
	tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);

	/*
	 * Another concern is the tx/rx mismatch: we thought of using
	 * (dws->fifo_len - rxflr - txflr) as the maximum for tx, but that
	 * doesn't cover the data which has left the tx/rx FIFOs and sits in
	 * the shift registers. So the limit is enforced from the software
	 * side instead.
	 */
	rxtx_gap = ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx))
			/ dws->n_bytes;

	return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap));
}

/* Return the max entries we should read out of rx fifo */
static inline u32 rx_max(struct dw_spi *dws)
{
	u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes;

	return min_t(u32, rx_left, dw_readl(dws, DW_SPI_RXFLR));
}

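/*
 * Fill the TX FIFO with up to tx_max() words from the current transfer,
 * feeding zeroes when the transfer has no TX buffer.
 */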
static void dw_writer(struct dw_spi *dws)
{
	u32 max;
	u16 txw = 0;

	spin_lock(&dws->buf_lock);
	max = tx_max(dws);
	while (max--) {
		/* Set the tx word if the transfer's original "tx" is not null */
		if (dws->tx_end - dws->len) {
			if (dws->n_bytes == 1)
				txw = *(u8 *)(dws->tx);
			else
				txw = *(u16 *)(dws->tx);
		}
		dw_write_io_reg(dws, DW_SPI_DR, txw);
		dws->tx += dws->n_bytes;
	}
	spin_unlock(&dws->buf_lock);
}

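/*
 * Drain up to rx_max() words from the RX FIFO, discarding them when the
 * transfer has no RX buffer.
 */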
static void dw_reader(struct dw_spi *dws)
{
	u32 max;
	u16 rxw;

	spin_lock(&dws->buf_lock);
	max = rx_max(dws);
	while (max--) {
		rxw = dw_read_io_reg(dws, DW_SPI_DR);
		/* Store the rx word only if the transfer's original "rx" is not null */
		if (dws->rx_end - dws->len) {
			if (dws->n_bytes == 1)
				*(u8 *)(dws->rx) = rxw;
			else
				*(u16 *)(dws->rx) = rxw;
		}
		dws->rx += dws->n_bytes;
	}
	spin_unlock(&dws->buf_lock);
}

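/*
 * Stop the transfer after a FIFO error: reset the controller, log the
 * cause and complete the current message with -EIO.
 */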
static void int_error_stop(struct dw_spi *dws, const char *msg)
{
	spi_reset_chip(dws);

	dev_err(&dws->master->dev, "%s\n", msg);
	dws->master->cur_msg->status = -EIO;
	spi_finalize_current_transfer(dws->master);
}

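/*
 * IRQ-driven transfer handler: abort on RX/TX FIFO overrun/underrun,
 * otherwise drain the RX FIFO and keep refilling the TX FIFO from the
 * TX-empty interrupt until all RX data has been received.
 */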
static irqreturn_t interrupt_transfer(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	/* Error handling */
	if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
		dw_readl(dws, DW_SPI_ICR);
		int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
		return IRQ_HANDLED;
	}

	dw_reader(dws);
	if (dws->rx_end == dws->rx) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		spi_finalize_current_transfer(dws->master);
		return IRQ_HANDLED;
	}
	if (irq_status & SPI_INT_TXEI) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		dw_writer(dws);
		/* Always re-enable the TX IRQ; it is masked again once RX has finished */
		spi_umask_intr(dws, SPI_INT_TXEI);
	}

	return IRQ_HANDLED;
}

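/*
 * Top-level IRQ handler: ignore interrupts that aren't ours, mask the
 * TX-empty interrupt if there is no message in flight, otherwise hand
 * off to the handler installed for the current transfer.
 */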
static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
	struct spi_controller *master = dev_id;
	struct dw_spi *dws = spi_controller_get_devdata(master);
	u16 irq_status = dw_readl(dws, DW_SPI_ISR) & 0x3f;

	if (!irq_status)
		return IRQ_NONE;

	if (!master->cur_msg) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		return IRQ_HANDLED;
	}

	return dws->transfer_handler(dws);
}

/* Configure CTRLR0 for DW_apb_ssi */
u32 dw_spi_update_cr0(struct spi_controller *master, struct spi_device *spi,
		      struct spi_transfer *transfer)
{
	struct chip_data *chip = spi_get_ctldata(spi);
	u32 cr0;

	/* Default SPI mode is SCPOL = 0, SCPH = 0 */
	cr0 = (transfer->bits_per_word - 1)
		| (chip->type << SPI_FRF_OFFSET)
		| ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) |
		   (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET) |
		   (((spi->mode & SPI_LOOP) ? 1 : 0) << SPI_SRL_OFFSET))
		| (chip->tmode << SPI_TMOD_OFFSET);

	return cr0;
}
EXPORT_SYMBOL_GPL(dw_spi_update_cr0);

/* Configure CTRLR0 for DWC_ssi */
u32 dw_spi_update_cr0_v1_01a(struct spi_controller *master,
			     struct spi_device *spi,
			     struct spi_transfer *transfer)
{
	struct chip_data *chip = spi_get_ctldata(spi);
	u32 cr0;

	/* CTRLR0[ 4: 0] Data Frame Size */
	cr0 = (transfer->bits_per_word - 1);

	/* CTRLR0[ 7: 6] Frame Format */
	cr0 |= chip->type << DWC_SSI_CTRLR0_FRF_OFFSET;

	/*
	 * SPI mode (SCPOL|SCPH)
	 * CTRLR0[ 8] Serial Clock Phase
	 * CTRLR0[ 9] Serial Clock Polarity
	 */
	cr0 |= ((spi->mode & SPI_CPOL) ? 1 : 0) << DWC_SSI_CTRLR0_SCPOL_OFFSET;
	cr0 |= ((spi->mode & SPI_CPHA) ? 1 : 0) << DWC_SSI_CTRLR0_SCPH_OFFSET;

	/* CTRLR0[11:10] Transfer Mode */
	cr0 |= chip->tmode << DWC_SSI_CTRLR0_TMOD_OFFSET;

	/* CTRLR0[13] Shift Register Loop */
	cr0 |= ((spi->mode & SPI_LOOP) ? 1 : 0) << DWC_SSI_CTRLR0_SRL_OFFSET;

	return cr0;
}
EXPORT_SYMBOL_GPL(dw_spi_update_cr0_v1_01a);

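/*
 * Prepare and start one transfer: latch the buffer pointers, program the
 * clock divider and CTRLR0, then either set up DMA or arm the TX/RX
 * interrupts for PIO, and finally re-enable the controller.
 */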
static int dw_spi_transfer_one(struct spi_controller *master,
		struct spi_device *spi, struct spi_transfer *transfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);
	struct chip_data *chip = spi_get_ctldata(spi);
	unsigned long flags;
	u8 imask = 0;
	u16 txlevel = 0;
	u32 cr0;
	int ret;

	dws->dma_mapped = 0;
	spin_lock_irqsave(&dws->buf_lock, flags);
	dws->tx = (void *)transfer->tx_buf;
	dws->tx_end = dws->tx + transfer->len;
	dws->rx = transfer->rx_buf;
	dws->rx_end = dws->rx + transfer->len;
	dws->len = transfer->len;
	spin_unlock_irqrestore(&dws->buf_lock, flags);

	/* Ensure dws->rx and dws->rx_end are visible */
	smp_mb();

	spi_enable_chip(dws, 0);

	/* Handle per-transfer options for bits_per_word and speed */
	if (transfer->speed_hz != dws->current_freq) {
		if (transfer->speed_hz != chip->speed_hz) {
			/* clk_div doesn't support odd numbers */
			chip->clk_div = (DIV_ROUND_UP(dws->max_freq, transfer->speed_hz) + 1) & 0xfffe;
			chip->speed_hz = transfer->speed_hz;
		}
		dws->current_freq = transfer->speed_hz;
		spi_set_clk(dws, chip->clk_div);
	}

	transfer->effective_speed_hz = dws->max_freq / chip->clk_div;
	dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);

	cr0 = dws->update_cr0(master, spi, transfer);
	dw_writel(dws, DW_SPI_CTRLR0, cr0);

	/* Check if current transfer is a DMA transaction */
	if (master->can_dma && master->can_dma(master, spi, transfer))
		dws->dma_mapped = master->cur_msg_mapped;

	/* For poll mode, just disable all interrupts */
	spi_mask_intr(dws, 0xff);

	/*
	 * Interrupt mode:
	 * we only need to set the TXEI IRQ, as TX/RX always happen synchronously
	 */
	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_setup(dws, transfer);
		if (ret < 0) {
			spi_enable_chip(dws, 1);
			return ret;
		}
	} else {
		txlevel = min_t(u16, dws->fifo_len / 2, dws->len / dws->n_bytes);
		dw_writel(dws, DW_SPI_TXFTLR, txlevel);

		/* Set the interrupt mask */
		imask |= SPI_INT_TXEI | SPI_INT_TXOI |
			 SPI_INT_RXUI | SPI_INT_RXOI;
		spi_umask_intr(dws, imask);

		dws->transfer_handler = interrupt_transfer;
	}

	spi_enable_chip(dws, 1);

	if (dws->dma_mapped)
		return dws->dma_ops->dma_transfer(dws, transfer);

	return 1;
}

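/* Error path for a failed message: stop any DMA in progress and reset the chip */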
static void dw_spi_handle_err(struct spi_controller *master,
		struct spi_message *msg)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	if (dws->dma_mapped)
		dws->dma_ops->dma_stop(dws);

	spi_reset_chip(dws);
}

/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
	struct chip_data *chip;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		spi_set_ctldata(spi, chip);
	}

	chip->tmode = SPI_TMOD_TR;

	return 0;
}

static void dw_spi_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	kfree(chip);
	spi_set_ctldata(spi, NULL);
}

/* Restart the controller, disable all interrupts, clear the rx fifo */
static void spi_hw_init(struct device *dev, struct dw_spi *dws)
{
	spi_reset_chip(dws);

	/*
	 * Try to detect the FIFO depth if it isn't set by the interface
	 * driver; per the HW spec the depth can be from 2 to 256.
	 */
	if (!dws->fifo_len) {
		u32 fifo;

		for (fifo = 1; fifo < 256; fifo++) {
			dw_writel(dws, DW_SPI_TXFTLR, fifo);
			if (fifo != dw_readl(dws, DW_SPI_TXFTLR))
				break;
		}
		dw_writel(dws, DW_SPI_TXFTLR, 0);

		dws->fifo_len = (fifo == 1) ? 0 : fifo;
		dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
	}

	/* Enable the HW fixup for explicit CS deselect on Amazon's Alpine chips */
	if (dws->cs_override)
		dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
}

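/*
 * Register a DW SPI controller: allocate the SPI master, request the IRQ,
 * install the controller callbacks, run the basic HW init (including FIFO
 * depth detection) and optional DMA init, then register with the SPI core.
 */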
int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
	struct spi_controller *master;
	int ret;

	if (!dws)
		return -EINVAL;

	master = spi_alloc_master(dev, 0);
	if (!master)
		return -ENOMEM;

	dws->master = master;
	dws->type = SSI_MOTO_SPI;
	dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
	spin_lock_init(&dws->buf_lock);

	spi_controller_set_devdata(master, dws);

	ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
			  master);
	if (ret < 0) {
		dev_err(dev, "can not get IRQ\n");
		goto err_free_master;
	}

	master->use_gpio_descriptors = true;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
	master->bus_num = dws->bus_num;
	master->num_chipselect = dws->num_cs;
	master->setup = dw_spi_setup;
	master->cleanup = dw_spi_cleanup;
	master->set_cs = dw_spi_set_cs;
	master->transfer_one = dw_spi_transfer_one;
	master->handle_err = dw_spi_handle_err;
	master->max_speed_hz = dws->max_freq;
	master->dev.of_node = dev->of_node;
	master->dev.fwnode = dev->fwnode;
	master->flags = SPI_MASTER_GPIO_SS;
	master->auto_runtime_pm = true;

	if (dws->set_cs)
		master->set_cs = dws->set_cs;

	/* Basic HW init */
	spi_hw_init(dev, dws);

	if (dws->dma_ops && dws->dma_ops->dma_init) {
		ret = dws->dma_ops->dma_init(dev, dws);
		if (ret) {
			dev_warn(dev, "DMA init failed\n");
		} else {
			master->can_dma = dws->dma_ops->can_dma;
			master->flags |= SPI_CONTROLLER_MUST_TX;
		}
	}

	ret = spi_register_controller(master);
	if (ret) {
		dev_err(&master->dev, "problem registering spi master\n");
		goto err_dma_exit;
	}

	dw_spi_debugfs_init(dws);
	return 0;

err_dma_exit:
	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);
	spi_enable_chip(dws, 0);
	free_irq(dws->irq, master);
err_free_master:
	spi_controller_put(master);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_add_host);

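/* Tear down in reverse order: debugfs, SPI core registration, DMA, HW, IRQ */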
void dw_spi_remove_host(struct dw_spi *dws)
{
	dw_spi_debugfs_remove(dws);

	spi_unregister_controller(dws->master);

	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);

	spi_shutdown_chip(dws);

	free_irq(dws->irq, dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_remove_host);

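/* Suspend: quiesce the SPI core queue first, then shut the controller down */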
int dw_spi_suspend_host(struct dw_spi *dws)
{
	int ret;

	ret = spi_controller_suspend(dws->master);
	if (ret)
		return ret;

	spi_shutdown_chip(dws);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_spi_suspend_host);

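/* Resume: re-run the basic HW init before restarting the SPI core queue */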
int dw_spi_resume_host(struct dw_spi *dws)
{
	spi_hw_init(&dws->master->dev, dws);
	return spi_controller_resume(dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_resume_host);

MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");