// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Renesas RZ/V2M Clocked Serial Interface (CSI) driver
 *
 * Copyright (C) 2023 Renesas Electronics Corporation
 */

#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/count_zeros.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/log2.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
#include <linux/units.h>

/* Registers */
#define CSI_MODE		0x00	/* CSI mode control */
#define CSI_CLKSEL		0x04	/* CSI clock select */
#define CSI_CNT			0x08	/* CSI control */
#define CSI_INT			0x0C	/* CSI interrupt status */
#define CSI_IFIFOL		0x10	/* CSI receive FIFO level display */
#define CSI_OFIFOL		0x14	/* CSI transmit FIFO level display */
#define CSI_IFIFO		0x18	/* CSI receive window */
#define CSI_OFIFO		0x1C	/* CSI transmit window */
#define CSI_FIFOTRG		0x20	/* CSI FIFO trigger level */

/* CSI_MODE */
#define CSI_MODE_CSIE		BIT(7)
#define CSI_MODE_TRMD		BIT(6)
#define CSI_MODE_CCL		BIT(5)
#define CSI_MODE_DIR		BIT(4)
#define CSI_MODE_CSOT		BIT(0)

#define CSI_MODE_SETUP		0x00000040
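/* CSI_MODE_SETUP: TRMD (send and receive mode) set, all other bits cleared */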

/* CSI_CLKSEL */
#define CSI_CLKSEL_CKP		BIT(17)
#define CSI_CLKSEL_DAP		BIT(16)
#define CSI_CLKSEL_MODE		(CSI_CLKSEL_CKP | CSI_CLKSEL_DAP)
#define CSI_CLKSEL_SLAVE	BIT(15)
#define CSI_CLKSEL_CKS		GENMASK(14, 1)

/* CSI_CNT */
#define CSI_CNT_CSIRST		BIT(28)
#define CSI_CNT_R_TRGEN		BIT(19)
#define CSI_CNT_UNDER_E		BIT(13)
#define CSI_CNT_OVERF_E		BIT(12)
#define CSI_CNT_TREND_E		BIT(9)
#define CSI_CNT_CSIEND_E	BIT(8)
#define CSI_CNT_T_TRGR_E	BIT(4)
#define CSI_CNT_R_TRGR_E	BIT(0)

/* CSI_INT */
#define CSI_INT_UNDER		BIT(13)
#define CSI_INT_OVERF		BIT(12)
#define CSI_INT_TREND		BIT(9)
#define CSI_INT_CSIEND		BIT(8)
#define CSI_INT_T_TRGR		BIT(4)
#define CSI_INT_R_TRGR		BIT(0)

/* CSI_FIFOTRG */
#define CSI_FIFOTRG_R_TRG	GENMASK(2, 0)

#define CSI_FIFO_SIZE_BYTES	32U
#define CSI_FIFO_HALF_SIZE	16U
#define CSI_EN_DIS_TIMEOUT_US	100
/*
 * Clock "csiclk" gets divided by 2 * CSI_CLKSEL_CKS in order to generate the
 * serial clock (output from master), with CSI_CLKSEL_CKS ranging from 0x1 (that
 * means "csiclk" is divided by 2) to 0x3FFF ("csiclk" is divided by 32766).
 */
#define CSI_CKS_MAX		GENMASK(13, 0)
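/*
 * For example, assuming a (hypothetical) 48 MHz "csiclk", CSI_CLKSEL_CKS = 3
 * yields 48 MHz / (2 * 3) = 8 MHz on the serial clock output, which is the
 * maximum this IP supports (CSI_MAX_SPI_SCKO).
 */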

#define UNDERRUN_ERROR		BIT(0)
#define OVERFLOW_ERROR		BIT(1)
#define TX_TIMEOUT_ERROR	BIT(2)
#define RX_TIMEOUT_ERROR	BIT(3)

#define CSI_MAX_SPI_SCKO	(8 * HZ_PER_MHZ)

struct rzv2m_csi_priv {
	void __iomem *base;
	struct clk *csiclk;
	struct clk *pclk;
	struct device *dev;
	struct spi_controller *controller;
	const void *txbuf;
	void *rxbuf;
	unsigned int buffer_len;
	unsigned int bytes_sent;
	unsigned int bytes_received;
	unsigned int bytes_to_transfer;
	unsigned int words_to_transfer;
	unsigned int bytes_per_word;
	wait_queue_head_t wait;
	u32 errors;
	u32 status;
};

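/*
 * Read-modify-write helper: shift @value into the register field selected by
 * @bit_mask (the shift amount is derived from the mask's lowest set bit) and
 * update only those bits, leaving the rest of the register untouched.
 */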
static void rzv2m_csi_reg_write_bit(const struct rzv2m_csi_priv *csi,
				    int reg_offs, int bit_mask, u32 value)
{
	int nr_zeros;
	u32 tmp;

	nr_zeros = count_trailing_zeros(bit_mask);
	value <<= nr_zeros;

	tmp = (readl(csi->base + reg_offs) & ~bit_mask) | value;
	writel(tmp, csi->base + reg_offs);
}

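/*
 * Assert or deassert the CSI software reset. When asserting, wait for the
 * "serial transfer in progress" flag (CSOT) to clear, so the IP is known to
 * be idle before it is reprogrammed.
 */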
static int rzv2m_csi_sw_reset(struct rzv2m_csi_priv *csi, int assert)
{
	u32 reg;

	rzv2m_csi_reg_write_bit(csi, CSI_CNT, CSI_CNT_CSIRST, assert);

	if (!assert)
		return 0;

	return readl_poll_timeout(csi->base + CSI_MODE, reg,
				  !(reg & CSI_MODE_CSOT), 0,
				  CSI_EN_DIS_TIMEOUT_US);
}

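/*
 * Enable or disable the CSI operation (CSIE). When disabling with @wait set,
 * poll until the transfer in progress (CSOT) has completed.
 */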
static int rzv2m_csi_start_stop_operation(const struct rzv2m_csi_priv *csi,
					  int enable, bool wait)
{
	u32 reg;

	rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_CSIE, enable);

	if (enable || !wait)
		return 0;

	return readl_poll_timeout(csi->base + CSI_MODE, reg,
				  !(reg & CSI_MODE_CSOT), 0,
				  CSI_EN_DIS_TIMEOUT_US);
}

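/*
 * Copy the next chunk of csi->txbuf into the TX FIFO, one 8 or 16 bit word
 * per write to CSI_OFIFO. The TX FIFO is expected to be empty at this point.
 */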
static int rzv2m_csi_fill_txfifo(struct rzv2m_csi_priv *csi)
{
	unsigned int i;

	if (readl(csi->base + CSI_OFIFOL))
		return -EIO;

	if (csi->bytes_per_word == 2) {
		const u16 *buf = csi->txbuf;

		for (i = 0; i < csi->words_to_transfer; i++)
			writel(buf[i], csi->base + CSI_OFIFO);
	} else {
		const u8 *buf = csi->txbuf;

		for (i = 0; i < csi->words_to_transfer; i++)
			writel(buf[i], csi->base + CSI_OFIFO);
	}

	csi->txbuf += csi->bytes_to_transfer;
	csi->bytes_sent += csi->bytes_to_transfer;

	return 0;
}

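/*
 * Drain the current chunk from the RX FIFO into csi->rxbuf. The FIFO level
 * is expected to match exactly the number of bytes in the current chunk.
 */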
static int rzv2m_csi_read_rxfifo(struct rzv2m_csi_priv *csi)
{
	unsigned int i;

	if (readl(csi->base + CSI_IFIFOL) != csi->bytes_to_transfer)
		return -EIO;

	if (csi->bytes_per_word == 2) {
		u16 *buf = csi->rxbuf;

		for (i = 0; i < csi->words_to_transfer; i++)
			buf[i] = (u16)readl(csi->base + CSI_IFIFO);
	} else {
		u8 *buf = csi->rxbuf;

		for (i = 0; i < csi->words_to_transfer; i++)
			buf[i] = (u8)readl(csi->base + CSI_IFIFO);
	}

	csi->rxbuf += csi->bytes_to_transfer;
	csi->bytes_received += csi->bytes_to_transfer;

	return 0;
}

static inline void rzv2m_csi_calc_current_transfer(struct rzv2m_csi_priv *csi)
{
	unsigned int bytes_transferred = max(csi->bytes_received, csi->bytes_sent);
	unsigned int bytes_remaining = csi->buffer_len - bytes_transferred;
	unsigned int to_transfer;

	if (csi->txbuf)
		/*
		 * Leaving a little bit of headroom in the FIFOs makes it very
		 * hard to raise an overflow error (which is only possible
		 * when the IP transmits and receives at the same time).
		 */
		to_transfer = min(CSI_FIFO_HALF_SIZE, bytes_remaining);
	else
		to_transfer = min(CSI_FIFO_SIZE_BYTES, bytes_remaining);

	if (csi->bytes_per_word == 2)
		to_transfer >>= 1;

	/*
	 * We can only choose a trigger level from a predefined set of values.
	 * This picks the greatest power of two that's less than or equal to
	 * the number of words we need to transfer, which may result in
	 * multiple smaller transfers.
	 */
	csi->words_to_transfer = rounddown_pow_of_two(to_transfer);

	if (csi->bytes_per_word == 2)
		csi->bytes_to_transfer = csi->words_to_transfer << 1;
	else
		csi->bytes_to_transfer = csi->words_to_transfer;
}

static inline void rzv2m_csi_set_rx_fifo_trigger_level(struct rzv2m_csi_priv *csi)
{
	rzv2m_csi_reg_write_bit(csi, CSI_FIFOTRG, CSI_FIFOTRG_R_TRG,
				ilog2(csi->words_to_transfer));
}

static inline void rzv2m_csi_enable_rx_trigger(struct rzv2m_csi_priv *csi,
						bool enable)
{
	rzv2m_csi_reg_write_bit(csi, CSI_CNT, CSI_CNT_R_TRGEN, enable);
}

static void rzv2m_csi_disable_irqs(const struct rzv2m_csi_priv *csi,
				   u32 enable_bits)
{
	u32 cnt = readl(csi->base + CSI_CNT);

	writel(cnt & ~enable_bits, csi->base + CSI_CNT);
}

static void rzv2m_csi_disable_all_irqs(struct rzv2m_csi_priv *csi)
{
	rzv2m_csi_disable_irqs(csi, CSI_CNT_R_TRGR_E | CSI_CNT_T_TRGR_E |
			       CSI_CNT_CSIEND_E | CSI_CNT_TREND_E |
			       CSI_CNT_OVERF_E | CSI_CNT_UNDER_E);
}

static inline void rzv2m_csi_clear_irqs(struct rzv2m_csi_priv *csi, u32 irqs)
{
	writel(irqs, csi->base + CSI_INT);
}

static void rzv2m_csi_clear_all_irqs(struct rzv2m_csi_priv *csi)
{
	rzv2m_csi_clear_irqs(csi, CSI_INT_UNDER | CSI_INT_OVERF |
			     CSI_INT_TREND | CSI_INT_CSIEND | CSI_INT_T_TRGR |
			     CSI_INT_R_TRGR);
}

static void rzv2m_csi_enable_irqs(struct rzv2m_csi_priv *csi, u32 enable_bits)
{
	u32 cnt = readl(csi->base + CSI_CNT);

	writel(cnt | enable_bits, csi->base + CSI_CNT);
}

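/*
 * Enable @enable_bits, sleep until the interrupt handler reports all bits in
 * @wait_mask as set in CSI_INT (or flags an error), then disable them again.
 * Times out after one second.
 */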
static int rzv2m_csi_wait_for_interrupt(struct rzv2m_csi_priv *csi,
					u32 wait_mask, u32 enable_bits)
{
	int ret;

	rzv2m_csi_enable_irqs(csi, enable_bits);

	ret = wait_event_timeout(csi->wait,
				 ((csi->status & wait_mask) == wait_mask) ||
				 csi->errors, HZ);

	rzv2m_csi_disable_irqs(csi, enable_bits);

	if (csi->errors)
		return -EIO;

	if (!ret)
		return -ETIMEDOUT;

	return 0;
}

static int rzv2m_csi_wait_for_tx_empty(struct rzv2m_csi_priv *csi)
{
	int ret;

	if (readl(csi->base + CSI_OFIFOL) == 0)
		return 0;

	ret = rzv2m_csi_wait_for_interrupt(csi, CSI_INT_TREND, CSI_CNT_TREND_E);
	if (ret == -ETIMEDOUT)
		csi->errors |= TX_TIMEOUT_ERROR;

	return ret;
}

static inline int rzv2m_csi_wait_for_rx_ready(struct rzv2m_csi_priv *csi)
{
	int ret;

	if (readl(csi->base + CSI_IFIFOL) == csi->bytes_to_transfer)
		return 0;

	ret = rzv2m_csi_wait_for_interrupt(csi, CSI_INT_R_TRGR,
					   CSI_CNT_R_TRGR_E);
	if (ret == -ETIMEDOUT)
		csi->errors |= RX_TIMEOUT_ERROR;

	return ret;
}

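/*
 * Latch the interrupt status, mask the sources that fired (the CSI_INT status
 * bits share their positions with the corresponding CSI_CNT enable bits),
 * record overflow/underrun errors and wake up the waiting thread.
 */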
static irqreturn_t rzv2m_csi_irq_handler(int irq, void *data)
{
	struct rzv2m_csi_priv *csi = data;

	csi->status = readl(csi->base + CSI_INT);
	rzv2m_csi_disable_irqs(csi, csi->status);

	if (csi->status & CSI_INT_OVERF)
		csi->errors |= OVERFLOW_ERROR;
	if (csi->status & CSI_INT_UNDER)
		csi->errors |= UNDERRUN_ERROR;

	wake_up(&csi->wait);

	return IRQ_HANDLED;
}

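/*
 * Pick a "csiclk" rate that stays within the PCLK / 2 limit, then program the
 * CKS divider so that the resulting serial clock, csiclk / (2 * CKS), does
 * not exceed the (clamped) requested SPI clock.
 */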
static void rzv2m_csi_setup_clock(struct rzv2m_csi_priv *csi, u32 spi_hz)
{
	unsigned long csiclk_rate = clk_get_rate(csi->csiclk);
	unsigned long pclk_rate = clk_get_rate(csi->pclk);
	unsigned long csiclk_rate_limit = pclk_rate >> 1;
	u32 cks;

	/*
	 * There is a restriction on the frequency of CSICLK: it has to be
	 * <= PCLK / 2.
	 */
	if (csiclk_rate > csiclk_rate_limit) {
		clk_set_rate(csi->csiclk, csiclk_rate >> 1);
		csiclk_rate = clk_get_rate(csi->csiclk);
	} else if ((csiclk_rate << 1) <= csiclk_rate_limit) {
		clk_set_rate(csi->csiclk, csiclk_rate << 1);
		csiclk_rate = clk_get_rate(csi->csiclk);
	}

	spi_hz = spi_hz > CSI_MAX_SPI_SCKO ? CSI_MAX_SPI_SCKO : spi_hz;

	cks = DIV_ROUND_UP(csiclk_rate, spi_hz << 1);
	if (cks > CSI_CKS_MAX)
		cks = CSI_CKS_MAX;

	dev_dbg(csi->dev, "SPI clk rate is %ldHz\n", csiclk_rate / (cks << 1));

	rzv2m_csi_reg_write_bit(csi, CSI_CLKSEL, CSI_CLKSEL_CKS, cks);
}

static void rzv2m_csi_setup_operating_mode(struct rzv2m_csi_priv *csi,
					   struct spi_transfer *t)
{
	if (t->rx_buf && !t->tx_buf)
		/* Reception-only mode */
		rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_TRMD, 0);
	else
		/* Send and receive mode */
		rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_TRMD, 1);

	csi->bytes_per_word = t->bits_per_word / 8;
	rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_CCL,
				csi->bytes_per_word == 2);
}

static int rzv2m_csi_setup(struct spi_device *spi)
{
	struct rzv2m_csi_priv *csi = spi_controller_get_devdata(spi->controller);
	int ret;

	rzv2m_csi_sw_reset(csi, 0);

	writel(CSI_MODE_SETUP, csi->base + CSI_MODE);

	/* Setup clock polarity and phase timing */
	rzv2m_csi_reg_write_bit(csi, CSI_CLKSEL, CSI_CLKSEL_MODE,
				~spi->mode & SPI_MODE_X_MASK);

	/* Setup serial data order */
	rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_DIR,
				!!(spi->mode & SPI_LSB_FIRST));

	/* Set the operation mode as master */
	rzv2m_csi_reg_write_bit(csi, CSI_CLKSEL, CSI_CLKSEL_SLAVE, 0);

	/* Give the IP a SW reset */
	ret = rzv2m_csi_sw_reset(csi, 1);
	if (ret)
		return ret;
	rzv2m_csi_sw_reset(csi, 0);

	/*
	 * We need to enable the communication so that the clock will settle
	 * for the right polarity before enabling the CS.
	 */
	rzv2m_csi_start_stop_operation(csi, 1, false);
	udelay(10);
	rzv2m_csi_start_stop_operation(csi, 0, false);

	return 0;
}

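/*
 * PIO transfer loop: size a chunk that fits in the FIFOs, fill the TX FIFO
 * (when transmitting), wait for it to drain and for the RX FIFO to hold the
 * whole chunk, then either copy the received words to csi->rxbuf or discard
 * them. Repeat until csi->buffer_len bytes have been transferred or an error
 * is detected.
 */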
static int rzv2m_csi_pio_transfer(struct rzv2m_csi_priv *csi)
{
	bool tx_completed = !csi->txbuf;
	bool rx_completed = !csi->rxbuf;
	int ret = 0;

	/* Make sure the TX FIFO is empty */
	writel(0, csi->base + CSI_OFIFOL);

	csi->bytes_sent = 0;
	csi->bytes_received = 0;
	csi->errors = 0;

	rzv2m_csi_disable_all_irqs(csi);
	rzv2m_csi_clear_all_irqs(csi);
	rzv2m_csi_enable_rx_trigger(csi, true);

	while (!tx_completed || !rx_completed) {
		/*
		 * Decide how many words we are going to transfer during
		 * this cycle (for both TX and RX), then set the RX FIFO trigger
		 * level accordingly. No need to set a trigger level for the
		 * TX FIFO, as this IP comes with an interrupt that fires when
		 * the TX FIFO is empty.
		 */
		rzv2m_csi_calc_current_transfer(csi);
		rzv2m_csi_set_rx_fifo_trigger_level(csi);

		rzv2m_csi_enable_irqs(csi, CSI_INT_OVERF | CSI_INT_UNDER);

		/* Make sure the RX FIFO is empty */
		writel(0, csi->base + CSI_IFIFOL);

		writel(readl(csi->base + CSI_INT), csi->base + CSI_INT);
		csi->status = 0;

		rzv2m_csi_start_stop_operation(csi, 1, false);

		/* TX */
		if (csi->txbuf) {
			ret = rzv2m_csi_fill_txfifo(csi);
			if (ret)
				break;

			ret = rzv2m_csi_wait_for_tx_empty(csi);
			if (ret)
				break;

			if (csi->bytes_sent == csi->buffer_len)
				tx_completed = true;
		}

		/*
		 * Make sure the RX FIFO contains the desired number of words.
		 * We then either flush its content, or we copy it onto
		 * csi->rxbuf.
		 */
		ret = rzv2m_csi_wait_for_rx_ready(csi);
		if (ret)
			break;

		/* RX */
		if (csi->rxbuf) {
			rzv2m_csi_start_stop_operation(csi, 0, false);

			ret = rzv2m_csi_read_rxfifo(csi);
			if (ret)
				break;

			if (csi->bytes_received == csi->buffer_len)
				rx_completed = true;
		}

		ret = rzv2m_csi_start_stop_operation(csi, 0, true);
		if (ret)
			goto pio_quit;

		if (csi->errors) {
			ret = -EIO;
			goto pio_quit;
		}
	}

	rzv2m_csi_start_stop_operation(csi, 0, true);

pio_quit:
	rzv2m_csi_disable_all_irqs(csi);
	rzv2m_csi_enable_rx_trigger(csi, false);
	rzv2m_csi_clear_all_irqs(csi);

	return ret;
}

static int rzv2m_csi_transfer_one(struct spi_controller *controller,
				  struct spi_device *spi,
				  struct spi_transfer *transfer)
{
	struct rzv2m_csi_priv *csi = spi_controller_get_devdata(controller);
	struct device *dev = csi->dev;
	int ret;

	csi->txbuf = transfer->tx_buf;
	csi->rxbuf = transfer->rx_buf;
	csi->buffer_len = transfer->len;

	rzv2m_csi_setup_operating_mode(csi, transfer);

	rzv2m_csi_setup_clock(csi, transfer->speed_hz);

	ret = rzv2m_csi_pio_transfer(csi);
	if (ret) {
		if (csi->errors & UNDERRUN_ERROR)
			dev_err(dev, "Underrun error\n");
		if (csi->errors & OVERFLOW_ERROR)
			dev_err(dev, "Overflow error\n");
		if (csi->errors & TX_TIMEOUT_ERROR)
			dev_err(dev, "TX timeout error\n");
		if (csi->errors & RX_TIMEOUT_ERROR)
			dev_err(dev, "RX timeout error\n");
	}

	return ret;
}

static int rzv2m_csi_probe(struct platform_device *pdev)
{
	struct spi_controller *controller;
	struct device *dev = &pdev->dev;
	struct rzv2m_csi_priv *csi;
	struct reset_control *rstc;
	int irq;
	int ret;

	controller = devm_spi_alloc_host(dev, sizeof(*csi));
	if (!controller)
		return -ENOMEM;

	csi = spi_controller_get_devdata(controller);
	platform_set_drvdata(pdev, csi);

	csi->dev = dev;
	csi->controller = controller;

	csi->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(csi->base))
		return PTR_ERR(csi->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	csi->csiclk = devm_clk_get(dev, "csiclk");
	if (IS_ERR(csi->csiclk))
		return dev_err_probe(dev, PTR_ERR(csi->csiclk),
				     "could not get csiclk\n");

	csi->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(csi->pclk))
		return dev_err_probe(dev, PTR_ERR(csi->pclk),
				     "could not get pclk\n");

	rstc = devm_reset_control_get_shared(dev, NULL);
	if (IS_ERR(rstc))
		return dev_err_probe(dev, PTR_ERR(rstc), "Missing reset ctrl\n");

	init_waitqueue_head(&csi->wait);

	controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
	controller->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
	controller->setup = rzv2m_csi_setup;
	controller->transfer_one = rzv2m_csi_transfer_one;
	controller->use_gpio_descriptors = true;

	device_set_node(&controller->dev, dev_fwnode(dev));

	ret = devm_request_irq(dev, irq, rzv2m_csi_irq_handler, 0,
			       dev_name(dev), csi);
	if (ret)
		return dev_err_probe(dev, ret, "cannot request IRQ\n");

	/*
	 * The reset also affects other HW that is not under the control
	 * of Linux. Therefore, all we can do is make sure the reset is
	 * deasserted.
	 */
	reset_control_deassert(rstc);

	/* Make sure the IP is in SW reset state */
	ret = rzv2m_csi_sw_reset(csi, 1);
	if (ret)
		return ret;

	ret = clk_prepare_enable(csi->csiclk);
	if (ret)
		return dev_err_probe(dev, ret, "could not enable csiclk\n");

	ret = spi_register_controller(controller);
	if (ret) {
		clk_disable_unprepare(csi->csiclk);
		return dev_err_probe(dev, ret, "register controller failed\n");
	}

	return 0;
}

static void rzv2m_csi_remove(struct platform_device *pdev)
{
	struct rzv2m_csi_priv *csi = platform_get_drvdata(pdev);

	spi_unregister_controller(csi->controller);
	rzv2m_csi_sw_reset(csi, 1);
	clk_disable_unprepare(csi->csiclk);
}

static const struct of_device_id rzv2m_csi_match[] = {
	{ .compatible = "renesas,rzv2m-csi" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rzv2m_csi_match);

static struct platform_driver rzv2m_csi_drv = {
	.probe = rzv2m_csi_probe,
	.remove_new = rzv2m_csi_remove,
	.driver = {
		.name = "rzv2m_csi",
		.of_match_table = rzv2m_csi_match,
	},
};
module_platform_driver(rzv2m_csi_drv);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fabrizio Castro <castro.fabrizio.jz@renesas.com>");
MODULE_DESCRIPTION("Clocked Serial Interface Driver");