// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Renesas RZ/V2M Clocked Serial Interface (CSI) driver
 *
 * Copyright (C) 2023 Renesas Electronics Corporation
 */

#include <linux/clk.h>
#include <linux/count_zeros.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>

/* Registers */
#define CSI_MODE	0x00	/* CSI mode control */
#define CSI_CLKSEL	0x04	/* CSI clock select */
#define CSI_CNT		0x08	/* CSI control */
#define CSI_INT		0x0C	/* CSI interrupt status */
#define CSI_IFIFOL	0x10	/* CSI receive FIFO level display */
#define CSI_OFIFOL	0x14	/* CSI transmit FIFO level display */
#define CSI_IFIFO	0x18	/* CSI receive window */
#define CSI_OFIFO	0x1C	/* CSI transmit window */
#define CSI_FIFOTRG	0x20	/* CSI FIFO trigger level */

/* CSI_MODE */
#define CSI_MODE_CSIE		BIT(7)
#define CSI_MODE_TRMD		BIT(6)
#define CSI_MODE_CCL		BIT(5)
#define CSI_MODE_DIR		BIT(4)
#define CSI_MODE_CSOT		BIT(0)

#define CSI_MODE_SETUP		0x00000040

/* CSI_CLKSEL */
#define CSI_CLKSEL_CKP		BIT(17)
#define CSI_CLKSEL_DAP		BIT(16)
#define CSI_CLKSEL_SLAVE	BIT(15)
#define CSI_CLKSEL_CKS		GENMASK(14, 1)

/* CSI_CNT */
#define CSI_CNT_CSIRST		BIT(28)
#define CSI_CNT_R_TRGEN		BIT(19)
#define CSI_CNT_UNDER_E		BIT(13)
#define CSI_CNT_OVERF_E		BIT(12)
#define CSI_CNT_TREND_E		BIT(9)
#define CSI_CNT_CSIEND_E	BIT(8)
#define CSI_CNT_T_TRGR_E	BIT(4)
#define CSI_CNT_R_TRGR_E	BIT(0)

/* CSI_INT */
#define CSI_INT_UNDER		BIT(13)
#define CSI_INT_OVERF		BIT(12)
#define CSI_INT_TREND		BIT(9)
#define CSI_INT_CSIEND		BIT(8)
#define CSI_INT_T_TRGR		BIT(4)
#define CSI_INT_R_TRGR		BIT(0)

/* CSI_FIFOTRG */
#define CSI_FIFOTRG_R_TRG	GENMASK(2, 0)

#define CSI_FIFO_SIZE_BYTES	32
#define CSI_FIFO_HALF_SIZE	16
#define CSI_EN_DIS_TIMEOUT_US	100
#define CSI_CKS_MAX		0x3FFF

#define UNDERRUN_ERROR		BIT(0)
#define OVERFLOW_ERROR		BIT(1)
#define TX_TIMEOUT_ERROR	BIT(2)
#define RX_TIMEOUT_ERROR	BIT(3)

#define CSI_MAX_SPI_SCKO	8000000

struct rzv2m_csi_priv {
	void __iomem *base;
	struct clk *csiclk;
	struct clk *pclk;
	struct device *dev;
	struct spi_controller *controller;
	const u8 *txbuf;
	u8 *rxbuf;
	int buffer_len;
	int bytes_sent;
	int bytes_received;
	int bytes_to_transfer;
	int words_to_transfer;
	unsigned char bytes_per_word;
	wait_queue_head_t wait;
	u8 errors;
	u32 status;
};

static const unsigned char x_trg[] = {
	0, 1, 1, 2, 2, 2, 2, 3,
	3, 3, 3, 3, 3, 3, 3, 4,
	4, 4, 4, 4, 4, 4, 4, 4,
	4, 4, 4, 4, 4, 4, 4, 5
};

static const unsigned char x_trg_words[] = {
	1, 2, 2, 4, 4, 4, 4, 8,
	8, 8, 8, 8, 8, 8, 8, 16,
	16, 16, 16, 16, 16, 16, 16, 16,
	16, 16, 16, 16, 16, 16, 16, 32
};

static void rzv2m_csi_reg_write_bit(const struct rzv2m_csi_priv *csi,
				    int reg_offs, int bit_mask, u32 value)
{
	int nr_zeros;
	u32 tmp;

	nr_zeros = count_trailing_zeros(bit_mask);
	value <<= nr_zeros;

	tmp = (readl(csi->base + reg_offs) & ~bit_mask) | value;
	writel(tmp, csi->base + reg_offs);
}

static int rzv2m_csi_sw_reset(struct rzv2m_csi_priv *csi, int assert)
{
	u32 reg;

	rzv2m_csi_reg_write_bit(csi, CSI_CNT, CSI_CNT_CSIRST, assert);

	if (assert) {
		return readl_poll_timeout(csi->base + CSI_MODE, reg,
					  !(reg & CSI_MODE_CSOT), 0,
					  CSI_EN_DIS_TIMEOUT_US);
	}

	return 0;
}

static int rzv2m_csi_start_stop_operation(const struct rzv2m_csi_priv *csi,
					  int enable, bool wait)
{
	u32 reg;

	rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_CSIE, enable);

	if (!enable && wait)
		return readl_poll_timeout(csi->base + CSI_MODE, reg,
					  !(reg & CSI_MODE_CSOT), 0,
					  CSI_EN_DIS_TIMEOUT_US);

	return 0;
}

static int rzv2m_csi_fill_txfifo(struct rzv2m_csi_priv *csi)
{
	int i;

	if (readl(csi->base + CSI_OFIFOL))
		return -EIO;

	if (csi->bytes_per_word == 2) {
		u16 *buf = (u16 *)csi->txbuf;

		for (i = 0; i < csi->words_to_transfer; i++)
			writel(buf[i], csi->base + CSI_OFIFO);
	} else {
		u8 *buf = (u8 *)csi->txbuf;

		for (i = 0; i < csi->words_to_transfer; i++)
			writel(buf[i], csi->base + CSI_OFIFO);
	}

	csi->txbuf += csi->bytes_to_transfer;
	csi->bytes_sent += csi->bytes_to_transfer;

	return 0;
}

static int rzv2m_csi_read_rxfifo(struct rzv2m_csi_priv *csi)
{
	int i;

	if (readl(csi->base + CSI_IFIFOL) != csi->bytes_to_transfer)
		return -EIO;

	if (csi->bytes_per_word == 2) {
		u16 *buf = (u16 *)csi->rxbuf;

		for (i = 0; i < csi->words_to_transfer; i++)
			buf[i] = (u16)readl(csi->base + CSI_IFIFO);
	} else {
		u8 *buf = (u8 *)csi->rxbuf;

		for (i = 0; i < csi->words_to_transfer; i++)
			buf[i] = (u8)readl(csi->base + CSI_IFIFO);
	}

	csi->rxbuf += csi->bytes_to_transfer;
	csi->bytes_received += csi->bytes_to_transfer;

	return 0;
}

static inline void rzv2m_csi_calc_current_transfer(struct rzv2m_csi_priv *csi)
{
	int bytes_transferred = max_t(int, csi->bytes_received, csi->bytes_sent);
	int bytes_remaining = csi->buffer_len - bytes_transferred;
	int to_transfer;

	if (csi->txbuf)
		/*
		 * Leaving a little bit of headroom in the FIFOs makes it very
		 * hard to raise an overflow error (which is only possible
		 * when IP transmits and receives at the same time).
		 */
		to_transfer = min_t(int, CSI_FIFO_HALF_SIZE, bytes_remaining);
	else
		to_transfer = min_t(int, CSI_FIFO_SIZE_BYTES, bytes_remaining);

	if (csi->bytes_per_word == 2)
		to_transfer >>= 1;

	/*
	 * We can only choose a trigger level from a predefined set of values.
	 * This will pick a value that is the greatest possible integer that's
	 * less than or equal to the number of bytes we need to transfer.
	 * This may result in multiple smaller transfers.
	 */
	csi->words_to_transfer = x_trg_words[to_transfer - 1];

	if (csi->bytes_per_word == 2)
		csi->bytes_to_transfer = csi->words_to_transfer << 1;
	else
		csi->bytes_to_transfer = csi->words_to_transfer;
}

static inline void rzv2m_csi_set_rx_fifo_trigger_level(struct rzv2m_csi_priv *csi)
{
	rzv2m_csi_reg_write_bit(csi, CSI_FIFOTRG, CSI_FIFOTRG_R_TRG,
				x_trg[csi->words_to_transfer - 1]);
}

static inline void rzv2m_csi_enable_rx_trigger(struct rzv2m_csi_priv *csi,
					       bool enable)
{
	rzv2m_csi_reg_write_bit(csi, CSI_CNT, CSI_CNT_R_TRGEN, enable);
}

static void rzv2m_csi_disable_irqs(const struct rzv2m_csi_priv *csi,
				   u32 enable_bits)
{
	u32 cnt = readl(csi->base + CSI_CNT);

	writel(cnt & ~enable_bits, csi->base + CSI_CNT);
}

static void rzv2m_csi_disable_all_irqs(struct rzv2m_csi_priv *csi)
{
	rzv2m_csi_disable_irqs(csi, CSI_CNT_R_TRGR_E | CSI_CNT_T_TRGR_E |
			       CSI_CNT_CSIEND_E | CSI_CNT_TREND_E |
			       CSI_CNT_OVERF_E | CSI_CNT_UNDER_E);
}

static inline void rzv2m_csi_clear_irqs(struct rzv2m_csi_priv *csi, u32 irqs)
{
	writel(irqs, csi->base + CSI_INT);
}

static void rzv2m_csi_clear_all_irqs(struct rzv2m_csi_priv *csi)
{
	rzv2m_csi_clear_irqs(csi, CSI_INT_UNDER | CSI_INT_OVERF |
			     CSI_INT_TREND | CSI_INT_CSIEND | CSI_INT_T_TRGR |
			     CSI_INT_R_TRGR);
}

static void rzv2m_csi_enable_irqs(struct rzv2m_csi_priv *csi, u32 enable_bits)
{
	u32 cnt = readl(csi->base + CSI_CNT);

	writel(cnt | enable_bits, csi->base + CSI_CNT);
}

static int rzv2m_csi_wait_for_interrupt(struct rzv2m_csi_priv *csi,
					u32 wait_mask, u32 enable_bits)
{
	int ret;

	rzv2m_csi_enable_irqs(csi, enable_bits);

	ret = wait_event_timeout(csi->wait,
				 ((csi->status & wait_mask) == wait_mask) ||
				 csi->errors, HZ);

	rzv2m_csi_disable_irqs(csi, enable_bits);

	if (csi->errors)
		return -EIO;

	if (!ret)
		return -ETIMEDOUT;

	return 0;
}

static int rzv2m_csi_wait_for_tx_empty(struct rzv2m_csi_priv *csi)
{
	int ret;

	if (readl(csi->base + CSI_OFIFOL) == 0)
		return 0;

	ret = rzv2m_csi_wait_for_interrupt(csi, CSI_INT_TREND, CSI_CNT_TREND_E);

	if (ret == -ETIMEDOUT)
		csi->errors |= TX_TIMEOUT_ERROR;

	return ret;
}

static inline int rzv2m_csi_wait_for_rx_ready(struct rzv2m_csi_priv *csi)
{
	int ret;

	if (readl(csi->base + CSI_IFIFOL) == csi->bytes_to_transfer)
		return 0;

	ret = rzv2m_csi_wait_for_interrupt(csi, CSI_INT_R_TRGR,
					   CSI_CNT_R_TRGR_E);

	if (ret == -ETIMEDOUT)
		csi->errors |= RX_TIMEOUT_ERROR;

	return ret;
}

static irqreturn_t rzv2m_csi_irq_handler(int irq, void *data)
{
	struct rzv2m_csi_priv *csi = (struct rzv2m_csi_priv *)data;

	csi->status = readl(csi->base + CSI_INT);
	rzv2m_csi_disable_irqs(csi, csi->status);

	if (csi->status & CSI_INT_OVERF)
		csi->errors |= OVERFLOW_ERROR;
	if (csi->status & CSI_INT_UNDER)
		csi->errors |= UNDERRUN_ERROR;

	wake_up(&csi->wait);

	return IRQ_HANDLED;
}

static void rzv2m_csi_setup_clock(struct rzv2m_csi_priv *csi, u32 spi_hz)
{
	unsigned long csiclk_rate = clk_get_rate(csi->csiclk);
	unsigned long pclk_rate = clk_get_rate(csi->pclk);
	unsigned long csiclk_rate_limit = pclk_rate >> 1;
	u32 cks;

	/*
	 * There is a restriction on the frequency of CSICLK, it has to be <=
	 * PCLK / 2.
	 */
	if (csiclk_rate > csiclk_rate_limit) {
		clk_set_rate(csi->csiclk, csiclk_rate >> 1);
		csiclk_rate = clk_get_rate(csi->csiclk);
	} else if ((csiclk_rate << 1) <= csiclk_rate_limit) {
		clk_set_rate(csi->csiclk, csiclk_rate << 1);
		csiclk_rate = clk_get_rate(csi->csiclk);
	}

	spi_hz = spi_hz > CSI_MAX_SPI_SCKO ? CSI_MAX_SPI_SCKO : spi_hz;

	cks = DIV_ROUND_UP(csiclk_rate, spi_hz << 1);
	if (cks > CSI_CKS_MAX)
		cks = CSI_CKS_MAX;

	dev_dbg(csi->dev, "SPI clk rate is %ldHz\n", csiclk_rate / (cks << 1));

	rzv2m_csi_reg_write_bit(csi, CSI_CLKSEL, CSI_CLKSEL_CKS, cks);
}

static void rzv2m_csi_setup_operating_mode(struct rzv2m_csi_priv *csi,
					   struct spi_transfer *t)
{
	if (t->rx_buf && !t->tx_buf)
		/* Reception-only mode */
		rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_TRMD, 0);
	else
		/* Send and receive mode */
		rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_TRMD, 1);

	csi->bytes_per_word = t->bits_per_word / 8;
	rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_CCL,
				csi->bytes_per_word == 2);
}

static int rzv2m_csi_setup(struct spi_device *spi)
{
	struct rzv2m_csi_priv *csi = spi_controller_get_devdata(spi->controller);
	int ret;

	rzv2m_csi_sw_reset(csi, 0);

	writel(CSI_MODE_SETUP, csi->base + CSI_MODE);

	/* Setup clock polarity and phase timing */
	rzv2m_csi_reg_write_bit(csi, CSI_CLKSEL, CSI_CLKSEL_CKP,
				!(spi->mode & SPI_CPOL));
	rzv2m_csi_reg_write_bit(csi, CSI_CLKSEL, CSI_CLKSEL_DAP,
				!(spi->mode & SPI_CPHA));

	/* Setup serial data order */
	rzv2m_csi_reg_write_bit(csi, CSI_MODE, CSI_MODE_DIR,
				!!(spi->mode & SPI_LSB_FIRST));

	/* Set the operation mode as master */
	rzv2m_csi_reg_write_bit(csi, CSI_CLKSEL, CSI_CLKSEL_SLAVE, 0);

	/* Give the IP a SW reset */
	ret = rzv2m_csi_sw_reset(csi, 1);
	if (ret)
		return ret;
	rzv2m_csi_sw_reset(csi, 0);

	/*
	 * We need to enable the communication so that the clock will settle
	 * for the right polarity before enabling the CS.
	 */
	rzv2m_csi_start_stop_operation(csi, 1, false);
	udelay(10);
	rzv2m_csi_start_stop_operation(csi, 0, false);

	return 0;
}

static int rzv2m_csi_pio_transfer(struct rzv2m_csi_priv *csi)
{
	bool tx_completed = csi->txbuf ? false : true;
	bool rx_completed = csi->rxbuf ? false : true;
	int ret = 0;

	/* Make sure the TX FIFO is empty */
	writel(0, csi->base + CSI_OFIFOL);

	csi->bytes_sent = 0;
	csi->bytes_received = 0;
	csi->errors = 0;

	rzv2m_csi_disable_all_irqs(csi);
	rzv2m_csi_clear_all_irqs(csi);
	rzv2m_csi_enable_rx_trigger(csi, true);

	while (!tx_completed || !rx_completed) {
		/*
		 * Decide how many words we are going to transfer during
		 * this cycle (for both TX and RX), then set the RX FIFO trigger
		 * level accordingly. No need to set a trigger level for the
		 * TX FIFO, as this IP comes with an interrupt that fires when
		 * the TX FIFO is empty.
		 */
		rzv2m_csi_calc_current_transfer(csi);
		rzv2m_csi_set_rx_fifo_trigger_level(csi);

		rzv2m_csi_enable_irqs(csi, CSI_INT_OVERF | CSI_INT_UNDER);

		/* Make sure the RX FIFO is empty */
		writel(0, csi->base + CSI_IFIFOL);

		writel(readl(csi->base + CSI_INT), csi->base + CSI_INT);
		csi->status = 0;

		rzv2m_csi_start_stop_operation(csi, 1, false);

		/* TX */
		if (csi->txbuf) {
			ret = rzv2m_csi_fill_txfifo(csi);
			if (ret)
				break;

			ret = rzv2m_csi_wait_for_tx_empty(csi);
			if (ret)
				break;

			if (csi->bytes_sent == csi->buffer_len)
				tx_completed = true;
		}

		/*
		 * Make sure the RX FIFO contains the desired number of words.
		 * We then either flush its content, or we copy it onto
		 * csi->rxbuf.
		 */
		ret = rzv2m_csi_wait_for_rx_ready(csi);
		if (ret)
			break;

		/* RX */
		if (csi->rxbuf) {
			rzv2m_csi_start_stop_operation(csi, 0, false);

			ret = rzv2m_csi_read_rxfifo(csi);
			if (ret)
				break;

			if (csi->bytes_received == csi->buffer_len)
				rx_completed = true;
		}

		ret = rzv2m_csi_start_stop_operation(csi, 0, true);
		if (ret)
			goto pio_quit;

		if (csi->errors) {
			ret = -EIO;
			goto pio_quit;
		}
	}

	rzv2m_csi_start_stop_operation(csi, 0, true);

pio_quit:
	rzv2m_csi_disable_all_irqs(csi);
	rzv2m_csi_enable_rx_trigger(csi, false);
	rzv2m_csi_clear_all_irqs(csi);

	return ret;
}

static int rzv2m_csi_transfer_one(struct spi_controller *controller,
				  struct spi_device *spi,
				  struct spi_transfer *transfer)
{
	struct rzv2m_csi_priv *csi = spi_controller_get_devdata(controller);
	struct device *dev = csi->dev;
	int ret;

	csi->txbuf = transfer->tx_buf;
	csi->rxbuf = transfer->rx_buf;
	csi->buffer_len = transfer->len;

	rzv2m_csi_setup_operating_mode(csi, transfer);

	rzv2m_csi_setup_clock(csi, transfer->speed_hz);

	ret = rzv2m_csi_pio_transfer(csi);
	if (ret) {
		if (csi->errors & UNDERRUN_ERROR)
			dev_err(dev, "Underrun error\n");
		if (csi->errors & OVERFLOW_ERROR)
			dev_err(dev, "Overflow error\n");
		if (csi->errors & TX_TIMEOUT_ERROR)
			dev_err(dev, "TX timeout error\n");
		if (csi->errors & RX_TIMEOUT_ERROR)
			dev_err(dev, "RX timeout error\n");
	}

	return ret;
}

static int rzv2m_csi_probe(struct platform_device *pdev)
{
	struct spi_controller *controller;
	struct device *dev = &pdev->dev;
	struct rzv2m_csi_priv *csi;
	struct reset_control *rstc;
	int irq;
	int ret;

	controller = devm_spi_alloc_master(dev, sizeof(*csi));
	if (!controller)
		return -ENOMEM;

	csi = spi_controller_get_devdata(controller);
	platform_set_drvdata(pdev, csi);

	csi->dev = dev;
	csi->controller = controller;

	csi->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(csi->base))
		return PTR_ERR(csi->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	csi->csiclk = devm_clk_get(dev, "csiclk");
	if (IS_ERR(csi->csiclk))
		return dev_err_probe(dev, PTR_ERR(csi->csiclk),
				     "could not get csiclk\n");

	csi->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(csi->pclk))
		return dev_err_probe(dev, PTR_ERR(csi->pclk),
				     "could not get pclk\n");

	rstc = devm_reset_control_get_shared(dev, NULL);
	if (IS_ERR(rstc))
		return dev_err_probe(dev, PTR_ERR(rstc), "Missing reset ctrl\n");

	init_waitqueue_head(&csi->wait);

	controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
	controller->dev.of_node = pdev->dev.of_node;
	controller->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
	controller->setup = rzv2m_csi_setup;
	controller->transfer_one = rzv2m_csi_transfer_one;
	controller->use_gpio_descriptors = true;

	ret = devm_request_irq(dev, irq, rzv2m_csi_irq_handler, 0,
			       dev_name(dev), csi);
	if (ret)
		return dev_err_probe(dev, ret, "cannot request IRQ\n");

	/*
	 * The reset also affects other HW that is not under the control
	 * of Linux. Therefore, all we can do is make sure the reset is
	 * deasserted.
	 */
	reset_control_deassert(rstc);

	/* Make sure the IP is in SW reset state */
	ret = rzv2m_csi_sw_reset(csi, 1);
	if (ret)
		return ret;

	ret = clk_prepare_enable(csi->csiclk);
	if (ret)
		return dev_err_probe(dev, ret, "could not enable csiclk\n");

	ret = spi_register_controller(controller);
	if (ret) {
		clk_disable_unprepare(csi->csiclk);
		return dev_err_probe(dev, ret, "register controller failed\n");
	}

	return 0;
}

static int rzv2m_csi_remove(struct platform_device *pdev)
{
	struct rzv2m_csi_priv *csi = platform_get_drvdata(pdev);

	spi_unregister_controller(csi->controller);
	rzv2m_csi_sw_reset(csi, 1);
	clk_disable_unprepare(csi->csiclk);

	return 0;
}

static const struct of_device_id rzv2m_csi_match[] = {
	{ .compatible = "renesas,rzv2m-csi" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rzv2m_csi_match);

static struct platform_driver rzv2m_csi_drv = {
	.probe = rzv2m_csi_probe,
	.remove = rzv2m_csi_remove,
	.driver = {
		.name = "rzv2m_csi",
		.of_match_table = rzv2m_csi_match,
	},
};
module_platform_driver(rzv2m_csi_drv);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fabrizio Castro <castro.fabrizio.jz@renesas.com>");
MODULE_DESCRIPTION("Clocked Serial Interface Driver");
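
/*
 * Usage sketch (editor's note, not part of the driver proper): this
 * controller is only ever exercised through the SPI core, so nothing calls
 * into this file directly.  A client driver bound to a peripheral on this
 * bus would reach rzv2m_csi_transfer_one() roughly as follows; the function
 * name and the 0x9F opcode below are purely illustrative assumptions:
 *
 *	static int example_read_id(struct spi_device *spi, u8 *id, size_t len)
 *	{
 *		u8 cmd = 0x9f;				// hypothetical "read ID" opcode
 *		struct spi_transfer xfers[] = {
 *			{ .tx_buf = &cmd, .len = 1 },	// TX leg
 *			{ .rx_buf = id, .len = len },	// RX-only leg
 *		};
 *
 *		// spi_sync_transfer() wraps the transfers in an spi_message;
 *		// the core then calls ->transfer_one() above once per leg.
 *		return spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
 *	}
 */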