// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Maxime Coquelin 2015
 * Copyright (C) STMicroelectronics SA 2017
 * Authors:  Maxime Coquelin <mcoquelin.stm32@gmail.com>
 *	     Gerald Baeza <gerald.baeza@foss.st.com>
 *	     Erwan Le Ray <erwan.leray@foss.st.com>
 *
 * Inspired by st-asc.c from STMicroelectronics (c)
 */

#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/tty_flip.h>
#include <linux/tty.h>

#include "serial_mctrl_gpio.h"
#include "stm32-usart.h"

static void stm32_usart_stop_tx(struct uart_port *port);
static void stm32_usart_transmit_chars(struct uart_port *port);

static inline struct stm32_port *to_stm32_port(struct uart_port *port)
{
	return container_of(port, struct stm32_port, port);
}

static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits)
{
	u32 val;

	val = readl_relaxed(port->membase + reg);
	val |= bits;
	writel_relaxed(val, port->membase + reg);
}

static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
{
	u32 val;

	val = readl_relaxed(port->membase + reg);
	val &= ~bits;
	writel_relaxed(val, port->membase + reg);
}

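/*
 * Driver Enable timing: DEAT/DEDT hold the assertion/de-assertion delays in
 * sample-time units, i.e. delay (ms) * baud * oversampling / 1000.
 * Rough illustration (figures picked for the example only): a 1 ms
 * delay_rts_before_send at 9600 baud with 16x oversampling requests
 * 1 * 9600 * 16 / 1000 = 154 sample times, which is then clamped to
 * rs485_deat_dedt_max below.
 */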
static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
					 u32 delay_DDE, u32 baud)
{
	u32 rs485_deat_dedt;
	u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT);
	bool over8;

	*cr3 |= USART_CR3_DEM;
	over8 = *cr1 & USART_CR1_OVER8;

	if (over8)
		rs485_deat_dedt = delay_ADE * baud * 8;
	else
		rs485_deat_dedt = delay_ADE * baud * 16;

	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
			  rs485_deat_dedt_max : rs485_deat_dedt;
	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) &
			   USART_CR1_DEAT_MASK;
	*cr1 |= rs485_deat_dedt;

	if (over8)
		rs485_deat_dedt = delay_DDE * baud * 8;
	else
		rs485_deat_dedt = delay_DDE * baud * 16;

	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
			  rs485_deat_dedt_max : rs485_deat_dedt;
	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) &
			   USART_CR1_DEDT_MASK;
	*cr1 |= rs485_deat_dedt;
}

static int stm32_usart_config_rs485(struct uart_port *port,
				    struct serial_rs485 *rs485conf)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	u32 usartdiv, baud, cr1, cr3;
	bool over8;

	stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));

	port->rs485 = *rs485conf;

	rs485conf->flags |= SER_RS485_RX_DURING_TX;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		cr1 = readl_relaxed(port->membase + ofs->cr1);
		cr3 = readl_relaxed(port->membase + ofs->cr3);
		usartdiv = readl_relaxed(port->membase + ofs->brr);
		usartdiv = usartdiv & GENMASK(15, 0);
		over8 = cr1 & USART_CR1_OVER8;

		if (over8)
			usartdiv = usartdiv | (usartdiv & GENMASK(4, 0))
				   << USART_BRR_04_R_SHIFT;

		baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv);
		stm32_usart_config_reg_rs485(&cr1, &cr3,
					     rs485conf->delay_rts_before_send,
					     rs485conf->delay_rts_after_send,
					     baud);

		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			cr3 &= ~USART_CR3_DEP;
			rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
		} else {
			cr3 |= USART_CR3_DEP;
			rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
		}

		writel_relaxed(cr3, port->membase + ofs->cr3);
		writel_relaxed(cr1, port->membase + ofs->cr1);
	} else {
		stm32_usart_clr_bits(port, ofs->cr3,
				     USART_CR3_DEM | USART_CR3_DEP);
		stm32_usart_clr_bits(port, ofs->cr1,
				     USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
	}

	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));

	return 0;
}

static int stm32_usart_init_rs485(struct uart_port *port,
				  struct platform_device *pdev)
{
	struct serial_rs485 *rs485conf = &port->rs485;

	rs485conf->flags = 0;
	rs485conf->delay_rts_before_send = 0;
	rs485conf->delay_rts_after_send = 0;

	if (!pdev->dev.of_node)
		return -ENODEV;

	return uart_get_rs485_mode(port);
}

static bool stm32_usart_rx_dma_enabled(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (!stm32_port->rx_ch)
		return false;

	return !!(readl_relaxed(port->membase + ofs->cr3) & USART_CR3_DMAR);
}

/* Return true when data is pending (in PIO mode), and false when no data is pending. */
static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	*sr = readl_relaxed(port->membase + ofs->isr);
	/* Get pending characters in RDR or FIFO */
	if (*sr & USART_SR_RXNE) {
		/* Get all pending characters from the RDR or the FIFO when using interrupts */
		if (!stm32_usart_rx_dma_enabled(port))
			return true;

		/* Handle only RX data errors when using DMA */
		if (*sr & USART_SR_ERR_MASK)
			return true;
	}

	return false;
}

static unsigned long stm32_usart_get_char_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long c;

	c = readl_relaxed(port->membase + ofs->rdr);
	/* Apply RDR data mask */
	c &= stm32_port->rdr_mask;

	return c;
}

static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long c;
	unsigned int size = 0;
	u32 sr;
	char flag;

	while (stm32_usart_pending_rx_pio(port, &sr)) {
		sr |= USART_SR_DUMMY_RX;
		flag = TTY_NORMAL;

		/*
		 * Status bits have to be cleared before reading the RDR:
		 * In FIFO mode, reading the RDR will pop the next data
		 * (if any) along with its status bits into the SR.
		 * Not doing so leads to misalignment between RDR and SR,
		 * and clears status bits of the next rx data.
		 *
		 * Clear errors flags for stm32f7 and stm32h7 compatible
		 * devices. On stm32f4 compatible devices, the error bit is
		 * cleared by the sequence [read SR - read DR].
		 */
		if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
			writel_relaxed(sr & USART_SR_ERR_MASK,
				       port->membase + ofs->icr);

		c = stm32_usart_get_char_pio(port);
		port->icount.rx++;
		size++;
		if (sr & USART_SR_ERR_MASK) {
			if (sr & USART_SR_ORE) {
				port->icount.overrun++;
			} else if (sr & USART_SR_PE) {
				port->icount.parity++;
			} else if (sr & USART_SR_FE) {
				/* Break detection if character is null */
				if (!c) {
					port->icount.brk++;
					if (uart_handle_break(port))
						continue;
				} else {
					port->icount.frame++;
				}
			}

			sr &= port->read_status_mask;

			if (sr & USART_SR_PE) {
				flag = TTY_PARITY;
			} else if (sr & USART_SR_FE) {
				if (!c)
					flag = TTY_BREAK;
				else
					flag = TTY_FRAME;
			}
		}

		if (uart_prepare_sysrq_char(port, c))
			continue;
		uart_insert_char(port, sr, USART_SR_ORE, c, flag);
	}

	return size;
}

static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct tty_port *ttyport = &stm32_port->port.state->port;
	unsigned char *dma_start;
	int dma_count, i;

	dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res);

	/*
	 * Apply rdr_mask on buffer in order to mask parity bit.
	 * This loop is useless in cs8 mode because DMA copies only
	 * 8 bits and already ignores parity bit.
	 */
	if (!(stm32_port->rdr_mask == (BIT(8) - 1)))
		for (i = 0; i < dma_size; i++)
			*(dma_start + i) &= stm32_port->rdr_mask;

	dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size);
	port->icount.rx += dma_count;
	if (dma_count != dma_size)
		port->icount.buf_overrun++;
	stm32_port->last_res -= dma_count;
	if (stm32_port->last_res == 0)
		stm32_port->last_res = RX_BUF_L;
}

static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	unsigned int dma_size, size = 0;

	/* DMA buffer is configured in cyclic mode and handles the rollback of the buffer. */
	if (stm32_port->rx_dma_state.residue > stm32_port->last_res) {
		/* Conditional first part: from last_res to end of DMA buffer */
		dma_size = stm32_port->last_res;
		stm32_usart_push_buffer_dma(port, dma_size);
		size = dma_size;
	}

	dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue;
	stm32_usart_push_buffer_dma(port, dma_size);
	size += dma_size;

	return size;
}

static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force_dma_flush)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	enum dma_status rx_dma_status;
	u32 sr;
	unsigned int size = 0;

	if (stm32_usart_rx_dma_enabled(port) || force_dma_flush) {
		rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
						    stm32_port->rx_ch->cookie,
						    &stm32_port->rx_dma_state);
		if (rx_dma_status == DMA_IN_PROGRESS) {
			/* Empty DMA buffer */
			size = stm32_usart_receive_chars_dma(port);
			sr = readl_relaxed(port->membase + ofs->isr);
			if (sr & USART_SR_ERR_MASK) {
				/* Disable DMA request line */
				stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);

				/* Switch to PIO mode to handle the errors */
				size += stm32_usart_receive_chars_pio(port);

				/* Switch back to DMA mode */
				stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
			}
		} else {
			/* Disable RX DMA */
			dmaengine_terminate_async(stm32_port->rx_ch);
			stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
			/* Fall back to interrupt mode */
			dev_dbg(port->dev, "DMA error, fallback to irq mode\n");
			size = stm32_usart_receive_chars_pio(port);
		}
	} else {
		size = stm32_usart_receive_chars_pio(port);
	}

	return size;
}

static void stm32_usart_tx_dma_complete(void *arg)
{
	struct uart_port *port = arg;
	struct stm32_port *stm32port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	unsigned long flags;

	dmaengine_terminate_async(stm32port->tx_ch);
	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
	stm32port->tx_dma_busy = false;

	/* Let's see if we have pending data to send */
	spin_lock_irqsave(&port->lock, flags);
	stm32_usart_transmit_chars(port);
	spin_unlock_irqrestore(&port->lock, flags);
}

static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/*
	 * Enables TX FIFO threshold irq when FIFO is enabled,
	 * or TX empty irq when FIFO is disabled
	 */
	if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
	else
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
}

static void stm32_usart_rx_dma_complete(void *arg)
{
	struct uart_port *port = arg;
	struct tty_port *tport = &port->state->port;
	unsigned int size;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	size = stm32_usart_receive_chars(port, false);
	uart_unlock_and_check_sysrq_irqrestore(port, flags);
	if (size)
		tty_flip_buffer_push(tport);
}

static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
	else
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
}

static void stm32_usart_transmit_chars_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;

	if (stm32_port->tx_dma_busy) {
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
		stm32_port->tx_dma_busy = false;
	}

	while (!uart_circ_empty(xmit)) {
		/* Check that TDR is empty before filling FIFO */
		if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
			break;
		writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
	}

	/* rely on TXE irq (mask or unmask) for sending remaining data */
	if (uart_circ_empty(xmit))
		stm32_usart_tx_interrupt_disable(port);
	else
		stm32_usart_tx_interrupt_enable(port);
}

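/*
 * The DMA TX path stages up to TX_BUF_L bytes from the circular xmit buffer
 * into the coherent tx_buf bounce buffer; a wrapped circular buffer is handled
 * with two copies (tail of the ring, then its head). If descriptor setup or
 * submission fails, the pending bytes are sent out in PIO mode instead.
 */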
static void stm32_usart_transmit_chars_dma(struct uart_port *port)
{
	struct stm32_port *stm32port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_async_tx_descriptor *desc = NULL;
	unsigned int count, i;

	if (stm32port->tx_dma_busy)
		return;

	stm32port->tx_dma_busy = true;

	count = uart_circ_chars_pending(xmit);

	if (count > TX_BUF_L)
		count = TX_BUF_L;

	if (xmit->tail < xmit->head) {
		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count);
	} else {
		size_t one = UART_XMIT_SIZE - xmit->tail;
		size_t two;

		if (one > count)
			one = count;
		two = count - one;

		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one);
		if (two)
			memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two);
	}

	desc = dmaengine_prep_slave_single(stm32port->tx_ch,
					   stm32port->tx_dma_buf,
					   count,
					   DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);

	if (!desc)
		goto fallback_err;

	desc->callback = stm32_usart_tx_dma_complete;
	desc->callback_param = port;

	/* Push current DMA TX transaction in the pending queue */
	if (dma_submit_error(dmaengine_submit(desc))) {
		/* dma not yet started, safe to free resources */
		dmaengine_terminate_async(stm32port->tx_ch);
		goto fallback_err;
	}

	/* Issue pending DMA TX requests */
	dma_async_issue_pending(stm32port->tx_ch);

	stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);

	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	port->icount.tx += count;
	return;

fallback_err:
	for (i = count; i > 0; i--)
		stm32_usart_transmit_chars_pio(port);
}

static void stm32_usart_transmit_chars(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;

	if (port->x_char) {
		if (stm32_port->tx_dma_busy)
			stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
		writel_relaxed(port->x_char, port->membase + ofs->tdr);
		port->x_char = 0;
		port->icount.tx++;
		if (stm32_port->tx_dma_busy)
			stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
		return;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		stm32_usart_tx_interrupt_disable(port);
		return;
	}

	if (ofs->icr == UNDEF_REG)
		stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC);
	else
		writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);

	if (stm32_port->tx_ch)
		stm32_usart_transmit_chars_dma(port);
	else
		stm32_usart_transmit_chars_pio(port);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit))
		stm32_usart_tx_interrupt_disable(port);
}

static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct tty_port *tport = &port->state->port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	u32 sr;
	unsigned int size;

	sr = readl_relaxed(port->membase + ofs->isr);

	if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
		writel_relaxed(USART_ICR_RTOCF,
			       port->membase + ofs->icr);

	if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
		/* Clear wake up flag and disable wake up interrupt */
		writel_relaxed(USART_ICR_WUCF,
			       port->membase + ofs->icr);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
		if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
			pm_wakeup_event(tport->tty->dev, 0);
	}

	/*
	 * RX errors in DMA mode have to be handled ASAP to avoid an overrun,
	 * as the DMA request line has been masked by HW and RX data are
	 * stacking up in the FIFO.
	 */
	if (!stm32_port->throttled) {
		if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_enabled(port)) ||
		    ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_enabled(port))) {
			spin_lock(&port->lock);
			size = stm32_usart_receive_chars(port, false);
			uart_unlock_and_check_sysrq(port);
			if (size)
				tty_flip_buffer_push(tport);
		}
	}

	if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
		spin_lock(&port->lock);
		stm32_usart_transmit_chars(port);
		spin_unlock(&port->lock);
	}

	if (stm32_usart_rx_dma_enabled(port))
		return IRQ_WAKE_THREAD;
	else
		return IRQ_HANDLED;
}

static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct tty_port *tport = &port->state->port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	unsigned int size;
	unsigned long flags;

	/* Receiver timeout irq for DMA RX */
	if (!stm32_port->throttled) {
		spin_lock_irqsave(&port->lock, flags);
		size = stm32_usart_receive_chars(port, false);
		uart_unlock_and_check_sysrq_irqrestore(port, flags);
		if (size)
			tty_flip_buffer_push(tport);
	}

	return IRQ_HANDLED;
}

static unsigned int stm32_usart_tx_empty(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
		return TIOCSER_TEMT;

	return 0;
}

static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE);
	else
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE);

	mctrl_gpio_set(stm32_port->gpios, mctrl);
}

static unsigned int stm32_usart_get_mctrl(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	unsigned int ret;

	/* This routine is used to get signals of: DCD, DSR, RI, and CTS */
	ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;

	return mctrl_gpio_get(stm32_port->gpios, &ret);
}

static void stm32_usart_enable_ms(struct uart_port *port)
{
	mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
}

static void stm32_usart_disable_ms(struct uart_port *port)
{
	mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
}

/* Transmit stop */
static void stm32_usart_stop_tx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct serial_rs485 *rs485conf = &port->rs485;

	stm32_usart_tx_interrupt_disable(port);

	if (rs485conf->flags & SER_RS485_ENABLED) {
		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			mctrl_gpio_set(stm32_port->gpios,
				       stm32_port->port.mctrl & ~TIOCM_RTS);
		} else {
			mctrl_gpio_set(stm32_port->gpios,
				       stm32_port->port.mctrl | TIOCM_RTS);
		}
	}
}

/* There are probably characters waiting to be transmitted. */
static void stm32_usart_start_tx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct serial_rs485 *rs485conf = &port->rs485;
	struct circ_buf *xmit = &port->state->xmit;

	if (uart_circ_empty(xmit))
		return;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			mctrl_gpio_set(stm32_port->gpios,
				       stm32_port->port.mctrl | TIOCM_RTS);
		} else {
			mctrl_gpio_set(stm32_port->gpios,
				       stm32_port->port.mctrl & ~TIOCM_RTS);
		}
	}

	stm32_usart_transmit_chars(port);
}

/* Flush the transmit buffer. */
static void stm32_usart_flush_buffer(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (stm32_port->tx_ch) {
		dmaengine_terminate_async(stm32_port->tx_ch);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
		stm32_port->tx_dma_busy = false;
	}
}

/* Throttle the remote when input buffer is about to overflow. */
static void stm32_usart_throttle(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Disable DMA request line if enabled, so the RX data gets queued into the FIFO.
	 * Hardware flow control is triggered when RX FIFO is full.
	 */
	if (stm32_usart_rx_dma_enabled(port))
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);

	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);

	stm32_port->throttled = true;
	spin_unlock_irqrestore(&port->lock, flags);
}

/* Unthrottle the remote, the input buffer can now accept data. */
static void stm32_usart_unthrottle(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);

	/*
	 * Switch back to DMA mode (re-enable DMA request line).
	 * Hardware flow control is stopped when FIFO is not full any more.
	 */
	if (stm32_port->rx_ch)
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);

	stm32_port->throttled = false;
	spin_unlock_irqrestore(&port->lock, flags);
}

/* Receive stop */
static void stm32_usart_stop_rx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/* Disable DMA request line. */
	if (stm32_port->rx_ch)
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);

	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
}

/* Handle breaks - ignored by us */
static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
{
}

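/*
 * RX DMA uses a single cyclic transfer covering the whole rx_buf, split into
 * RX_BUF_P sized periods; stm32_usart_rx_dma_complete() runs at each period
 * boundary and drains whatever the controller has written so far, while
 * last_res (reset to RX_BUF_L here) tracks the software read position as a
 * residue from the end of the buffer.
 */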
static int stm32_usart_start_rx_dma_cyclic(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct dma_async_tx_descriptor *desc;
	int ret;

	stm32_port->last_res = RX_BUF_L;
	/* Prepare a DMA cyclic transaction */
	desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch,
					 stm32_port->rx_dma_buf,
					 RX_BUF_L, RX_BUF_P,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(port->dev, "rx dma prep cyclic failed\n");
		return -ENODEV;
	}

	desc->callback = stm32_usart_rx_dma_complete;
	desc->callback_param = port;

	/* Push current DMA transaction in the pending queue */
	ret = dma_submit_error(dmaengine_submit(desc));
	if (ret) {
		dmaengine_terminate_sync(stm32_port->rx_ch);
		return ret;
	}

	/* Issue pending DMA requests */
	dma_async_issue_pending(stm32_port->rx_ch);

	/*
	 * DMA request line not re-enabled at resume when port is throttled.
	 * It will be re-enabled by unthrottle ops.
	 */
	if (!stm32_port->throttled)
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);

	return 0;
}

static int stm32_usart_startup(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	const char *name = to_platform_device(port->dev)->name;
	u32 val;
	int ret;

	ret = request_threaded_irq(port->irq, stm32_usart_interrupt,
				   stm32_usart_threaded_interrupt,
				   IRQF_ONESHOT | IRQF_NO_SUSPEND,
				   name, port);
	if (ret)
		return ret;

	if (stm32_port->swap) {
		val = readl_relaxed(port->membase + ofs->cr2);
		val |= USART_CR2_SWAP;
		writel_relaxed(val, port->membase + ofs->cr2);
	}

	/* RX FIFO Flush */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);

	if (stm32_port->rx_ch) {
		ret = stm32_usart_start_rx_dma_cyclic(port);
		if (ret) {
			free_irq(port->irq, port);
			return ret;
		}
	}

	/* RX enabling */
	val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
	stm32_usart_set_bits(port, ofs->cr1, val);

	return 0;
}

static void stm32_usart_shutdown(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	u32 val, isr;
	int ret;

	if (stm32_port->tx_dma_busy) {
		dmaengine_terminate_async(stm32_port->tx_ch);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
	}

	/* Disable modem control interrupts */
	stm32_usart_disable_ms(port);

	val = USART_CR1_TXEIE | USART_CR1_TE;
	val |= stm32_port->cr1_irq | USART_CR1_RE;
	val |= BIT(cfg->uart_enable_bit);
	if (stm32_port->fifoen)
		val |= USART_CR1_FIFOEN;

	ret = readl_relaxed_poll_timeout(port->membase + ofs->isr,
					 isr, (isr & USART_SR_TC),
					 10, 100000);

	/* Send the TC error message only when ISR_TC is not set */
	if (ret)
		dev_err(port->dev, "Transmission is not complete\n");

	/* Disable RX DMA. */
	if (stm32_port->rx_ch)
		dmaengine_terminate_async(stm32_port->rx_ch);

	/* flush RX & TX FIFO */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
			       port->membase + ofs->rqr);

	stm32_usart_clr_bits(port, ofs->cr1, val);

	free_irq(port->irq, port);
}

static void stm32_usart_set_termios(struct uart_port *port,
				    struct ktermios *termios,
				    struct ktermios *old)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	struct serial_rs485 *rs485conf = &port->rs485;
	unsigned int baud, bits;
	u32 usartdiv, mantissa, fraction, oversampling;
	tcflag_t cflag = termios->c_cflag;
	u32 cr1, cr2, cr3, isr;
	unsigned long flags;
	int ret;

	if (!stm32_port->hw_flow_control)
		cflag &= ~CRTSCTS;

	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);

	spin_lock_irqsave(&port->lock, flags);

	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
						isr, (isr & USART_SR_TC),
						10, 100000);

	/* Send the TC error message only when ISR_TC is not set. */
	if (ret)
		dev_err(port->dev, "Transmission is not complete\n");

	/* Stop serial port and reset value */
	writel_relaxed(0, port->membase + ofs->cr1);

	/* flush RX & TX FIFO */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
			       port->membase + ofs->rqr);

	cr1 = USART_CR1_TE | USART_CR1_RE;
	if (stm32_port->fifoen)
		cr1 |= USART_CR1_FIFOEN;
	cr2 = stm32_port->swap ? USART_CR2_SWAP : 0;

	/* Tx and RX FIFO configuration */
	cr3 = readl_relaxed(port->membase + ofs->cr3);
	cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
	if (stm32_port->fifoen) {
		if (stm32_port->txftcfg >= 0)
			cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT;
		if (stm32_port->rxftcfg >= 0)
			cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT;
	}

	if (cflag & CSTOPB)
		cr2 |= USART_CR2_STOP_2B;

	bits = tty_get_char_size(cflag);
	stm32_port->rdr_mask = (BIT(bits) - 1);

	if (cflag & PARENB) {
		bits++;
		cr1 |= USART_CR1_PCE;
	}

	/*
	 * Word length configuration:
	 * CS8 + parity, 9 bits word aka [M1:M0] = 0b01
	 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10
	 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
	 * M0 and M1 already cleared by cr1 initialization.
	 */
	if (bits == 9)
		cr1 |= USART_CR1_M0;
	else if ((bits == 7) && cfg->has_7bits_data)
		cr1 |= USART_CR1_M1;
	else if (bits != 8)
		dev_dbg(port->dev, "Unsupported data bits config: %u bits\n",
			bits);

	if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
				       (stm32_port->fifoen &&
					stm32_port->rxftcfg >= 0))) {
		if (cflag & CSTOPB)
			bits = bits + 3; /* 1 start bit + 2 stop bits */
		else
			bits = bits + 2; /* 1 start bit + 1 stop bit */

		/* RX timeout irq to occur after last stop bit + bits */
		stm32_port->cr1_irq = USART_CR1_RTOIE;
		writel_relaxed(bits, port->membase + ofs->rtor);
		cr2 |= USART_CR2_RTOEN;
		/*
		 * Enable fifo threshold irq in two cases, either when there is no DMA, or when
		 * wake up over usart, from low power until the DMA gets re-enabled by resume.
		 */
		stm32_port->cr3_irq = USART_CR3_RXFTIE;
	}

	cr1 |= stm32_port->cr1_irq;
	cr3 |= stm32_port->cr3_irq;

	if (cflag & PARODD)
		cr1 |= USART_CR1_PS;

	port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	if (cflag & CRTSCTS) {
		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
		cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
	}

	usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);

	/*
	 * The USART supports 16 or 8 times oversampling.
	 * By default we prefer 16 times oversampling, so that the receiver
	 * has a better tolerance to clock deviations.
	 * 8 times oversampling is only used to achieve higher speeds.
	 */
	if (usartdiv < 16) {
		oversampling = 8;
		cr1 |= USART_CR1_OVER8;
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
	} else {
		oversampling = 16;
		cr1 &= ~USART_CR1_OVER8;
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
	}

	mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
	fraction = usartdiv % oversampling;
	writel_relaxed(mantissa | fraction, port->membase + ofs->brr);

	uart_update_timeout(port, cflag, baud);

	port->read_status_mask = USART_SR_ORE;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= USART_SR_PE | USART_SR_FE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= USART_SR_FE;

	/* Characters to ignore */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask = USART_SR_PE | USART_SR_FE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= USART_SR_FE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= USART_SR_ORE;
	}

	/* Ignore all characters if CREAD is not set */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= USART_SR_DUMMY_RX;

	if (stm32_port->rx_ch) {
		/*
		 * Setup DMA to collect only valid data and enable error irqs.
		 * This also enables break reception when using DMA.
		 */
		cr1 |= USART_CR1_PEIE;
		cr3 |= USART_CR3_EIE;
		cr3 |= USART_CR3_DMAR;
		cr3 |= USART_CR3_DDRE;
	}

	if (rs485conf->flags & SER_RS485_ENABLED) {
		stm32_usart_config_reg_rs485(&cr1, &cr3,
					     rs485conf->delay_rts_before_send,
					     rs485conf->delay_rts_after_send,
					     baud);
		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			cr3 &= ~USART_CR3_DEP;
			rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
		} else {
			cr3 |= USART_CR3_DEP;
			rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
		}

	} else {
		cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP);
		cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
	}

	/* Configure wake up from low power on start bit detection */
	if (stm32_port->wakeup_src) {
		cr3 &= ~USART_CR3_WUS_MASK;
		cr3 |= USART_CR3_WUS_START_BIT;
	}

	writel_relaxed(cr3, port->membase + ofs->cr3);
	writel_relaxed(cr2, port->membase + ofs->cr2);
	writel_relaxed(cr1, port->membase + ofs->cr1);

	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
	spin_unlock_irqrestore(&port->lock, flags);

	/* Handle modem control interrupts */
	if (UART_ENABLE_MS(port, termios->c_cflag))
		stm32_usart_enable_ms(port);
	else
		stm32_usart_disable_ms(port);
}

static const char *stm32_usart_type(struct uart_port *port)
{
	return (port->type == PORT_STM32) ? DRIVER_NAME : NULL;
}

static void stm32_usart_release_port(struct uart_port *port)
{
}

static int stm32_usart_request_port(struct uart_port *port)
{
	return 0;
}

static void stm32_usart_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE)
		port->type = PORT_STM32;
}

static int
stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	/* No user changeable parameters */
	return -EINVAL;
}

static void stm32_usart_pm(struct uart_port *port, unsigned int state,
			   unsigned int oldstate)
{
	struct stm32_port *stm32port = container_of(port,
						    struct stm32_port, port);
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32port->info->cfg;
	unsigned long flags;

	switch (state) {
	case UART_PM_STATE_ON:
		pm_runtime_get_sync(port->dev);
		break;
	case UART_PM_STATE_OFF:
		spin_lock_irqsave(&port->lock, flags);
		stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
		spin_unlock_irqrestore(&port->lock, flags);
		pm_runtime_put_sync(port->dev);
		break;
	}
}

static const struct uart_ops stm32_uart_ops = {
	.tx_empty = stm32_usart_tx_empty,
	.set_mctrl = stm32_usart_set_mctrl,
	.get_mctrl = stm32_usart_get_mctrl,
	.stop_tx = stm32_usart_stop_tx,
	.start_tx = stm32_usart_start_tx,
	.throttle = stm32_usart_throttle,
	.unthrottle = stm32_usart_unthrottle,
	.stop_rx = stm32_usart_stop_rx,
	.enable_ms = stm32_usart_enable_ms,
	.break_ctl = stm32_usart_break_ctl,
	.startup = stm32_usart_startup,
	.shutdown = stm32_usart_shutdown,
	.flush_buffer = stm32_usart_flush_buffer,
	.set_termios = stm32_usart_set_termios,
	.pm = stm32_usart_pm,
	.type = stm32_usart_type,
	.release_port = stm32_usart_release_port,
	.request_port = stm32_usart_request_port,
	.config_port = stm32_usart_config_port,
	.verify_port = stm32_usart_verify_port,
};

/*
 * STM32H7 RX & TX FIFO threshold configuration (CR3 RXFTCFG / TXFTCFG)
 * Note: 1 isn't a valid value in RXFTCFG / TXFTCFG. In this case,
 * RXNEIE / TXEIE can be used instead of threshold irqs: RXFTIE / TXFTIE.
 * So, RXFTCFG / TXFTCFG bitfields values are encoded as array index + 1.
 */
static const u32 stm32h7_usart_fifo_thresh_cfg[] = { 1, 2, 4, 8, 12, 14, 16 };

static void stm32_usart_get_ftcfg(struct platform_device *pdev, const char *p,
				  int *ftcfg)
{
	u32 bytes, i;

	/* DT option to get RX & TX FIFO threshold (default to 8 bytes) */
	if (of_property_read_u32(pdev->dev.of_node, p, &bytes))
		bytes = 8;

	for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++)
		if (stm32h7_usart_fifo_thresh_cfg[i] >= bytes)
			break;
	if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg))
		i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1;

	dev_dbg(&pdev->dev, "%s set to %d bytes\n", p,
		stm32h7_usart_fifo_thresh_cfg[i]);

	/* Provide FIFO threshold ftcfg (1 is invalid: threshold irq unused) */
	if (i)
		*ftcfg = i - 1;
	else
		*ftcfg = -EINVAL;
}

static void stm32_usart_deinit_port(struct stm32_port *stm32port)
{
	clk_disable_unprepare(stm32port->clk);
}

static int stm32_usart_init_port(struct stm32_port *stm32port,
				 struct platform_device *pdev)
{
	struct uart_port *port = &stm32port->port;
	struct resource *res;
	int ret, irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	port->iotype = UPIO_MEM;
	port->flags = UPF_BOOT_AUTOCONF;
	port->ops = &stm32_uart_ops;
	port->dev = &pdev->dev;
	port->fifosize = stm32port->info->cfg.fifosize;
	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
	port->irq = irq;
	port->rs485_config = stm32_usart_config_rs485;

	ret = stm32_usart_init_rs485(port, pdev);
	if (ret)
		return ret;

	stm32port->wakeup_src = stm32port->info->cfg.has_wakeup &&
		of_property_read_bool(pdev->dev.of_node, "wakeup-source");

	stm32port->swap = stm32port->info->cfg.has_swap &&
		of_property_read_bool(pdev->dev.of_node, "rx-tx-swap");

	stm32port->fifoen = stm32port->info->cfg.has_fifo;
	if (stm32port->fifoen) {
		stm32_usart_get_ftcfg(pdev, "rx-threshold",
				      &stm32port->rxftcfg);
		stm32_usart_get_ftcfg(pdev, "tx-threshold",
				      &stm32port->txftcfg);
	}

	port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(port->membase))
		return PTR_ERR(port->membase);
	port->mapbase = res->start;

	spin_lock_init(&port->lock);

	stm32port->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(stm32port->clk))
		return PTR_ERR(stm32port->clk);

	/* Ensure that clk rate is correct by enabling the clk */
	ret = clk_prepare_enable(stm32port->clk);
	if (ret)
		return ret;

	stm32port->port.uartclk = clk_get_rate(stm32port->clk);
	if (!stm32port->port.uartclk) {
		ret = -EINVAL;
		goto err_clk;
	}

	stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0);
	if (IS_ERR(stm32port->gpios)) {
		ret = PTR_ERR(stm32port->gpios);
		goto err_clk;
	}

	/*
	 * Both CTS/RTS gpios and "st,hw-flow-ctrl" (deprecated) or "uart-has-rtscts"
	 * properties should not be specified at the same time.
	 */
	if (stm32port->hw_flow_control) {
		if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) ||
		    mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) {
			dev_err(&pdev->dev, "Conflicting RTS/CTS config\n");
			ret = -EINVAL;
			goto err_clk;
		}
	}

	return ret;

err_clk:
	clk_disable_unprepare(stm32port->clk);

	return ret;
}

static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	int id;

	if (!np)
		return NULL;

	id = of_alias_get_id(np, "serial");
	if (id < 0) {
		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id);
		return NULL;
	}

	if (WARN_ON(id >= STM32_MAX_PORTS))
		return NULL;

	stm32_ports[id].hw_flow_control =
		of_property_read_bool(np, "st,hw-flow-ctrl") /* deprecated */ ||
		of_property_read_bool(np, "uart-has-rtscts");
	stm32_ports[id].port.line = id;
	stm32_ports[id].cr1_irq = USART_CR1_RXNEIE;
	stm32_ports[id].cr3_irq = 0;
	stm32_ports[id].last_res = RX_BUF_L;
	return &stm32_ports[id];
}

#ifdef CONFIG_OF
static const struct of_device_id stm32_match[] = {
	{ .compatible = "st,stm32-uart", .data = &stm32f4_info},
	{ .compatible = "st,stm32f7-uart", .data = &stm32f7_info},
	{ .compatible = "st,stm32h7-uart", .data = &stm32h7_info},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_match);
#endif

static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port,
					 struct platform_device *pdev)
{
	if (stm32port->rx_buf)
		dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf,
				  stm32port->rx_dma_buf);
}

static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
				       struct platform_device *pdev)
{
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct uart_port *port = &stm32port->port;
	struct device *dev = &pdev->dev;
	struct dma_slave_config config;
	int ret;

	/*
	 * Using DMA and threaded handler for the console could lead to
	 * deadlocks.
	 */
	if (uart_console(port))
		return -ENODEV;

	stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L,
					       &stm32port->rx_dma_buf,
					       GFP_KERNEL);
	if (!stm32port->rx_buf)
		return -ENOMEM;

	/* Configure DMA channel */
	memset(&config, 0, sizeof(config));
	config.src_addr = port->mapbase + ofs->rdr;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	ret = dmaengine_slave_config(stm32port->rx_ch, &config);
	if (ret < 0) {
		dev_err(dev, "rx dma channel config failed\n");
		stm32_usart_of_dma_rx_remove(stm32port, pdev);
		return ret;
	}

	return 0;
}

static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port,
					 struct platform_device *pdev)
{
	if (stm32port->tx_buf)
		dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf,
				  stm32port->tx_dma_buf);
}

static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
				       struct platform_device *pdev)
{
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct uart_port *port = &stm32port->port;
	struct device *dev = &pdev->dev;
	struct dma_slave_config config;
	int ret;

	stm32port->tx_dma_busy = false;

	stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L,
					       &stm32port->tx_dma_buf,
					       GFP_KERNEL);
	if (!stm32port->tx_buf)
		return -ENOMEM;

	/* Configure DMA channel */
	memset(&config, 0, sizeof(config));
	config.dst_addr = port->mapbase + ofs->tdr;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	ret = dmaengine_slave_config(stm32port->tx_ch, &config);
	if (ret < 0) {
		dev_err(dev, "tx dma channel config failed\n");
		stm32_usart_of_dma_tx_remove(stm32port, pdev);
		return ret;
	}

	return 0;
}

static int stm32_usart_serial_probe(struct platform_device *pdev)
{
	struct stm32_port *stm32port;
	int ret;

	stm32port = stm32_usart_of_get_port(pdev);
	if (!stm32port)
		return -ENODEV;

	stm32port->info = of_device_get_match_data(&pdev->dev);
	if (!stm32port->info)
		return -EINVAL;

	ret = stm32_usart_init_port(stm32port, pdev);
	if (ret)
		return ret;

	if (stm32port->wakeup_src) {
		device_set_wakeup_capable(&pdev->dev, true);
		ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq);
		if (ret)
			goto err_deinit_port;
	}

	stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx");
	if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_wakeirq;
	}
	/* Fall back to interrupt mode for any non-deferral error */
	if (IS_ERR(stm32port->rx_ch))
		stm32port->rx_ch = NULL;

	stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx");
	if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_dma_rx;
	}
	/* Fall back to interrupt mode for any non-deferral error */
	if (IS_ERR(stm32port->tx_ch))
		stm32port->tx_ch = NULL;

	if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) {
		/* Fall back to interrupt mode */
		dma_release_channel(stm32port->rx_ch);
		stm32port->rx_ch = NULL;
	}

	if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) {
		/* Fall back to interrupt mode */
		dma_release_channel(stm32port->tx_ch);
		stm32port->tx_ch = NULL;
	}

	if (!stm32port->rx_ch)
		dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n");
	if (!stm32port->tx_ch)
		dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n");

	platform_set_drvdata(pdev, &stm32port->port);

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
	if (ret)
		goto err_port;

	pm_runtime_put_sync(&pdev->dev);

	return 0;

err_port:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	if (stm32port->tx_ch) {
		stm32_usart_of_dma_tx_remove(stm32port, pdev);
		dma_release_channel(stm32port->tx_ch);
	}

	if (stm32port->rx_ch)
		stm32_usart_of_dma_rx_remove(stm32port, pdev);

err_dma_rx:
	if (stm32port->rx_ch)
		dma_release_channel(stm32port->rx_ch);

err_wakeirq:
	if (stm32port->wakeup_src)
		dev_pm_clear_wake_irq(&pdev->dev);

err_deinit_port:
	if (stm32port->wakeup_src)
		device_set_wakeup_capable(&pdev->dev, false);

	stm32_usart_deinit_port(stm32port);

	return ret;
}

static int stm32_usart_serial_remove(struct platform_device *pdev)
{
	struct uart_port *port = platform_get_drvdata(pdev);
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	int err;
	u32 cr3;

	pm_runtime_get_sync(&pdev->dev);
	err = uart_remove_one_port(&stm32_usart_driver, port);
	if (err)
		return err;

	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE);
	cr3 = readl_relaxed(port->membase + ofs->cr3);
	cr3 &= ~USART_CR3_EIE;
	cr3 &= ~USART_CR3_DMAR;
	cr3 &= ~USART_CR3_DDRE;
	writel_relaxed(cr3, port->membase + ofs->cr3);

	if (stm32_port->tx_ch) {
		stm32_usart_of_dma_tx_remove(stm32_port, pdev);
		dma_release_channel(stm32_port->tx_ch);
	}

	if (stm32_port->rx_ch) {
		stm32_usart_of_dma_rx_remove(stm32_port, pdev);
		dma_release_channel(stm32_port->rx_ch);
	}

	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

	if (stm32_port->wakeup_src) {
		dev_pm_clear_wake_irq(&pdev->dev);
		device_init_wakeup(&pdev->dev, false);
	}

	stm32_usart_deinit_port(stm32_port);

	return 0;
}

#ifdef CONFIG_SERIAL_STM32_CONSOLE
static void stm32_usart_console_putchar(struct uart_port *port, int ch)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	while (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
		cpu_relax();

	writel_relaxed(ch, port->membase + ofs->tdr);
}

static void stm32_usart_console_write(struct console *co, const char *s,
				      unsigned int cnt)
{
	struct uart_port *port = &stm32_ports[co->index].port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	unsigned long flags;
	u32 old_cr1, new_cr1;
	int locked = 1;

	if (oops_in_progress)
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	/* Save and disable interrupts, enable the transmitter */
	old_cr1 = readl_relaxed(port->membase + ofs->cr1);
	new_cr1 = old_cr1 & ~USART_CR1_IE_MASK;
	new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit);
	writel_relaxed(new_cr1, port->membase + ofs->cr1);

	uart_console_write(port, s, cnt, stm32_usart_console_putchar);

	/* Restore interrupt state */
	writel_relaxed(old_cr1, port->membase + ofs->cr1);

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);
}

static int stm32_usart_console_setup(struct console *co, char *options)
{
	struct stm32_port *stm32port;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	if (co->index >= STM32_MAX_PORTS)
		return -ENODEV;

	stm32port = &stm32_ports[co->index];

	/*
	 * This driver does not support early console initialization
	 * (use ARM early printk support instead), so we only expect
	 * this to be called during the uart port registration when the
	 * driver gets probed and the port should be mapped at that point.
	 */
	if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
		return -ENXIO;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(&stm32port->port, co, baud, parity, bits, flow);
}

static struct console stm32_console = {
	.name = STM32_SERIAL_NAME,
	.device = uart_console_device,
	.write = stm32_usart_console_write,
	.setup = stm32_usart_console_setup,
	.flags = CON_PRINTBUFFER,
	.index = -1,
	.data = &stm32_usart_driver,
};

#define STM32_SERIAL_CONSOLE (&stm32_console)

#else
#define STM32_SERIAL_CONSOLE NULL
#endif /* CONFIG_SERIAL_STM32_CONSOLE */

static struct uart_driver stm32_usart_driver = {
	.driver_name = DRIVER_NAME,
	.dev_name = STM32_SERIAL_NAME,
	.major = 0,
	.minor = 0,
	.nr = STM32_MAX_PORTS,
	.cons = STM32_SERIAL_CONSOLE,
};

static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
							bool enable)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct tty_port *tport = &port->state->port;
	int ret;
	unsigned int size;
	unsigned long flags;

	if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
		return 0;

	/*
	 * Enable low-power wake-up and wake-up irq if argument is set to
	 * "enable", disable low-power wake-up and wake-up irq otherwise
	 */
	if (enable) {
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);

		/*
		 * When DMA is used for reception, it must be disabled before
		 * entering low-power mode and re-enabled when exiting from
		 * low-power mode.
		 */
		if (stm32_port->rx_ch) {
			spin_lock_irqsave(&port->lock, flags);
			/* Avoid race with RX IRQ when DMAR is cleared */
			stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
			/* Poll data from DMA RX buffer if any */
			size = stm32_usart_receive_chars(port, true);
			dmaengine_terminate_async(stm32_port->rx_ch);
			uart_unlock_and_check_sysrq_irqrestore(port, flags);
			if (size)
				tty_flip_buffer_push(tport);
		}

		/* Poll data from RX FIFO if any */
		stm32_usart_receive_chars(port, false);
	} else {
		if (stm32_port->rx_ch) {
			ret = stm32_usart_start_rx_dma_cyclic(port);
			if (ret)
				return ret;
		}

		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
	}

	return 0;
}

static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int ret;

	uart_suspend_port(&stm32_usart_driver, port);

	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
		ret = stm32_usart_serial_en_wakeup(port, true);
		if (ret)
			return ret;
	}

	/*
	 * When "no_console_suspend" is enabled, keep the pinctrl default state
	 * and rely on bootloader stage to restore this state upon resume.
	 * Otherwise, apply the idle or sleep states depending on wakeup
	 * capabilities.
	 */
	if (console_suspend_enabled || !uart_console(port)) {
		if (device_may_wakeup(dev) || device_wakeup_path(dev))
			pinctrl_pm_select_idle_state(dev);
		else
			pinctrl_pm_select_sleep_state(dev);
	}

	return 0;
}

static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int ret;

	pinctrl_pm_select_default_state(dev);

	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
		ret = stm32_usart_serial_en_wakeup(port, false);
		if (ret)
			return ret;
	}

	return uart_resume_port(&stm32_usart_driver, port);
}

static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
						    struct stm32_port, port);

	clk_disable_unprepare(stm32port->clk);

	return 0;
}

static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
						    struct stm32_port, port);

	return clk_prepare_enable(stm32port->clk);
}

static const struct dev_pm_ops stm32_serial_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
			   stm32_usart_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
				stm32_usart_serial_resume)
};

static struct platform_driver stm32_serial_driver = {
	.probe = stm32_usart_serial_probe,
	.remove = stm32_usart_serial_remove,
	.driver = {
		.name = DRIVER_NAME,
		.pm = &stm32_serial_pm_ops,
		.of_match_table = of_match_ptr(stm32_match),
	},
};

static int __init stm32_usart_init(void)
{
	static char banner[] __initdata = "STM32 USART driver initialized";
	int ret;

	pr_info("%s\n", banner);

	ret = uart_register_driver(&stm32_usart_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&stm32_serial_driver);
	if (ret)
		uart_unregister_driver(&stm32_usart_driver);

	return ret;
}

static void __exit stm32_usart_exit(void)
{
	platform_driver_unregister(&stm32_serial_driver);
	uart_unregister_driver(&stm32_usart_driver);
}

module_init(stm32_usart_init);
module_exit(stm32_usart_exit);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
MODULE_LICENSE("GPL v2");