1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) Maxime Coquelin 2015 4 * Copyright (C) STMicroelectronics SA 2017 5 * Authors: Maxime Coquelin <mcoquelin.stm32@gmail.com> 6 * Gerald Baeza <gerald.baeza@foss.st.com> 7 * Erwan Le Ray <erwan.leray@foss.st.com> 8 * 9 * Inspired by st-asc.c from STMicroelectronics (c) 10 */ 11 12 #include <linux/clk.h> 13 #include <linux/console.h> 14 #include <linux/delay.h> 15 #include <linux/dma-direction.h> 16 #include <linux/dmaengine.h> 17 #include <linux/dma-mapping.h> 18 #include <linux/io.h> 19 #include <linux/iopoll.h> 20 #include <linux/irq.h> 21 #include <linux/module.h> 22 #include <linux/of.h> 23 #include <linux/of_platform.h> 24 #include <linux/pinctrl/consumer.h> 25 #include <linux/platform_device.h> 26 #include <linux/pm_runtime.h> 27 #include <linux/pm_wakeirq.h> 28 #include <linux/serial_core.h> 29 #include <linux/serial.h> 30 #include <linux/spinlock.h> 31 #include <linux/sysrq.h> 32 #include <linux/tty_flip.h> 33 #include <linux/tty.h> 34 35 #include "serial_mctrl_gpio.h" 36 #include "stm32-usart.h" 37 38 39 /* Register offsets */ 40 static struct stm32_usart_info __maybe_unused stm32f4_info = { 41 .ofs = { 42 .isr = 0x00, 43 .rdr = 0x04, 44 .tdr = 0x04, 45 .brr = 0x08, 46 .cr1 = 0x0c, 47 .cr2 = 0x10, 48 .cr3 = 0x14, 49 .gtpr = 0x18, 50 .rtor = UNDEF_REG, 51 .rqr = UNDEF_REG, 52 .icr = UNDEF_REG, 53 }, 54 .cfg = { 55 .uart_enable_bit = 13, 56 .has_7bits_data = false, 57 .fifosize = 1, 58 } 59 }; 60 61 static struct stm32_usart_info __maybe_unused stm32f7_info = { 62 .ofs = { 63 .cr1 = 0x00, 64 .cr2 = 0x04, 65 .cr3 = 0x08, 66 .brr = 0x0c, 67 .gtpr = 0x10, 68 .rtor = 0x14, 69 .rqr = 0x18, 70 .isr = 0x1c, 71 .icr = 0x20, 72 .rdr = 0x24, 73 .tdr = 0x28, 74 }, 75 .cfg = { 76 .uart_enable_bit = 0, 77 .has_7bits_data = true, 78 .has_swap = true, 79 .fifosize = 1, 80 } 81 }; 82 83 static struct stm32_usart_info __maybe_unused stm32h7_info = { 84 .ofs = { 85 .cr1 = 0x00, 86 .cr2 = 0x04, 87 .cr3 = 0x08, 88 .brr = 0x0c, 89 .gtpr = 0x10, 90 .rtor = 0x14, 91 .rqr = 0x18, 92 .isr = 0x1c, 93 .icr = 0x20, 94 .rdr = 0x24, 95 .tdr = 0x28, 96 }, 97 .cfg = { 98 .uart_enable_bit = 0, 99 .has_7bits_data = true, 100 .has_swap = true, 101 .has_wakeup = true, 102 .has_fifo = true, 103 .fifosize = 16, 104 } 105 }; 106 107 static void stm32_usart_stop_tx(struct uart_port *port); 108 static void stm32_usart_transmit_chars(struct uart_port *port); 109 static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch); 110 111 static inline struct stm32_port *to_stm32_port(struct uart_port *port) 112 { 113 return container_of(port, struct stm32_port, port); 114 } 115 116 static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits) 117 { 118 u32 val; 119 120 val = readl_relaxed(port->membase + reg); 121 val |= bits; 122 writel_relaxed(val, port->membase + reg); 123 } 124 125 static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits) 126 { 127 u32 val; 128 129 val = readl_relaxed(port->membase + reg); 130 val &= ~bits; 131 writel_relaxed(val, port->membase + reg); 132 } 133 134 static unsigned int stm32_usart_tx_empty(struct uart_port *port) 135 { 136 struct stm32_port *stm32_port = to_stm32_port(port); 137 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 138 139 if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC) 140 return TIOCSER_TEMT; 141 142 return 0; 143 } 144 145 static void stm32_usart_rs485_rts_enable(struct uart_port *port) 146 { 147 struct stm32_port *stm32_port = 
to_stm32_port(port);
	struct serial_rs485 *rs485conf = &port->rs485;

	if (stm32_port->hw_flow_control ||
	    !(rs485conf->flags & SER_RS485_ENABLED))
		return;

	if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
		mctrl_gpio_set(stm32_port->gpios,
			       stm32_port->port.mctrl | TIOCM_RTS);
	} else {
		mctrl_gpio_set(stm32_port->gpios,
			       stm32_port->port.mctrl & ~TIOCM_RTS);
	}
}

static void stm32_usart_rs485_rts_disable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct serial_rs485 *rs485conf = &port->rs485;

	if (stm32_port->hw_flow_control ||
	    !(rs485conf->flags & SER_RS485_ENABLED))
		return;

	if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
		mctrl_gpio_set(stm32_port->gpios,
			       stm32_port->port.mctrl & ~TIOCM_RTS);
	} else {
		mctrl_gpio_set(stm32_port->gpios,
			       stm32_port->port.mctrl | TIOCM_RTS);
	}
}

static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
					 u32 delay_DDE, u32 baud)
{
	u32 rs485_deat_dedt;
	u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT);
	bool over8;

	*cr3 |= USART_CR3_DEM;
	over8 = *cr1 & USART_CR1_OVER8;

	*cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);

	if (over8)
		rs485_deat_dedt = delay_ADE * baud * 8;
	else
		rs485_deat_dedt = delay_ADE * baud * 16;

	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
			  rs485_deat_dedt_max : rs485_deat_dedt;
	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) &
			   USART_CR1_DEAT_MASK;
	*cr1 |= rs485_deat_dedt;

	if (over8)
		rs485_deat_dedt = delay_DDE * baud * 8;
	else
		rs485_deat_dedt = delay_DDE * baud * 16;

	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
			  rs485_deat_dedt_max : rs485_deat_dedt;
	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) &
			   USART_CR1_DEDT_MASK;
	*cr1 |= rs485_deat_dedt;
}

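/*
 * Illustration of the DEAT/DEDT encoding above (illustrative numbers only):
 * the RS485 delays from struct serial_rs485 are given in milliseconds, so
 * with delay_ADE = 1 ms, baud = 115200 and 16x oversampling the code
 * computes 1 * 115200 * 16 / 1000 = 1843 sample-time units, which is then
 * clamped to rs485_deat_dedt_max (31, assuming the usual 5-bit DEAT/DEDT
 * fields) before being shifted into CR1.
 */
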
static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *termios,
				    struct serial_rs485 *rs485conf)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	u32 usartdiv, baud, cr1, cr3;
	bool over8;

	stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));

	if (rs485conf->flags & SER_RS485_ENABLED) {
		cr1 = readl_relaxed(port->membase + ofs->cr1);
		cr3 = readl_relaxed(port->membase + ofs->cr3);
		usartdiv = readl_relaxed(port->membase + ofs->brr);
		usartdiv = usartdiv & GENMASK(15, 0);
		over8 = cr1 & USART_CR1_OVER8;

		if (over8)
			usartdiv = usartdiv | (usartdiv & GENMASK(4, 0))
				   << USART_BRR_04_R_SHIFT;

		baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv);
		stm32_usart_config_reg_rs485(&cr1, &cr3,
					     rs485conf->delay_rts_before_send,
					     rs485conf->delay_rts_after_send,
					     baud);

		if (rs485conf->flags & SER_RS485_RTS_ON_SEND)
			cr3 &= ~USART_CR3_DEP;
		else
			cr3 |= USART_CR3_DEP;

		writel_relaxed(cr3, port->membase + ofs->cr3);
		writel_relaxed(cr1, port->membase + ofs->cr1);

		rs485conf->flags |= SER_RS485_RX_DURING_TX;
	} else {
		stm32_usart_clr_bits(port, ofs->cr3,
				     USART_CR3_DEM | USART_CR3_DEP);
		stm32_usart_clr_bits(port, ofs->cr1,
				     USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
	}

	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));

	/* Adjust RTS polarity in case it's driven in software */
	if (stm32_usart_tx_empty(port))
		stm32_usart_rs485_rts_disable(port);
	else
		stm32_usart_rs485_rts_enable(port);

	return 0;
}

static int stm32_usart_init_rs485(struct uart_port *port,
				  struct platform_device *pdev)
{
	struct serial_rs485 *rs485conf = &port->rs485;

	rs485conf->flags = 0;
	rs485conf->delay_rts_before_send = 0;
	rs485conf->delay_rts_after_send = 0;

	if (!pdev->dev.of_node)
		return -ENODEV;

	return uart_get_rs485_mode(port);
}

static bool stm32_usart_rx_dma_started(struct stm32_port *stm32_port)
{
	return stm32_port->rx_ch ? stm32_port->rx_dma_busy : false;
}

static void stm32_usart_rx_dma_terminate(struct stm32_port *stm32_port)
{
	dmaengine_terminate_async(stm32_port->rx_ch);
	stm32_port->rx_dma_busy = false;
}

static int stm32_usart_dma_pause_resume(struct stm32_port *stm32_port,
					struct dma_chan *chan,
					enum dma_status expected_status,
					int dmaengine_pause_or_resume(struct dma_chan *),
					bool stm32_usart_xx_dma_started(struct stm32_port *),
					void stm32_usart_xx_dma_terminate(struct stm32_port *))
{
	struct uart_port *port = &stm32_port->port;
	enum dma_status dma_status;
	int ret;

	if (!stm32_usart_xx_dma_started(stm32_port))
		return -EPERM;

	dma_status = dmaengine_tx_status(chan, chan->cookie, NULL);
	if (dma_status != expected_status)
		return -EAGAIN;

	ret = dmaengine_pause_or_resume(chan);
	if (ret) {
		dev_err(port->dev, "DMA failed with error code: %d\n", ret);
		stm32_usart_xx_dma_terminate(stm32_port);
	}
	return ret;
}

static int stm32_usart_rx_dma_pause(struct stm32_port *stm32_port)
{
	return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
					    DMA_IN_PROGRESS, dmaengine_pause,
					    stm32_usart_rx_dma_started,
					    stm32_usart_rx_dma_terminate);
}

static int stm32_usart_rx_dma_resume(struct stm32_port *stm32_port)
{
	return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
					    DMA_PAUSED, dmaengine_resume,
					    stm32_usart_rx_dma_started,
					    stm32_usart_rx_dma_terminate);
}

/* Return true when data is pending (in pio mode), and false when no data is pending. */
static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	*sr = readl_relaxed(port->membase + ofs->isr);
	/* Get pending characters in RDR or FIFO */
	if (*sr & USART_SR_RXNE) {
		/* Get all pending characters from the RDR or the FIFO when using interrupts */
		if (!stm32_usart_rx_dma_started(stm32_port))
			return true;

		/* Handle only RX data errors when using DMA */
		if (*sr & USART_SR_ERR_MASK)
			return true;
	}

	return false;
}

static u8 stm32_usart_get_char_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long c;

	c = readl_relaxed(port->membase + ofs->rdr);
	/* Apply RDR data mask */
	c &= stm32_port->rdr_mask;

	return c;
}

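/*
 * Drain the RDR / RX FIFO by polling: for each pending character, clear the
 * error flags when an ICR register exists, account for overrun, parity,
 * framing and break conditions, and push the character to the tty layer
 * with the matching flag. Returns the number of characters read from the
 * hardware.
 */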
static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned int size = 0;
	u32 sr;
	u8 c, flag;

	while (stm32_usart_pending_rx_pio(port, &sr)) {
		sr |= USART_SR_DUMMY_RX;
		flag = TTY_NORMAL;

		/*
		 * Status bits have to be cleared before reading the RDR:
		 * In FIFO mode, reading the RDR will pop the next data
		 * (if any) along with its status bits into the SR.
		 * Not doing so leads to misalignment between RDR and SR,
		 * and clears the status bits of the next RX data.
		 *
		 * Clear error flags for stm32f7 and stm32h7 compatible
		 * devices. On stm32f4 compatible devices, the error bit is
		 * cleared by the sequence [read SR - read DR].
		 */
		if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
			writel_relaxed(sr & USART_SR_ERR_MASK,
				       port->membase + ofs->icr);

		c = stm32_usart_get_char_pio(port);
		port->icount.rx++;
		size++;
		if (sr & USART_SR_ERR_MASK) {
			if (sr & USART_SR_ORE) {
				port->icount.overrun++;
			} else if (sr & USART_SR_PE) {
				port->icount.parity++;
			} else if (sr & USART_SR_FE) {
				/* Break detection if character is null */
				if (!c) {
					port->icount.brk++;
					if (uart_handle_break(port))
						continue;
				} else {
					port->icount.frame++;
				}
			}

			sr &= port->read_status_mask;

			if (sr & USART_SR_PE) {
				flag = TTY_PARITY;
			} else if (sr & USART_SR_FE) {
				if (!c)
					flag = TTY_BREAK;
				else
					flag = TTY_FRAME;
			}
		}

		if (uart_prepare_sysrq_char(port, c))
			continue;
		uart_insert_char(port, sr, USART_SR_ORE, c, flag);
	}

	return size;
}

static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct tty_port *ttyport = &stm32_port->port.state->port;
	unsigned char *dma_start;
	int dma_count, i;

	dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res);

	/*
	 * Apply rdr_mask on the buffer in order to mask the parity bit.
	 * This loop is useless in cs8 mode because DMA copies only
	 * 8 bits and already ignores the parity bit.
	 */
	if (!(stm32_port->rdr_mask == (BIT(8) - 1)))
		for (i = 0; i < dma_size; i++)
			*(dma_start + i) &= stm32_port->rdr_mask;

	dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size);
	port->icount.rx += dma_count;
	if (dma_count != dma_size)
		port->icount.buf_overrun++;
	stm32_port->last_res -= dma_count;
	if (stm32_port->last_res == 0)
		stm32_port->last_res = RX_BUF_L;
}

static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	unsigned int dma_size, size = 0;

	/* DMA buffer is configured in cyclic mode and handles the rollback of the buffer. */
	if (stm32_port->rx_dma_state.residue > stm32_port->last_res) {
		/* Conditional first part: from last_res to end of DMA buffer */
		dma_size = stm32_port->last_res;
		stm32_usart_push_buffer_dma(port, dma_size);
		size = dma_size;
	}

	dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue;
	stm32_usart_push_buffer_dma(port, dma_size);
	size += dma_size;

	return size;
}

static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force_dma_flush)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	enum dma_status rx_dma_status;
	u32 sr;
	unsigned int size = 0;

	if (stm32_usart_rx_dma_started(stm32_port) || force_dma_flush) {
		rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
						    stm32_port->rx_ch->cookie,
						    &stm32_port->rx_dma_state);
		if (rx_dma_status == DMA_IN_PROGRESS ||
		    rx_dma_status == DMA_PAUSED) {
			/* Empty DMA buffer */
			size = stm32_usart_receive_chars_dma(port);
			sr = readl_relaxed(port->membase + ofs->isr);
			if (sr & USART_SR_ERR_MASK) {
				/* Disable DMA request line */
				stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);

				/* Switch to PIO mode to handle the errors */
				size += stm32_usart_receive_chars_pio(port);

				/* Switch back to DMA mode */
				stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
			}
		} else {
			/* Disable RX DMA */
			stm32_usart_rx_dma_terminate(stm32_port);
			/* Fall back to interrupt mode */
			dev_dbg(port->dev, "DMA error, fallback to irq mode\n");
			size = stm32_usart_receive_chars_pio(port);
		}
	} else {
		size = stm32_usart_receive_chars_pio(port);
	}

	return size;
}

static void stm32_usart_rx_dma_complete(void *arg)
{
	struct uart_port *port = arg;
	struct tty_port *tport = &port->state->port;
	unsigned int size;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	size = stm32_usart_receive_chars(port, false);
	uart_unlock_and_check_sysrq_irqrestore(port, flags);
	if (size)
		tty_flip_buffer_push(tport);
}

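/*
 * (Re)arm the cyclic RX DMA transfer: when a transfer is already in
 * progress (or can simply be resumed from the paused state) nothing is
 * done; otherwise a new cyclic descriptor covering the RX_BUF_L bytes of
 * the RX buffer, split in RX_BUF_P periods, is prepared, submitted and
 * issued, with stm32_usart_rx_dma_complete() as the callback.
 * Returns 0 on success or when the port is throttled.
 */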
static int stm32_usart_rx_dma_start_or_resume(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct dma_async_tx_descriptor *desc;
	enum dma_status rx_dma_status;
	int ret;

	if (stm32_port->throttled)
		return 0;

	if (stm32_port->rx_dma_busy) {
		rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
						    stm32_port->rx_ch->cookie,
						    NULL);
		if (rx_dma_status == DMA_IN_PROGRESS)
			return 0;

		if (rx_dma_status == DMA_PAUSED && !stm32_usart_rx_dma_resume(stm32_port))
			return 0;

		dev_err(port->dev, "DMA failed: status error.\n");
		stm32_usart_rx_dma_terminate(stm32_port);
	}

	stm32_port->rx_dma_busy = true;

	stm32_port->last_res = RX_BUF_L;
	/* Prepare a DMA cyclic transaction */
	desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch,
					 stm32_port->rx_dma_buf,
					 RX_BUF_L, RX_BUF_P,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(port->dev, "rx dma prep cyclic failed\n");
		stm32_port->rx_dma_busy = false;
		return -ENODEV;
	}

	desc->callback = stm32_usart_rx_dma_complete;
	desc->callback_param = port;

	/* Push current DMA transaction in the pending queue */
	ret = dma_submit_error(dmaengine_submit(desc));
	if (ret) {
		dmaengine_terminate_sync(stm32_port->rx_ch);
		stm32_port->rx_dma_busy = false;
		return ret;
	}

	/* Issue pending DMA requests */
	dma_async_issue_pending(stm32_port->rx_ch);

	return 0;
}

static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port)
{
	dmaengine_terminate_async(stm32_port->tx_ch);
	stm32_port->tx_dma_busy = false;
}

static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port)
{
	/*
	 * We cannot use dmaengine_tx_status() to know the status of the DMA
	 * transfer, as it does not tell whether the "dma complete" callback
	 * of the DMA transaction has run. So we prefer to use the
	 * "tx_dma_busy" flag to prevent two DMA transactions running at the
	 * same time.
	 */
	return stm32_port->tx_dma_busy;
}

static int stm32_usart_tx_dma_pause(struct stm32_port *stm32_port)
{
	return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch,
					    DMA_IN_PROGRESS, dmaengine_pause,
					    stm32_usart_tx_dma_started,
					    stm32_usart_tx_dma_terminate);
}

static int stm32_usart_tx_dma_resume(struct stm32_port *stm32_port)
{
	return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch,
					    DMA_PAUSED, dmaengine_resume,
					    stm32_usart_tx_dma_started,
					    stm32_usart_tx_dma_terminate);
}

static void stm32_usart_tx_dma_complete(void *arg)
{
	struct uart_port *port = arg;
	struct stm32_port *stm32port = to_stm32_port(port);
	unsigned long flags;

	stm32_usart_tx_dma_terminate(stm32port);

	/* Let's see if we have pending data to send */
	spin_lock_irqsave(&port->lock, flags);
	stm32_usart_transmit_chars(port);
	spin_unlock_irqrestore(&port->lock, flags);
}

static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/*
	 * Enable the TX FIFO threshold irq when the FIFO is enabled,
	 * or the TX empty irq when the FIFO is disabled.
	 */
	if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
	else
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
}

static void stm32_usart_tc_interrupt_enable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE);
}

static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
	else
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
}

static void stm32_usart_tc_interrupt_disable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE);
}

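/*
 * PIO transmit path: write bytes from the xmit circular buffer into the
 * TDR as long as the hardware reports room (TXE / TX FIFO not full), then
 * leave the TX interrupt enabled so the remaining data is sent from the
 * next interrupt, or disable it when the buffer is empty.
 */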
static void stm32_usart_transmit_chars_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;

	while (!uart_circ_empty(xmit)) {
		/* Check that TDR is empty before filling FIFO */
		if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
			break;
		writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr);
		uart_xmit_advance(port, 1);
	}

	/* rely on TXE irq (mask or unmask) for sending remaining data */
	if (uart_circ_empty(xmit))
		stm32_usart_tx_interrupt_disable(port);
	else
		stm32_usart_tx_interrupt_enable(port);
}

static void stm32_usart_transmit_chars_dma(struct uart_port *port)
{
	struct stm32_port *stm32port = to_stm32_port(port);
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_async_tx_descriptor *desc = NULL;
	unsigned int count;
	int ret;

	if (stm32_usart_tx_dma_started(stm32port)) {
		ret = stm32_usart_tx_dma_resume(stm32port);
		if (ret < 0 && ret != -EAGAIN)
			goto fallback_err;
		return;
	}

	count = uart_circ_chars_pending(xmit);

	if (count > TX_BUF_L)
		count = TX_BUF_L;

	if (xmit->tail < xmit->head) {
		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count);
	} else {
		size_t one = UART_XMIT_SIZE - xmit->tail;
		size_t two;

		if (one > count)
			one = count;
		two = count - one;

		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one);
		if (two)
			memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two);
	}
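
	/*
	 * Example of the two-part copy above (illustrative values, assuming
	 * UART_XMIT_SIZE is 4096): with xmit->tail = 4090 and 10 pending
	 * bytes, "one" = 4096 - 4090 = 6 bytes are copied from the end of
	 * the circular buffer and "two" = 4 bytes from its beginning, so
	 * the DMA bounce buffer tx_buf receives the data linearly.
	 */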

	desc = dmaengine_prep_slave_single(stm32port->tx_ch,
					   stm32port->tx_dma_buf,
					   count,
					   DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);

	if (!desc)
		goto fallback_err;

	/*
	 * Set the "tx_dma_busy" flag. This flag is released when
	 * dmaengine_terminate_async() is called. It prevents
	 * transmit_chars_dma() from starting another DMA transaction
	 * while the callback of the previous one has not run yet.
	 */
	stm32port->tx_dma_busy = true;

	desc->callback = stm32_usart_tx_dma_complete;
	desc->callback_param = port;

	/* Push current DMA TX transaction in the pending queue */
	/* DMA not yet started, safe to free resources */
	ret = dma_submit_error(dmaengine_submit(desc));
	if (ret) {
		dev_err(port->dev, "DMA failed with error code: %d\n", ret);
		stm32_usart_tx_dma_terminate(stm32port);
		goto fallback_err;
	}

	/* Issue pending DMA TX requests */
	dma_async_issue_pending(stm32port->tx_ch);

	uart_xmit_advance(port, count);

	return;

fallback_err:
	stm32_usart_transmit_chars_pio(port);
}

static void stm32_usart_transmit_chars(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;
	u32 isr;
	int ret;

	if (!stm32_port->hw_flow_control &&
	    port->rs485.flags & SER_RS485_ENABLED &&
	    (port->x_char ||
	     !(uart_circ_empty(xmit) || uart_tx_stopped(port)))) {
		stm32_usart_tc_interrupt_disable(port);
		stm32_usart_rs485_rts_enable(port);
	}

	if (port->x_char) {
		/* dma terminate may have been called in case of dma pause failure */
		stm32_usart_tx_dma_pause(stm32_port);

		/* Check that TDR is empty before filling FIFO */
		ret =
		readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
						  isr,
						  (isr & USART_SR_TXE),
						  10, 1000);
		if (ret)
			dev_warn(port->dev, "1 character may be erased\n");

		writel_relaxed(port->x_char, port->membase + ofs->tdr);
		port->x_char = 0;
		port->icount.tx++;

		/* dma terminate may have been called in case of dma resume failure */
		stm32_usart_tx_dma_resume(stm32_port);
		return;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		stm32_usart_tx_interrupt_disable(port);
		return;
	}

	if (ofs->icr == UNDEF_REG)
		stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC);
	else
		writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);

	if (stm32_port->tx_ch)
		stm32_usart_transmit_chars_dma(port);
	else
		stm32_usart_transmit_chars_pio(port);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit)) {
		stm32_usart_tx_interrupt_disable(port);
		if (!stm32_port->hw_flow_control &&
		    port->rs485.flags & SER_RS485_ENABLED) {
			stm32_usart_tc_interrupt_enable(port);
		}
	}
}

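/*
 * Main interrupt handler: on RS485 half-duplex ports it drops RTS once the
 * transmission-complete flag is set, clears the receiver-timeout and
 * wake-up flags, drains RX either through DMA or PIO depending on the
 * current mode (and on whether the port is throttled), and refills the TX
 * register when TXE is raised and no TX DMA channel is in use.
 */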
static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct tty_port *tport = &port->state->port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	u32 sr;
	unsigned int size;

	sr = readl_relaxed(port->membase + ofs->isr);

	if (!stm32_port->hw_flow_control &&
	    port->rs485.flags & SER_RS485_ENABLED &&
	    (sr & USART_SR_TC)) {
		stm32_usart_tc_interrupt_disable(port);
		stm32_usart_rs485_rts_disable(port);
	}

	if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
		writel_relaxed(USART_ICR_RTOCF,
			       port->membase + ofs->icr);

	if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
		/* Clear wake up flag and disable wake up interrupt */
		writel_relaxed(USART_ICR_WUCF,
			       port->membase + ofs->icr);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
		if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
			pm_wakeup_event(tport->tty->dev, 0);
	}

	/*
	 * RX errors in DMA mode have to be handled ASAP to avoid overrun, as the DMA request
	 * line has been masked by HW and RX data is stacking up in the FIFO.
	 */
	if (!stm32_port->throttled) {
		if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_started(stm32_port)) ||
		    ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_started(stm32_port))) {
			spin_lock(&port->lock);
			size = stm32_usart_receive_chars(port, false);
			uart_unlock_and_check_sysrq(port);
			if (size)
				tty_flip_buffer_push(tport);
		}
	}

	if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
		spin_lock(&port->lock);
		stm32_usart_transmit_chars(port);
		spin_unlock(&port->lock);
	}

	/* Receiver timeout irq for DMA RX */
	if (stm32_usart_rx_dma_started(stm32_port) && !stm32_port->throttled) {
		spin_lock(&port->lock);
		size = stm32_usart_receive_chars(port, false);
		uart_unlock_and_check_sysrq(port);
		if (size)
			tty_flip_buffer_push(tport);
	}

	return IRQ_HANDLED;
}

static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE);
	else
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE);

	mctrl_gpio_set(stm32_port->gpios, mctrl);
}

static unsigned int stm32_usart_get_mctrl(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	unsigned int ret;

	/* This routine is used to get signals of: DCD, DSR, RI, and CTS */
	ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;

	return mctrl_gpio_get(stm32_port->gpios, &ret);
}

static void stm32_usart_enable_ms(struct uart_port *port)
{
	mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
}

static void stm32_usart_disable_ms(struct uart_port *port)
{
	mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
}

/* Transmit stop */
static void stm32_usart_stop_tx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);

	stm32_usart_tx_interrupt_disable(port);

	/* dma terminate may have been called in case of dma pause failure */
	stm32_usart_tx_dma_pause(stm32_port);

	stm32_usart_rs485_rts_disable(port);
}

/* There are probably characters waiting to be transmitted. */
static void stm32_usart_start_tx(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;

	if (uart_circ_empty(xmit) && !port->x_char) {
		stm32_usart_rs485_rts_disable(port);
		return;
	}

	stm32_usart_rs485_rts_enable(port);

	stm32_usart_transmit_chars(port);
}

/* Flush the transmit buffer. */
static void stm32_usart_flush_buffer(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);

	if (stm32_port->tx_ch)
		stm32_usart_tx_dma_terminate(stm32_port);
}

/* Throttle the remote when input buffer is about to overflow.
*/ 986 static void stm32_usart_throttle(struct uart_port *port) 987 { 988 struct stm32_port *stm32_port = to_stm32_port(port); 989 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 990 unsigned long flags; 991 992 spin_lock_irqsave(&port->lock, flags); 993 994 /* 995 * Pause DMA transfer, so the RX data gets queued into the FIFO. 996 * Hardware flow control is triggered when RX FIFO is full. 997 */ 998 stm32_usart_rx_dma_pause(stm32_port); 999 1000 stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq); 1001 if (stm32_port->cr3_irq) 1002 stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq); 1003 1004 stm32_port->throttled = true; 1005 spin_unlock_irqrestore(&port->lock, flags); 1006 } 1007 1008 /* Unthrottle the remote, the input buffer can now accept data. */ 1009 static void stm32_usart_unthrottle(struct uart_port *port) 1010 { 1011 struct stm32_port *stm32_port = to_stm32_port(port); 1012 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1013 unsigned long flags; 1014 1015 spin_lock_irqsave(&port->lock, flags); 1016 stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq); 1017 if (stm32_port->cr3_irq) 1018 stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq); 1019 1020 stm32_port->throttled = false; 1021 1022 /* 1023 * Switch back to DMA mode (resume DMA). 1024 * Hardware flow control is stopped when FIFO is not full any more. 1025 */ 1026 if (stm32_port->rx_ch) 1027 stm32_usart_rx_dma_start_or_resume(port); 1028 1029 spin_unlock_irqrestore(&port->lock, flags); 1030 } 1031 1032 /* Receive stop */ 1033 static void stm32_usart_stop_rx(struct uart_port *port) 1034 { 1035 struct stm32_port *stm32_port = to_stm32_port(port); 1036 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1037 1038 /* Disable DMA request line. 
*/ 1039 stm32_usart_rx_dma_pause(stm32_port); 1040 1041 stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq); 1042 if (stm32_port->cr3_irq) 1043 stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq); 1044 } 1045 1046 /* Handle breaks - ignored by us */ 1047 static void stm32_usart_break_ctl(struct uart_port *port, int break_state) 1048 { 1049 } 1050 1051 static int stm32_usart_startup(struct uart_port *port) 1052 { 1053 struct stm32_port *stm32_port = to_stm32_port(port); 1054 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1055 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 1056 const char *name = to_platform_device(port->dev)->name; 1057 u32 val; 1058 int ret; 1059 1060 ret = request_irq(port->irq, stm32_usart_interrupt, 1061 IRQF_NO_SUSPEND, name, port); 1062 if (ret) 1063 return ret; 1064 1065 if (stm32_port->swap) { 1066 val = readl_relaxed(port->membase + ofs->cr2); 1067 val |= USART_CR2_SWAP; 1068 writel_relaxed(val, port->membase + ofs->cr2); 1069 } 1070 1071 /* RX FIFO Flush */ 1072 if (ofs->rqr != UNDEF_REG) 1073 writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr); 1074 1075 if (stm32_port->rx_ch) { 1076 ret = stm32_usart_rx_dma_start_or_resume(port); 1077 if (ret) { 1078 free_irq(port->irq, port); 1079 return ret; 1080 } 1081 } 1082 1083 /* RX enabling */ 1084 val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit); 1085 stm32_usart_set_bits(port, ofs->cr1, val); 1086 1087 return 0; 1088 } 1089 1090 static void stm32_usart_shutdown(struct uart_port *port) 1091 { 1092 struct stm32_port *stm32_port = to_stm32_port(port); 1093 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1094 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 1095 u32 val, isr; 1096 int ret; 1097 1098 if (stm32_usart_tx_dma_started(stm32_port)) 1099 stm32_usart_tx_dma_terminate(stm32_port); 1100 1101 if (stm32_port->tx_ch) 1102 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 1103 1104 /* Disable modem control interrupts */ 1105 stm32_usart_disable_ms(port); 1106 1107 val = USART_CR1_TXEIE | USART_CR1_TE; 1108 val |= stm32_port->cr1_irq | USART_CR1_RE; 1109 val |= BIT(cfg->uart_enable_bit); 1110 if (stm32_port->fifoen) 1111 val |= USART_CR1_FIFOEN; 1112 1113 ret = readl_relaxed_poll_timeout(port->membase + ofs->isr, 1114 isr, (isr & USART_SR_TC), 1115 10, 100000); 1116 1117 /* Send the TC error message only when ISR_TC is not set */ 1118 if (ret) 1119 dev_err(port->dev, "Transmission is not complete\n"); 1120 1121 /* Disable RX DMA. 
 */
	if (stm32_port->rx_ch) {
		stm32_usart_rx_dma_terminate(stm32_port);
		dmaengine_synchronize(stm32_port->rx_ch);
	}

	/* flush RX & TX FIFO */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
			       port->membase + ofs->rqr);

	stm32_usart_clr_bits(port, ofs->cr1, val);

	free_irq(port->irq, port);
}

static void stm32_usart_set_termios(struct uart_port *port,
				    struct ktermios *termios,
				    const struct ktermios *old)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	struct serial_rs485 *rs485conf = &port->rs485;
	unsigned int baud, bits;
	u32 usartdiv, mantissa, fraction, oversampling;
	tcflag_t cflag = termios->c_cflag;
	u32 cr1, cr2, cr3, isr;
	unsigned long flags;
	int ret;

	if (!stm32_port->hw_flow_control)
		cflag &= ~CRTSCTS;

	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);

	spin_lock_irqsave(&port->lock, flags);

	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
						isr,
						(isr & USART_SR_TC),
						10, 100000);

	/* Send the TC error message only when ISR_TC is not set. */
	if (ret)
		dev_err(port->dev, "Transmission is not complete\n");

	/* Stop serial port and reset value */
	writel_relaxed(0, port->membase + ofs->cr1);

	/* flush RX & TX FIFO */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
			       port->membase + ofs->rqr);

	cr1 = USART_CR1_TE | USART_CR1_RE;
	if (stm32_port->fifoen)
		cr1 |= USART_CR1_FIFOEN;
	cr2 = stm32_port->swap ? USART_CR2_SWAP : 0;

	/* Tx and RX FIFO configuration */
	cr3 = readl_relaxed(port->membase + ofs->cr3);
	cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
	if (stm32_port->fifoen) {
		if (stm32_port->txftcfg >= 0)
			cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT;
		if (stm32_port->rxftcfg >= 0)
			cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT;
	}

	if (cflag & CSTOPB)
		cr2 |= USART_CR2_STOP_2B;

	bits = tty_get_char_size(cflag);
	stm32_port->rdr_mask = (BIT(bits) - 1);

	if (cflag & PARENB) {
		bits++;
		cr1 |= USART_CR1_PCE;
	}

	/*
	 * Word length configuration:
	 * CS8 + parity, 9 bits word aka [M1:M0] = 0b01
	 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10
	 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
	 * M0 and M1 already cleared by cr1 initialization.
	 */
	if (bits == 9) {
		cr1 |= USART_CR1_M0;
	} else if ((bits == 7) && cfg->has_7bits_data) {
		cr1 |= USART_CR1_M1;
	} else if (bits != 8) {
		dev_dbg(port->dev, "Unsupported data bits config: %u bits\n"
			, bits);
		cflag &= ~CSIZE;
		cflag |= CS8;
		termios->c_cflag = cflag;
		bits = 8;
		if (cflag & PARENB) {
			bits++;
			cr1 |= USART_CR1_M0;
		}
	}
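
	/*
	 * Illustration of the receiver-timeout programming below (illustrative
	 * values only): the value written to RTOR is a number of bit periods.
	 * For a CS8, no-parity, one-stop-bit frame, bits becomes 8 data bits +
	 * 1 start bit + 1 stop bit = 10, so the RX timeout interrupt fires
	 * after roughly one character time (10 bit periods) of idle line
	 * following the last received stop bit.
	 */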
	if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
				       (stm32_port->fifoen &&
					stm32_port->rxftcfg >= 0))) {
		if (cflag & CSTOPB)
			bits = bits + 3; /* 1 start bit + 2 stop bits */
		else
			bits = bits + 2; /* 1 start bit + 1 stop bit */

		/* RX timeout irq to occur after last stop bit + bits */
		stm32_port->cr1_irq = USART_CR1_RTOIE;
		writel_relaxed(bits, port->membase + ofs->rtor);
		cr2 |= USART_CR2_RTOEN;
		/*
		 * Enable the RX FIFO threshold irq in two cases: when there
		 * is no RX DMA, and when waking up over USART from low power,
		 * until the DMA gets re-enabled by resume.
		 */
		stm32_port->cr3_irq =  USART_CR3_RXFTIE;
	}

	cr1 |= stm32_port->cr1_irq;
	cr3 |= stm32_port->cr3_irq;

	if (cflag & PARODD)
		cr1 |= USART_CR1_PS;

	port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	if (cflag & CRTSCTS) {
		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
		cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
	}

	usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);

	/*
	 * The USART supports 16 or 8 times oversampling.
	 * By default we prefer 16 times oversampling, so that the receiver
	 * has a better tolerance to clock deviations.
	 * 8 times oversampling is only used to achieve higher speeds.
	 */
	if (usartdiv < 16) {
		oversampling = 8;
		cr1 |= USART_CR1_OVER8;
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
	} else {
		oversampling = 16;
		cr1 &= ~USART_CR1_OVER8;
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
	}

	mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
	fraction = usartdiv % oversampling;
	writel_relaxed(mantissa | fraction, port->membase + ofs->brr);

	uart_update_timeout(port, cflag, baud);

	port->read_status_mask = USART_SR_ORE;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= USART_SR_PE | USART_SR_FE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= USART_SR_FE;

	/* Characters to ignore */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask = USART_SR_PE | USART_SR_FE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= USART_SR_FE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= USART_SR_ORE;
	}

	/* Ignore all characters if CREAD is not set */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= USART_SR_DUMMY_RX;

	if (stm32_port->rx_ch) {
		/*
		 * Setup DMA to collect only valid data and enable error irqs.
		 * This also enables break reception when using DMA.
1309 */ 1310 cr1 |= USART_CR1_PEIE; 1311 cr3 |= USART_CR3_EIE; 1312 cr3 |= USART_CR3_DMAR; 1313 cr3 |= USART_CR3_DDRE; 1314 } 1315 1316 if (stm32_port->tx_ch) 1317 cr3 |= USART_CR3_DMAT; 1318 1319 if (rs485conf->flags & SER_RS485_ENABLED) { 1320 stm32_usart_config_reg_rs485(&cr1, &cr3, 1321 rs485conf->delay_rts_before_send, 1322 rs485conf->delay_rts_after_send, 1323 baud); 1324 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) { 1325 cr3 &= ~USART_CR3_DEP; 1326 rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND; 1327 } else { 1328 cr3 |= USART_CR3_DEP; 1329 rs485conf->flags |= SER_RS485_RTS_AFTER_SEND; 1330 } 1331 1332 } else { 1333 cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP); 1334 cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK); 1335 } 1336 1337 /* Configure wake up from low power on start bit detection */ 1338 if (stm32_port->wakeup_src) { 1339 cr3 &= ~USART_CR3_WUS_MASK; 1340 cr3 |= USART_CR3_WUS_START_BIT; 1341 } 1342 1343 writel_relaxed(cr3, port->membase + ofs->cr3); 1344 writel_relaxed(cr2, port->membase + ofs->cr2); 1345 writel_relaxed(cr1, port->membase + ofs->cr1); 1346 1347 stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); 1348 spin_unlock_irqrestore(&port->lock, flags); 1349 1350 /* Handle modem control interrupts */ 1351 if (UART_ENABLE_MS(port, termios->c_cflag)) 1352 stm32_usart_enable_ms(port); 1353 else 1354 stm32_usart_disable_ms(port); 1355 } 1356 1357 static const char *stm32_usart_type(struct uart_port *port) 1358 { 1359 return (port->type == PORT_STM32) ? DRIVER_NAME : NULL; 1360 } 1361 1362 static void stm32_usart_release_port(struct uart_port *port) 1363 { 1364 } 1365 1366 static int stm32_usart_request_port(struct uart_port *port) 1367 { 1368 return 0; 1369 } 1370 1371 static void stm32_usart_config_port(struct uart_port *port, int flags) 1372 { 1373 if (flags & UART_CONFIG_TYPE) 1374 port->type = PORT_STM32; 1375 } 1376 1377 static int 1378 stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser) 1379 { 1380 /* No user changeable parameters */ 1381 return -EINVAL; 1382 } 1383 1384 static void stm32_usart_pm(struct uart_port *port, unsigned int state, 1385 unsigned int oldstate) 1386 { 1387 struct stm32_port *stm32port = container_of(port, 1388 struct stm32_port, port); 1389 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 1390 const struct stm32_usart_config *cfg = &stm32port->info->cfg; 1391 unsigned long flags; 1392 1393 switch (state) { 1394 case UART_PM_STATE_ON: 1395 pm_runtime_get_sync(port->dev); 1396 break; 1397 case UART_PM_STATE_OFF: 1398 spin_lock_irqsave(&port->lock, flags); 1399 stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); 1400 spin_unlock_irqrestore(&port->lock, flags); 1401 pm_runtime_put_sync(port->dev); 1402 break; 1403 } 1404 } 1405 1406 #if defined(CONFIG_CONSOLE_POLL) 1407 1408 /* Callbacks for characters polling in debug context (i.e. KGDB). 
*/ 1409 static int stm32_usart_poll_init(struct uart_port *port) 1410 { 1411 struct stm32_port *stm32_port = to_stm32_port(port); 1412 1413 return clk_prepare_enable(stm32_port->clk); 1414 } 1415 1416 static int stm32_usart_poll_get_char(struct uart_port *port) 1417 { 1418 struct stm32_port *stm32_port = to_stm32_port(port); 1419 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1420 1421 if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE)) 1422 return NO_POLL_CHAR; 1423 1424 return readl_relaxed(port->membase + ofs->rdr) & stm32_port->rdr_mask; 1425 } 1426 1427 static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch) 1428 { 1429 stm32_usart_console_putchar(port, ch); 1430 } 1431 #endif /* CONFIG_CONSOLE_POLL */ 1432 1433 static const struct uart_ops stm32_uart_ops = { 1434 .tx_empty = stm32_usart_tx_empty, 1435 .set_mctrl = stm32_usart_set_mctrl, 1436 .get_mctrl = stm32_usart_get_mctrl, 1437 .stop_tx = stm32_usart_stop_tx, 1438 .start_tx = stm32_usart_start_tx, 1439 .throttle = stm32_usart_throttle, 1440 .unthrottle = stm32_usart_unthrottle, 1441 .stop_rx = stm32_usart_stop_rx, 1442 .enable_ms = stm32_usart_enable_ms, 1443 .break_ctl = stm32_usart_break_ctl, 1444 .startup = stm32_usart_startup, 1445 .shutdown = stm32_usart_shutdown, 1446 .flush_buffer = stm32_usart_flush_buffer, 1447 .set_termios = stm32_usart_set_termios, 1448 .pm = stm32_usart_pm, 1449 .type = stm32_usart_type, 1450 .release_port = stm32_usart_release_port, 1451 .request_port = stm32_usart_request_port, 1452 .config_port = stm32_usart_config_port, 1453 .verify_port = stm32_usart_verify_port, 1454 #if defined(CONFIG_CONSOLE_POLL) 1455 .poll_init = stm32_usart_poll_init, 1456 .poll_get_char = stm32_usart_poll_get_char, 1457 .poll_put_char = stm32_usart_poll_put_char, 1458 #endif /* CONFIG_CONSOLE_POLL */ 1459 }; 1460 1461 /* 1462 * STM32H7 RX & TX FIFO threshold configuration (CR3 RXFTCFG / TXFTCFG) 1463 * Note: 1 isn't a valid value in RXFTCFG / TXFTCFG. In this case, 1464 * RXNEIE / TXEIE can be used instead of threshold irqs: RXFTIE / TXFTIE. 1465 * So, RXFTCFG / TXFTCFG bitfields values are encoded as array index + 1. 
1466 */ 1467 static const u32 stm32h7_usart_fifo_thresh_cfg[] = { 1, 2, 4, 8, 12, 14, 16 }; 1468 1469 static void stm32_usart_get_ftcfg(struct platform_device *pdev, const char *p, 1470 int *ftcfg) 1471 { 1472 u32 bytes, i; 1473 1474 /* DT option to get RX & TX FIFO threshold (default to 8 bytes) */ 1475 if (of_property_read_u32(pdev->dev.of_node, p, &bytes)) 1476 bytes = 8; 1477 1478 for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++) 1479 if (stm32h7_usart_fifo_thresh_cfg[i] >= bytes) 1480 break; 1481 if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg)) 1482 i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1; 1483 1484 dev_dbg(&pdev->dev, "%s set to %d bytes\n", p, 1485 stm32h7_usart_fifo_thresh_cfg[i]); 1486 1487 /* Provide FIFO threshold ftcfg (1 is invalid: threshold irq unused) */ 1488 if (i) 1489 *ftcfg = i - 1; 1490 else 1491 *ftcfg = -EINVAL; 1492 } 1493 1494 static void stm32_usart_deinit_port(struct stm32_port *stm32port) 1495 { 1496 clk_disable_unprepare(stm32port->clk); 1497 } 1498 1499 static const struct serial_rs485 stm32_rs485_supported = { 1500 .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND | 1501 SER_RS485_RX_DURING_TX, 1502 .delay_rts_before_send = 1, 1503 .delay_rts_after_send = 1, 1504 }; 1505 1506 static int stm32_usart_init_port(struct stm32_port *stm32port, 1507 struct platform_device *pdev) 1508 { 1509 struct uart_port *port = &stm32port->port; 1510 struct resource *res; 1511 int ret, irq; 1512 1513 irq = platform_get_irq(pdev, 0); 1514 if (irq < 0) 1515 return irq; 1516 1517 port->iotype = UPIO_MEM; 1518 port->flags = UPF_BOOT_AUTOCONF; 1519 port->ops = &stm32_uart_ops; 1520 port->dev = &pdev->dev; 1521 port->fifosize = stm32port->info->cfg.fifosize; 1522 port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE); 1523 port->irq = irq; 1524 port->rs485_config = stm32_usart_config_rs485; 1525 port->rs485_supported = stm32_rs485_supported; 1526 1527 ret = stm32_usart_init_rs485(port, pdev); 1528 if (ret) 1529 return ret; 1530 1531 stm32port->wakeup_src = stm32port->info->cfg.has_wakeup && 1532 of_property_read_bool(pdev->dev.of_node, "wakeup-source"); 1533 1534 stm32port->swap = stm32port->info->cfg.has_swap && 1535 of_property_read_bool(pdev->dev.of_node, "rx-tx-swap"); 1536 1537 stm32port->fifoen = stm32port->info->cfg.has_fifo; 1538 if (stm32port->fifoen) { 1539 stm32_usart_get_ftcfg(pdev, "rx-threshold", 1540 &stm32port->rxftcfg); 1541 stm32_usart_get_ftcfg(pdev, "tx-threshold", 1542 &stm32port->txftcfg); 1543 } 1544 1545 port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res); 1546 if (IS_ERR(port->membase)) 1547 return PTR_ERR(port->membase); 1548 port->mapbase = res->start; 1549 1550 spin_lock_init(&port->lock); 1551 1552 stm32port->clk = devm_clk_get(&pdev->dev, NULL); 1553 if (IS_ERR(stm32port->clk)) 1554 return PTR_ERR(stm32port->clk); 1555 1556 /* Ensure that clk rate is correct by enabling the clk */ 1557 ret = clk_prepare_enable(stm32port->clk); 1558 if (ret) 1559 return ret; 1560 1561 stm32port->port.uartclk = clk_get_rate(stm32port->clk); 1562 if (!stm32port->port.uartclk) { 1563 ret = -EINVAL; 1564 goto err_clk; 1565 } 1566 1567 stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0); 1568 if (IS_ERR(stm32port->gpios)) { 1569 ret = PTR_ERR(stm32port->gpios); 1570 goto err_clk; 1571 } 1572 1573 /* 1574 * Both CTS/RTS gpios and "st,hw-flow-ctrl" (deprecated) or "uart-has-rtscts" 1575 * properties should not be specified. 
1576 */ 1577 if (stm32port->hw_flow_control) { 1578 if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) || 1579 mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) { 1580 dev_err(&pdev->dev, "Conflicting RTS/CTS config\n"); 1581 ret = -EINVAL; 1582 goto err_clk; 1583 } 1584 } 1585 1586 return ret; 1587 1588 err_clk: 1589 clk_disable_unprepare(stm32port->clk); 1590 1591 return ret; 1592 } 1593 1594 static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev) 1595 { 1596 struct device_node *np = pdev->dev.of_node; 1597 int id; 1598 1599 if (!np) 1600 return NULL; 1601 1602 id = of_alias_get_id(np, "serial"); 1603 if (id < 0) { 1604 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id); 1605 return NULL; 1606 } 1607 1608 if (WARN_ON(id >= STM32_MAX_PORTS)) 1609 return NULL; 1610 1611 stm32_ports[id].hw_flow_control = 1612 of_property_read_bool (np, "st,hw-flow-ctrl") /*deprecated*/ || 1613 of_property_read_bool (np, "uart-has-rtscts"); 1614 stm32_ports[id].port.line = id; 1615 stm32_ports[id].cr1_irq = USART_CR1_RXNEIE; 1616 stm32_ports[id].cr3_irq = 0; 1617 stm32_ports[id].last_res = RX_BUF_L; 1618 return &stm32_ports[id]; 1619 } 1620 1621 #ifdef CONFIG_OF 1622 static const struct of_device_id stm32_match[] = { 1623 { .compatible = "st,stm32-uart", .data = &stm32f4_info}, 1624 { .compatible = "st,stm32f7-uart", .data = &stm32f7_info}, 1625 { .compatible = "st,stm32h7-uart", .data = &stm32h7_info}, 1626 {}, 1627 }; 1628 1629 MODULE_DEVICE_TABLE(of, stm32_match); 1630 #endif 1631 1632 static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port, 1633 struct platform_device *pdev) 1634 { 1635 if (stm32port->rx_buf) 1636 dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf, 1637 stm32port->rx_dma_buf); 1638 } 1639 1640 static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port, 1641 struct platform_device *pdev) 1642 { 1643 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 1644 struct uart_port *port = &stm32port->port; 1645 struct device *dev = &pdev->dev; 1646 struct dma_slave_config config; 1647 int ret; 1648 1649 stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L, 1650 &stm32port->rx_dma_buf, 1651 GFP_KERNEL); 1652 if (!stm32port->rx_buf) 1653 return -ENOMEM; 1654 1655 /* Configure DMA channel */ 1656 memset(&config, 0, sizeof(config)); 1657 config.src_addr = port->mapbase + ofs->rdr; 1658 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 1659 1660 ret = dmaengine_slave_config(stm32port->rx_ch, &config); 1661 if (ret < 0) { 1662 dev_err(dev, "rx dma channel config failed\n"); 1663 stm32_usart_of_dma_rx_remove(stm32port, pdev); 1664 return ret; 1665 } 1666 1667 return 0; 1668 } 1669 1670 static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port, 1671 struct platform_device *pdev) 1672 { 1673 if (stm32port->tx_buf) 1674 dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf, 1675 stm32port->tx_dma_buf); 1676 } 1677 1678 static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port, 1679 struct platform_device *pdev) 1680 { 1681 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 1682 struct uart_port *port = &stm32port->port; 1683 struct device *dev = &pdev->dev; 1684 struct dma_slave_config config; 1685 int ret; 1686 1687 stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L, 1688 &stm32port->tx_dma_buf, 1689 GFP_KERNEL); 1690 if (!stm32port->tx_buf) 1691 return -ENOMEM; 1692 1693 /* Configure DMA channel */ 1694 memset(&config, 0, sizeof(config)); 1695 config.dst_addr = port->mapbase + 
ofs->tdr; 1696 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 1697 1698 ret = dmaengine_slave_config(stm32port->tx_ch, &config); 1699 if (ret < 0) { 1700 dev_err(dev, "tx dma channel config failed\n"); 1701 stm32_usart_of_dma_tx_remove(stm32port, pdev); 1702 return ret; 1703 } 1704 1705 return 0; 1706 } 1707 1708 static int stm32_usart_serial_probe(struct platform_device *pdev) 1709 { 1710 struct stm32_port *stm32port; 1711 int ret; 1712 1713 stm32port = stm32_usart_of_get_port(pdev); 1714 if (!stm32port) 1715 return -ENODEV; 1716 1717 stm32port->info = of_device_get_match_data(&pdev->dev); 1718 if (!stm32port->info) 1719 return -EINVAL; 1720 1721 stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx"); 1722 if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) 1723 return -EPROBE_DEFER; 1724 1725 /* Fall back in interrupt mode for any non-deferral error */ 1726 if (IS_ERR(stm32port->rx_ch)) 1727 stm32port->rx_ch = NULL; 1728 1729 stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx"); 1730 if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) { 1731 ret = -EPROBE_DEFER; 1732 goto err_dma_rx; 1733 } 1734 /* Fall back in interrupt mode for any non-deferral error */ 1735 if (IS_ERR(stm32port->tx_ch)) 1736 stm32port->tx_ch = NULL; 1737 1738 ret = stm32_usart_init_port(stm32port, pdev); 1739 if (ret) 1740 goto err_dma_tx; 1741 1742 if (stm32port->wakeup_src) { 1743 device_set_wakeup_capable(&pdev->dev, true); 1744 ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq); 1745 if (ret) 1746 goto err_deinit_port; 1747 } 1748 1749 if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) { 1750 /* Fall back in interrupt mode */ 1751 dma_release_channel(stm32port->rx_ch); 1752 stm32port->rx_ch = NULL; 1753 } 1754 1755 if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) { 1756 /* Fall back in interrupt mode */ 1757 dma_release_channel(stm32port->tx_ch); 1758 stm32port->tx_ch = NULL; 1759 } 1760 1761 if (!stm32port->rx_ch) 1762 dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n"); 1763 if (!stm32port->tx_ch) 1764 dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n"); 1765 1766 platform_set_drvdata(pdev, &stm32port->port); 1767 1768 pm_runtime_get_noresume(&pdev->dev); 1769 pm_runtime_set_active(&pdev->dev); 1770 pm_runtime_enable(&pdev->dev); 1771 1772 ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port); 1773 if (ret) 1774 goto err_port; 1775 1776 pm_runtime_put_sync(&pdev->dev); 1777 1778 return 0; 1779 1780 err_port: 1781 pm_runtime_disable(&pdev->dev); 1782 pm_runtime_set_suspended(&pdev->dev); 1783 pm_runtime_put_noidle(&pdev->dev); 1784 1785 if (stm32port->tx_ch) 1786 stm32_usart_of_dma_tx_remove(stm32port, pdev); 1787 if (stm32port->rx_ch) 1788 stm32_usart_of_dma_rx_remove(stm32port, pdev); 1789 1790 if (stm32port->wakeup_src) 1791 dev_pm_clear_wake_irq(&pdev->dev); 1792 1793 err_deinit_port: 1794 if (stm32port->wakeup_src) 1795 device_set_wakeup_capable(&pdev->dev, false); 1796 1797 stm32_usart_deinit_port(stm32port); 1798 1799 err_dma_tx: 1800 if (stm32port->tx_ch) 1801 dma_release_channel(stm32port->tx_ch); 1802 1803 err_dma_rx: 1804 if (stm32port->rx_ch) 1805 dma_release_channel(stm32port->rx_ch); 1806 1807 return ret; 1808 } 1809 1810 static int stm32_usart_serial_remove(struct platform_device *pdev) 1811 { 1812 struct uart_port *port = platform_get_drvdata(pdev); 1813 struct stm32_port *stm32_port = to_stm32_port(port); 1814 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1815 u32 cr3; 1816 1817 pm_runtime_get_sync(&pdev->dev); 1818 
uart_remove_one_port(&stm32_usart_driver, port); 1819 1820 pm_runtime_disable(&pdev->dev); 1821 pm_runtime_set_suspended(&pdev->dev); 1822 pm_runtime_put_noidle(&pdev->dev); 1823 1824 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE); 1825 1826 if (stm32_port->tx_ch) { 1827 stm32_usart_of_dma_tx_remove(stm32_port, pdev); 1828 dma_release_channel(stm32_port->tx_ch); 1829 } 1830 1831 if (stm32_port->rx_ch) { 1832 stm32_usart_of_dma_rx_remove(stm32_port, pdev); 1833 dma_release_channel(stm32_port->rx_ch); 1834 } 1835 1836 cr3 = readl_relaxed(port->membase + ofs->cr3); 1837 cr3 &= ~USART_CR3_EIE; 1838 cr3 &= ~USART_CR3_DMAR; 1839 cr3 &= ~USART_CR3_DMAT; 1840 cr3 &= ~USART_CR3_DDRE; 1841 writel_relaxed(cr3, port->membase + ofs->cr3); 1842 1843 if (stm32_port->wakeup_src) { 1844 dev_pm_clear_wake_irq(&pdev->dev); 1845 device_init_wakeup(&pdev->dev, false); 1846 } 1847 1848 stm32_usart_deinit_port(stm32_port); 1849 1850 return 0; 1851 } 1852 1853 static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch) 1854 { 1855 struct stm32_port *stm32_port = to_stm32_port(port); 1856 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1857 u32 isr; 1858 int ret; 1859 1860 ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr, 1861 (isr & USART_SR_TXE), 100, 1862 STM32_USART_TIMEOUT_USEC); 1863 if (ret != 0) { 1864 dev_err(port->dev, "Error while sending data in UART TX : %d\n", ret); 1865 return; 1866 } 1867 writel_relaxed(ch, port->membase + ofs->tdr); 1868 } 1869 1870 #ifdef CONFIG_SERIAL_STM32_CONSOLE 1871 static void stm32_usart_console_write(struct console *co, const char *s, 1872 unsigned int cnt) 1873 { 1874 struct uart_port *port = &stm32_ports[co->index].port; 1875 struct stm32_port *stm32_port = to_stm32_port(port); 1876 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1877 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 1878 unsigned long flags; 1879 u32 old_cr1, new_cr1; 1880 int locked = 1; 1881 1882 if (oops_in_progress) 1883 locked = spin_trylock_irqsave(&port->lock, flags); 1884 else 1885 spin_lock_irqsave(&port->lock, flags); 1886 1887 /* Save and disable interrupts, enable the transmitter */ 1888 old_cr1 = readl_relaxed(port->membase + ofs->cr1); 1889 new_cr1 = old_cr1 & ~USART_CR1_IE_MASK; 1890 new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit); 1891 writel_relaxed(new_cr1, port->membase + ofs->cr1); 1892 1893 uart_console_write(port, s, cnt, stm32_usart_console_putchar); 1894 1895 /* Restore interrupt state */ 1896 writel_relaxed(old_cr1, port->membase + ofs->cr1); 1897 1898 if (locked) 1899 spin_unlock_irqrestore(&port->lock, flags); 1900 } 1901 1902 static int stm32_usart_console_setup(struct console *co, char *options) 1903 { 1904 struct stm32_port *stm32port; 1905 int baud = 9600; 1906 int bits = 8; 1907 int parity = 'n'; 1908 int flow = 'n'; 1909 1910 if (co->index >= STM32_MAX_PORTS) 1911 return -ENODEV; 1912 1913 stm32port = &stm32_ports[co->index]; 1914 1915 /* 1916 * This driver does not support early console initialization 1917 * (use ARM early printk support instead), so we only expect 1918 * this to be called during the uart port registration when the 1919 * driver gets probed and the port should be mapped at that point. 
	 */
	if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
		return -ENXIO;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(&stm32port->port, co, baud, parity, bits, flow);
}

static struct console stm32_console = {
	.name		= STM32_SERIAL_NAME,
	.device		= uart_console_device,
	.write		= stm32_usart_console_write,
	.setup		= stm32_usart_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &stm32_usart_driver,
};

#define STM32_SERIAL_CONSOLE (&stm32_console)

#else
#define STM32_SERIAL_CONSOLE NULL
#endif /* CONFIG_SERIAL_STM32_CONSOLE */

#ifdef CONFIG_SERIAL_EARLYCON
static void early_stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
{
	struct stm32_usart_info *info = port->private_data;

	while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE))
		cpu_relax();

	writel_relaxed(ch, port->membase + info->ofs.tdr);
}

static void early_stm32_serial_write(struct console *console, const char *s, unsigned int count)
{
	struct earlycon_device *device = console->data;
	struct uart_port *port = &device->port;

	uart_console_write(port, s, count, early_stm32_usart_console_putchar);
}

static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32h7_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32f7_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32f4_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

OF_EARLYCON_DECLARE(stm32, "st,stm32h7-uart", early_stm32_h7_serial_setup);
OF_EARLYCON_DECLARE(stm32, "st,stm32f7-uart", early_stm32_f7_serial_setup);
OF_EARLYCON_DECLARE(stm32, "st,stm32-uart", early_stm32_f4_serial_setup);
#endif /* CONFIG_SERIAL_EARLYCON */

static struct uart_driver stm32_usart_driver = {
	.driver_name	= DRIVER_NAME,
	.dev_name	= STM32_SERIAL_NAME,
	.major		= 0,
	.minor		= 0,
	.nr		= STM32_MAX_PORTS,
	.cons		= STM32_SERIAL_CONSOLE,
};

static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
							bool enable)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct tty_port *tport = &port->state->port;
	int ret;
	unsigned int size = 0;
	unsigned long flags;

	if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
		return 0;

	/*
	 * Enable low-power wake-up and the wake-up irq if the argument is
	 * set to "enable"; disable both otherwise.
	 */
	if (enable) {
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
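		/* WUFIE raises an interrupt on the wake-up-from-Stop event */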
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
		mctrl_gpio_enable_irq_wake(stm32_port->gpios);

		/*
		 * When DMA is used for reception, it must be disabled before
		 * entering low-power mode and re-enabled when exiting from
		 * low-power mode.
		 */
		if (stm32_port->rx_ch) {
			spin_lock_irqsave(&port->lock, flags);
			/* Poll data from the DMA RX buffer if any */
			if (!stm32_usart_rx_dma_pause(stm32_port))
				size += stm32_usart_receive_chars(port, true);
			stm32_usart_rx_dma_terminate(stm32_port);
			uart_unlock_and_check_sysrq_irqrestore(port, flags);
			if (size)
				tty_flip_buffer_push(tport);
		}

		/* Poll data from the RX FIFO if any */
		stm32_usart_receive_chars(port, false);
	} else {
		if (stm32_port->rx_ch) {
			ret = stm32_usart_rx_dma_start_or_resume(port);
			if (ret)
				return ret;
		}
		mctrl_gpio_disable_irq_wake(stm32_port->gpios);
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
	}

	return 0;
}

static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int ret;

	uart_suspend_port(&stm32_usart_driver, port);

	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
		ret = stm32_usart_serial_en_wakeup(port, true);
		if (ret)
			return ret;
	}

	/*
	 * When "no_console_suspend" is enabled, keep the pinctrl default state
	 * and rely on the bootloader stage to restore this state upon resume.
	 * Otherwise, apply the idle or sleep states depending on wakeup
	 * capabilities.
	 */
	if (console_suspend_enabled || !uart_console(port)) {
		if (device_may_wakeup(dev) || device_wakeup_path(dev))
			pinctrl_pm_select_idle_state(dev);
		else
			pinctrl_pm_select_sleep_state(dev);
	}

	return 0;
}

static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int ret;

	pinctrl_pm_select_default_state(dev);

	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
		ret = stm32_usart_serial_en_wakeup(port, false);
		if (ret)
			return ret;
	}

	return uart_resume_port(&stm32_usart_driver, port);
}

static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);

	clk_disable_unprepare(stm32port->clk);

	return 0;
}

static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);

	return clk_prepare_enable(stm32port->clk);
}

static const struct dev_pm_ops stm32_serial_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
			   stm32_usart_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
				stm32_usart_serial_resume)
};

static struct platform_driver stm32_serial_driver = {
	.probe		= stm32_usart_serial_probe,
	.remove		= stm32_usart_serial_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &stm32_serial_pm_ops,
		.of_match_table = of_match_ptr(stm32_match),
	},
};

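/*
 * uart_register_driver() must succeed before the platform driver is
 * registered; if platform_driver_register() then fails, the uart driver is
 * unregistered again so its tty driver is not leaked.
 */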
static int __init stm32_usart_init(void)
{
	static char banner[] __initdata = "STM32 USART driver initialized";
	int ret;

	pr_info("%s\n", banner);

	ret = uart_register_driver(&stm32_usart_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&stm32_serial_driver);
	if (ret)
		uart_unregister_driver(&stm32_usart_driver);

	return ret;
}

static void __exit stm32_usart_exit(void)
{
	platform_driver_unregister(&stm32_serial_driver);
	uart_unregister_driver(&stm32_usart_driver);
}

module_init(stm32_usart_init);
module_exit(stm32_usart_exit);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
MODULE_LICENSE("GPL v2");
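
/*
 * Usage sketch (illustrative note, assuming a standard device-tree boot):
 * when "stdout-path" points at a node matching one of the compatibles
 * declared above, a bare "earlycon" kernel parameter selects the matching
 * early setup helper; the console is later handed over to stm32_console
 * once the corresponding port is probed and added to stm32_usart_driver.
 */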