1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) Maxime Coquelin 2015 4 * Copyright (C) STMicroelectronics SA 2017 5 * Authors: Maxime Coquelin <mcoquelin.stm32@gmail.com> 6 * Gerald Baeza <gerald.baeza@foss.st.com> 7 * Erwan Le Ray <erwan.leray@foss.st.com> 8 * 9 * Inspired by st-asc.c from STMicroelectronics (c) 10 */ 11 12 #include <linux/clk.h> 13 #include <linux/console.h> 14 #include <linux/delay.h> 15 #include <linux/dma-direction.h> 16 #include <linux/dmaengine.h> 17 #include <linux/dma-mapping.h> 18 #include <linux/io.h> 19 #include <linux/iopoll.h> 20 #include <linux/irq.h> 21 #include <linux/module.h> 22 #include <linux/of.h> 23 #include <linux/of_platform.h> 24 #include <linux/pinctrl/consumer.h> 25 #include <linux/platform_device.h> 26 #include <linux/pm_runtime.h> 27 #include <linux/pm_wakeirq.h> 28 #include <linux/serial_core.h> 29 #include <linux/serial.h> 30 #include <linux/spinlock.h> 31 #include <linux/sysrq.h> 32 #include <linux/tty_flip.h> 33 #include <linux/tty.h> 34 35 #include "serial_mctrl_gpio.h" 36 #include "stm32-usart.h" 37 38 39 /* Register offsets */ 40 static struct stm32_usart_info __maybe_unused stm32f4_info = { 41 .ofs = { 42 .isr = 0x00, 43 .rdr = 0x04, 44 .tdr = 0x04, 45 .brr = 0x08, 46 .cr1 = 0x0c, 47 .cr2 = 0x10, 48 .cr3 = 0x14, 49 .gtpr = 0x18, 50 .rtor = UNDEF_REG, 51 .rqr = UNDEF_REG, 52 .icr = UNDEF_REG, 53 }, 54 .cfg = { 55 .uart_enable_bit = 13, 56 .has_7bits_data = false, 57 .fifosize = 1, 58 } 59 }; 60 61 static struct stm32_usart_info __maybe_unused stm32f7_info = { 62 .ofs = { 63 .cr1 = 0x00, 64 .cr2 = 0x04, 65 .cr3 = 0x08, 66 .brr = 0x0c, 67 .gtpr = 0x10, 68 .rtor = 0x14, 69 .rqr = 0x18, 70 .isr = 0x1c, 71 .icr = 0x20, 72 .rdr = 0x24, 73 .tdr = 0x28, 74 }, 75 .cfg = { 76 .uart_enable_bit = 0, 77 .has_7bits_data = true, 78 .has_swap = true, 79 .fifosize = 1, 80 } 81 }; 82 83 static struct stm32_usart_info __maybe_unused stm32h7_info = { 84 .ofs = { 85 .cr1 = 0x00, 86 .cr2 = 0x04, 87 .cr3 = 0x08, 88 .brr = 0x0c, 89 .gtpr = 0x10, 90 .rtor = 0x14, 91 .rqr = 0x18, 92 .isr = 0x1c, 93 .icr = 0x20, 94 .rdr = 0x24, 95 .tdr = 0x28, 96 }, 97 .cfg = { 98 .uart_enable_bit = 0, 99 .has_7bits_data = true, 100 .has_swap = true, 101 .has_wakeup = true, 102 .has_fifo = true, 103 .fifosize = 16, 104 } 105 }; 106 107 static void stm32_usart_stop_tx(struct uart_port *port); 108 static void stm32_usart_transmit_chars(struct uart_port *port); 109 static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch); 110 111 static inline struct stm32_port *to_stm32_port(struct uart_port *port) 112 { 113 return container_of(port, struct stm32_port, port); 114 } 115 116 static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits) 117 { 118 u32 val; 119 120 val = readl_relaxed(port->membase + reg); 121 val |= bits; 122 writel_relaxed(val, port->membase + reg); 123 } 124 125 static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits) 126 { 127 u32 val; 128 129 val = readl_relaxed(port->membase + reg); 130 val &= ~bits; 131 writel_relaxed(val, port->membase + reg); 132 } 133 134 static unsigned int stm32_usart_tx_empty(struct uart_port *port) 135 { 136 struct stm32_port *stm32_port = to_stm32_port(port); 137 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 138 139 if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC) 140 return TIOCSER_TEMT; 141 142 return 0; 143 } 144 145 static void stm32_usart_rs485_rts_enable(struct uart_port *port) 146 { 147 struct stm32_port *stm32_port = 
to_stm32_port(port); 148 struct serial_rs485 *rs485conf = &port->rs485; 149 150 if (stm32_port->hw_flow_control || 151 !(rs485conf->flags & SER_RS485_ENABLED)) 152 return; 153 154 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) { 155 mctrl_gpio_set(stm32_port->gpios, 156 stm32_port->port.mctrl | TIOCM_RTS); 157 } else { 158 mctrl_gpio_set(stm32_port->gpios, 159 stm32_port->port.mctrl & ~TIOCM_RTS); 160 } 161 } 162 163 static void stm32_usart_rs485_rts_disable(struct uart_port *port) 164 { 165 struct stm32_port *stm32_port = to_stm32_port(port); 166 struct serial_rs485 *rs485conf = &port->rs485; 167 168 if (stm32_port->hw_flow_control || 169 !(rs485conf->flags & SER_RS485_ENABLED)) 170 return; 171 172 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) { 173 mctrl_gpio_set(stm32_port->gpios, 174 stm32_port->port.mctrl & ~TIOCM_RTS); 175 } else { 176 mctrl_gpio_set(stm32_port->gpios, 177 stm32_port->port.mctrl | TIOCM_RTS); 178 } 179 } 180 181 static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE, 182 u32 delay_DDE, u32 baud) 183 { 184 u32 rs485_deat_dedt; 185 u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT); 186 bool over8; 187 188 *cr3 |= USART_CR3_DEM; 189 over8 = *cr1 & USART_CR1_OVER8; 190 191 *cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK); 192 193 if (over8) 194 rs485_deat_dedt = delay_ADE * baud * 8; 195 else 196 rs485_deat_dedt = delay_ADE * baud * 16; 197 198 rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000); 199 rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ? 200 rs485_deat_dedt_max : rs485_deat_dedt; 201 rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) & 202 USART_CR1_DEAT_MASK; 203 *cr1 |= rs485_deat_dedt; 204 205 if (over8) 206 rs485_deat_dedt = delay_DDE * baud * 8; 207 else 208 rs485_deat_dedt = delay_DDE * baud * 16; 209 210 rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000); 211 rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ? 
212 rs485_deat_dedt_max : rs485_deat_dedt; 213 rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) & 214 USART_CR1_DEDT_MASK; 215 *cr1 |= rs485_deat_dedt; 216 } 217 218 static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *termios, 219 struct serial_rs485 *rs485conf) 220 { 221 struct stm32_port *stm32_port = to_stm32_port(port); 222 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 223 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 224 u32 usartdiv, baud, cr1, cr3; 225 bool over8; 226 227 stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); 228 229 rs485conf->flags |= SER_RS485_RX_DURING_TX; 230 231 if (rs485conf->flags & SER_RS485_ENABLED) { 232 cr1 = readl_relaxed(port->membase + ofs->cr1); 233 cr3 = readl_relaxed(port->membase + ofs->cr3); 234 usartdiv = readl_relaxed(port->membase + ofs->brr); 235 usartdiv = usartdiv & GENMASK(15, 0); 236 over8 = cr1 & USART_CR1_OVER8; 237 238 if (over8) 239 usartdiv = usartdiv | (usartdiv & GENMASK(4, 0)) 240 << USART_BRR_04_R_SHIFT; 241 242 baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv); 243 stm32_usart_config_reg_rs485(&cr1, &cr3, 244 rs485conf->delay_rts_before_send, 245 rs485conf->delay_rts_after_send, 246 baud); 247 248 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) 249 cr3 &= ~USART_CR3_DEP; 250 else 251 cr3 |= USART_CR3_DEP; 252 253 writel_relaxed(cr3, port->membase + ofs->cr3); 254 writel_relaxed(cr1, port->membase + ofs->cr1); 255 } else { 256 stm32_usart_clr_bits(port, ofs->cr3, 257 USART_CR3_DEM | USART_CR3_DEP); 258 stm32_usart_clr_bits(port, ofs->cr1, 259 USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK); 260 } 261 262 stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); 263 264 /* Adjust RTS polarity in case it's driven in software */ 265 if (stm32_usart_tx_empty(port)) 266 stm32_usart_rs485_rts_disable(port); 267 else 268 stm32_usart_rs485_rts_enable(port); 269 270 return 0; 271 } 272 273 static int stm32_usart_init_rs485(struct uart_port *port, 274 struct platform_device *pdev) 275 { 276 struct serial_rs485 *rs485conf = &port->rs485; 277 278 rs485conf->flags = 0; 279 rs485conf->delay_rts_before_send = 0; 280 rs485conf->delay_rts_after_send = 0; 281 282 if (!pdev->dev.of_node) 283 return -ENODEV; 284 285 return uart_get_rs485_mode(port); 286 } 287 288 static bool stm32_usart_rx_dma_enabled(struct uart_port *port) 289 { 290 struct stm32_port *stm32_port = to_stm32_port(port); 291 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 292 293 if (!stm32_port->rx_ch) 294 return false; 295 296 return !!(readl_relaxed(port->membase + ofs->cr3) & USART_CR3_DMAR); 297 } 298 299 /* Return true when data is pending (in pio mode), and false when no data is pending. 
*/ 300 static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr) 301 { 302 struct stm32_port *stm32_port = to_stm32_port(port); 303 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 304 305 *sr = readl_relaxed(port->membase + ofs->isr); 306 /* Get pending characters in RDR or FIFO */ 307 if (*sr & USART_SR_RXNE) { 308 /* Get all pending characters from the RDR or the FIFO when using interrupts */ 309 if (!stm32_usart_rx_dma_enabled(port)) 310 return true; 311 312 /* Handle only RX data errors when using DMA */ 313 if (*sr & USART_SR_ERR_MASK) 314 return true; 315 } 316 317 return false; 318 } 319 320 static unsigned long stm32_usart_get_char_pio(struct uart_port *port) 321 { 322 struct stm32_port *stm32_port = to_stm32_port(port); 323 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 324 unsigned long c; 325 326 c = readl_relaxed(port->membase + ofs->rdr); 327 /* Apply RDR data mask */ 328 c &= stm32_port->rdr_mask; 329 330 return c; 331 } 332 333 static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port) 334 { 335 struct stm32_port *stm32_port = to_stm32_port(port); 336 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 337 unsigned long c; 338 unsigned int size = 0; 339 u32 sr; 340 char flag; 341 342 while (stm32_usart_pending_rx_pio(port, &sr)) { 343 sr |= USART_SR_DUMMY_RX; 344 flag = TTY_NORMAL; 345 346 /* 347 * Status bits has to be cleared before reading the RDR: 348 * In FIFO mode, reading the RDR will pop the next data 349 * (if any) along with its status bits into the SR. 350 * Not doing so leads to misalignement between RDR and SR, 351 * and clear status bits of the next rx data. 352 * 353 * Clear errors flags for stm32f7 and stm32h7 compatible 354 * devices. On stm32f4 compatible devices, the error bit is 355 * cleared by the sequence [read SR - read DR]. 356 */ 357 if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG) 358 writel_relaxed(sr & USART_SR_ERR_MASK, 359 port->membase + ofs->icr); 360 361 c = stm32_usart_get_char_pio(port); 362 port->icount.rx++; 363 size++; 364 if (sr & USART_SR_ERR_MASK) { 365 if (sr & USART_SR_ORE) { 366 port->icount.overrun++; 367 } else if (sr & USART_SR_PE) { 368 port->icount.parity++; 369 } else if (sr & USART_SR_FE) { 370 /* Break detection if character is null */ 371 if (!c) { 372 port->icount.brk++; 373 if (uart_handle_break(port)) 374 continue; 375 } else { 376 port->icount.frame++; 377 } 378 } 379 380 sr &= port->read_status_mask; 381 382 if (sr & USART_SR_PE) { 383 flag = TTY_PARITY; 384 } else if (sr & USART_SR_FE) { 385 if (!c) 386 flag = TTY_BREAK; 387 else 388 flag = TTY_FRAME; 389 } 390 } 391 392 if (uart_prepare_sysrq_char(port, c)) 393 continue; 394 uart_insert_char(port, sr, USART_SR_ORE, c, flag); 395 } 396 397 return size; 398 } 399 400 static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size) 401 { 402 struct stm32_port *stm32_port = to_stm32_port(port); 403 struct tty_port *ttyport = &stm32_port->port.state->port; 404 unsigned char *dma_start; 405 int dma_count, i; 406 407 dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res); 408 409 /* 410 * Apply rdr_mask on buffer in order to mask parity bit. 411 * This loop is useless in cs8 mode because DMA copies only 412 * 8 bits and already ignores parity bit. 
413 */ 414 if (!(stm32_port->rdr_mask == (BIT(8) - 1))) 415 for (i = 0; i < dma_size; i++) 416 *(dma_start + i) &= stm32_port->rdr_mask; 417 418 dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size); 419 port->icount.rx += dma_count; 420 if (dma_count != dma_size) 421 port->icount.buf_overrun++; 422 stm32_port->last_res -= dma_count; 423 if (stm32_port->last_res == 0) 424 stm32_port->last_res = RX_BUF_L; 425 } 426 427 static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port) 428 { 429 struct stm32_port *stm32_port = to_stm32_port(port); 430 unsigned int dma_size, size = 0; 431 432 /* DMA buffer is configured in cyclic mode and handles the rollback of the buffer. */ 433 if (stm32_port->rx_dma_state.residue > stm32_port->last_res) { 434 /* Conditional first part: from last_res to end of DMA buffer */ 435 dma_size = stm32_port->last_res; 436 stm32_usart_push_buffer_dma(port, dma_size); 437 size = dma_size; 438 } 439 440 dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue; 441 stm32_usart_push_buffer_dma(port, dma_size); 442 size += dma_size; 443 444 return size; 445 } 446 447 static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force_dma_flush) 448 { 449 struct stm32_port *stm32_port = to_stm32_port(port); 450 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 451 enum dma_status rx_dma_status; 452 u32 sr; 453 unsigned int size = 0; 454 455 if (stm32_usart_rx_dma_enabled(port) || force_dma_flush) { 456 rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch, 457 stm32_port->rx_ch->cookie, 458 &stm32_port->rx_dma_state); 459 if (rx_dma_status == DMA_IN_PROGRESS) { 460 /* Empty DMA buffer */ 461 size = stm32_usart_receive_chars_dma(port); 462 sr = readl_relaxed(port->membase + ofs->isr); 463 if (sr & USART_SR_ERR_MASK) { 464 /* Disable DMA request line */ 465 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR); 466 467 /* Switch to PIO mode to handle the errors */ 468 size += stm32_usart_receive_chars_pio(port); 469 470 /* Switch back to DMA mode */ 471 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR); 472 } 473 } else { 474 /* Disable RX DMA */ 475 dmaengine_terminate_async(stm32_port->rx_ch); 476 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR); 477 /* Fall back to interrupt mode */ 478 dev_dbg(port->dev, "DMA error, fallback to irq mode\n"); 479 size = stm32_usart_receive_chars_pio(port); 480 } 481 } else { 482 size = stm32_usart_receive_chars_pio(port); 483 } 484 485 return size; 486 } 487 488 static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port) 489 { 490 dmaengine_terminate_async(stm32_port->tx_ch); 491 stm32_port->tx_dma_busy = false; 492 } 493 494 static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port) 495 { 496 /* 497 * We cannot use the function "dmaengine_tx_status" to know the 498 * status of DMA. This function does not show if the "dma complete" 499 * callback of the DMA transaction has been called. So we prefer 500 * to use "tx_dma_busy" flag to prevent dual DMA transaction at the 501 * same time. 
502 */ 503 return stm32_port->tx_dma_busy; 504 } 505 506 static bool stm32_usart_tx_dma_enabled(struct stm32_port *stm32_port) 507 { 508 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 509 510 return !!(readl_relaxed(stm32_port->port.membase + ofs->cr3) & USART_CR3_DMAT); 511 } 512 513 static void stm32_usart_tx_dma_complete(void *arg) 514 { 515 struct uart_port *port = arg; 516 struct stm32_port *stm32port = to_stm32_port(port); 517 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 518 unsigned long flags; 519 520 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 521 stm32_usart_tx_dma_terminate(stm32port); 522 523 /* Let's see if we have pending data to send */ 524 spin_lock_irqsave(&port->lock, flags); 525 stm32_usart_transmit_chars(port); 526 spin_unlock_irqrestore(&port->lock, flags); 527 } 528 529 static void stm32_usart_tx_interrupt_enable(struct uart_port *port) 530 { 531 struct stm32_port *stm32_port = to_stm32_port(port); 532 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 533 534 /* 535 * Enables TX FIFO threashold irq when FIFO is enabled, 536 * or TX empty irq when FIFO is disabled 537 */ 538 if (stm32_port->fifoen && stm32_port->txftcfg >= 0) 539 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE); 540 else 541 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE); 542 } 543 544 static void stm32_usart_tc_interrupt_enable(struct uart_port *port) 545 { 546 struct stm32_port *stm32_port = to_stm32_port(port); 547 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 548 549 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE); 550 } 551 552 static void stm32_usart_rx_dma_complete(void *arg) 553 { 554 struct uart_port *port = arg; 555 struct tty_port *tport = &port->state->port; 556 unsigned int size; 557 unsigned long flags; 558 559 spin_lock_irqsave(&port->lock, flags); 560 size = stm32_usart_receive_chars(port, false); 561 uart_unlock_and_check_sysrq_irqrestore(port, flags); 562 if (size) 563 tty_flip_buffer_push(tport); 564 } 565 566 static void stm32_usart_tx_interrupt_disable(struct uart_port *port) 567 { 568 struct stm32_port *stm32_port = to_stm32_port(port); 569 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 570 571 if (stm32_port->fifoen && stm32_port->txftcfg >= 0) 572 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE); 573 else 574 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE); 575 } 576 577 static void stm32_usart_tc_interrupt_disable(struct uart_port *port) 578 { 579 struct stm32_port *stm32_port = to_stm32_port(port); 580 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 581 582 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE); 583 } 584 585 static void stm32_usart_transmit_chars_pio(struct uart_port *port) 586 { 587 struct stm32_port *stm32_port = to_stm32_port(port); 588 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 589 struct circ_buf *xmit = &port->state->xmit; 590 591 if (stm32_usart_tx_dma_enabled(stm32_port)) 592 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 593 594 while (!uart_circ_empty(xmit)) { 595 /* Check that TDR is empty before filling FIFO */ 596 if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE)) 597 break; 598 writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr); 599 uart_xmit_advance(port, 1); 600 } 601 602 /* rely on TXE irq (mask or unmask) for sending remaining data */ 603 if (uart_circ_empty(xmit)) 604 stm32_usart_tx_interrupt_disable(port); 605 else 606 
stm32_usart_tx_interrupt_enable(port); 607 } 608 609 static void stm32_usart_transmit_chars_dma(struct uart_port *port) 610 { 611 struct stm32_port *stm32port = to_stm32_port(port); 612 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 613 struct circ_buf *xmit = &port->state->xmit; 614 struct dma_async_tx_descriptor *desc = NULL; 615 unsigned int count; 616 617 if (stm32_usart_tx_dma_started(stm32port)) { 618 if (!stm32_usart_tx_dma_enabled(stm32port)) 619 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT); 620 return; 621 } 622 623 count = uart_circ_chars_pending(xmit); 624 625 if (count > TX_BUF_L) 626 count = TX_BUF_L; 627 628 if (xmit->tail < xmit->head) { 629 memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count); 630 } else { 631 size_t one = UART_XMIT_SIZE - xmit->tail; 632 size_t two; 633 634 if (one > count) 635 one = count; 636 two = count - one; 637 638 memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one); 639 if (two) 640 memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two); 641 } 642 643 desc = dmaengine_prep_slave_single(stm32port->tx_ch, 644 stm32port->tx_dma_buf, 645 count, 646 DMA_MEM_TO_DEV, 647 DMA_PREP_INTERRUPT); 648 649 if (!desc) 650 goto fallback_err; 651 652 /* 653 * Set "tx_dma_busy" flag. This flag will be released when 654 * dmaengine_terminate_async will be called. This flag helps 655 * transmit_chars_dma not to start another DMA transaction 656 * if the callback of the previous is not yet called. 657 */ 658 stm32port->tx_dma_busy = true; 659 660 desc->callback = stm32_usart_tx_dma_complete; 661 desc->callback_param = port; 662 663 /* Push current DMA TX transaction in the pending queue */ 664 if (dma_submit_error(dmaengine_submit(desc))) { 665 /* dma no yet started, safe to free resources */ 666 stm32_usart_tx_dma_terminate(stm32port); 667 goto fallback_err; 668 } 669 670 /* Issue pending DMA TX requests */ 671 dma_async_issue_pending(stm32port->tx_ch); 672 673 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT); 674 675 uart_xmit_advance(port, count); 676 677 return; 678 679 fallback_err: 680 stm32_usart_transmit_chars_pio(port); 681 } 682 683 static void stm32_usart_transmit_chars(struct uart_port *port) 684 { 685 struct stm32_port *stm32_port = to_stm32_port(port); 686 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 687 struct circ_buf *xmit = &port->state->xmit; 688 u32 isr; 689 int ret; 690 691 if (!stm32_port->hw_flow_control && 692 port->rs485.flags & SER_RS485_ENABLED) { 693 stm32_port->txdone = false; 694 stm32_usart_tc_interrupt_disable(port); 695 stm32_usart_rs485_rts_enable(port); 696 } 697 698 if (port->x_char) { 699 if (stm32_usart_tx_dma_started(stm32_port) && 700 stm32_usart_tx_dma_enabled(stm32_port)) 701 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 702 703 /* Check that TDR is empty before filling FIFO */ 704 ret = 705 readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, 706 isr, 707 (isr & USART_SR_TXE), 708 10, 1000); 709 if (ret) 710 dev_warn(port->dev, "1 character may be erased\n"); 711 712 writel_relaxed(port->x_char, port->membase + ofs->tdr); 713 port->x_char = 0; 714 port->icount.tx++; 715 if (stm32_usart_tx_dma_started(stm32_port)) 716 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT); 717 return; 718 } 719 720 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { 721 stm32_usart_tx_interrupt_disable(port); 722 return; 723 } 724 725 if (ofs->icr == UNDEF_REG) 726 stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC); 727 else 728 writel_relaxed(USART_ICR_TCCF, port->membase 
+ ofs->icr); 729 730 if (stm32_port->tx_ch) 731 stm32_usart_transmit_chars_dma(port); 732 else 733 stm32_usart_transmit_chars_pio(port); 734 735 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 736 uart_write_wakeup(port); 737 738 if (uart_circ_empty(xmit)) { 739 stm32_usart_tx_interrupt_disable(port); 740 if (!stm32_port->hw_flow_control && 741 port->rs485.flags & SER_RS485_ENABLED) { 742 stm32_port->txdone = true; 743 stm32_usart_tc_interrupt_enable(port); 744 } 745 } 746 } 747 748 static irqreturn_t stm32_usart_interrupt(int irq, void *ptr) 749 { 750 struct uart_port *port = ptr; 751 struct tty_port *tport = &port->state->port; 752 struct stm32_port *stm32_port = to_stm32_port(port); 753 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 754 u32 sr; 755 unsigned int size; 756 757 sr = readl_relaxed(port->membase + ofs->isr); 758 759 if (!stm32_port->hw_flow_control && 760 port->rs485.flags & SER_RS485_ENABLED && 761 (sr & USART_SR_TC)) { 762 stm32_usart_tc_interrupt_disable(port); 763 stm32_usart_rs485_rts_disable(port); 764 } 765 766 if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) 767 writel_relaxed(USART_ICR_RTOCF, 768 port->membase + ofs->icr); 769 770 if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) { 771 /* Clear wake up flag and disable wake up interrupt */ 772 writel_relaxed(USART_ICR_WUCF, 773 port->membase + ofs->icr); 774 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE); 775 if (irqd_is_wakeup_set(irq_get_irq_data(port->irq))) 776 pm_wakeup_event(tport->tty->dev, 0); 777 } 778 779 /* 780 * rx errors in dma mode has to be handled ASAP to avoid overrun as the DMA request 781 * line has been masked by HW and rx data are stacking in FIFO. 782 */ 783 if (!stm32_port->throttled) { 784 if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_enabled(port)) || 785 ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_enabled(port))) { 786 spin_lock(&port->lock); 787 size = stm32_usart_receive_chars(port, false); 788 uart_unlock_and_check_sysrq(port); 789 if (size) 790 tty_flip_buffer_push(tport); 791 } 792 } 793 794 if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) { 795 spin_lock(&port->lock); 796 stm32_usart_transmit_chars(port); 797 spin_unlock(&port->lock); 798 } 799 800 /* Receiver timeout irq for DMA RX */ 801 if (stm32_usart_rx_dma_enabled(port) && !stm32_port->throttled) { 802 spin_lock(&port->lock); 803 size = stm32_usart_receive_chars(port, false); 804 uart_unlock_and_check_sysrq_irqrestore(port, flags); 805 if (size) 806 tty_flip_buffer_push(tport); 807 } 808 809 return IRQ_HANDLED; 810 } 811 812 static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl) 813 { 814 struct stm32_port *stm32_port = to_stm32_port(port); 815 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 816 817 if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS)) 818 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE); 819 else 820 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE); 821 822 mctrl_gpio_set(stm32_port->gpios, mctrl); 823 } 824 825 static unsigned int stm32_usart_get_mctrl(struct uart_port *port) 826 { 827 struct stm32_port *stm32_port = to_stm32_port(port); 828 unsigned int ret; 829 830 /* This routine is used to get signals of: DCD, DSR, RI, and CTS */ 831 ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; 832 833 return mctrl_gpio_get(stm32_port->gpios, &ret); 834 } 835 836 static void stm32_usart_enable_ms(struct uart_port *port) 837 { 838 mctrl_gpio_enable_ms(to_stm32_port(port)->gpios); 839 } 840 841 static void stm32_usart_disable_ms(struct 
uart_port *port) 842 { 843 mctrl_gpio_disable_ms(to_stm32_port(port)->gpios); 844 } 845 846 /* Transmit stop */ 847 static void stm32_usart_stop_tx(struct uart_port *port) 848 { 849 struct stm32_port *stm32_port = to_stm32_port(port); 850 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 851 852 stm32_usart_tx_interrupt_disable(port); 853 if (stm32_usart_tx_dma_started(stm32_port) && stm32_usart_tx_dma_enabled(stm32_port)) 854 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 855 856 stm32_usart_rs485_rts_disable(port); 857 } 858 859 /* There are probably characters waiting to be transmitted. */ 860 static void stm32_usart_start_tx(struct uart_port *port) 861 { 862 struct circ_buf *xmit = &port->state->xmit; 863 864 if (uart_circ_empty(xmit) && !port->x_char) { 865 stm32_usart_rs485_rts_disable(port); 866 return; 867 } 868 869 stm32_usart_rs485_rts_enable(port); 870 871 stm32_usart_transmit_chars(port); 872 } 873 874 /* Flush the transmit buffer. */ 875 static void stm32_usart_flush_buffer(struct uart_port *port) 876 { 877 struct stm32_port *stm32_port = to_stm32_port(port); 878 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 879 880 if (stm32_port->tx_ch) { 881 stm32_usart_tx_dma_terminate(stm32_port); 882 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 883 } 884 } 885 886 /* Throttle the remote when input buffer is about to overflow. */ 887 static void stm32_usart_throttle(struct uart_port *port) 888 { 889 struct stm32_port *stm32_port = to_stm32_port(port); 890 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 891 unsigned long flags; 892 893 spin_lock_irqsave(&port->lock, flags); 894 895 /* 896 * Disable DMA request line if enabled, so the RX data gets queued into the FIFO. 897 * Hardware flow control is triggered when RX FIFO is full. 898 */ 899 if (stm32_usart_rx_dma_enabled(port)) 900 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR); 901 902 stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq); 903 if (stm32_port->cr3_irq) 904 stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq); 905 906 stm32_port->throttled = true; 907 spin_unlock_irqrestore(&port->lock, flags); 908 } 909 910 /* Unthrottle the remote, the input buffer can now accept data. */ 911 static void stm32_usart_unthrottle(struct uart_port *port) 912 { 913 struct stm32_port *stm32_port = to_stm32_port(port); 914 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 915 unsigned long flags; 916 917 spin_lock_irqsave(&port->lock, flags); 918 stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq); 919 if (stm32_port->cr3_irq) 920 stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq); 921 922 /* 923 * Switch back to DMA mode (re-enable DMA request line). 924 * Hardware flow control is stopped when FIFO is not full any more. 925 */ 926 if (stm32_port->rx_ch) 927 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR); 928 929 stm32_port->throttled = false; 930 spin_unlock_irqrestore(&port->lock, flags); 931 } 932 933 /* Receive stop */ 934 static void stm32_usart_stop_rx(struct uart_port *port) 935 { 936 struct stm32_port *stm32_port = to_stm32_port(port); 937 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 938 939 /* Disable DMA request line. 
*/ 940 if (stm32_port->rx_ch) 941 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR); 942 943 stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq); 944 if (stm32_port->cr3_irq) 945 stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq); 946 } 947 948 /* Handle breaks - ignored by us */ 949 static void stm32_usart_break_ctl(struct uart_port *port, int break_state) 950 { 951 } 952 953 static int stm32_usart_start_rx_dma_cyclic(struct uart_port *port) 954 { 955 struct stm32_port *stm32_port = to_stm32_port(port); 956 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 957 struct dma_async_tx_descriptor *desc; 958 int ret; 959 960 stm32_port->last_res = RX_BUF_L; 961 /* Prepare a DMA cyclic transaction */ 962 desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch, 963 stm32_port->rx_dma_buf, 964 RX_BUF_L, RX_BUF_P, 965 DMA_DEV_TO_MEM, 966 DMA_PREP_INTERRUPT); 967 if (!desc) { 968 dev_err(port->dev, "rx dma prep cyclic failed\n"); 969 return -ENODEV; 970 } 971 972 desc->callback = stm32_usart_rx_dma_complete; 973 desc->callback_param = port; 974 975 /* Push current DMA transaction in the pending queue */ 976 ret = dma_submit_error(dmaengine_submit(desc)); 977 if (ret) { 978 dmaengine_terminate_sync(stm32_port->rx_ch); 979 return ret; 980 } 981 982 /* Issue pending DMA requests */ 983 dma_async_issue_pending(stm32_port->rx_ch); 984 985 /* 986 * DMA request line not re-enabled at resume when port is throttled. 987 * It will be re-enabled by unthrottle ops. 988 */ 989 if (!stm32_port->throttled) 990 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR); 991 992 return 0; 993 } 994 995 static int stm32_usart_startup(struct uart_port *port) 996 { 997 struct stm32_port *stm32_port = to_stm32_port(port); 998 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 999 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 1000 const char *name = to_platform_device(port->dev)->name; 1001 u32 val; 1002 int ret; 1003 1004 ret = request_irq(port->irq, stm32_usart_interrupt, 1005 IRQF_NO_SUSPEND, name, port); 1006 if (ret) 1007 return ret; 1008 1009 if (stm32_port->swap) { 1010 val = readl_relaxed(port->membase + ofs->cr2); 1011 val |= USART_CR2_SWAP; 1012 writel_relaxed(val, port->membase + ofs->cr2); 1013 } 1014 1015 /* RX FIFO Flush */ 1016 if (ofs->rqr != UNDEF_REG) 1017 writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr); 1018 1019 if (stm32_port->rx_ch) { 1020 ret = stm32_usart_start_rx_dma_cyclic(port); 1021 if (ret) { 1022 free_irq(port->irq, port); 1023 return ret; 1024 } 1025 } 1026 1027 /* RX enabling */ 1028 val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit); 1029 stm32_usart_set_bits(port, ofs->cr1, val); 1030 1031 return 0; 1032 } 1033 1034 static void stm32_usart_shutdown(struct uart_port *port) 1035 { 1036 struct stm32_port *stm32_port = to_stm32_port(port); 1037 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1038 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 1039 u32 val, isr; 1040 int ret; 1041 1042 if (stm32_usart_tx_dma_enabled(stm32_port)) 1043 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 1044 1045 if (stm32_usart_tx_dma_started(stm32_port)) 1046 stm32_usart_tx_dma_terminate(stm32_port); 1047 1048 /* Disable modem control interrupts */ 1049 stm32_usart_disable_ms(port); 1050 1051 val = USART_CR1_TXEIE | USART_CR1_TE; 1052 val |= stm32_port->cr1_irq | USART_CR1_RE; 1053 val |= BIT(cfg->uart_enable_bit); 1054 if (stm32_port->fifoen) 1055 val |= USART_CR1_FIFOEN; 1056 1057 ret = 
readl_relaxed_poll_timeout(port->membase + ofs->isr, 1058 isr, (isr & USART_SR_TC), 1059 10, 100000); 1060 1061 /* Send the TC error message only when ISR_TC is not set */ 1062 if (ret) 1063 dev_err(port->dev, "Transmission is not complete\n"); 1064 1065 /* Disable RX DMA. */ 1066 if (stm32_port->rx_ch) 1067 dmaengine_terminate_async(stm32_port->rx_ch); 1068 1069 /* flush RX & TX FIFO */ 1070 if (ofs->rqr != UNDEF_REG) 1071 writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ, 1072 port->membase + ofs->rqr); 1073 1074 stm32_usart_clr_bits(port, ofs->cr1, val); 1075 1076 free_irq(port->irq, port); 1077 } 1078 1079 static void stm32_usart_set_termios(struct uart_port *port, 1080 struct ktermios *termios, 1081 const struct ktermios *old) 1082 { 1083 struct stm32_port *stm32_port = to_stm32_port(port); 1084 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1085 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 1086 struct serial_rs485 *rs485conf = &port->rs485; 1087 unsigned int baud, bits; 1088 u32 usartdiv, mantissa, fraction, oversampling; 1089 tcflag_t cflag = termios->c_cflag; 1090 u32 cr1, cr2, cr3, isr; 1091 unsigned long flags; 1092 int ret; 1093 1094 if (!stm32_port->hw_flow_control) 1095 cflag &= ~CRTSCTS; 1096 1097 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8); 1098 1099 spin_lock_irqsave(&port->lock, flags); 1100 1101 ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, 1102 isr, 1103 (isr & USART_SR_TC), 1104 10, 100000); 1105 1106 /* Send the TC error message only when ISR_TC is not set. */ 1107 if (ret) 1108 dev_err(port->dev, "Transmission is not complete\n"); 1109 1110 /* Stop serial port and reset value */ 1111 writel_relaxed(0, port->membase + ofs->cr1); 1112 1113 /* flush RX & TX FIFO */ 1114 if (ofs->rqr != UNDEF_REG) 1115 writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ, 1116 port->membase + ofs->rqr); 1117 1118 cr1 = USART_CR1_TE | USART_CR1_RE; 1119 if (stm32_port->fifoen) 1120 cr1 |= USART_CR1_FIFOEN; 1121 cr2 = stm32_port->swap ? USART_CR2_SWAP : 0; 1122 1123 /* Tx and RX FIFO configuration */ 1124 cr3 = readl_relaxed(port->membase + ofs->cr3); 1125 cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE; 1126 if (stm32_port->fifoen) { 1127 if (stm32_port->txftcfg >= 0) 1128 cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT; 1129 if (stm32_port->rxftcfg >= 0) 1130 cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT; 1131 } 1132 1133 if (cflag & CSTOPB) 1134 cr2 |= USART_CR2_STOP_2B; 1135 1136 bits = tty_get_char_size(cflag); 1137 stm32_port->rdr_mask = (BIT(bits) - 1); 1138 1139 if (cflag & PARENB) { 1140 bits++; 1141 cr1 |= USART_CR1_PCE; 1142 } 1143 1144 /* 1145 * Word length configuration: 1146 * CS8 + parity, 9 bits word aka [M1:M0] = 0b01 1147 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10 1148 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00 1149 * M0 and M1 already cleared by cr1 initialization. 
1150 */ 1151 if (bits == 9) { 1152 cr1 |= USART_CR1_M0; 1153 } else if ((bits == 7) && cfg->has_7bits_data) { 1154 cr1 |= USART_CR1_M1; 1155 } else if (bits != 8) { 1156 dev_dbg(port->dev, "Unsupported data bits config: %u bits\n" 1157 , bits); 1158 cflag &= ~CSIZE; 1159 cflag |= CS8; 1160 termios->c_cflag = cflag; 1161 bits = 8; 1162 if (cflag & PARENB) { 1163 bits++; 1164 cr1 |= USART_CR1_M0; 1165 } 1166 } 1167 1168 if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch || 1169 (stm32_port->fifoen && 1170 stm32_port->rxftcfg >= 0))) { 1171 if (cflag & CSTOPB) 1172 bits = bits + 3; /* 1 start bit + 2 stop bits */ 1173 else 1174 bits = bits + 2; /* 1 start bit + 1 stop bit */ 1175 1176 /* RX timeout irq to occur after last stop bit + bits */ 1177 stm32_port->cr1_irq = USART_CR1_RTOIE; 1178 writel_relaxed(bits, port->membase + ofs->rtor); 1179 cr2 |= USART_CR2_RTOEN; 1180 /* 1181 * Enable fifo threshold irq in two cases, either when there is no DMA, or when 1182 * wake up over usart, from low power until the DMA gets re-enabled by resume. 1183 */ 1184 stm32_port->cr3_irq = USART_CR3_RXFTIE; 1185 } 1186 1187 cr1 |= stm32_port->cr1_irq; 1188 cr3 |= stm32_port->cr3_irq; 1189 1190 if (cflag & PARODD) 1191 cr1 |= USART_CR1_PS; 1192 1193 port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); 1194 if (cflag & CRTSCTS) { 1195 port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; 1196 cr3 |= USART_CR3_CTSE | USART_CR3_RTSE; 1197 } 1198 1199 usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud); 1200 1201 /* 1202 * The USART supports 16 or 8 times oversampling. 1203 * By default we prefer 16 times oversampling, so that the receiver 1204 * has a better tolerance to clock deviations. 1205 * 8 times oversampling is only used to achieve higher speeds. 1206 */ 1207 if (usartdiv < 16) { 1208 oversampling = 8; 1209 cr1 |= USART_CR1_OVER8; 1210 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8); 1211 } else { 1212 oversampling = 16; 1213 cr1 &= ~USART_CR1_OVER8; 1214 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8); 1215 } 1216 1217 mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT; 1218 fraction = usartdiv % oversampling; 1219 writel_relaxed(mantissa | fraction, port->membase + ofs->brr); 1220 1221 uart_update_timeout(port, cflag, baud); 1222 1223 port->read_status_mask = USART_SR_ORE; 1224 if (termios->c_iflag & INPCK) 1225 port->read_status_mask |= USART_SR_PE | USART_SR_FE; 1226 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) 1227 port->read_status_mask |= USART_SR_FE; 1228 1229 /* Characters to ignore */ 1230 port->ignore_status_mask = 0; 1231 if (termios->c_iflag & IGNPAR) 1232 port->ignore_status_mask = USART_SR_PE | USART_SR_FE; 1233 if (termios->c_iflag & IGNBRK) { 1234 port->ignore_status_mask |= USART_SR_FE; 1235 /* 1236 * If we're ignoring parity and break indicators, 1237 * ignore overruns too (for real raw support). 1238 */ 1239 if (termios->c_iflag & IGNPAR) 1240 port->ignore_status_mask |= USART_SR_ORE; 1241 } 1242 1243 /* Ignore all characters if CREAD is not set */ 1244 if ((termios->c_cflag & CREAD) == 0) 1245 port->ignore_status_mask |= USART_SR_DUMMY_RX; 1246 1247 if (stm32_port->rx_ch) { 1248 /* 1249 * Setup DMA to collect only valid data and enable error irqs. 1250 * This also enables break reception when using DMA. 
1251 */ 1252 cr1 |= USART_CR1_PEIE; 1253 cr3 |= USART_CR3_EIE; 1254 cr3 |= USART_CR3_DMAR; 1255 cr3 |= USART_CR3_DDRE; 1256 } 1257 1258 if (rs485conf->flags & SER_RS485_ENABLED) { 1259 stm32_usart_config_reg_rs485(&cr1, &cr3, 1260 rs485conf->delay_rts_before_send, 1261 rs485conf->delay_rts_after_send, 1262 baud); 1263 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) { 1264 cr3 &= ~USART_CR3_DEP; 1265 rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND; 1266 } else { 1267 cr3 |= USART_CR3_DEP; 1268 rs485conf->flags |= SER_RS485_RTS_AFTER_SEND; 1269 } 1270 1271 } else { 1272 cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP); 1273 cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK); 1274 } 1275 1276 /* Configure wake up from low power on start bit detection */ 1277 if (stm32_port->wakeup_src) { 1278 cr3 &= ~USART_CR3_WUS_MASK; 1279 cr3 |= USART_CR3_WUS_START_BIT; 1280 } 1281 1282 writel_relaxed(cr3, port->membase + ofs->cr3); 1283 writel_relaxed(cr2, port->membase + ofs->cr2); 1284 writel_relaxed(cr1, port->membase + ofs->cr1); 1285 1286 stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); 1287 spin_unlock_irqrestore(&port->lock, flags); 1288 1289 /* Handle modem control interrupts */ 1290 if (UART_ENABLE_MS(port, termios->c_cflag)) 1291 stm32_usart_enable_ms(port); 1292 else 1293 stm32_usart_disable_ms(port); 1294 } 1295 1296 static const char *stm32_usart_type(struct uart_port *port) 1297 { 1298 return (port->type == PORT_STM32) ? DRIVER_NAME : NULL; 1299 } 1300 1301 static void stm32_usart_release_port(struct uart_port *port) 1302 { 1303 } 1304 1305 static int stm32_usart_request_port(struct uart_port *port) 1306 { 1307 return 0; 1308 } 1309 1310 static void stm32_usart_config_port(struct uart_port *port, int flags) 1311 { 1312 if (flags & UART_CONFIG_TYPE) 1313 port->type = PORT_STM32; 1314 } 1315 1316 static int 1317 stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser) 1318 { 1319 /* No user changeable parameters */ 1320 return -EINVAL; 1321 } 1322 1323 static void stm32_usart_pm(struct uart_port *port, unsigned int state, 1324 unsigned int oldstate) 1325 { 1326 struct stm32_port *stm32port = container_of(port, 1327 struct stm32_port, port); 1328 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 1329 const struct stm32_usart_config *cfg = &stm32port->info->cfg; 1330 unsigned long flags; 1331 1332 switch (state) { 1333 case UART_PM_STATE_ON: 1334 pm_runtime_get_sync(port->dev); 1335 break; 1336 case UART_PM_STATE_OFF: 1337 spin_lock_irqsave(&port->lock, flags); 1338 stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); 1339 spin_unlock_irqrestore(&port->lock, flags); 1340 pm_runtime_put_sync(port->dev); 1341 break; 1342 } 1343 } 1344 1345 #if defined(CONFIG_CONSOLE_POLL) 1346 1347 /* Callbacks for characters polling in debug context (i.e. KGDB). 
*/ 1348 static int stm32_usart_poll_init(struct uart_port *port) 1349 { 1350 struct stm32_port *stm32_port = to_stm32_port(port); 1351 1352 return clk_prepare_enable(stm32_port->clk); 1353 } 1354 1355 static int stm32_usart_poll_get_char(struct uart_port *port) 1356 { 1357 struct stm32_port *stm32_port = to_stm32_port(port); 1358 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1359 1360 if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE)) 1361 return NO_POLL_CHAR; 1362 1363 return readl_relaxed(port->membase + ofs->rdr) & stm32_port->rdr_mask; 1364 } 1365 1366 static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch) 1367 { 1368 stm32_usart_console_putchar(port, ch); 1369 } 1370 #endif /* CONFIG_CONSOLE_POLL */ 1371 1372 static const struct uart_ops stm32_uart_ops = { 1373 .tx_empty = stm32_usart_tx_empty, 1374 .set_mctrl = stm32_usart_set_mctrl, 1375 .get_mctrl = stm32_usart_get_mctrl, 1376 .stop_tx = stm32_usart_stop_tx, 1377 .start_tx = stm32_usart_start_tx, 1378 .throttle = stm32_usart_throttle, 1379 .unthrottle = stm32_usart_unthrottle, 1380 .stop_rx = stm32_usart_stop_rx, 1381 .enable_ms = stm32_usart_enable_ms, 1382 .break_ctl = stm32_usart_break_ctl, 1383 .startup = stm32_usart_startup, 1384 .shutdown = stm32_usart_shutdown, 1385 .flush_buffer = stm32_usart_flush_buffer, 1386 .set_termios = stm32_usart_set_termios, 1387 .pm = stm32_usart_pm, 1388 .type = stm32_usart_type, 1389 .release_port = stm32_usart_release_port, 1390 .request_port = stm32_usart_request_port, 1391 .config_port = stm32_usart_config_port, 1392 .verify_port = stm32_usart_verify_port, 1393 #if defined(CONFIG_CONSOLE_POLL) 1394 .poll_init = stm32_usart_poll_init, 1395 .poll_get_char = stm32_usart_poll_get_char, 1396 .poll_put_char = stm32_usart_poll_put_char, 1397 #endif /* CONFIG_CONSOLE_POLL */ 1398 }; 1399 1400 /* 1401 * STM32H7 RX & TX FIFO threshold configuration (CR3 RXFTCFG / TXFTCFG) 1402 * Note: 1 isn't a valid value in RXFTCFG / TXFTCFG. In this case, 1403 * RXNEIE / TXEIE can be used instead of threshold irqs: RXFTIE / TXFTIE. 1404 * So, RXFTCFG / TXFTCFG bitfields values are encoded as array index + 1. 
1405 */ 1406 static const u32 stm32h7_usart_fifo_thresh_cfg[] = { 1, 2, 4, 8, 12, 14, 16 }; 1407 1408 static void stm32_usart_get_ftcfg(struct platform_device *pdev, const char *p, 1409 int *ftcfg) 1410 { 1411 u32 bytes, i; 1412 1413 /* DT option to get RX & TX FIFO threshold (default to 8 bytes) */ 1414 if (of_property_read_u32(pdev->dev.of_node, p, &bytes)) 1415 bytes = 8; 1416 1417 for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++) 1418 if (stm32h7_usart_fifo_thresh_cfg[i] >= bytes) 1419 break; 1420 if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg)) 1421 i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1; 1422 1423 dev_dbg(&pdev->dev, "%s set to %d bytes\n", p, 1424 stm32h7_usart_fifo_thresh_cfg[i]); 1425 1426 /* Provide FIFO threshold ftcfg (1 is invalid: threshold irq unused) */ 1427 if (i) 1428 *ftcfg = i - 1; 1429 else 1430 *ftcfg = -EINVAL; 1431 } 1432 1433 static void stm32_usart_deinit_port(struct stm32_port *stm32port) 1434 { 1435 clk_disable_unprepare(stm32port->clk); 1436 } 1437 1438 static const struct serial_rs485 stm32_rs485_supported = { 1439 .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND | 1440 SER_RS485_RX_DURING_TX, 1441 .delay_rts_before_send = 1, 1442 .delay_rts_after_send = 1, 1443 }; 1444 1445 static int stm32_usart_init_port(struct stm32_port *stm32port, 1446 struct platform_device *pdev) 1447 { 1448 struct uart_port *port = &stm32port->port; 1449 struct resource *res; 1450 int ret, irq; 1451 1452 irq = platform_get_irq(pdev, 0); 1453 if (irq < 0) 1454 return irq; 1455 1456 port->iotype = UPIO_MEM; 1457 port->flags = UPF_BOOT_AUTOCONF; 1458 port->ops = &stm32_uart_ops; 1459 port->dev = &pdev->dev; 1460 port->fifosize = stm32port->info->cfg.fifosize; 1461 port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE); 1462 port->irq = irq; 1463 port->rs485_config = stm32_usart_config_rs485; 1464 port->rs485_supported = stm32_rs485_supported; 1465 1466 ret = stm32_usart_init_rs485(port, pdev); 1467 if (ret) 1468 return ret; 1469 1470 stm32port->wakeup_src = stm32port->info->cfg.has_wakeup && 1471 of_property_read_bool(pdev->dev.of_node, "wakeup-source"); 1472 1473 stm32port->swap = stm32port->info->cfg.has_swap && 1474 of_property_read_bool(pdev->dev.of_node, "rx-tx-swap"); 1475 1476 stm32port->fifoen = stm32port->info->cfg.has_fifo; 1477 if (stm32port->fifoen) { 1478 stm32_usart_get_ftcfg(pdev, "rx-threshold", 1479 &stm32port->rxftcfg); 1480 stm32_usart_get_ftcfg(pdev, "tx-threshold", 1481 &stm32port->txftcfg); 1482 } 1483 1484 port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res); 1485 if (IS_ERR(port->membase)) 1486 return PTR_ERR(port->membase); 1487 port->mapbase = res->start; 1488 1489 spin_lock_init(&port->lock); 1490 1491 stm32port->clk = devm_clk_get(&pdev->dev, NULL); 1492 if (IS_ERR(stm32port->clk)) 1493 return PTR_ERR(stm32port->clk); 1494 1495 /* Ensure that clk rate is correct by enabling the clk */ 1496 ret = clk_prepare_enable(stm32port->clk); 1497 if (ret) 1498 return ret; 1499 1500 stm32port->port.uartclk = clk_get_rate(stm32port->clk); 1501 if (!stm32port->port.uartclk) { 1502 ret = -EINVAL; 1503 goto err_clk; 1504 } 1505 1506 stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0); 1507 if (IS_ERR(stm32port->gpios)) { 1508 ret = PTR_ERR(stm32port->gpios); 1509 goto err_clk; 1510 } 1511 1512 /* 1513 * Both CTS/RTS gpios and "st,hw-flow-ctrl" (deprecated) or "uart-has-rtscts" 1514 * properties should not be specified. 
1515 */ 1516 if (stm32port->hw_flow_control) { 1517 if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) || 1518 mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) { 1519 dev_err(&pdev->dev, "Conflicting RTS/CTS config\n"); 1520 ret = -EINVAL; 1521 goto err_clk; 1522 } 1523 } 1524 1525 return ret; 1526 1527 err_clk: 1528 clk_disable_unprepare(stm32port->clk); 1529 1530 return ret; 1531 } 1532 1533 static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev) 1534 { 1535 struct device_node *np = pdev->dev.of_node; 1536 int id; 1537 1538 if (!np) 1539 return NULL; 1540 1541 id = of_alias_get_id(np, "serial"); 1542 if (id < 0) { 1543 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id); 1544 return NULL; 1545 } 1546 1547 if (WARN_ON(id >= STM32_MAX_PORTS)) 1548 return NULL; 1549 1550 stm32_ports[id].hw_flow_control = 1551 of_property_read_bool (np, "st,hw-flow-ctrl") /*deprecated*/ || 1552 of_property_read_bool (np, "uart-has-rtscts"); 1553 stm32_ports[id].port.line = id; 1554 stm32_ports[id].cr1_irq = USART_CR1_RXNEIE; 1555 stm32_ports[id].cr3_irq = 0; 1556 stm32_ports[id].last_res = RX_BUF_L; 1557 return &stm32_ports[id]; 1558 } 1559 1560 #ifdef CONFIG_OF 1561 static const struct of_device_id stm32_match[] = { 1562 { .compatible = "st,stm32-uart", .data = &stm32f4_info}, 1563 { .compatible = "st,stm32f7-uart", .data = &stm32f7_info}, 1564 { .compatible = "st,stm32h7-uart", .data = &stm32h7_info}, 1565 {}, 1566 }; 1567 1568 MODULE_DEVICE_TABLE(of, stm32_match); 1569 #endif 1570 1571 static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port, 1572 struct platform_device *pdev) 1573 { 1574 if (stm32port->rx_buf) 1575 dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf, 1576 stm32port->rx_dma_buf); 1577 } 1578 1579 static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port, 1580 struct platform_device *pdev) 1581 { 1582 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 1583 struct uart_port *port = &stm32port->port; 1584 struct device *dev = &pdev->dev; 1585 struct dma_slave_config config; 1586 int ret; 1587 1588 stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L, 1589 &stm32port->rx_dma_buf, 1590 GFP_KERNEL); 1591 if (!stm32port->rx_buf) 1592 return -ENOMEM; 1593 1594 /* Configure DMA channel */ 1595 memset(&config, 0, sizeof(config)); 1596 config.src_addr = port->mapbase + ofs->rdr; 1597 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 1598 1599 ret = dmaengine_slave_config(stm32port->rx_ch, &config); 1600 if (ret < 0) { 1601 dev_err(dev, "rx dma channel config failed\n"); 1602 stm32_usart_of_dma_rx_remove(stm32port, pdev); 1603 return ret; 1604 } 1605 1606 return 0; 1607 } 1608 1609 static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port, 1610 struct platform_device *pdev) 1611 { 1612 if (stm32port->tx_buf) 1613 dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf, 1614 stm32port->tx_dma_buf); 1615 } 1616 1617 static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port, 1618 struct platform_device *pdev) 1619 { 1620 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 1621 struct uart_port *port = &stm32port->port; 1622 struct device *dev = &pdev->dev; 1623 struct dma_slave_config config; 1624 int ret; 1625 1626 stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L, 1627 &stm32port->tx_dma_buf, 1628 GFP_KERNEL); 1629 if (!stm32port->tx_buf) 1630 return -ENOMEM; 1631 1632 /* Configure DMA channel */ 1633 memset(&config, 0, sizeof(config)); 1634 config.dst_addr = port->mapbase + 
ofs->tdr; 1635 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 1636 1637 ret = dmaengine_slave_config(stm32port->tx_ch, &config); 1638 if (ret < 0) { 1639 dev_err(dev, "tx dma channel config failed\n"); 1640 stm32_usart_of_dma_tx_remove(stm32port, pdev); 1641 return ret; 1642 } 1643 1644 return 0; 1645 } 1646 1647 static int stm32_usart_serial_probe(struct platform_device *pdev) 1648 { 1649 struct stm32_port *stm32port; 1650 int ret; 1651 1652 stm32port = stm32_usart_of_get_port(pdev); 1653 if (!stm32port) 1654 return -ENODEV; 1655 1656 stm32port->info = of_device_get_match_data(&pdev->dev); 1657 if (!stm32port->info) 1658 return -EINVAL; 1659 1660 stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx"); 1661 if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) 1662 return -EPROBE_DEFER; 1663 1664 /* Fall back in interrupt mode for any non-deferral error */ 1665 if (IS_ERR(stm32port->rx_ch)) 1666 stm32port->rx_ch = NULL; 1667 1668 stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx"); 1669 if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) { 1670 ret = -EPROBE_DEFER; 1671 goto err_dma_rx; 1672 } 1673 /* Fall back in interrupt mode for any non-deferral error */ 1674 if (IS_ERR(stm32port->tx_ch)) 1675 stm32port->tx_ch = NULL; 1676 1677 ret = stm32_usart_init_port(stm32port, pdev); 1678 if (ret) 1679 goto err_dma_tx; 1680 1681 if (stm32port->wakeup_src) { 1682 device_set_wakeup_capable(&pdev->dev, true); 1683 ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq); 1684 if (ret) 1685 goto err_deinit_port; 1686 } 1687 1688 if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) { 1689 /* Fall back in interrupt mode */ 1690 dma_release_channel(stm32port->rx_ch); 1691 stm32port->rx_ch = NULL; 1692 } 1693 1694 if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) { 1695 /* Fall back in interrupt mode */ 1696 dma_release_channel(stm32port->tx_ch); 1697 stm32port->tx_ch = NULL; 1698 } 1699 1700 if (!stm32port->rx_ch) 1701 dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n"); 1702 if (!stm32port->tx_ch) 1703 dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n"); 1704 1705 platform_set_drvdata(pdev, &stm32port->port); 1706 1707 pm_runtime_get_noresume(&pdev->dev); 1708 pm_runtime_set_active(&pdev->dev); 1709 pm_runtime_enable(&pdev->dev); 1710 1711 ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port); 1712 if (ret) 1713 goto err_port; 1714 1715 pm_runtime_put_sync(&pdev->dev); 1716 1717 return 0; 1718 1719 err_port: 1720 pm_runtime_disable(&pdev->dev); 1721 pm_runtime_set_suspended(&pdev->dev); 1722 pm_runtime_put_noidle(&pdev->dev); 1723 1724 if (stm32port->tx_ch) 1725 stm32_usart_of_dma_tx_remove(stm32port, pdev); 1726 if (stm32port->rx_ch) 1727 stm32_usart_of_dma_rx_remove(stm32port, pdev); 1728 1729 if (stm32port->wakeup_src) 1730 dev_pm_clear_wake_irq(&pdev->dev); 1731 1732 err_deinit_port: 1733 if (stm32port->wakeup_src) 1734 device_set_wakeup_capable(&pdev->dev, false); 1735 1736 stm32_usart_deinit_port(stm32port); 1737 1738 err_dma_tx: 1739 if (stm32port->tx_ch) 1740 dma_release_channel(stm32port->tx_ch); 1741 1742 err_dma_rx: 1743 if (stm32port->rx_ch) 1744 dma_release_channel(stm32port->rx_ch); 1745 1746 return ret; 1747 } 1748 1749 static int stm32_usart_serial_remove(struct platform_device *pdev) 1750 { 1751 struct uart_port *port = platform_get_drvdata(pdev); 1752 struct stm32_port *stm32_port = to_stm32_port(port); 1753 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1754 int err; 1755 u32 cr3; 1756 1757 
pm_runtime_get_sync(&pdev->dev); 1758 err = uart_remove_one_port(&stm32_usart_driver, port); 1759 if (err) 1760 return(err); 1761 1762 pm_runtime_disable(&pdev->dev); 1763 pm_runtime_set_suspended(&pdev->dev); 1764 pm_runtime_put_noidle(&pdev->dev); 1765 1766 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE); 1767 cr3 = readl_relaxed(port->membase + ofs->cr3); 1768 cr3 &= ~USART_CR3_EIE; 1769 cr3 &= ~USART_CR3_DMAR; 1770 cr3 &= ~USART_CR3_DDRE; 1771 writel_relaxed(cr3, port->membase + ofs->cr3); 1772 1773 if (stm32_port->tx_ch) { 1774 stm32_usart_of_dma_tx_remove(stm32_port, pdev); 1775 dma_release_channel(stm32_port->tx_ch); 1776 } 1777 1778 if (stm32_port->rx_ch) { 1779 stm32_usart_of_dma_rx_remove(stm32_port, pdev); 1780 dma_release_channel(stm32_port->rx_ch); 1781 } 1782 1783 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 1784 1785 if (stm32_port->wakeup_src) { 1786 dev_pm_clear_wake_irq(&pdev->dev); 1787 device_init_wakeup(&pdev->dev, false); 1788 } 1789 1790 stm32_usart_deinit_port(stm32_port); 1791 1792 return 0; 1793 } 1794 1795 static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch) 1796 { 1797 struct stm32_port *stm32_port = to_stm32_port(port); 1798 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1799 u32 isr; 1800 int ret; 1801 1802 ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr, 1803 (isr & USART_SR_TXE), 100, 1804 STM32_USART_TIMEOUT_USEC); 1805 if (ret != 0) { 1806 dev_err(port->dev, "Error while sending data in UART TX : %d\n", ret); 1807 return; 1808 } 1809 writel_relaxed(ch, port->membase + ofs->tdr); 1810 } 1811 1812 #ifdef CONFIG_SERIAL_STM32_CONSOLE 1813 static void stm32_usart_console_write(struct console *co, const char *s, 1814 unsigned int cnt) 1815 { 1816 struct uart_port *port = &stm32_ports[co->index].port; 1817 struct stm32_port *stm32_port = to_stm32_port(port); 1818 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1819 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 1820 unsigned long flags; 1821 u32 old_cr1, new_cr1; 1822 int locked = 1; 1823 1824 if (oops_in_progress) 1825 locked = spin_trylock_irqsave(&port->lock, flags); 1826 else 1827 spin_lock_irqsave(&port->lock, flags); 1828 1829 /* Save and disable interrupts, enable the transmitter */ 1830 old_cr1 = readl_relaxed(port->membase + ofs->cr1); 1831 new_cr1 = old_cr1 & ~USART_CR1_IE_MASK; 1832 new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit); 1833 writel_relaxed(new_cr1, port->membase + ofs->cr1); 1834 1835 uart_console_write(port, s, cnt, stm32_usart_console_putchar); 1836 1837 /* Restore interrupt state */ 1838 writel_relaxed(old_cr1, port->membase + ofs->cr1); 1839 1840 if (locked) 1841 spin_unlock_irqrestore(&port->lock, flags); 1842 } 1843 1844 static int stm32_usart_console_setup(struct console *co, char *options) 1845 { 1846 struct stm32_port *stm32port; 1847 int baud = 9600; 1848 int bits = 8; 1849 int parity = 'n'; 1850 int flow = 'n'; 1851 1852 if (co->index >= STM32_MAX_PORTS) 1853 return -ENODEV; 1854 1855 stm32port = &stm32_ports[co->index]; 1856 1857 /* 1858 * This driver does not support early console initialization 1859 * (use ARM early printk support instead), so we only expect 1860 * this to be called during the uart port registration when the 1861 * driver gets probed and the port should be mapped at that point. 
	 */
	if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
		return -ENXIO;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(&stm32port->port, co, baud, parity, bits, flow);
}

static struct console stm32_console = {
	.name		= STM32_SERIAL_NAME,
	.device		= uart_console_device,
	.write		= stm32_usart_console_write,
	.setup		= stm32_usart_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &stm32_usart_driver,
};

#define STM32_SERIAL_CONSOLE (&stm32_console)

#else
#define STM32_SERIAL_CONSOLE NULL
#endif /* CONFIG_SERIAL_STM32_CONSOLE */

#ifdef CONFIG_SERIAL_EARLYCON
static void early_stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
{
	struct stm32_usart_info *info = port->private_data;

	while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE))
		cpu_relax();

	writel_relaxed(ch, port->membase + info->ofs.tdr);
}

static void early_stm32_serial_write(struct console *console, const char *s, unsigned int count)
{
	struct earlycon_device *device = console->data;
	struct uart_port *port = &device->port;

	uart_console_write(port, s, count, early_stm32_usart_console_putchar);
}

static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32h7_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32f7_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32f4_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

OF_EARLYCON_DECLARE(stm32, "st,stm32h7-uart", early_stm32_h7_serial_setup);
OF_EARLYCON_DECLARE(stm32, "st,stm32f7-uart", early_stm32_f7_serial_setup);
OF_EARLYCON_DECLARE(stm32, "st,stm32-uart", early_stm32_f4_serial_setup);
#endif /* CONFIG_SERIAL_EARLYCON */

static struct uart_driver stm32_usart_driver = {
	.driver_name	= DRIVER_NAME,
	.dev_name	= STM32_SERIAL_NAME,
	.major		= 0,
	.minor		= 0,
	.nr		= STM32_MAX_PORTS,
	.cons		= STM32_SERIAL_CONSOLE,
};

static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
							bool enable)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct tty_port *tport = &port->state->port;
	int ret;
	unsigned int size;
	unsigned long flags;

	if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
		return 0;

	/*
	 * Enable the low-power wake-up and the wake-up irq if "enable" is
	 * set, disable them otherwise.
	 */
	if (enable) {
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
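		/*
		 * USART_CR1_UESM (set just above) keeps the USART able to wake
		 * the system from Stop mode; USART_CR3_WUFIE enables the
		 * wake-up flag interrupt that reports the wake-up event.
		 */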
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
		mctrl_gpio_enable_irq_wake(stm32_port->gpios);

		/*
		 * When DMA is used for reception, it must be disabled before
		 * entering low-power mode and re-enabled when exiting from
		 * low-power mode.
		 */
		if (stm32_port->rx_ch) {
			spin_lock_irqsave(&port->lock, flags);
			/* Avoid race with RX IRQ when DMAR is cleared */
			stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
			/* Poll data from DMA RX buffer if any */
			size = stm32_usart_receive_chars(port, true);
			dmaengine_terminate_async(stm32_port->rx_ch);
			uart_unlock_and_check_sysrq_irqrestore(port, flags);
			if (size)
				tty_flip_buffer_push(tport);
		}

		/* Poll data from RX FIFO if any */
		stm32_usart_receive_chars(port, false);
	} else {
		if (stm32_port->rx_ch) {
			ret = stm32_usart_start_rx_dma_cyclic(port);
			if (ret)
				return ret;
		}
		mctrl_gpio_disable_irq_wake(stm32_port->gpios);
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
	}

	return 0;
}

static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int ret;

	uart_suspend_port(&stm32_usart_driver, port);

	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
		ret = stm32_usart_serial_en_wakeup(port, true);
		if (ret)
			return ret;
	}

	/*
	 * When "no_console_suspend" is enabled, keep the pinctrl default
	 * state and rely on the bootloader to restore it upon resume.
	 * Otherwise, apply the idle or sleep state depending on wakeup
	 * capabilities.
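	 *
	 * The pin states used here are the generic pinctrl ones: "default"
	 * (reapplied in resume), "idle" when the port can wake the system,
	 * and "sleep" when it cannot.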
2021 */ 2022 if (console_suspend_enabled || !uart_console(port)) { 2023 if (device_may_wakeup(dev) || device_wakeup_path(dev)) 2024 pinctrl_pm_select_idle_state(dev); 2025 else 2026 pinctrl_pm_select_sleep_state(dev); 2027 } 2028 2029 return 0; 2030 } 2031 2032 static int __maybe_unused stm32_usart_serial_resume(struct device *dev) 2033 { 2034 struct uart_port *port = dev_get_drvdata(dev); 2035 int ret; 2036 2037 pinctrl_pm_select_default_state(dev); 2038 2039 if (device_may_wakeup(dev) || device_wakeup_path(dev)) { 2040 ret = stm32_usart_serial_en_wakeup(port, false); 2041 if (ret) 2042 return ret; 2043 } 2044 2045 return uart_resume_port(&stm32_usart_driver, port); 2046 } 2047 2048 static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev) 2049 { 2050 struct uart_port *port = dev_get_drvdata(dev); 2051 struct stm32_port *stm32port = container_of(port, 2052 struct stm32_port, port); 2053 2054 clk_disable_unprepare(stm32port->clk); 2055 2056 return 0; 2057 } 2058 2059 static int __maybe_unused stm32_usart_runtime_resume(struct device *dev) 2060 { 2061 struct uart_port *port = dev_get_drvdata(dev); 2062 struct stm32_port *stm32port = container_of(port, 2063 struct stm32_port, port); 2064 2065 return clk_prepare_enable(stm32port->clk); 2066 } 2067 2068 static const struct dev_pm_ops stm32_serial_pm_ops = { 2069 SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend, 2070 stm32_usart_runtime_resume, NULL) 2071 SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend, 2072 stm32_usart_serial_resume) 2073 }; 2074 2075 static struct platform_driver stm32_serial_driver = { 2076 .probe = stm32_usart_serial_probe, 2077 .remove = stm32_usart_serial_remove, 2078 .driver = { 2079 .name = DRIVER_NAME, 2080 .pm = &stm32_serial_pm_ops, 2081 .of_match_table = of_match_ptr(stm32_match), 2082 }, 2083 }; 2084 2085 static int __init stm32_usart_init(void) 2086 { 2087 static char banner[] __initdata = "STM32 USART driver initialized"; 2088 int ret; 2089 2090 pr_info("%s\n", banner); 2091 2092 ret = uart_register_driver(&stm32_usart_driver); 2093 if (ret) 2094 return ret; 2095 2096 ret = platform_driver_register(&stm32_serial_driver); 2097 if (ret) 2098 uart_unregister_driver(&stm32_usart_driver); 2099 2100 return ret; 2101 } 2102 2103 static void __exit stm32_usart_exit(void) 2104 { 2105 platform_driver_unregister(&stm32_serial_driver); 2106 uart_unregister_driver(&stm32_usart_driver); 2107 } 2108 2109 module_init(stm32_usart_init); 2110 module_exit(stm32_usart_exit); 2111 2112 MODULE_ALIAS("platform:" DRIVER_NAME); 2113 MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver"); 2114 MODULE_LICENSE("GPL v2"); 2115