1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) Maxime Coquelin 2015 4 * Copyright (C) STMicroelectronics SA 2017 5 * Authors: Maxime Coquelin <mcoquelin.stm32@gmail.com> 6 * Gerald Baeza <gerald.baeza@foss.st.com> 7 * Erwan Le Ray <erwan.leray@foss.st.com> 8 * 9 * Inspired by st-asc.c from STMicroelectronics (c) 10 */ 11 12 #include <linux/clk.h> 13 #include <linux/console.h> 14 #include <linux/delay.h> 15 #include <linux/dma-direction.h> 16 #include <linux/dmaengine.h> 17 #include <linux/dma-mapping.h> 18 #include <linux/io.h> 19 #include <linux/iopoll.h> 20 #include <linux/irq.h> 21 #include <linux/module.h> 22 #include <linux/of.h> 23 #include <linux/of_platform.h> 24 #include <linux/pinctrl/consumer.h> 25 #include <linux/platform_device.h> 26 #include <linux/pm_runtime.h> 27 #include <linux/pm_wakeirq.h> 28 #include <linux/serial_core.h> 29 #include <linux/serial.h> 30 #include <linux/spinlock.h> 31 #include <linux/sysrq.h> 32 #include <linux/tty_flip.h> 33 #include <linux/tty.h> 34 35 #include "serial_mctrl_gpio.h" 36 #include "stm32-usart.h" 37 38 39 /* Register offsets */ 40 static struct stm32_usart_info __maybe_unused stm32f4_info = { 41 .ofs = { 42 .isr = 0x00, 43 .rdr = 0x04, 44 .tdr = 0x04, 45 .brr = 0x08, 46 .cr1 = 0x0c, 47 .cr2 = 0x10, 48 .cr3 = 0x14, 49 .gtpr = 0x18, 50 .rtor = UNDEF_REG, 51 .rqr = UNDEF_REG, 52 .icr = UNDEF_REG, 53 }, 54 .cfg = { 55 .uart_enable_bit = 13, 56 .has_7bits_data = false, 57 .fifosize = 1, 58 } 59 }; 60 61 static struct stm32_usart_info __maybe_unused stm32f7_info = { 62 .ofs = { 63 .cr1 = 0x00, 64 .cr2 = 0x04, 65 .cr3 = 0x08, 66 .brr = 0x0c, 67 .gtpr = 0x10, 68 .rtor = 0x14, 69 .rqr = 0x18, 70 .isr = 0x1c, 71 .icr = 0x20, 72 .rdr = 0x24, 73 .tdr = 0x28, 74 }, 75 .cfg = { 76 .uart_enable_bit = 0, 77 .has_7bits_data = true, 78 .has_swap = true, 79 .fifosize = 1, 80 } 81 }; 82 83 static struct stm32_usart_info __maybe_unused stm32h7_info = { 84 .ofs = { 85 .cr1 = 0x00, 86 .cr2 = 0x04, 87 .cr3 = 0x08, 88 .brr = 0x0c, 89 .gtpr = 0x10, 90 .rtor = 0x14, 91 .rqr = 0x18, 92 .isr = 0x1c, 93 .icr = 0x20, 94 .rdr = 0x24, 95 .tdr = 0x28, 96 }, 97 .cfg = { 98 .uart_enable_bit = 0, 99 .has_7bits_data = true, 100 .has_swap = true, 101 .has_wakeup = true, 102 .has_fifo = true, 103 .fifosize = 16, 104 } 105 }; 106 107 static void stm32_usart_stop_tx(struct uart_port *port); 108 static void stm32_usart_transmit_chars(struct uart_port *port); 109 static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch); 110 111 static inline struct stm32_port *to_stm32_port(struct uart_port *port) 112 { 113 return container_of(port, struct stm32_port, port); 114 } 115 116 static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits) 117 { 118 u32 val; 119 120 val = readl_relaxed(port->membase + reg); 121 val |= bits; 122 writel_relaxed(val, port->membase + reg); 123 } 124 125 static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits) 126 { 127 u32 val; 128 129 val = readl_relaxed(port->membase + reg); 130 val &= ~bits; 131 writel_relaxed(val, port->membase + reg); 132 } 133 134 static unsigned int stm32_usart_tx_empty(struct uart_port *port) 135 { 136 struct stm32_port *stm32_port = to_stm32_port(port); 137 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 138 139 if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC) 140 return TIOCSER_TEMT; 141 142 return 0; 143 } 144 145 static void stm32_usart_rs485_rts_enable(struct uart_port *port) 146 { 147 struct stm32_port *stm32_port = 
to_stm32_port(port); 148 struct serial_rs485 *rs485conf = &port->rs485; 149 150 if (stm32_port->hw_flow_control || 151 !(rs485conf->flags & SER_RS485_ENABLED)) 152 return; 153 154 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) { 155 mctrl_gpio_set(stm32_port->gpios, 156 stm32_port->port.mctrl | TIOCM_RTS); 157 } else { 158 mctrl_gpio_set(stm32_port->gpios, 159 stm32_port->port.mctrl & ~TIOCM_RTS); 160 } 161 } 162 163 static void stm32_usart_rs485_rts_disable(struct uart_port *port) 164 { 165 struct stm32_port *stm32_port = to_stm32_port(port); 166 struct serial_rs485 *rs485conf = &port->rs485; 167 168 if (stm32_port->hw_flow_control || 169 !(rs485conf->flags & SER_RS485_ENABLED)) 170 return; 171 172 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) { 173 mctrl_gpio_set(stm32_port->gpios, 174 stm32_port->port.mctrl & ~TIOCM_RTS); 175 } else { 176 mctrl_gpio_set(stm32_port->gpios, 177 stm32_port->port.mctrl | TIOCM_RTS); 178 } 179 } 180 181 static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE, 182 u32 delay_DDE, u32 baud) 183 { 184 u32 rs485_deat_dedt; 185 u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT); 186 bool over8; 187 188 *cr3 |= USART_CR3_DEM; 189 over8 = *cr1 & USART_CR1_OVER8; 190 191 *cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK); 192 193 if (over8) 194 rs485_deat_dedt = delay_ADE * baud * 8; 195 else 196 rs485_deat_dedt = delay_ADE * baud * 16; 197 198 rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000); 199 rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ? 200 rs485_deat_dedt_max : rs485_deat_dedt; 201 rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) & 202 USART_CR1_DEAT_MASK; 203 *cr1 |= rs485_deat_dedt; 204 205 if (over8) 206 rs485_deat_dedt = delay_DDE * baud * 8; 207 else 208 rs485_deat_dedt = delay_DDE * baud * 16; 209 210 rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000); 211 rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ? 
212 rs485_deat_dedt_max : rs485_deat_dedt; 213 rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) & 214 USART_CR1_DEDT_MASK; 215 *cr1 |= rs485_deat_dedt; 216 } 217 218 static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *termios, 219 struct serial_rs485 *rs485conf) 220 { 221 struct stm32_port *stm32_port = to_stm32_port(port); 222 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 223 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 224 u32 usartdiv, baud, cr1, cr3; 225 bool over8; 226 227 stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); 228 229 if (port->rs485_rx_during_tx_gpio) 230 gpiod_set_value_cansleep(port->rs485_rx_during_tx_gpio, 231 !!(rs485conf->flags & SER_RS485_RX_DURING_TX)); 232 else 233 rs485conf->flags |= SER_RS485_RX_DURING_TX; 234 235 if (rs485conf->flags & SER_RS485_ENABLED) { 236 cr1 = readl_relaxed(port->membase + ofs->cr1); 237 cr3 = readl_relaxed(port->membase + ofs->cr3); 238 usartdiv = readl_relaxed(port->membase + ofs->brr); 239 usartdiv = usartdiv & GENMASK(15, 0); 240 over8 = cr1 & USART_CR1_OVER8; 241 242 if (over8) 243 usartdiv = usartdiv | (usartdiv & GENMASK(4, 0)) 244 << USART_BRR_04_R_SHIFT; 245 246 baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv); 247 stm32_usart_config_reg_rs485(&cr1, &cr3, 248 rs485conf->delay_rts_before_send, 249 rs485conf->delay_rts_after_send, 250 baud); 251 252 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) 253 cr3 &= ~USART_CR3_DEP; 254 else 255 cr3 |= USART_CR3_DEP; 256 257 writel_relaxed(cr3, port->membase + ofs->cr3); 258 writel_relaxed(cr1, port->membase + ofs->cr1); 259 } else { 260 stm32_usart_clr_bits(port, ofs->cr3, 261 USART_CR3_DEM | USART_CR3_DEP); 262 stm32_usart_clr_bits(port, ofs->cr1, 263 USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK); 264 } 265 266 stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); 267 268 /* Adjust RTS polarity in case it's driven in software */ 269 if (stm32_usart_tx_empty(port)) 270 stm32_usart_rs485_rts_disable(port); 271 else 272 stm32_usart_rs485_rts_enable(port); 273 274 return 0; 275 } 276 277 static int stm32_usart_init_rs485(struct uart_port *port, 278 struct platform_device *pdev) 279 { 280 struct serial_rs485 *rs485conf = &port->rs485; 281 282 rs485conf->flags = 0; 283 rs485conf->delay_rts_before_send = 0; 284 rs485conf->delay_rts_after_send = 0; 285 286 if (!pdev->dev.of_node) 287 return -ENODEV; 288 289 return uart_get_rs485_mode(port); 290 } 291 292 static bool stm32_usart_rx_dma_enabled(struct uart_port *port) 293 { 294 struct stm32_port *stm32_port = to_stm32_port(port); 295 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 296 297 if (!stm32_port->rx_ch) 298 return false; 299 300 return !!(readl_relaxed(port->membase + ofs->cr3) & USART_CR3_DMAR); 301 } 302 303 /* Return true when data is pending (in pio mode), and false when no data is pending. 
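 * In DMA mode only RX error conditions are reported as pending here; normal data is drained directly by the DMA engine.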
*/ 304 static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr) 305 { 306 struct stm32_port *stm32_port = to_stm32_port(port); 307 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 308 309 *sr = readl_relaxed(port->membase + ofs->isr); 310 /* Get pending characters in RDR or FIFO */ 311 if (*sr & USART_SR_RXNE) { 312 /* Get all pending characters from the RDR or the FIFO when using interrupts */ 313 if (!stm32_usart_rx_dma_enabled(port)) 314 return true; 315 316 /* Handle only RX data errors when using DMA */ 317 if (*sr & USART_SR_ERR_MASK) 318 return true; 319 } 320 321 return false; 322 } 323 324 static u8 stm32_usart_get_char_pio(struct uart_port *port) 325 { 326 struct stm32_port *stm32_port = to_stm32_port(port); 327 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 328 unsigned long c; 329 330 c = readl_relaxed(port->membase + ofs->rdr); 331 /* Apply RDR data mask */ 332 c &= stm32_port->rdr_mask; 333 334 return c; 335 } 336 337 static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port) 338 { 339 struct stm32_port *stm32_port = to_stm32_port(port); 340 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 341 unsigned int size = 0; 342 u32 sr; 343 u8 c, flag; 344 345 while (stm32_usart_pending_rx_pio(port, &sr)) { 346 sr |= USART_SR_DUMMY_RX; 347 flag = TTY_NORMAL; 348 349 /* 350 * Status bits have to be cleared before reading the RDR: 351 * In FIFO mode, reading the RDR will pop the next data 352 * (if any) along with its status bits into the SR. 353 * Not doing so leads to misalignment between RDR and SR, 354 * and clears the status bits of the next RX data. 355 * 356 * Clear error flags for stm32f7 and stm32h7 compatible 357 * devices. On stm32f4 compatible devices, the error bit is 358 * cleared by the sequence [read SR - read DR]. 359 */ 360 if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG) 361 writel_relaxed(sr & USART_SR_ERR_MASK, 362 port->membase + ofs->icr); 363 364 c = stm32_usart_get_char_pio(port); 365 port->icount.rx++; 366 size++; 367 if (sr & USART_SR_ERR_MASK) { 368 if (sr & USART_SR_ORE) { 369 port->icount.overrun++; 370 } else if (sr & USART_SR_PE) { 371 port->icount.parity++; 372 } else if (sr & USART_SR_FE) { 373 /* Break detection if character is null */ 374 if (!c) { 375 port->icount.brk++; 376 if (uart_handle_break(port)) 377 continue; 378 } else { 379 port->icount.frame++; 380 } 381 } 382 383 sr &= port->read_status_mask; 384 385 if (sr & USART_SR_PE) { 386 flag = TTY_PARITY; 387 } else if (sr & USART_SR_FE) { 388 if (!c) 389 flag = TTY_BREAK; 390 else 391 flag = TTY_FRAME; 392 } 393 } 394 395 if (uart_prepare_sysrq_char(port, c)) 396 continue; 397 uart_insert_char(port, sr, USART_SR_ORE, c, flag); 398 } 399 400 return size; 401 } 402 403 static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size) 404 { 405 struct stm32_port *stm32_port = to_stm32_port(port); 406 struct tty_port *ttyport = &stm32_port->port.state->port; 407 unsigned char *dma_start; 408 int dma_count, i; 409 410 dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res); 411 412 /* 413 * Apply rdr_mask on buffer in order to mask parity bit. 414 * This loop is useless in cs8 mode because DMA copies only 415 * 8 bits and already ignores parity bit.
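 * With CS7 plus parity, for instance, rdr_mask is 0x7f, so the parity bit copied by the DMA is masked out here.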
416 */ 417 if (!(stm32_port->rdr_mask == (BIT(8) - 1))) 418 for (i = 0; i < dma_size; i++) 419 *(dma_start + i) &= stm32_port->rdr_mask; 420 421 dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size); 422 port->icount.rx += dma_count; 423 if (dma_count != dma_size) 424 port->icount.buf_overrun++; 425 stm32_port->last_res -= dma_count; 426 if (stm32_port->last_res == 0) 427 stm32_port->last_res = RX_BUF_L; 428 } 429 430 static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port) 431 { 432 struct stm32_port *stm32_port = to_stm32_port(port); 433 unsigned int dma_size, size = 0; 434 435 /* DMA buffer is configured in cyclic mode and handles the rollback of the buffer. */ 436 if (stm32_port->rx_dma_state.residue > stm32_port->last_res) { 437 /* Conditional first part: from last_res to end of DMA buffer */ 438 dma_size = stm32_port->last_res; 439 stm32_usart_push_buffer_dma(port, dma_size); 440 size = dma_size; 441 } 442 443 dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue; 444 stm32_usart_push_buffer_dma(port, dma_size); 445 size += dma_size; 446 447 return size; 448 } 449 450 static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force_dma_flush) 451 { 452 struct stm32_port *stm32_port = to_stm32_port(port); 453 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 454 enum dma_status rx_dma_status; 455 u32 sr; 456 unsigned int size = 0; 457 458 if (stm32_usart_rx_dma_enabled(port) || force_dma_flush) { 459 rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch, 460 stm32_port->rx_ch->cookie, 461 &stm32_port->rx_dma_state); 462 if (rx_dma_status == DMA_IN_PROGRESS) { 463 /* Empty DMA buffer */ 464 size = stm32_usart_receive_chars_dma(port); 465 sr = readl_relaxed(port->membase + ofs->isr); 466 if (sr & USART_SR_ERR_MASK) { 467 /* Disable DMA request line */ 468 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR); 469 470 /* Switch to PIO mode to handle the errors */ 471 size += stm32_usart_receive_chars_pio(port); 472 473 /* Switch back to DMA mode */ 474 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR); 475 } 476 } else { 477 /* Disable RX DMA */ 478 dmaengine_terminate_async(stm32_port->rx_ch); 479 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR); 480 /* Fall back to interrupt mode */ 481 dev_dbg(port->dev, "DMA error, fallback to irq mode\n"); 482 size = stm32_usart_receive_chars_pio(port); 483 } 484 } else { 485 size = stm32_usart_receive_chars_pio(port); 486 } 487 488 return size; 489 } 490 491 static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port) 492 { 493 dmaengine_terminate_async(stm32_port->tx_ch); 494 stm32_port->tx_dma_busy = false; 495 } 496 497 static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port) 498 { 499 /* 500 * We cannot use the function "dmaengine_tx_status" to know the 501 * status of DMA. This function does not show if the "dma complete" 502 * callback of the DMA transaction has been called. So we prefer 503 * to use "tx_dma_busy" flag to prevent dual DMA transaction at the 504 * same time. 
505 */ 506 return stm32_port->tx_dma_busy; 507 } 508 509 static bool stm32_usart_tx_dma_enabled(struct stm32_port *stm32_port) 510 { 511 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 512 513 return !!(readl_relaxed(stm32_port->port.membase + ofs->cr3) & USART_CR3_DMAT); 514 } 515 516 static void stm32_usart_tx_dma_complete(void *arg) 517 { 518 struct uart_port *port = arg; 519 struct stm32_port *stm32port = to_stm32_port(port); 520 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 521 unsigned long flags; 522 523 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 524 stm32_usart_tx_dma_terminate(stm32port); 525 526 /* Let's see if we have pending data to send */ 527 spin_lock_irqsave(&port->lock, flags); 528 stm32_usart_transmit_chars(port); 529 spin_unlock_irqrestore(&port->lock, flags); 530 } 531 532 static void stm32_usart_tx_interrupt_enable(struct uart_port *port) 533 { 534 struct stm32_port *stm32_port = to_stm32_port(port); 535 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 536 537 /* 538 * Enables TX FIFO threashold irq when FIFO is enabled, 539 * or TX empty irq when FIFO is disabled 540 */ 541 if (stm32_port->fifoen && stm32_port->txftcfg >= 0) 542 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE); 543 else 544 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE); 545 } 546 547 static void stm32_usart_tc_interrupt_enable(struct uart_port *port) 548 { 549 struct stm32_port *stm32_port = to_stm32_port(port); 550 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 551 552 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE); 553 } 554 555 static void stm32_usart_rx_dma_complete(void *arg) 556 { 557 struct uart_port *port = arg; 558 struct tty_port *tport = &port->state->port; 559 unsigned int size; 560 unsigned long flags; 561 562 spin_lock_irqsave(&port->lock, flags); 563 size = stm32_usart_receive_chars(port, false); 564 uart_unlock_and_check_sysrq_irqrestore(port, flags); 565 if (size) 566 tty_flip_buffer_push(tport); 567 } 568 569 static void stm32_usart_tx_interrupt_disable(struct uart_port *port) 570 { 571 struct stm32_port *stm32_port = to_stm32_port(port); 572 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 573 574 if (stm32_port->fifoen && stm32_port->txftcfg >= 0) 575 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE); 576 else 577 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE); 578 } 579 580 static void stm32_usart_tc_interrupt_disable(struct uart_port *port) 581 { 582 struct stm32_port *stm32_port = to_stm32_port(port); 583 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 584 585 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE); 586 } 587 588 static void stm32_usart_transmit_chars_pio(struct uart_port *port) 589 { 590 struct stm32_port *stm32_port = to_stm32_port(port); 591 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 592 struct circ_buf *xmit = &port->state->xmit; 593 594 if (stm32_usart_tx_dma_enabled(stm32_port)) 595 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 596 597 while (!uart_circ_empty(xmit)) { 598 /* Check that TDR is empty before filling FIFO */ 599 if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE)) 600 break; 601 writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr); 602 uart_xmit_advance(port, 1); 603 } 604 605 /* rely on TXE irq (mask or unmask) for sending remaining data */ 606 if (uart_circ_empty(xmit)) 607 stm32_usart_tx_interrupt_disable(port); 608 else 609 
stm32_usart_tx_interrupt_enable(port); 610 } 611 612 static void stm32_usart_transmit_chars_dma(struct uart_port *port) 613 { 614 struct stm32_port *stm32port = to_stm32_port(port); 615 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 616 struct circ_buf *xmit = &port->state->xmit; 617 struct dma_async_tx_descriptor *desc = NULL; 618 unsigned int count; 619 620 if (stm32_usart_tx_dma_started(stm32port)) { 621 if (!stm32_usart_tx_dma_enabled(stm32port)) 622 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT); 623 return; 624 } 625 626 count = uart_circ_chars_pending(xmit); 627 628 if (count > TX_BUF_L) 629 count = TX_BUF_L; 630 631 if (xmit->tail < xmit->head) { 632 memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count); 633 } else { 634 size_t one = UART_XMIT_SIZE - xmit->tail; 635 size_t two; 636 637 if (one > count) 638 one = count; 639 two = count - one; 640 641 memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one); 642 if (two) 643 memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two); 644 } 645 646 desc = dmaengine_prep_slave_single(stm32port->tx_ch, 647 stm32port->tx_dma_buf, 648 count, 649 DMA_MEM_TO_DEV, 650 DMA_PREP_INTERRUPT); 651 652 if (!desc) 653 goto fallback_err; 654 655 /* 656 * Set "tx_dma_busy" flag. This flag will be released when 657 * dmaengine_terminate_async() is called. This flag prevents 658 * transmit_chars_dma() from starting another DMA transaction 659 * while the callback of the previous one has not yet run. 660 */ 661 stm32port->tx_dma_busy = true; 662 663 desc->callback = stm32_usart_tx_dma_complete; 664 desc->callback_param = port; 665 666 /* Push current DMA TX transaction in the pending queue */ 667 if (dma_submit_error(dmaengine_submit(desc))) { 668 /* DMA not yet started, safe to free resources */ 669 stm32_usart_tx_dma_terminate(stm32port); 670 goto fallback_err; 671 } 672 673 /* Issue pending DMA TX requests */ 674 dma_async_issue_pending(stm32port->tx_ch); 675 676 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT); 677 678 uart_xmit_advance(port, count); 679 680 return; 681 682 fallback_err: 683 stm32_usart_transmit_chars_pio(port); 684 } 685 686 static void stm32_usart_transmit_chars(struct uart_port *port) 687 { 688 struct stm32_port *stm32_port = to_stm32_port(port); 689 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 690 struct circ_buf *xmit = &port->state->xmit; 691 u32 isr; 692 int ret; 693 694 if (!stm32_port->hw_flow_control && 695 port->rs485.flags & SER_RS485_ENABLED && 696 (port->x_char || 697 !(uart_circ_empty(xmit) || uart_tx_stopped(port)))) { 698 stm32_usart_tc_interrupt_disable(port); 699 stm32_usart_rs485_rts_enable(port); 700 } 701 702 if (port->x_char) { 703 if (stm32_usart_tx_dma_started(stm32_port) && 704 stm32_usart_tx_dma_enabled(stm32_port)) 705 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 706 707 /* Check that TDR is empty before filling FIFO */ 708 ret = 709 readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, 710 isr, 711 (isr & USART_SR_TXE), 712 10, 1000); 713 if (ret) 714 dev_warn(port->dev, "1 character may be erased\n"); 715 716 writel_relaxed(port->x_char, port->membase + ofs->tdr); 717 port->x_char = 0; 718 port->icount.tx++; 719 if (stm32_usart_tx_dma_started(stm32_port)) 720 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT); 721 return; 722 } 723 724 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { 725 stm32_usart_tx_interrupt_disable(port); 726 return; 727 } 728 729 if (ofs->icr == UNDEF_REG) 730 stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC); 731 else
732 writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr); 733 734 if (stm32_port->tx_ch) 735 stm32_usart_transmit_chars_dma(port); 736 else 737 stm32_usart_transmit_chars_pio(port); 738 739 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 740 uart_write_wakeup(port); 741 742 if (uart_circ_empty(xmit)) { 743 stm32_usart_tx_interrupt_disable(port); 744 if (!stm32_port->hw_flow_control && 745 port->rs485.flags & SER_RS485_ENABLED) { 746 stm32_usart_tc_interrupt_enable(port); 747 } 748 } 749 } 750 751 static irqreturn_t stm32_usart_interrupt(int irq, void *ptr) 752 { 753 struct uart_port *port = ptr; 754 struct tty_port *tport = &port->state->port; 755 struct stm32_port *stm32_port = to_stm32_port(port); 756 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 757 u32 sr; 758 unsigned int size; 759 760 sr = readl_relaxed(port->membase + ofs->isr); 761 762 if (!stm32_port->hw_flow_control && 763 port->rs485.flags & SER_RS485_ENABLED && 764 (sr & USART_SR_TC)) { 765 stm32_usart_tc_interrupt_disable(port); 766 stm32_usart_rs485_rts_disable(port); 767 } 768 769 if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) 770 writel_relaxed(USART_ICR_RTOCF, 771 port->membase + ofs->icr); 772 773 if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) { 774 /* Clear wake up flag and disable wake up interrupt */ 775 writel_relaxed(USART_ICR_WUCF, 776 port->membase + ofs->icr); 777 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE); 778 if (irqd_is_wakeup_set(irq_get_irq_data(port->irq))) 779 pm_wakeup_event(tport->tty->dev, 0); 780 } 781 782 /* 783 * rx errors in dma mode has to be handled ASAP to avoid overrun as the DMA request 784 * line has been masked by HW and rx data are stacking in FIFO. 785 */ 786 if (!stm32_port->throttled) { 787 if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_enabled(port)) || 788 ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_enabled(port))) { 789 spin_lock(&port->lock); 790 size = stm32_usart_receive_chars(port, false); 791 uart_unlock_and_check_sysrq(port); 792 if (size) 793 tty_flip_buffer_push(tport); 794 } 795 } 796 797 if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) { 798 spin_lock(&port->lock); 799 stm32_usart_transmit_chars(port); 800 spin_unlock(&port->lock); 801 } 802 803 /* Receiver timeout irq for DMA RX */ 804 if (stm32_usart_rx_dma_enabled(port) && !stm32_port->throttled) { 805 spin_lock(&port->lock); 806 size = stm32_usart_receive_chars(port, false); 807 uart_unlock_and_check_sysrq(port); 808 if (size) 809 tty_flip_buffer_push(tport); 810 } 811 812 return IRQ_HANDLED; 813 } 814 815 static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl) 816 { 817 struct stm32_port *stm32_port = to_stm32_port(port); 818 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 819 820 if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS)) 821 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE); 822 else 823 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE); 824 825 mctrl_gpio_set(stm32_port->gpios, mctrl); 826 } 827 828 static unsigned int stm32_usart_get_mctrl(struct uart_port *port) 829 { 830 struct stm32_port *stm32_port = to_stm32_port(port); 831 unsigned int ret; 832 833 /* This routine is used to get signals of: DCD, DSR, RI, and CTS */ 834 ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; 835 836 return mctrl_gpio_get(stm32_port->gpios, &ret); 837 } 838 839 static void stm32_usart_enable_ms(struct uart_port *port) 840 { 841 mctrl_gpio_enable_ms(to_stm32_port(port)->gpios); 842 } 843 844 static void stm32_usart_disable_ms(struct 
uart_port *port) 845 { 846 mctrl_gpio_disable_ms(to_stm32_port(port)->gpios); 847 } 848 849 /* Transmit stop */ 850 static void stm32_usart_stop_tx(struct uart_port *port) 851 { 852 struct stm32_port *stm32_port = to_stm32_port(port); 853 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 854 855 stm32_usart_tx_interrupt_disable(port); 856 if (stm32_usart_tx_dma_started(stm32_port) && stm32_usart_tx_dma_enabled(stm32_port)) 857 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 858 859 stm32_usart_rs485_rts_disable(port); 860 } 861 862 /* There are probably characters waiting to be transmitted. */ 863 static void stm32_usart_start_tx(struct uart_port *port) 864 { 865 struct circ_buf *xmit = &port->state->xmit; 866 867 if (uart_circ_empty(xmit) && !port->x_char) { 868 stm32_usart_rs485_rts_disable(port); 869 return; 870 } 871 872 stm32_usart_rs485_rts_enable(port); 873 874 stm32_usart_transmit_chars(port); 875 } 876 877 /* Flush the transmit buffer. */ 878 static void stm32_usart_flush_buffer(struct uart_port *port) 879 { 880 struct stm32_port *stm32_port = to_stm32_port(port); 881 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 882 883 if (stm32_port->tx_ch) { 884 stm32_usart_tx_dma_terminate(stm32_port); 885 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 886 } 887 } 888 889 /* Throttle the remote when input buffer is about to overflow. */ 890 static void stm32_usart_throttle(struct uart_port *port) 891 { 892 struct stm32_port *stm32_port = to_stm32_port(port); 893 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 894 unsigned long flags; 895 896 spin_lock_irqsave(&port->lock, flags); 897 898 /* 899 * Disable DMA request line if enabled, so the RX data gets queued into the FIFO. 900 * Hardware flow control is triggered when RX FIFO is full. 901 */ 902 if (stm32_usart_rx_dma_enabled(port)) 903 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR); 904 905 stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq); 906 if (stm32_port->cr3_irq) 907 stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq); 908 909 stm32_port->throttled = true; 910 spin_unlock_irqrestore(&port->lock, flags); 911 } 912 913 /* Unthrottle the remote, the input buffer can now accept data. */ 914 static void stm32_usart_unthrottle(struct uart_port *port) 915 { 916 struct stm32_port *stm32_port = to_stm32_port(port); 917 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 918 unsigned long flags; 919 920 spin_lock_irqsave(&port->lock, flags); 921 stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq); 922 if (stm32_port->cr3_irq) 923 stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq); 924 925 /* 926 * Switch back to DMA mode (re-enable DMA request line). 927 * Hardware flow control is stopped when FIFO is not full any more. 928 */ 929 if (stm32_port->rx_ch) 930 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR); 931 932 stm32_port->throttled = false; 933 spin_unlock_irqrestore(&port->lock, flags); 934 } 935 936 /* Receive stop */ 937 static void stm32_usart_stop_rx(struct uart_port *port) 938 { 939 struct stm32_port *stm32_port = to_stm32_port(port); 940 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 941 942 /* Disable DMA request line. 
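 * The RX interrupts (cr1_irq and, when used, cr3_irq) are masked just below.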
*/ 943 if (stm32_port->rx_ch) 944 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR); 945 946 stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq); 947 if (stm32_port->cr3_irq) 948 stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq); 949 } 950 951 /* Handle breaks - ignored by us */ 952 static void stm32_usart_break_ctl(struct uart_port *port, int break_state) 953 { 954 } 955 956 static int stm32_usart_start_rx_dma_cyclic(struct uart_port *port) 957 { 958 struct stm32_port *stm32_port = to_stm32_port(port); 959 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 960 struct dma_async_tx_descriptor *desc; 961 int ret; 962 963 stm32_port->last_res = RX_BUF_L; 964 /* Prepare a DMA cyclic transaction */ 965 desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch, 966 stm32_port->rx_dma_buf, 967 RX_BUF_L, RX_BUF_P, 968 DMA_DEV_TO_MEM, 969 DMA_PREP_INTERRUPT); 970 if (!desc) { 971 dev_err(port->dev, "rx dma prep cyclic failed\n"); 972 return -ENODEV; 973 } 974 975 desc->callback = stm32_usart_rx_dma_complete; 976 desc->callback_param = port; 977 978 /* Push current DMA transaction in the pending queue */ 979 ret = dma_submit_error(dmaengine_submit(desc)); 980 if (ret) { 981 dmaengine_terminate_sync(stm32_port->rx_ch); 982 return ret; 983 } 984 985 /* Issue pending DMA requests */ 986 dma_async_issue_pending(stm32_port->rx_ch); 987 988 /* 989 * DMA request line not re-enabled at resume when port is throttled. 990 * It will be re-enabled by unthrottle ops. 991 */ 992 if (!stm32_port->throttled) 993 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR); 994 995 return 0; 996 } 997 998 static int stm32_usart_startup(struct uart_port *port) 999 { 1000 struct stm32_port *stm32_port = to_stm32_port(port); 1001 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1002 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 1003 const char *name = to_platform_device(port->dev)->name; 1004 u32 val; 1005 int ret; 1006 1007 ret = request_irq(port->irq, stm32_usart_interrupt, 1008 IRQF_NO_SUSPEND, name, port); 1009 if (ret) 1010 return ret; 1011 1012 if (stm32_port->swap) { 1013 val = readl_relaxed(port->membase + ofs->cr2); 1014 val |= USART_CR2_SWAP; 1015 writel_relaxed(val, port->membase + ofs->cr2); 1016 } 1017 1018 /* RX FIFO Flush */ 1019 if (ofs->rqr != UNDEF_REG) 1020 writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr); 1021 1022 if (stm32_port->rx_ch) { 1023 ret = stm32_usart_start_rx_dma_cyclic(port); 1024 if (ret) { 1025 free_irq(port->irq, port); 1026 return ret; 1027 } 1028 } 1029 1030 /* RX enabling */ 1031 val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit); 1032 stm32_usart_set_bits(port, ofs->cr1, val); 1033 1034 return 0; 1035 } 1036 1037 static void stm32_usart_shutdown(struct uart_port *port) 1038 { 1039 struct stm32_port *stm32_port = to_stm32_port(port); 1040 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1041 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 1042 u32 val, isr; 1043 int ret; 1044 1045 if (stm32_usart_tx_dma_enabled(stm32_port)) 1046 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 1047 1048 if (stm32_usart_tx_dma_started(stm32_port)) 1049 stm32_usart_tx_dma_terminate(stm32_port); 1050 1051 /* Disable modem control interrupts */ 1052 stm32_usart_disable_ms(port); 1053 1054 val = USART_CR1_TXEIE | USART_CR1_TE; 1055 val |= stm32_port->cr1_irq | USART_CR1_RE; 1056 val |= BIT(cfg->uart_enable_bit); 1057 if (stm32_port->fifoen) 1058 val |= USART_CR1_FIFOEN; 1059 1060 ret = 
readl_relaxed_poll_timeout(port->membase + ofs->isr, 1061 isr, (isr & USART_SR_TC), 1062 10, 100000); 1063 1064 /* Send the TC error message only when ISR_TC is not set */ 1065 if (ret) 1066 dev_err(port->dev, "Transmission is not complete\n"); 1067 1068 /* Disable RX DMA. */ 1069 if (stm32_port->rx_ch) 1070 dmaengine_terminate_async(stm32_port->rx_ch); 1071 1072 /* flush RX & TX FIFO */ 1073 if (ofs->rqr != UNDEF_REG) 1074 writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ, 1075 port->membase + ofs->rqr); 1076 1077 stm32_usart_clr_bits(port, ofs->cr1, val); 1078 1079 free_irq(port->irq, port); 1080 } 1081 1082 static void stm32_usart_set_termios(struct uart_port *port, 1083 struct ktermios *termios, 1084 const struct ktermios *old) 1085 { 1086 struct stm32_port *stm32_port = to_stm32_port(port); 1087 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1088 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 1089 struct serial_rs485 *rs485conf = &port->rs485; 1090 unsigned int baud, bits; 1091 u32 usartdiv, mantissa, fraction, oversampling; 1092 tcflag_t cflag = termios->c_cflag; 1093 u32 cr1, cr2, cr3, isr; 1094 unsigned long flags; 1095 int ret; 1096 1097 if (!stm32_port->hw_flow_control) 1098 cflag &= ~CRTSCTS; 1099 1100 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8); 1101 1102 spin_lock_irqsave(&port->lock, flags); 1103 1104 ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, 1105 isr, 1106 (isr & USART_SR_TC), 1107 10, 100000); 1108 1109 /* Send the TC error message only when ISR_TC is not set. */ 1110 if (ret) 1111 dev_err(port->dev, "Transmission is not complete\n"); 1112 1113 /* Stop serial port and reset value */ 1114 writel_relaxed(0, port->membase + ofs->cr1); 1115 1116 /* flush RX & TX FIFO */ 1117 if (ofs->rqr != UNDEF_REG) 1118 writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ, 1119 port->membase + ofs->rqr); 1120 1121 cr1 = USART_CR1_TE | USART_CR1_RE; 1122 if (stm32_port->fifoen) 1123 cr1 |= USART_CR1_FIFOEN; 1124 cr2 = stm32_port->swap ? USART_CR2_SWAP : 0; 1125 1126 /* Tx and RX FIFO configuration */ 1127 cr3 = readl_relaxed(port->membase + ofs->cr3); 1128 cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE; 1129 if (stm32_port->fifoen) { 1130 if (stm32_port->txftcfg >= 0) 1131 cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT; 1132 if (stm32_port->rxftcfg >= 0) 1133 cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT; 1134 } 1135 1136 if (cflag & CSTOPB) 1137 cr2 |= USART_CR2_STOP_2B; 1138 1139 bits = tty_get_char_size(cflag); 1140 stm32_port->rdr_mask = (BIT(bits) - 1); 1141 1142 if (cflag & PARENB) { 1143 bits++; 1144 cr1 |= USART_CR1_PCE; 1145 } 1146 1147 /* 1148 * Word length configuration: 1149 * CS8 + parity, 9 bits word aka [M1:M0] = 0b01 1150 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10 1151 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00 1152 * M0 and M1 already cleared by cr1 initialization. 
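 * For example, CS8 with PARENB gives bits = 9, so M0 is set just below.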
1153 */ 1154 if (bits == 9) { 1155 cr1 |= USART_CR1_M0; 1156 } else if ((bits == 7) && cfg->has_7bits_data) { 1157 cr1 |= USART_CR1_M1; 1158 } else if (bits != 8) { 1159 dev_dbg(port->dev, "Unsupported data bits config: %u bits\n" 1160 , bits); 1161 cflag &= ~CSIZE; 1162 cflag |= CS8; 1163 termios->c_cflag = cflag; 1164 bits = 8; 1165 if (cflag & PARENB) { 1166 bits++; 1167 cr1 |= USART_CR1_M0; 1168 } 1169 } 1170 1171 if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch || 1172 (stm32_port->fifoen && 1173 stm32_port->rxftcfg >= 0))) { 1174 if (cflag & CSTOPB) 1175 bits = bits + 3; /* 1 start bit + 2 stop bits */ 1176 else 1177 bits = bits + 2; /* 1 start bit + 1 stop bit */ 1178 1179 /* RX timeout irq to occur after last stop bit + bits */ 1180 stm32_port->cr1_irq = USART_CR1_RTOIE; 1181 writel_relaxed(bits, port->membase + ofs->rtor); 1182 cr2 |= USART_CR2_RTOEN; 1183 /* 1184 * Enable fifo threshold irq in two cases, either when there is no DMA, or when 1185 * wake up over usart, from low power until the DMA gets re-enabled by resume. 1186 */ 1187 stm32_port->cr3_irq = USART_CR3_RXFTIE; 1188 } 1189 1190 cr1 |= stm32_port->cr1_irq; 1191 cr3 |= stm32_port->cr3_irq; 1192 1193 if (cflag & PARODD) 1194 cr1 |= USART_CR1_PS; 1195 1196 port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); 1197 if (cflag & CRTSCTS) { 1198 port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; 1199 cr3 |= USART_CR3_CTSE | USART_CR3_RTSE; 1200 } 1201 1202 usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud); 1203 1204 /* 1205 * The USART supports 16 or 8 times oversampling. 1206 * By default we prefer 16 times oversampling, so that the receiver 1207 * has a better tolerance to clock deviations. 1208 * 8 times oversampling is only used to achieve higher speeds. 1209 */ 1210 if (usartdiv < 16) { 1211 oversampling = 8; 1212 cr1 |= USART_CR1_OVER8; 1213 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8); 1214 } else { 1215 oversampling = 16; 1216 cr1 &= ~USART_CR1_OVER8; 1217 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8); 1218 } 1219 1220 mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT; 1221 fraction = usartdiv % oversampling; 1222 writel_relaxed(mantissa | fraction, port->membase + ofs->brr); 1223 1224 uart_update_timeout(port, cflag, baud); 1225 1226 port->read_status_mask = USART_SR_ORE; 1227 if (termios->c_iflag & INPCK) 1228 port->read_status_mask |= USART_SR_PE | USART_SR_FE; 1229 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) 1230 port->read_status_mask |= USART_SR_FE; 1231 1232 /* Characters to ignore */ 1233 port->ignore_status_mask = 0; 1234 if (termios->c_iflag & IGNPAR) 1235 port->ignore_status_mask = USART_SR_PE | USART_SR_FE; 1236 if (termios->c_iflag & IGNBRK) { 1237 port->ignore_status_mask |= USART_SR_FE; 1238 /* 1239 * If we're ignoring parity and break indicators, 1240 * ignore overruns too (for real raw support). 1241 */ 1242 if (termios->c_iflag & IGNPAR) 1243 port->ignore_status_mask |= USART_SR_ORE; 1244 } 1245 1246 /* Ignore all characters if CREAD is not set */ 1247 if ((termios->c_cflag & CREAD) == 0) 1248 port->ignore_status_mask |= USART_SR_DUMMY_RX; 1249 1250 if (stm32_port->rx_ch) { 1251 /* 1252 * Setup DMA to collect only valid data and enable error irqs. 1253 * This also enables break reception when using DMA. 
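 * On an error flag, the interrupt handler masks the DMA request line and drains the faulty data in PIO mode via stm32_usart_receive_chars_pio().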
1254 */ 1255 cr1 |= USART_CR1_PEIE; 1256 cr3 |= USART_CR3_EIE; 1257 cr3 |= USART_CR3_DMAR; 1258 cr3 |= USART_CR3_DDRE; 1259 } 1260 1261 if (rs485conf->flags & SER_RS485_ENABLED) { 1262 stm32_usart_config_reg_rs485(&cr1, &cr3, 1263 rs485conf->delay_rts_before_send, 1264 rs485conf->delay_rts_after_send, 1265 baud); 1266 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) { 1267 cr3 &= ~USART_CR3_DEP; 1268 rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND; 1269 } else { 1270 cr3 |= USART_CR3_DEP; 1271 rs485conf->flags |= SER_RS485_RTS_AFTER_SEND; 1272 } 1273 1274 } else { 1275 cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP); 1276 cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK); 1277 } 1278 1279 /* Configure wake up from low power on start bit detection */ 1280 if (stm32_port->wakeup_src) { 1281 cr3 &= ~USART_CR3_WUS_MASK; 1282 cr3 |= USART_CR3_WUS_START_BIT; 1283 } 1284 1285 writel_relaxed(cr3, port->membase + ofs->cr3); 1286 writel_relaxed(cr2, port->membase + ofs->cr2); 1287 writel_relaxed(cr1, port->membase + ofs->cr1); 1288 1289 stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); 1290 spin_unlock_irqrestore(&port->lock, flags); 1291 1292 /* Handle modem control interrupts */ 1293 if (UART_ENABLE_MS(port, termios->c_cflag)) 1294 stm32_usart_enable_ms(port); 1295 else 1296 stm32_usart_disable_ms(port); 1297 } 1298 1299 static const char *stm32_usart_type(struct uart_port *port) 1300 { 1301 return (port->type == PORT_STM32) ? DRIVER_NAME : NULL; 1302 } 1303 1304 static void stm32_usart_release_port(struct uart_port *port) 1305 { 1306 } 1307 1308 static int stm32_usart_request_port(struct uart_port *port) 1309 { 1310 return 0; 1311 } 1312 1313 static void stm32_usart_config_port(struct uart_port *port, int flags) 1314 { 1315 if (flags & UART_CONFIG_TYPE) 1316 port->type = PORT_STM32; 1317 } 1318 1319 static int 1320 stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser) 1321 { 1322 /* No user changeable parameters */ 1323 return -EINVAL; 1324 } 1325 1326 static void stm32_usart_pm(struct uart_port *port, unsigned int state, 1327 unsigned int oldstate) 1328 { 1329 struct stm32_port *stm32port = container_of(port, 1330 struct stm32_port, port); 1331 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 1332 const struct stm32_usart_config *cfg = &stm32port->info->cfg; 1333 unsigned long flags; 1334 1335 switch (state) { 1336 case UART_PM_STATE_ON: 1337 pm_runtime_get_sync(port->dev); 1338 break; 1339 case UART_PM_STATE_OFF: 1340 spin_lock_irqsave(&port->lock, flags); 1341 stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit)); 1342 spin_unlock_irqrestore(&port->lock, flags); 1343 pm_runtime_put_sync(port->dev); 1344 break; 1345 } 1346 } 1347 1348 #if defined(CONFIG_CONSOLE_POLL) 1349 1350 /* Callbacks for characters polling in debug context (i.e. KGDB). 
*/ 1351 static int stm32_usart_poll_init(struct uart_port *port) 1352 { 1353 struct stm32_port *stm32_port = to_stm32_port(port); 1354 1355 return clk_prepare_enable(stm32_port->clk); 1356 } 1357 1358 static int stm32_usart_poll_get_char(struct uart_port *port) 1359 { 1360 struct stm32_port *stm32_port = to_stm32_port(port); 1361 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1362 1363 if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE)) 1364 return NO_POLL_CHAR; 1365 1366 return readl_relaxed(port->membase + ofs->rdr) & stm32_port->rdr_mask; 1367 } 1368 1369 static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch) 1370 { 1371 stm32_usart_console_putchar(port, ch); 1372 } 1373 #endif /* CONFIG_CONSOLE_POLL */ 1374 1375 static const struct uart_ops stm32_uart_ops = { 1376 .tx_empty = stm32_usart_tx_empty, 1377 .set_mctrl = stm32_usart_set_mctrl, 1378 .get_mctrl = stm32_usart_get_mctrl, 1379 .stop_tx = stm32_usart_stop_tx, 1380 .start_tx = stm32_usart_start_tx, 1381 .throttle = stm32_usart_throttle, 1382 .unthrottle = stm32_usart_unthrottle, 1383 .stop_rx = stm32_usart_stop_rx, 1384 .enable_ms = stm32_usart_enable_ms, 1385 .break_ctl = stm32_usart_break_ctl, 1386 .startup = stm32_usart_startup, 1387 .shutdown = stm32_usart_shutdown, 1388 .flush_buffer = stm32_usart_flush_buffer, 1389 .set_termios = stm32_usart_set_termios, 1390 .pm = stm32_usart_pm, 1391 .type = stm32_usart_type, 1392 .release_port = stm32_usart_release_port, 1393 .request_port = stm32_usart_request_port, 1394 .config_port = stm32_usart_config_port, 1395 .verify_port = stm32_usart_verify_port, 1396 #if defined(CONFIG_CONSOLE_POLL) 1397 .poll_init = stm32_usart_poll_init, 1398 .poll_get_char = stm32_usart_poll_get_char, 1399 .poll_put_char = stm32_usart_poll_put_char, 1400 #endif /* CONFIG_CONSOLE_POLL */ 1401 }; 1402 1403 /* 1404 * STM32H7 RX & TX FIFO threshold configuration (CR3 RXFTCFG / TXFTCFG) 1405 * Note: 1 isn't a valid value in RXFTCFG / TXFTCFG. In this case, 1406 * RXNEIE / TXEIE can be used instead of threshold irqs: RXFTIE / TXFTIE. 1407 * So, RXFTCFG / TXFTCFG bitfields values are encoded as array index + 1. 
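 * For example, the default 8-byte threshold matches index 3 of the array below, so the value programmed into TXFTCFG/RXFTCFG is 2.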
1408 */ 1409 static const u32 stm32h7_usart_fifo_thresh_cfg[] = { 1, 2, 4, 8, 12, 14, 16 }; 1410 1411 static void stm32_usart_get_ftcfg(struct platform_device *pdev, const char *p, 1412 int *ftcfg) 1413 { 1414 u32 bytes, i; 1415 1416 /* DT option to get RX & TX FIFO threshold (default to 8 bytes) */ 1417 if (of_property_read_u32(pdev->dev.of_node, p, &bytes)) 1418 bytes = 8; 1419 1420 for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++) 1421 if (stm32h7_usart_fifo_thresh_cfg[i] >= bytes) 1422 break; 1423 if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg)) 1424 i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1; 1425 1426 dev_dbg(&pdev->dev, "%s set to %d bytes\n", p, 1427 stm32h7_usart_fifo_thresh_cfg[i]); 1428 1429 /* Provide FIFO threshold ftcfg (1 is invalid: threshold irq unused) */ 1430 if (i) 1431 *ftcfg = i - 1; 1432 else 1433 *ftcfg = -EINVAL; 1434 } 1435 1436 static void stm32_usart_deinit_port(struct stm32_port *stm32port) 1437 { 1438 clk_disable_unprepare(stm32port->clk); 1439 } 1440 1441 static const struct serial_rs485 stm32_rs485_supported = { 1442 .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND | 1443 SER_RS485_RX_DURING_TX, 1444 .delay_rts_before_send = 1, 1445 .delay_rts_after_send = 1, 1446 }; 1447 1448 static int stm32_usart_init_port(struct stm32_port *stm32port, 1449 struct platform_device *pdev) 1450 { 1451 struct uart_port *port = &stm32port->port; 1452 struct resource *res; 1453 int ret, irq; 1454 1455 irq = platform_get_irq(pdev, 0); 1456 if (irq < 0) 1457 return irq; 1458 1459 port->iotype = UPIO_MEM; 1460 port->flags = UPF_BOOT_AUTOCONF; 1461 port->ops = &stm32_uart_ops; 1462 port->dev = &pdev->dev; 1463 port->fifosize = stm32port->info->cfg.fifosize; 1464 port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE); 1465 port->irq = irq; 1466 port->rs485_config = stm32_usart_config_rs485; 1467 port->rs485_supported = stm32_rs485_supported; 1468 1469 ret = stm32_usart_init_rs485(port, pdev); 1470 if (ret) 1471 return ret; 1472 1473 stm32port->wakeup_src = stm32port->info->cfg.has_wakeup && 1474 of_property_read_bool(pdev->dev.of_node, "wakeup-source"); 1475 1476 stm32port->swap = stm32port->info->cfg.has_swap && 1477 of_property_read_bool(pdev->dev.of_node, "rx-tx-swap"); 1478 1479 stm32port->fifoen = stm32port->info->cfg.has_fifo; 1480 if (stm32port->fifoen) { 1481 stm32_usart_get_ftcfg(pdev, "rx-threshold", 1482 &stm32port->rxftcfg); 1483 stm32_usart_get_ftcfg(pdev, "tx-threshold", 1484 &stm32port->txftcfg); 1485 } 1486 1487 port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res); 1488 if (IS_ERR(port->membase)) 1489 return PTR_ERR(port->membase); 1490 port->mapbase = res->start; 1491 1492 spin_lock_init(&port->lock); 1493 1494 stm32port->clk = devm_clk_get(&pdev->dev, NULL); 1495 if (IS_ERR(stm32port->clk)) 1496 return PTR_ERR(stm32port->clk); 1497 1498 /* Ensure that clk rate is correct by enabling the clk */ 1499 ret = clk_prepare_enable(stm32port->clk); 1500 if (ret) 1501 return ret; 1502 1503 stm32port->port.uartclk = clk_get_rate(stm32port->clk); 1504 if (!stm32port->port.uartclk) { 1505 ret = -EINVAL; 1506 goto err_clk; 1507 } 1508 1509 stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0); 1510 if (IS_ERR(stm32port->gpios)) { 1511 ret = PTR_ERR(stm32port->gpios); 1512 goto err_clk; 1513 } 1514 1515 /* 1516 * Both CTS/RTS gpios and "st,hw-flow-ctrl" (deprecated) or "uart-has-rtscts" 1517 * properties should not be specified. 
1518 */ 1519 if (stm32port->hw_flow_control) { 1520 if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) || 1521 mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) { 1522 dev_err(&pdev->dev, "Conflicting RTS/CTS config\n"); 1523 ret = -EINVAL; 1524 goto err_clk; 1525 } 1526 } 1527 1528 return ret; 1529 1530 err_clk: 1531 clk_disable_unprepare(stm32port->clk); 1532 1533 return ret; 1534 } 1535 1536 static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev) 1537 { 1538 struct device_node *np = pdev->dev.of_node; 1539 int id; 1540 1541 if (!np) 1542 return NULL; 1543 1544 id = of_alias_get_id(np, "serial"); 1545 if (id < 0) { 1546 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id); 1547 return NULL; 1548 } 1549 1550 if (WARN_ON(id >= STM32_MAX_PORTS)) 1551 return NULL; 1552 1553 stm32_ports[id].hw_flow_control = 1554 of_property_read_bool (np, "st,hw-flow-ctrl") /*deprecated*/ || 1555 of_property_read_bool (np, "uart-has-rtscts"); 1556 stm32_ports[id].port.line = id; 1557 stm32_ports[id].cr1_irq = USART_CR1_RXNEIE; 1558 stm32_ports[id].cr3_irq = 0; 1559 stm32_ports[id].last_res = RX_BUF_L; 1560 return &stm32_ports[id]; 1561 } 1562 1563 #ifdef CONFIG_OF 1564 static const struct of_device_id stm32_match[] = { 1565 { .compatible = "st,stm32-uart", .data = &stm32f4_info}, 1566 { .compatible = "st,stm32f7-uart", .data = &stm32f7_info}, 1567 { .compatible = "st,stm32h7-uart", .data = &stm32h7_info}, 1568 {}, 1569 }; 1570 1571 MODULE_DEVICE_TABLE(of, stm32_match); 1572 #endif 1573 1574 static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port, 1575 struct platform_device *pdev) 1576 { 1577 if (stm32port->rx_buf) 1578 dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf, 1579 stm32port->rx_dma_buf); 1580 } 1581 1582 static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port, 1583 struct platform_device *pdev) 1584 { 1585 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 1586 struct uart_port *port = &stm32port->port; 1587 struct device *dev = &pdev->dev; 1588 struct dma_slave_config config; 1589 int ret; 1590 1591 stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L, 1592 &stm32port->rx_dma_buf, 1593 GFP_KERNEL); 1594 if (!stm32port->rx_buf) 1595 return -ENOMEM; 1596 1597 /* Configure DMA channel */ 1598 memset(&config, 0, sizeof(config)); 1599 config.src_addr = port->mapbase + ofs->rdr; 1600 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 1601 1602 ret = dmaengine_slave_config(stm32port->rx_ch, &config); 1603 if (ret < 0) { 1604 dev_err(dev, "rx dma channel config failed\n"); 1605 stm32_usart_of_dma_rx_remove(stm32port, pdev); 1606 return ret; 1607 } 1608 1609 return 0; 1610 } 1611 1612 static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port, 1613 struct platform_device *pdev) 1614 { 1615 if (stm32port->tx_buf) 1616 dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf, 1617 stm32port->tx_dma_buf); 1618 } 1619 1620 static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port, 1621 struct platform_device *pdev) 1622 { 1623 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs; 1624 struct uart_port *port = &stm32port->port; 1625 struct device *dev = &pdev->dev; 1626 struct dma_slave_config config; 1627 int ret; 1628 1629 stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L, 1630 &stm32port->tx_dma_buf, 1631 GFP_KERNEL); 1632 if (!stm32port->tx_buf) 1633 return -ENOMEM; 1634 1635 /* Configure DMA channel */ 1636 memset(&config, 0, sizeof(config)); 1637 config.dst_addr = port->mapbase + 
ofs->tdr; 1638 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 1639 1640 ret = dmaengine_slave_config(stm32port->tx_ch, &config); 1641 if (ret < 0) { 1642 dev_err(dev, "tx dma channel config failed\n"); 1643 stm32_usart_of_dma_tx_remove(stm32port, pdev); 1644 return ret; 1645 } 1646 1647 return 0; 1648 } 1649 1650 static int stm32_usart_serial_probe(struct platform_device *pdev) 1651 { 1652 struct stm32_port *stm32port; 1653 int ret; 1654 1655 stm32port = stm32_usart_of_get_port(pdev); 1656 if (!stm32port) 1657 return -ENODEV; 1658 1659 stm32port->info = of_device_get_match_data(&pdev->dev); 1660 if (!stm32port->info) 1661 return -EINVAL; 1662 1663 stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx"); 1664 if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) 1665 return -EPROBE_DEFER; 1666 1667 /* Fall back in interrupt mode for any non-deferral error */ 1668 if (IS_ERR(stm32port->rx_ch)) 1669 stm32port->rx_ch = NULL; 1670 1671 stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx"); 1672 if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) { 1673 ret = -EPROBE_DEFER; 1674 goto err_dma_rx; 1675 } 1676 /* Fall back in interrupt mode for any non-deferral error */ 1677 if (IS_ERR(stm32port->tx_ch)) 1678 stm32port->tx_ch = NULL; 1679 1680 ret = stm32_usart_init_port(stm32port, pdev); 1681 if (ret) 1682 goto err_dma_tx; 1683 1684 if (stm32port->wakeup_src) { 1685 device_set_wakeup_capable(&pdev->dev, true); 1686 ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq); 1687 if (ret) 1688 goto err_deinit_port; 1689 } 1690 1691 if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) { 1692 /* Fall back in interrupt mode */ 1693 dma_release_channel(stm32port->rx_ch); 1694 stm32port->rx_ch = NULL; 1695 } 1696 1697 if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) { 1698 /* Fall back in interrupt mode */ 1699 dma_release_channel(stm32port->tx_ch); 1700 stm32port->tx_ch = NULL; 1701 } 1702 1703 if (!stm32port->rx_ch) 1704 dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n"); 1705 if (!stm32port->tx_ch) 1706 dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n"); 1707 1708 platform_set_drvdata(pdev, &stm32port->port); 1709 1710 pm_runtime_get_noresume(&pdev->dev); 1711 pm_runtime_set_active(&pdev->dev); 1712 pm_runtime_enable(&pdev->dev); 1713 1714 ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port); 1715 if (ret) 1716 goto err_port; 1717 1718 pm_runtime_put_sync(&pdev->dev); 1719 1720 return 0; 1721 1722 err_port: 1723 pm_runtime_disable(&pdev->dev); 1724 pm_runtime_set_suspended(&pdev->dev); 1725 pm_runtime_put_noidle(&pdev->dev); 1726 1727 if (stm32port->tx_ch) 1728 stm32_usart_of_dma_tx_remove(stm32port, pdev); 1729 if (stm32port->rx_ch) 1730 stm32_usart_of_dma_rx_remove(stm32port, pdev); 1731 1732 if (stm32port->wakeup_src) 1733 dev_pm_clear_wake_irq(&pdev->dev); 1734 1735 err_deinit_port: 1736 if (stm32port->wakeup_src) 1737 device_set_wakeup_capable(&pdev->dev, false); 1738 1739 stm32_usart_deinit_port(stm32port); 1740 1741 err_dma_tx: 1742 if (stm32port->tx_ch) 1743 dma_release_channel(stm32port->tx_ch); 1744 1745 err_dma_rx: 1746 if (stm32port->rx_ch) 1747 dma_release_channel(stm32port->rx_ch); 1748 1749 return ret; 1750 } 1751 1752 static int stm32_usart_serial_remove(struct platform_device *pdev) 1753 { 1754 struct uart_port *port = platform_get_drvdata(pdev); 1755 struct stm32_port *stm32_port = to_stm32_port(port); 1756 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1757 u32 cr3; 1758 1759 pm_runtime_get_sync(&pdev->dev); 1760 
uart_remove_one_port(&stm32_usart_driver, port); 1761 1762 pm_runtime_disable(&pdev->dev); 1763 pm_runtime_set_suspended(&pdev->dev); 1764 pm_runtime_put_noidle(&pdev->dev); 1765 1766 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE); 1767 cr3 = readl_relaxed(port->membase + ofs->cr3); 1768 cr3 &= ~USART_CR3_EIE; 1769 cr3 &= ~USART_CR3_DMAR; 1770 cr3 &= ~USART_CR3_DDRE; 1771 writel_relaxed(cr3, port->membase + ofs->cr3); 1772 1773 if (stm32_port->tx_ch) { 1774 stm32_usart_of_dma_tx_remove(stm32_port, pdev); 1775 dma_release_channel(stm32_port->tx_ch); 1776 } 1777 1778 if (stm32_port->rx_ch) { 1779 stm32_usart_of_dma_rx_remove(stm32_port, pdev); 1780 dma_release_channel(stm32_port->rx_ch); 1781 } 1782 1783 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT); 1784 1785 if (stm32_port->wakeup_src) { 1786 dev_pm_clear_wake_irq(&pdev->dev); 1787 device_init_wakeup(&pdev->dev, false); 1788 } 1789 1790 stm32_usart_deinit_port(stm32_port); 1791 1792 return 0; 1793 } 1794 1795 static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch) 1796 { 1797 struct stm32_port *stm32_port = to_stm32_port(port); 1798 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1799 u32 isr; 1800 int ret; 1801 1802 ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr, 1803 (isr & USART_SR_TXE), 100, 1804 STM32_USART_TIMEOUT_USEC); 1805 if (ret != 0) { 1806 dev_err(port->dev, "Error while sending data in UART TX : %d\n", ret); 1807 return; 1808 } 1809 writel_relaxed(ch, port->membase + ofs->tdr); 1810 } 1811 1812 #ifdef CONFIG_SERIAL_STM32_CONSOLE 1813 static void stm32_usart_console_write(struct console *co, const char *s, 1814 unsigned int cnt) 1815 { 1816 struct uart_port *port = &stm32_ports[co->index].port; 1817 struct stm32_port *stm32_port = to_stm32_port(port); 1818 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; 1819 const struct stm32_usart_config *cfg = &stm32_port->info->cfg; 1820 unsigned long flags; 1821 u32 old_cr1, new_cr1; 1822 int locked = 1; 1823 1824 if (oops_in_progress) 1825 locked = spin_trylock_irqsave(&port->lock, flags); 1826 else 1827 spin_lock_irqsave(&port->lock, flags); 1828 1829 /* Save and disable interrupts, enable the transmitter */ 1830 old_cr1 = readl_relaxed(port->membase + ofs->cr1); 1831 new_cr1 = old_cr1 & ~USART_CR1_IE_MASK; 1832 new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit); 1833 writel_relaxed(new_cr1, port->membase + ofs->cr1); 1834 1835 uart_console_write(port, s, cnt, stm32_usart_console_putchar); 1836 1837 /* Restore interrupt state */ 1838 writel_relaxed(old_cr1, port->membase + ofs->cr1); 1839 1840 if (locked) 1841 spin_unlock_irqrestore(&port->lock, flags); 1842 } 1843 1844 static int stm32_usart_console_setup(struct console *co, char *options) 1845 { 1846 struct stm32_port *stm32port; 1847 int baud = 9600; 1848 int bits = 8; 1849 int parity = 'n'; 1850 int flow = 'n'; 1851 1852 if (co->index >= STM32_MAX_PORTS) 1853 return -ENODEV; 1854 1855 stm32port = &stm32_ports[co->index]; 1856 1857 /* 1858 * This driver does not support early console initialization 1859 * (use ARM early printk support instead), so we only expect 1860 * this to be called during the uart port registration when the 1861 * driver gets probed and the port should be mapped at that point. 
         */
        if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
                return -ENXIO;

        if (options)
                uart_parse_options(options, &baud, &parity, &bits, &flow);

        return uart_set_options(&stm32port->port, co, baud, parity, bits, flow);
}

static struct console stm32_console = {
        .name           = STM32_SERIAL_NAME,
        .device         = uart_console_device,
        .write          = stm32_usart_console_write,
        .setup          = stm32_usart_console_setup,
        .flags          = CON_PRINTBUFFER,
        .index          = -1,
        .data           = &stm32_usart_driver,
};

#define STM32_SERIAL_CONSOLE (&stm32_console)

#else
#define STM32_SERIAL_CONSOLE NULL
#endif /* CONFIG_SERIAL_STM32_CONSOLE */

#ifdef CONFIG_SERIAL_EARLYCON
static void early_stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
{
        struct stm32_usart_info *info = port->private_data;

        while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE))
                cpu_relax();

        writel_relaxed(ch, port->membase + info->ofs.tdr);
}

static void early_stm32_serial_write(struct console *console, const char *s, unsigned int count)
{
        struct earlycon_device *device = console->data;
        struct uart_port *port = &device->port;

        uart_console_write(port, s, count, early_stm32_usart_console_putchar);
}

static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options)
{
        if (!(device->port.membase || device->port.iobase))
                return -ENODEV;
        device->port.private_data = &stm32h7_info;
        device->con->write = early_stm32_serial_write;
        return 0;
}

static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options)
{
        if (!(device->port.membase || device->port.iobase))
                return -ENODEV;
        device->port.private_data = &stm32f7_info;
        device->con->write = early_stm32_serial_write;
        return 0;
}

static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options)
{
        if (!(device->port.membase || device->port.iobase))
                return -ENODEV;
        device->port.private_data = &stm32f4_info;
        device->con->write = early_stm32_serial_write;
        return 0;
}

OF_EARLYCON_DECLARE(stm32, "st,stm32h7-uart", early_stm32_h7_serial_setup);
OF_EARLYCON_DECLARE(stm32, "st,stm32f7-uart", early_stm32_f7_serial_setup);
OF_EARLYCON_DECLARE(stm32, "st,stm32-uart", early_stm32_f4_serial_setup);
#endif /* CONFIG_SERIAL_EARLYCON */

static struct uart_driver stm32_usart_driver = {
        .driver_name    = DRIVER_NAME,
        .dev_name       = STM32_SERIAL_NAME,
        .major          = 0,
        .minor          = 0,
        .nr             = STM32_MAX_PORTS,
        .cons           = STM32_SERIAL_CONSOLE,
};

static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
                                                        bool enable)
{
        struct stm32_port *stm32_port = to_stm32_port(port);
        const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
        struct tty_port *tport = &port->state->port;
        int ret;
        unsigned int size;
        unsigned long flags;

        if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
                return 0;

        /*
         * Enable low-power wake-up and wake-up irq if argument is set to
         * "enable", disable low-power wake-up and wake-up irq otherwise
         */
        if (enable) {
                stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
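                /* Also unmask the wake-up flag interrupt so the wake-up event raises an IRQ */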
                stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
                mctrl_gpio_enable_irq_wake(stm32_port->gpios);

                /*
                 * When DMA is used for reception, it must be disabled before
                 * entering low-power mode and re-enabled when exiting from
                 * low-power mode.
                 */
                if (stm32_port->rx_ch) {
                        spin_lock_irqsave(&port->lock, flags);
                        /* Avoid race with RX IRQ when DMAR is cleared */
                        stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
                        /* Poll data from DMA RX buffer if any */
                        size = stm32_usart_receive_chars(port, true);
                        dmaengine_terminate_async(stm32_port->rx_ch);
                        uart_unlock_and_check_sysrq_irqrestore(port, flags);
                        if (size)
                                tty_flip_buffer_push(tport);
                }

                /* Poll data from RX FIFO if any */
                stm32_usart_receive_chars(port, false);
        } else {
                if (stm32_port->rx_ch) {
                        ret = stm32_usart_start_rx_dma_cyclic(port);
                        if (ret)
                                return ret;
                }
                mctrl_gpio_disable_irq_wake(stm32_port->gpios);
                stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
                stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
        }

        return 0;
}

static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
{
        struct uart_port *port = dev_get_drvdata(dev);
        int ret;

        uart_suspend_port(&stm32_usart_driver, port);

        if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
                ret = stm32_usart_serial_en_wakeup(port, true);
                if (ret)
                        return ret;
        }

        /*
         * When "no_console_suspend" is enabled, keep the pinctrl default state
         * and rely on bootloader stage to restore this state upon resume.
         * Otherwise, apply the idle or sleep states depending on wakeup
         * capabilities.
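         * The idle state is expected to keep the wake-up signal functional,
         * whereas the sleep state may release the pins entirely.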
         */
        if (console_suspend_enabled || !uart_console(port)) {
                if (device_may_wakeup(dev) || device_wakeup_path(dev))
                        pinctrl_pm_select_idle_state(dev);
                else
                        pinctrl_pm_select_sleep_state(dev);
        }

        return 0;
}

static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
{
        struct uart_port *port = dev_get_drvdata(dev);
        int ret;

        pinctrl_pm_select_default_state(dev);

        if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
                ret = stm32_usart_serial_en_wakeup(port, false);
                if (ret)
                        return ret;
        }

        return uart_resume_port(&stm32_usart_driver, port);
}

static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
{
        struct uart_port *port = dev_get_drvdata(dev);
        struct stm32_port *stm32port = container_of(port,
                        struct stm32_port, port);

        clk_disable_unprepare(stm32port->clk);

        return 0;
}

static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
{
        struct uart_port *port = dev_get_drvdata(dev);
        struct stm32_port *stm32port = container_of(port,
                        struct stm32_port, port);

        return clk_prepare_enable(stm32port->clk);
}

static const struct dev_pm_ops stm32_serial_pm_ops = {
        SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
                           stm32_usart_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
                                stm32_usart_serial_resume)
};

static struct platform_driver stm32_serial_driver = {
        .probe          = stm32_usart_serial_probe,
        .remove         = stm32_usart_serial_remove,
        .driver = {
                .name   = DRIVER_NAME,
                .pm     = &stm32_serial_pm_ops,
                .of_match_table = of_match_ptr(stm32_match),
        },
};

static int __init stm32_usart_init(void)
{
        static char banner[] __initdata = "STM32 USART driver initialized";
        int ret;

        pr_info("%s\n", banner);

        ret = uart_register_driver(&stm32_usart_driver);
        if (ret)
                return ret;

        ret = platform_driver_register(&stm32_serial_driver);
        if (ret)
                uart_unregister_driver(&stm32_usart_driver);

        return ret;
}

static void __exit stm32_usart_exit(void)
{
        platform_driver_unregister(&stm32_serial_driver);
        uart_unregister_driver(&stm32_usart_driver);
}

module_init(stm32_usart_init);
module_exit(stm32_usart_exit);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
MODULE_LICENSE("GPL v2");