// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Maxime Coquelin 2015
 * Copyright (C) STMicroelectronics SA 2017
 * Authors: Maxime Coquelin <mcoquelin.stm32@gmail.com>
 *          Gerald Baeza <gerald.baeza@foss.st.com>
 *          Erwan Le Ray <erwan.leray@foss.st.com>
 *
 * Inspired by st-asc.c from STMicroelectronics (c)
 */

#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/tty_flip.h>
#include <linux/tty.h>

#include "serial_mctrl_gpio.h"
#include "stm32-usart.h"


/* Register offsets */
static struct stm32_usart_info __maybe_unused stm32f4_info = {
	.ofs = {
		.isr = 0x00,
		.rdr = 0x04,
		.tdr = 0x04,
		.brr = 0x08,
		.cr1 = 0x0c,
		.cr2 = 0x10,
		.cr3 = 0x14,
		.gtpr = 0x18,
		.rtor = UNDEF_REG,
		.rqr = UNDEF_REG,
		.icr = UNDEF_REG,
	},
	.cfg = {
		.uart_enable_bit = 13,
		.has_7bits_data = false,
		.fifosize = 1,
	}
};

static struct stm32_usart_info __maybe_unused stm32f7_info = {
	.ofs = {
		.cr1 = 0x00,
		.cr2 = 0x04,
		.cr3 = 0x08,
		.brr = 0x0c,
		.gtpr = 0x10,
		.rtor = 0x14,
		.rqr = 0x18,
		.isr = 0x1c,
		.icr = 0x20,
		.rdr = 0x24,
		.tdr = 0x28,
	},
	.cfg = {
		.uart_enable_bit = 0,
		.has_7bits_data = true,
		.has_swap = true,
		.fifosize = 1,
	}
};

static struct stm32_usart_info __maybe_unused stm32h7_info = {
	.ofs = {
		.cr1 = 0x00,
		.cr2 = 0x04,
		.cr3 = 0x08,
		.brr = 0x0c,
		.gtpr = 0x10,
		.rtor = 0x14,
		.rqr = 0x18,
		.isr = 0x1c,
		.icr = 0x20,
		.rdr = 0x24,
		.tdr = 0x28,
	},
	.cfg = {
		.uart_enable_bit = 0,
		.has_7bits_data = true,
		.has_swap = true,
		.has_wakeup = true,
		.has_fifo = true,
		.fifosize = 16,
	}
};

static void stm32_usart_stop_tx(struct uart_port *port);
static void stm32_usart_transmit_chars(struct uart_port *port);
static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch);

static inline struct stm32_port *to_stm32_port(struct uart_port *port)
{
	return container_of(port, struct stm32_port, port);
}

static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits)
{
	u32 val;

	val = readl_relaxed(port->membase + reg);
	val |= bits;
	writel_relaxed(val, port->membase + reg);
}

static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
{
	u32 val;

	val = readl_relaxed(port->membase + reg);
	val &= ~bits;
	writel_relaxed(val, port->membase + reg);
}

static unsigned int stm32_usart_tx_empty(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
		return TIOCSER_TEMT;

	return 0;
}

static void stm32_usart_rs485_rts_enable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct serial_rs485 *rs485conf = &port->rs485;

	if (stm32_port->hw_flow_control ||
	    !(rs485conf->flags & SER_RS485_ENABLED))
		return;

	if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
		mctrl_gpio_set(stm32_port->gpios,
			       stm32_port->port.mctrl | TIOCM_RTS);
	} else {
		mctrl_gpio_set(stm32_port->gpios,
			       stm32_port->port.mctrl & ~TIOCM_RTS);
	}
}

static void stm32_usart_rs485_rts_disable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct serial_rs485 *rs485conf = &port->rs485;

	if (stm32_port->hw_flow_control ||
	    !(rs485conf->flags & SER_RS485_ENABLED))
		return;

	if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
		mctrl_gpio_set(stm32_port->gpios,
			       stm32_port->port.mctrl & ~TIOCM_RTS);
	} else {
		mctrl_gpio_set(stm32_port->gpios,
			       stm32_port->port.mctrl | TIOCM_RTS);
	}
}

static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
					 u32 delay_DDE, u32 baud)
{
	u32 rs485_deat_dedt;
	u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT);
	bool over8;

	*cr3 |= USART_CR3_DEM;
	over8 = *cr1 & USART_CR1_OVER8;

	*cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);

	if (over8)
		rs485_deat_dedt = delay_ADE * baud * 8;
	else
		rs485_deat_dedt = delay_ADE * baud * 16;

	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
			  rs485_deat_dedt_max : rs485_deat_dedt;
	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) &
			  USART_CR1_DEAT_MASK;
	*cr1 |= rs485_deat_dedt;

	if (over8)
		rs485_deat_dedt = delay_DDE * baud * 8;
	else
		rs485_deat_dedt = delay_DDE * baud * 16;

	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
			  rs485_deat_dedt_max : rs485_deat_dedt;
	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) &
			  USART_CR1_DEDT_MASK;
	*cr1 |= rs485_deat_dedt;
}

static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *termios,
				    struct serial_rs485 *rs485conf)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	u32 usartdiv, baud, cr1, cr3;
	bool over8;

	stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));

	if (port->rs485_rx_during_tx_gpio)
		gpiod_set_value_cansleep(port->rs485_rx_during_tx_gpio,
					 !!(rs485conf->flags & SER_RS485_RX_DURING_TX));
	else
		rs485conf->flags |= SER_RS485_RX_DURING_TX;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		cr1 = readl_relaxed(port->membase + ofs->cr1);
		cr3 = readl_relaxed(port->membase + ofs->cr3);
		usartdiv = readl_relaxed(port->membase + ofs->brr);
		usartdiv = usartdiv & GENMASK(15, 0);
		over8 = cr1 & USART_CR1_OVER8;

		if (over8)
			usartdiv = usartdiv | (usartdiv & GENMASK(4, 0))
				   << USART_BRR_04_R_SHIFT;

		baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv);
		stm32_usart_config_reg_rs485(&cr1, &cr3,
					     rs485conf->delay_rts_before_send,
					     rs485conf->delay_rts_after_send,
					     baud);

		if (rs485conf->flags & SER_RS485_RTS_ON_SEND)
			cr3 &= ~USART_CR3_DEP;
		else
			cr3 |= USART_CR3_DEP;

		writel_relaxed(cr3, port->membase + ofs->cr3);
		writel_relaxed(cr1, port->membase + ofs->cr1);
	} else {
		stm32_usart_clr_bits(port, ofs->cr3,
				     USART_CR3_DEM | USART_CR3_DEP);
		stm32_usart_clr_bits(port, ofs->cr1,
				     USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
	}

	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));

	/* Adjust RTS polarity in case it's driven in software */
	if (stm32_usart_tx_empty(port))
		stm32_usart_rs485_rts_disable(port);
	else
		stm32_usart_rs485_rts_enable(port);

	return 0;
}

static int stm32_usart_init_rs485(struct uart_port *port,
				  struct platform_device *pdev)
{
	struct serial_rs485 *rs485conf = &port->rs485;

	rs485conf->flags = 0;
	rs485conf->delay_rts_before_send = 0;
	rs485conf->delay_rts_after_send = 0;

	if (!pdev->dev.of_node)
		return -ENODEV;

	return uart_get_rs485_mode(port);
}

static bool stm32_usart_rx_dma_enabled(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (!stm32_port->rx_ch)
		return false;

	return !!(readl_relaxed(port->membase + ofs->cr3) & USART_CR3_DMAR);
}

/* Return true when data is pending (in pio mode), and false when no data is pending. */
static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	*sr = readl_relaxed(port->membase + ofs->isr);
	/* Get pending characters in RDR or FIFO */
	if (*sr & USART_SR_RXNE) {
		/* Get all pending characters from the RDR or the FIFO when using interrupts */
		if (!stm32_usart_rx_dma_enabled(port))
			return true;

		/* Handle only RX data errors when using DMA */
		if (*sr & USART_SR_ERR_MASK)
			return true;
	}

	return false;
}

static unsigned long stm32_usart_get_char_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long c;

	c = readl_relaxed(port->membase + ofs->rdr);
	/* Apply RDR data mask */
	c &= stm32_port->rdr_mask;

	return c;
}

static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long c;
	unsigned int size = 0;
	u32 sr;
	char flag;

	while (stm32_usart_pending_rx_pio(port, &sr)) {
		sr |= USART_SR_DUMMY_RX;
		flag = TTY_NORMAL;

		/*
		 * Status bits have to be cleared before reading the RDR:
		 * in FIFO mode, reading the RDR will pop the next data
		 * (if any) along with its status bits into the SR.
		 * Not doing so leads to misalignment between RDR and SR,
		 * and clears the status bits of the next RX data.
		 *
		 * Clear error flags for stm32f7 and stm32h7 compatible
		 * devices. On stm32f4 compatible devices, the error bit is
		 * cleared by the sequence [read SR - read DR].
		 */
		if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
			writel_relaxed(sr & USART_SR_ERR_MASK,
				       port->membase + ofs->icr);

		c = stm32_usart_get_char_pio(port);
		port->icount.rx++;
		size++;
		if (sr & USART_SR_ERR_MASK) {
			if (sr & USART_SR_ORE) {
				port->icount.overrun++;
			} else if (sr & USART_SR_PE) {
				port->icount.parity++;
			} else if (sr & USART_SR_FE) {
				/* Break detection if character is null */
				if (!c) {
					port->icount.brk++;
					if (uart_handle_break(port))
						continue;
				} else {
					port->icount.frame++;
				}
			}

			sr &= port->read_status_mask;

			if (sr & USART_SR_PE) {
				flag = TTY_PARITY;
			} else if (sr & USART_SR_FE) {
				if (!c)
					flag = TTY_BREAK;
				else
					flag = TTY_FRAME;
			}
		}

		if (uart_prepare_sysrq_char(port, c))
			continue;
		uart_insert_char(port, sr, USART_SR_ORE, c, flag);
	}

	return size;
}

static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct tty_port *ttyport = &stm32_port->port.state->port;
	unsigned char *dma_start;
	int dma_count, i;

	dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res);

	/*
	 * Apply rdr_mask on buffer in order to mask parity bit.
	 * This loop is useless in cs8 mode because DMA copies only
	 * 8 bits and already ignores parity bit.
	 */
	if (!(stm32_port->rdr_mask == (BIT(8) - 1)))
		for (i = 0; i < dma_size; i++)
			*(dma_start + i) &= stm32_port->rdr_mask;

	dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size);
	port->icount.rx += dma_count;
	if (dma_count != dma_size)
		port->icount.buf_overrun++;
	stm32_port->last_res -= dma_count;
	if (stm32_port->last_res == 0)
		stm32_port->last_res = RX_BUF_L;
}

static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	unsigned int dma_size, size = 0;

	/* DMA buffer is configured in cyclic mode and handles the rollback of the buffer. */
	if (stm32_port->rx_dma_state.residue > stm32_port->last_res) {
		/* Conditional first part: from last_res to end of DMA buffer */
		dma_size = stm32_port->last_res;
		stm32_usart_push_buffer_dma(port, dma_size);
		size = dma_size;
	}

	dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue;
	stm32_usart_push_buffer_dma(port, dma_size);
	size += dma_size;

	return size;
}

static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force_dma_flush)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	enum dma_status rx_dma_status;
	u32 sr;
	unsigned int size = 0;

	if (stm32_usart_rx_dma_enabled(port) || force_dma_flush) {
		rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
						    stm32_port->rx_ch->cookie,
						    &stm32_port->rx_dma_state);
		if (rx_dma_status == DMA_IN_PROGRESS) {
			/* Empty DMA buffer */
			size = stm32_usart_receive_chars_dma(port);
			sr = readl_relaxed(port->membase + ofs->isr);
			if (sr & USART_SR_ERR_MASK) {
				/* Disable DMA request line */
				stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);

				/* Switch to PIO mode to handle the errors */
				size += stm32_usart_receive_chars_pio(port);

				/* Switch back to DMA mode */
				stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
			}
		} else {
			/* Disable RX DMA */
			dmaengine_terminate_async(stm32_port->rx_ch);
			stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
			/* Fall back to interrupt mode */
			dev_dbg(port->dev, "DMA error, fallback to irq mode\n");
			size = stm32_usart_receive_chars_pio(port);
		}
	} else {
		size = stm32_usart_receive_chars_pio(port);
	}

	return size;
}

static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port)
{
	dmaengine_terminate_async(stm32_port->tx_ch);
	stm32_port->tx_dma_busy = false;
}

static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port)
{
	/*
	 * We cannot use the function "dmaengine_tx_status" to know the
	 * status of DMA. This function does not show if the "dma complete"
	 * callback of the DMA transaction has been called. So we prefer
	 * to use the "tx_dma_busy" flag to prevent two DMA transactions
	 * from running at the same time.
	 */
	return stm32_port->tx_dma_busy;
}

static bool stm32_usart_tx_dma_enabled(struct stm32_port *stm32_port)
{
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	return !!(readl_relaxed(stm32_port->port.membase + ofs->cr3) & USART_CR3_DMAT);
}

static void stm32_usart_tx_dma_complete(void *arg)
{
	struct uart_port *port = arg;
	struct stm32_port *stm32port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	unsigned long flags;

	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
	stm32_usart_tx_dma_terminate(stm32port);

	/* Let's see if we have pending data to send */
	spin_lock_irqsave(&port->lock, flags);
	stm32_usart_transmit_chars(port);
	spin_unlock_irqrestore(&port->lock, flags);
}

static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/*
	 * Enable the TX FIFO threshold irq when the FIFO is enabled,
	 * or the TX empty irq when the FIFO is disabled.
	 */
	if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
	else
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
}

static void stm32_usart_tc_interrupt_enable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE);
}

static void stm32_usart_rx_dma_complete(void *arg)
{
	struct uart_port *port = arg;
	struct tty_port *tport = &port->state->port;
	unsigned int size;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	size = stm32_usart_receive_chars(port, false);
	uart_unlock_and_check_sysrq_irqrestore(port, flags);
	if (size)
		tty_flip_buffer_push(tport);
}

static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
	else
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
}

static void stm32_usart_tc_interrupt_disable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE);
}

static void stm32_usart_transmit_chars_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;

	if (stm32_usart_tx_dma_enabled(stm32_port))
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

	while (!uart_circ_empty(xmit)) {
		/* Check that TDR is empty before filling FIFO */
		if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
			break;
		writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr);
		uart_xmit_advance(port, 1);
	}

	/* Rely on TXE irq (mask or unmask) for sending remaining data */
	if (uart_circ_empty(xmit))
		stm32_usart_tx_interrupt_disable(port);
	else
		stm32_usart_tx_interrupt_enable(port);
}

static void stm32_usart_transmit_chars_dma(struct uart_port *port)
{
	struct stm32_port *stm32port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_async_tx_descriptor *desc = NULL;
	unsigned int count;

	if (stm32_usart_tx_dma_started(stm32port)) {
		if (!stm32_usart_tx_dma_enabled(stm32port))
			stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
		return;
	}

	count = uart_circ_chars_pending(xmit);

	if (count > TX_BUF_L)
		count = TX_BUF_L;

	if (xmit->tail < xmit->head) {
		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count);
	} else {
		size_t one = UART_XMIT_SIZE - xmit->tail;
		size_t two;

		if (one > count)
			one = count;
		two = count - one;

		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one);
		if (two)
			memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two);
	}

	desc = dmaengine_prep_slave_single(stm32port->tx_ch,
					   stm32port->tx_dma_buf,
					   count,
					   DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);

	if (!desc)
		goto fallback_err;

	/*
	 * Set the "tx_dma_busy" flag. This flag will be released when
	 * dmaengine_terminate_async() is called. It prevents
	 * transmit_chars_dma() from starting another DMA transaction
	 * if the callback of the previous one has not been called yet.
	 */
	stm32port->tx_dma_busy = true;

	desc->callback = stm32_usart_tx_dma_complete;
	desc->callback_param = port;

	/* Push current DMA TX transaction in the pending queue */
	if (dma_submit_error(dmaengine_submit(desc))) {
		/* DMA not yet started, safe to free resources */
		stm32_usart_tx_dma_terminate(stm32port);
		goto fallback_err;
	}

	/* Issue pending DMA TX requests */
	dma_async_issue_pending(stm32port->tx_ch);

	stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);

	uart_xmit_advance(port, count);

	return;

fallback_err:
	stm32_usart_transmit_chars_pio(port);
}

static void stm32_usart_transmit_chars(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;
	u32 isr;
	int ret;

	if (!stm32_port->hw_flow_control &&
	    port->rs485.flags & SER_RS485_ENABLED) {
		stm32_port->txdone = false;
		stm32_usart_tc_interrupt_disable(port);
		stm32_usart_rs485_rts_enable(port);
	}

	if (port->x_char) {
		if (stm32_usart_tx_dma_started(stm32_port) &&
		    stm32_usart_tx_dma_enabled(stm32_port))
			stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

		/* Check that TDR is empty before filling FIFO */
		ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
							isr,
							(isr & USART_SR_TXE),
							10, 1000);
		if (ret)
			dev_warn(port->dev, "1 character may be erased\n");

		writel_relaxed(port->x_char, port->membase + ofs->tdr);
		port->x_char = 0;
		port->icount.tx++;
		if (stm32_usart_tx_dma_started(stm32_port))
			stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
		return;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		stm32_usart_tx_interrupt_disable(port);
		return;
	}

	if (ofs->icr == UNDEF_REG)
		stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC);
	else
		writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);

	if (stm32_port->tx_ch)
		stm32_usart_transmit_chars_dma(port);
	else
		stm32_usart_transmit_chars_pio(port);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit)) {
		stm32_usart_tx_interrupt_disable(port);
		if (!stm32_port->hw_flow_control &&
		    port->rs485.flags & SER_RS485_ENABLED) {
			stm32_port->txdone = true;
			stm32_usart_tc_interrupt_enable(port);
		}
	}
}

static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct tty_port *tport = &port->state->port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	u32 sr;
	unsigned int size;

	sr = readl_relaxed(port->membase + ofs->isr);

	if (!stm32_port->hw_flow_control &&
	    port->rs485.flags & SER_RS485_ENABLED &&
	    (sr & USART_SR_TC)) {
		stm32_usart_tc_interrupt_disable(port);
		stm32_usart_rs485_rts_disable(port);
	}

	if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
		writel_relaxed(USART_ICR_RTOCF,
			       port->membase + ofs->icr);

	if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
		/* Clear wake up flag and disable wake up interrupt */
		writel_relaxed(USART_ICR_WUCF,
			       port->membase + ofs->icr);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
		if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
			pm_wakeup_event(tport->tty->dev, 0);
	}

	/*
	 * RX errors in DMA mode have to be handled ASAP to avoid an overrun, as the DMA
	 * request line has been masked by hardware and RX data are stacking up in the FIFO.
	 */
	if (!stm32_port->throttled) {
		if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_enabled(port)) ||
		    ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_enabled(port))) {
			spin_lock(&port->lock);
			size = stm32_usart_receive_chars(port, false);
			uart_unlock_and_check_sysrq(port);
			if (size)
				tty_flip_buffer_push(tport);
		}
	}

	if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
		spin_lock(&port->lock);
		stm32_usart_transmit_chars(port);
		spin_unlock(&port->lock);
	}

	/* Receiver timeout irq for DMA RX */
	if (stm32_usart_rx_dma_enabled(port) && !stm32_port->throttled) {
		spin_lock(&port->lock);
		size = stm32_usart_receive_chars(port, false);
		uart_unlock_and_check_sysrq(port);
		if (size)
			tty_flip_buffer_push(tport);
	}

	return IRQ_HANDLED;
}

static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE);
	else
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE);

	mctrl_gpio_set(stm32_port->gpios, mctrl);
}

static unsigned int stm32_usart_get_mctrl(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	unsigned int ret;

	/* This routine is used to get signals of: DCD, DSR, RI, and CTS */
	ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;

	return mctrl_gpio_get(stm32_port->gpios, &ret);
}

static void stm32_usart_enable_ms(struct uart_port *port)
{
	mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
}

static void stm32_usart_disable_ms(struct uart_port *port)
{
	mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
}

/* Transmit stop */
static void stm32_usart_stop_tx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	stm32_usart_tx_interrupt_disable(port);
	if (stm32_usart_tx_dma_started(stm32_port) && stm32_usart_tx_dma_enabled(stm32_port))
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

	stm32_usart_rs485_rts_disable(port);
}

/* There are probably characters waiting to be transmitted. */
static void stm32_usart_start_tx(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;

	if (uart_circ_empty(xmit) && !port->x_char) {
		stm32_usart_rs485_rts_disable(port);
		return;
	}

	stm32_usart_rs485_rts_enable(port);

	stm32_usart_transmit_chars(port);
}

/* Flush the transmit buffer. */
static void stm32_usart_flush_buffer(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (stm32_port->tx_ch) {
		stm32_usart_tx_dma_terminate(stm32_port);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
	}
}

/* Throttle the remote when input buffer is about to overflow. */
static void stm32_usart_throttle(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Disable DMA request line if enabled, so the RX data gets queued into the FIFO.
	 * Hardware flow control is triggered when RX FIFO is full.
	 */
	if (stm32_usart_rx_dma_enabled(port))
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);

	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);

	stm32_port->throttled = true;
	spin_unlock_irqrestore(&port->lock, flags);
}

/* Unthrottle the remote, the input buffer can now accept data. */
static void stm32_usart_unthrottle(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);

	/*
	 * Switch back to DMA mode (re-enable DMA request line).
	 * Hardware flow control is stopped when FIFO is not full any more.
	 */
	if (stm32_port->rx_ch)
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);

	stm32_port->throttled = false;
	spin_unlock_irqrestore(&port->lock, flags);
}

/* Receive stop */
static void stm32_usart_stop_rx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/* Disable DMA request line. */
	if (stm32_port->rx_ch)
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);

	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
}

/* Handle breaks - ignored by us */
static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
{
}

static int stm32_usart_start_rx_dma_cyclic(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct dma_async_tx_descriptor *desc;
	int ret;

	stm32_port->last_res = RX_BUF_L;
	/* Prepare a DMA cyclic transaction */
	desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch,
					 stm32_port->rx_dma_buf,
					 RX_BUF_L, RX_BUF_P,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(port->dev, "rx dma prep cyclic failed\n");
		return -ENODEV;
	}

	desc->callback = stm32_usart_rx_dma_complete;
	desc->callback_param = port;

	/* Push current DMA transaction in the pending queue */
	ret = dma_submit_error(dmaengine_submit(desc));
	if (ret) {
		dmaengine_terminate_sync(stm32_port->rx_ch);
		return ret;
	}

	/* Issue pending DMA requests */
	dma_async_issue_pending(stm32_port->rx_ch);

	/*
	 * The DMA request line is not re-enabled at resume when the port is
	 * throttled. It will be re-enabled by the unthrottle ops.
	 */
	if (!stm32_port->throttled)
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);

	return 0;
}

static int stm32_usart_startup(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	const char *name = to_platform_device(port->dev)->name;
	u32 val;
	int ret;

	ret = request_irq(port->irq, stm32_usart_interrupt,
			  IRQF_NO_SUSPEND, name, port);
	if (ret)
		return ret;

	if (stm32_port->swap) {
		val = readl_relaxed(port->membase + ofs->cr2);
		val |= USART_CR2_SWAP;
		writel_relaxed(val, port->membase + ofs->cr2);
	}

	/* RX FIFO Flush */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);

	if (stm32_port->rx_ch) {
		ret = stm32_usart_start_rx_dma_cyclic(port);
		if (ret) {
			free_irq(port->irq, port);
			return ret;
		}
	}

	/* RX enabling */
	val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
	stm32_usart_set_bits(port, ofs->cr1, val);

	return 0;
}

static void stm32_usart_shutdown(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	u32 val, isr;
	int ret;

	if (stm32_usart_tx_dma_enabled(stm32_port))
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

	if (stm32_usart_tx_dma_started(stm32_port))
		stm32_usart_tx_dma_terminate(stm32_port);

	/* Disable modem control interrupts */
	stm32_usart_disable_ms(port);

	val = USART_CR1_TXEIE | USART_CR1_TE;
	val |= stm32_port->cr1_irq | USART_CR1_RE;
	val |= BIT(cfg->uart_enable_bit);
	if (stm32_port->fifoen)
		val |= USART_CR1_FIFOEN;

	ret = readl_relaxed_poll_timeout(port->membase + ofs->isr,
					 isr, (isr & USART_SR_TC),
					 10, 100000);

	/* Send the TC error message only when ISR_TC is not set */
	if (ret)
		dev_err(port->dev, "Transmission is not complete\n");

	/* Disable RX DMA. */
	if (stm32_port->rx_ch)
		dmaengine_terminate_async(stm32_port->rx_ch);

	/* Flush RX & TX FIFO */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
			       port->membase + ofs->rqr);

	stm32_usart_clr_bits(port, ofs->cr1, val);

	free_irq(port->irq, port);
}

static void stm32_usart_set_termios(struct uart_port *port,
				    struct ktermios *termios,
				    const struct ktermios *old)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	struct serial_rs485 *rs485conf = &port->rs485;
	unsigned int baud, bits;
	u32 usartdiv, mantissa, fraction, oversampling;
	tcflag_t cflag = termios->c_cflag;
	u32 cr1, cr2, cr3, isr;
	unsigned long flags;
	int ret;

	if (!stm32_port->hw_flow_control)
		cflag &= ~CRTSCTS;

	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);

	spin_lock_irqsave(&port->lock, flags);

	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
						isr,
						(isr & USART_SR_TC),
						10, 100000);

	/* Send the TC error message only when ISR_TC is not set. */
	if (ret)
		dev_err(port->dev, "Transmission is not complete\n");

	/* Stop serial port and reset value */
	writel_relaxed(0, port->membase + ofs->cr1);

	/* Flush RX & TX FIFO */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
			       port->membase + ofs->rqr);

	cr1 = USART_CR1_TE | USART_CR1_RE;
	if (stm32_port->fifoen)
		cr1 |= USART_CR1_FIFOEN;
	cr2 = stm32_port->swap ? USART_CR2_SWAP : 0;

	/* TX and RX FIFO configuration */
	cr3 = readl_relaxed(port->membase + ofs->cr3);
	cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
	if (stm32_port->fifoen) {
		if (stm32_port->txftcfg >= 0)
			cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT;
		if (stm32_port->rxftcfg >= 0)
			cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT;
	}

	if (cflag & CSTOPB)
		cr2 |= USART_CR2_STOP_2B;

	bits = tty_get_char_size(cflag);
	stm32_port->rdr_mask = (BIT(bits) - 1);

	if (cflag & PARENB) {
		bits++;
		cr1 |= USART_CR1_PCE;
	}

	/*
	 * Word length configuration:
	 * CS8 + parity, 9 bits word aka [M1:M0] = 0b01
	 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10
	 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
	 * M0 and M1 already cleared by cr1 initialization.
	 */
	if (bits == 9) {
		cr1 |= USART_CR1_M0;
	} else if ((bits == 7) && cfg->has_7bits_data) {
		cr1 |= USART_CR1_M1;
	} else if (bits != 8) {
		dev_dbg(port->dev, "Unsupported data bits config: %u bits\n",
			bits);
		cflag &= ~CSIZE;
		cflag |= CS8;
		termios->c_cflag = cflag;
		bits = 8;
		if (cflag & PARENB) {
			bits++;
			cr1 |= USART_CR1_M0;
		}
	}

	if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
				       (stm32_port->fifoen &&
					stm32_port->rxftcfg >= 0))) {
		if (cflag & CSTOPB)
			bits = bits + 3; /* 1 start bit + 2 stop bits */
		else
			bits = bits + 2; /* 1 start bit + 1 stop bit */

		/* RX timeout irq to occur after last stop bit + bits */
		stm32_port->cr1_irq = USART_CR1_RTOIE;
		writel_relaxed(bits, port->membase + ofs->rtor);
		cr2 |= USART_CR2_RTOEN;
		/*
		 * Enable the FIFO threshold irq in two cases: either when there is no DMA,
		 * or on wake-up over USART from low power, until the DMA gets re-enabled
		 * by resume.
		 */
		stm32_port->cr3_irq = USART_CR3_RXFTIE;
	}

	cr1 |= stm32_port->cr1_irq;
	cr3 |= stm32_port->cr3_irq;

	if (cflag & PARODD)
		cr1 |= USART_CR1_PS;

	port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	if (cflag & CRTSCTS) {
		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
		cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
	}

	usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);

	/*
	 * The USART supports 16 or 8 times oversampling.
	 * By default we prefer 16 times oversampling, so that the receiver
	 * has a better tolerance to clock deviations.
	 * 8 times oversampling is only used to achieve higher speeds.
	 */
	if (usartdiv < 16) {
		oversampling = 8;
		cr1 |= USART_CR1_OVER8;
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
	} else {
		oversampling = 16;
		cr1 &= ~USART_CR1_OVER8;
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
	}

	mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
	fraction = usartdiv % oversampling;
	writel_relaxed(mantissa | fraction, port->membase + ofs->brr);

	uart_update_timeout(port, cflag, baud);

	port->read_status_mask = USART_SR_ORE;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= USART_SR_PE | USART_SR_FE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= USART_SR_FE;

	/* Characters to ignore */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask = USART_SR_PE | USART_SR_FE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= USART_SR_FE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= USART_SR_ORE;
	}

	/* Ignore all characters if CREAD is not set */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= USART_SR_DUMMY_RX;

	if (stm32_port->rx_ch) {
		/*
		 * Setup DMA to collect only valid data and enable error irqs.
		 * This also enables break reception when using DMA.
		 */
		cr1 |= USART_CR1_PEIE;
		cr3 |= USART_CR3_EIE;
		cr3 |= USART_CR3_DMAR;
		cr3 |= USART_CR3_DDRE;
	}

	if (rs485conf->flags & SER_RS485_ENABLED) {
		stm32_usart_config_reg_rs485(&cr1, &cr3,
					     rs485conf->delay_rts_before_send,
					     rs485conf->delay_rts_after_send,
					     baud);
		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			cr3 &= ~USART_CR3_DEP;
			rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
		} else {
			cr3 |= USART_CR3_DEP;
			rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
		}

	} else {
		cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP);
		cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
	}

	/* Configure wake up from low power on start bit detection */
	if (stm32_port->wakeup_src) {
		cr3 &= ~USART_CR3_WUS_MASK;
		cr3 |= USART_CR3_WUS_START_BIT;
	}

	writel_relaxed(cr3, port->membase + ofs->cr3);
	writel_relaxed(cr2, port->membase + ofs->cr2);
	writel_relaxed(cr1, port->membase + ofs->cr1);

	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
	spin_unlock_irqrestore(&port->lock, flags);

	/* Handle modem control interrupts */
	if (UART_ENABLE_MS(port, termios->c_cflag))
		stm32_usart_enable_ms(port);
	else
		stm32_usart_disable_ms(port);
}

static const char *stm32_usart_type(struct uart_port *port)
{
	return (port->type == PORT_STM32) ? DRIVER_NAME : NULL;
}

static void stm32_usart_release_port(struct uart_port *port)
{
}

static int stm32_usart_request_port(struct uart_port *port)
{
	return 0;
}

static void stm32_usart_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE)
		port->type = PORT_STM32;
}

static int
stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	/* No user changeable parameters */
	return -EINVAL;
}

static void stm32_usart_pm(struct uart_port *port, unsigned int state,
			   unsigned int oldstate)
{
	struct stm32_port *stm32port = container_of(port,
						    struct stm32_port, port);
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32port->info->cfg;
	unsigned long flags;

	switch (state) {
	case UART_PM_STATE_ON:
		pm_runtime_get_sync(port->dev);
		break;
	case UART_PM_STATE_OFF:
		spin_lock_irqsave(&port->lock, flags);
		stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
		spin_unlock_irqrestore(&port->lock, flags);
		pm_runtime_put_sync(port->dev);
		break;
	}
}

#if defined(CONFIG_CONSOLE_POLL)

/* Callbacks for characters polling in debug context (i.e. KGDB). */
static int stm32_usart_poll_init(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);

	return clk_prepare_enable(stm32_port->clk);
}

static int stm32_usart_poll_get_char(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE))
		return NO_POLL_CHAR;

	return readl_relaxed(port->membase + ofs->rdr) & stm32_port->rdr_mask;
}

static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch)
{
	stm32_usart_console_putchar(port, ch);
}
#endif /* CONFIG_CONSOLE_POLL */

static const struct uart_ops stm32_uart_ops = {
	.tx_empty = stm32_usart_tx_empty,
	.set_mctrl = stm32_usart_set_mctrl,
	.get_mctrl = stm32_usart_get_mctrl,
	.stop_tx = stm32_usart_stop_tx,
	.start_tx = stm32_usart_start_tx,
	.throttle = stm32_usart_throttle,
	.unthrottle = stm32_usart_unthrottle,
	.stop_rx = stm32_usart_stop_rx,
	.enable_ms = stm32_usart_enable_ms,
	.break_ctl = stm32_usart_break_ctl,
	.startup = stm32_usart_startup,
	.shutdown = stm32_usart_shutdown,
	.flush_buffer = stm32_usart_flush_buffer,
	.set_termios = stm32_usart_set_termios,
	.pm = stm32_usart_pm,
	.type = stm32_usart_type,
	.release_port = stm32_usart_release_port,
	.request_port = stm32_usart_request_port,
	.config_port = stm32_usart_config_port,
	.verify_port = stm32_usart_verify_port,
#if defined(CONFIG_CONSOLE_POLL)
	.poll_init = stm32_usart_poll_init,
	.poll_get_char = stm32_usart_poll_get_char,
	.poll_put_char = stm32_usart_poll_put_char,
#endif /* CONFIG_CONSOLE_POLL */
};

/*
 * STM32H7 RX & TX FIFO threshold configuration (CR3 RXFTCFG / TXFTCFG)
 * Note: 1 isn't a valid value in RXFTCFG / TXFTCFG. In this case,
 * RXNEIE / TXEIE can be used instead of threshold irqs: RXFTIE / TXFTIE.
 * So, RXFTCFG / TXFTCFG bitfields values are encoded as array index + 1.
 */
static const u32 stm32h7_usart_fifo_thresh_cfg[] = { 1, 2, 4, 8, 12, 14, 16 };

static void stm32_usart_get_ftcfg(struct platform_device *pdev, const char *p,
				  int *ftcfg)
{
	u32 bytes, i;

	/* DT option to get RX & TX FIFO threshold (default to 8 bytes) */
	if (of_property_read_u32(pdev->dev.of_node, p, &bytes))
		bytes = 8;

	for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++)
		if (stm32h7_usart_fifo_thresh_cfg[i] >= bytes)
			break;
	if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg))
		i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1;

	dev_dbg(&pdev->dev, "%s set to %d bytes\n", p,
		stm32h7_usart_fifo_thresh_cfg[i]);

	/* Provide FIFO threshold ftcfg (1 is invalid: threshold irq unused) */
	if (i)
		*ftcfg = i - 1;
	else
		*ftcfg = -EINVAL;
}

static void stm32_usart_deinit_port(struct stm32_port *stm32port)
{
	clk_disable_unprepare(stm32port->clk);
}

static const struct serial_rs485 stm32_rs485_supported = {
	.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
		 SER_RS485_RX_DURING_TX,
	.delay_rts_before_send = 1,
	.delay_rts_after_send = 1,
};

static int stm32_usart_init_port(struct stm32_port *stm32port,
				 struct platform_device *pdev)
{
	struct uart_port *port = &stm32port->port;
	struct resource *res;
	int ret, irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	port->iotype = UPIO_MEM;
	port->flags = UPF_BOOT_AUTOCONF;
	port->ops = &stm32_uart_ops;
	port->dev = &pdev->dev;
	port->fifosize = stm32port->info->cfg.fifosize;
	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
	port->irq = irq;
	port->rs485_config = stm32_usart_config_rs485;
	port->rs485_supported = stm32_rs485_supported;

	ret = stm32_usart_init_rs485(port, pdev);
	if (ret)
		return ret;

	stm32port->wakeup_src = stm32port->info->cfg.has_wakeup &&
		of_property_read_bool(pdev->dev.of_node, "wakeup-source");

	stm32port->swap = stm32port->info->cfg.has_swap &&
		of_property_read_bool(pdev->dev.of_node, "rx-tx-swap");

	stm32port->fifoen = stm32port->info->cfg.has_fifo;
	if (stm32port->fifoen) {
		stm32_usart_get_ftcfg(pdev, "rx-threshold",
				      &stm32port->rxftcfg);
		stm32_usart_get_ftcfg(pdev, "tx-threshold",
				      &stm32port->txftcfg);
	}

	port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(port->membase))
		return PTR_ERR(port->membase);
	port->mapbase = res->start;

	spin_lock_init(&port->lock);

	stm32port->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(stm32port->clk))
		return PTR_ERR(stm32port->clk);

	/* Ensure that clk rate is correct by enabling the clk */
	ret = clk_prepare_enable(stm32port->clk);
	if (ret)
		return ret;

	stm32port->port.uartclk = clk_get_rate(stm32port->clk);
	if (!stm32port->port.uartclk) {
		ret = -EINVAL;
		goto err_clk;
	}

	stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0);
	if (IS_ERR(stm32port->gpios)) {
		ret = PTR_ERR(stm32port->gpios);
		goto err_clk;
	}

	/*
	 * Both CTS/RTS gpios and "st,hw-flow-ctrl" (deprecated) or "uart-has-rtscts"
	 * properties should not be specified.
	 */
	if (stm32port->hw_flow_control) {
		if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) ||
		    mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) {
			dev_err(&pdev->dev, "Conflicting RTS/CTS config\n");
			ret = -EINVAL;
			goto err_clk;
		}
	}

	return ret;

err_clk:
	clk_disable_unprepare(stm32port->clk);

	return ret;
}

static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	int id;

	if (!np)
		return NULL;

	id = of_alias_get_id(np, "serial");
	if (id < 0) {
		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id);
		return NULL;
	}

	if (WARN_ON(id >= STM32_MAX_PORTS))
		return NULL;

	stm32_ports[id].hw_flow_control =
		of_property_read_bool(np, "st,hw-flow-ctrl") /*deprecated*/ ||
		of_property_read_bool(np, "uart-has-rtscts");
	stm32_ports[id].port.line = id;
	stm32_ports[id].cr1_irq = USART_CR1_RXNEIE;
	stm32_ports[id].cr3_irq = 0;
	stm32_ports[id].last_res = RX_BUF_L;
	return &stm32_ports[id];
}

#ifdef CONFIG_OF
static const struct of_device_id stm32_match[] = {
	{ .compatible = "st,stm32-uart", .data = &stm32f4_info},
	{ .compatible = "st,stm32f7-uart", .data = &stm32f7_info},
	{ .compatible = "st,stm32h7-uart", .data = &stm32h7_info},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_match);
#endif

static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port,
					 struct platform_device *pdev)
{
	if (stm32port->rx_buf)
		dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf,
				  stm32port->rx_dma_buf);
}

static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
				       struct platform_device *pdev)
{
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct uart_port *port = &stm32port->port;
	struct device *dev = &pdev->dev;
	struct dma_slave_config config;
	int ret;

	stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L,
					       &stm32port->rx_dma_buf,
					       GFP_KERNEL);
	if (!stm32port->rx_buf)
		return -ENOMEM;

	/* Configure DMA channel */
	memset(&config, 0, sizeof(config));
	config.src_addr = port->mapbase + ofs->rdr;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	ret = dmaengine_slave_config(stm32port->rx_ch, &config);
	if (ret < 0) {
		dev_err(dev, "rx dma channel config failed\n");
		stm32_usart_of_dma_rx_remove(stm32port, pdev);
		return ret;
	}

	return 0;
}

static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port,
					 struct platform_device *pdev)
{
	if (stm32port->tx_buf)
		dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf,
				  stm32port->tx_dma_buf);
}

static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
				       struct platform_device *pdev)
{
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct uart_port *port = &stm32port->port;
	struct device *dev = &pdev->dev;
	struct dma_slave_config config;
	int ret;

	stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L,
					       &stm32port->tx_dma_buf,
					       GFP_KERNEL);
	if (!stm32port->tx_buf)
		return -ENOMEM;

	/* Configure DMA channel */
	memset(&config, 0, sizeof(config));
	config.dst_addr = port->mapbase + ofs->tdr;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	ret = dmaengine_slave_config(stm32port->tx_ch, &config);
	if (ret < 0) {
		dev_err(dev, "tx dma channel config failed\n");
		stm32_usart_of_dma_tx_remove(stm32port, pdev);
		return ret;
	}

	return 0;
}

static int stm32_usart_serial_probe(struct platform_device *pdev)
{
	struct stm32_port *stm32port;
	int ret;

	stm32port = stm32_usart_of_get_port(pdev);
	if (!stm32port)
		return -ENODEV;

	stm32port->info = of_device_get_match_data(&pdev->dev);
	if (!stm32port->info)
		return -EINVAL;

	stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx");
	if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	/* Fall back to interrupt mode for any non-deferral error */
	if (IS_ERR(stm32port->rx_ch))
		stm32port->rx_ch = NULL;

	stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx");
	if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_dma_rx;
	}
	/* Fall back to interrupt mode for any non-deferral error */
	if (IS_ERR(stm32port->tx_ch))
		stm32port->tx_ch = NULL;

	ret = stm32_usart_init_port(stm32port, pdev);
	if (ret)
		goto err_dma_tx;

	if (stm32port->wakeup_src) {
		device_set_wakeup_capable(&pdev->dev, true);
		ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq);
		if (ret)
			goto err_deinit_port;
	}

	if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) {
		/* Fall back to interrupt mode */
		dma_release_channel(stm32port->rx_ch);
		stm32port->rx_ch = NULL;
	}

	if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) {
		/* Fall back to interrupt mode */
		dma_release_channel(stm32port->tx_ch);
		stm32port->tx_ch = NULL;
	}

	if (!stm32port->rx_ch)
		dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n");
	if (!stm32port->tx_ch)
		dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n");

	platform_set_drvdata(pdev, &stm32port->port);

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
	if (ret)
		goto err_port;

	pm_runtime_put_sync(&pdev->dev);

	return 0;

err_port:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	if (stm32port->tx_ch)
		stm32_usart_of_dma_tx_remove(stm32port, pdev);
	if (stm32port->rx_ch)
		stm32_usart_of_dma_rx_remove(stm32port, pdev);

	if (stm32port->wakeup_src)
		dev_pm_clear_wake_irq(&pdev->dev);

err_deinit_port:
	if (stm32port->wakeup_src)
		device_set_wakeup_capable(&pdev->dev, false);

	stm32_usart_deinit_port(stm32port);

err_dma_tx:
	if (stm32port->tx_ch)
		dma_release_channel(stm32port->tx_ch);

err_dma_rx:
	if (stm32port->rx_ch)
		dma_release_channel(stm32port->rx_ch);

	return ret;
}

static int stm32_usart_serial_remove(struct platform_device *pdev)
{
	struct uart_port *port = platform_get_drvdata(pdev);
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	int err;
	u32 cr3;

	pm_runtime_get_sync(&pdev->dev);
	err = uart_remove_one_port(&stm32_usart_driver, port);
	if (err)
		return err;

	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE);
	cr3 = readl_relaxed(port->membase + ofs->cr3);
	cr3 &= ~USART_CR3_EIE;
	cr3 &= ~USART_CR3_DMAR;
	cr3 &= ~USART_CR3_DDRE;
	writel_relaxed(cr3, port->membase + ofs->cr3);

	if (stm32_port->tx_ch) {
		stm32_usart_of_dma_tx_remove(stm32_port, pdev);
		dma_release_channel(stm32_port->tx_ch);
	}

	if (stm32_port->rx_ch) {
		stm32_usart_of_dma_rx_remove(stm32_port, pdev);
		dma_release_channel(stm32_port->rx_ch);
	}

	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

	if (stm32_port->wakeup_src) {
		dev_pm_clear_wake_irq(&pdev->dev);
		device_init_wakeup(&pdev->dev, false);
	}

	stm32_usart_deinit_port(stm32_port);

	return 0;
}

static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	u32 isr;
	int ret;

	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr,
						(isr & USART_SR_TXE), 100,
						STM32_USART_TIMEOUT_USEC);
	if (ret != 0) {
		dev_err(port->dev, "Error while sending data in UART TX : %d\n", ret);
		return;
	}
	writel_relaxed(ch, port->membase + ofs->tdr);
}

#ifdef CONFIG_SERIAL_STM32_CONSOLE
static void stm32_usart_console_write(struct console *co, const char *s,
				      unsigned int cnt)
{
	struct uart_port *port = &stm32_ports[co->index].port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	unsigned long flags;
	u32 old_cr1, new_cr1;
	int locked = 1;

	if (oops_in_progress)
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	/* Save and disable interrupts, enable the transmitter */
	old_cr1 = readl_relaxed(port->membase + ofs->cr1);
	new_cr1 = old_cr1 & ~USART_CR1_IE_MASK;
	new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit);
	writel_relaxed(new_cr1, port->membase + ofs->cr1);

	uart_console_write(port, s, cnt, stm32_usart_console_putchar);

	/* Restore interrupt state */
	writel_relaxed(old_cr1, port->membase + ofs->cr1);

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);
}

static int stm32_usart_console_setup(struct console *co, char *options)
{
	struct stm32_port *stm32port;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	if (co->index >= STM32_MAX_PORTS)
		return -ENODEV;

	stm32port = &stm32_ports[co->index];

	/*
	 * This driver does not support early console initialization
	 * (use ARM early printk support instead), so we only expect
	 * this to be called during the uart port registration when the
	 * driver gets probed and the port should be mapped at that point.
	 */
	 */
	if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
		return -ENXIO;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(&stm32port->port, co, baud, parity, bits, flow);
}

static struct console stm32_console = {
	.name		= STM32_SERIAL_NAME,
	.device		= uart_console_device,
	.write		= stm32_usart_console_write,
	.setup		= stm32_usart_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &stm32_usart_driver,
};

#define STM32_SERIAL_CONSOLE (&stm32_console)

#else
#define STM32_SERIAL_CONSOLE NULL
#endif /* CONFIG_SERIAL_STM32_CONSOLE */

#ifdef CONFIG_SERIAL_EARLYCON
static void early_stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
{
	struct stm32_usart_info *info = port->private_data;

	while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE))
		cpu_relax();

	writel_relaxed(ch, port->membase + info->ofs.tdr);
}

static void early_stm32_serial_write(struct console *console, const char *s, unsigned int count)
{
	struct earlycon_device *device = console->data;
	struct uart_port *port = &device->port;

	uart_console_write(port, s, count, early_stm32_usart_console_putchar);
}

static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32h7_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32f7_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32f4_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

OF_EARLYCON_DECLARE(stm32, "st,stm32h7-uart", early_stm32_h7_serial_setup);
OF_EARLYCON_DECLARE(stm32, "st,stm32f7-uart", early_stm32_f7_serial_setup);
OF_EARLYCON_DECLARE(stm32, "st,stm32-uart", early_stm32_f4_serial_setup);
#endif /* CONFIG_SERIAL_EARLYCON */

static struct uart_driver stm32_usart_driver = {
	.driver_name	= DRIVER_NAME,
	.dev_name	= STM32_SERIAL_NAME,
	.major		= 0,
	.minor		= 0,
	.nr		= STM32_MAX_PORTS,
	.cons		= STM32_SERIAL_CONSOLE,
};

static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
							bool enable)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct tty_port *tport = &port->state->port;
	int ret;
	unsigned int size;
	unsigned long flags;

	if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
		return 0;

	/*
	 * Enable low-power wake-up and wake-up irq if argument is set to
	 * "enable", disable low-power wake-up and wake-up irq otherwise
	 */
	if (enable) {
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
		mctrl_gpio_enable_irq_wake(stm32_port->gpios);

		/*
		 * When DMA is used for reception, it must be disabled before
		 * entering low-power mode and re-enabled when exiting from
		 * low-power mode.
		 */
		if (stm32_port->rx_ch) {
			spin_lock_irqsave(&port->lock, flags);
			/* Avoid race with RX IRQ when DMAR is cleared */
			stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
			/* Poll data from DMA RX buffer if any */
			size = stm32_usart_receive_chars(port, true);
			dmaengine_terminate_async(stm32_port->rx_ch);
			uart_unlock_and_check_sysrq_irqrestore(port, flags);
			if (size)
				tty_flip_buffer_push(tport);
		}

		/* Poll data from RX FIFO if any */
		stm32_usart_receive_chars(port, false);
	} else {
		if (stm32_port->rx_ch) {
			ret = stm32_usart_start_rx_dma_cyclic(port);
			if (ret)
				return ret;
		}
		mctrl_gpio_disable_irq_wake(stm32_port->gpios);
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
	}

	return 0;
}

static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int ret;

	uart_suspend_port(&stm32_usart_driver, port);

	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
		ret = stm32_usart_serial_en_wakeup(port, true);
		if (ret)
			return ret;
	}

	/*
	 * When "no_console_suspend" is enabled, keep the pinctrl default state
	 * and rely on the bootloader stage to restore this state upon resume.
	 * Otherwise, apply the idle or sleep states depending on wakeup
	 * capabilities.
	 */
	if (console_suspend_enabled || !uart_console(port)) {
		if (device_may_wakeup(dev) || device_wakeup_path(dev))
			pinctrl_pm_select_idle_state(dev);
		else
			pinctrl_pm_select_sleep_state(dev);
	}

	return 0;
}

static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int ret;

	pinctrl_pm_select_default_state(dev);

	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
		ret = stm32_usart_serial_en_wakeup(port, false);
		if (ret)
			return ret;
	}

	return uart_resume_port(&stm32_usart_driver, port);
}

static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);

	clk_disable_unprepare(stm32port->clk);

	return 0;
}

static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);

	return clk_prepare_enable(stm32port->clk);
}

static const struct dev_pm_ops stm32_serial_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
			   stm32_usart_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
				stm32_usart_serial_resume)
};

static struct platform_driver stm32_serial_driver = {
	.probe		= stm32_usart_serial_probe,
	.remove		= stm32_usart_serial_remove,
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &stm32_serial_pm_ops,
		.of_match_table = of_match_ptr(stm32_match),
	},
};

static int __init stm32_usart_init(void)
{
	static char banner[] __initdata = "STM32 USART driver initialized";
	int ret;

	pr_info("%s\n", banner);

	ret = uart_register_driver(&stm32_usart_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&stm32_serial_driver);
	if (ret)
		uart_unregister_driver(&stm32_usart_driver);

	return ret;
}

static void __exit stm32_usart_exit(void)
{
	platform_driver_unregister(&stm32_serial_driver);
	uart_unregister_driver(&stm32_usart_driver);
}

module_init(stm32_usart_init);
module_exit(stm32_usart_exit);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
MODULE_LICENSE("GPL v2");
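
/*
 * Usage sketch (illustrative only, assuming STM32_SERIAL_NAME expands to
 * "ttySTM" in stm32-usart.h): the console registered above is typically
 * selected from the kernel command line, e.g.
 *
 *   console=ttySTM0,115200n8
 *
 * and the earlycon entries declared with OF_EARLYCON_DECLARE() are usually
 * enabled by passing "earlycon" on the command line, with the device tree
 * "chosen" node pointing at the UART, e.g.
 *
 *   chosen {
 *           stdout-path = "serial0:115200n8";
 *   };
 */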