1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) Maxime Coquelin 2015
4 * Copyright (C) STMicroelectronics SA 2017
5 * Authors: Maxime Coquelin <mcoquelin.stm32@gmail.com>
6 * Gerald Baeza <gerald.baeza@foss.st.com>
7 * Erwan Le Ray <erwan.leray@foss.st.com>
8 *
9 * Inspired by st-asc.c from STMicroelectronics (c)
10 */
11
12#include <linux/clk.h>
13#include <linux/console.h>
14#include <linux/delay.h>
15#include <linux/dma-direction.h>
16#include <linux/dmaengine.h>
17#include <linux/dma-mapping.h>
18#include <linux/io.h>
19#include <linux/iopoll.h>
20#include <linux/irq.h>
21#include <linux/module.h>
22#include <linux/of.h>
23#include <linux/of_platform.h>
24#include <linux/pinctrl/consumer.h>
25#include <linux/platform_device.h>
26#include <linux/pm_runtime.h>
27#include <linux/pm_wakeirq.h>
28#include <linux/serial_core.h>
29#include <linux/serial.h>
30#include <linux/spinlock.h>
31#include <linux/sysrq.h>
32#include <linux/tty_flip.h>
33#include <linux/tty.h>
34
35#include "serial_mctrl_gpio.h"
36#include "stm32-usart.h"
37
38
39/* Register offsets */
40static struct stm32_usart_info __maybe_unused stm32f4_info = {
41 .ofs = {
42 .isr = 0x00,
43 .rdr = 0x04,
44 .tdr = 0x04,
45 .brr = 0x08,
46 .cr1 = 0x0c,
47 .cr2 = 0x10,
48 .cr3 = 0x14,
49 .gtpr = 0x18,
50 .rtor = UNDEF_REG,
51 .rqr = UNDEF_REG,
52 .icr = UNDEF_REG,
53 },
54 .cfg = {
55 .uart_enable_bit = 13,
56 .has_7bits_data = false,
57 .fifosize = 1,
58 }
59};
60
61static struct stm32_usart_info __maybe_unused stm32f7_info = {
62 .ofs = {
63 .cr1 = 0x00,
64 .cr2 = 0x04,
65 .cr3 = 0x08,
66 .brr = 0x0c,
67 .gtpr = 0x10,
68 .rtor = 0x14,
69 .rqr = 0x18,
70 .isr = 0x1c,
71 .icr = 0x20,
72 .rdr = 0x24,
73 .tdr = 0x28,
74 },
75 .cfg = {
76 .uart_enable_bit = 0,
77 .has_7bits_data = true,
78 .has_swap = true,
79 .fifosize = 1,
80 }
81};
82
83static struct stm32_usart_info __maybe_unused stm32h7_info = {
84 .ofs = {
85 .cr1 = 0x00,
86 .cr2 = 0x04,
87 .cr3 = 0x08,
88 .brr = 0x0c,
89 .gtpr = 0x10,
90 .rtor = 0x14,
91 .rqr = 0x18,
92 .isr = 0x1c,
93 .icr = 0x20,
94 .rdr = 0x24,
95 .tdr = 0x28,
96 },
97 .cfg = {
98 .uart_enable_bit = 0,
99 .has_7bits_data = true,
100 .has_swap = true,
101 .has_wakeup = true,
102 .has_fifo = true,
103 .fifosize = 16,
104 }
105};
106
107static void stm32_usart_stop_tx(struct uart_port *port);
108static void stm32_usart_transmit_chars(struct uart_port *port);
109static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch);
110
111static inline struct stm32_port *to_stm32_port(struct uart_port *port)
112{
113 return container_of(port, struct stm32_port, port);
114}
115
116static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits)
117{
118 u32 val;
119
120 val = readl_relaxed(port->membase + reg);
121 val |= bits;
122 writel_relaxed(val, port->membase + reg);
123}
124
125static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
126{
127 u32 val;
128
129 val = readl_relaxed(port->membase + reg);
130 val &= ~bits;
131 writel_relaxed(val, port->membase + reg);
132}
133
134static unsigned int stm32_usart_tx_empty(struct uart_port *port)
135{
136 struct stm32_port *stm32_port = to_stm32_port(port);
137 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
138
139 if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
140 return TIOCSER_TEMT;
141
142 return 0;
143}
144
145static void stm32_usart_rs485_rts_enable(struct uart_port *port)
146{
147 struct stm32_port *stm32_port = to_stm32_port(port);
148 struct serial_rs485 *rs485conf = &port->rs485;
149
150 if (stm32_port->hw_flow_control ||
151 !(rs485conf->flags & SER_RS485_ENABLED))
152 return;
153
154 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
155 mctrl_gpio_set(stm32_port->gpios,
156 stm32_port->port.mctrl | TIOCM_RTS);
157 } else {
158 mctrl_gpio_set(stm32_port->gpios,
159 stm32_port->port.mctrl & ~TIOCM_RTS);
160 }
161}
162
163static void stm32_usart_rs485_rts_disable(struct uart_port *port)
164{
165 struct stm32_port *stm32_port = to_stm32_port(port);
166 struct serial_rs485 *rs485conf = &port->rs485;
167
168 if (stm32_port->hw_flow_control ||
169 !(rs485conf->flags & SER_RS485_ENABLED))
170 return;
171
172 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
173 mctrl_gpio_set(stm32_port->gpios,
174 stm32_port->port.mctrl & ~TIOCM_RTS);
175 } else {
176 mctrl_gpio_set(stm32_port->gpios,
177 stm32_port->port.mctrl | TIOCM_RTS);
178 }
179}
180
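/*
 * Compute the RS485 driver enable assertion/de-assertion times (DEAT/DEDT
 * fields of CR1, expressed in sample time units) from the requested delays
 * in ms and the baud rate, and enable driver enable mode (DEM) in CR3.
 */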
181static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
182 u32 delay_DDE, u32 baud)
183{
184 u32 rs485_deat_dedt;
185 u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT);
186 bool over8;
187
188 *cr3 |= USART_CR3_DEM;
189 over8 = *cr1 & USART_CR1_OVER8;
190
191 *cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
192
193 if (over8)
194 rs485_deat_dedt = delay_ADE * baud * 8;
195 else
196 rs485_deat_dedt = delay_ADE * baud * 16;
197
198 rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
199 rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
200 rs485_deat_dedt_max : rs485_deat_dedt;
201 rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) &
202 USART_CR1_DEAT_MASK;
203 *cr1 |= rs485_deat_dedt;
204
205 if (over8)
206 rs485_deat_dedt = delay_DDE * baud * 8;
207 else
208 rs485_deat_dedt = delay_DDE * baud * 16;
209
210 rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
211 rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
212 rs485_deat_dedt_max : rs485_deat_dedt;
213 rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) &
214 USART_CR1_DEDT_MASK;
215 *cr1 |= rs485_deat_dedt;
216}
217
218static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *termios,
219 struct serial_rs485 *rs485conf)
220{
221 struct stm32_port *stm32_port = to_stm32_port(port);
222 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
223 const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
224 u32 usartdiv, baud, cr1, cr3;
225 bool over8;
226
227 stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
228
229 if (rs485conf->flags & SER_RS485_ENABLED) {
230 cr1 = readl_relaxed(port->membase + ofs->cr1);
231 cr3 = readl_relaxed(port->membase + ofs->cr3);
232 usartdiv = readl_relaxed(port->membase + ofs->brr);
233 usartdiv = usartdiv & GENMASK(15, 0);
234 over8 = cr1 & USART_CR1_OVER8;
235
236 if (over8)
237 usartdiv = usartdiv | (usartdiv & GENMASK(4, 0))
238 << USART_BRR_04_R_SHIFT;
239
240 baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv);
241 stm32_usart_config_reg_rs485(&cr1, &cr3,
242 rs485conf->delay_rts_before_send,
243 rs485conf->delay_rts_after_send,
244 baud);
245
246 if (rs485conf->flags & SER_RS485_RTS_ON_SEND)
247 cr3 &= ~USART_CR3_DEP;
248 else
249 cr3 |= USART_CR3_DEP;
250
251 writel_relaxed(cr3, port->membase + ofs->cr3);
252 writel_relaxed(cr1, port->membase + ofs->cr1);
253
254 if (!port->rs485_rx_during_tx_gpio)
255 rs485conf->flags |= SER_RS485_RX_DURING_TX;
256
257 } else {
258 stm32_usart_clr_bits(port, ofs->cr3,
259 USART_CR3_DEM | USART_CR3_DEP);
260 stm32_usart_clr_bits(port, ofs->cr1,
261 USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
262 }
263
264 stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
265
266 /* Adjust RTS polarity in case it's driven in software */
267 if (stm32_usart_tx_empty(port))
268 stm32_usart_rs485_rts_disable(port);
269 else
270 stm32_usart_rs485_rts_enable(port);
271
272 return 0;
273}
274
275static int stm32_usart_init_rs485(struct uart_port *port,
276 struct platform_device *pdev)
277{
278 struct serial_rs485 *rs485conf = &port->rs485;
279
280 rs485conf->flags = 0;
281 rs485conf->delay_rts_before_send = 0;
282 rs485conf->delay_rts_after_send = 0;
283
284 if (!pdev->dev.of_node)
285 return -ENODEV;
286
287 return uart_get_rs485_mode(port);
288}
289
290static bool stm32_usart_rx_dma_started(struct stm32_port *stm32_port)
291{
292 return stm32_port->rx_ch ? stm32_port->rx_dma_busy : false;
293}
294
295static void stm32_usart_rx_dma_terminate(struct stm32_port *stm32_port)
296{
297 dmaengine_terminate_async(stm32_port->rx_ch);
298 stm32_port->rx_dma_busy = false;
299}
300
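/*
 * Generic DMA pause/resume helper: checks that the transfer has been started
 * and that the channel is in the expected state, then calls the provided
 * dmaengine pause or resume function. The channel is terminated on failure.
 */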
301static int stm32_usart_dma_pause_resume(struct stm32_port *stm32_port,
302 struct dma_chan *chan,
303 enum dma_status expected_status,
304 int dmaengine_pause_or_resume(struct dma_chan *),
305 bool stm32_usart_xx_dma_started(struct stm32_port *),
306 void stm32_usart_xx_dma_terminate(struct stm32_port *))
307{
308 struct uart_port *port = &stm32_port->port;
309 enum dma_status dma_status;
310 int ret;
311
312 if (!stm32_usart_xx_dma_started(stm32_port))
313 return -EPERM;
314
315 dma_status = dmaengine_tx_status(chan, chan->cookie, NULL);
316 if (dma_status != expected_status)
317 return -EAGAIN;
318
319 ret = dmaengine_pause_or_resume(chan);
320 if (ret) {
321 dev_err(port->dev, "DMA failed with error code: %d\n", ret);
322 stm32_usart_xx_dma_terminate(stm32_port);
323 }
324 return ret;
325}
326
327static int stm32_usart_rx_dma_pause(struct stm32_port *stm32_port)
328{
329 return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
330 DMA_IN_PROGRESS, dmaengine_pause,
331 stm32_usart_rx_dma_started,
332 stm32_usart_rx_dma_terminate);
333}
334
335static int stm32_usart_rx_dma_resume(struct stm32_port *stm32_port)
336{
337 return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
338 DMA_PAUSED, dmaengine_resume,
339 stm32_usart_rx_dma_started,
340 stm32_usart_rx_dma_terminate);
341}
342
343/* Return true when data is pending (in pio mode), and false when no data is pending. */
344static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr)
345{
346 struct stm32_port *stm32_port = to_stm32_port(port);
347 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
348
349 *sr = readl_relaxed(port->membase + ofs->isr);
350 /* Get pending characters in RDR or FIFO */
351 if (*sr & USART_SR_RXNE) {
352 /* Get all pending characters from the RDR or the FIFO when using interrupts */
353 if (!stm32_usart_rx_dma_started(stm32_port))
354 return true;
355
356 /* Handle only RX data errors when using DMA */
357 if (*sr & USART_SR_ERR_MASK)
358 return true;
359 }
360
361 return false;
362}
363
364static u8 stm32_usart_get_char_pio(struct uart_port *port)
365{
366 struct stm32_port *stm32_port = to_stm32_port(port);
367 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
368 unsigned long c;
369
370 c = readl_relaxed(port->membase + ofs->rdr);
371 /* Apply RDR data mask */
372 c &= stm32_port->rdr_mask;
373
374 return c;
375}
376
377static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port)
378{
379 struct stm32_port *stm32_port = to_stm32_port(port);
380 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
381 unsigned int size = 0;
382 u32 sr;
383 u8 c, flag;
384
385 while (stm32_usart_pending_rx_pio(port, &sr)) {
386 sr |= USART_SR_DUMMY_RX;
387 flag = TTY_NORMAL;
388
389 /*
 390 * Status bits have to be cleared before reading the RDR:
 391 * In FIFO mode, reading the RDR will pop the next data
 392 * (if any) along with its status bits into the SR.
 393 * Not doing so leads to misalignment between RDR and SR,
 394 * and clears the status bits of the next RX data.
 395 *
 396 * Clear error flags for stm32f7 and stm32h7 compatible
397 * devices. On stm32f4 compatible devices, the error bit is
398 * cleared by the sequence [read SR - read DR].
399 */
400 if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
401 writel_relaxed(sr & USART_SR_ERR_MASK,
402 port->membase + ofs->icr);
403
404 c = stm32_usart_get_char_pio(port);
405 port->icount.rx++;
406 size++;
407 if (sr & USART_SR_ERR_MASK) {
408 if (sr & USART_SR_ORE) {
409 port->icount.overrun++;
410 } else if (sr & USART_SR_PE) {
411 port->icount.parity++;
412 } else if (sr & USART_SR_FE) {
413 /* Break detection if character is null */
414 if (!c) {
415 port->icount.brk++;
416 if (uart_handle_break(port))
417 continue;
418 } else {
419 port->icount.frame++;
420 }
421 }
422
423 sr &= port->read_status_mask;
424
425 if (sr & USART_SR_PE) {
426 flag = TTY_PARITY;
427 } else if (sr & USART_SR_FE) {
428 if (!c)
429 flag = TTY_BREAK;
430 else
431 flag = TTY_FRAME;
432 }
433 }
434
435 if (uart_prepare_sysrq_char(port, c))
436 continue;
437 uart_insert_char(port, sr, USART_SR_ORE, c, flag);
438 }
439
440 return size;
441}
442
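/*
 * Push dma_size bytes received by DMA into the TTY buffer, applying rdr_mask
 * to strip the parity bit when needed, and update last_res (remaining room
 * before the cyclic RX buffer wraps).
 */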
443static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size)
444{
445 struct stm32_port *stm32_port = to_stm32_port(port);
446 struct tty_port *ttyport = &stm32_port->port.state->port;
447 unsigned char *dma_start;
448 int dma_count, i;
449
450 dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res);
451
452 /*
 453 * Apply rdr_mask on the buffer in order to mask the parity bit.
 454 * This loop is useless in cs8 mode because the DMA copies only
 455 * 8 bits and already ignores the parity bit.
456 */
457 if (!(stm32_port->rdr_mask == (BIT(8) - 1)))
458 for (i = 0; i < dma_size; i++)
459 *(dma_start + i) &= stm32_port->rdr_mask;
460
461 dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size);
462 port->icount.rx += dma_count;
463 if (dma_count != dma_size)
464 port->icount.buf_overrun++;
465 stm32_port->last_res -= dma_count;
466 if (stm32_port->last_res == 0)
467 stm32_port->last_res = RX_BUF_L;
468}
469
470static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port)
471{
472 struct stm32_port *stm32_port = to_stm32_port(port);
473 unsigned int dma_size, size = 0;
474
475 /* DMA buffer is configured in cyclic mode and handles the rollback of the buffer. */
476 if (stm32_port->rx_dma_state.residue > stm32_port->last_res) {
477 /* Conditional first part: from last_res to end of DMA buffer */
478 dma_size = stm32_port->last_res;
479 stm32_usart_push_buffer_dma(port, dma_size);
480 size = dma_size;
481 }
482
483 dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue;
484 stm32_usart_push_buffer_dma(port, dma_size);
485 size += dma_size;
486
487 return size;
488}
489
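/*
 * Receive characters, either by emptying the cyclic DMA buffer (switching
 * temporarily to PIO to handle RX errors) or directly by PIO when DMA is not
 * in use or has failed. Returns the number of characters pushed to the TTY.
 */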
490static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force_dma_flush)
491{
492 struct stm32_port *stm32_port = to_stm32_port(port);
493 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
494 enum dma_status rx_dma_status;
495 u32 sr;
496 unsigned int size = 0;
497
498 if (stm32_usart_rx_dma_started(stm32_port) || force_dma_flush) {
499 rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
500 stm32_port->rx_ch->cookie,
501 &stm32_port->rx_dma_state);
502 if (rx_dma_status == DMA_IN_PROGRESS ||
503 rx_dma_status == DMA_PAUSED) {
504 /* Empty DMA buffer */
505 size = stm32_usart_receive_chars_dma(port);
506 sr = readl_relaxed(port->membase + ofs->isr);
507 if (sr & USART_SR_ERR_MASK) {
508 /* Disable DMA request line */
509 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
510
511 /* Switch to PIO mode to handle the errors */
512 size += stm32_usart_receive_chars_pio(port);
513
514 /* Switch back to DMA mode */
515 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
516 }
517 } else {
518 /* Disable RX DMA */
519 stm32_usart_rx_dma_terminate(stm32_port);
520 /* Fall back to interrupt mode */
521 dev_dbg(port->dev, "DMA error, fallback to irq mode\n");
522 size = stm32_usart_receive_chars_pio(port);
523 }
524 } else {
525 size = stm32_usart_receive_chars_pio(port);
526 }
527
528 return size;
529}
530
531static void stm32_usart_rx_dma_complete(void *arg)
532{
533 struct uart_port *port = arg;
534 struct tty_port *tport = &port->state->port;
535 unsigned int size;
536 unsigned long flags;
537
538 uart_port_lock_irqsave(port, &flags);
539 size = stm32_usart_receive_chars(port, false);
540 uart_unlock_and_check_sysrq_irqrestore(port, flags);
541 if (size)
542 tty_flip_buffer_push(tport);
543}
544
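/*
 * Start the cyclic RX DMA transfer, or resume it when it is only paused.
 * Does nothing while the port is throttled. Returns 0 on success or a
 * negative error code.
 */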
545static int stm32_usart_rx_dma_start_or_resume(struct uart_port *port)
546{
547 struct stm32_port *stm32_port = to_stm32_port(port);
548 struct dma_async_tx_descriptor *desc;
549 enum dma_status rx_dma_status;
550 int ret;
551
552 if (stm32_port->throttled)
553 return 0;
554
555 if (stm32_port->rx_dma_busy) {
556 rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
557 stm32_port->rx_ch->cookie,
558 NULL);
559 if (rx_dma_status == DMA_IN_PROGRESS)
560 return 0;
561
562 if (rx_dma_status == DMA_PAUSED && !stm32_usart_rx_dma_resume(stm32_port))
563 return 0;
564
565 dev_err(port->dev, "DMA failed : status error.\n");
566 stm32_usart_rx_dma_terminate(stm32_port);
567 }
568
569 stm32_port->rx_dma_busy = true;
570
571 stm32_port->last_res = RX_BUF_L;
572 /* Prepare a DMA cyclic transaction */
573 desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch,
574 stm32_port->rx_dma_buf,
575 RX_BUF_L, RX_BUF_P,
576 DMA_DEV_TO_MEM,
577 DMA_PREP_INTERRUPT);
578 if (!desc) {
579 dev_err(port->dev, "rx dma prep cyclic failed\n");
580 stm32_port->rx_dma_busy = false;
581 return -ENODEV;
582 }
583
584 desc->callback = stm32_usart_rx_dma_complete;
585 desc->callback_param = port;
586
587 /* Push current DMA transaction in the pending queue */
588 ret = dma_submit_error(dmaengine_submit(desc));
589 if (ret) {
590 dmaengine_terminate_sync(stm32_port->rx_ch);
591 stm32_port->rx_dma_busy = false;
592 return ret;
593 }
594
595 /* Issue pending DMA requests */
596 dma_async_issue_pending(stm32_port->rx_ch);
597
598 return 0;
599}
600
601static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port)
602{
603 dmaengine_terminate_async(stm32_port->tx_ch);
604 stm32_port->tx_dma_busy = false;
605}
606
607static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port)
608{
609 /*
610 * We cannot use the function "dmaengine_tx_status" to know the
611 * status of DMA. This function does not show if the "dma complete"
612 * callback of the DMA transaction has been called. So we prefer
 613 * to use the "tx_dma_busy" flag to prevent two DMA transactions from
 614 * running at the same time.
615 */
616 return stm32_port->tx_dma_busy;
617}
618
619static int stm32_usart_tx_dma_pause(struct stm32_port *stm32_port)
620{
621 return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch,
622 DMA_IN_PROGRESS, dmaengine_pause,
623 stm32_usart_tx_dma_started,
624 stm32_usart_tx_dma_terminate);
625}
626
627static int stm32_usart_tx_dma_resume(struct stm32_port *stm32_port)
628{
629 return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch,
630 DMA_PAUSED, dmaengine_resume,
631 stm32_usart_tx_dma_started,
632 stm32_usart_tx_dma_terminate);
633}
634
635static void stm32_usart_tx_dma_complete(void *arg)
636{
637 struct uart_port *port = arg;
638 struct stm32_port *stm32port = to_stm32_port(port);
639 unsigned long flags;
640
641 stm32_usart_tx_dma_terminate(stm32port);
642
643 /* Let's see if we have pending data to send */
644 uart_port_lock_irqsave(port, &flags);
645 stm32_usart_transmit_chars(port);
646 uart_port_unlock_irqrestore(port, flags);
647}
648
649static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
650{
651 struct stm32_port *stm32_port = to_stm32_port(port);
652 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
653
654 /*
 655 * Enables the TX FIFO threshold irq when the FIFO is enabled,
 656 * or the TX empty irq when the FIFO is disabled.
657 */
658 if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
659 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
660 else
661 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
662}
663
664static void stm32_usart_tc_interrupt_enable(struct uart_port *port)
665{
666 struct stm32_port *stm32_port = to_stm32_port(port);
667 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
668
669 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE);
670}
671
672static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
673{
674 struct stm32_port *stm32_port = to_stm32_port(port);
675 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
676
677 if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
678 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
679 else
680 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
681}
682
683static void stm32_usart_tc_interrupt_disable(struct uart_port *port)
684{
685 struct stm32_port *stm32_port = to_stm32_port(port);
686 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
687
688 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE);
689}
690
691static void stm32_usart_transmit_chars_pio(struct uart_port *port)
692{
693 struct stm32_port *stm32_port = to_stm32_port(port);
694 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
695 struct circ_buf *xmit = &port->state->xmit;
696
697 while (!uart_circ_empty(xmit)) {
698 /* Check that TDR is empty before filling FIFO */
699 if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
700 break;
701 writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr);
702 uart_xmit_advance(port, 1);
703 }
704
 705 /* Rely on the TXE irq (masked or unmasked) to send the remaining data */
706 if (uart_circ_empty(xmit))
707 stm32_usart_tx_interrupt_disable(port);
708 else
709 stm32_usart_tx_interrupt_enable(port);
710}
711
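/*
 * Transmit by DMA: copy up to TX_BUF_L bytes from the circular buffer into
 * the DMA bounce buffer and submit a single transaction, or resume a paused
 * transfer. Falls back to PIO transmission on any DMA error.
 */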
712static void stm32_usart_transmit_chars_dma(struct uart_port *port)
713{
714 struct stm32_port *stm32port = to_stm32_port(port);
715 struct circ_buf *xmit = &port->state->xmit;
716 struct dma_async_tx_descriptor *desc = NULL;
717 unsigned int count;
718 int ret;
719
720 if (stm32_usart_tx_dma_started(stm32port)) {
721 ret = stm32_usart_tx_dma_resume(stm32port);
722 if (ret < 0 && ret != -EAGAIN)
723 goto fallback_err;
724 return;
725 }
726
727 count = uart_circ_chars_pending(xmit);
728
729 if (count > TX_BUF_L)
730 count = TX_BUF_L;
731
732 if (xmit->tail < xmit->head) {
733 memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count);
734 } else {
735 size_t one = UART_XMIT_SIZE - xmit->tail;
736 size_t two;
737
738 if (one > count)
739 one = count;
740 two = count - one;
741
742 memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one);
743 if (two)
744 memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two);
745 }
746
747 desc = dmaengine_prep_slave_single(stm32port->tx_ch,
748 stm32port->tx_dma_buf,
749 count,
750 DMA_MEM_TO_DEV,
751 DMA_PREP_INTERRUPT);
752
753 if (!desc)
754 goto fallback_err;
755
756 /*
 757 * Set the "tx_dma_busy" flag. This flag will be released when
 758 * dmaengine_terminate_async() is called. It prevents
 759 * transmit_chars_dma() from starting another DMA transaction
 760 * while the callback of the previous one has not been called yet.
761 */
762 stm32port->tx_dma_busy = true;
763
764 desc->callback = stm32_usart_tx_dma_complete;
765 desc->callback_param = port;
766
 767 /* Push the current DMA TX transaction into the pending queue */
 768 /* DMA not yet started, safe to free resources */
769 ret = dma_submit_error(dmaengine_submit(desc));
770 if (ret) {
771 dev_err(port->dev, "DMA failed with error code: %d\n", ret);
772 stm32_usart_tx_dma_terminate(stm32port);
773 goto fallback_err;
774 }
775
776 /* Issue pending DMA TX requests */
777 dma_async_issue_pending(stm32port->tx_ch);
778
779 uart_xmit_advance(port, count);
780
781 return;
782
783fallback_err:
784 stm32_usart_transmit_chars_pio(port);
785}
786
787static void stm32_usart_transmit_chars(struct uart_port *port)
788{
789 struct stm32_port *stm32_port = to_stm32_port(port);
790 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
791 struct circ_buf *xmit = &port->state->xmit;
792 u32 isr;
793 int ret;
794
795 if (!stm32_port->hw_flow_control &&
796 port->rs485.flags & SER_RS485_ENABLED &&
797 (port->x_char ||
798 !(uart_circ_empty(xmit) || uart_tx_stopped(port)))) {
799 stm32_usart_tc_interrupt_disable(port);
800 stm32_usart_rs485_rts_enable(port);
801 }
802
803 if (port->x_char) {
804 /* dma terminate may have been called in case of dma pause failure */
805 stm32_usart_tx_dma_pause(stm32_port);
806
807 /* Check that TDR is empty before filling FIFO */
808 ret =
809 readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
810 isr,
811 (isr & USART_SR_TXE),
812 10, 1000);
813 if (ret)
814 dev_warn(port->dev, "1 character may be erased\n");
815
816 writel_relaxed(port->x_char, port->membase + ofs->tdr);
817 port->x_char = 0;
818 port->icount.tx++;
819
820 /* dma terminate may have been called in case of dma resume failure */
821 stm32_usart_tx_dma_resume(stm32_port);
822 return;
823 }
824
825 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
826 stm32_usart_tx_interrupt_disable(port);
827 return;
828 }
829
830 if (ofs->icr == UNDEF_REG)
831 stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC);
832 else
833 writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);
834
835 if (stm32_port->tx_ch)
836 stm32_usart_transmit_chars_dma(port);
837 else
838 stm32_usart_transmit_chars_pio(port);
839
840 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
841 uart_write_wakeup(port);
842
843 if (uart_circ_empty(xmit)) {
844 stm32_usart_tx_interrupt_disable(port);
845 if (!stm32_port->hw_flow_control &&
846 port->rs485.flags & SER_RS485_ENABLED) {
847 stm32_usart_tc_interrupt_enable(port);
848 }
849 }
850}
851
852static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
853{
854 struct uart_port *port = ptr;
855 struct tty_port *tport = &port->state->port;
856 struct stm32_port *stm32_port = to_stm32_port(port);
857 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
858 u32 sr;
859 unsigned int size;
860
861 sr = readl_relaxed(port->membase + ofs->isr);
862
863 if (!stm32_port->hw_flow_control &&
864 port->rs485.flags & SER_RS485_ENABLED &&
865 (sr & USART_SR_TC)) {
866 stm32_usart_tc_interrupt_disable(port);
867 stm32_usart_rs485_rts_disable(port);
868 }
869
870 if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
871 writel_relaxed(USART_ICR_RTOCF,
872 port->membase + ofs->icr);
873
874 if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
875 /* Clear wake up flag and disable wake up interrupt */
876 writel_relaxed(USART_ICR_WUCF,
877 port->membase + ofs->icr);
878 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
879 if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
880 pm_wakeup_event(tport->tty->dev, 0);
881 }
882
883 /*
 884 * RX errors in DMA mode have to be handled ASAP to avoid an overrun, as the DMA request
 885 * line has been masked by HW and RX data are stacking up in the FIFO.
886 */
887 if (!stm32_port->throttled) {
888 if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_started(stm32_port)) ||
889 ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_started(stm32_port))) {
890 uart_port_lock(port);
891 size = stm32_usart_receive_chars(port, false);
892 uart_unlock_and_check_sysrq(port);
893 if (size)
894 tty_flip_buffer_push(tport);
895 }
896 }
897
898 if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
899 uart_port_lock(port);
900 stm32_usart_transmit_chars(port);
901 uart_port_unlock(port);
902 }
903
904 /* Receiver timeout irq for DMA RX */
905 if (stm32_usart_rx_dma_started(stm32_port) && !stm32_port->throttled) {
906 uart_port_lock(port);
907 size = stm32_usart_receive_chars(port, false);
908 uart_unlock_and_check_sysrq(port);
909 if (size)
910 tty_flip_buffer_push(tport);
911 }
912
913 return IRQ_HANDLED;
914}
915
916static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
917{
918 struct stm32_port *stm32_port = to_stm32_port(port);
919 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
920
921 if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
922 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE);
923 else
924 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE);
925
926 mctrl_gpio_set(stm32_port->gpios, mctrl);
927}
928
929static unsigned int stm32_usart_get_mctrl(struct uart_port *port)
930{
931 struct stm32_port *stm32_port = to_stm32_port(port);
932 unsigned int ret;
933
934 /* This routine is used to get signals of: DCD, DSR, RI, and CTS */
935 ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
936
937 return mctrl_gpio_get(stm32_port->gpios, &ret);
938}
939
940static void stm32_usart_enable_ms(struct uart_port *port)
941{
942 mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
943}
944
945static void stm32_usart_disable_ms(struct uart_port *port)
946{
947 mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
948}
949
950/* Transmit stop */
951static void stm32_usart_stop_tx(struct uart_port *port)
952{
953 struct stm32_port *stm32_port = to_stm32_port(port);
954
955 stm32_usart_tx_interrupt_disable(port);
956
957 /* dma terminate may have been called in case of dma pause failure */
958 stm32_usart_tx_dma_pause(stm32_port);
959
960 stm32_usart_rs485_rts_disable(port);
961}
962
963/* There are probably characters waiting to be transmitted. */
964static void stm32_usart_start_tx(struct uart_port *port)
965{
966 struct circ_buf *xmit = &port->state->xmit;
967
968 if (uart_circ_empty(xmit) && !port->x_char) {
969 stm32_usart_rs485_rts_disable(port);
970 return;
971 }
972
973 stm32_usart_rs485_rts_enable(port);
974
975 stm32_usart_transmit_chars(port);
976}
977
978/* Flush the transmit buffer. */
979static void stm32_usart_flush_buffer(struct uart_port *port)
980{
981 struct stm32_port *stm32_port = to_stm32_port(port);
982
983 if (stm32_port->tx_ch)
984 stm32_usart_tx_dma_terminate(stm32_port);
985}
986
987/* Throttle the remote when input buffer is about to overflow. */
988static void stm32_usart_throttle(struct uart_port *port)
989{
990 struct stm32_port *stm32_port = to_stm32_port(port);
991 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
992 unsigned long flags;
993
994 uart_port_lock_irqsave(port, &flags);
995
996 /*
997 * Pause DMA transfer, so the RX data gets queued into the FIFO.
998 * Hardware flow control is triggered when RX FIFO is full.
999 */
1000 stm32_usart_rx_dma_pause(stm32_port);
1001
1002 stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
1003 if (stm32_port->cr3_irq)
1004 stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
1005
1006 stm32_port->throttled = true;
1007 uart_port_unlock_irqrestore(port, flags);
1008}
1009
1010/* Unthrottle the remote, the input buffer can now accept data. */
1011static void stm32_usart_unthrottle(struct uart_port *port)
1012{
1013 struct stm32_port *stm32_port = to_stm32_port(port);
1014 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1015 unsigned long flags;
1016
1017 uart_port_lock_irqsave(port, &flags);
1018 stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
1019 if (stm32_port->cr3_irq)
1020 stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
1021
1022 stm32_port->throttled = false;
1023
1024 /*
1025 * Switch back to DMA mode (resume DMA).
1026 * Hardware flow control is stopped when FIFO is not full any more.
1027 */
1028 if (stm32_port->rx_ch)
1029 stm32_usart_rx_dma_start_or_resume(port);
1030
1031 uart_port_unlock_irqrestore(port, flags);
1032}
1033
1034/* Receive stop */
1035static void stm32_usart_stop_rx(struct uart_port *port)
1036{
1037 struct stm32_port *stm32_port = to_stm32_port(port);
1038 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1039
1040 /* Disable DMA request line. */
1041 stm32_usart_rx_dma_pause(stm32_port);
1042
1043 stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
1044 if (stm32_port->cr3_irq)
1045 stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
1046}
1047
1048static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
1049{
1050 struct stm32_port *stm32_port = to_stm32_port(port);
1051 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1052 unsigned long flags;
1053
1054 spin_lock_irqsave(&port->lock, flags);
1055
1056 if (break_state)
1057 stm32_usart_set_bits(port, ofs->rqr, USART_RQR_SBKRQ);
1058 else
1059 stm32_usart_clr_bits(port, ofs->rqr, USART_RQR_SBKRQ);
1060
1061 spin_unlock_irqrestore(&port->lock, flags);
1062}
1063
1064static int stm32_usart_startup(struct uart_port *port)
1065{
1066 struct stm32_port *stm32_port = to_stm32_port(port);
1067 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1068 const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1069 const char *name = to_platform_device(port->dev)->name;
1070 u32 val;
1071 int ret;
1072
1073 ret = request_irq(port->irq, stm32_usart_interrupt,
1074 IRQF_NO_SUSPEND, name, port);
1075 if (ret)
1076 return ret;
1077
1078 if (stm32_port->swap) {
1079 val = readl_relaxed(port->membase + ofs->cr2);
1080 val |= USART_CR2_SWAP;
1081 writel_relaxed(val, port->membase + ofs->cr2);
1082 }
1083
1084 /* RX FIFO Flush */
1085 if (ofs->rqr != UNDEF_REG)
1086 writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);
1087
1088 if (stm32_port->rx_ch) {
1089 ret = stm32_usart_rx_dma_start_or_resume(port);
1090 if (ret) {
1091 free_irq(port->irq, port);
1092 return ret;
1093 }
1094 }
1095
1096 /* RX enabling */
1097 val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
1098 stm32_usart_set_bits(port, ofs->cr1, val);
1099
1100 return 0;
1101}
1102
1103static void stm32_usart_shutdown(struct uart_port *port)
1104{
1105 struct stm32_port *stm32_port = to_stm32_port(port);
1106 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1107 const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1108 u32 val, isr;
1109 int ret;
1110
1111 if (stm32_usart_tx_dma_started(stm32_port))
1112 stm32_usart_tx_dma_terminate(stm32_port);
1113
1114 if (stm32_port->tx_ch)
1115 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
1116
1117 /* Disable modem control interrupts */
1118 stm32_usart_disable_ms(port);
1119
1120 val = USART_CR1_TXEIE | USART_CR1_TE;
1121 val |= stm32_port->cr1_irq | USART_CR1_RE;
1122 val |= BIT(cfg->uart_enable_bit);
1123 if (stm32_port->fifoen)
1124 val |= USART_CR1_FIFOEN;
1125
1126 ret = readl_relaxed_poll_timeout(port->membase + ofs->isr,
1127 isr, (isr & USART_SR_TC),
1128 10, 100000);
1129
1130 /* Send the TC error message only when ISR_TC is not set */
1131 if (ret)
1132 dev_err(port->dev, "Transmission is not complete\n");
1133
1134 /* Disable RX DMA. */
1135 if (stm32_port->rx_ch) {
1136 stm32_usart_rx_dma_terminate(stm32_port);
1137 dmaengine_synchronize(stm32_port->rx_ch);
1138 }
1139
1140 /* flush RX & TX FIFO */
1141 if (ofs->rqr != UNDEF_REG)
1142 writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
1143 port->membase + ofs->rqr);
1144
1145 stm32_usart_clr_bits(port, ofs->cr1, val);
1146
1147 free_irq(port->irq, port);
1148}
1149
1150static void stm32_usart_set_termios(struct uart_port *port,
1151 struct ktermios *termios,
1152 const struct ktermios *old)
1153{
1154 struct stm32_port *stm32_port = to_stm32_port(port);
1155 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1156 const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1157 struct serial_rs485 *rs485conf = &port->rs485;
1158 unsigned int baud, bits;
1159 u32 usartdiv, mantissa, fraction, oversampling;
1160 tcflag_t cflag = termios->c_cflag;
1161 u32 cr1, cr2, cr3, isr;
1162 unsigned long flags;
1163 int ret;
1164
1165 if (!stm32_port->hw_flow_control)
1166 cflag &= ~CRTSCTS;
1167
1168 baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);
1169
1170 uart_port_lock_irqsave(port, &flags);
1171
1172 ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
1173 isr,
1174 (isr & USART_SR_TC),
1175 10, 100000);
1176
1177 /* Send the TC error message only when ISR_TC is not set. */
1178 if (ret)
1179 dev_err(port->dev, "Transmission is not complete\n");
1180
1181 /* Stop serial port and reset value */
1182 writel_relaxed(0, port->membase + ofs->cr1);
1183
1184 /* flush RX & TX FIFO */
1185 if (ofs->rqr != UNDEF_REG)
1186 writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
1187 port->membase + ofs->rqr);
1188
1189 cr1 = USART_CR1_TE | USART_CR1_RE;
1190 if (stm32_port->fifoen)
1191 cr1 |= USART_CR1_FIFOEN;
1192 cr2 = stm32_port->swap ? USART_CR2_SWAP : 0;
1193
1194 /* Tx and RX FIFO configuration */
1195 cr3 = readl_relaxed(port->membase + ofs->cr3);
1196 cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
1197 if (stm32_port->fifoen) {
1198 if (stm32_port->txftcfg >= 0)
1199 cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT;
1200 if (stm32_port->rxftcfg >= 0)
1201 cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT;
1202 }
1203
1204 if (cflag & CSTOPB)
1205 cr2 |= USART_CR2_STOP_2B;
1206
1207 bits = tty_get_char_size(cflag);
1208 stm32_port->rdr_mask = (BIT(bits) - 1);
1209
1210 if (cflag & PARENB) {
1211 bits++;
1212 cr1 |= USART_CR1_PCE;
1213 }
1214
1215 /*
1216 * Word length configuration:
1217 * CS8 + parity, 9 bits word aka [M1:M0] = 0b01
1218 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10
1219 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
1220 * M0 and M1 already cleared by cr1 initialization.
1221 */
1222 if (bits == 9) {
1223 cr1 |= USART_CR1_M0;
1224 } else if ((bits == 7) && cfg->has_7bits_data) {
1225 cr1 |= USART_CR1_M1;
1226 } else if (bits != 8) {
1227 dev_dbg(port->dev, "Unsupported data bits config: %u bits\n"
1228 , bits);
1229 cflag &= ~CSIZE;
1230 cflag |= CS8;
1231 termios->c_cflag = cflag;
1232 bits = 8;
1233 if (cflag & PARENB) {
1234 bits++;
1235 cr1 |= USART_CR1_M0;
1236 }
1237 }
1238
1239 if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
1240 (stm32_port->fifoen &&
1241 stm32_port->rxftcfg >= 0))) {
1242 if (cflag & CSTOPB)
1243 bits = bits + 3; /* 1 start bit + 2 stop bits */
1244 else
1245 bits = bits + 2; /* 1 start bit + 1 stop bit */
1246
1247 /* RX timeout irq to occur after last stop bit + bits */
1248 stm32_port->cr1_irq = USART_CR1_RTOIE;
1249 writel_relaxed(bits, port->membase + ofs->rtor);
1250 cr2 |= USART_CR2_RTOEN;
1251 /*
 1252 * Enable the FIFO threshold irq in two cases: either when there is no DMA, or when
 1253 * waking up over USART from low power, until the DMA gets re-enabled by resume.
1254 */
1255 stm32_port->cr3_irq = USART_CR3_RXFTIE;
1256 }
1257
1258 cr1 |= stm32_port->cr1_irq;
1259 cr3 |= stm32_port->cr3_irq;
1260
1261 if (cflag & PARODD)
1262 cr1 |= USART_CR1_PS;
1263
1264 port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
1265 if (cflag & CRTSCTS) {
1266 port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
1267 cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
1268 }
1269
1270 usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
1271
1272 /*
1273 * The USART supports 16 or 8 times oversampling.
1274 * By default we prefer 16 times oversampling, so that the receiver
1275 * has a better tolerance to clock deviations.
1276 * 8 times oversampling is only used to achieve higher speeds.
1277 */
1278 if (usartdiv < 16) {
1279 oversampling = 8;
1280 cr1 |= USART_CR1_OVER8;
1281 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
1282 } else {
1283 oversampling = 16;
1284 cr1 &= ~USART_CR1_OVER8;
1285 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
1286 }
1287
1288 mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
1289 fraction = usartdiv % oversampling;
1290 writel_relaxed(mantissa | fraction, port->membase + ofs->brr);
1291
1292 uart_update_timeout(port, cflag, baud);
1293
1294 port->read_status_mask = USART_SR_ORE;
1295 if (termios->c_iflag & INPCK)
1296 port->read_status_mask |= USART_SR_PE | USART_SR_FE;
1297 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
1298 port->read_status_mask |= USART_SR_FE;
1299
1300 /* Characters to ignore */
1301 port->ignore_status_mask = 0;
1302 if (termios->c_iflag & IGNPAR)
1303 port->ignore_status_mask = USART_SR_PE | USART_SR_FE;
1304 if (termios->c_iflag & IGNBRK) {
1305 port->ignore_status_mask |= USART_SR_FE;
1306 /*
1307 * If we're ignoring parity and break indicators,
1308 * ignore overruns too (for real raw support).
1309 */
1310 if (termios->c_iflag & IGNPAR)
1311 port->ignore_status_mask |= USART_SR_ORE;
1312 }
1313
1314 /* Ignore all characters if CREAD is not set */
1315 if ((termios->c_cflag & CREAD) == 0)
1316 port->ignore_status_mask |= USART_SR_DUMMY_RX;
1317
1318 if (stm32_port->rx_ch) {
1319 /*
1320 * Setup DMA to collect only valid data and enable error irqs.
1321 * This also enables break reception when using DMA.
1322 */
1323 cr1 |= USART_CR1_PEIE;
1324 cr3 |= USART_CR3_EIE;
1325 cr3 |= USART_CR3_DMAR;
1326 cr3 |= USART_CR3_DDRE;
1327 }
1328
1329 if (stm32_port->tx_ch)
1330 cr3 |= USART_CR3_DMAT;
1331
1332 if (rs485conf->flags & SER_RS485_ENABLED) {
1333 stm32_usart_config_reg_rs485(&cr1, &cr3,
1334 rs485conf->delay_rts_before_send,
1335 rs485conf->delay_rts_after_send,
1336 baud);
1337 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
1338 cr3 &= ~USART_CR3_DEP;
1339 rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
1340 } else {
1341 cr3 |= USART_CR3_DEP;
1342 rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
1343 }
1344
1345 } else {
1346 cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP);
1347 cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
1348 }
1349
1350 /* Configure wake up from low power on start bit detection */
1351 if (stm32_port->wakeup_src) {
1352 cr3 &= ~USART_CR3_WUS_MASK;
1353 cr3 |= USART_CR3_WUS_START_BIT;
1354 }
1355
1356 writel_relaxed(cr3, port->membase + ofs->cr3);
1357 writel_relaxed(cr2, port->membase + ofs->cr2);
1358 writel_relaxed(cr1, port->membase + ofs->cr1);
1359
1360 stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
1361 uart_port_unlock_irqrestore(port, flags);
1362
1363 /* Handle modem control interrupts */
1364 if (UART_ENABLE_MS(port, termios->c_cflag))
1365 stm32_usart_enable_ms(port);
1366 else
1367 stm32_usart_disable_ms(port);
1368}
1369
1370static const char *stm32_usart_type(struct uart_port *port)
1371{
1372 return (port->type == PORT_STM32) ? DRIVER_NAME : NULL;
1373}
1374
1375static void stm32_usart_release_port(struct uart_port *port)
1376{
1377}
1378
1379static int stm32_usart_request_port(struct uart_port *port)
1380{
1381 return 0;
1382}
1383
1384static void stm32_usart_config_port(struct uart_port *port, int flags)
1385{
1386 if (flags & UART_CONFIG_TYPE)
1387 port->type = PORT_STM32;
1388}
1389
1390static int
1391stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser)
1392{
1393 /* No user changeable parameters */
1394 return -EINVAL;
1395}
1396
1397static void stm32_usart_pm(struct uart_port *port, unsigned int state,
1398 unsigned int oldstate)
1399{
1400 struct stm32_port *stm32port = container_of(port,
1401 struct stm32_port, port);
1402 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1403 const struct stm32_usart_config *cfg = &stm32port->info->cfg;
1404 unsigned long flags;
1405
1406 switch (state) {
1407 case UART_PM_STATE_ON:
1408 pm_runtime_get_sync(port->dev);
1409 break;
1410 case UART_PM_STATE_OFF:
1411 uart_port_lock_irqsave(port, &flags);
1412 stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
1413 uart_port_unlock_irqrestore(port, flags);
1414 pm_runtime_put_sync(port->dev);
1415 break;
1416 }
1417}
1418
1419#if defined(CONFIG_CONSOLE_POLL)
1420
1421 /* Callbacks for characters polling in debug context (i.e. KGDB). */
1422static int stm32_usart_poll_init(struct uart_port *port)
1423{
1424 struct stm32_port *stm32_port = to_stm32_port(port);
1425
1426 return clk_prepare_enable(stm32_port->clk);
1427}
1428
1429static int stm32_usart_poll_get_char(struct uart_port *port)
1430{
1431 struct stm32_port *stm32_port = to_stm32_port(port);
1432 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1433
1434 if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE))
1435 return NO_POLL_CHAR;
1436
1437 return readl_relaxed(port->membase + ofs->rdr) & stm32_port->rdr_mask;
1438}
1439
1440static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch)
1441{
1442 stm32_usart_console_putchar(port, ch);
1443}
1444#endif /* CONFIG_CONSOLE_POLL */
1445
1446static const struct uart_ops stm32_uart_ops = {
1447 .tx_empty = stm32_usart_tx_empty,
1448 .set_mctrl = stm32_usart_set_mctrl,
1449 .get_mctrl = stm32_usart_get_mctrl,
1450 .stop_tx = stm32_usart_stop_tx,
1451 .start_tx = stm32_usart_start_tx,
1452 .throttle = stm32_usart_throttle,
1453 .unthrottle = stm32_usart_unthrottle,
1454 .stop_rx = stm32_usart_stop_rx,
1455 .enable_ms = stm32_usart_enable_ms,
1456 .break_ctl = stm32_usart_break_ctl,
1457 .startup = stm32_usart_startup,
1458 .shutdown = stm32_usart_shutdown,
1459 .flush_buffer = stm32_usart_flush_buffer,
1460 .set_termios = stm32_usart_set_termios,
1461 .pm = stm32_usart_pm,
1462 .type = stm32_usart_type,
1463 .release_port = stm32_usart_release_port,
1464 .request_port = stm32_usart_request_port,
1465 .config_port = stm32_usart_config_port,
1466 .verify_port = stm32_usart_verify_port,
1467#if defined(CONFIG_CONSOLE_POLL)
1468 .poll_init = stm32_usart_poll_init,
1469 .poll_get_char = stm32_usart_poll_get_char,
1470 .poll_put_char = stm32_usart_poll_put_char,
1471#endif /* CONFIG_CONSOLE_POLL */
1472};
1473
1474/*
1475 * STM32H7 RX & TX FIFO threshold configuration (CR3 RXFTCFG / TXFTCFG)
1476 * Note: 1 isn't a valid value in RXFTCFG / TXFTCFG. In this case,
1477 * RXNEIE / TXEIE can be used instead of threshold irqs: RXFTIE / TXFTIE.
 1478 * So, RXFTCFG / TXFTCFG bitfield values are encoded as array index + 1.
1479 */
1480static const u32 stm32h7_usart_fifo_thresh_cfg[] = { 1, 2, 4, 8, 12, 14, 16 };
1481
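/*
 * Convert the FIFO threshold requested in DT (in bytes) into the matching
 * RXFTCFG/TXFTCFG field value, or -EINVAL when the 1-byte threshold is
 * requested and the threshold irq cannot be used.
 */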
1482static void stm32_usart_get_ftcfg(struct platform_device *pdev, const char *p,
1483 int *ftcfg)
1484{
1485 u32 bytes, i;
1486
 1487 /* DT option to get the RX & TX FIFO threshold (defaults to 8 bytes) */
1488 if (of_property_read_u32(pdev->dev.of_node, p, &bytes))
1489 bytes = 8;
1490
1491 for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++)
1492 if (stm32h7_usart_fifo_thresh_cfg[i] >= bytes)
1493 break;
1494 if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg))
1495 i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1;
1496
1497 dev_dbg(&pdev->dev, "%s set to %d bytes\n", p,
1498 stm32h7_usart_fifo_thresh_cfg[i]);
1499
1500 /* Provide FIFO threshold ftcfg (1 is invalid: threshold irq unused) */
1501 if (i)
1502 *ftcfg = i - 1;
1503 else
1504 *ftcfg = -EINVAL;
1505}
1506
1507static void stm32_usart_deinit_port(struct stm32_port *stm32port)
1508{
1509 clk_disable_unprepare(stm32port->clk);
1510}
1511
1512static const struct serial_rs485 stm32_rs485_supported = {
1513 .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
1514 SER_RS485_RX_DURING_TX,
1515 .delay_rts_before_send = 1,
1516 .delay_rts_after_send = 1,
1517};
1518
1519static int stm32_usart_init_port(struct stm32_port *stm32port,
1520 struct platform_device *pdev)
1521{
1522 struct uart_port *port = &stm32port->port;
1523 struct resource *res;
1524 int ret, irq;
1525
1526 irq = platform_get_irq(pdev, 0);
1527 if (irq < 0)
1528 return irq;
1529
1530 port->iotype = UPIO_MEM;
1531 port->flags = UPF_BOOT_AUTOCONF;
1532 port->ops = &stm32_uart_ops;
1533 port->dev = &pdev->dev;
1534 port->fifosize = stm32port->info->cfg.fifosize;
1535 port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
1536 port->irq = irq;
1537 port->rs485_config = stm32_usart_config_rs485;
1538 port->rs485_supported = stm32_rs485_supported;
1539
1540 ret = stm32_usart_init_rs485(port, pdev);
1541 if (ret)
1542 return ret;
1543
1544 stm32port->wakeup_src = stm32port->info->cfg.has_wakeup &&
1545 of_property_read_bool(pdev->dev.of_node, "wakeup-source");
1546
1547 stm32port->swap = stm32port->info->cfg.has_swap &&
1548 of_property_read_bool(pdev->dev.of_node, "rx-tx-swap");
1549
1550 stm32port->fifoen = stm32port->info->cfg.has_fifo;
1551 if (stm32port->fifoen) {
1552 stm32_usart_get_ftcfg(pdev, "rx-threshold",
1553 &stm32port->rxftcfg);
1554 stm32_usart_get_ftcfg(pdev, "tx-threshold",
1555 &stm32port->txftcfg);
1556 }
1557
1558 port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
1559 if (IS_ERR(port->membase))
1560 return PTR_ERR(port->membase);
1561 port->mapbase = res->start;
1562
1563 spin_lock_init(&port->lock);
1564
1565 stm32port->clk = devm_clk_get(&pdev->dev, NULL);
1566 if (IS_ERR(stm32port->clk))
1567 return PTR_ERR(stm32port->clk);
1568
1569 /* Ensure that clk rate is correct by enabling the clk */
1570 ret = clk_prepare_enable(stm32port->clk);
1571 if (ret)
1572 return ret;
1573
1574 stm32port->port.uartclk = clk_get_rate(stm32port->clk);
1575 if (!stm32port->port.uartclk) {
1576 ret = -EINVAL;
1577 goto err_clk;
1578 }
1579
1580 stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0);
1581 if (IS_ERR(stm32port->gpios)) {
1582 ret = PTR_ERR(stm32port->gpios);
1583 goto err_clk;
1584 }
1585
1586 /*
 1587 * CTS/RTS gpios and the "st,hw-flow-ctrl" (deprecated) or "uart-has-rtscts"
 1588 * properties must not both be specified at the same time.
1589 */
1590 if (stm32port->hw_flow_control) {
1591 if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) ||
1592 mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) {
1593 dev_err(&pdev->dev, "Conflicting RTS/CTS config\n");
1594 ret = -EINVAL;
1595 goto err_clk;
1596 }
1597 }
1598
1599 return ret;
1600
1601err_clk:
1602 clk_disable_unprepare(stm32port->clk);
1603
1604 return ret;
1605}
1606
1607static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev)
1608{
1609 struct device_node *np = pdev->dev.of_node;
1610 int id;
1611
1612 if (!np)
1613 return NULL;
1614
1615 id = of_alias_get_id(np, "serial");
1616 if (id < 0) {
1617 dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id);
1618 return NULL;
1619 }
1620
1621 if (WARN_ON(id >= STM32_MAX_PORTS))
1622 return NULL;
1623
1624 stm32_ports[id].hw_flow_control =
 1625 of_property_read_bool(np, "st,hw-flow-ctrl") /* deprecated */ ||
 1626 of_property_read_bool(np, "uart-has-rtscts");
1627 stm32_ports[id].port.line = id;
1628 stm32_ports[id].cr1_irq = USART_CR1_RXNEIE;
1629 stm32_ports[id].cr3_irq = 0;
1630 stm32_ports[id].last_res = RX_BUF_L;
1631 return &stm32_ports[id];
1632}
1633
1634#ifdef CONFIG_OF
1635static const struct of_device_id stm32_match[] = {
1636 { .compatible = "st,stm32-uart", .data = &stm32f4_info},
1637 { .compatible = "st,stm32f7-uart", .data = &stm32f7_info},
1638 { .compatible = "st,stm32h7-uart", .data = &stm32h7_info},
1639 {},
1640};
1641
1642MODULE_DEVICE_TABLE(of, stm32_match);
1643#endif
1644
1645static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port,
1646 struct platform_device *pdev)
1647{
1648 if (stm32port->rx_buf)
1649 dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf,
1650 stm32port->rx_dma_buf);
1651}
1652
1653static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
1654 struct platform_device *pdev)
1655{
1656 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1657 struct uart_port *port = &stm32port->port;
1658 struct device *dev = &pdev->dev;
1659 struct dma_slave_config config;
1660 int ret;
1661
1662 stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L,
1663 &stm32port->rx_dma_buf,
1664 GFP_KERNEL);
1665 if (!stm32port->rx_buf)
1666 return -ENOMEM;
1667
1668 /* Configure DMA channel */
1669 memset(&config, 0, sizeof(config));
1670 config.src_addr = port->mapbase + ofs->rdr;
1671 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1672
1673 ret = dmaengine_slave_config(stm32port->rx_ch, &config);
1674 if (ret < 0) {
1675 dev_err(dev, "rx dma channel config failed\n");
1676 stm32_usart_of_dma_rx_remove(stm32port, pdev);
1677 return ret;
1678 }
1679
1680 return 0;
1681}
1682
1683static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port,
1684 struct platform_device *pdev)
1685{
1686 if (stm32port->tx_buf)
1687 dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf,
1688 stm32port->tx_dma_buf);
1689}
1690
1691static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
1692 struct platform_device *pdev)
1693{
1694 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1695 struct uart_port *port = &stm32port->port;
1696 struct device *dev = &pdev->dev;
1697 struct dma_slave_config config;
1698 int ret;
1699
1700 stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L,
1701 &stm32port->tx_dma_buf,
1702 GFP_KERNEL);
1703 if (!stm32port->tx_buf)
1704 return -ENOMEM;
1705
1706 /* Configure DMA channel */
1707 memset(&config, 0, sizeof(config));
1708 config.dst_addr = port->mapbase + ofs->tdr;
1709 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
1710
1711 ret = dmaengine_slave_config(stm32port->tx_ch, &config);
1712 if (ret < 0) {
1713 dev_err(dev, "tx dma channel config failed\n");
1714 stm32_usart_of_dma_tx_remove(stm32port, pdev);
1715 return ret;
1716 }
1717
1718 return 0;
1719}
1720
1721static int stm32_usart_serial_probe(struct platform_device *pdev)
1722{
1723 struct stm32_port *stm32port;
1724 int ret;
1725
1726 stm32port = stm32_usart_of_get_port(pdev);
1727 if (!stm32port)
1728 return -ENODEV;
1729
1730 stm32port->info = of_device_get_match_data(&pdev->dev);
1731 if (!stm32port->info)
1732 return -EINVAL;
1733
1734 stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx");
1735 if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER)
1736 return -EPROBE_DEFER;
1737
 1738 /* Fall back to interrupt mode for any non-deferral error */
1739 if (IS_ERR(stm32port->rx_ch))
1740 stm32port->rx_ch = NULL;
1741
1742 stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx");
1743 if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) {
1744 ret = -EPROBE_DEFER;
1745 goto err_dma_rx;
1746 }
 1747 /* Fall back to interrupt mode for any non-deferral error */
1748 if (IS_ERR(stm32port->tx_ch))
1749 stm32port->tx_ch = NULL;
1750
1751 ret = stm32_usart_init_port(stm32port, pdev);
1752 if (ret)
1753 goto err_dma_tx;
1754
1755 if (stm32port->wakeup_src) {
1756 device_set_wakeup_capable(&pdev->dev, true);
1757 ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq);
1758 if (ret)
1759 goto err_deinit_port;
1760 }
1761
1762 if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) {
 1763 /* Fall back to interrupt mode */
1764 dma_release_channel(stm32port->rx_ch);
1765 stm32port->rx_ch = NULL;
1766 }
1767
1768 if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) {
 1769 /* Fall back to interrupt mode */
1770 dma_release_channel(stm32port->tx_ch);
1771 stm32port->tx_ch = NULL;
1772 }
1773
1774 if (!stm32port->rx_ch)
1775 dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n");
1776 if (!stm32port->tx_ch)
1777 dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n");
1778
1779 platform_set_drvdata(pdev, &stm32port->port);
1780
1781 pm_runtime_get_noresume(&pdev->dev);
1782 pm_runtime_set_active(&pdev->dev);
1783 pm_runtime_enable(&pdev->dev);
1784
1785 ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
1786 if (ret)
1787 goto err_port;
1788
1789 pm_runtime_put_sync(&pdev->dev);
1790
1791 return 0;
1792
1793err_port:
1794 pm_runtime_disable(&pdev->dev);
1795 pm_runtime_set_suspended(&pdev->dev);
1796 pm_runtime_put_noidle(&pdev->dev);
1797
1798 if (stm32port->tx_ch)
1799 stm32_usart_of_dma_tx_remove(stm32port, pdev);
1800 if (stm32port->rx_ch)
1801 stm32_usart_of_dma_rx_remove(stm32port, pdev);
1802
1803 if (stm32port->wakeup_src)
1804 dev_pm_clear_wake_irq(&pdev->dev);
1805
1806err_deinit_port:
1807 if (stm32port->wakeup_src)
1808 device_set_wakeup_capable(&pdev->dev, false);
1809
1810 stm32_usart_deinit_port(stm32port);
1811
1812err_dma_tx:
1813 if (stm32port->tx_ch)
1814 dma_release_channel(stm32port->tx_ch);
1815
1816err_dma_rx:
1817 if (stm32port->rx_ch)
1818 dma_release_channel(stm32port->rx_ch);
1819
1820 return ret;
1821}
1822
1823static void stm32_usart_serial_remove(struct platform_device *pdev)
1824{
1825 struct uart_port *port = platform_get_drvdata(pdev);
1826 struct stm32_port *stm32_port = to_stm32_port(port);
1827 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1828 u32 cr3;
1829
1830 pm_runtime_get_sync(&pdev->dev);
1831 uart_remove_one_port(&stm32_usart_driver, port);
1832
1833 pm_runtime_disable(&pdev->dev);
1834 pm_runtime_set_suspended(&pdev->dev);
1835 pm_runtime_put_noidle(&pdev->dev);
1836
1837 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE);
1838
1839 if (stm32_port->tx_ch) {
1840 stm32_usart_of_dma_tx_remove(stm32_port, pdev);
1841 dma_release_channel(stm32_port->tx_ch);
1842 }
1843
1844 if (stm32_port->rx_ch) {
1845 stm32_usart_of_dma_rx_remove(stm32_port, pdev);
1846 dma_release_channel(stm32_port->rx_ch);
1847 }
1848
1849 cr3 = readl_relaxed(port->membase + ofs->cr3);
1850 cr3 &= ~USART_CR3_EIE;
1851 cr3 &= ~USART_CR3_DMAR;
1852 cr3 &= ~USART_CR3_DMAT;
1853 cr3 &= ~USART_CR3_DDRE;
1854 writel_relaxed(cr3, port->membase + ofs->cr3);
1855
1856 if (stm32_port->wakeup_src) {
1857 dev_pm_clear_wake_irq(&pdev->dev);
1858 device_init_wakeup(&pdev->dev, false);
1859 }
1860
1861 stm32_usart_deinit_port(stm32_port);
1862}
1863
1864static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
1865{
1866 struct stm32_port *stm32_port = to_stm32_port(port);
1867 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1868 u32 isr;
1869 int ret;
1870
1871 ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr,
1872 (isr & USART_SR_TXE), 100,
1873 STM32_USART_TIMEOUT_USEC);
1874 if (ret != 0) {
1875 dev_err(port->dev, "Error while sending data in UART TX : %d\n", ret);
1876 return;
1877 }
1878 writel_relaxed(ch, port->membase + ofs->tdr);
1879}
1880
1881#ifdef CONFIG_SERIAL_STM32_CONSOLE
1882static void stm32_usart_console_write(struct console *co, const char *s,
1883 unsigned int cnt)
1884{
1885 struct uart_port *port = &stm32_ports[co->index].port;
1886 struct stm32_port *stm32_port = to_stm32_port(port);
1887 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1888 const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1889 unsigned long flags;
1890 u32 old_cr1, new_cr1;
1891 int locked = 1;
1892
1893 if (oops_in_progress)
1894 locked = uart_port_trylock_irqsave(port, &flags);
1895 else
1896 uart_port_lock_irqsave(port, &flags);
1897
1898 /* Save and disable interrupts, enable the transmitter */
1899 old_cr1 = readl_relaxed(port->membase + ofs->cr1);
1900 new_cr1 = old_cr1 & ~USART_CR1_IE_MASK;
1901 new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit);
1902 writel_relaxed(new_cr1, port->membase + ofs->cr1);
1903
1904 uart_console_write(port, s, cnt, stm32_usart_console_putchar);
1905
1906 /* Restore interrupt state */
1907 writel_relaxed(old_cr1, port->membase + ofs->cr1);
1908
1909 if (locked)
1910 uart_port_unlock_irqrestore(port, flags);
1911}
1912
1913static int stm32_usart_console_setup(struct console *co, char *options)
1914{
1915 struct stm32_port *stm32port;
1916 int baud = 9600;
1917 int bits = 8;
1918 int parity = 'n';
1919 int flow = 'n';
1920
1921 if (co->index >= STM32_MAX_PORTS)
1922 return -ENODEV;
1923
1924 stm32port = &stm32_ports[co->index];
1925
1926 /*
1927 * This driver does not support early console initialization
1928 * (use ARM early printk support instead), so we only expect
1929 * this to be called during the uart port registration when the
1930 * driver gets probed and the port should be mapped at that point.
1931 */
1932 if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
1933 return -ENXIO;
1934
1935 if (options)
1936 uart_parse_options(options, &baud, &parity, &bits, &flow);
1937
1938 return uart_set_options(&stm32port->port, co, baud, parity, bits, flow);
1939}
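
/*
 * Typical usage, assuming STM32_SERIAL_NAME is "ttySTM": select this console
 * on the kernel command line with e.g. "console=ttySTM0,115200n8".
 */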
1940
1941static struct console stm32_console = {
1942 .name = STM32_SERIAL_NAME,
1943 .device = uart_console_device,
1944 .write = stm32_usart_console_write,
1945 .setup = stm32_usart_console_setup,
1946 .flags = CON_PRINTBUFFER,
1947 .index = -1,
1948 .data = &stm32_usart_driver,
1949};
1950
1951#define STM32_SERIAL_CONSOLE (&stm32_console)
1952
1953#else
1954#define STM32_SERIAL_CONSOLE NULL
1955#endif /* CONFIG_SERIAL_STM32_CONSOLE */
1956
1957#ifdef CONFIG_SERIAL_EARLYCON
1958static void early_stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
1959{
1960 struct stm32_usart_info *info = port->private_data;
1961
1962 while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE))
1963 cpu_relax();
1964
1965 writel_relaxed(ch, port->membase + info->ofs.tdr);
1966}
1967
1968static void early_stm32_serial_write(struct console *console, const char *s, unsigned int count)
1969{
1970 struct earlycon_device *device = console->data;
1971 struct uart_port *port = &device->port;
1972
1973 uart_console_write(port, s, count, early_stm32_usart_console_putchar);
1974}
1975
1976static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options)
1977{
1978 if (!(device->port.membase || device->port.iobase))
1979 return -ENODEV;
1980 device->port.private_data = &stm32h7_info;
1981 device->con->write = early_stm32_serial_write;
1982 return 0;
1983}
1984
1985static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options)
1986{
1987 if (!(device->port.membase || device->port.iobase))
1988 return -ENODEV;
1989 device->port.private_data = &stm32f7_info;
1990 device->con->write = early_stm32_serial_write;
1991 return 0;
1992}
1993
1994static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options)
1995{
1996 if (!(device->port.membase || device->port.iobase))
1997 return -ENODEV;
1998 device->port.private_data = &stm32f4_info;
1999 device->con->write = early_stm32_serial_write;
2000 return 0;
2001}
2002
2003OF_EARLYCON_DECLARE(stm32, "st,stm32h7-uart", early_stm32_h7_serial_setup);
2004OF_EARLYCON_DECLARE(stm32, "st,stm32f7-uart", early_stm32_f7_serial_setup);
2005OF_EARLYCON_DECLARE(stm32, "st,stm32-uart", early_stm32_f4_serial_setup);
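
/*
 * These early consoles are typically selected through the devicetree /chosen
 * "stdout-path" property together with the "earlycon" kernel parameter.
 */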
2006#endif /* CONFIG_SERIAL_EARLYCON */
2007
2008static struct uart_driver stm32_usart_driver = {
2009 .driver_name = DRIVER_NAME,
2010 .dev_name = STM32_SERIAL_NAME,
2011 .major = 0,
2012 .minor = 0,
2013 .nr = STM32_MAX_PORTS,
2014 .cons = STM32_SERIAL_CONSOLE,
2015};
2016
2017static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
2018 bool enable)
2019{
2020 struct stm32_port *stm32_port = to_stm32_port(port);
2021 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
2022 struct tty_port *tport = &port->state->port;
2023 int ret;
2024 unsigned int size = 0;
2025 unsigned long flags;
2026
2027 if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
2028 return 0;
2029
2030 /*
2031	 * Enable low-power wake-up and the wake-up irq if "enable" is set,
2032	 * disable them otherwise
2033 */
2034 if (enable) {
2035 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
2036 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
2037 mctrl_gpio_enable_irq_wake(stm32_port->gpios);
2038
2039 /*
2040 * When DMA is used for reception, it must be disabled before
2041 * entering low-power mode and re-enabled when exiting from
2042 * low-power mode.
2043 */
2044 if (stm32_port->rx_ch) {
2045 uart_port_lock_irqsave(port, &flags);
2046 /* Poll data from DMA RX buffer if any */
2047 if (!stm32_usart_rx_dma_pause(stm32_port))
2048 size += stm32_usart_receive_chars(port, true);
2049 stm32_usart_rx_dma_terminate(stm32_port);
2050 uart_unlock_and_check_sysrq_irqrestore(port, flags);
2051 if (size)
2052 tty_flip_buffer_push(tport);
2053 }
2054
2055 /* Poll data from RX FIFO if any */
2056 stm32_usart_receive_chars(port, false);
2057 } else {
2058 if (stm32_port->rx_ch) {
2059 ret = stm32_usart_rx_dma_start_or_resume(port);
2060 if (ret)
2061 return ret;
2062 }
2063 mctrl_gpio_disable_irq_wake(stm32_port->gpios);
2064 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
2065 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
2066 }
2067
2068 return 0;
2069}
2070
2071static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
2072{
2073 struct uart_port *port = dev_get_drvdata(dev);
2074 int ret;
2075
2076 uart_suspend_port(&stm32_usart_driver, port);
2077
2078 if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
2079 ret = stm32_usart_serial_en_wakeup(port, true);
2080 if (ret)
2081 return ret;
2082 }
2083
2084 /*
2085 * When "no_console_suspend" is enabled, keep the pinctrl default state
2086	 * and rely on the bootloader stage to restore this state upon resume.
2087 * Otherwise, apply the idle or sleep states depending on wakeup
2088 * capabilities.
2089 */
2090 if (console_suspend_enabled || !uart_console(port)) {
2091 if (device_may_wakeup(dev) || device_wakeup_path(dev))
2092 pinctrl_pm_select_idle_state(dev);
2093 else
2094 pinctrl_pm_select_sleep_state(dev);
2095 }
2096
2097 return 0;
2098}
2099
2100static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
2101{
2102 struct uart_port *port = dev_get_drvdata(dev);
2103 int ret;
2104
2105 pinctrl_pm_select_default_state(dev);
2106
2107 if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
2108 ret = stm32_usart_serial_en_wakeup(port, false);
2109 if (ret)
2110 return ret;
2111 }
2112
2113 return uart_resume_port(&stm32_usart_driver, port);
2114}
2115
2116static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
2117{
2118 struct uart_port *port = dev_get_drvdata(dev);
2119 struct stm32_port *stm32port = container_of(port,
2120 struct stm32_port, port);
2121
2122 clk_disable_unprepare(stm32port->clk);
2123
2124 return 0;
2125}
2126
2127static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
2128{
2129 struct uart_port *port = dev_get_drvdata(dev);
2130 struct stm32_port *stm32port = container_of(port,
2131 struct stm32_port, port);
2132
2133 return clk_prepare_enable(stm32port->clk);
2134}
2135
2136static const struct dev_pm_ops stm32_serial_pm_ops = {
2137 SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
2138 stm32_usart_runtime_resume, NULL)
2139 SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
2140 stm32_usart_serial_resume)
2141};
2142
2143static struct platform_driver stm32_serial_driver = {
2144 .probe = stm32_usart_serial_probe,
2145 .remove_new = stm32_usart_serial_remove,
2146 .driver = {
2147 .name = DRIVER_NAME,
2148 .pm = &stm32_serial_pm_ops,
2149 .of_match_table = of_match_ptr(stm32_match),
2150 },
2151};
2152
2153static int __init stm32_usart_init(void)
2154{
2155 static char banner[] __initdata = "STM32 USART driver initialized";
2156 int ret;
2157
2158 pr_info("%s\n", banner);
2159
2160 ret = uart_register_driver(&stm32_usart_driver);
2161 if (ret)
2162 return ret;
2163
2164 ret = platform_driver_register(&stm32_serial_driver);
2165 if (ret)
2166 uart_unregister_driver(&stm32_usart_driver);
2167
2168 return ret;
2169}
2170
2171static void __exit stm32_usart_exit(void)
2172{
2173 platform_driver_unregister(&stm32_serial_driver);
2174 uart_unregister_driver(&stm32_usart_driver);
2175}
2176
2177module_init(stm32_usart_init);
2178module_exit(stm32_usart_exit);
2179
2180MODULE_ALIAS("platform:" DRIVER_NAME);
2181MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
2182MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) Maxime Coquelin 2015
4 * Copyright (C) STMicroelectronics SA 2017
5 * Authors: Maxime Coquelin <mcoquelin.stm32@gmail.com>
6 * Gerald Baeza <gerald.baeza@foss.st.com>
7 * Erwan Le Ray <erwan.leray@foss.st.com>
8 *
9 * Inspired by st-asc.c from STMicroelectronics (c)
10 */
11
12#include <linux/bitfield.h>
13#include <linux/clk.h>
14#include <linux/console.h>
15#include <linux/delay.h>
16#include <linux/dma-direction.h>
17#include <linux/dmaengine.h>
18#include <linux/dma-mapping.h>
19#include <linux/io.h>
20#include <linux/iopoll.h>
21#include <linux/irq.h>
22#include <linux/module.h>
23#include <linux/of.h>
24#include <linux/of_platform.h>
25#include <linux/pinctrl/consumer.h>
26#include <linux/platform_device.h>
27#include <linux/pm_runtime.h>
28#include <linux/pm_wakeirq.h>
29#include <linux/serial_core.h>
30#include <linux/serial.h>
31#include <linux/spinlock.h>
32#include <linux/sysrq.h>
33#include <linux/tty_flip.h>
34#include <linux/tty.h>
35
36#include "serial_mctrl_gpio.h"
37#include "stm32-usart.h"
38
39
40/* Register offsets */
41static struct stm32_usart_info __maybe_unused stm32f4_info = {
42 .ofs = {
43 .isr = 0x00,
44 .rdr = 0x04,
45 .tdr = 0x04,
46 .brr = 0x08,
47 .cr1 = 0x0c,
48 .cr2 = 0x10,
49 .cr3 = 0x14,
50 .gtpr = 0x18,
51 .rtor = UNDEF_REG,
52 .rqr = UNDEF_REG,
53 .icr = UNDEF_REG,
54 .presc = UNDEF_REG,
55 .hwcfgr1 = UNDEF_REG,
56 },
57 .cfg = {
58 .uart_enable_bit = 13,
59 .has_7bits_data = false,
60 }
61};
62
63static struct stm32_usart_info __maybe_unused stm32f7_info = {
64 .ofs = {
65 .cr1 = 0x00,
66 .cr2 = 0x04,
67 .cr3 = 0x08,
68 .brr = 0x0c,
69 .gtpr = 0x10,
70 .rtor = 0x14,
71 .rqr = 0x18,
72 .isr = 0x1c,
73 .icr = 0x20,
74 .rdr = 0x24,
75 .tdr = 0x28,
76 .presc = UNDEF_REG,
77 .hwcfgr1 = UNDEF_REG,
78 },
79 .cfg = {
80 .uart_enable_bit = 0,
81 .has_7bits_data = true,
82 .has_swap = true,
83 }
84};
85
86static struct stm32_usart_info __maybe_unused stm32h7_info = {
87 .ofs = {
88 .cr1 = 0x00,
89 .cr2 = 0x04,
90 .cr3 = 0x08,
91 .brr = 0x0c,
92 .gtpr = 0x10,
93 .rtor = 0x14,
94 .rqr = 0x18,
95 .isr = 0x1c,
96 .icr = 0x20,
97 .rdr = 0x24,
98 .tdr = 0x28,
99 .presc = 0x2c,
100 .hwcfgr1 = 0x3f0,
101 },
102 .cfg = {
103 .uart_enable_bit = 0,
104 .has_7bits_data = true,
105 .has_swap = true,
106 .has_wakeup = true,
107 .has_fifo = true,
108 }
109};
110
111static void stm32_usart_stop_tx(struct uart_port *port);
112static void stm32_usart_transmit_chars(struct uart_port *port);
113static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch);
114
115static inline struct stm32_port *to_stm32_port(struct uart_port *port)
116{
117 return container_of(port, struct stm32_port, port);
118}
119
120static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits)
121{
122 u32 val;
123
124 val = readl_relaxed(port->membase + reg);
125 val |= bits;
126 writel_relaxed(val, port->membase + reg);
127}
128
129static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
130{
131 u32 val;
132
133 val = readl_relaxed(port->membase + reg);
134 val &= ~bits;
135 writel_relaxed(val, port->membase + reg);
136}
137
138static unsigned int stm32_usart_tx_empty(struct uart_port *port)
139{
140 struct stm32_port *stm32_port = to_stm32_port(port);
141 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
142
143 if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
144 return TIOCSER_TEMT;
145
146 return 0;
147}
148
149static void stm32_usart_rs485_rts_enable(struct uart_port *port)
150{
151 struct stm32_port *stm32_port = to_stm32_port(port);
152 struct serial_rs485 *rs485conf = &port->rs485;
153
154 if (stm32_port->hw_flow_control ||
155 !(rs485conf->flags & SER_RS485_ENABLED))
156 return;
157
158 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
159 mctrl_gpio_set(stm32_port->gpios,
160 stm32_port->port.mctrl | TIOCM_RTS);
161 } else {
162 mctrl_gpio_set(stm32_port->gpios,
163 stm32_port->port.mctrl & ~TIOCM_RTS);
164 }
165}
166
167static void stm32_usart_rs485_rts_disable(struct uart_port *port)
168{
169 struct stm32_port *stm32_port = to_stm32_port(port);
170 struct serial_rs485 *rs485conf = &port->rs485;
171
172 if (stm32_port->hw_flow_control ||
173 !(rs485conf->flags & SER_RS485_ENABLED))
174 return;
175
176 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
177 mctrl_gpio_set(stm32_port->gpios,
178 stm32_port->port.mctrl & ~TIOCM_RTS);
179 } else {
180 mctrl_gpio_set(stm32_port->gpios,
181 stm32_port->port.mctrl | TIOCM_RTS);
182 }
183}
184
185static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
186 u32 delay_DDE, u32 baud)
187{
188 u32 rs485_deat_dedt;
189 u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT);
190 bool over8;
191
192 *cr3 |= USART_CR3_DEM;
193 over8 = *cr1 & USART_CR1_OVER8;
194
195 *cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
196
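	/*
	 * DEAT/DEDT are expressed in sample-time units (1/8 or 1/16 of a bit,
	 * depending on oversampling), while the requested delays are in ms:
	 * ticks = delay_ms * baud * oversampling / 1000
	 */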
197 if (over8)
198 rs485_deat_dedt = delay_ADE * baud * 8;
199 else
200 rs485_deat_dedt = delay_ADE * baud * 16;
201
202 rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
203 rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
204 rs485_deat_dedt_max : rs485_deat_dedt;
205 rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) &
206 USART_CR1_DEAT_MASK;
207 *cr1 |= rs485_deat_dedt;
208
209 if (over8)
210 rs485_deat_dedt = delay_DDE * baud * 8;
211 else
212 rs485_deat_dedt = delay_DDE * baud * 16;
213
214 rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
215 rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
216 rs485_deat_dedt_max : rs485_deat_dedt;
217 rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) &
218 USART_CR1_DEDT_MASK;
219 *cr1 |= rs485_deat_dedt;
220}
221
222static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *termios,
223 struct serial_rs485 *rs485conf)
224{
225 struct stm32_port *stm32_port = to_stm32_port(port);
226 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
227 const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
228 u32 usartdiv, baud, cr1, cr3;
229 bool over8;
230
231 stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
232
233 if (rs485conf->flags & SER_RS485_ENABLED) {
234 cr1 = readl_relaxed(port->membase + ofs->cr1);
235 cr3 = readl_relaxed(port->membase + ofs->cr3);
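		/*
		 * Recover the current baud rate from BRR so the RS485 DE
		 * assertion/deassertion delays can be recomputed.
		 */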
236 usartdiv = readl_relaxed(port->membase + ofs->brr);
237 usartdiv = usartdiv & GENMASK(15, 0);
238 over8 = cr1 & USART_CR1_OVER8;
239
240 if (over8)
241 usartdiv = usartdiv | (usartdiv & GENMASK(4, 0))
242 << USART_BRR_04_R_SHIFT;
243
244 baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv);
245 stm32_usart_config_reg_rs485(&cr1, &cr3,
246 rs485conf->delay_rts_before_send,
247 rs485conf->delay_rts_after_send,
248 baud);
249
250 if (rs485conf->flags & SER_RS485_RTS_ON_SEND)
251 cr3 &= ~USART_CR3_DEP;
252 else
253 cr3 |= USART_CR3_DEP;
254
255 writel_relaxed(cr3, port->membase + ofs->cr3);
256 writel_relaxed(cr1, port->membase + ofs->cr1);
257
258 if (!port->rs485_rx_during_tx_gpio)
259 rs485conf->flags |= SER_RS485_RX_DURING_TX;
260
261 } else {
262 stm32_usart_clr_bits(port, ofs->cr3,
263 USART_CR3_DEM | USART_CR3_DEP);
264 stm32_usart_clr_bits(port, ofs->cr1,
265 USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
266 }
267
268 stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
269
270 /* Adjust RTS polarity in case it's driven in software */
271 if (stm32_usart_tx_empty(port))
272 stm32_usart_rs485_rts_disable(port);
273 else
274 stm32_usart_rs485_rts_enable(port);
275
276 return 0;
277}
278
279static int stm32_usart_init_rs485(struct uart_port *port,
280 struct platform_device *pdev)
281{
282 struct serial_rs485 *rs485conf = &port->rs485;
283
284 rs485conf->flags = 0;
285 rs485conf->delay_rts_before_send = 0;
286 rs485conf->delay_rts_after_send = 0;
287
288 if (!pdev->dev.of_node)
289 return -ENODEV;
290
291 return uart_get_rs485_mode(port);
292}
293
294static bool stm32_usart_rx_dma_started(struct stm32_port *stm32_port)
295{
296 return stm32_port->rx_ch ? stm32_port->rx_dma_busy : false;
297}
298
299static void stm32_usart_rx_dma_terminate(struct stm32_port *stm32_port)
300{
301 dmaengine_terminate_async(stm32_port->rx_ch);
302 stm32_port->rx_dma_busy = false;
303}
304
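/*
 * Generic pause/resume helper: act on the channel only when the transfer is
 * in the expected dmaengine state, and terminate it through the provided
 * callback if the dmaengine operation fails.
 */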
305static int stm32_usart_dma_pause_resume(struct stm32_port *stm32_port,
306 struct dma_chan *chan,
307 enum dma_status expected_status,
308 int dmaengine_pause_or_resume(struct dma_chan *),
309 bool stm32_usart_xx_dma_started(struct stm32_port *),
310 void stm32_usart_xx_dma_terminate(struct stm32_port *))
311{
312 struct uart_port *port = &stm32_port->port;
313 enum dma_status dma_status;
314 int ret;
315
316 if (!stm32_usart_xx_dma_started(stm32_port))
317 return -EPERM;
318
319 dma_status = dmaengine_tx_status(chan, chan->cookie, NULL);
320 if (dma_status != expected_status)
321 return -EAGAIN;
322
323 ret = dmaengine_pause_or_resume(chan);
324 if (ret) {
325 dev_err(port->dev, "DMA failed with error code: %d\n", ret);
326 stm32_usart_xx_dma_terminate(stm32_port);
327 }
328 return ret;
329}
330
331static int stm32_usart_rx_dma_pause(struct stm32_port *stm32_port)
332{
333 return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
334 DMA_IN_PROGRESS, dmaengine_pause,
335 stm32_usart_rx_dma_started,
336 stm32_usart_rx_dma_terminate);
337}
338
339static int stm32_usart_rx_dma_resume(struct stm32_port *stm32_port)
340{
341 return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
342 DMA_PAUSED, dmaengine_resume,
343 stm32_usart_rx_dma_started,
344 stm32_usart_rx_dma_terminate);
345}
346
347/* Return true when data is pending (in pio mode), and false when no data is pending. */
348static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr)
349{
350 struct stm32_port *stm32_port = to_stm32_port(port);
351 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
352
353 *sr = readl_relaxed(port->membase + ofs->isr);
354 /* Get pending characters in RDR or FIFO */
355 if (*sr & USART_SR_RXNE) {
356 /* Get all pending characters from the RDR or the FIFO when using interrupts */
357 if (!stm32_usart_rx_dma_started(stm32_port))
358 return true;
359
360 /* Handle only RX data errors when using DMA */
361 if (*sr & USART_SR_ERR_MASK)
362 return true;
363 }
364
365 return false;
366}
367
368static u8 stm32_usart_get_char_pio(struct uart_port *port)
369{
370 struct stm32_port *stm32_port = to_stm32_port(port);
371 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
372 unsigned long c;
373
374 c = readl_relaxed(port->membase + ofs->rdr);
375 /* Apply RDR data mask */
376 c &= stm32_port->rdr_mask;
377
378 return c;
379}
380
381static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port)
382{
383 struct stm32_port *stm32_port = to_stm32_port(port);
384 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
385 unsigned int size = 0;
386 u32 sr;
387 u8 c, flag;
388
389 while (stm32_usart_pending_rx_pio(port, &sr)) {
390 sr |= USART_SR_DUMMY_RX;
391 flag = TTY_NORMAL;
392
393 /*
394		 * Status bits have to be cleared before reading the RDR:
395		 * in FIFO mode, reading the RDR pops the next data (if any)
396		 * along with its status bits into the SR. Not doing so leads
397		 * to a misalignment between the RDR and the SR, and clears
398		 * the status bits of the next RX data.
399		 *
400		 * Clear error flags for stm32f7 and stm32h7 compatible
401		 * devices. On stm32f4 compatible devices, the error bit is
402		 * cleared by the [read SR - read DR] sequence.
403 */
404 if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
405 writel_relaxed(sr & USART_SR_ERR_MASK,
406 port->membase + ofs->icr);
407
408 c = stm32_usart_get_char_pio(port);
409 port->icount.rx++;
410 size++;
411 if (sr & USART_SR_ERR_MASK) {
412 if (sr & USART_SR_ORE) {
413 port->icount.overrun++;
414 } else if (sr & USART_SR_PE) {
415 port->icount.parity++;
416 } else if (sr & USART_SR_FE) {
417 /* Break detection if character is null */
418 if (!c) {
419 port->icount.brk++;
420 if (uart_handle_break(port))
421 continue;
422 } else {
423 port->icount.frame++;
424 }
425 }
426
427 sr &= port->read_status_mask;
428
429 if (sr & USART_SR_PE) {
430 flag = TTY_PARITY;
431 } else if (sr & USART_SR_FE) {
432 if (!c)
433 flag = TTY_BREAK;
434 else
435 flag = TTY_FRAME;
436 }
437 }
438
439 if (uart_prepare_sysrq_char(port, c))
440 continue;
441 uart_insert_char(port, sr, USART_SR_ORE, c, flag);
442 }
443
444 return size;
445}
446
447static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size)
448{
449 struct stm32_port *stm32_port = to_stm32_port(port);
450 struct tty_port *ttyport = &stm32_port->port.state->port;
451 unsigned char *dma_start;
452 int dma_count, i;
453
454 dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res);
455
456 /*
457	 * Apply rdr_mask to the buffer in order to mask the parity bit.
458	 * This loop is useless in cs8 mode because DMA copies only
459	 * 8 bits and already ignores the parity bit.
460 */
461 if (!(stm32_port->rdr_mask == (BIT(8) - 1)))
462 for (i = 0; i < dma_size; i++)
463 *(dma_start + i) &= stm32_port->rdr_mask;
464
465 dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size);
466 port->icount.rx += dma_count;
467 if (dma_count != dma_size)
468 port->icount.buf_overrun++;
469 stm32_port->last_res -= dma_count;
470 if (stm32_port->last_res == 0)
471 stm32_port->last_res = RX_BUF_L;
472}
473
474static unsigned int stm32_usart_receive_chars_dma(struct uart_port *port)
475{
476 struct stm32_port *stm32_port = to_stm32_port(port);
477 unsigned int dma_size, size = 0;
478
479 /* DMA buffer is configured in cyclic mode and handles the rollback of the buffer. */
480 if (stm32_port->rx_dma_state.residue > stm32_port->last_res) {
481 /* Conditional first part: from last_res to end of DMA buffer */
482 dma_size = stm32_port->last_res;
483 stm32_usart_push_buffer_dma(port, dma_size);
484 size = dma_size;
485 }
486
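	/*
	 * Remaining linear part: data newly written by the DMA, between the
	 * previous read position and the current DMA position given by the
	 * residue.
	 */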
487 dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue;
488 stm32_usart_push_buffer_dma(port, dma_size);
489 size += dma_size;
490
491 return size;
492}
493
494static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force_dma_flush)
495{
496 struct stm32_port *stm32_port = to_stm32_port(port);
497 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
498 enum dma_status rx_dma_status;
499 u32 sr;
500 unsigned int size = 0;
501
502 if (stm32_usart_rx_dma_started(stm32_port) || force_dma_flush) {
503 rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
504 stm32_port->rx_ch->cookie,
505 &stm32_port->rx_dma_state);
506 if (rx_dma_status == DMA_IN_PROGRESS ||
507 rx_dma_status == DMA_PAUSED) {
508 /* Empty DMA buffer */
509 size = stm32_usart_receive_chars_dma(port);
510 sr = readl_relaxed(port->membase + ofs->isr);
511 if (sr & USART_SR_ERR_MASK) {
512 /* Disable DMA request line */
513 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
514
515 /* Switch to PIO mode to handle the errors */
516 size += stm32_usart_receive_chars_pio(port);
517
518 /* Switch back to DMA mode */
519 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
520 }
521 } else {
522 /* Disable RX DMA */
523 stm32_usart_rx_dma_terminate(stm32_port);
524 /* Fall back to interrupt mode */
525 dev_dbg(port->dev, "DMA error, fallback to irq mode\n");
526 size = stm32_usart_receive_chars_pio(port);
527 }
528 } else {
529 size = stm32_usart_receive_chars_pio(port);
530 }
531
532 return size;
533}
534
535static void stm32_usart_rx_dma_complete(void *arg)
536{
537 struct uart_port *port = arg;
538 struct tty_port *tport = &port->state->port;
539 unsigned int size;
540 unsigned long flags;
541
542 uart_port_lock_irqsave(port, &flags);
543 size = stm32_usart_receive_chars(port, false);
544 uart_unlock_and_check_sysrq_irqrestore(port, flags);
545 if (size)
546 tty_flip_buffer_push(tport);
547}
548
549static int stm32_usart_rx_dma_start_or_resume(struct uart_port *port)
550{
551 struct stm32_port *stm32_port = to_stm32_port(port);
552 struct dma_async_tx_descriptor *desc;
553 enum dma_status rx_dma_status;
554 int ret;
555
556 if (stm32_port->throttled)
557 return 0;
558
559 if (stm32_port->rx_dma_busy) {
560 rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
561 stm32_port->rx_ch->cookie,
562 NULL);
563 if (rx_dma_status == DMA_IN_PROGRESS)
564 return 0;
565
566 if (rx_dma_status == DMA_PAUSED && !stm32_usart_rx_dma_resume(stm32_port))
567 return 0;
568
569		dev_err(port->dev, "DMA failed: status error\n");
570 stm32_usart_rx_dma_terminate(stm32_port);
571 }
572
573 stm32_port->rx_dma_busy = true;
574
575 stm32_port->last_res = RX_BUF_L;
576 /* Prepare a DMA cyclic transaction */
577 desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch,
578 stm32_port->rx_dma_buf,
579 RX_BUF_L, RX_BUF_P,
580 DMA_DEV_TO_MEM,
581 DMA_PREP_INTERRUPT);
582 if (!desc) {
583 dev_err(port->dev, "rx dma prep cyclic failed\n");
584 stm32_port->rx_dma_busy = false;
585 return -ENODEV;
586 }
587
588 desc->callback = stm32_usart_rx_dma_complete;
589 desc->callback_param = port;
590
591 /* Push current DMA transaction in the pending queue */
592 ret = dma_submit_error(dmaengine_submit(desc));
593 if (ret) {
594 dmaengine_terminate_sync(stm32_port->rx_ch);
595 stm32_port->rx_dma_busy = false;
596 return ret;
597 }
598
599 /* Issue pending DMA requests */
600 dma_async_issue_pending(stm32_port->rx_ch);
601
602 return 0;
603}
604
605static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port)
606{
607 dmaengine_terminate_async(stm32_port->tx_ch);
608 stm32_port->tx_dma_busy = false;
609}
610
611static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port)
612{
613 /*
614	 * We cannot use dmaengine_tx_status() to know the DMA status, as it
615	 * does not tell whether the "dma complete" callback of the DMA
616	 * transaction has been called. So we prefer to use the "tx_dma_busy"
617	 * flag to prevent two DMA transactions from running at the
618	 * same time.
619 */
620 return stm32_port->tx_dma_busy;
621}
622
623static int stm32_usart_tx_dma_pause(struct stm32_port *stm32_port)
624{
625 return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch,
626 DMA_IN_PROGRESS, dmaengine_pause,
627 stm32_usart_tx_dma_started,
628 stm32_usart_tx_dma_terminate);
629}
630
631static int stm32_usart_tx_dma_resume(struct stm32_port *stm32_port)
632{
633 return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch,
634 DMA_PAUSED, dmaengine_resume,
635 stm32_usart_tx_dma_started,
636 stm32_usart_tx_dma_terminate);
637}
638
639static void stm32_usart_tx_dma_complete(void *arg)
640{
641 struct uart_port *port = arg;
642 struct stm32_port *stm32port = to_stm32_port(port);
643 unsigned long flags;
644
645 stm32_usart_tx_dma_terminate(stm32port);
646
647 /* Let's see if we have pending data to send */
648 uart_port_lock_irqsave(port, &flags);
649 stm32_usart_transmit_chars(port);
650 uart_port_unlock_irqrestore(port, flags);
651}
652
653static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
654{
655 struct stm32_port *stm32_port = to_stm32_port(port);
656 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
657
658 /*
659	 * Enable the TX FIFO threshold irq when the FIFO is enabled,
660	 * or the TX empty irq when the FIFO is disabled
661 */
662 if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
663 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
664 else
665 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
666}
667
668static void stm32_usart_tc_interrupt_enable(struct uart_port *port)
669{
670 struct stm32_port *stm32_port = to_stm32_port(port);
671 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
672
673 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE);
674}
675
676static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
677{
678 struct stm32_port *stm32_port = to_stm32_port(port);
679 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
680
681 if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
682 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
683 else
684 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
685}
686
687static void stm32_usart_tc_interrupt_disable(struct uart_port *port)
688{
689 struct stm32_port *stm32_port = to_stm32_port(port);
690 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
691
692 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE);
693}
694
695static void stm32_usart_transmit_chars_pio(struct uart_port *port)
696{
697 struct stm32_port *stm32_port = to_stm32_port(port);
698 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
699 struct circ_buf *xmit = &port->state->xmit;
700
701 while (!uart_circ_empty(xmit)) {
702 /* Check that TDR is empty before filling FIFO */
703 if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
704 break;
705 writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr);
706 uart_xmit_advance(port, 1);
707 }
708
709 /* rely on TXE irq (mask or unmask) for sending remaining data */
710 if (uart_circ_empty(xmit))
711 stm32_usart_tx_interrupt_disable(port);
712 else
713 stm32_usart_tx_interrupt_enable(port);
714}
715
716static void stm32_usart_transmit_chars_dma(struct uart_port *port)
717{
718 struct stm32_port *stm32port = to_stm32_port(port);
719 struct circ_buf *xmit = &port->state->xmit;
720 struct dma_async_tx_descriptor *desc = NULL;
721 unsigned int count;
722 int ret;
723
724 if (stm32_usart_tx_dma_started(stm32port)) {
725 ret = stm32_usart_tx_dma_resume(stm32port);
726 if (ret < 0 && ret != -EAGAIN)
727 goto fallback_err;
728 return;
729 }
730
731 count = uart_circ_chars_pending(xmit);
732
733 if (count > TX_BUF_L)
734 count = TX_BUF_L;
735
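	/*
	 * Copy the circular xmit buffer into the linear DMA bounce buffer,
	 * handling a possible wrap-around.
	 */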
736 if (xmit->tail < xmit->head) {
737 memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count);
738 } else {
739 size_t one = UART_XMIT_SIZE - xmit->tail;
740 size_t two;
741
742 if (one > count)
743 one = count;
744 two = count - one;
745
746 memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one);
747 if (two)
748 memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two);
749 }
750
751 desc = dmaengine_prep_slave_single(stm32port->tx_ch,
752 stm32port->tx_dma_buf,
753 count,
754 DMA_MEM_TO_DEV,
755 DMA_PREP_INTERRUPT);
756
757 if (!desc)
758 goto fallback_err;
759
760 /*
761	 * Set the "tx_dma_busy" flag. It is cleared when
762	 * dmaengine_terminate_async() is called, and it prevents
763	 * transmit_chars_dma() from starting another DMA transaction
764	 * before the callback of the previous one has been called.
765 */
766 stm32port->tx_dma_busy = true;
767
768 desc->callback = stm32_usart_tx_dma_complete;
769 desc->callback_param = port;
770
771	/* Push the current DMA TX transaction into the pending queue */
772	/* DMA not yet started, safe to free resources */
773 ret = dma_submit_error(dmaengine_submit(desc));
774 if (ret) {
775 dev_err(port->dev, "DMA failed with error code: %d\n", ret);
776 stm32_usart_tx_dma_terminate(stm32port);
777 goto fallback_err;
778 }
779
780 /* Issue pending DMA TX requests */
781 dma_async_issue_pending(stm32port->tx_ch);
782
783 uart_xmit_advance(port, count);
784
785 return;
786
787fallback_err:
788 stm32_usart_transmit_chars_pio(port);
789}
790
791static void stm32_usart_transmit_chars(struct uart_port *port)
792{
793 struct stm32_port *stm32_port = to_stm32_port(port);
794 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
795 struct circ_buf *xmit = &port->state->xmit;
796 u32 isr;
797 int ret;
798
799 if (!stm32_port->hw_flow_control &&
800 port->rs485.flags & SER_RS485_ENABLED &&
801 (port->x_char ||
802 !(uart_circ_empty(xmit) || uart_tx_stopped(port)))) {
803 stm32_usart_tc_interrupt_disable(port);
804 stm32_usart_rs485_rts_enable(port);
805 }
806
807 if (port->x_char) {
808 /* dma terminate may have been called in case of dma pause failure */
809 stm32_usart_tx_dma_pause(stm32_port);
810
811 /* Check that TDR is empty before filling FIFO */
812 ret =
813 readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
814 isr,
815 (isr & USART_SR_TXE),
816 10, 1000);
817 if (ret)
818 dev_warn(port->dev, "1 character may be erased\n");
819
820 writel_relaxed(port->x_char, port->membase + ofs->tdr);
821 port->x_char = 0;
822 port->icount.tx++;
823
824 /* dma terminate may have been called in case of dma resume failure */
825 stm32_usart_tx_dma_resume(stm32_port);
826 return;
827 }
828
829 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
830 stm32_usart_tx_interrupt_disable(port);
831 return;
832 }
833
834 if (ofs->icr == UNDEF_REG)
835 stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC);
836 else
837 writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);
838
839 if (stm32_port->tx_ch)
840 stm32_usart_transmit_chars_dma(port);
841 else
842 stm32_usart_transmit_chars_pio(port);
843
844 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
845 uart_write_wakeup(port);
846
847 if (uart_circ_empty(xmit)) {
848 stm32_usart_tx_interrupt_disable(port);
849 if (!stm32_port->hw_flow_control &&
850 port->rs485.flags & SER_RS485_ENABLED) {
851 stm32_usart_tc_interrupt_enable(port);
852 }
853 }
854}
855
856static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
857{
858 struct uart_port *port = ptr;
859 struct tty_port *tport = &port->state->port;
860 struct stm32_port *stm32_port = to_stm32_port(port);
861 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
862 u32 sr;
863 unsigned int size;
864 irqreturn_t ret = IRQ_NONE;
865
866 sr = readl_relaxed(port->membase + ofs->isr);
867
868 if (!stm32_port->hw_flow_control &&
869 port->rs485.flags & SER_RS485_ENABLED &&
870 (sr & USART_SR_TC)) {
871 stm32_usart_tc_interrupt_disable(port);
872 stm32_usart_rs485_rts_disable(port);
873 ret = IRQ_HANDLED;
874 }
875
876 if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) {
877 writel_relaxed(USART_ICR_RTOCF,
878 port->membase + ofs->icr);
879 ret = IRQ_HANDLED;
880 }
881
882 if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
883 /* Clear wake up flag and disable wake up interrupt */
884 writel_relaxed(USART_ICR_WUCF,
885 port->membase + ofs->icr);
886 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
887 if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
888 pm_wakeup_event(tport->tty->dev, 0);
889 ret = IRQ_HANDLED;
890 }
891
892 /*
893	 * RX errors in DMA mode have to be handled ASAP to avoid an overrun, as the DMA request
894	 * line has been masked by hardware and RX data is stacking up in the FIFO.
895 */
896 if (!stm32_port->throttled) {
897 if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_started(stm32_port)) ||
898 ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_started(stm32_port))) {
899 uart_port_lock(port);
900 size = stm32_usart_receive_chars(port, false);
901 uart_unlock_and_check_sysrq(port);
902 if (size)
903 tty_flip_buffer_push(tport);
904 ret = IRQ_HANDLED;
905 }
906 }
907
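	/*
	 * Transmit data register empty (or TX FIFO not full): send more data
	 * in PIO mode; DMA TX is driven from its completion callback instead.
	 */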
908 if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
909 uart_port_lock(port);
910 stm32_usart_transmit_chars(port);
911 uart_port_unlock(port);
912 ret = IRQ_HANDLED;
913 }
914
915 /* Receiver timeout irq for DMA RX */
916 if (stm32_usart_rx_dma_started(stm32_port) && !stm32_port->throttled) {
917 uart_port_lock(port);
918 size = stm32_usart_receive_chars(port, false);
919 uart_unlock_and_check_sysrq(port);
920 if (size)
921 tty_flip_buffer_push(tport);
922 ret = IRQ_HANDLED;
923 }
924
925 return ret;
926}
927
928static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
929{
930 struct stm32_port *stm32_port = to_stm32_port(port);
931 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
932
933 if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
934 stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE);
935 else
936 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE);
937
938 mctrl_gpio_set(stm32_port->gpios, mctrl);
939}
940
941static unsigned int stm32_usart_get_mctrl(struct uart_port *port)
942{
943 struct stm32_port *stm32_port = to_stm32_port(port);
944 unsigned int ret;
945
946 /* This routine is used to get signals of: DCD, DSR, RI, and CTS */
947 ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
948
949 return mctrl_gpio_get(stm32_port->gpios, &ret);
950}
951
952static void stm32_usart_enable_ms(struct uart_port *port)
953{
954 mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
955}
956
957static void stm32_usart_disable_ms(struct uart_port *port)
958{
959 mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
960}
961
962/* Transmit stop */
963static void stm32_usart_stop_tx(struct uart_port *port)
964{
965 struct stm32_port *stm32_port = to_stm32_port(port);
966
967 stm32_usart_tx_interrupt_disable(port);
968
969 /* dma terminate may have been called in case of dma pause failure */
970 stm32_usart_tx_dma_pause(stm32_port);
971
972 stm32_usart_rs485_rts_disable(port);
973}
974
975/* There are probably characters waiting to be transmitted. */
976static void stm32_usart_start_tx(struct uart_port *port)
977{
978 struct circ_buf *xmit = &port->state->xmit;
979
980 if (uart_circ_empty(xmit) && !port->x_char) {
981 stm32_usart_rs485_rts_disable(port);
982 return;
983 }
984
985 stm32_usart_rs485_rts_enable(port);
986
987 stm32_usart_transmit_chars(port);
988}
989
990/* Flush the transmit buffer. */
991static void stm32_usart_flush_buffer(struct uart_port *port)
992{
993 struct stm32_port *stm32_port = to_stm32_port(port);
994
995 if (stm32_port->tx_ch)
996 stm32_usart_tx_dma_terminate(stm32_port);
997}
998
999/* Throttle the remote when input buffer is about to overflow. */
1000static void stm32_usart_throttle(struct uart_port *port)
1001{
1002 struct stm32_port *stm32_port = to_stm32_port(port);
1003 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1004 unsigned long flags;
1005
1006 uart_port_lock_irqsave(port, &flags);
1007
1008 /*
1009 * Pause DMA transfer, so the RX data gets queued into the FIFO.
1010 * Hardware flow control is triggered when RX FIFO is full.
1011 */
1012 stm32_usart_rx_dma_pause(stm32_port);
1013
1014 stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
1015 if (stm32_port->cr3_irq)
1016 stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
1017
1018 stm32_port->throttled = true;
1019 uart_port_unlock_irqrestore(port, flags);
1020}
1021
1022/* Unthrottle the remote, the input buffer can now accept data. */
1023static void stm32_usart_unthrottle(struct uart_port *port)
1024{
1025 struct stm32_port *stm32_port = to_stm32_port(port);
1026 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1027 unsigned long flags;
1028
1029 uart_port_lock_irqsave(port, &flags);
1030 stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
1031 if (stm32_port->cr3_irq)
1032 stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
1033
1034 stm32_port->throttled = false;
1035
1036 /*
1037 * Switch back to DMA mode (resume DMA).
1038 * Hardware flow control is stopped when FIFO is not full any more.
1039 */
1040 if (stm32_port->rx_ch)
1041 stm32_usart_rx_dma_start_or_resume(port);
1042
1043 uart_port_unlock_irqrestore(port, flags);
1044}
1045
1046/* Receive stop */
1047static void stm32_usart_stop_rx(struct uart_port *port)
1048{
1049 struct stm32_port *stm32_port = to_stm32_port(port);
1050 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1051
1052 /* Disable DMA request line. */
1053 stm32_usart_rx_dma_pause(stm32_port);
1054
1055 stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
1056 if (stm32_port->cr3_irq)
1057 stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
1058}
1059
1060static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
1061{
1062 struct stm32_port *stm32_port = to_stm32_port(port);
1063 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1064 unsigned long flags;
1065
1066 spin_lock_irqsave(&port->lock, flags);
1067
1068 if (break_state)
1069 stm32_usart_set_bits(port, ofs->rqr, USART_RQR_SBKRQ);
1070 else
1071 stm32_usart_clr_bits(port, ofs->rqr, USART_RQR_SBKRQ);
1072
1073 spin_unlock_irqrestore(&port->lock, flags);
1074}
1075
1076static int stm32_usart_startup(struct uart_port *port)
1077{
1078 struct stm32_port *stm32_port = to_stm32_port(port);
1079 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1080 const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1081 const char *name = to_platform_device(port->dev)->name;
1082 u32 val;
1083 int ret;
1084
1085 ret = request_irq(port->irq, stm32_usart_interrupt,
1086 IRQF_NO_SUSPEND, name, port);
1087 if (ret)
1088 return ret;
1089
1090 if (stm32_port->swap) {
1091 val = readl_relaxed(port->membase + ofs->cr2);
1092 val |= USART_CR2_SWAP;
1093 writel_relaxed(val, port->membase + ofs->cr2);
1094 }
1095 stm32_port->throttled = false;
1096
1097 /* RX FIFO Flush */
1098 if (ofs->rqr != UNDEF_REG)
1099 writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);
1100
1101 if (stm32_port->rx_ch) {
1102 ret = stm32_usart_rx_dma_start_or_resume(port);
1103 if (ret) {
1104 free_irq(port->irq, port);
1105 return ret;
1106 }
1107 }
1108
1109 /* RX enabling */
1110 val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
1111 stm32_usart_set_bits(port, ofs->cr1, val);
1112
1113 return 0;
1114}
1115
1116static void stm32_usart_shutdown(struct uart_port *port)
1117{
1118 struct stm32_port *stm32_port = to_stm32_port(port);
1119 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1120 const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1121 u32 val, isr;
1122 int ret;
1123
1124 if (stm32_usart_tx_dma_started(stm32_port))
1125 stm32_usart_tx_dma_terminate(stm32_port);
1126
1127 if (stm32_port->tx_ch)
1128 stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
1129
1130 /* Disable modem control interrupts */
1131 stm32_usart_disable_ms(port);
1132
1133 val = USART_CR1_TXEIE | USART_CR1_TE;
1134 val |= stm32_port->cr1_irq | USART_CR1_RE;
1135 val |= BIT(cfg->uart_enable_bit);
1136 if (stm32_port->fifoen)
1137 val |= USART_CR1_FIFOEN;
1138
1139 ret = readl_relaxed_poll_timeout(port->membase + ofs->isr,
1140 isr, (isr & USART_SR_TC),
1141 10, 100000);
1142
1143 /* Send the TC error message only when ISR_TC is not set */
1144 if (ret)
1145 dev_err(port->dev, "Transmission is not complete\n");
1146
1147 /* Disable RX DMA. */
1148 if (stm32_port->rx_ch) {
1149 stm32_usart_rx_dma_terminate(stm32_port);
1150 dmaengine_synchronize(stm32_port->rx_ch);
1151 }
1152
1153 /* flush RX & TX FIFO */
1154 if (ofs->rqr != UNDEF_REG)
1155 writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
1156 port->membase + ofs->rqr);
1157
1158 stm32_usart_clr_bits(port, ofs->cr1, val);
1159
1160 free_irq(port->irq, port);
1161}
1162
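/* Prescaler division factors, indexed by the value programmed in the USART_PRESC register */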
1163static const unsigned int stm32_usart_presc_val[] = {1, 2, 4, 6, 8, 10, 12, 16, 32, 64, 128, 256};
1164
1165static void stm32_usart_set_termios(struct uart_port *port,
1166 struct ktermios *termios,
1167 const struct ktermios *old)
1168{
1169 struct stm32_port *stm32_port = to_stm32_port(port);
1170 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1171 const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
1172 struct serial_rs485 *rs485conf = &port->rs485;
1173 unsigned int baud, bits, uart_clk, uart_clk_pres;
1174 u32 usartdiv, mantissa, fraction, oversampling;
1175 tcflag_t cflag = termios->c_cflag;
1176 u32 cr1, cr2, cr3, isr, brr, presc;
1177 unsigned long flags;
1178 int ret;
1179
1180 if (!stm32_port->hw_flow_control)
1181 cflag &= ~CRTSCTS;
1182
1183 uart_clk = clk_get_rate(stm32_port->clk);
1184
1185 baud = uart_get_baud_rate(port, termios, old, 0, uart_clk / 8);
1186
1187 uart_port_lock_irqsave(port, &flags);
1188
1189 ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
1190 isr,
1191 (isr & USART_SR_TC),
1192 10, 100000);
1193
1194 /* Send the TC error message only when ISR_TC is not set. */
1195 if (ret)
1196 dev_err(port->dev, "Transmission is not complete\n");
1197
1198 /* Stop serial port and reset value */
1199 writel_relaxed(0, port->membase + ofs->cr1);
1200
1201 /* flush RX & TX FIFO */
1202 if (ofs->rqr != UNDEF_REG)
1203 writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
1204 port->membase + ofs->rqr);
1205
1206 cr1 = USART_CR1_TE | USART_CR1_RE;
1207 if (stm32_port->fifoen)
1208 cr1 |= USART_CR1_FIFOEN;
1209 cr2 = stm32_port->swap ? USART_CR2_SWAP : 0;
1210
1211 /* Tx and RX FIFO configuration */
1212 cr3 = readl_relaxed(port->membase + ofs->cr3);
1213 cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
1214 if (stm32_port->fifoen) {
1215 if (stm32_port->txftcfg >= 0)
1216 cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT;
1217 if (stm32_port->rxftcfg >= 0)
1218 cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT;
1219 }
1220
1221 if (cflag & CSTOPB)
1222 cr2 |= USART_CR2_STOP_2B;
1223
1224 bits = tty_get_char_size(cflag);
1225 stm32_port->rdr_mask = (BIT(bits) - 1);
1226
1227 if (cflag & PARENB) {
1228 bits++;
1229 cr1 |= USART_CR1_PCE;
1230 }
1231
1232 /*
1233 * Word length configuration:
1234 * CS8 + parity, 9 bits word aka [M1:M0] = 0b01
1235 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10
1236 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
1237 * M0 and M1 already cleared by cr1 initialization.
1238 */
1239 if (bits == 9) {
1240 cr1 |= USART_CR1_M0;
1241 } else if ((bits == 7) && cfg->has_7bits_data) {
1242 cr1 |= USART_CR1_M1;
1243 } else if (bits != 8) {
1244		dev_dbg(port->dev, "Unsupported data bits config: %u bits\n",
1245			bits);
1246 cflag &= ~CSIZE;
1247 cflag |= CS8;
1248 termios->c_cflag = cflag;
1249 bits = 8;
1250 if (cflag & PARENB) {
1251 bits++;
1252 cr1 |= USART_CR1_M0;
1253 }
1254 }
1255
1256 if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
1257 (stm32_port->fifoen &&
1258 stm32_port->rxftcfg >= 0))) {
1259 if (cflag & CSTOPB)
1260 bits = bits + 3; /* 1 start bit + 2 stop bits */
1261 else
1262 bits = bits + 2; /* 1 start bit + 1 stop bit */
1263
1264 /* RX timeout irq to occur after last stop bit + bits */
1265 stm32_port->cr1_irq = USART_CR1_RTOIE;
1266 writel_relaxed(bits, port->membase + ofs->rtor);
1267 cr2 |= USART_CR2_RTOEN;
1268 /*
1269		 * Enable the RX FIFO threshold irq in two cases: when there is no DMA, or when
1270		 * waking up over USART from low power, until the DMA gets re-enabled by resume.
1271 */
1272 stm32_port->cr3_irq = USART_CR3_RXFTIE;
1273 }
1274
1275 cr1 |= stm32_port->cr1_irq;
1276 cr3 |= stm32_port->cr3_irq;
1277
1278 if (cflag & PARODD)
1279 cr1 |= USART_CR1_PS;
1280
1281 port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
1282 if (cflag & CRTSCTS) {
1283 port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
1284 cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
1285 }
1286
1287 for (presc = 0; presc <= USART_PRESC_MAX; presc++) {
1288 uart_clk_pres = DIV_ROUND_CLOSEST(uart_clk, stm32_usart_presc_val[presc]);
1289 usartdiv = DIV_ROUND_CLOSEST(uart_clk_pres, baud);
1290
1291 /*
1292 * The USART supports 16 or 8 times oversampling.
1293 * By default we prefer 16 times oversampling, so that the receiver
1294 * has a better tolerance to clock deviations.
1295 * 8 times oversampling is only used to achieve higher speeds.
1296 */
1297 if (usartdiv < 16) {
1298 oversampling = 8;
1299 cr1 |= USART_CR1_OVER8;
1300 stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
1301 } else {
1302 oversampling = 16;
1303 cr1 &= ~USART_CR1_OVER8;
1304 stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
1305 }
1306
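		/*
		 * Split USARTDIV into the BRR mantissa (integer part) and
		 * fraction (remainder of the oversampling division).
		 */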
1307 mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
1308 fraction = usartdiv % oversampling;
1309 brr = mantissa | fraction;
1310
1311 if (FIELD_FIT(USART_BRR_MASK, brr)) {
1312 if (ofs->presc != UNDEF_REG) {
1313 port->uartclk = uart_clk_pres;
1314 writel_relaxed(presc, port->membase + ofs->presc);
1315 } else if (presc) {
1316 /* We need a prescaler but we don't have it (STM32F4, STM32F7) */
1317 dev_err(port->dev,
1318 "unable to set baudrate, input clock is too high");
1319 }
1320 break;
1321 } else if (presc == USART_PRESC_MAX) {
1322 /* Even with prescaler and brr at max value we can't set baudrate */
1323 dev_err(port->dev, "unable to set baudrate, input clock is too high");
1324 break;
1325 }
1326 }
1327
1328 writel_relaxed(brr, port->membase + ofs->brr);
1329
1330 uart_update_timeout(port, cflag, baud);
1331
1332 port->read_status_mask = USART_SR_ORE;
1333 if (termios->c_iflag & INPCK)
1334 port->read_status_mask |= USART_SR_PE | USART_SR_FE;
1335 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
1336 port->read_status_mask |= USART_SR_FE;
1337
1338 /* Characters to ignore */
1339 port->ignore_status_mask = 0;
1340 if (termios->c_iflag & IGNPAR)
1341 port->ignore_status_mask = USART_SR_PE | USART_SR_FE;
1342 if (termios->c_iflag & IGNBRK) {
1343 port->ignore_status_mask |= USART_SR_FE;
1344 /*
1345 * If we're ignoring parity and break indicators,
1346 * ignore overruns too (for real raw support).
1347 */
1348 if (termios->c_iflag & IGNPAR)
1349 port->ignore_status_mask |= USART_SR_ORE;
1350 }
1351
1352 /* Ignore all characters if CREAD is not set */
1353 if ((termios->c_cflag & CREAD) == 0)
1354 port->ignore_status_mask |= USART_SR_DUMMY_RX;
1355
1356 if (stm32_port->rx_ch) {
1357 /*
1358 * Setup DMA to collect only valid data and enable error irqs.
1359 * This also enables break reception when using DMA.
1360 */
1361 cr1 |= USART_CR1_PEIE;
1362 cr3 |= USART_CR3_EIE;
1363 cr3 |= USART_CR3_DMAR;
1364 cr3 |= USART_CR3_DDRE;
1365 }
1366
1367 if (stm32_port->tx_ch)
1368 cr3 |= USART_CR3_DMAT;
1369
1370 if (rs485conf->flags & SER_RS485_ENABLED) {
1371 stm32_usart_config_reg_rs485(&cr1, &cr3,
1372 rs485conf->delay_rts_before_send,
1373 rs485conf->delay_rts_after_send,
1374 baud);
1375 if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
1376 cr3 &= ~USART_CR3_DEP;
1377 rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
1378 } else {
1379 cr3 |= USART_CR3_DEP;
1380 rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
1381 }
1382
1383 } else {
1384 cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP);
1385 cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
1386 }
1387
1388 /* Configure wake up from low power on start bit detection */
1389 if (stm32_port->wakeup_src) {
1390 cr3 &= ~USART_CR3_WUS_MASK;
1391 cr3 |= USART_CR3_WUS_START_BIT;
1392 }
1393
1394 writel_relaxed(cr3, port->membase + ofs->cr3);
1395 writel_relaxed(cr2, port->membase + ofs->cr2);
1396 writel_relaxed(cr1, port->membase + ofs->cr1);
1397
1398 stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
1399 uart_port_unlock_irqrestore(port, flags);
1400
1401 /* Handle modem control interrupts */
1402 if (UART_ENABLE_MS(port, termios->c_cflag))
1403 stm32_usart_enable_ms(port);
1404 else
1405 stm32_usart_disable_ms(port);
1406}
1407
1408static const char *stm32_usart_type(struct uart_port *port)
1409{
1410 return (port->type == PORT_STM32) ? DRIVER_NAME : NULL;
1411}
1412
1413static void stm32_usart_release_port(struct uart_port *port)
1414{
1415}
1416
1417static int stm32_usart_request_port(struct uart_port *port)
1418{
1419 return 0;
1420}
1421
1422static void stm32_usart_config_port(struct uart_port *port, int flags)
1423{
1424 if (flags & UART_CONFIG_TYPE)
1425 port->type = PORT_STM32;
1426}
1427
1428static int
1429stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser)
1430{
1431 /* No user changeable parameters */
1432 return -EINVAL;
1433}
1434
1435static void stm32_usart_pm(struct uart_port *port, unsigned int state,
1436 unsigned int oldstate)
1437{
1438 struct stm32_port *stm32port = container_of(port,
1439 struct stm32_port, port);
1440 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1441 const struct stm32_usart_config *cfg = &stm32port->info->cfg;
1442 unsigned long flags;
1443
1444 switch (state) {
1445 case UART_PM_STATE_ON:
1446 pm_runtime_get_sync(port->dev);
1447 break;
1448 case UART_PM_STATE_OFF:
1449 uart_port_lock_irqsave(port, &flags);
1450 stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
1451 uart_port_unlock_irqrestore(port, flags);
1452 pm_runtime_put_sync(port->dev);
1453 break;
1454 }
1455}
1456
1457#if defined(CONFIG_CONSOLE_POLL)
1458
1459 /* Callbacks for character polling in debug context (e.g. KGDB). */
1460static int stm32_usart_poll_init(struct uart_port *port)
1461{
1462 struct stm32_port *stm32_port = to_stm32_port(port);
1463
1464 return clk_prepare_enable(stm32_port->clk);
1465}
1466
1467static int stm32_usart_poll_get_char(struct uart_port *port)
1468{
1469 struct stm32_port *stm32_port = to_stm32_port(port);
1470 const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
1471
1472 if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_RXNE))
1473 return NO_POLL_CHAR;
1474
1475 return readl_relaxed(port->membase + ofs->rdr) & stm32_port->rdr_mask;
1476}
1477
1478static void stm32_usart_poll_put_char(struct uart_port *port, unsigned char ch)
1479{
1480 stm32_usart_console_putchar(port, ch);
1481}
1482#endif /* CONFIG_CONSOLE_POLL */
1483
1484static const struct uart_ops stm32_uart_ops = {
1485 .tx_empty = stm32_usart_tx_empty,
1486 .set_mctrl = stm32_usart_set_mctrl,
1487 .get_mctrl = stm32_usart_get_mctrl,
1488 .stop_tx = stm32_usart_stop_tx,
1489 .start_tx = stm32_usart_start_tx,
1490 .throttle = stm32_usart_throttle,
1491 .unthrottle = stm32_usart_unthrottle,
1492 .stop_rx = stm32_usart_stop_rx,
1493 .enable_ms = stm32_usart_enable_ms,
1494 .break_ctl = stm32_usart_break_ctl,
1495 .startup = stm32_usart_startup,
1496 .shutdown = stm32_usart_shutdown,
1497 .flush_buffer = stm32_usart_flush_buffer,
1498 .set_termios = stm32_usart_set_termios,
1499 .pm = stm32_usart_pm,
1500 .type = stm32_usart_type,
1501 .release_port = stm32_usart_release_port,
1502 .request_port = stm32_usart_request_port,
1503 .config_port = stm32_usart_config_port,
1504 .verify_port = stm32_usart_verify_port,
1505#if defined(CONFIG_CONSOLE_POLL)
1506 .poll_init = stm32_usart_poll_init,
1507 .poll_get_char = stm32_usart_poll_get_char,
1508 .poll_put_char = stm32_usart_poll_put_char,
1509#endif /* CONFIG_CONSOLE_POLL */
1510};
1511
1512struct stm32_usart_thresh_ratio {
1513 int mul;
1514 int div;
1515};
1516
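/* Supported FIFO threshold levels, expressed as fractions of the FIFO size (1/8, 1/4, 1/2, 3/4, 7/8, full) */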
1517static const struct stm32_usart_thresh_ratio stm32h7_usart_fifo_thresh_cfg[] = {
1518 {1, 8}, {1, 4}, {1, 2}, {3, 4}, {7, 8}, {1, 1} };
1519
1520static int stm32_usart_get_thresh_value(u32 fifo_size, int index)
1521{
1522 return fifo_size * stm32h7_usart_fifo_thresh_cfg[index].mul /
1523 stm32h7_usart_fifo_thresh_cfg[index].div;
1524}
1525
1526static int stm32_usart_get_ftcfg(struct platform_device *pdev, struct stm32_port *stm32port,
1527 const char *p, int *ftcfg)
1528{
1529 const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
1530 u32 bytes, i, cfg8;
1531 int fifo_size;
1532
1533 if (WARN_ON(ofs->hwcfgr1 == UNDEF_REG))
1534 return 1;
1535
1536 cfg8 = FIELD_GET(USART_HWCFGR1_CFG8,
1537 readl_relaxed(stm32port->port.membase + ofs->hwcfgr1));
1538
1539	/* On STM32H7, HWCFGR1 is not implemented and reads as 0, so fall back to the default FIFO size */
1540 fifo_size = cfg8 ? 1 << cfg8 : STM32H7_USART_FIFO_SIZE;
1541
1542 /* DT option to get RX & TX FIFO threshold (default to half fifo size) */
1543 if (of_property_read_u32(pdev->dev.of_node, p, &bytes))
1544 bytes = fifo_size / 2;
1545
1546 if (bytes < stm32_usart_get_thresh_value(fifo_size, 0)) {
1547 *ftcfg = -EINVAL;
1548 return fifo_size;
1549 }
1550
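	/*
	 * Pick the smallest threshold level from the table that holds at
	 * least the requested number of bytes.
	 */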
1551 for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++) {
1552 if (stm32_usart_get_thresh_value(fifo_size, i) >= bytes)
1553 break;
1554 }
1555 if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg))
1556 i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1;
1557
1558 dev_dbg(&pdev->dev, "%s set to %d/%d bytes\n", p,
1559 stm32_usart_get_thresh_value(fifo_size, i), fifo_size);
1560
1561 *ftcfg = i;
1562 return fifo_size;
1563}

static void stm32_usart_deinit_port(struct stm32_port *stm32port)
{
	clk_disable_unprepare(stm32port->clk);
}

static const struct serial_rs485 stm32_rs485_supported = {
	.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND |
		 SER_RS485_RX_DURING_TX,
	.delay_rts_before_send = 1,
	.delay_rts_after_send = 1,
};

static int stm32_usart_init_port(struct stm32_port *stm32port,
				 struct platform_device *pdev)
{
	struct uart_port *port = &stm32port->port;
	struct resource *res;
	int ret, irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	port->iotype = UPIO_MEM;
	port->flags = UPF_BOOT_AUTOCONF;
	port->ops = &stm32_uart_ops;
	port->dev = &pdev->dev;
	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
	port->irq = irq;
	port->rs485_config = stm32_usart_config_rs485;
	port->rs485_supported = stm32_rs485_supported;

	ret = stm32_usart_init_rs485(port, pdev);
	if (ret)
		return ret;

	stm32port->wakeup_src = stm32port->info->cfg.has_wakeup &&
		of_property_read_bool(pdev->dev.of_node, "wakeup-source");

	stm32port->swap = stm32port->info->cfg.has_swap &&
		of_property_read_bool(pdev->dev.of_node, "rx-tx-swap");

	port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(port->membase))
		return PTR_ERR(port->membase);
	port->mapbase = res->start;

	spin_lock_init(&port->lock);

	stm32port->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(stm32port->clk))
		return PTR_ERR(stm32port->clk);

	/* Ensure that clk rate is correct by enabling the clk */
	ret = clk_prepare_enable(stm32port->clk);
	if (ret)
		return ret;

	stm32port->port.uartclk = clk_get_rate(stm32port->clk);
	if (!stm32port->port.uartclk) {
		ret = -EINVAL;
		goto err_clk;
	}

	stm32port->fifoen = stm32port->info->cfg.has_fifo;
	if (stm32port->fifoen) {
		stm32_usart_get_ftcfg(pdev, stm32port, "rx-threshold", &stm32port->rxftcfg);
		port->fifosize = stm32_usart_get_ftcfg(pdev, stm32port, "tx-threshold",
						       &stm32port->txftcfg);
	} else {
		port->fifosize = 1;
	}

	stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0);
	if (IS_ERR(stm32port->gpios)) {
		ret = PTR_ERR(stm32port->gpios);
		goto err_clk;
	}

	/*
	 * CTS/RTS GPIOs must not be combined with the "st,hw-flow-ctrl"
	 * (deprecated) or "uart-has-rtscts" properties.
	 */
	if (stm32port->hw_flow_control) {
		if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) ||
		    mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) {
			dev_err(&pdev->dev, "Conflicting RTS/CTS config\n");
			ret = -EINVAL;
			goto err_clk;
		}
	}

	return ret;

err_clk:
	clk_disable_unprepare(stm32port->clk);

	return ret;
}
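
/*
 * Illustrative device tree fragment (not from the original source; the
 * &usart2 label and the values are hypothetical) showing the optional
 * properties consumed above:
 *
 *	&usart2 {
 *		rx-tx-swap;
 *		wakeup-source;
 *		rx-threshold = <8>;
 *		tx-threshold = <4>;
 *	};
 */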

static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	int id;

	if (!np)
		return NULL;

	id = of_alias_get_id(np, "serial");
	if (id < 0) {
		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id);
		return NULL;
	}

	if (WARN_ON(id >= STM32_MAX_PORTS))
		return NULL;

	stm32_ports[id].hw_flow_control =
		of_property_read_bool(np, "st,hw-flow-ctrl") /* deprecated */ ||
		of_property_read_bool(np, "uart-has-rtscts");
	stm32_ports[id].port.line = id;
	stm32_ports[id].cr1_irq = USART_CR1_RXNEIE;
	stm32_ports[id].cr3_irq = 0;
	stm32_ports[id].last_res = RX_BUF_L;
	return &stm32_ports[id];
}
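
/*
 * The port number comes from a "serialN" alias; a minimal, hypothetical
 * example in the board device tree:
 *
 *	aliases {
 *		serial0 = &usart2;
 *	};
 */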

#ifdef CONFIG_OF
static const struct of_device_id stm32_match[] = {
	{ .compatible = "st,stm32-uart", .data = &stm32f4_info},
	{ .compatible = "st,stm32f7-uart", .data = &stm32f7_info},
	{ .compatible = "st,stm32h7-uart", .data = &stm32h7_info},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_match);
#endif

static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port,
					 struct platform_device *pdev)
{
	if (stm32port->rx_buf)
		dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf,
				  stm32port->rx_dma_buf);
}

static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
				       struct platform_device *pdev)
{
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct uart_port *port = &stm32port->port;
	struct device *dev = &pdev->dev;
	struct dma_slave_config config;
	int ret;

	stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L,
					       &stm32port->rx_dma_buf,
					       GFP_KERNEL);
	if (!stm32port->rx_buf)
		return -ENOMEM;

	/* Configure DMA channel */
	memset(&config, 0, sizeof(config));
	config.src_addr = port->mapbase + ofs->rdr;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	ret = dmaengine_slave_config(stm32port->rx_ch, &config);
	if (ret < 0) {
		dev_err(dev, "rx dma channel config failed\n");
		stm32_usart_of_dma_rx_remove(stm32port, pdev);
		return ret;
	}

	return 0;
}

static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port,
					 struct platform_device *pdev)
{
	if (stm32port->tx_buf)
		dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf,
				  stm32port->tx_dma_buf);
}

static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
				       struct platform_device *pdev)
{
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct uart_port *port = &stm32port->port;
	struct device *dev = &pdev->dev;
	struct dma_slave_config config;
	int ret;

	stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L,
					       &stm32port->tx_dma_buf,
					       GFP_KERNEL);
	if (!stm32port->tx_buf)
		return -ENOMEM;

	/* Configure DMA channel */
	memset(&config, 0, sizeof(config));
	config.dst_addr = port->mapbase + ofs->tdr;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	ret = dmaengine_slave_config(stm32port->tx_ch, &config);
	if (ret < 0) {
		dev_err(dev, "tx dma channel config failed\n");
		stm32_usart_of_dma_tx_remove(stm32port, pdev);
		return ret;
	}

	return 0;
}
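
/*
 * DMA is optional: the probe below requests channels named "rx" and "tx"
 * via dma_request_chan(), so a board wanting DMA provides matching
 * "dmas"/"dma-names" entries in its device tree node; without them the
 * driver falls back to interrupt mode.
 */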

static int stm32_usart_serial_probe(struct platform_device *pdev)
{
	struct stm32_port *stm32port;
	int ret;

	stm32port = stm32_usart_of_get_port(pdev);
	if (!stm32port)
		return -ENODEV;

	stm32port->info = of_device_get_match_data(&pdev->dev);
	if (!stm32port->info)
		return -EINVAL;

	stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx");
	if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	/* Fall back to interrupt mode for any non-deferral error */
	if (IS_ERR(stm32port->rx_ch))
		stm32port->rx_ch = NULL;

	stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx");
	if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_dma_rx;
	}
	/* Fall back to interrupt mode for any non-deferral error */
	if (IS_ERR(stm32port->tx_ch))
		stm32port->tx_ch = NULL;

	ret = stm32_usart_init_port(stm32port, pdev);
	if (ret)
		goto err_dma_tx;

	if (stm32port->wakeup_src) {
		device_set_wakeup_capable(&pdev->dev, true);
		ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq);
		if (ret)
			goto err_deinit_port;
	}

	if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) {
		/* Fall back to interrupt mode */
		dma_release_channel(stm32port->rx_ch);
		stm32port->rx_ch = NULL;
	}

	if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) {
		/* Fall back to interrupt mode */
		dma_release_channel(stm32port->tx_ch);
		stm32port->tx_ch = NULL;
	}

	if (!stm32port->rx_ch)
		dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n");
	if (!stm32port->tx_ch)
		dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n");

	platform_set_drvdata(pdev, &stm32port->port);

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
	if (ret)
		goto err_port;

	pm_runtime_put_sync(&pdev->dev);

	return 0;

err_port:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	if (stm32port->tx_ch)
		stm32_usart_of_dma_tx_remove(stm32port, pdev);
	if (stm32port->rx_ch)
		stm32_usart_of_dma_rx_remove(stm32port, pdev);

	if (stm32port->wakeup_src)
		dev_pm_clear_wake_irq(&pdev->dev);

err_deinit_port:
	if (stm32port->wakeup_src)
		device_set_wakeup_capable(&pdev->dev, false);

	stm32_usart_deinit_port(stm32port);

err_dma_tx:
	if (stm32port->tx_ch)
		dma_release_channel(stm32port->tx_ch);

err_dma_rx:
	if (stm32port->rx_ch)
		dma_release_channel(stm32port->rx_ch);

	return ret;
}

static void stm32_usart_serial_remove(struct platform_device *pdev)
{
	struct uart_port *port = platform_get_drvdata(pdev);
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	u32 cr3;

	pm_runtime_get_sync(&pdev->dev);
	uart_remove_one_port(&stm32_usart_driver, port);

	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE);

	if (stm32_port->tx_ch) {
		stm32_usart_of_dma_tx_remove(stm32_port, pdev);
		dma_release_channel(stm32_port->tx_ch);
	}

	if (stm32_port->rx_ch) {
		stm32_usart_of_dma_rx_remove(stm32_port, pdev);
		dma_release_channel(stm32_port->rx_ch);
	}

	cr3 = readl_relaxed(port->membase + ofs->cr3);
	cr3 &= ~USART_CR3_EIE;
	cr3 &= ~USART_CR3_DMAR;
	cr3 &= ~USART_CR3_DMAT;
	cr3 &= ~USART_CR3_DDRE;
	writel_relaxed(cr3, port->membase + ofs->cr3);

	if (stm32_port->wakeup_src) {
		dev_pm_clear_wake_irq(&pdev->dev);
		device_init_wakeup(&pdev->dev, false);
	}

	stm32_usart_deinit_port(stm32_port);
}

static void __maybe_unused stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	u32 isr;
	int ret;

	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr, isr,
						(isr & USART_SR_TXE), 100,
						STM32_USART_TIMEOUT_USEC);
	if (ret != 0) {
		dev_err(port->dev, "Error while sending data in UART TX: %d\n", ret);
		return;
	}
	writel_relaxed(ch, port->membase + ofs->tdr);
}

#ifdef CONFIG_SERIAL_STM32_CONSOLE
static void stm32_usart_console_write(struct console *co, const char *s,
				      unsigned int cnt)
{
	struct uart_port *port = &stm32_ports[co->index].port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	unsigned long flags;
	u32 old_cr1, new_cr1;
	int locked = 1;

	if (oops_in_progress)
		locked = uart_port_trylock_irqsave(port, &flags);
	else
		uart_port_lock_irqsave(port, &flags);

	/* Save and disable interrupts, enable the transmitter */
	old_cr1 = readl_relaxed(port->membase + ofs->cr1);
	new_cr1 = old_cr1 & ~USART_CR1_IE_MASK;
	new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit);
	writel_relaxed(new_cr1, port->membase + ofs->cr1);

	uart_console_write(port, s, cnt, stm32_usart_console_putchar);

	/* Restore interrupt state */
	writel_relaxed(old_cr1, port->membase + ofs->cr1);

	if (locked)
		uart_port_unlock_irqrestore(port, flags);
}

static int stm32_usart_console_setup(struct console *co, char *options)
{
	struct stm32_port *stm32port;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	if (co->index >= STM32_MAX_PORTS)
		return -ENODEV;

	stm32port = &stm32_ports[co->index];

	/*
	 * This driver does not support early console initialization
	 * through this path (see the earlycon support further down
	 * instead), so we only expect this to be called during uart
	 * port registration, when the driver gets probed and the port
	 * is already mapped.
	 */
	if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
		return -ENXIO;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(&stm32port->port, co, baud, parity, bits, flow);
}

static struct console stm32_console = {
	.name = STM32_SERIAL_NAME,
	.device = uart_console_device,
	.write = stm32_usart_console_write,
	.setup = stm32_usart_console_setup,
	.flags = CON_PRINTBUFFER,
	.index = -1,
	.data = &stm32_usart_driver,
};
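
/*
 * With this console registered, the port can serve as the system console,
 * e.g. console=ttySTM0,115200n8 on the kernel command line (assuming
 * STM32_SERIAL_NAME expands to "ttySTM").
 */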

#define STM32_SERIAL_CONSOLE (&stm32_console)

#else
#define STM32_SERIAL_CONSOLE NULL
#endif /* CONFIG_SERIAL_STM32_CONSOLE */

#ifdef CONFIG_SERIAL_EARLYCON
static void early_stm32_usart_console_putchar(struct uart_port *port, unsigned char ch)
{
	struct stm32_usart_info *info = port->private_data;

	while (!(readl_relaxed(port->membase + info->ofs.isr) & USART_SR_TXE))
		cpu_relax();

	writel_relaxed(ch, port->membase + info->ofs.tdr);
}

static void early_stm32_serial_write(struct console *console, const char *s, unsigned int count)
{
	struct earlycon_device *device = console->data;
	struct uart_port *port = &device->port;

	uart_console_write(port, s, count, early_stm32_usart_console_putchar);
}

static int __init early_stm32_h7_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32h7_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

static int __init early_stm32_f7_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32f7_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

static int __init early_stm32_f4_serial_setup(struct earlycon_device *device, const char *options)
{
	if (!(device->port.membase || device->port.iobase))
		return -ENODEV;
	device->port.private_data = &stm32f4_info;
	device->con->write = early_stm32_serial_write;
	return 0;
}

OF_EARLYCON_DECLARE(stm32, "st,stm32h7-uart", early_stm32_h7_serial_setup);
OF_EARLYCON_DECLARE(stm32, "st,stm32f7-uart", early_stm32_f7_serial_setup);
OF_EARLYCON_DECLARE(stm32, "st,stm32-uart", early_stm32_f4_serial_setup);
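
/*
 * These declarations let "earlycon" on the kernel command line bring up an
 * early console when /chosen/stdout-path in the device tree points at a node
 * matching one of the compatibles above; the per-SoC setup routine selects
 * the right register layout.
 */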
#endif /* CONFIG_SERIAL_EARLYCON */

static struct uart_driver stm32_usart_driver = {
	.driver_name = DRIVER_NAME,
	.dev_name = STM32_SERIAL_NAME,
	.major = 0,
	.minor = 0,
	.nr = STM32_MAX_PORTS,
	.cons = STM32_SERIAL_CONSOLE,
};

static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
							bool enable)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct tty_port *tport = &port->state->port;
	int ret;
	unsigned int size = 0;
	unsigned long flags;

	if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
		return 0;

	/*
	 * Enable low-power wake-up and the wake-up irq when @enable is true,
	 * disable them otherwise.
	 */
	if (enable) {
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
		mctrl_gpio_enable_irq_wake(stm32_port->gpios);

		/*
		 * When DMA is used for reception, it must be disabled before
		 * entering low-power mode and re-enabled when exiting from
		 * low-power mode.
		 */
		if (stm32_port->rx_ch) {
			uart_port_lock_irqsave(port, &flags);
			/* Poll data from DMA RX buffer if any */
			if (!stm32_usart_rx_dma_pause(stm32_port))
				size += stm32_usart_receive_chars(port, true);
			stm32_usart_rx_dma_terminate(stm32_port);
			uart_unlock_and_check_sysrq_irqrestore(port, flags);
			if (size)
				tty_flip_buffer_push(tport);
		}

		/* Poll data from RX FIFO if any */
		stm32_usart_receive_chars(port, false);
	} else {
		if (stm32_port->rx_ch) {
			ret = stm32_usart_rx_dma_start_or_resume(port);
			if (ret)
				return ret;
		}
		mctrl_gpio_disable_irq_wake(stm32_port->gpios);
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
	}

	return 0;
}

static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int ret;

	uart_suspend_port(&stm32_usart_driver, port);

	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
		ret = stm32_usart_serial_en_wakeup(port, true);
		if (ret)
			return ret;
	}

	/*
	 * When "no_console_suspend" is set, keep the pinctrl default state
	 * and rely on the bootloader to restore it upon resume. Otherwise,
	 * apply the idle or sleep state depending on wakeup capabilities.
	 */
	if (console_suspend_enabled || !uart_console(port)) {
		if (device_may_wakeup(dev) || device_wakeup_path(dev))
			pinctrl_pm_select_idle_state(dev);
		else
			pinctrl_pm_select_sleep_state(dev);
	}

	return 0;
}
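
/*
 * "no_console_suspend" above refers to the standard kernel command line
 * parameter that clears console_suspend_enabled, e.g. (console name assumed):
 *	console=ttySTM0,115200 no_console_suspend
 */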

static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	int ret;

	pinctrl_pm_select_default_state(dev);

	if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
		ret = stm32_usart_serial_en_wakeup(port, false);
		if (ret)
			return ret;
	}

	return uart_resume_port(&stm32_usart_driver, port);
}

static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);

	clk_disable_unprepare(stm32port->clk);

	return 0;
}

static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);

	return clk_prepare_enable(stm32port->clk);
}

static const struct dev_pm_ops stm32_serial_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
			   stm32_usart_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
				stm32_usart_serial_resume)
};

static struct platform_driver stm32_serial_driver = {
	.probe = stm32_usart_serial_probe,
	.remove_new = stm32_usart_serial_remove,
	.driver = {
		.name = DRIVER_NAME,
		.pm = &stm32_serial_pm_ops,
		.of_match_table = of_match_ptr(stm32_match),
	},
};

static int __init stm32_usart_init(void)
{
	static char banner[] __initdata = "STM32 USART driver initialized";
	int ret;

	pr_info("%s\n", banner);

	ret = uart_register_driver(&stm32_usart_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&stm32_serial_driver);
	if (ret)
		uart_unregister_driver(&stm32_usart_driver);

	return ret;
}

static void __exit stm32_usart_exit(void)
{
	platform_driver_unregister(&stm32_serial_driver);
	uart_unregister_driver(&stm32_usart_driver);
}

module_init(stm32_usart_init);
module_exit(stm32_usart_exit);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
MODULE_LICENSE("GPL v2");