Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0+
2 : /*
3 : * 8250_dma.c - DMA Engine API support for 8250.c
4 : *
5 : * Copyright (C) 2013 Intel Corporation
6 : */
7 : #include <linux/tty.h>
8 : #include <linux/tty_flip.h>
9 : #include <linux/serial_reg.h>
10 : #include <linux/dma-mapping.h>
11 :
12 : #include "8250.h"
13 :
/*
 * DMA engine completion callback for a finished Tx transfer.
 *
 * Syncs the Tx buffer ownership back to the CPU, accounts the bytes just
 * sent, wakes up writers waiting for xmit space, and immediately tries to
 * queue the next Tx DMA. If no further DMA was started (error or nothing
 * left to send), re-enables THRI so the remaining/next data goes out via
 * the interrupt-driven PIO path instead.
 */
static void __dma_tx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	struct tty_port *tport = &p->port.state->port;
	unsigned long flags;
	int ret;

	/* Hand the DMA-mapped xmit buffer back to the CPU before touching it */
	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	uart_port_lock_irqsave(&p->port, &flags);

	dma->tx_running = 0;

	/* Consume tx_size bytes from the kfifo and update icount.tx */
	uart_xmit_advance(&p->port, dma->tx_size);

	if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
		uart_write_wakeup(&p->port);

	/* Chain the next transfer; fall back to THRI if none was started */
	ret = serial8250_tx_dma(p);
	if (ret || !dma->tx_running)
		serial8250_set_THRI(p);

	uart_port_unlock_irqrestore(&p->port, flags);
}
40 :
/*
 * Push the data received by a completed (or paused) Rx DMA transfer into
 * the tty flip buffer and clear rx_running.
 *
 * Caller must hold the port lock; called from both the dmaengine callback
 * (via dma_rx_complete()) and serial8250_rx_dma_flush().
 */
static void __dma_rx_complete(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	struct tty_port *tty_port = &p->port.state->port;
	struct dma_tx_state state;
	enum dma_status dma_status;
	int count;

	/*
	 * New DMA Rx can be started during the completion handler before it
	 * could acquire port's lock and it might still be ongoing. Don't do
	 * anything in such case.
	 */
	dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
	if (dma_status == DMA_IN_PROGRESS)
		return;

	/* residue = bytes the transfer did NOT fill of the rx buffer */
	count = dma->rx_size - state.residue;

	tty_insert_flip_string(tty_port, dma->rx_buf, count);
	p->port.icount.rx += count;
	dma->rx_running = 0;

	tty_flip_buffer_push(tty_port);
}
66 :
/*
 * DMA engine completion callback for a finished Rx transfer.
 *
 * Takes the port lock, drains the completed transfer, and restarts Rx DMA
 * if the UART already has more data pending (LSR.DR set), so back-to-back
 * receive bursts are not lost between transfers.
 */
static void dma_rx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	unsigned long flags;

	uart_port_lock_irqsave(&p->port, &flags);
	if (dma->rx_running)
		__dma_rx_complete(p);

	/*
	 * Cannot be combined with the previous check because __dma_rx_complete()
	 * changes dma->rx_running.
	 */
	if (!dma->rx_running && (serial_lsr_in(p) & UART_LSR_DR))
		p->dma->rx_dma(p);
	uart_port_unlock_irqrestore(&p->port, flags);
}
85 :
/*
 * Start a Tx DMA transfer for the data currently in the xmit kfifo.
 *
 * Caller must hold the port lock. If a transfer is already running, only
 * the pending x_char (XON/XOFF) is squeezed out via PIO around a DMA
 * pause/resume and 0 is returned. Returns 0 on success or when there is
 * nothing to send; returns a negative error (and sets dma->tx_err) if the
 * descriptor could not be prepared, so the caller can fall back to PIO.
 */
int serial8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	struct tty_port *tport = &p->port.state->port;
	struct dma_async_tx_descriptor *desc;
	struct uart_port *up = &p->port;
	struct scatterlist *sg;
	struct scatterlist sgl[2];
	int i;
	int ret;

	if (dma->tx_running) {
		if (up->x_char) {
			/* Send the flow-control char via PIO around the DMA */
			dmaengine_pause(dma->txchan);
			uart_xchar_out(up, UART_TX);
			dmaengine_resume(dma->txchan);
		}
		return 0;
	} else if (up->x_char) {
		uart_xchar_out(up, UART_TX);
	}

	if (uart_tx_stopped(&p->port) || kfifo_is_empty(&tport->xmit_fifo)) {
		/* We have been called from __dma_tx_complete() */
		return 0;
	}

	serial8250_do_prepare_tx_dma(p);

	sg_init_table(sgl, ARRAY_SIZE(sgl));

	/*
	 * Map the kfifo contents onto up to two sg entries (two, because the
	 * circular buffer may wrap). ret is the number of entries used.
	 */
	ret = kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, sgl, ARRAY_SIZE(sgl),
					   UART_XMIT_SIZE, dma->tx_addr);

	dma->tx_size = 0;

	for_each_sg(sgl, sg, ret, i)
		dma->tx_size += sg_dma_len(sg);

	desc = dmaengine_prep_slave_sg(dma->txchan, sgl, ret,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	/* Mark running before submit: the callback may fire immediately */
	dma->tx_running = 1;
	desc->callback = __dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	/* Hand the xmit buffer to the device for the duration of the DMA */
	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);
	/* DMA will raise the completion; no THRI interrupts needed meanwhile */
	serial8250_clear_THRI(p);
	dma->tx_err = 0;

	return 0;
err:
	dma->tx_err = 1;
	return ret;
}
151 :
152 0 : void serial8250_tx_dma_flush(struct uart_8250_port *p)
153 : {
154 0 : struct uart_8250_dma *dma = p->dma;
155 :
156 0 : if (!dma->tx_running)
157 0 : return;
158 :
159 : /*
160 : * kfifo_reset() has been called by the serial core, avoid
161 : * advancing and underflowing in __dma_tx_complete().
162 : */
163 0 : dma->tx_size = 0;
164 :
165 0 : dmaengine_terminate_async(dma->txchan);
166 0 : }
167 :
/*
 * Start an Rx DMA transfer into dma->rx_buf.
 *
 * Caller must hold the port lock. A no-op (returning 0) if a transfer is
 * already in flight. Returns -EBUSY if the dmaengine driver cannot
 * prepare a descriptor, letting the caller fall back to PIO Rx.
 */
int serial8250_rx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	struct dma_async_tx_descriptor *desc;

	if (dma->rx_running)
		return 0;

	serial8250_do_prepare_rx_dma(p);

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	/* Mark running before submit: the callback may fire immediately */
	dma->rx_running = 1;
	desc->callback = dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_async_issue_pending(dma->rxchan);

	return 0;
}
194 :
195 0 : void serial8250_rx_dma_flush(struct uart_8250_port *p)
196 : {
197 0 : struct uart_8250_dma *dma = p->dma;
198 :
199 0 : if (dma->rx_running) {
200 0 : dmaengine_pause(dma->rxchan);
201 0 : __dma_rx_complete(p);
202 0 : dmaengine_terminate_async(dma->rxchan);
203 0 : }
204 0 : }
205 : EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);
206 :
/*
 * Acquire and configure the Rx and Tx DMA channels and buffers for a port.
 *
 * Uses dma->rx_dma_addr/tx_dma_addr as the device-side FIFO addresses when
 * set, falling back to the port's mapbase. Verifies the dmaengine driver
 * supports the operations the 8250 Rx/Tx paths rely on (pause/terminate,
 * sub-descriptor residue reporting). Allocates a coherent Rx buffer and
 * streaming-maps the xmit buffer for Tx. On failure, already-acquired
 * channels are released via the goto-cleanup chain and a negative errno
 * is returned; 0 on success.
 */
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
				  dma->rx_dma_addr : p->port.mapbase;
	phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
				  dma->tx_dma_addr : p->port.mapbase;
	dma_cap_mask_t mask;
	struct dma_slave_caps caps;
	int ret;

	/* Default slave configuration parameters */
	dma->rxconf.direction = DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr = rx_dma_addr + UART_RX;

	dma->txconf.direction = DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr = tx_dma_addr + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	/* 8250 rx dma requires dmaengine driver to support pause/terminate */
	ret = dma_get_slave_caps(dma->rxchan, &caps);
	if (ret)
		goto release_rx;
	/*
	 * Descriptor-only residue granularity is useless here:
	 * __dma_rx_complete() needs the byte count of a partial transfer.
	 */
	if (!caps.cmd_pause || !caps.cmd_terminate ||
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
		ret = -EINVAL;
		goto release_rx;
	}

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		ret = -ENODEV;
		goto release_rx;
	}

	/* 8250 tx dma requires dmaengine driver to support terminate */
	ret = dma_get_slave_caps(dma->txchan, &caps);
	if (ret)
		goto err;
	if (!caps.cmd_terminate) {
		ret = -EINVAL;
		goto err;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer: coherent, so no per-transfer sync is needed on Rx */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					 &dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		ret = -ENOMEM;
		goto err;
	}

	/* TX buffer: streaming mapping of the serial core's xmit buffer */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
				      p->port.state->port.xmit_buf,
				      UART_XMIT_SIZE,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
				  dma->rx_buf, dma->rx_addr);
		ret = -ENOMEM;
		goto err;
	}

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
err:
	dma_release_channel(dma->txchan);
release_rx:
	dma_release_channel(dma->rxchan);
	return ret;
}
301 : EXPORT_SYMBOL_GPL(serial8250_request_dma);
302 :
/*
 * Release everything serial8250_request_dma() acquired.
 *
 * Safe to call with p->dma == NULL. Each channel is terminated
 * synchronously before its buffer is freed/unmapped so no transfer can
 * still be touching the memory when it goes away.
 */
void serial8250_release_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (!dma)
		return;

	/* Release RX resources */
	dmaengine_terminate_sync(dma->rxchan);
	dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
			  dma->rx_addr);
	dma_release_channel(dma->rxchan);
	dma->rxchan = NULL;

	/* Release TX resources */
	dmaengine_terminate_sync(dma->txchan);
	dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
			 UART_XMIT_SIZE, DMA_TO_DEVICE);
	dma_release_channel(dma->txchan);
	dma->txchan = NULL;
	dma->tx_running = 0;

	dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
}
327 : EXPORT_SYMBOL_GPL(serial8250_release_dma);
|