// SPDX-License-Identifier: GPL-2.0-only
/*
 * PXA2xx SPI DMA engine support.
 *
 * Copyright (C) 2013, 2021 Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>

#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>

#include "spi-pxa2xx.h"

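/*
 * Completion path shared by the DMA callback and the ROR interrupt
 * handler; @error tells whether the caller has already seen a failure.
 */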
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
                                             bool error)
{
        struct spi_message *msg = drv_data->controller->cur_msg;

        /*
         * It is possible that one CPU is handling the ROR interrupt while
         * the other just gets the DMA completion. Calling pump_transfers()
         * twice for the same transfer leads to problems, thus we prevent
         * concurrent calls by using dma_running.
         */
        if (atomic_dec_and_test(&drv_data->dma_running)) {
                /*
                 * If the other CPU is still handling the ROR interrupt we
                 * might not know about the error yet. So we re-check the
                 * ROR bit here before we clear the status register.
                 */
                if (!error)
                        error = read_SSSR_bits(drv_data, drv_data->mask_sr) & SSSR_ROR;

                /* Clear status & disable interrupts */
                clear_SSCR1_bits(drv_data, drv_data->dma_cr1);
                write_SSSR_CS(drv_data, drv_data->clear_sr);
                if (!pxa25x_ssp_comp(drv_data))
                        pxa2xx_spi_write(drv_data, SSTO, 0);

                if (error) {
                        /* In case we got an error we disable the SSP now */
                        pxa_ssp_disable(drv_data->ssp);
                        msg->status = -EIO;
                }

                spi_finalize_current_transfer(drv_data->controller);
        }
}

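/* DMA engine callback, attached to the RX descriptor in pxa2xx_spi_dma_prepare() */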
static void pxa2xx_spi_dma_callback(void *data)
{
        pxa2xx_spi_dma_transfer_complete(data, false);
}

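/*
 * Build a slave DMA descriptor for one direction of @xfer: the bus width
 * follows the current word size (n_bytes), the device address is the SSP
 * data register (SSDR), and the scatterlist is the one the SPI core mapped
 * for this transfer. Returns NULL when the channel cannot be configured or
 * no descriptor is available.
 */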
static struct dma_async_tx_descriptor *
pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
                           enum dma_transfer_direction dir,
                           struct spi_transfer *xfer)
{
        struct chip_data *chip =
                spi_get_ctldata(drv_data->controller->cur_msg->spi);
        enum dma_slave_buswidth width;
        struct dma_slave_config cfg;
        struct dma_chan *chan;
        struct sg_table *sgt;
        int ret;

        switch (drv_data->n_bytes) {
        case 1:
                width = DMA_SLAVE_BUSWIDTH_1_BYTE;
                break;
        case 2:
                width = DMA_SLAVE_BUSWIDTH_2_BYTES;
                break;
        default:
                width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                break;
        }

        memset(&cfg, 0, sizeof(cfg));
        cfg.direction = dir;

        if (dir == DMA_MEM_TO_DEV) {
                cfg.dst_addr = drv_data->ssp->phys_base + SSDR;
                cfg.dst_addr_width = width;
                cfg.dst_maxburst = chip->dma_burst_size;

                sgt = &xfer->tx_sg;
                chan = drv_data->controller->dma_tx;
        } else {
                cfg.src_addr = drv_data->ssp->phys_base + SSDR;
                cfg.src_addr_width = width;
                cfg.src_maxburst = chip->dma_burst_size;

                sgt = &xfer->rx_sg;
                chan = drv_data->controller->dma_rx;
        }

        ret = dmaengine_slave_config(chan, &cfg);
        if (ret) {
                dev_warn(drv_data->ssp->dev, "DMA slave config failed\n");
                return NULL;
        }

        return dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents, dir,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}

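/*
 * Interrupt handler used while DMA is in flight. The only status we act on
 * here is receiver overrun (ROR), which means DMA did not keep up with the
 * FIFO: terminate both channels and fail the transfer.
 */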
irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
{
        u32 status;

        status = read_SSSR_bits(drv_data, drv_data->mask_sr);
        if (status & SSSR_ROR) {
                dev_err(drv_data->ssp->dev, "FIFO overrun\n");

                dmaengine_terminate_async(drv_data->controller->dma_rx);
                dmaengine_terminate_async(drv_data->controller->dma_tx);

                pxa2xx_spi_dma_transfer_complete(drv_data, true);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

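/*
 * Prepare and submit descriptors for both directions of the transfer.
 * Only the RX descriptor gets a completion callback: TX finishing on its
 * own does not mean the transfer is done, as the SSP is still clocking
 * data into the RX FIFO.
 */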
int pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
                           struct spi_transfer *xfer)
{
        struct dma_async_tx_descriptor *tx_desc, *rx_desc;
        int err;

        tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV, xfer);
        if (!tx_desc) {
                dev_err(drv_data->ssp->dev, "failed to get DMA TX descriptor\n");
                err = -EBUSY;
                goto err_tx;
        }

        rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM, xfer);
        if (!rx_desc) {
                dev_err(drv_data->ssp->dev, "failed to get DMA RX descriptor\n");
                err = -EBUSY;
                goto err_rx;
        }

        /* We are ready when RX completes */
        rx_desc->callback = pxa2xx_spi_dma_callback;
        rx_desc->callback_param = drv_data;

        dmaengine_submit(rx_desc);
        dmaengine_submit(tx_desc);
        return 0;

err_rx:
        dmaengine_terminate_async(drv_data->controller->dma_tx);
err_tx:
        return err;
}

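/* Start both channels and arm the completion path above */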
void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
        dma_async_issue_pending(drv_data->controller->dma_rx);
        dma_async_issue_pending(drv_data->controller->dma_tx);

        atomic_set(&drv_data->dma_running, 1);
}

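/*
 * Abort an in-flight transfer. dma_running is cleared first so that a
 * completion callback racing with the terminate does not finalize the
 * message behind our back.
 */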
void pxa2xx_spi_dma_stop(struct driver_data *drv_data)
{
        atomic_set(&drv_data->dma_running, 0);
        dmaengine_terminate_sync(drv_data->controller->dma_rx);
        dmaengine_terminate_sync(drv_data->controller->dma_tx);
}

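/*
 * Request the "tx" and "rx" slave channels: first by name via the device
 * (device tree or ACPI), then falling back to the legacy filter function
 * and parameters from the platform data.
 */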
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
        struct pxa2xx_spi_controller *pdata = drv_data->controller_info;
        struct spi_controller *controller = drv_data->controller;
        struct device *dev = drv_data->ssp->dev;
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        controller->dma_tx = dma_request_slave_channel_compat(mask,
                                pdata->dma_filter, pdata->tx_param, dev, "tx");
        if (!controller->dma_tx)
                return -ENODEV;

        controller->dma_rx = dma_request_slave_channel_compat(mask,
                                pdata->dma_filter, pdata->rx_param, dev, "rx");
        if (!controller->dma_rx) {
                dma_release_channel(controller->dma_tx);
                controller->dma_tx = NULL;
                return -ENODEV;
        }

        return 0;
}

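/* Stop any in-flight DMA and hand both channels back to the dmaengine core */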
void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
        struct spi_controller *controller = drv_data->controller;

        if (controller->dma_rx) {
                dmaengine_terminate_sync(controller->dma_rx);
                dma_release_channel(controller->dma_rx);
                controller->dma_rx = NULL;
        }
        if (controller->dma_tx) {
                dmaengine_terminate_sync(controller->dma_tx);
                dma_release_channel(controller->dma_tx);
                controller->dma_tx = NULL;
        }
}

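/*
 * Report the DMA burst size and the SSP FIFO thresholds to use for @spi:
 * a chip_info burst size overrides the controller-wide default, while the
 * FIFO thresholds stay at their defaults.
 */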
int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
                                           struct spi_device *spi,
                                           u8 bits_per_word, u32 *burst_code,
                                           u32 *threshold)
{
        struct pxa2xx_spi_chip *chip_info = spi->controller_data;
        struct driver_data *drv_data = spi_controller_get_devdata(spi->controller);
        u32 dma_burst_size = drv_data->controller_info->dma_burst_size;

        /*
         * If the DMA burst size is given in chip_info we use that,
         * otherwise we use the default. Also we use the default FIFO
         * thresholds for now.
         */
        *burst_code = chip_info ? chip_info->dma_burst_size : dma_burst_size;
        *threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
                   | SSCR1_TxTresh(TX_THRESH_DFLT);

        return 0;
}