Linux Audio

Check our new training course

Loading...
v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) STMicroelectronics 2016
  4 * Author: Benjamin Gaignard <benjamin.gaignard@st.com>
  5 */
  6
  7#include <linux/bitfield.h>
  8#include <linux/mfd/stm32-timers.h>
  9#include <linux/module.h>
 10#include <linux/of_platform.h>
 11#include <linux/reset.h>
 12
 13#define STM32_TIMERS_MAX_REGISTERS	0x3fc
 14
/*
 * DIER register DMA enable bits, indexed by enum stm32_timers_dmas:
 * one request-enable bit per event source (CC1..CC4, update, trigger,
 * commutation), matching the id passed to stm32_timers_dma_burst_read().
 */
static const u32 stm32_timers_dier_dmaen[STM32_TIMERS_MAX_DMAS] = {
	TIM_DIER_CC1DE,
	TIM_DIER_CC2DE,
	TIM_DIER_CC3DE,
	TIM_DIER_CC4DE,
	TIM_DIER_UIE,
	TIM_DIER_TDE,
	TIM_DIER_COMDE
};
 25
 26static void stm32_timers_dma_done(void *p)
 27{
 28	struct stm32_timers_dma *dma = p;
 29	struct dma_tx_state state;
 30	enum dma_status status;
 31
 32	status = dmaengine_tx_status(dma->chan, dma->chan->cookie, &state);
 33	if (status == DMA_COMPLETE)
 34		complete(&dma->completion);
 35}
 36
/**
 * stm32_timers_dma_burst_read - Read from timers registers using DMA.
 *
 * Read from STM32 timers registers using DMA on a single event.
 * @dev: reference to stm32_timers MFD device
 * @buf: DMA'able destination buffer
 * @id: stm32_timers_dmas event identifier (ch[1..4], up, trig or com)
 * @reg: registers start offset for DMA to read from (like CCRx for capture)
 * @num_reg: number of registers to read upon each DMA request, starting @reg.
 * @bursts: number of bursts to read (e.g. like two for pwm period capture)
 * @tmo_ms: timeout (milliseconds)
 *
 * Return: 0 on success, -ETIMEDOUT if no DMA completion was seen before
 * @tmo_ms elapsed, or another negative errno on failure.
 */
int stm32_timers_dma_burst_read(struct device *dev, u32 *buf,
				enum stm32_timers_dmas id, u32 reg,
				unsigned int num_reg, unsigned int bursts,
				unsigned long tmo_ms)
{
	struct stm32_timers *ddata = dev_get_drvdata(dev);
	unsigned long timeout = msecs_to_jiffies(tmo_ms);
	struct regmap *regmap = ddata->regmap;
	struct stm32_timers_dma *dma = &ddata->dma;
	size_t len = num_reg * bursts * sizeof(u32);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config;
	dma_cookie_t cookie;
	dma_addr_t dma_buf;
	u32 dbl, dba;
	long err;
	int ret;

	/* Sanity check: event id must be a known DMA request source */
	if (id < STM32_TIMERS_DMA_CH1 || id >= STM32_TIMERS_MAX_DMAS)
		return -EINVAL;

	/* Requested register window must lie inside the timer register map */
	if (!num_reg || !bursts || reg > STM32_TIMERS_MAX_REGISTERS ||
	    (reg + num_reg * sizeof(u32)) > STM32_TIMERS_MAX_REGISTERS)
		return -EINVAL;

	if (!dma->chans[id])
		return -ENODEV;
	/* Serialize burst reads: dma->chan is shared state under this lock */
	mutex_lock(&dma->lock);

	/* Select DMA channel in use */
	dma->chan = dma->chans[id];
	dma_buf = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_buf)) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* Prepare DMA read from timer registers, using DMA burst mode */
	memset(&config, 0, sizeof(config));
	/* All reads are funnelled through the timer DMAR burst register */
	config.src_addr = (dma_addr_t)dma->phys_base + TIM_DMAR;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	ret = dmaengine_slave_config(dma->chan, &config);
	if (ret)
		goto unmap;

	desc = dmaengine_prep_slave_single(dma->chan, dma_buf, len,
					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -EBUSY;
		goto unmap;
	}

	/* Completion is signalled from stm32_timers_dma_done() */
	desc->callback = stm32_timers_dma_done;
	desc->callback_param = dma;
	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		goto dma_term;

	reinit_completion(&dma->completion);
	dma_async_issue_pending(dma->chan);

	/*
	 * Setup and enable timer DMA burst mode: DBL holds the burst
	 * length minus one, DBA the start offset in 32-bit words.
	 */
	dbl = FIELD_PREP(TIM_DCR_DBL, bursts - 1);
	dba = FIELD_PREP(TIM_DCR_DBA, reg >> 2);
	ret = regmap_write(regmap, TIM_DCR, dbl | dba);
	if (ret)
		goto dma_term;

	/* Clear pending flags before enabling DMA request */
	ret = regmap_write(regmap, TIM_SR, 0);
	if (ret)
		goto dcr_clr;

	/* Enable the DMA request matching the selected event source */
	ret = regmap_update_bits(regmap, TIM_DIER, stm32_timers_dier_dmaen[id],
				 stm32_timers_dier_dmaen[id]);
	if (ret)
		goto dcr_clr;

	err = wait_for_completion_interruptible_timeout(&dma->completion,
							timeout);
	if (err == 0)
		ret = -ETIMEDOUT;
	else if (err < 0)
		ret = err;

	/* Tear down in reverse order: DMA request, status flags, burst mode */
	regmap_update_bits(regmap, TIM_DIER, stm32_timers_dier_dmaen[id], 0);
	regmap_write(regmap, TIM_SR, 0);
dcr_clr:
	regmap_write(regmap, TIM_DCR, 0);
dma_term:
	dmaengine_terminate_all(dma->chan);
unmap:
	dma_unmap_single(dev, dma_buf, len, DMA_FROM_DEVICE);
unlock:
	dma->chan = NULL;
	mutex_unlock(&dma->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(stm32_timers_dma_burst_read);
151
/* 32-bit MMIO register map covering offsets up to STM32_TIMERS_MAX_REGISTERS */
static const struct regmap_config stm32_timers_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = sizeof(u32),
	.max_register = STM32_TIMERS_MAX_REGISTERS,
};
158
159static void stm32_timers_get_arr_size(struct stm32_timers *ddata)
160{
 
 
 
 
 
161	/*
162	 * Only the available bits will be written so when readback
163	 * we get the maximum value of auto reload register
164	 */
165	regmap_write(ddata->regmap, TIM_ARR, ~0L);
166	regmap_read(ddata->regmap, TIM_ARR, &ddata->max_arr);
167	regmap_write(ddata->regmap, TIM_ARR, 0x0);
168}
169
170static int stm32_timers_dma_probe(struct device *dev,
171				   struct stm32_timers *ddata)
172{
173	int i;
174	int ret = 0;
175	char name[4];
176
177	init_completion(&ddata->dma.completion);
178	mutex_init(&ddata->dma.lock);
179
180	/* Optional DMA support: get valid DMA channel(s) or NULL */
181	for (i = STM32_TIMERS_DMA_CH1; i <= STM32_TIMERS_DMA_CH4; i++) {
182		snprintf(name, ARRAY_SIZE(name), "ch%1d", i + 1);
183		ddata->dma.chans[i] = dma_request_chan(dev, name);
184	}
185	ddata->dma.chans[STM32_TIMERS_DMA_UP] = dma_request_chan(dev, "up");
186	ddata->dma.chans[STM32_TIMERS_DMA_TRIG] = dma_request_chan(dev, "trig");
187	ddata->dma.chans[STM32_TIMERS_DMA_COM] = dma_request_chan(dev, "com");
188
189	for (i = STM32_TIMERS_DMA_CH1; i < STM32_TIMERS_MAX_DMAS; i++) {
190		if (IS_ERR(ddata->dma.chans[i])) {
191			/* Save the first error code to return */
192			if (PTR_ERR(ddata->dma.chans[i]) != -ENODEV && !ret)
193				ret = PTR_ERR(ddata->dma.chans[i]);
194
195			ddata->dma.chans[i] = NULL;
196		}
197	}
198
199	return ret;
200}
201
202static void stm32_timers_dma_remove(struct device *dev,
203				    struct stm32_timers *ddata)
204{
205	int i;
206
207	for (i = STM32_TIMERS_DMA_CH1; i < STM32_TIMERS_MAX_DMAS; i++)
208		if (ddata->dma.chans[i])
209			dma_release_channel(ddata->dma.chans[i]);
210}
211
/*
 * stm32_timers_probe - map the timer, build its regmap, grab clock and
 * DMA channels, then populate the child devices declared in DT.
 * Ordering matters: the regmap must exist before probing the ARR width,
 * and DMA channels must be set up before children are populated.
 */
static int stm32_timers_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct stm32_timers *ddata;
	struct resource *res;
	void __iomem *mmio;
	int ret;

	ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
	if (!ddata)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmio = devm_ioremap_resource(dev, res);
	if (IS_ERR(mmio))
		return PTR_ERR(mmio);

	/* Timer physical addr for DMA (source address of burst reads) */
	ddata->dma.phys_base = res->start;

	/* Register accesses are gated by the "int" interface clock */
	ddata->regmap = devm_regmap_init_mmio_clk(dev, "int", mmio,
						  &stm32_timers_regmap_cfg);
	if (IS_ERR(ddata->regmap))
		return PTR_ERR(ddata->regmap);

	ddata->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(ddata->clk))
		return PTR_ERR(ddata->clk);

	stm32_timers_get_arr_size(ddata);

	ret = stm32_timers_dma_probe(dev, ddata);
	if (ret) {
		stm32_timers_dma_remove(dev, ddata);
		return ret;
	}

	platform_set_drvdata(pdev, ddata);

	/* Populate DT children; on failure, release the DMA channels */
	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
	if (ret)
		stm32_timers_dma_remove(dev, ddata);

	return ret;
}
257
258static int stm32_timers_remove(struct platform_device *pdev)
259{
260	struct stm32_timers *ddata = platform_get_drvdata(pdev);
261
262	/*
263	 * Don't use devm_ here: enfore of_platform_depopulate() happens before
264	 * DMA are released, to avoid race on DMA.
265	 */
266	of_platform_depopulate(&pdev->dev);
267	stm32_timers_dma_remove(&pdev->dev, ddata);
268
269	return 0;
270}
271
/* Device-tree match table */
static const struct of_device_id stm32_timers_of_match[] = {
	{ .compatible = "st,stm32-timers", },
	{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, stm32_timers_of_match);
277
/* Platform driver glue and module metadata */
static struct platform_driver stm32_timers_driver = {
	.probe = stm32_timers_probe,
	.remove = stm32_timers_remove,
	.driver	= {
		.name = "stm32-timers",
		.of_match_table = stm32_timers_of_match,
	},
};
module_platform_driver(stm32_timers_driver);

MODULE_DESCRIPTION("STMicroelectronics STM32 Timers");
MODULE_LICENSE("GPL v2");
v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) STMicroelectronics 2016
  4 * Author: Benjamin Gaignard <benjamin.gaignard@st.com>
  5 */
  6
  7#include <linux/bitfield.h>
  8#include <linux/mfd/stm32-timers.h>
  9#include <linux/module.h>
 10#include <linux/of_platform.h>
 11#include <linux/reset.h>
 12
 13#define STM32_TIMERS_MAX_REGISTERS	0x3fc
 14
/*
 * DIER register DMA enable bits, indexed by enum stm32_timers_dmas:
 * one request-enable bit per event source (CC1..CC4, update, trigger,
 * commutation), matching the id passed to stm32_timers_dma_burst_read().
 */
static const u32 stm32_timers_dier_dmaen[STM32_TIMERS_MAX_DMAS] = {
	TIM_DIER_CC1DE,
	TIM_DIER_CC2DE,
	TIM_DIER_CC3DE,
	TIM_DIER_CC4DE,
	TIM_DIER_UIE,
	TIM_DIER_TDE,
	TIM_DIER_COMDE
};
 25
 26static void stm32_timers_dma_done(void *p)
 27{
 28	struct stm32_timers_dma *dma = p;
 29	struct dma_tx_state state;
 30	enum dma_status status;
 31
 32	status = dmaengine_tx_status(dma->chan, dma->chan->cookie, &state);
 33	if (status == DMA_COMPLETE)
 34		complete(&dma->completion);
 35}
 36
/**
 * stm32_timers_dma_burst_read - Read from timers registers using DMA.
 *
 * Read from STM32 timers registers using DMA on a single event.
 * @dev: reference to stm32_timers MFD device
 * @buf: DMA'able destination buffer
 * @id: stm32_timers_dmas event identifier (ch[1..4], up, trig or com)
 * @reg: registers start offset for DMA to read from (like CCRx for capture)
 * @num_reg: number of registers to read upon each DMA request, starting @reg.
 * @bursts: number of bursts to read (e.g. like two for pwm period capture)
 * @tmo_ms: timeout (milliseconds)
 *
 * Return: 0 on success, -ETIMEDOUT if no DMA completion was seen before
 * @tmo_ms elapsed, or another negative errno on failure.
 */
int stm32_timers_dma_burst_read(struct device *dev, u32 *buf,
				enum stm32_timers_dmas id, u32 reg,
				unsigned int num_reg, unsigned int bursts,
				unsigned long tmo_ms)
{
	struct stm32_timers *ddata = dev_get_drvdata(dev);
	unsigned long timeout = msecs_to_jiffies(tmo_ms);
	struct regmap *regmap = ddata->regmap;
	struct stm32_timers_dma *dma = &ddata->dma;
	size_t len = num_reg * bursts * sizeof(u32);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config config;
	dma_cookie_t cookie;
	dma_addr_t dma_buf;
	u32 dbl, dba;
	long err;
	int ret;

	/* Sanity check: event id must be a known DMA request source */
	if (id < STM32_TIMERS_DMA_CH1 || id >= STM32_TIMERS_MAX_DMAS)
		return -EINVAL;

	/* Requested register window must lie inside the timer register map */
	if (!num_reg || !bursts || reg > STM32_TIMERS_MAX_REGISTERS ||
	    (reg + num_reg * sizeof(u32)) > STM32_TIMERS_MAX_REGISTERS)
		return -EINVAL;

	if (!dma->chans[id])
		return -ENODEV;
	/* Serialize burst reads: dma->chan is shared state under this lock */
	mutex_lock(&dma->lock);

	/* Select DMA channel in use */
	dma->chan = dma->chans[id];
	dma_buf = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_buf)) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* Prepare DMA read from timer registers, using DMA burst mode */
	memset(&config, 0, sizeof(config));
	/* All reads are funnelled through the timer DMAR burst register */
	config.src_addr = (dma_addr_t)dma->phys_base + TIM_DMAR;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	ret = dmaengine_slave_config(dma->chan, &config);
	if (ret)
		goto unmap;

	desc = dmaengine_prep_slave_single(dma->chan, dma_buf, len,
					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc) {
		ret = -EBUSY;
		goto unmap;
	}

	/* Completion is signalled from stm32_timers_dma_done() */
	desc->callback = stm32_timers_dma_done;
	desc->callback_param = dma;
	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		goto dma_term;

	reinit_completion(&dma->completion);
	dma_async_issue_pending(dma->chan);

	/*
	 * Setup and enable timer DMA burst mode: DBL holds the burst
	 * length minus one, DBA the start offset in 32-bit words.
	 */
	dbl = FIELD_PREP(TIM_DCR_DBL, bursts - 1);
	dba = FIELD_PREP(TIM_DCR_DBA, reg >> 2);
	ret = regmap_write(regmap, TIM_DCR, dbl | dba);
	if (ret)
		goto dma_term;

	/* Clear pending flags before enabling DMA request */
	ret = regmap_write(regmap, TIM_SR, 0);
	if (ret)
		goto dcr_clr;

	/* Enable the DMA request matching the selected event source */
	ret = regmap_update_bits(regmap, TIM_DIER, stm32_timers_dier_dmaen[id],
				 stm32_timers_dier_dmaen[id]);
	if (ret)
		goto dcr_clr;

	err = wait_for_completion_interruptible_timeout(&dma->completion,
							timeout);
	if (err == 0)
		ret = -ETIMEDOUT;
	else if (err < 0)
		ret = err;

	/* Tear down in reverse order: DMA request, status flags, burst mode */
	regmap_update_bits(regmap, TIM_DIER, stm32_timers_dier_dmaen[id], 0);
	regmap_write(regmap, TIM_SR, 0);
dcr_clr:
	regmap_write(regmap, TIM_DCR, 0);
dma_term:
	dmaengine_terminate_all(dma->chan);
unmap:
	dma_unmap_single(dev, dma_buf, len, DMA_FROM_DEVICE);
unlock:
	dma->chan = NULL;
	mutex_unlock(&dma->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(stm32_timers_dma_burst_read);
151
/* 32-bit MMIO register map covering offsets up to STM32_TIMERS_MAX_REGISTERS */
static const struct regmap_config stm32_timers_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = sizeof(u32),
	.max_register = STM32_TIMERS_MAX_REGISTERS,
};
158
159static void stm32_timers_get_arr_size(struct stm32_timers *ddata)
160{
161	u32 arr;
162
163	/* Backup ARR to restore it after getting the maximum value */
164	regmap_read(ddata->regmap, TIM_ARR, &arr);
165
166	/*
167	 * Only the available bits will be written so when readback
168	 * we get the maximum value of auto reload register
169	 */
170	regmap_write(ddata->regmap, TIM_ARR, ~0L);
171	regmap_read(ddata->regmap, TIM_ARR, &ddata->max_arr);
172	regmap_write(ddata->regmap, TIM_ARR, arr);
173}
174
175static int stm32_timers_dma_probe(struct device *dev,
176				   struct stm32_timers *ddata)
177{
178	int i;
179	int ret = 0;
180	char name[4];
181
182	init_completion(&ddata->dma.completion);
183	mutex_init(&ddata->dma.lock);
184
185	/* Optional DMA support: get valid DMA channel(s) or NULL */
186	for (i = STM32_TIMERS_DMA_CH1; i <= STM32_TIMERS_DMA_CH4; i++) {
187		snprintf(name, ARRAY_SIZE(name), "ch%1d", i + 1);
188		ddata->dma.chans[i] = dma_request_chan(dev, name);
189	}
190	ddata->dma.chans[STM32_TIMERS_DMA_UP] = dma_request_chan(dev, "up");
191	ddata->dma.chans[STM32_TIMERS_DMA_TRIG] = dma_request_chan(dev, "trig");
192	ddata->dma.chans[STM32_TIMERS_DMA_COM] = dma_request_chan(dev, "com");
193
194	for (i = STM32_TIMERS_DMA_CH1; i < STM32_TIMERS_MAX_DMAS; i++) {
195		if (IS_ERR(ddata->dma.chans[i])) {
196			/* Save the first error code to return */
197			if (PTR_ERR(ddata->dma.chans[i]) != -ENODEV && !ret)
198				ret = PTR_ERR(ddata->dma.chans[i]);
199
200			ddata->dma.chans[i] = NULL;
201		}
202	}
203
204	return ret;
205}
206
207static void stm32_timers_dma_remove(struct device *dev,
208				    struct stm32_timers *ddata)
209{
210	int i;
211
212	for (i = STM32_TIMERS_DMA_CH1; i < STM32_TIMERS_MAX_DMAS; i++)
213		if (ddata->dma.chans[i])
214			dma_release_channel(ddata->dma.chans[i]);
215}
216
/*
 * stm32_timers_probe - map the timer, build its regmap, grab clock and
 * DMA channels, then populate the child devices declared in DT.
 * Ordering matters: the regmap must exist before probing the ARR width,
 * and DMA channels must be set up before children are populated.
 */
static int stm32_timers_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct stm32_timers *ddata;
	struct resource *res;
	void __iomem *mmio;
	int ret;

	ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
	if (!ddata)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmio = devm_ioremap_resource(dev, res);
	if (IS_ERR(mmio))
		return PTR_ERR(mmio);

	/* Timer physical addr for DMA (source address of burst reads) */
	ddata->dma.phys_base = res->start;

	/* Register accesses are gated by the "int" interface clock */
	ddata->regmap = devm_regmap_init_mmio_clk(dev, "int", mmio,
						  &stm32_timers_regmap_cfg);
	if (IS_ERR(ddata->regmap))
		return PTR_ERR(ddata->regmap);

	ddata->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(ddata->clk))
		return PTR_ERR(ddata->clk);

	stm32_timers_get_arr_size(ddata);

	ret = stm32_timers_dma_probe(dev, ddata);
	if (ret) {
		stm32_timers_dma_remove(dev, ddata);
		return ret;
	}

	platform_set_drvdata(pdev, ddata);

	/* Populate DT children; on failure, release the DMA channels */
	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
	if (ret)
		stm32_timers_dma_remove(dev, ddata);

	return ret;
}
262
263static int stm32_timers_remove(struct platform_device *pdev)
264{
265	struct stm32_timers *ddata = platform_get_drvdata(pdev);
266
267	/*
268	 * Don't use devm_ here: enfore of_platform_depopulate() happens before
269	 * DMA are released, to avoid race on DMA.
270	 */
271	of_platform_depopulate(&pdev->dev);
272	stm32_timers_dma_remove(&pdev->dev, ddata);
273
274	return 0;
275}
276
/* Device-tree match table */
static const struct of_device_id stm32_timers_of_match[] = {
	{ .compatible = "st,stm32-timers", },
	{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, stm32_timers_of_match);
282
/* Platform driver glue and module metadata */
static struct platform_driver stm32_timers_driver = {
	.probe = stm32_timers_probe,
	.remove = stm32_timers_remove,
	.driver	= {
		.name = "stm32-timers",
		.of_match_table = stm32_timers_of_match,
	},
};
module_platform_driver(stm32_timers_driver);

MODULE_DESCRIPTION("STMicroelectronics STM32 Timers");
MODULE_LICENSE("GPL v2");