Linux Audio

Check our new training course

Loading...
v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Qualcomm Technologies HIDMA DMA engine Management interface
  4 *
  5 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  6 */
  7
  8#include <linux/dmaengine.h>
  9#include <linux/acpi.h>
 10#include <linux/of.h>
 11#include <linux/property.h>
 12#include <linux/of_address.h>
 13#include <linux/of_irq.h>
 14#include <linux/of_platform.h>
 15#include <linux/of_device.h>
 16#include <linux/platform_device.h>
 17#include <linux/module.h>
 18#include <linux/uaccess.h>
 19#include <linux/slab.h>
 20#include <linux/pm_runtime.h>
 21#include <linux/bitops.h>
 22#include <linux/dma-mapping.h>
 23
 24#include "hidma_mgmt.h"
 25
 26#define HIDMA_QOS_N_OFFSET		0x700
 27#define HIDMA_CFG_OFFSET		0x400
 28#define HIDMA_MAX_BUS_REQ_LEN_OFFSET	0x41C
 29#define HIDMA_MAX_XACTIONS_OFFSET	0x420
 30#define HIDMA_HW_VERSION_OFFSET	0x424
 31#define HIDMA_CHRESET_TIMEOUT_OFFSET	0x418
 32
 33#define HIDMA_MAX_WR_XACTIONS_MASK	GENMASK(4, 0)
 34#define HIDMA_MAX_RD_XACTIONS_MASK	GENMASK(4, 0)
 35#define HIDMA_WEIGHT_MASK		GENMASK(6, 0)
 36#define HIDMA_MAX_BUS_REQ_LEN_MASK	GENMASK(15, 0)
 37#define HIDMA_CHRESET_TIMEOUT_MASK	GENMASK(19, 0)
 38
 39#define HIDMA_MAX_WR_XACTIONS_BIT_POS	16
 40#define HIDMA_MAX_BUS_WR_REQ_BIT_POS	16
 41#define HIDMA_WRR_BIT_POS		8
 42#define HIDMA_PRIORITY_BIT_POS		15
 43
 44#define HIDMA_AUTOSUSPEND_TIMEOUT	2000
 45#define HIDMA_MAX_CHANNEL_WEIGHT	15
 46
 47static unsigned int max_write_request;
 48module_param(max_write_request, uint, 0644);
 49MODULE_PARM_DESC(max_write_request,
 50		"maximum write burst (default: ACPI/DT value)");
 51
 52static unsigned int max_read_request;
 53module_param(max_read_request, uint, 0644);
 54MODULE_PARM_DESC(max_read_request,
 55		"maximum read burst (default: ACPI/DT value)");
 56
 57static unsigned int max_wr_xactions;
 58module_param(max_wr_xactions, uint, 0644);
 59MODULE_PARM_DESC(max_wr_xactions,
 60	"maximum number of write transactions (default: ACPI/DT value)");
 61
 62static unsigned int max_rd_xactions;
 63module_param(max_rd_xactions, uint, 0644);
 64MODULE_PARM_DESC(max_rd_xactions,
 65	"maximum number of read transactions (default: ACPI/DT value)");
 66
/**
 * hidma_mgmt_setup() - validate and program the management registers
 * @mgmtdev: management device carrying the ACPI/DT-provided configuration
 *
 * Validates the configured burst lengths, outstanding-transaction limits
 * and per-channel priority/weight values, then writes them into the
 * management register block.  The device is runtime-resumed only for the
 * duration of the register accesses and released with autosuspend.
 *
 * Return: 0 on success, -EINVAL if any parameter is out of range.
 */
int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev)
{
	unsigned int i;
	u32 val;

	/* write burst must be a power of two in [128, 1024] bytes */
	if (!is_power_of_2(mgmtdev->max_write_request) ||
	    (mgmtdev->max_write_request < 128) ||
	    (mgmtdev->max_write_request > 1024)) {
		dev_err(&mgmtdev->pdev->dev, "invalid write request %d\n",
			mgmtdev->max_write_request);
		return -EINVAL;
	}

	/* read burst must be a power of two in [128, 1024] bytes */
	if (!is_power_of_2(mgmtdev->max_read_request) ||
	    (mgmtdev->max_read_request < 128) ||
	    (mgmtdev->max_read_request > 1024)) {
		dev_err(&mgmtdev->pdev->dev, "invalid read request %d\n",
			mgmtdev->max_read_request);
		return -EINVAL;
	}

	/* outstanding write transactions fit in a 5-bit register field */
	if (mgmtdev->max_wr_xactions > HIDMA_MAX_WR_XACTIONS_MASK) {
		dev_err(&mgmtdev->pdev->dev,
			"max_wr_xactions cannot be bigger than %ld\n",
			HIDMA_MAX_WR_XACTIONS_MASK);
		return -EINVAL;
	}

	/* outstanding read transactions fit in a 5-bit register field */
	if (mgmtdev->max_rd_xactions > HIDMA_MAX_RD_XACTIONS_MASK) {
		dev_err(&mgmtdev->pdev->dev,
			"max_rd_xactions cannot be bigger than %ld\n",
			HIDMA_MAX_RD_XACTIONS_MASK);
		return -EINVAL;
	}

	/* per-channel QoS parameters: priority is a single bit */
	for (i = 0; i < mgmtdev->dma_channels; i++) {
		if (mgmtdev->priority[i] > 1) {
			dev_err(&mgmtdev->pdev->dev,
				"priority can be 0 or 1\n");
			return -EINVAL;
		}

		if (mgmtdev->weight[i] > HIDMA_MAX_CHANNEL_WEIGHT) {
			dev_err(&mgmtdev->pdev->dev,
				"max value of weight can be %d.\n",
				HIDMA_MAX_CHANNEL_WEIGHT);
			return -EINVAL;
		}

		/* weight needs to be at least one */
		if (mgmtdev->weight[i] == 0)
			mgmtdev->weight[i] = 1;
	}

	/* keep the device powered while touching registers */
	pm_runtime_get_sync(&mgmtdev->pdev->dev);

	/* write burst length in bits 31:16, read burst length in 15:0 */
	val = readl(mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);
	val &= ~(HIDMA_MAX_BUS_REQ_LEN_MASK << HIDMA_MAX_BUS_WR_REQ_BIT_POS);
	val |= mgmtdev->max_write_request << HIDMA_MAX_BUS_WR_REQ_BIT_POS;
	val &= ~HIDMA_MAX_BUS_REQ_LEN_MASK;
	val |= mgmtdev->max_read_request;
	writel(val, mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);

	/* write transaction limit at bit 16, read limit at bit 0 */
	val = readl(mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);
	val &= ~(HIDMA_MAX_WR_XACTIONS_MASK << HIDMA_MAX_WR_XACTIONS_BIT_POS);
	val |= mgmtdev->max_wr_xactions << HIDMA_MAX_WR_XACTIONS_BIT_POS;
	val &= ~HIDMA_MAX_RD_XACTIONS_MASK;
	val |= mgmtdev->max_rd_xactions;
	writel(val, mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);

	/* cache the HW revision: major in bits 31:28, minor in bits 19:16 */
	mgmtdev->hw_version =
	    readl(mgmtdev->virtaddr + HIDMA_HW_VERSION_OFFSET);
	mgmtdev->hw_version_major = (mgmtdev->hw_version >> 28) & 0xF;
	mgmtdev->hw_version_minor = (mgmtdev->hw_version >> 16) & 0xF;

	/* program per-channel QoS: priority bit plus 7-bit round-robin weight */
	for (i = 0; i < mgmtdev->dma_channels; i++) {
		u32 weight = mgmtdev->weight[i];
		u32 priority = mgmtdev->priority[i];

		val = readl(mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
		val &= ~(1 << HIDMA_PRIORITY_BIT_POS);
		val |= (priority & 0x1) << HIDMA_PRIORITY_BIT_POS;
		val &= ~(HIDMA_WEIGHT_MASK << HIDMA_WRR_BIT_POS);
		val |= (weight & HIDMA_WEIGHT_MASK) << HIDMA_WRR_BIT_POS;
		writel(val, mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
	}

	/* channel reset timeout is a 20-bit field */
	val = readl(mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);
	val &= ~HIDMA_CHRESET_TIMEOUT_MASK;
	val |= mgmtdev->chreset_timeout_cycles & HIDMA_CHRESET_TIMEOUT_MASK;
	writel(val, mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);

	pm_runtime_mark_last_busy(&mgmtdev->pdev->dev);
	pm_runtime_put_autosuspend(&mgmtdev->pdev->dev);
	return 0;
}
162EXPORT_SYMBOL_GPL(hidma_mgmt_setup);
163
/**
 * hidma_mgmt_probe() - probe the HIDMA management device
 * @pdev: platform device for the management interface
 *
 * Maps the register block, reads the channel configuration from ACPI/DT
 * device properties (optionally overridden by module parameters), programs
 * the hardware via hidma_mgmt_setup(), starts the engine and registers the
 * sysfs interface.
 *
 * Return: 0 on success or a negative errno on failure.
 */
static int hidma_mgmt_probe(struct platform_device *pdev)
{
	struct hidma_mgmt_dev *mgmtdev;
	struct resource *res;
	void __iomem *virtaddr;
	int irq;
	int rc;
	u32 val;

	/* enable runtime PM and hold a reference for the whole probe */
	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	virtaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);

	if (IS_ERR(virtaddr)) {
		rc = PTR_ERR(virtaddr);
		goto out;
	}

	/* the IRQ is only validated here; it is not used below */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		rc = irq;
		goto out;
	}

	mgmtdev = devm_kzalloc(&pdev->dev, sizeof(*mgmtdev), GFP_KERNEL);
	if (!mgmtdev) {
		rc = -ENOMEM;
		goto out;
	}

	mgmtdev->pdev = pdev;
	mgmtdev->addrsize = resource_size(res);
	mgmtdev->virtaddr = virtaddr;

	/* mandatory ACPI/DT properties */
	rc = device_property_read_u32(&pdev->dev, "dma-channels",
				      &mgmtdev->dma_channels);
	if (rc) {
		dev_err(&pdev->dev, "number of channels missing\n");
		goto out;
	}

	rc = device_property_read_u32(&pdev->dev,
				      "channel-reset-timeout-cycles",
				      &mgmtdev->chreset_timeout_cycles);
	if (rc) {
		dev_err(&pdev->dev, "channel reset timeout missing\n");
		goto out;
	}

	rc = device_property_read_u32(&pdev->dev, "max-write-burst-bytes",
				      &mgmtdev->max_write_request);
	if (rc) {
		dev_err(&pdev->dev, "max-write-burst-bytes missing\n");
		goto out;
	}

	/*
	 * A non-zero module parameter overrides the firmware value;
	 * otherwise the firmware value is published back through the
	 * (0644) module parameter for visibility.
	 */
	if (max_write_request &&
			(max_write_request != mgmtdev->max_write_request)) {
		dev_info(&pdev->dev, "overriding max-write-burst-bytes: %d\n",
			max_write_request);
		mgmtdev->max_write_request = max_write_request;
	} else
		max_write_request = mgmtdev->max_write_request;

	rc = device_property_read_u32(&pdev->dev, "max-read-burst-bytes",
				      &mgmtdev->max_read_request);
	if (rc) {
		dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
		goto out;
	}
	if (max_read_request &&
			(max_read_request != mgmtdev->max_read_request)) {
		dev_info(&pdev->dev, "overriding max-read-burst-bytes: %d\n",
			max_read_request);
		mgmtdev->max_read_request = max_read_request;
	} else
		max_read_request = mgmtdev->max_read_request;

	rc = device_property_read_u32(&pdev->dev, "max-write-transactions",
				      &mgmtdev->max_wr_xactions);
	if (rc) {
		dev_err(&pdev->dev, "max-write-transactions missing\n");
		goto out;
	}
	if (max_wr_xactions &&
			(max_wr_xactions != mgmtdev->max_wr_xactions)) {
		dev_info(&pdev->dev, "overriding max-write-transactions: %d\n",
			max_wr_xactions);
		mgmtdev->max_wr_xactions = max_wr_xactions;
	} else
		max_wr_xactions = mgmtdev->max_wr_xactions;

	rc = device_property_read_u32(&pdev->dev, "max-read-transactions",
				      &mgmtdev->max_rd_xactions);
	if (rc) {
		dev_err(&pdev->dev, "max-read-transactions missing\n");
		goto out;
	}
	if (max_rd_xactions &&
			(max_rd_xactions != mgmtdev->max_rd_xactions)) {
		dev_info(&pdev->dev, "overriding max-read-transactions: %d\n",
			max_rd_xactions);
		mgmtdev->max_rd_xactions = max_rd_xactions;
	} else
		max_rd_xactions = mgmtdev->max_rd_xactions;

	/* per-channel QoS tables, zero-initialized (validated in setup) */
	mgmtdev->priority = devm_kcalloc(&pdev->dev,
					 mgmtdev->dma_channels,
					 sizeof(*mgmtdev->priority),
					 GFP_KERNEL);
	if (!mgmtdev->priority) {
		rc = -ENOMEM;
		goto out;
	}

	mgmtdev->weight = devm_kcalloc(&pdev->dev,
				       mgmtdev->dma_channels,
				       sizeof(*mgmtdev->weight), GFP_KERNEL);
	if (!mgmtdev->weight) {
		rc = -ENOMEM;
		goto out;
	}

	/* validate the configuration and program the registers */
	rc = hidma_mgmt_setup(mgmtdev);
	if (rc) {
		dev_err(&pdev->dev, "setup failed\n");
		goto out;
	}

	/* start the HW */
	val = readl(mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
	val |= 1;
	writel(val, mgmtdev->virtaddr + HIDMA_CFG_OFFSET);

	rc = hidma_mgmt_init_sys(mgmtdev);
	if (rc) {
		dev_err(&pdev->dev, "sysfs setup failed\n");
		goto out;
	}

	dev_info(&pdev->dev,
		 "HW rev: %d.%d @ %pa with %d physical channels\n",
		 mgmtdev->hw_version_major, mgmtdev->hw_version_minor,
		 &res->start, mgmtdev->dma_channels);

	platform_set_drvdata(pdev, mgmtdev);
	/* drop the probe-time PM reference; autosuspend takes over */
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);
	return 0;
out:
	/* undo the runtime-PM enablement done at the top of probe */
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}
321
#if IS_ENABLED(CONFIG_ACPI)
/* ACPI ID of the HIDMA management object */
static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
	{"QCOM8060"},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids);
#endif

/* device-tree compatible string for the management interface */
static const struct of_device_id hidma_mgmt_match[] = {
	{.compatible = "qcom,hidma-mgmt-1.0",},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_mgmt_match);

/* probe-only: no .remove, the module is not intended to be unloaded */
static struct platform_driver hidma_mgmt_driver = {
	.probe = hidma_mgmt_probe,
	.driver = {
		   .name = "hidma-mgmt",
		   .of_match_table = hidma_mgmt_match,
		   .acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids),
	},
};
344
345#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
346static int object_counter;
347
348static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
349{
350	struct platform_device *pdev_parent = of_find_device_by_node(np);
351	struct platform_device_info pdevinfo;
352	struct device_node *child;
353	struct resource *res;
354	int ret = 0;
355
356	/* allocate a resource array */
357	res = kcalloc(3, sizeof(*res), GFP_KERNEL);
358	if (!res)
359		return -ENOMEM;
360
361	for_each_available_child_of_node(np, child) {
362		struct platform_device *new_pdev;
363
364		ret = of_address_to_resource(child, 0, &res[0]);
365		if (!ret)
366			goto out;
367
368		ret = of_address_to_resource(child, 1, &res[1]);
369		if (!ret)
370			goto out;
371
372		ret = of_irq_to_resource(child, 0, &res[2]);
373		if (ret <= 0)
374			goto out;
375
376		memset(&pdevinfo, 0, sizeof(pdevinfo));
377		pdevinfo.fwnode = &child->fwnode;
378		pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
379		pdevinfo.name = child->name;
380		pdevinfo.id = object_counter++;
381		pdevinfo.res = res;
382		pdevinfo.num_res = 3;
383		pdevinfo.data = NULL;
384		pdevinfo.size_data = 0;
385		pdevinfo.dma_mask = DMA_BIT_MASK(64);
386		new_pdev = platform_device_register_full(&pdevinfo);
387		if (IS_ERR(new_pdev)) {
388			ret = PTR_ERR(new_pdev);
389			goto out;
390		}
391		new_pdev->dev.of_node = child;
392		of_dma_configure(&new_pdev->dev, child, true);
393		/*
394		 * It is assumed that calling of_msi_configure is safe on
395		 * platforms with or without MSI support.
396		 */
397		of_msi_configure(&new_pdev->dev, child);
398	}
399
400	kfree(res);
401
402	return ret;
403
404out:
405	of_node_put(child);
406	kfree(res);
407
408	return ret;
409}
410#endif
411
/*
 * Module init: create platform devices for the DMA channels described in
 * the device tree (OF builds only), then register the management driver.
 */
static int __init hidma_mgmt_init(void)
{
#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
	struct device_node *child;

	for_each_matching_node(child, hidma_mgmt_match) {
		/* device tree based firmware here */
		hidma_mgmt_of_populate_channels(child);
	}
#endif
	/*
	 * We do not check for return value here, as it is assumed that
	 * platform_driver_register must not fail. The reason for this is that
	 * the (potential) hidma_mgmt_of_populate_channels calls above are not
	 * cleaned up if it does fail, and to do this work is quite
	 * complicated. In particular, various calls of of_address_to_resource,
	 * of_irq_to_resource, platform_device_register_full, of_dma_configure,
	 * and of_msi_configure which then call other functions and so on, must
	 * be cleaned up - this is not a trivial exercise.
	 *
	 * Currently, this module is not intended to be unloaded, and there is
	 * no module_exit function defined which does the needed cleanup. For
	 * this reason, we have to assume success here.
	 */
	platform_driver_register(&hidma_mgmt_driver);

	return 0;
}
440module_init(hidma_mgmt_init);
441MODULE_LICENSE("GPL v2");
v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Qualcomm Technologies HIDMA DMA engine Management interface
  4 *
  5 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  6 */
  7
  8#include <linux/dmaengine.h>
  9#include <linux/acpi.h>
 10#include <linux/of.h>
 11#include <linux/property.h>
 12#include <linux/of_address.h>
 13#include <linux/of_irq.h>
 14#include <linux/of_platform.h>
 
 
 15#include <linux/module.h>
 16#include <linux/uaccess.h>
 17#include <linux/slab.h>
 18#include <linux/pm_runtime.h>
 19#include <linux/bitops.h>
 20#include <linux/dma-mapping.h>
 21
 22#include "hidma_mgmt.h"
 23
 24#define HIDMA_QOS_N_OFFSET		0x700
 25#define HIDMA_CFG_OFFSET		0x400
 26#define HIDMA_MAX_BUS_REQ_LEN_OFFSET	0x41C
 27#define HIDMA_MAX_XACTIONS_OFFSET	0x420
 28#define HIDMA_HW_VERSION_OFFSET	0x424
 29#define HIDMA_CHRESET_TIMEOUT_OFFSET	0x418
 30
 31#define HIDMA_MAX_WR_XACTIONS_MASK	GENMASK(4, 0)
 32#define HIDMA_MAX_RD_XACTIONS_MASK	GENMASK(4, 0)
 33#define HIDMA_WEIGHT_MASK		GENMASK(6, 0)
 34#define HIDMA_MAX_BUS_REQ_LEN_MASK	GENMASK(15, 0)
 35#define HIDMA_CHRESET_TIMEOUT_MASK	GENMASK(19, 0)
 36
 37#define HIDMA_MAX_WR_XACTIONS_BIT_POS	16
 38#define HIDMA_MAX_BUS_WR_REQ_BIT_POS	16
 39#define HIDMA_WRR_BIT_POS		8
 40#define HIDMA_PRIORITY_BIT_POS		15
 41
 42#define HIDMA_AUTOSUSPEND_TIMEOUT	2000
 43#define HIDMA_MAX_CHANNEL_WEIGHT	15
 44
 45static unsigned int max_write_request;
 46module_param(max_write_request, uint, 0644);
 47MODULE_PARM_DESC(max_write_request,
 48		"maximum write burst (default: ACPI/DT value)");
 49
 50static unsigned int max_read_request;
 51module_param(max_read_request, uint, 0644);
 52MODULE_PARM_DESC(max_read_request,
 53		"maximum read burst (default: ACPI/DT value)");
 54
 55static unsigned int max_wr_xactions;
 56module_param(max_wr_xactions, uint, 0644);
 57MODULE_PARM_DESC(max_wr_xactions,
 58	"maximum number of write transactions (default: ACPI/DT value)");
 59
 60static unsigned int max_rd_xactions;
 61module_param(max_rd_xactions, uint, 0644);
 62MODULE_PARM_DESC(max_rd_xactions,
 63	"maximum number of read transactions (default: ACPI/DT value)");
 64
/**
 * hidma_mgmt_setup() - validate and program the management registers
 * @mgmtdev: management device carrying the ACPI/DT-provided configuration
 *
 * Validates the configured burst lengths, outstanding-transaction limits
 * and per-channel priority/weight values, then writes them into the
 * management register block.  The device is runtime-resumed only for the
 * duration of the register accesses and released with autosuspend.
 *
 * Return: 0 on success, -EINVAL if any parameter is out of range.
 */
int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev)
{
	unsigned int i;
	u32 val;

	/* write burst must be a power of two in [128, 1024] bytes */
	if (!is_power_of_2(mgmtdev->max_write_request) ||
	    (mgmtdev->max_write_request < 128) ||
	    (mgmtdev->max_write_request > 1024)) {
		dev_err(&mgmtdev->pdev->dev, "invalid write request %d\n",
			mgmtdev->max_write_request);
		return -EINVAL;
	}

	/* read burst must be a power of two in [128, 1024] bytes */
	if (!is_power_of_2(mgmtdev->max_read_request) ||
	    (mgmtdev->max_read_request < 128) ||
	    (mgmtdev->max_read_request > 1024)) {
		dev_err(&mgmtdev->pdev->dev, "invalid read request %d\n",
			mgmtdev->max_read_request);
		return -EINVAL;
	}

	/* outstanding write transactions fit in a 5-bit register field */
	if (mgmtdev->max_wr_xactions > HIDMA_MAX_WR_XACTIONS_MASK) {
		dev_err(&mgmtdev->pdev->dev,
			"max_wr_xactions cannot be bigger than %ld\n",
			HIDMA_MAX_WR_XACTIONS_MASK);
		return -EINVAL;
	}

	/* outstanding read transactions fit in a 5-bit register field */
	if (mgmtdev->max_rd_xactions > HIDMA_MAX_RD_XACTIONS_MASK) {
		dev_err(&mgmtdev->pdev->dev,
			"max_rd_xactions cannot be bigger than %ld\n",
			HIDMA_MAX_RD_XACTIONS_MASK);
		return -EINVAL;
	}

	/* per-channel QoS parameters: priority is a single bit */
	for (i = 0; i < mgmtdev->dma_channels; i++) {
		if (mgmtdev->priority[i] > 1) {
			dev_err(&mgmtdev->pdev->dev,
				"priority can be 0 or 1\n");
			return -EINVAL;
		}

		if (mgmtdev->weight[i] > HIDMA_MAX_CHANNEL_WEIGHT) {
			dev_err(&mgmtdev->pdev->dev,
				"max value of weight can be %d.\n",
				HIDMA_MAX_CHANNEL_WEIGHT);
			return -EINVAL;
		}

		/* weight needs to be at least one */
		if (mgmtdev->weight[i] == 0)
			mgmtdev->weight[i] = 1;
	}

	/* keep the device powered while touching registers */
	pm_runtime_get_sync(&mgmtdev->pdev->dev);

	/* write burst length in bits 31:16, read burst length in 15:0 */
	val = readl(mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);
	val &= ~(HIDMA_MAX_BUS_REQ_LEN_MASK << HIDMA_MAX_BUS_WR_REQ_BIT_POS);
	val |= mgmtdev->max_write_request << HIDMA_MAX_BUS_WR_REQ_BIT_POS;
	val &= ~HIDMA_MAX_BUS_REQ_LEN_MASK;
	val |= mgmtdev->max_read_request;
	writel(val, mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);

	/* write transaction limit at bit 16, read limit at bit 0 */
	val = readl(mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);
	val &= ~(HIDMA_MAX_WR_XACTIONS_MASK << HIDMA_MAX_WR_XACTIONS_BIT_POS);
	val |= mgmtdev->max_wr_xactions << HIDMA_MAX_WR_XACTIONS_BIT_POS;
	val &= ~HIDMA_MAX_RD_XACTIONS_MASK;
	val |= mgmtdev->max_rd_xactions;
	writel(val, mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);

	/* cache the HW revision: major in bits 31:28, minor in bits 19:16 */
	mgmtdev->hw_version =
	    readl(mgmtdev->virtaddr + HIDMA_HW_VERSION_OFFSET);
	mgmtdev->hw_version_major = (mgmtdev->hw_version >> 28) & 0xF;
	mgmtdev->hw_version_minor = (mgmtdev->hw_version >> 16) & 0xF;

	/* program per-channel QoS: priority bit plus 7-bit round-robin weight */
	for (i = 0; i < mgmtdev->dma_channels; i++) {
		u32 weight = mgmtdev->weight[i];
		u32 priority = mgmtdev->priority[i];

		val = readl(mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
		val &= ~(1 << HIDMA_PRIORITY_BIT_POS);
		val |= (priority & 0x1) << HIDMA_PRIORITY_BIT_POS;
		val &= ~(HIDMA_WEIGHT_MASK << HIDMA_WRR_BIT_POS);
		val |= (weight & HIDMA_WEIGHT_MASK) << HIDMA_WRR_BIT_POS;
		writel(val, mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
	}

	/* channel reset timeout is a 20-bit field */
	val = readl(mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);
	val &= ~HIDMA_CHRESET_TIMEOUT_MASK;
	val |= mgmtdev->chreset_timeout_cycles & HIDMA_CHRESET_TIMEOUT_MASK;
	writel(val, mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);

	pm_runtime_mark_last_busy(&mgmtdev->pdev->dev);
	pm_runtime_put_autosuspend(&mgmtdev->pdev->dev);
	return 0;
}
160EXPORT_SYMBOL_GPL(hidma_mgmt_setup);
161
162static int hidma_mgmt_probe(struct platform_device *pdev)
163{
164	struct hidma_mgmt_dev *mgmtdev;
165	struct resource *res;
166	void __iomem *virtaddr;
167	int irq;
168	int rc;
169	u32 val;
170
171	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
172	pm_runtime_use_autosuspend(&pdev->dev);
173	pm_runtime_set_active(&pdev->dev);
174	pm_runtime_enable(&pdev->dev);
175	pm_runtime_get_sync(&pdev->dev);
176
177	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
178	virtaddr = devm_ioremap_resource(&pdev->dev, res);
179	if (IS_ERR(virtaddr)) {
180		rc = -ENOMEM;
181		goto out;
182	}
183
184	irq = platform_get_irq(pdev, 0);
185	if (irq < 0) {
186		rc = irq;
187		goto out;
188	}
189
190	mgmtdev = devm_kzalloc(&pdev->dev, sizeof(*mgmtdev), GFP_KERNEL);
191	if (!mgmtdev) {
192		rc = -ENOMEM;
193		goto out;
194	}
195
196	mgmtdev->pdev = pdev;
197	mgmtdev->addrsize = resource_size(res);
198	mgmtdev->virtaddr = virtaddr;
199
200	rc = device_property_read_u32(&pdev->dev, "dma-channels",
201				      &mgmtdev->dma_channels);
202	if (rc) {
203		dev_err(&pdev->dev, "number of channels missing\n");
204		goto out;
205	}
206
207	rc = device_property_read_u32(&pdev->dev,
208				      "channel-reset-timeout-cycles",
209				      &mgmtdev->chreset_timeout_cycles);
210	if (rc) {
211		dev_err(&pdev->dev, "channel reset timeout missing\n");
212		goto out;
213	}
214
215	rc = device_property_read_u32(&pdev->dev, "max-write-burst-bytes",
216				      &mgmtdev->max_write_request);
217	if (rc) {
218		dev_err(&pdev->dev, "max-write-burst-bytes missing\n");
219		goto out;
220	}
221
222	if (max_write_request &&
223			(max_write_request != mgmtdev->max_write_request)) {
224		dev_info(&pdev->dev, "overriding max-write-burst-bytes: %d\n",
225			max_write_request);
226		mgmtdev->max_write_request = max_write_request;
227	} else
228		max_write_request = mgmtdev->max_write_request;
229
230	rc = device_property_read_u32(&pdev->dev, "max-read-burst-bytes",
231				      &mgmtdev->max_read_request);
232	if (rc) {
233		dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
234		goto out;
235	}
236	if (max_read_request &&
237			(max_read_request != mgmtdev->max_read_request)) {
238		dev_info(&pdev->dev, "overriding max-read-burst-bytes: %d\n",
239			max_read_request);
240		mgmtdev->max_read_request = max_read_request;
241	} else
242		max_read_request = mgmtdev->max_read_request;
243
244	rc = device_property_read_u32(&pdev->dev, "max-write-transactions",
245				      &mgmtdev->max_wr_xactions);
246	if (rc) {
247		dev_err(&pdev->dev, "max-write-transactions missing\n");
248		goto out;
249	}
250	if (max_wr_xactions &&
251			(max_wr_xactions != mgmtdev->max_wr_xactions)) {
252		dev_info(&pdev->dev, "overriding max-write-transactions: %d\n",
253			max_wr_xactions);
254		mgmtdev->max_wr_xactions = max_wr_xactions;
255	} else
256		max_wr_xactions = mgmtdev->max_wr_xactions;
257
258	rc = device_property_read_u32(&pdev->dev, "max-read-transactions",
259				      &mgmtdev->max_rd_xactions);
260	if (rc) {
261		dev_err(&pdev->dev, "max-read-transactions missing\n");
262		goto out;
263	}
264	if (max_rd_xactions &&
265			(max_rd_xactions != mgmtdev->max_rd_xactions)) {
266		dev_info(&pdev->dev, "overriding max-read-transactions: %d\n",
267			max_rd_xactions);
268		mgmtdev->max_rd_xactions = max_rd_xactions;
269	} else
270		max_rd_xactions = mgmtdev->max_rd_xactions;
271
272	mgmtdev->priority = devm_kcalloc(&pdev->dev,
273					 mgmtdev->dma_channels,
274					 sizeof(*mgmtdev->priority),
275					 GFP_KERNEL);
276	if (!mgmtdev->priority) {
277		rc = -ENOMEM;
278		goto out;
279	}
280
281	mgmtdev->weight = devm_kcalloc(&pdev->dev,
282				       mgmtdev->dma_channels,
283				       sizeof(*mgmtdev->weight), GFP_KERNEL);
284	if (!mgmtdev->weight) {
285		rc = -ENOMEM;
286		goto out;
287	}
288
289	rc = hidma_mgmt_setup(mgmtdev);
290	if (rc) {
291		dev_err(&pdev->dev, "setup failed\n");
292		goto out;
293	}
294
295	/* start the HW */
296	val = readl(mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
297	val |= 1;
298	writel(val, mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
299
300	rc = hidma_mgmt_init_sys(mgmtdev);
301	if (rc) {
302		dev_err(&pdev->dev, "sysfs setup failed\n");
303		goto out;
304	}
305
306	dev_info(&pdev->dev,
307		 "HW rev: %d.%d @ %pa with %d physical channels\n",
308		 mgmtdev->hw_version_major, mgmtdev->hw_version_minor,
309		 &res->start, mgmtdev->dma_channels);
310
311	platform_set_drvdata(pdev, mgmtdev);
312	pm_runtime_mark_last_busy(&pdev->dev);
313	pm_runtime_put_autosuspend(&pdev->dev);
314	return 0;
315out:
316	pm_runtime_put_sync_suspend(&pdev->dev);
317	pm_runtime_disable(&pdev->dev);
318	return rc;
319}
320
#if IS_ENABLED(CONFIG_ACPI)
/* ACPI ID of the HIDMA management object */
static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
	{"QCOM8060"},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids);
#endif

/* device-tree compatible string for the management interface */
static const struct of_device_id hidma_mgmt_match[] = {
	{.compatible = "qcom,hidma-mgmt-1.0",},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_mgmt_match);

/* probe-only driver; no .remove callback is provided */
static struct platform_driver hidma_mgmt_driver = {
	.probe = hidma_mgmt_probe,
	.driver = {
		   .name = "hidma-mgmt",
		   .of_match_table = hidma_mgmt_match,
		   .acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids),
	},
};
343
344#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
345static int object_counter;
346
347static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
348{
349	struct platform_device *pdev_parent = of_find_device_by_node(np);
350	struct platform_device_info pdevinfo;
351	struct device_node *child;
352	struct resource *res;
353	int ret = 0;
354
355	/* allocate a resource array */
356	res = kcalloc(3, sizeof(*res), GFP_KERNEL);
357	if (!res)
358		return -ENOMEM;
359
360	for_each_available_child_of_node(np, child) {
361		struct platform_device *new_pdev;
362
363		ret = of_address_to_resource(child, 0, &res[0]);
364		if (!ret)
365			goto out;
366
367		ret = of_address_to_resource(child, 1, &res[1]);
368		if (!ret)
369			goto out;
370
371		ret = of_irq_to_resource(child, 0, &res[2]);
372		if (ret <= 0)
373			goto out;
374
375		memset(&pdevinfo, 0, sizeof(pdevinfo));
376		pdevinfo.fwnode = &child->fwnode;
377		pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
378		pdevinfo.name = child->name;
379		pdevinfo.id = object_counter++;
380		pdevinfo.res = res;
381		pdevinfo.num_res = 3;
382		pdevinfo.data = NULL;
383		pdevinfo.size_data = 0;
384		pdevinfo.dma_mask = DMA_BIT_MASK(64);
385		new_pdev = platform_device_register_full(&pdevinfo);
386		if (IS_ERR(new_pdev)) {
387			ret = PTR_ERR(new_pdev);
388			goto out;
389		}
390		new_pdev->dev.of_node = child;
391		of_dma_configure(&new_pdev->dev, child, true);
392		/*
393		 * It is assumed that calling of_msi_configure is safe on
394		 * platforms with or without MSI support.
395		 */
396		of_msi_configure(&new_pdev->dev, child);
397	}
398
399	kfree(res);
400
401	return ret;
402
403out:
404	of_node_put(child);
405	kfree(res);
406
407	return ret;
408}
409#endif
410
/*
 * Module init: create platform devices for the DMA channels described in
 * the device tree (OF builds only), then register the management driver.
 *
 * NOTE(review): errors from hidma_mgmt_of_populate_channels() are ignored
 * here; a failed channel population does not prevent driver registration.
 */
static int __init hidma_mgmt_init(void)
{
#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
	struct device_node *child;

	for_each_matching_node(child, hidma_mgmt_match) {
		/* device tree based firmware here */
		hidma_mgmt_of_populate_channels(child);
	}
#endif
	return platform_driver_register(&hidma_mgmt_driver);

}
module_init(hidma_mgmt_init);
MODULE_LICENSE("GPL v2");
424module_init(hidma_mgmt_init);
425MODULE_LICENSE("GPL v2");