drivers/soc/qcom/qcom_aoss.c, Linux v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2019, Linaro Ltd
  4 */
  5#include <linux/clk-provider.h>
  6#include <linux/interrupt.h>
  7#include <linux/io.h>
  8#include <linux/mailbox_client.h>
  9#include <linux/module.h>
 10#include <linux/of_platform.h>
 11#include <linux/platform_device.h>
 12#include <linux/thermal.h>
 13#include <linux/slab.h>
 14#include <linux/soc/qcom/qcom_aoss.h>
 15
 16#define QMP_DESC_MAGIC			0x0
 17#define QMP_DESC_VERSION		0x4
 18#define QMP_DESC_FEATURES		0x8
 19
 20/* AOP-side offsets */
 21#define QMP_DESC_UCORE_LINK_STATE	0xc
 22#define QMP_DESC_UCORE_LINK_STATE_ACK	0x10
 23#define QMP_DESC_UCORE_CH_STATE		0x14
 24#define QMP_DESC_UCORE_CH_STATE_ACK	0x18
 25#define QMP_DESC_UCORE_MBOX_SIZE	0x1c
 26#define QMP_DESC_UCORE_MBOX_OFFSET	0x20
 27
 28/* Linux-side offsets */
 29#define QMP_DESC_MCORE_LINK_STATE	0x24
 30#define QMP_DESC_MCORE_LINK_STATE_ACK	0x28
 31#define QMP_DESC_MCORE_CH_STATE		0x2c
 32#define QMP_DESC_MCORE_CH_STATE_ACK	0x30
 33#define QMP_DESC_MCORE_MBOX_SIZE	0x34
 34#define QMP_DESC_MCORE_MBOX_OFFSET	0x38
 35
 36#define QMP_STATE_UP			GENMASK(15, 0)
 37#define QMP_STATE_DOWN			GENMASK(31, 16)
 38
 39#define QMP_MAGIC			0x4d41494c /* mail */
 40#define QMP_VERSION			1
 41
 42/* 64 bytes is enough to store the requests and provides padding to 4 bytes */
 43#define QMP_MSG_LEN			64
 44
 45#define QMP_NUM_COOLING_RESOURCES	2
 46
 47static bool qmp_cdev_max_state = 1;
 48
 49struct qmp_cooling_device {
 50	struct thermal_cooling_device *cdev;
 51	struct qmp *qmp;
 52	char *name;
 53	bool state;
 54};
 55
 56/**
 57 * struct qmp - driver state for QMP implementation
 58 * @msgram: iomem referencing the message RAM used for communication
 59 * @dev: reference to QMP device
 60 * @mbox_client: mailbox client used to ring the doorbell on transmit
 61 * @mbox_chan: mailbox channel used to ring the doorbell on transmit
 62 * @offset: offset within @msgram where messages should be written
 63 * @size: maximum size of the messages to be transmitted
 64 * @event: wait_queue for synchronization with the IRQ
 65 * @tx_lock: provides synchronization between multiple callers of qmp_send()
 66 * @qdss_clk: QDSS clock hw struct
 67 * @cooling_devs: thermal cooling devices
 68 */
 69struct qmp {
 70	void __iomem *msgram;
 71	struct device *dev;
 72
 73	struct mbox_client mbox_client;
 74	struct mbox_chan *mbox_chan;
 75
 76	size_t offset;
 77	size_t size;
 78
 79	wait_queue_head_t event;
 80
 81	struct mutex tx_lock;
 82
 83	struct clk_hw qdss_clk;
 84	struct qmp_cooling_device *cooling_devs;
 85};
 86
 87static void qmp_kick(struct qmp *qmp)
 88{
 89	mbox_send_message(qmp->mbox_chan, NULL);
 90	mbox_client_txdone(qmp->mbox_chan, 0);
 91}
 92
 93static bool qmp_magic_valid(struct qmp *qmp)
 94{
 95	return readl(qmp->msgram + QMP_DESC_MAGIC) == QMP_MAGIC;
 96}
 97
 98static bool qmp_link_acked(struct qmp *qmp)
 99{
100	return readl(qmp->msgram + QMP_DESC_MCORE_LINK_STATE_ACK) == QMP_STATE_UP;
101}
102
103static bool qmp_mcore_channel_acked(struct qmp *qmp)
104{
105	return readl(qmp->msgram + QMP_DESC_MCORE_CH_STATE_ACK) == QMP_STATE_UP;
106}
107
108static bool qmp_ucore_channel_up(struct qmp *qmp)
109{
110	return readl(qmp->msgram + QMP_DESC_UCORE_CH_STATE) == QMP_STATE_UP;
111}
112
113static int qmp_open(struct qmp *qmp)
114{
115	int ret;
116	u32 val;
117
118	if (!qmp_magic_valid(qmp)) {
119		dev_err(qmp->dev, "QMP magic doesn't match\n");
120		return -EINVAL;
121	}
122
123	val = readl(qmp->msgram + QMP_DESC_VERSION);
124	if (val != QMP_VERSION) {
125		dev_err(qmp->dev, "unsupported QMP version %d\n", val);
126		return -EINVAL;
127	}
128
129	qmp->offset = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_OFFSET);
130	qmp->size = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_SIZE);
131	if (!qmp->size) {
132		dev_err(qmp->dev, "invalid mailbox size\n");
133		return -EINVAL;
134	}
135
136	/* Ack remote core's link state */
137	val = readl(qmp->msgram + QMP_DESC_UCORE_LINK_STATE);
138	writel(val, qmp->msgram + QMP_DESC_UCORE_LINK_STATE_ACK);
139
140	/* Set local core's link state to up */
141	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
142
143	qmp_kick(qmp);
144
145	ret = wait_event_timeout(qmp->event, qmp_link_acked(qmp), HZ);
146	if (!ret) {
147		dev_err(qmp->dev, "ucore didn't ack link\n");
148		goto timeout_close_link;
149	}
150
151	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
152
153	qmp_kick(qmp);
154
155	ret = wait_event_timeout(qmp->event, qmp_ucore_channel_up(qmp), HZ);
156	if (!ret) {
157		dev_err(qmp->dev, "ucore didn't open channel\n");
158		goto timeout_close_channel;
159	}
160
161	/* Ack remote core's channel state */
162	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_UCORE_CH_STATE_ACK);
163
164	qmp_kick(qmp);
165
166	ret = wait_event_timeout(qmp->event, qmp_mcore_channel_acked(qmp), HZ);
167	if (!ret) {
168		dev_err(qmp->dev, "ucore didn't ack channel\n");
169		goto timeout_close_channel;
170	}
171
172	return 0;
173
174timeout_close_channel:
175	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
176
177timeout_close_link:
178	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
179	qmp_kick(qmp);
180
181	return -ETIMEDOUT;
182}
183
184static void qmp_close(struct qmp *qmp)
185{
186	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
187	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
188	qmp_kick(qmp);
189}
190
191static irqreturn_t qmp_intr(int irq, void *data)
192{
193	struct qmp *qmp = data;
194
195	wake_up_all(&qmp->event);
196
197	return IRQ_HANDLED;
198}
199
200static bool qmp_message_empty(struct qmp *qmp)
201{
202	return readl(qmp->msgram + qmp->offset) == 0;
203}
204
205/**
206 * qmp_send() - send a message to the AOSS
207 * @qmp: qmp context
208 * @fmt: format string for message to be sent
209 * @...: arguments for the format string
210 *
211 * Transmit message to AOSS and wait for the AOSS to acknowledge the message.
212 * data must not be longer than the mailbox size. Access is synchronized by
213 * this implementation.
214 *
215 * Return: 0 on success, negative errno on failure
216 */
217int qmp_send(struct qmp *qmp, const char *fmt, ...)
218{
219	char buf[QMP_MSG_LEN];
220	long time_left;
221	va_list args;
222	int len;
223	int ret;
224
225	if (WARN_ON(IS_ERR_OR_NULL(qmp) || !fmt))
226		return -EINVAL;
227
228	memset(buf, 0, sizeof(buf));
229	va_start(args, fmt);
230	len = vsnprintf(buf, sizeof(buf), fmt, args);
231	va_end(args);
232
233	if (WARN_ON(len >= sizeof(buf)))
234		return -EINVAL;
235
236	mutex_lock(&qmp->tx_lock);
237
238	/* The message RAM only implements 32-bit accesses */
239	__iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
240			 buf, sizeof(buf) / sizeof(u32));
241	writel(sizeof(buf), qmp->msgram + qmp->offset);
242
243	/* Read back length to confirm data written in message RAM */
244	readl(qmp->msgram + qmp->offset);
245	qmp_kick(qmp);
246
247	time_left = wait_event_interruptible_timeout(qmp->event,
248						     qmp_message_empty(qmp), HZ);
249	if (!time_left) {
250		dev_err(qmp->dev, "ucore did not ack channel\n");
251		ret = -ETIMEDOUT;
252
253		/* Clear message from buffer */
254		writel(0, qmp->msgram + qmp->offset);
255	} else {
256		ret = 0;
257	}
258
259	mutex_unlock(&qmp->tx_lock);
260
261	return ret;
262}
263EXPORT_SYMBOL_GPL(qmp_send);
264
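A minimal usage sketch, not part of this file: a hypothetical client that already holds a handle from qmp_get() and asks the AOSS to track a co-processor's load state. The function name and the "adsp" resource are illustrative assumptions; the format string is expanded with vsnprintf() into the 64-byte message that qmp_send() writes to message RAM.

static int example_notify_aoss(struct qmp *qmp, bool up)
{
	/* Illustrative request; the buffer is padded to QMP_MSG_LEN by qmp_send() above. */
	return qmp_send(qmp, "{class: image, res: load_state, name: %s, val: %s}",
			"adsp", up ? "on" : "off");
}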
265static int qmp_qdss_clk_prepare(struct clk_hw *hw)
266{
267	static const char *buf = "{class: clock, res: qdss, val: 1}";
268	struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);
269
270	return qmp_send(qmp, buf);
271}
272
273static void qmp_qdss_clk_unprepare(struct clk_hw *hw)
274{
275	static const char *buf = "{class: clock, res: qdss, val: 0}";
276	struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);
277
278	qmp_send(qmp, buf);
279}
280
281static const struct clk_ops qmp_qdss_clk_ops = {
282	.prepare = qmp_qdss_clk_prepare,
283	.unprepare = qmp_qdss_clk_unprepare,
284};
285
286static int qmp_qdss_clk_add(struct qmp *qmp)
287{
288	static const struct clk_init_data qdss_init = {
289		.ops = &qmp_qdss_clk_ops,
290		.name = "qdss",
291	};
292	int ret;
293
294	qmp->qdss_clk.init = &qdss_init;
295	ret = clk_hw_register(qmp->dev, &qmp->qdss_clk);
296	if (ret < 0) {
297		dev_err(qmp->dev, "failed to register qdss clock\n");
298		return ret;
299	}
300
301	ret = of_clk_add_hw_provider(qmp->dev->of_node, of_clk_hw_simple_get,
302				     &qmp->qdss_clk);
303	if (ret < 0) {
304		dev_err(qmp->dev, "unable to register of clk hw provider\n");
305		clk_hw_unregister(&qmp->qdss_clk);
306	}
307
308	return ret;
309}
310
311static void qmp_qdss_clk_remove(struct qmp *qmp)
312{
313	of_clk_del_provider(qmp->dev->of_node);
314	clk_hw_unregister(&qmp->qdss_clk);
315}
316
317static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev,
318				  unsigned long *state)
319{
320	*state = qmp_cdev_max_state;
321	return 0;
322}
323
324static int qmp_cdev_get_cur_state(struct thermal_cooling_device *cdev,
325				  unsigned long *state)
326{
327	struct qmp_cooling_device *qmp_cdev = cdev->devdata;
328
329	*state = qmp_cdev->state;
330	return 0;
331}
332
333static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev,
334				  unsigned long state)
335{
336	struct qmp_cooling_device *qmp_cdev = cdev->devdata;
337	bool cdev_state;
338	int ret;
339
340	/* Normalize state */
341	cdev_state = !!state;
342
343	if (qmp_cdev->state == state)
344		return 0;
345
346	ret = qmp_send(qmp_cdev->qmp, "{class: volt_flr, event:zero_temp, res:%s, value:%s}",
347		       qmp_cdev->name, cdev_state ? "on" : "off");
348	if (!ret)
349		qmp_cdev->state = cdev_state;
350
351	return ret;
352}
353
354static const struct thermal_cooling_device_ops qmp_cooling_device_ops = {
355	.get_max_state = qmp_cdev_get_max_state,
356	.get_cur_state = qmp_cdev_get_cur_state,
357	.set_cur_state = qmp_cdev_set_cur_state,
358};
359
360static int qmp_cooling_device_add(struct qmp *qmp,
361				  struct qmp_cooling_device *qmp_cdev,
362				  struct device_node *node)
363{
364	char *cdev_name = (char *)node->name;
365
366	qmp_cdev->qmp = qmp;
367	qmp_cdev->state = !qmp_cdev_max_state;
368	qmp_cdev->name = cdev_name;
369	qmp_cdev->cdev = devm_thermal_of_cooling_device_register
370				(qmp->dev, node,
371				cdev_name,
372				qmp_cdev, &qmp_cooling_device_ops);
373
374	if (IS_ERR(qmp_cdev->cdev))
375		dev_err(qmp->dev, "unable to register %s cooling device\n",
376			cdev_name);
377
378	return PTR_ERR_OR_ZERO(qmp_cdev->cdev);
379}
380
381static int qmp_cooling_devices_register(struct qmp *qmp)
382{
383	struct device_node *np, *child;
384	int count = 0;
385	int ret;
386
387	np = qmp->dev->of_node;
388
389	qmp->cooling_devs = devm_kcalloc(qmp->dev, QMP_NUM_COOLING_RESOURCES,
390					 sizeof(*qmp->cooling_devs),
391					 GFP_KERNEL);
392
393	if (!qmp->cooling_devs)
394		return -ENOMEM;
395
396	for_each_available_child_of_node(np, child) {
397		if (!of_property_present(child, "#cooling-cells"))
398			continue;
399		ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++],
400					     child);
401		if (ret) {
402			of_node_put(child);
403			goto unroll;
404		}
405	}
406
407	if (!count)
408		devm_kfree(qmp->dev, qmp->cooling_devs);
409
410	return 0;
411
412unroll:
413	while (--count >= 0)
414		thermal_cooling_device_unregister
415			(qmp->cooling_devs[count].cdev);
416	devm_kfree(qmp->dev, qmp->cooling_devs);
417
418	return ret;
419}
420
421static void qmp_cooling_devices_remove(struct qmp *qmp)
422{
423	int i;
424
425	for (i = 0; i < QMP_NUM_COOLING_RESOURCES; i++)
426		thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev);
427}
428
429/**
430 * qmp_get() - get a qmp handle from a device
431 * @dev: client device pointer
432 *
433 * Return: handle to qmp device on success, ERR_PTR() on failure
434 */
435struct qmp *qmp_get(struct device *dev)
436{
437	struct platform_device *pdev;
438	struct device_node *np;
439	struct qmp *qmp;
440
441	if (!dev || !dev->of_node)
442		return ERR_PTR(-EINVAL);
443
444	np = of_parse_phandle(dev->of_node, "qcom,qmp", 0);
445	if (!np)
446		return ERR_PTR(-ENODEV);
447
448	pdev = of_find_device_by_node(np);
449	of_node_put(np);
450	if (!pdev)
451		return ERR_PTR(-EINVAL);
452
453	qmp = platform_get_drvdata(pdev);
454
455	if (!qmp) {
456		put_device(&pdev->dev);
457		return ERR_PTR(-EPROBE_DEFER);
458	}
459	return qmp;
460}
461EXPORT_SYMBOL_GPL(qmp_get);
462
463/**
464 * qmp_put() - release a qmp handle
465 * @qmp: qmp handle obtained from qmp_get()
466 */
467void qmp_put(struct qmp *qmp)
468{
469	/*
470	 * Match get_device() inside of_find_device_by_node() in
471	 * qmp_get()
472	 */
473	if (!IS_ERR_OR_NULL(qmp))
474		put_device(qmp->dev);
475}
476EXPORT_SYMBOL_GPL(qmp_put);
477
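Taken together, qmp_get() and qmp_put() form the client-facing lifecycle: resolve the "qcom,qmp" phandle at probe time, keep the handle for later qmp_send() calls, and drop the device reference when finished. A hedged sketch of that flow in a hypothetical client probe(); the function name and message contents are placeholders:

static int example_client_probe(struct platform_device *pdev)
{
	struct qmp *qmp;
	int ret;

	/* Resolves the "qcom,qmp" phandle; may return -EPROBE_DEFER. */
	qmp = qmp_get(&pdev->dev);
	if (IS_ERR(qmp))
		return PTR_ERR(qmp);

	/* Placeholder request; real clients send class/resource pairs the AOSS understands. */
	ret = qmp_send(qmp, "{class: example, res: placeholder, val: %d}", 1);
	if (ret)
		dev_err(&pdev->dev, "AOSS request failed: %d\n", ret);

	/* Balance the reference taken in qmp_get(). */
	qmp_put(qmp);

	return ret;
}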
478static int qmp_probe(struct platform_device *pdev)
479{
480	struct qmp *qmp;
481	int irq;
482	int ret;
483
484	qmp = devm_kzalloc(&pdev->dev, sizeof(*qmp), GFP_KERNEL);
485	if (!qmp)
486		return -ENOMEM;
487
488	qmp->dev = &pdev->dev;
489	init_waitqueue_head(&qmp->event);
490	mutex_init(&qmp->tx_lock);
491
492	qmp->msgram = devm_platform_ioremap_resource(pdev, 0);
493	if (IS_ERR(qmp->msgram))
494		return PTR_ERR(qmp->msgram);
495
496	qmp->mbox_client.dev = &pdev->dev;
497	qmp->mbox_client.knows_txdone = true;
498	qmp->mbox_chan = mbox_request_channel(&qmp->mbox_client, 0);
499	if (IS_ERR(qmp->mbox_chan)) {
500		dev_err(&pdev->dev, "failed to acquire ipc mailbox\n");
501		return PTR_ERR(qmp->mbox_chan);
502	}
503
504	irq = platform_get_irq(pdev, 0);
505	ret = devm_request_irq(&pdev->dev, irq, qmp_intr, 0,
506			       "aoss-qmp", qmp);
507	if (ret < 0) {
508		dev_err(&pdev->dev, "failed to request interrupt\n");
509		goto err_free_mbox;
510	}
511
512	ret = qmp_open(qmp);
513	if (ret < 0)
514		goto err_free_mbox;
515
516	ret = qmp_qdss_clk_add(qmp);
517	if (ret)
518		goto err_close_qmp;
519
520	ret = qmp_cooling_devices_register(qmp);
521	if (ret)
522		dev_err(&pdev->dev, "failed to register aoss cooling devices\n");
523
524	platform_set_drvdata(pdev, qmp);
525
526	return 0;
527
528err_close_qmp:
529	qmp_close(qmp);
530err_free_mbox:
531	mbox_free_channel(qmp->mbox_chan);
532
533	return ret;
534}
535
536static void qmp_remove(struct platform_device *pdev)
537{
538	struct qmp *qmp = platform_get_drvdata(pdev);
539
540	qmp_qdss_clk_remove(qmp);
541	qmp_cooling_devices_remove(qmp);
542
543	qmp_close(qmp);
544	mbox_free_channel(qmp->mbox_chan);
545}
546
547static const struct of_device_id qmp_dt_match[] = {
548	{ .compatible = "qcom,sc7180-aoss-qmp", },
549	{ .compatible = "qcom,sc7280-aoss-qmp", },
550	{ .compatible = "qcom,sdm845-aoss-qmp", },
551	{ .compatible = "qcom,sm8150-aoss-qmp", },
552	{ .compatible = "qcom,sm8250-aoss-qmp", },
553	{ .compatible = "qcom,sm8350-aoss-qmp", },
554	{ .compatible = "qcom,aoss-qmp", },
555	{}
556};
557MODULE_DEVICE_TABLE(of, qmp_dt_match);
558
559static struct platform_driver qmp_driver = {
560	.driver = {
561		.name		= "qcom_aoss_qmp",
562		.of_match_table	= qmp_dt_match,
563		.suppress_bind_attrs = true,
564	},
565	.probe = qmp_probe,
566	.remove_new = qmp_remove,
567};
568module_platform_driver(qmp_driver);
569
570MODULE_DESCRIPTION("Qualcomm AOSS QMP driver");
571MODULE_LICENSE("GPL v2");
drivers/soc/qcom/qcom_aoss.c, Linux v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2019, Linaro Ltd
  4 */
  5#include <dt-bindings/power/qcom-aoss-qmp.h>
  6#include <linux/clk-provider.h>
  7#include <linux/interrupt.h>
  8#include <linux/io.h>
  9#include <linux/mailbox_client.h>
 10#include <linux/module.h>
 11#include <linux/platform_device.h>
 12#include <linux/pm_domain.h>
 13#include <linux/thermal.h>
 14#include <linux/slab.h>
 15
 16#define QMP_DESC_MAGIC			0x0
 17#define QMP_DESC_VERSION		0x4
 18#define QMP_DESC_FEATURES		0x8
 19
 20/* AOP-side offsets */
 21#define QMP_DESC_UCORE_LINK_STATE	0xc
 22#define QMP_DESC_UCORE_LINK_STATE_ACK	0x10
 23#define QMP_DESC_UCORE_CH_STATE		0x14
 24#define QMP_DESC_UCORE_CH_STATE_ACK	0x18
 25#define QMP_DESC_UCORE_MBOX_SIZE	0x1c
 26#define QMP_DESC_UCORE_MBOX_OFFSET	0x20
 27
 28/* Linux-side offsets */
 29#define QMP_DESC_MCORE_LINK_STATE	0x24
 30#define QMP_DESC_MCORE_LINK_STATE_ACK	0x28
 31#define QMP_DESC_MCORE_CH_STATE		0x2c
 32#define QMP_DESC_MCORE_CH_STATE_ACK	0x30
 33#define QMP_DESC_MCORE_MBOX_SIZE	0x34
 34#define QMP_DESC_MCORE_MBOX_OFFSET	0x38
 35
 36#define QMP_STATE_UP			GENMASK(15, 0)
 37#define QMP_STATE_DOWN			GENMASK(31, 16)
 38
 39#define QMP_MAGIC			0x4d41494c /* mail */
 40#define QMP_VERSION			1
 41
 42/* 64 bytes is enough to store the requests and provides padding to 4 bytes */
 43#define QMP_MSG_LEN			64
 44
 45#define QMP_NUM_COOLING_RESOURCES	2
 46
 47static bool qmp_cdev_init_state = 1;
 48
 49struct qmp_cooling_device {
 50	struct thermal_cooling_device *cdev;
 51	struct qmp *qmp;
 52	char *name;
 53	bool state;
 54};
 55
 56/**
 57 * struct qmp - driver state for QMP implementation
 58 * @msgram: iomem referencing the message RAM used for communication
 59 * @dev: reference to QMP device
 60 * @mbox_client: mailbox client used to ring the doorbell on transmit
 61 * @mbox_chan: mailbox channel used to ring the doorbell on transmit
 62 * @offset: offset within @msgram where messages should be written
 63 * @size: maximum size of the messages to be transmitted
 64 * @event: wait_queue for synchronization with the IRQ
 65 * @tx_lock: provides synchronization between multiple callers of qmp_send()
 66 * @qdss_clk: QDSS clock hw struct
 67 * @pd_data: genpd data
 68 */
 69struct qmp {
 70	void __iomem *msgram;
 71	struct device *dev;
 72
 73	struct mbox_client mbox_client;
 74	struct mbox_chan *mbox_chan;
 75
 76	size_t offset;
 77	size_t size;
 78
 79	wait_queue_head_t event;
 80
 81	struct mutex tx_lock;
 82
 83	struct clk_hw qdss_clk;
 84	struct genpd_onecell_data pd_data;
 85	struct qmp_cooling_device *cooling_devs;
 86};
 87
 88struct qmp_pd {
 89	struct qmp *qmp;
 90	struct generic_pm_domain pd;
 91};
 92
 93#define to_qmp_pd_resource(res) container_of(res, struct qmp_pd, pd)
 94
 95static void qmp_kick(struct qmp *qmp)
 96{
 97	mbox_send_message(qmp->mbox_chan, NULL);
 98	mbox_client_txdone(qmp->mbox_chan, 0);
 99}
100
101static bool qmp_magic_valid(struct qmp *qmp)
102{
103	return readl(qmp->msgram + QMP_DESC_MAGIC) == QMP_MAGIC;
104}
105
106static bool qmp_link_acked(struct qmp *qmp)
107{
108	return readl(qmp->msgram + QMP_DESC_MCORE_LINK_STATE_ACK) == QMP_STATE_UP;
109}
110
111static bool qmp_mcore_channel_acked(struct qmp *qmp)
112{
113	return readl(qmp->msgram + QMP_DESC_MCORE_CH_STATE_ACK) == QMP_STATE_UP;
114}
115
116static bool qmp_ucore_channel_up(struct qmp *qmp)
117{
118	return readl(qmp->msgram + QMP_DESC_UCORE_CH_STATE) == QMP_STATE_UP;
119}
120
121static int qmp_open(struct qmp *qmp)
122{
123	int ret;
124	u32 val;
125
126	if (!qmp_magic_valid(qmp)) {
127		dev_err(qmp->dev, "QMP magic doesn't match\n");
128		return -EINVAL;
129	}
130
131	val = readl(qmp->msgram + QMP_DESC_VERSION);
132	if (val != QMP_VERSION) {
133		dev_err(qmp->dev, "unsupported QMP version %d\n", val);
134		return -EINVAL;
135	}
136
137	qmp->offset = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_OFFSET);
138	qmp->size = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_SIZE);
139	if (!qmp->size) {
140		dev_err(qmp->dev, "invalid mailbox size\n");
141		return -EINVAL;
142	}
143
144	/* Ack remote core's link state */
145	val = readl(qmp->msgram + QMP_DESC_UCORE_LINK_STATE);
146	writel(val, qmp->msgram + QMP_DESC_UCORE_LINK_STATE_ACK);
147
148	/* Set local core's link state to up */
149	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
150
151	qmp_kick(qmp);
152
153	ret = wait_event_timeout(qmp->event, qmp_link_acked(qmp), HZ);
154	if (!ret) {
155		dev_err(qmp->dev, "ucore didn't ack link\n");
156		goto timeout_close_link;
157	}
158
159	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
160
161	qmp_kick(qmp);
162
163	ret = wait_event_timeout(qmp->event, qmp_ucore_channel_up(qmp), HZ);
164	if (!ret) {
165		dev_err(qmp->dev, "ucore didn't open channel\n");
166		goto timeout_close_channel;
167	}
168
169	/* Ack remote core's channel state */
170	writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_UCORE_CH_STATE_ACK);
171
172	qmp_kick(qmp);
173
174	ret = wait_event_timeout(qmp->event, qmp_mcore_channel_acked(qmp), HZ);
175	if (!ret) {
176		dev_err(qmp->dev, "ucore didn't ack channel\n");
177		goto timeout_close_channel;
178	}
179
180	return 0;
181
182timeout_close_channel:
183	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
184
185timeout_close_link:
186	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
187	qmp_kick(qmp);
188
189	return -ETIMEDOUT;
190}
191
192static void qmp_close(struct qmp *qmp)
193{
194	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
195	writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
196	qmp_kick(qmp);
197}
198
199static irqreturn_t qmp_intr(int irq, void *data)
200{
201	struct qmp *qmp = data;
202
203	wake_up_interruptible_all(&qmp->event);
204
205	return IRQ_HANDLED;
206}
207
208static bool qmp_message_empty(struct qmp *qmp)
209{
210	return readl(qmp->msgram + qmp->offset) == 0;
211}
212
213/**
214 * qmp_send() - send a message to the AOSS
215 * @qmp: qmp context
216 * @data: message to be sent
217 * @len: length of the message
218 *
219 * Transmit @data to AOSS and wait for the AOSS to acknowledge the message.
220 * @len must be a multiple of 4 and not longer than the mailbox size. Access is
221 * synchronized by this implementation.
222 *
223 * Return: 0 on success, negative errno on failure
224 */
225static int qmp_send(struct qmp *qmp, const void *data, size_t len)
226{
227	long time_left;
228	int ret;
229
230	if (WARN_ON(len + sizeof(u32) > qmp->size))
231		return -EINVAL;
232
233	if (WARN_ON(len % sizeof(u32)))
234		return -EINVAL;
235
236	mutex_lock(&qmp->tx_lock);
237
238	/* The message RAM only implements 32-bit accesses */
239	__iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
240			 data, len / sizeof(u32));
241	writel(len, qmp->msgram + qmp->offset);
242	qmp_kick(qmp);
243
244	time_left = wait_event_interruptible_timeout(qmp->event,
245						     qmp_message_empty(qmp), HZ);
246	if (!time_left) {
247		dev_err(qmp->dev, "ucore did not ack channel\n");
248		ret = -ETIMEDOUT;
249
250		/* Clear message from buffer */
251		writel(0, qmp->msgram + qmp->offset);
252	} else {
253		ret = 0;
254	}
255
256	mutex_unlock(&qmp->tx_lock);
257
258	return ret;
259}
260
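Unlike the exported v6.8 variant above, this qmp_send() is file-local and takes a pre-formatted buffer plus an explicit length, which must be a multiple of 4 and must fit in the mailbox. Callers therefore format into a fixed, zero-initialized QMP_MSG_LEN buffer and send the whole padded buffer; a hedged sketch of that convention (mirroring qmp_pd_power_toggle() further down, with an illustrative message):

static int example_send_padded(struct qmp *qmp, int val)
{
	char buf[QMP_MSG_LEN] = {};

	/* Zero-initialization keeps the 64-byte payload 32-bit aligned and padded. */
	snprintf(buf, sizeof(buf), "{class: clock, res: qdss, val: %d}", val);
	return qmp_send(qmp, buf, sizeof(buf));
}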
261static int qmp_qdss_clk_prepare(struct clk_hw *hw)
262{
263	static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 1}";
264	struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);
265
266	return qmp_send(qmp, buf, sizeof(buf));
267}
268
269static void qmp_qdss_clk_unprepare(struct clk_hw *hw)
270{
271	static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 0}";
272	struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);
273
274	qmp_send(qmp, buf, sizeof(buf));
275}
276
277static const struct clk_ops qmp_qdss_clk_ops = {
278	.prepare = qmp_qdss_clk_prepare,
279	.unprepare = qmp_qdss_clk_unprepare,
280};
281
282static int qmp_qdss_clk_add(struct qmp *qmp)
283{
284	static const struct clk_init_data qdss_init = {
285		.ops = &qmp_qdss_clk_ops,
286		.name = "qdss",
287	};
288	int ret;
289
290	qmp->qdss_clk.init = &qdss_init;
291	ret = clk_hw_register(qmp->dev, &qmp->qdss_clk);
292	if (ret < 0) {
293		dev_err(qmp->dev, "failed to register qdss clock\n");
294		return ret;
295	}
296
297	ret = of_clk_add_hw_provider(qmp->dev->of_node, of_clk_hw_simple_get,
298				     &qmp->qdss_clk);
299	if (ret < 0) {
300		dev_err(qmp->dev, "unable to register of clk hw provider\n");
301		clk_hw_unregister(&qmp->qdss_clk);
302	}
303
304	return ret;
305}
306
307static void qmp_qdss_clk_remove(struct qmp *qmp)
308{
309	of_clk_del_provider(qmp->dev->of_node);
310	clk_hw_unregister(&qmp->qdss_clk);
311}
312
313static int qmp_pd_power_toggle(struct qmp_pd *res, bool enable)
314{
315	char buf[QMP_MSG_LEN] = {};
316
317	snprintf(buf, sizeof(buf),
318		 "{class: image, res: load_state, name: %s, val: %s}",
319		 res->pd.name, enable ? "on" : "off");
320	return qmp_send(res->qmp, buf, sizeof(buf));
321}
322
323static int qmp_pd_power_on(struct generic_pm_domain *domain)
324{
325	return qmp_pd_power_toggle(to_qmp_pd_resource(domain), true);
326}
327
328static int qmp_pd_power_off(struct generic_pm_domain *domain)
329{
330	return qmp_pd_power_toggle(to_qmp_pd_resource(domain), false);
331}
332
333static const char * const sdm845_resources[] = {
334	[AOSS_QMP_LS_CDSP] = "cdsp",
335	[AOSS_QMP_LS_LPASS] = "adsp",
336	[AOSS_QMP_LS_MODEM] = "modem",
337	[AOSS_QMP_LS_SLPI] = "slpi",
338	[AOSS_QMP_LS_SPSS] = "spss",
339	[AOSS_QMP_LS_VENUS] = "venus",
340};
341
342static int qmp_pd_add(struct qmp *qmp)
343{
344	struct genpd_onecell_data *data = &qmp->pd_data;
345	struct device *dev = qmp->dev;
346	struct qmp_pd *res;
347	size_t num = ARRAY_SIZE(sdm845_resources);
348	int ret;
349	int i;
350
351	res = devm_kcalloc(dev, num, sizeof(*res), GFP_KERNEL);
352	if (!res)
353		return -ENOMEM;
354
355	data->domains = devm_kcalloc(dev, num, sizeof(*data->domains),
356				     GFP_KERNEL);
357	if (!data->domains)
358		return -ENOMEM;
359
360	for (i = 0; i < num; i++) {
361		res[i].qmp = qmp;
362		res[i].pd.name = sdm845_resources[i];
363		res[i].pd.power_on = qmp_pd_power_on;
364		res[i].pd.power_off = qmp_pd_power_off;
365
366		ret = pm_genpd_init(&res[i].pd, NULL, true);
367		if (ret < 0) {
368			dev_err(dev, "failed to init genpd\n");
369			goto unroll_genpds;
370		}
371
372		data->domains[i] = &res[i].pd;
373	}
374
375	data->num_domains = i;
376
377	ret = of_genpd_add_provider_onecell(dev->of_node, data);
378	if (ret < 0)
379		goto unroll_genpds;
380
381	return 0;
382
383unroll_genpds:
384	for (i--; i >= 0; i--)
385		pm_genpd_remove(data->domains[i]);
386
387	return ret;
388}
389
390static void qmp_pd_remove(struct qmp *qmp)
391{
392	struct genpd_onecell_data *data = &qmp->pd_data;
393	struct device *dev = qmp->dev;
394	int i;
395
396	of_genpd_del_provider(dev->of_node);
397
398	for (i = 0; i < data->num_domains; i++)
399		pm_genpd_remove(data->domains[i]);
400}
401
402static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev,
403				  unsigned long *state)
404{
405	*state = qmp_cdev_init_state;
406	return 0;
407}
408
409static int qmp_cdev_get_cur_state(struct thermal_cooling_device *cdev,
410				  unsigned long *state)
411{
412	struct qmp_cooling_device *qmp_cdev = cdev->devdata;
413
414	*state = qmp_cdev->state;
415	return 0;
416}
417
418static int qmp_cdev_set_cur_state(struct thermal_cooling_device *cdev,
419				  unsigned long state)
420{
421	struct qmp_cooling_device *qmp_cdev = cdev->devdata;
422	char buf[QMP_MSG_LEN] = {};
423	bool cdev_state;
424	int ret;
425
426	/* Normalize state */
427	cdev_state = !!state;
428
429	if (qmp_cdev->state == state)
430		return 0;
431
432	snprintf(buf, sizeof(buf),
433		 "{class: volt_flr, event:zero_temp, res:%s, value:%s}",
434			qmp_cdev->name,
435			cdev_state ? "off" : "on");
436
437	ret = qmp_send(qmp_cdev->qmp, buf, sizeof(buf));
438
439	if (!ret)
440		qmp_cdev->state = cdev_state;
441
442	return ret;
443}
444
445static struct thermal_cooling_device_ops qmp_cooling_device_ops = {
446	.get_max_state = qmp_cdev_get_max_state,
447	.get_cur_state = qmp_cdev_get_cur_state,
448	.set_cur_state = qmp_cdev_set_cur_state,
449};
450
451static int qmp_cooling_device_add(struct qmp *qmp,
452				  struct qmp_cooling_device *qmp_cdev,
453				  struct device_node *node)
454{
455	char *cdev_name = (char *)node->name;
456
457	qmp_cdev->qmp = qmp;
458	qmp_cdev->state = qmp_cdev_init_state;
459	qmp_cdev->name = cdev_name;
460	qmp_cdev->cdev = devm_thermal_of_cooling_device_register
461				(qmp->dev, node,
462				cdev_name,
463				qmp_cdev, &qmp_cooling_device_ops);
464
465	if (IS_ERR(qmp_cdev->cdev))
466		dev_err(qmp->dev, "unable to register %s cooling device\n",
467			cdev_name);
468
469	return PTR_ERR_OR_ZERO(qmp_cdev->cdev);
470}
471
472static int qmp_cooling_devices_register(struct qmp *qmp)
473{
474	struct device_node *np, *child;
475	int count = QMP_NUM_COOLING_RESOURCES;
476	int ret;
477
478	np = qmp->dev->of_node;
479
480	qmp->cooling_devs = devm_kcalloc(qmp->dev, count,
481					 sizeof(*qmp->cooling_devs),
482					 GFP_KERNEL);
483
484	if (!qmp->cooling_devs)
485		return -ENOMEM;
486
487	for_each_available_child_of_node(np, child) {
488		if (!of_find_property(child, "#cooling-cells", NULL))
489			continue;
490		ret = qmp_cooling_device_add(qmp, &qmp->cooling_devs[count++],
491					     child);
492		if (ret)
493			goto unroll;
494	}
495
496	return 0;
497
498unroll:
499	while (--count >= 0)
500		thermal_cooling_device_unregister
501			(qmp->cooling_devs[count].cdev);
502
503	return ret;
504}
505
506static void qmp_cooling_devices_remove(struct qmp *qmp)
507{
508	int i;
509
510	for (i = 0; i < QMP_NUM_COOLING_RESOURCES; i++)
511		thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev);
512}
513
514static int qmp_probe(struct platform_device *pdev)
515{
516	struct resource *res;
517	struct qmp *qmp;
518	int irq;
519	int ret;
520
521	qmp = devm_kzalloc(&pdev->dev, sizeof(*qmp), GFP_KERNEL);
522	if (!qmp)
523		return -ENOMEM;
524
525	qmp->dev = &pdev->dev;
526	init_waitqueue_head(&qmp->event);
527	mutex_init(&qmp->tx_lock);
528
529	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
530	qmp->msgram = devm_ioremap_resource(&pdev->dev, res);
531	if (IS_ERR(qmp->msgram))
532		return PTR_ERR(qmp->msgram);
533
534	qmp->mbox_client.dev = &pdev->dev;
535	qmp->mbox_client.knows_txdone = true;
536	qmp->mbox_chan = mbox_request_channel(&qmp->mbox_client, 0);
537	if (IS_ERR(qmp->mbox_chan)) {
538		dev_err(&pdev->dev, "failed to acquire ipc mailbox\n");
539		return PTR_ERR(qmp->mbox_chan);
540	}
541
542	irq = platform_get_irq(pdev, 0);
543	ret = devm_request_irq(&pdev->dev, irq, qmp_intr, IRQF_ONESHOT,
544			       "aoss-qmp", qmp);
545	if (ret < 0) {
546		dev_err(&pdev->dev, "failed to request interrupt\n");
547		goto err_free_mbox;
548	}
549
550	ret = qmp_open(qmp);
551	if (ret < 0)
552		goto err_free_mbox;
553
554	ret = qmp_qdss_clk_add(qmp);
555	if (ret)
556		goto err_close_qmp;
557
558	ret = qmp_pd_add(qmp);
559	if (ret)
560		goto err_remove_qdss_clk;
561
562	ret = qmp_cooling_devices_register(qmp);
563	if (ret)
564		dev_err(&pdev->dev, "failed to register aoss cooling devices\n");
565
566	platform_set_drvdata(pdev, qmp);
567
568	return 0;
569
570err_remove_qdss_clk:
571	qmp_qdss_clk_remove(qmp);
572err_close_qmp:
573	qmp_close(qmp);
574err_free_mbox:
575	mbox_free_channel(qmp->mbox_chan);
576
577	return ret;
578}
579
580static int qmp_remove(struct platform_device *pdev)
581{
582	struct qmp *qmp = platform_get_drvdata(pdev);
583
584	qmp_qdss_clk_remove(qmp);
585	qmp_pd_remove(qmp);
586	qmp_cooling_devices_remove(qmp);
587
588	qmp_close(qmp);
589	mbox_free_channel(qmp->mbox_chan);
590
591	return 0;
592}
593
594static const struct of_device_id qmp_dt_match[] = {
595	{ .compatible = "qcom,sc7180-aoss-qmp", },
596	{ .compatible = "qcom,sdm845-aoss-qmp", },
597	{ .compatible = "qcom,sm8150-aoss-qmp", },
598	{}
599};
600MODULE_DEVICE_TABLE(of, qmp_dt_match);
601
602static struct platform_driver qmp_driver = {
603	.driver = {
604		.name		= "qcom_aoss_qmp",
605		.of_match_table	= qmp_dt_match,
606	},
607	.probe = qmp_probe,
608	.remove	= qmp_remove,
609};
610module_platform_driver(qmp_driver);
611
612MODULE_DESCRIPTION("Qualcomm AOSS QMP driver");
613MODULE_LICENSE("GPL v2");