Linux v4.6
  1/*
  2 * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
  3 *
  4 * Copyright (c) 2014-2015 Samsung Electronics Co., Ltd.
  5 * Author : Chanwoo Choi <cw00.choi@samsung.com>
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 as
  9 * published by the Free Software Foundation.
 10 *
 11 * This driver is based on drivers/devfreq/exynos/exynos_ppmu.c
 12 */
 13
 14#include <linux/clk.h>
 15#include <linux/io.h>
 16#include <linux/kernel.h>
 17#include <linux/module.h>
 18#include <linux/mutex.h>
 19#include <linux/of_address.h>
 20#include <linux/platform_device.h>
 21#include <linux/suspend.h>
 22#include <linux/devfreq-event.h>
 23
 24#include "exynos-ppmu.h"
 25
 26struct exynos_ppmu_data {
 27	void __iomem *base;
 28	struct clk *clk;
 29};
 30
 31struct exynos_ppmu {
 32	struct devfreq_event_dev **edev;
 33	struct devfreq_event_desc *desc;
 34	unsigned int num_events;
 35
 36	struct device *dev;
 37	struct mutex lock;
 38
 39	struct exynos_ppmu_data ppmu;
 40};
 41
 42#define PPMU_EVENT(name)			\
 43	{ "ppmu-event0-"#name, PPMU_PMNCNT0 },	\
 44	{ "ppmu-event1-"#name, PPMU_PMNCNT1 },	\
 45	{ "ppmu-event2-"#name, PPMU_PMNCNT2 },	\
 46	{ "ppmu-event3-"#name, PPMU_PMNCNT3 }
 47
 48struct __exynos_ppmu_events {
 49	char *name;
 50	int id;
 51} ppmu_events[] = {
 52	/* For Exynos3250, Exynos4 and Exynos5260 */
 53	PPMU_EVENT(g3d),
 54	PPMU_EVENT(fsys),
 55
 56	/* For Exynos4 SoCs and Exynos3250 */
 57	PPMU_EVENT(dmc0),
 58	PPMU_EVENT(dmc1),
 59	PPMU_EVENT(cpu),
 60	PPMU_EVENT(rightbus),
 61	PPMU_EVENT(leftbus),
 62	PPMU_EVENT(lcd0),
 63	PPMU_EVENT(camif),
 64
 65	/* Only for Exynos3250 and Exynos5260 */
 66	PPMU_EVENT(mfc),
 67
 68	/* Only for Exynos4 SoCs */
 69	PPMU_EVENT(mfc-left),
 70	PPMU_EVENT(mfc-right),
 71
 72	/* Only for Exynos5260 SoCs */
 73	PPMU_EVENT(drex0-s0),
 74	PPMU_EVENT(drex0-s1),
 75	PPMU_EVENT(drex1-s0),
 76	PPMU_EVENT(drex1-s1),
 77	PPMU_EVENT(eagle),
 78	PPMU_EVENT(kfc),
 79	PPMU_EVENT(isp),
 80	PPMU_EVENT(fimc),
 81	PPMU_EVENT(gscl),
 82	PPMU_EVENT(mscl),
 83	PPMU_EVENT(fimd0x),
 84	PPMU_EVENT(fimd1x),
 85
 86	/* Only for Exynos5433 SoCs */
 87	PPMU_EVENT(d0-cpu),
 88	PPMU_EVENT(d0-general),
 89	PPMU_EVENT(d0-rt),
 90	PPMU_EVENT(d1-cpu),
 91	PPMU_EVENT(d1-general),
 92	PPMU_EVENT(d1-rt),
 93
 94	{ /* sentinel */ },
 95};
 96
 97static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
 98{
 99	int i;
100
101	for (i = 0; i < ARRAY_SIZE(ppmu_events); i++)
102		if (!strcmp(edev->desc->name, ppmu_events[i].name))
103			return ppmu_events[i].id;
104
105	return -EINVAL;
106}
107
108/*
109 * The devfreq-event ops structure for PPMU v1.1
110 */
111static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
112{
113	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
114	u32 pmnc;
115
116	/* Disable all counters */
117	__raw_writel(PPMU_CCNT_MASK |
118		     PPMU_PMCNT0_MASK |
119		     PPMU_PMCNT1_MASK |
120		     PPMU_PMCNT2_MASK |
121		     PPMU_PMCNT3_MASK,
122		     info->ppmu.base + PPMU_CNTENC);
123
124	/* Disable PPMU */
125	pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
126	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
127	__raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
128
129	return 0;
130}
131
132static int exynos_ppmu_set_event(struct devfreq_event_dev *edev)
133{
134	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
135	int id = exynos_ppmu_find_ppmu_id(edev);
136	u32 pmnc, cntens;
137
138	if (id < 0)
139		return id;
140
141	/* Enable specific counter */
142	cntens = __raw_readl(info->ppmu.base + PPMU_CNTENS);
143	cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
144	__raw_writel(cntens, info->ppmu.base + PPMU_CNTENS);
145
146	/* Set the event of Read/Write data count  */
147	__raw_writel(PPMU_RO_DATA_CNT | PPMU_WO_DATA_CNT,
148			info->ppmu.base + PPMU_BEVTxSEL(id));
149
150	/* Reset cycle counter/performance counter and enable PPMU */
151	pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
152	pmnc &= ~(PPMU_PMNC_ENABLE_MASK
153			| PPMU_PMNC_COUNTER_RESET_MASK
154			| PPMU_PMNC_CC_RESET_MASK);
155	pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
156	pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
157	pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
158	__raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
159
160	return 0;
161}
162
163static int exynos_ppmu_get_event(struct devfreq_event_dev *edev,
164				struct devfreq_event_data *edata)
165{
166	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
167	int id = exynos_ppmu_find_ppmu_id(edev);
168	u32 pmnc, cntenc;
169
170	if (id < 0)
171		return -EINVAL;
172
173	/* Disable PPMU */
174	pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
175	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
176	__raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
177
178	/* Read cycle count */
179	edata->total_count = __raw_readl(info->ppmu.base + PPMU_CCNT);
180
181	/* Read performance count */
182	switch (id) {
183	case PPMU_PMNCNT0:
184	case PPMU_PMNCNT1:
185	case PPMU_PMNCNT2:
186		edata->load_count
187			= __raw_readl(info->ppmu.base + PPMU_PMNCT(id));
188		break;
189	case PPMU_PMNCNT3:
190		edata->load_count =
191			((__raw_readl(info->ppmu.base + PPMU_PMCNT3_HIGH) << 8)
192			| __raw_readl(info->ppmu.base + PPMU_PMCNT3_LOW));
193		break;
194	default:
195		return -EINVAL;
196	}
197
198	/* Disable specific counter */
199	cntenc = __raw_readl(info->ppmu.base + PPMU_CNTENC);
200	cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
201	__raw_writel(cntenc, info->ppmu.base + PPMU_CNTENC);
202
203	dev_dbg(&edev->dev, "%s (event: %ld/%ld)\n", edev->desc->name,
204					edata->load_count, edata->total_count);
205
206	return 0;
207}
208
209static const struct devfreq_event_ops exynos_ppmu_ops = {
210	.disable = exynos_ppmu_disable,
211	.set_event = exynos_ppmu_set_event,
212	.get_event = exynos_ppmu_get_event,
213};
214
215/*
216 * The devfreq-event ops structure for PPMU v2.0
217 */
218static int exynos_ppmu_v2_disable(struct devfreq_event_dev *edev)
219{
220	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
221	u32 pmnc, clear;
222
223	/* Disable all counters */
224	clear = (PPMU_CCNT_MASK | PPMU_PMCNT0_MASK | PPMU_PMCNT1_MASK
225		| PPMU_PMCNT2_MASK | PPMU_PMCNT3_MASK);
226
227	__raw_writel(clear, info->ppmu.base + PPMU_V2_FLAG);
228	__raw_writel(clear, info->ppmu.base + PPMU_V2_INTENC);
229	__raw_writel(clear, info->ppmu.base + PPMU_V2_CNTENC);
230	__raw_writel(clear, info->ppmu.base + PPMU_V2_CNT_RESET);
231
232	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG0);
233	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG1);
234	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG2);
235	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_RESULT);
236	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CNT_AUTO);
237	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV0_TYPE);
238	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV1_TYPE);
239	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV2_TYPE);
240	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV3_TYPE);
241	__raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_V);
242	__raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_A);
243	__raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_V);
244	__raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_A);
245	__raw_writel(0x0, info->ppmu.base + PPMU_V2_INTERRUPT_RESET);
246
247	/* Disable PPMU */
248	pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
249	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
250	__raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
251
252	return 0;
253}
254
255static int exynos_ppmu_v2_set_event(struct devfreq_event_dev *edev)
256{
257	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
258	int id = exynos_ppmu_find_ppmu_id(edev);
259	u32 pmnc, cntens;
260
261	/* Enable all counters */
262	cntens = __raw_readl(info->ppmu.base + PPMU_V2_CNTENS);
263	cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
264	__raw_writel(cntens, info->ppmu.base + PPMU_V2_CNTENS);
265
266	/* Set the event of Read/Write data count  */
267	switch (id) {
268	case PPMU_PMNCNT0:
269	case PPMU_PMNCNT1:
270	case PPMU_PMNCNT2:
271		__raw_writel(PPMU_V2_RO_DATA_CNT | PPMU_V2_WO_DATA_CNT,
272				info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
273		break;
274	case PPMU_PMNCNT3:
275		__raw_writel(PPMU_V2_EVT3_RW_DATA_CNT,
276				info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
277		break;
278	}
279
280	/* Reset cycle counter/performance counter and enable PPMU */
281	pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
282	pmnc &= ~(PPMU_PMNC_ENABLE_MASK
283			| PPMU_PMNC_COUNTER_RESET_MASK
284			| PPMU_PMNC_CC_RESET_MASK
285			| PPMU_PMNC_CC_DIVIDER_MASK
286			| PPMU_V2_PMNC_START_MODE_MASK);
287	pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
288	pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
289	pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
290	pmnc |= (PPMU_V2_MODE_MANUAL << PPMU_V2_PMNC_START_MODE_SHIFT);
291	__raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
292
293	return 0;
294}
295
296static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev,
297				    struct devfreq_event_data *edata)
298{
299	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
300	int id = exynos_ppmu_find_ppmu_id(edev);
301	u32 pmnc, cntenc;
302	u32 pmcnt_high, pmcnt_low;
303	u64 load_count = 0;
304
305	/* Disable PPMU */
306	pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
307	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
308	__raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
309
310	/* Read cycle count and performance count */
311	edata->total_count = __raw_readl(info->ppmu.base + PPMU_V2_CCNT);
312
313	switch (id) {
314	case PPMU_PMNCNT0:
315	case PPMU_PMNCNT1:
316	case PPMU_PMNCNT2:
317		load_count = __raw_readl(info->ppmu.base + PPMU_V2_PMNCT(id));
318		break;
319	case PPMU_PMNCNT3:
320		pmcnt_high = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_HIGH);
321		pmcnt_low = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_LOW);
322		load_count = ((u64)((pmcnt_high & 0xff)) << 32)
323			   + (u64)pmcnt_low;
324		break;
325	}
326	edata->load_count = load_count;
327
328	/* Disable all counters */
329	cntenc = __raw_readl(info->ppmu.base + PPMU_V2_CNTENC);
330	cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
331	__raw_writel(cntenc, info->ppmu.base + PPMU_V2_CNTENC);
332
333	dev_dbg(&edev->dev, "%25s (load: %ld / %ld)\n", edev->desc->name,
334					edata->load_count, edata->total_count);
335	return 0;
336}
337
338static const struct devfreq_event_ops exynos_ppmu_v2_ops = {
339	.disable = exynos_ppmu_v2_disable,
340	.set_event = exynos_ppmu_v2_set_event,
341	.get_event = exynos_ppmu_v2_get_event,
342};
343
344static const struct of_device_id exynos_ppmu_id_match[] = {
345	{
346		.compatible = "samsung,exynos-ppmu",
347		.data = (void *)&exynos_ppmu_ops,
348	}, {
349		.compatible = "samsung,exynos-ppmu-v2",
350		.data = (void *)&exynos_ppmu_v2_ops,
351	},
352	{ /* sentinel */ },
353};
354
355static struct devfreq_event_ops *exynos_bus_get_ops(struct device_node *np)
356{
357	const struct of_device_id *match;
358
359	match = of_match_node(exynos_ppmu_id_match, np);
360	return (struct devfreq_event_ops *)match->data;
361}
362
363static int of_get_devfreq_events(struct device_node *np,
364				 struct exynos_ppmu *info)
365{
366	struct devfreq_event_desc *desc;
367	struct devfreq_event_ops *event_ops;
368	struct device *dev = info->dev;
369	struct device_node *events_np, *node;
370	int i, j, count;
371
372	events_np = of_get_child_by_name(np, "events");
373	if (!events_np) {
374		dev_err(dev,
375			"failed to get child node of devfreq-event devices\n");
376		return -EINVAL;
377	}
378	event_ops = exynos_bus_get_ops(np);
379
380	count = of_get_child_count(events_np);
381	desc = devm_kzalloc(dev, sizeof(*desc) * count, GFP_KERNEL);
382	if (!desc)
383		return -ENOMEM;
384	info->num_events = count;
385
386	j = 0;
387	for_each_child_of_node(events_np, node) {
388		for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) {
389			if (!ppmu_events[i].name)
390				continue;
391
392			if (!of_node_cmp(node->name, ppmu_events[i].name))
393				break;
394		}
395
396		if (i == ARRAY_SIZE(ppmu_events)) {
397			dev_warn(dev,
398				"don't know how to configure events : %s\n",
399				node->name);
400			continue;
401		}
402
403		desc[j].ops = event_ops;
404		desc[j].driver_data = info;
405
406		of_property_read_string(node, "event-name", &desc[j].name);
407
408		j++;
409
410		of_node_put(node);
411	}
412	info->desc = desc;
413
414	of_node_put(events_np);
415
416	return 0;
417}
418
419static int exynos_ppmu_parse_dt(struct exynos_ppmu *info)
420{
421	struct device *dev = info->dev;
422	struct device_node *np = dev->of_node;
423	int ret = 0;
424
425	if (!np) {
426		dev_err(dev, "failed to find devicetree node\n");
427		return -EINVAL;
428	}
429
430	/* Maps the memory mapped IO to control PPMU register */
431	info->ppmu.base = of_iomap(np, 0);
432	if (IS_ERR_OR_NULL(info->ppmu.base)) {
433		dev_err(dev, "failed to map memory region\n");
434		return -ENOMEM;
435	}
436
437	info->ppmu.clk = devm_clk_get(dev, "ppmu");
438	if (IS_ERR(info->ppmu.clk)) {
439		info->ppmu.clk = NULL;
440		dev_warn(dev, "cannot get PPMU clock\n");
441	}
442
443	ret = of_get_devfreq_events(np, info);
444	if (ret < 0) {
445		dev_err(dev, "failed to parse exynos ppmu dt node\n");
446		goto err;
447	}
448
449	return 0;
450
451err:
452	iounmap(info->ppmu.base);
453
454	return ret;
455}
456
457static int exynos_ppmu_probe(struct platform_device *pdev)
458{
459	struct exynos_ppmu *info;
460	struct devfreq_event_dev **edev;
461	struct devfreq_event_desc *desc;
462	int i, ret = 0, size;
463
464	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
465	if (!info)
466		return -ENOMEM;
467
468	mutex_init(&info->lock);
469	info->dev = &pdev->dev;
470
471	/* Parse dt data to get resource */
472	ret = exynos_ppmu_parse_dt(info);
473	if (ret < 0) {
474		dev_err(&pdev->dev,
475			"failed to parse devicetree for resource\n");
476		return ret;
477	}
478	desc = info->desc;
479
480	size = sizeof(struct devfreq_event_dev *) * info->num_events;
481	info->edev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
482	if (!info->edev) {
483		dev_err(&pdev->dev,
484			"failed to allocate memory devfreq-event devices\n");
485		return -ENOMEM;
486	}
487	edev = info->edev;
488	platform_set_drvdata(pdev, info);
489
490	for (i = 0; i < info->num_events; i++) {
491		edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
492		if (IS_ERR(edev[i])) {
493			ret = PTR_ERR(edev[i]);
494			dev_err(&pdev->dev,
495				"failed to add devfreq-event device\n");
496			goto err;
497		}
498	}
499
500	clk_prepare_enable(info->ppmu.clk);
501
502	return 0;
503err:
504	iounmap(info->ppmu.base);
505
506	return ret;
507}
508
509static int exynos_ppmu_remove(struct platform_device *pdev)
510{
511	struct exynos_ppmu *info = platform_get_drvdata(pdev);
512
513	clk_disable_unprepare(info->ppmu.clk);
514	iounmap(info->ppmu.base);
515
516	return 0;
517}
518
519static struct platform_driver exynos_ppmu_driver = {
520	.probe	= exynos_ppmu_probe,
521	.remove	= exynos_ppmu_remove,
522	.driver = {
523		.name	= "exynos-ppmu",
524		.of_match_table = exynos_ppmu_id_match,
525	},
526};
527module_platform_driver(exynos_ppmu_driver);
528
529MODULE_DESCRIPTION("Exynos PPMU(Platform Performance Monitoring Unit) driver");
530MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
531MODULE_LICENSE("GPL");
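
For context, a rough consumer-side sketch (not part of exynos-ppmu.c) of how a devfreq driver typically drives one of these PPMU event devices. devfreq_event_set_event(), devfreq_event_get_event() and struct devfreq_event_data are the real devfreq-event API backed by the ops above; the helper ppmu_sample() and its error handling are illustrative assumptions only.

#include <linux/devfreq-event.h>

/*
 * Illustrative sketch: sample one interval from a PPMU devfreq-event device.
 * The caller is assumed to have obtained @edev already (for example via
 * devfreq_event_get_edev_by_phandle() from its own devicetree node).
 */
static int ppmu_sample(struct devfreq_event_dev *edev,
		       unsigned long *busy, unsigned long *total)
{
	struct devfreq_event_data edata;
	int ret;

	/* Arm and start the counters; backed by exynos_ppmu_set_event(). */
	ret = devfreq_event_set_event(edev);
	if (ret < 0)
		return ret;

	/* ... the workload runs for one sampling period ... */

	/* Stop counting and read back; backed by exynos_ppmu_get_event(). */
	ret = devfreq_event_get_event(edev, &edata);
	if (ret < 0)
		return ret;

	*busy = edata.load_count;	/* read/write data count */
	*total = edata.total_count;	/* cycle count (CCNT) */

	return 0;
}
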
Linux v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
  4 *
  5 * Copyright (c) 2014-2015 Samsung Electronics Co., Ltd.
  6 * Author : Chanwoo Choi <cw00.choi@samsung.com>
  7 *
  8 * This driver is based on drivers/devfreq/exynos/exynos_ppmu.c
  9 */
 10
 11#include <linux/clk.h>
 12#include <linux/io.h>
 13#include <linux/kernel.h>
 14#include <linux/module.h>
 15#include <linux/of_address.h>
 16#include <linux/of_device.h>
 17#include <linux/platform_device.h>
 18#include <linux/regmap.h>
 19#include <linux/suspend.h>
 20#include <linux/devfreq-event.h>
 21
 22#include "exynos-ppmu.h"
 23
 24enum exynos_ppmu_type {
 25	EXYNOS_TYPE_PPMU,
 26	EXYNOS_TYPE_PPMU_V2,
 27};
 28
 29struct exynos_ppmu_data {
 30	struct clk *clk;
 31};
 32
 33struct exynos_ppmu {
 34	struct devfreq_event_dev **edev;
 35	struct devfreq_event_desc *desc;
 36	unsigned int num_events;
 37
 38	struct device *dev;
 39	struct regmap *regmap;
 40
 41	struct exynos_ppmu_data ppmu;
 42	enum exynos_ppmu_type ppmu_type;
 43};
 44
 45#define PPMU_EVENT(name)			\
 46	{ "ppmu-event0-"#name, PPMU_PMNCNT0 },	\
 47	{ "ppmu-event1-"#name, PPMU_PMNCNT1 },	\
 48	{ "ppmu-event2-"#name, PPMU_PMNCNT2 },	\
 49	{ "ppmu-event3-"#name, PPMU_PMNCNT3 }
 50
 51static struct __exynos_ppmu_events {
 52	char *name;
 53	int id;
 54} ppmu_events[] = {
 55	/* For Exynos3250, Exynos4 and Exynos5260 */
 56	PPMU_EVENT(g3d),
 57	PPMU_EVENT(fsys),
 58
 59	/* For Exynos4 SoCs and Exynos3250 */
 60	PPMU_EVENT(dmc0),
 61	PPMU_EVENT(dmc1),
 62	PPMU_EVENT(cpu),
 63	PPMU_EVENT(rightbus),
 64	PPMU_EVENT(leftbus),
 65	PPMU_EVENT(lcd0),
 66	PPMU_EVENT(camif),
 67
 68	/* Only for Exynos3250 and Exynos5260 */
 69	PPMU_EVENT(mfc),
 70
 71	/* Only for Exynos4 SoCs */
 72	PPMU_EVENT(mfc-left),
 73	PPMU_EVENT(mfc-right),
 74
 75	/* Only for Exynos5260 SoCs */
 76	PPMU_EVENT(drex0-s0),
 77	PPMU_EVENT(drex0-s1),
 78	PPMU_EVENT(drex1-s0),
 79	PPMU_EVENT(drex1-s1),
 80	PPMU_EVENT(eagle),
 81	PPMU_EVENT(kfc),
 82	PPMU_EVENT(isp),
 83	PPMU_EVENT(fimc),
 84	PPMU_EVENT(gscl),
 85	PPMU_EVENT(mscl),
 86	PPMU_EVENT(fimd0x),
 87	PPMU_EVENT(fimd1x),
 88
 89	/* Only for Exynos5433 SoCs */
 90	PPMU_EVENT(d0-cpu),
 91	PPMU_EVENT(d0-general),
 92	PPMU_EVENT(d0-rt),
 93	PPMU_EVENT(d1-cpu),
 94	PPMU_EVENT(d1-general),
 95	PPMU_EVENT(d1-rt),
 96
 97	/* For Exynos5422 SoC */
 98	PPMU_EVENT(dmc0_0),
 99	PPMU_EVENT(dmc0_1),
100	PPMU_EVENT(dmc1_0),
101	PPMU_EVENT(dmc1_1),
102};
103
104static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
105{
106	int i;
107
108	for (i = 0; i < ARRAY_SIZE(ppmu_events); i++)
109		if (!strcmp(edev->desc->name, ppmu_events[i].name))
110			return ppmu_events[i].id;
111
112	return -EINVAL;
113}
114
115/*
116 * The devfreq-event ops structure for PPMU v1.1
117 */
118static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
119{
120	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
121	int ret;
122	u32 pmnc;
123
124	/* Disable all counters */
125	ret = regmap_write(info->regmap, PPMU_CNTENC,
126				PPMU_CCNT_MASK |
127				PPMU_PMCNT0_MASK |
128				PPMU_PMCNT1_MASK |
129				PPMU_PMCNT2_MASK |
130				PPMU_PMCNT3_MASK);
131	if (ret < 0)
132		return ret;
133
134	/* Disable PPMU */
135	ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
136	if (ret < 0)
137		return ret;
138
139	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
140	ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
141	if (ret < 0)
142		return ret;
143
144	return 0;
145}
146
147static int exynos_ppmu_set_event(struct devfreq_event_dev *edev)
148{
149	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
150	int id = exynos_ppmu_find_ppmu_id(edev);
151	int ret;
152	u32 pmnc, cntens;
153
154	if (id < 0)
155		return id;
156
157	/* Enable specific counter */
158	ret = regmap_read(info->regmap, PPMU_CNTENS, &cntens);
159	if (ret < 0)
160		return ret;
161
162	cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
163	ret = regmap_write(info->regmap, PPMU_CNTENS, cntens);
164	if (ret < 0)
165		return ret;
166
167	/* Set the event of proper data type monitoring */
168	ret = regmap_write(info->regmap, PPMU_BEVTxSEL(id),
169			   edev->desc->event_type);
170	if (ret < 0)
171		return ret;
172
173	/* Reset cycle counter/performance counter and enable PPMU */
174	ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
175	if (ret < 0)
176		return ret;
177
178	pmnc &= ~(PPMU_PMNC_ENABLE_MASK
179			| PPMU_PMNC_COUNTER_RESET_MASK
180			| PPMU_PMNC_CC_RESET_MASK);
181	pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
182	pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
183	pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
184	ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
185	if (ret < 0)
186		return ret;
187
188	return 0;
189}
190
191static int exynos_ppmu_get_event(struct devfreq_event_dev *edev,
192				struct devfreq_event_data *edata)
193{
194	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
195	int id = exynos_ppmu_find_ppmu_id(edev);
196	unsigned int total_count, load_count;
197	unsigned int pmcnt3_high, pmcnt3_low;
198	unsigned int pmnc, cntenc;
199	int ret;
200
201	if (id < 0)
202		return -EINVAL;
203
204	/* Disable PPMU */
205	ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
206	if (ret < 0)
207		return ret;
208
209	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
210	ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
211	if (ret < 0)
212		return ret;
213
214	/* Read cycle count */
215	ret = regmap_read(info->regmap, PPMU_CCNT, &total_count);
216	if (ret < 0)
217		return ret;
218	edata->total_count = total_count;
219
220	/* Read performance count */
221	switch (id) {
222	case PPMU_PMNCNT0:
223	case PPMU_PMNCNT1:
224	case PPMU_PMNCNT2:
225		ret = regmap_read(info->regmap, PPMU_PMNCT(id), &load_count);
226		if (ret < 0)
227			return ret;
228		edata->load_count = load_count;
229		break;
230	case PPMU_PMNCNT3:
231		ret = regmap_read(info->regmap, PPMU_PMCNT3_HIGH, &pmcnt3_high);
232		if (ret < 0)
233			return ret;
234
235		ret = regmap_read(info->regmap, PPMU_PMCNT3_LOW, &pmcnt3_low);
236		if (ret < 0)
237			return ret;
238
239		edata->load_count = ((pmcnt3_high << 8) | pmcnt3_low);
240		break;
241	default:
242		return -EINVAL;
243	}
244
245	/* Disable specific counter */
246	ret = regmap_read(info->regmap, PPMU_CNTENC, &cntenc);
247	if (ret < 0)
248		return ret;
249
250	cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
251	ret = regmap_write(info->regmap, PPMU_CNTENC, cntenc);
252	if (ret < 0)
253		return ret;
254
255	dev_dbg(&edev->dev, "%s (event: %ld/%ld)\n", edev->desc->name,
256					edata->load_count, edata->total_count);
257
258	return 0;
259}
260
261static const struct devfreq_event_ops exynos_ppmu_ops = {
262	.disable = exynos_ppmu_disable,
263	.set_event = exynos_ppmu_set_event,
264	.get_event = exynos_ppmu_get_event,
265};
266
267/*
268 * The devfreq-event ops structure for PPMU v2.0
269 */
270static int exynos_ppmu_v2_disable(struct devfreq_event_dev *edev)
271{
272	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
273	int ret;
274	u32 pmnc, clear;
275
276	/* Disable all counters */
277	clear = (PPMU_CCNT_MASK | PPMU_PMCNT0_MASK | PPMU_PMCNT1_MASK
278		| PPMU_PMCNT2_MASK | PPMU_PMCNT3_MASK);
279	ret = regmap_write(info->regmap, PPMU_V2_FLAG, clear);
280	if (ret < 0)
281		return ret;
282
283	ret = regmap_write(info->regmap, PPMU_V2_INTENC, clear);
284	if (ret < 0)
285		return ret;
286
287	ret = regmap_write(info->regmap, PPMU_V2_CNTENC, clear);
288	if (ret < 0)
289		return ret;
290
291	ret = regmap_write(info->regmap, PPMU_V2_CNT_RESET, clear);
292	if (ret < 0)
293		return ret;
294
295	ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG0, 0x0);
296	if (ret < 0)
297		return ret;
298
299	ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG1, 0x0);
300	if (ret < 0)
301		return ret;
302
303	ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG2, 0x0);
304	if (ret < 0)
305		return ret;
306
307	ret = regmap_write(info->regmap, PPMU_V2_CIG_RESULT, 0x0);
308	if (ret < 0)
309		return ret;
310
311	ret = regmap_write(info->regmap, PPMU_V2_CNT_AUTO, 0x0);
312	if (ret < 0)
313		return ret;
314
315	ret = regmap_write(info->regmap, PPMU_V2_CH_EV0_TYPE, 0x0);
316	if (ret < 0)
317		return ret;
318
319	ret = regmap_write(info->regmap, PPMU_V2_CH_EV1_TYPE, 0x0);
320	if (ret < 0)
321		return ret;
322
323	ret = regmap_write(info->regmap, PPMU_V2_CH_EV2_TYPE, 0x0);
324	if (ret < 0)
325		return ret;
326
327	ret = regmap_write(info->regmap, PPMU_V2_CH_EV3_TYPE, 0x0);
328	if (ret < 0)
329		return ret;
330
331	ret = regmap_write(info->regmap, PPMU_V2_SM_ID_V, 0x0);
332	if (ret < 0)
333		return ret;
334
335	ret = regmap_write(info->regmap, PPMU_V2_SM_ID_A, 0x0);
336	if (ret < 0)
337		return ret;
338
339	ret = regmap_write(info->regmap, PPMU_V2_SM_OTHERS_V, 0x0);
340	if (ret < 0)
341		return ret;
342
343	ret = regmap_write(info->regmap, PPMU_V2_SM_OTHERS_A, 0x0);
344	if (ret < 0)
345		return ret;
346
347	ret = regmap_write(info->regmap, PPMU_V2_INTERRUPT_RESET, 0x0);
348	if (ret < 0)
349		return ret;
350
351	/* Disable PPMU */
352	ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
353	if (ret < 0)
354		return ret;
355
356	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
357	ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
358	if (ret < 0)
359		return ret;
360
361	return 0;
362}
363
364static int exynos_ppmu_v2_set_event(struct devfreq_event_dev *edev)
365{
366	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
367	unsigned int pmnc, cntens;
368	int id = exynos_ppmu_find_ppmu_id(edev);
369	int ret;
370
371	/* Enable all counters */
372	ret = regmap_read(info->regmap, PPMU_V2_CNTENS, &cntens);
373	if (ret < 0)
374		return ret;
375
376	cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
377	ret = regmap_write(info->regmap, PPMU_V2_CNTENS, cntens);
378	if (ret < 0)
379		return ret;
380
381	/* Set the event of proper data type monitoring */
382	ret = regmap_write(info->regmap, PPMU_V2_CH_EVx_TYPE(id),
383			   edev->desc->event_type);
384	if (ret < 0)
385		return ret;
386
387	/* Reset cycle counter/performance counter and enable PPMU */
388	ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
389	if (ret < 0)
390		return ret;
391
392	pmnc &= ~(PPMU_PMNC_ENABLE_MASK
393			| PPMU_PMNC_COUNTER_RESET_MASK
394			| PPMU_PMNC_CC_RESET_MASK
395			| PPMU_PMNC_CC_DIVIDER_MASK
396			| PPMU_V2_PMNC_START_MODE_MASK);
397	pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
398	pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
399	pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
400	pmnc |= (PPMU_V2_MODE_MANUAL << PPMU_V2_PMNC_START_MODE_SHIFT);
401
402	ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
403	if (ret < 0)
404		return ret;
405
406	return 0;
407}
408
409static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev,
410				    struct devfreq_event_data *edata)
411{
412	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
413	int id = exynos_ppmu_find_ppmu_id(edev);
414	int ret;
415	unsigned int pmnc, cntenc;
416	unsigned int pmcnt_high, pmcnt_low;
417	unsigned int total_count, count;
418	unsigned long load_count = 0;
419
420	/* Disable PPMU */
421	ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
422	if (ret < 0)
423		return ret;
424
425	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
426	ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
427	if (ret < 0)
428		return ret;
429
430	/* Read cycle count and performance count */
431	ret = regmap_read(info->regmap, PPMU_V2_CCNT, &total_count);
432	if (ret < 0)
433		return ret;
434	edata->total_count = total_count;
435
436	switch (id) {
437	case PPMU_PMNCNT0:
438	case PPMU_PMNCNT1:
439	case PPMU_PMNCNT2:
440		ret = regmap_read(info->regmap, PPMU_V2_PMNCT(id), &count);
441		if (ret < 0)
442			return ret;
443		load_count = count;
444		break;
445	case PPMU_PMNCNT3:
446		ret = regmap_read(info->regmap, PPMU_V2_PMCNT3_HIGH,
447						&pmcnt_high);
448		if (ret < 0)
449			return ret;
450
451		ret = regmap_read(info->regmap, PPMU_V2_PMCNT3_LOW, &pmcnt_low);
452		if (ret < 0)
453			return ret;
454
455		load_count = ((u64)((pmcnt_high & 0xff)) << 32)+ (u64)pmcnt_low;
456		break;
457	}
458	edata->load_count = load_count;
459
460	/* Disable all counters */
461	ret = regmap_read(info->regmap, PPMU_V2_CNTENC, &cntenc);
462	if (ret < 0)
463		return 0;
464
465	cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
466	ret = regmap_write(info->regmap, PPMU_V2_CNTENC, cntenc);
467	if (ret < 0)
468		return ret;
469
470	dev_dbg(&edev->dev, "%25s (load: %ld / %ld)\n", edev->desc->name,
471					edata->load_count, edata->total_count);
472	return 0;
473}
474
475static const struct devfreq_event_ops exynos_ppmu_v2_ops = {
476	.disable = exynos_ppmu_v2_disable,
477	.set_event = exynos_ppmu_v2_set_event,
478	.get_event = exynos_ppmu_v2_get_event,
479};
480
481static const struct of_device_id exynos_ppmu_id_match[] = {
482	{
483		.compatible = "samsung,exynos-ppmu",
484		.data = (void *)EXYNOS_TYPE_PPMU,
485	}, {
486		.compatible = "samsung,exynos-ppmu-v2",
487		.data = (void *)EXYNOS_TYPE_PPMU_V2,
488	},
489	{ /* sentinel */ },
490};
491MODULE_DEVICE_TABLE(of, exynos_ppmu_id_match);
492
493static int of_get_devfreq_events(struct device_node *np,
494				 struct exynos_ppmu *info)
495{
496	struct devfreq_event_desc *desc;
497	struct device *dev = info->dev;
498	struct device_node *events_np, *node;
499	int i, j, count;
500	const struct of_device_id *of_id;
501	int ret;
502
503	events_np = of_get_child_by_name(np, "events");
504	if (!events_np) {
505		dev_err(dev,
506			"failed to get child node of devfreq-event devices\n");
507		return -EINVAL;
508	}
509
510	count = of_get_child_count(events_np);
511	desc = devm_kcalloc(dev, count, sizeof(*desc), GFP_KERNEL);
512	if (!desc)
513		return -ENOMEM;
514	info->num_events = count;
515
516	of_id = of_match_device(exynos_ppmu_id_match, dev);
517	if (of_id)
518		info->ppmu_type = (enum exynos_ppmu_type)of_id->data;
519	else
520		return -EINVAL;
521
522	j = 0;
523	for_each_child_of_node(events_np, node) {
524		for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) {
525			if (!ppmu_events[i].name)
526				continue;
527
528			if (of_node_name_eq(node, ppmu_events[i].name))
529				break;
530		}
531
532		if (i == ARRAY_SIZE(ppmu_events)) {
533			dev_warn(dev,
534				"don't know how to configure events : %pOFn\n",
535				node);
536			continue;
537		}
538
539		switch (info->ppmu_type) {
540		case EXYNOS_TYPE_PPMU:
541			desc[j].ops = &exynos_ppmu_ops;
542			break;
543		case EXYNOS_TYPE_PPMU_V2:
544			desc[j].ops = &exynos_ppmu_v2_ops;
545			break;
546		}
547
548		desc[j].driver_data = info;
549
550		of_property_read_string(node, "event-name", &desc[j].name);
551		ret = of_property_read_u32(node, "event-data-type",
552					   &desc[j].event_type);
553		if (ret) {
554			/* Set the event of proper data type counting.
555			 * Check if the data type has been defined in DT,
556			 * use default if not.
557			 */
558			if (info->ppmu_type == EXYNOS_TYPE_PPMU_V2) {
559				struct devfreq_event_dev edev;
560				int id;
561				/* Not all registers take the same value for
562				 * read+write data count.
563				 */
564				edev.desc = &desc[j];
565				id = exynos_ppmu_find_ppmu_id(&edev);
566
567				switch (id) {
568				case PPMU_PMNCNT0:
569				case PPMU_PMNCNT1:
570				case PPMU_PMNCNT2:
571					desc[j].event_type = PPMU_V2_RO_DATA_CNT
572						| PPMU_V2_WO_DATA_CNT;
573					break;
574				case PPMU_PMNCNT3:
575					desc[j].event_type =
576						PPMU_V2_EVT3_RW_DATA_CNT;
577					break;
578				}
579			} else {
580				desc[j].event_type = PPMU_RO_DATA_CNT |
581					PPMU_WO_DATA_CNT;
582			}
583		}
584
585		j++;
586	}
587	info->desc = desc;
588
589	of_node_put(events_np);
590
591	return 0;
592}
593
594static struct regmap_config exynos_ppmu_regmap_config = {
595	.reg_bits = 32,
596	.val_bits = 32,
597	.reg_stride = 4,
598};
599
600static int exynos_ppmu_parse_dt(struct platform_device *pdev,
601				struct exynos_ppmu *info)
602{
603	struct device *dev = info->dev;
604	struct device_node *np = dev->of_node;
605	struct resource *res;
606	void __iomem *base;
607	int ret = 0;
608
609	if (!np) {
610		dev_err(dev, "failed to find devicetree node\n");
611		return -EINVAL;
612	}
613
614	/* Maps the memory mapped IO to control PPMU register */
615	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
616	base = devm_ioremap_resource(dev, res);
617	if (IS_ERR(base))
618		return PTR_ERR(base);
619
620	exynos_ppmu_regmap_config.max_register = resource_size(res) - 4;
621	info->regmap = devm_regmap_init_mmio(dev, base,
622					&exynos_ppmu_regmap_config);
623	if (IS_ERR(info->regmap)) {
624		dev_err(dev, "failed to initialize regmap\n");
625		return PTR_ERR(info->regmap);
626	}
627
628	info->ppmu.clk = devm_clk_get(dev, "ppmu");
629	if (IS_ERR(info->ppmu.clk)) {
630		info->ppmu.clk = NULL;
631		dev_warn(dev, "cannot get PPMU clock\n");
632	}
633
634	ret = of_get_devfreq_events(np, info);
635	if (ret < 0) {
636		dev_err(dev, "failed to parse exynos ppmu dt node\n");
637		return ret;
638	}
639
640	return 0;
641}
642
643static int exynos_ppmu_probe(struct platform_device *pdev)
644{
645	struct exynos_ppmu *info;
646	struct devfreq_event_dev **edev;
647	struct devfreq_event_desc *desc;
648	int i, ret = 0, size;
649
650	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
651	if (!info)
652		return -ENOMEM;
653
654	info->dev = &pdev->dev;
655
656	/* Parse dt data to get resource */
657	ret = exynos_ppmu_parse_dt(pdev, info);
658	if (ret < 0) {
659		dev_err(&pdev->dev,
660			"failed to parse devicetree for resource\n");
661		return ret;
662	}
663	desc = info->desc;
664
665	size = sizeof(struct devfreq_event_dev *) * info->num_events;
666	info->edev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
667	if (!info->edev)
668		return -ENOMEM;
669
670	edev = info->edev;
671	platform_set_drvdata(pdev, info);
672
673	for (i = 0; i < info->num_events; i++) {
674		edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
675		if (IS_ERR(edev[i])) {
676			ret = PTR_ERR(edev[i]);
677			dev_err(&pdev->dev,
678				"failed to add devfreq-event device\n");
679			return PTR_ERR(edev[i]);
680		}
681
682		pr_info("exynos-ppmu: new PPMU device registered %s (%s)\n",
683			dev_name(&pdev->dev), desc[i].name);
684	}
685
686	ret = clk_prepare_enable(info->ppmu.clk);
687	if (ret) {
688		dev_err(&pdev->dev, "failed to prepare ppmu clock\n");
689		return ret;
690	}
691
692	return 0;
693}
694
695static int exynos_ppmu_remove(struct platform_device *pdev)
696{
697	struct exynos_ppmu *info = platform_get_drvdata(pdev);
698
699	clk_disable_unprepare(info->ppmu.clk);
700
701	return 0;
702}
703
704static struct platform_driver exynos_ppmu_driver = {
705	.probe	= exynos_ppmu_probe,
706	.remove	= exynos_ppmu_remove,
707	.driver = {
708		.name	= "exynos-ppmu",
709		.of_match_table = exynos_ppmu_id_match,
710	},
711};
712module_platform_driver(exynos_ppmu_driver);
713
714MODULE_DESCRIPTION("Exynos PPMU(Platform Performance Monitoring Unit) driver");
715MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
716MODULE_LICENSE("GPL");
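
As a closing usage note, a minimal sketch of how a devfreq driver's get_dev_status() callback might fold one PPMU sample into the load statistics that devfreq governors consume. struct devfreq_dev_status comes from <linux/devfreq.h> and struct devfreq_event_data from <linux/devfreq-event.h>; the helper ppmu_sample_to_status() itself is hypothetical.

#include <linux/devfreq.h>
#include <linux/devfreq-event.h>

/*
 * Illustrative sketch: map a PPMU sample onto devfreq's busy/total pair.
 * load_count is the read/write data count selected by
 * exynos_ppmu(_v2)_set_event(); total_count is the PPMU cycle counter.
 */
static void ppmu_sample_to_status(const struct devfreq_event_data *edata,
				  struct devfreq_dev_status *stat)
{
	stat->busy_time = edata->load_count;
	stat->total_time = edata->total_count;
}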