// SPDX-License-Identifier: GPL-2.0-only
/*
 * exynos_ppmu.c - Exynos PPMU (Platform Performance Monitoring Unit) support
 *
 * Copyright (c) 2014-2015 Samsung Electronics Co., Ltd.
 * Author : Chanwoo Choi <cw00.choi@samsung.com>
 *
 * This driver is based on drivers/devfreq/exynos/exynos_ppmu.c
 */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/suspend.h>
#include <linux/devfreq-event.h>

#include "exynos-ppmu.h"

enum exynos_ppmu_type {
	EXYNOS_TYPE_PPMU,
	EXYNOS_TYPE_PPMU_V2,
};

struct exynos_ppmu_data {
	struct clk *clk;
};

struct exynos_ppmu {
	struct devfreq_event_dev **edev;
	struct devfreq_event_desc *desc;
	unsigned int num_events;

	struct device *dev;
	struct regmap *regmap;

	struct exynos_ppmu_data ppmu;
	enum exynos_ppmu_type ppmu_type;
};

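/*
 * Each PPMU_EVENT(name) entry below expands into four devfreq-event names
 * ("ppmu-event0-<name>" .. "ppmu-event3-<name>"), one per hardware
 * performance counter (PPMU_PMNCNT0..PPMU_PMNCNT3).
 */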
#define PPMU_EVENT(name)			\
	{ "ppmu-event0-"#name, PPMU_PMNCNT0 },	\
	{ "ppmu-event1-"#name, PPMU_PMNCNT1 },	\
	{ "ppmu-event2-"#name, PPMU_PMNCNT2 },	\
	{ "ppmu-event3-"#name, PPMU_PMNCNT3 }

static struct __exynos_ppmu_events {
	char *name;
	int id;
} ppmu_events[] = {
	/* For Exynos3250, Exynos4 and Exynos5260 */
	PPMU_EVENT(g3d),
	PPMU_EVENT(fsys),

	/* For Exynos4 SoCs and Exynos3250 */
	PPMU_EVENT(dmc0),
	PPMU_EVENT(dmc1),
	PPMU_EVENT(cpu),
	PPMU_EVENT(rightbus),
	PPMU_EVENT(leftbus),
	PPMU_EVENT(lcd0),
	PPMU_EVENT(camif),

	/* Only for Exynos3250 and Exynos5260 */
	PPMU_EVENT(mfc),

	/* Only for Exynos4 SoCs */
	PPMU_EVENT(mfc-left),
	PPMU_EVENT(mfc-right),

	/* Only for Exynos5260 SoCs */
	PPMU_EVENT(drex0-s0),
	PPMU_EVENT(drex0-s1),
	PPMU_EVENT(drex1-s0),
	PPMU_EVENT(drex1-s1),
	PPMU_EVENT(eagle),
	PPMU_EVENT(kfc),
	PPMU_EVENT(isp),
	PPMU_EVENT(fimc),
	PPMU_EVENT(gscl),
	PPMU_EVENT(mscl),
	PPMU_EVENT(fimd0x),
	PPMU_EVENT(fimd1x),

	/* Only for Exynos5433 SoCs */
	PPMU_EVENT(d0-cpu),
	PPMU_EVENT(d0-general),
	PPMU_EVENT(d0-rt),
	PPMU_EVENT(d1-cpu),
	PPMU_EVENT(d1-general),
	PPMU_EVENT(d1-rt),

	/* For Exynos5422 SoC, deprecated (backwards compatible) */
	PPMU_EVENT(dmc0_0),
	PPMU_EVENT(dmc0_1),
	PPMU_EVENT(dmc1_0),
	PPMU_EVENT(dmc1_1),
	/* For Exynos5422 SoC */
	PPMU_EVENT(dmc0-0),
	PPMU_EVENT(dmc0-1),
	PPMU_EVENT(dmc1-0),
	PPMU_EVENT(dmc1-1),
};

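/*
 * Map a devfreq-event device name (e.g. "ppmu-event3-dmc0") to the id of
 * the hardware performance counter (PPMU_PMNCNT0..PPMU_PMNCNT3) it uses.
 */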
static int __exynos_ppmu_find_ppmu_id(const char *edev_name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ppmu_events); i++)
		if (!strcmp(edev_name, ppmu_events[i].name))
			return ppmu_events[i].id;

	return -EINVAL;
}

static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
{
	return __exynos_ppmu_find_ppmu_id(edev->desc->name);
}

/*
 * The devfreq-event ops structure for PPMU v1.1
 */
static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int ret;
	u32 pmnc;

	/* Disable all counters */
	ret = regmap_write(info->regmap, PPMU_CNTENC,
			   PPMU_CCNT_MASK |
			   PPMU_PMCNT0_MASK |
			   PPMU_PMCNT1_MASK |
			   PPMU_PMCNT2_MASK |
			   PPMU_PMCNT3_MASK);
	if (ret < 0)
		return ret;

	/* Disable PPMU */
	ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
	if (ret < 0)
		return ret;

	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
	if (ret < 0)
		return ret;

	return 0;
}

static int exynos_ppmu_set_event(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int id = exynos_ppmu_find_ppmu_id(edev);
	int ret;
	u32 pmnc, cntens;

	if (id < 0)
		return id;

	/* Enable specific counter */
	ret = regmap_read(info->regmap, PPMU_CNTENS, &cntens);
	if (ret < 0)
		return ret;

	cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	ret = regmap_write(info->regmap, PPMU_CNTENS, cntens);
	if (ret < 0)
		return ret;

	/* Set the event of proper data type monitoring */
	ret = regmap_write(info->regmap, PPMU_BEVTxSEL(id),
			   edev->desc->event_type);
	if (ret < 0)
		return ret;

	/* Reset cycle counter/performance counter and enable PPMU */
	ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
	if (ret < 0)
		return ret;

	pmnc &= ~(PPMU_PMNC_ENABLE_MASK
			| PPMU_PMNC_COUNTER_RESET_MASK
			| PPMU_PMNC_CC_RESET_MASK);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
	ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
	if (ret < 0)
		return ret;

	return 0;
}

static int exynos_ppmu_get_event(struct devfreq_event_dev *edev,
				 struct devfreq_event_data *edata)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int id = exynos_ppmu_find_ppmu_id(edev);
	unsigned int total_count, load_count;
	unsigned int pmcnt3_high, pmcnt3_low;
	unsigned int pmnc, cntenc;
	int ret;

	if (id < 0)
		return -EINVAL;

	/* Disable PPMU */
	ret = regmap_read(info->regmap, PPMU_PMNC, &pmnc);
	if (ret < 0)
		return ret;

	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	ret = regmap_write(info->regmap, PPMU_PMNC, pmnc);
	if (ret < 0)
		return ret;

	/* Read cycle count */
	ret = regmap_read(info->regmap, PPMU_CCNT, &total_count);
	if (ret < 0)
		return ret;
	edata->total_count = total_count;

	/* Read performance count */
	switch (id) {
	case PPMU_PMNCNT0:
	case PPMU_PMNCNT1:
	case PPMU_PMNCNT2:
		ret = regmap_read(info->regmap, PPMU_PMNCT(id), &load_count);
		if (ret < 0)
			return ret;
		edata->load_count = load_count;
		break;
	case PPMU_PMNCNT3:
		ret = regmap_read(info->regmap, PPMU_PMCNT3_HIGH, &pmcnt3_high);
		if (ret < 0)
			return ret;

		ret = regmap_read(info->regmap, PPMU_PMCNT3_LOW, &pmcnt3_low);
		if (ret < 0)
			return ret;

		edata->load_count = ((pmcnt3_high << 8) | pmcnt3_low);
		break;
	default:
		return -EINVAL;
	}

	/* Disable specific counter */
	ret = regmap_read(info->regmap, PPMU_CNTENC, &cntenc);
	if (ret < 0)
		return ret;

	cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	ret = regmap_write(info->regmap, PPMU_CNTENC, cntenc);
	if (ret < 0)
		return ret;

	dev_dbg(&edev->dev, "%s (event: %ld/%ld)\n", edev->desc->name,
		edata->load_count, edata->total_count);

	return 0;
}

static const struct devfreq_event_ops exynos_ppmu_ops = {
	.disable = exynos_ppmu_disable,
	.set_event = exynos_ppmu_set_event,
	.get_event = exynos_ppmu_get_event,
};

/*
 * The devfreq-event ops structure for PPMU v2.0
 */
static int exynos_ppmu_v2_disable(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int ret;
	u32 pmnc, clear;

	/* Disable all counters */
	clear = (PPMU_CCNT_MASK | PPMU_PMCNT0_MASK | PPMU_PMCNT1_MASK
		| PPMU_PMCNT2_MASK | PPMU_PMCNT3_MASK);
	ret = regmap_write(info->regmap, PPMU_V2_FLAG, clear);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_INTENC, clear);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CNTENC, clear);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CNT_RESET, clear);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG0, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG1, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CIG_CFG2, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CIG_RESULT, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CNT_AUTO, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CH_EV0_TYPE, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CH_EV1_TYPE, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CH_EV2_TYPE, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_CH_EV3_TYPE, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_SM_ID_V, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_SM_ID_A, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_SM_OTHERS_V, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_SM_OTHERS_A, 0x0);
	if (ret < 0)
		return ret;

	ret = regmap_write(info->regmap, PPMU_V2_INTERRUPT_RESET, 0x0);
	if (ret < 0)
		return ret;

	/* Disable PPMU */
	ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
	if (ret < 0)
		return ret;

	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
	if (ret < 0)
		return ret;

	return 0;
}

static int exynos_ppmu_v2_set_event(struct devfreq_event_dev *edev)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	unsigned int pmnc, cntens;
	int id = exynos_ppmu_find_ppmu_id(edev);
	int ret;

	/* Reject unknown events before id is used as a shift/register index */
	if (id < 0)
		return id;

	/* Enable all counters */
	ret = regmap_read(info->regmap, PPMU_V2_CNTENS, &cntens);
	if (ret < 0)
		return ret;

	cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	ret = regmap_write(info->regmap, PPMU_V2_CNTENS, cntens);
	if (ret < 0)
		return ret;

	/* Set the event of proper data type monitoring */
	ret = regmap_write(info->regmap, PPMU_V2_CH_EVx_TYPE(id),
			   edev->desc->event_type);
	if (ret < 0)
		return ret;

	/* Reset cycle counter/performance counter and enable PPMU */
	ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
	if (ret < 0)
		return ret;

	pmnc &= ~(PPMU_PMNC_ENABLE_MASK
			| PPMU_PMNC_COUNTER_RESET_MASK
			| PPMU_PMNC_CC_RESET_MASK
			| PPMU_PMNC_CC_DIVIDER_MASK
			| PPMU_V2_PMNC_START_MODE_MASK);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
	pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
	pmnc |= (PPMU_V2_MODE_MANUAL << PPMU_V2_PMNC_START_MODE_SHIFT);

	ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
	if (ret < 0)
		return ret;

	return 0;
}

static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev,
				    struct devfreq_event_data *edata)
{
	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
	int id = exynos_ppmu_find_ppmu_id(edev);
	int ret;
	unsigned int pmnc, cntenc;
	unsigned int pmcnt_high, pmcnt_low;
	unsigned int total_count, count;
	unsigned long load_count = 0;

	if (id < 0)
		return -EINVAL;

	/* Disable PPMU */
	ret = regmap_read(info->regmap, PPMU_V2_PMNC, &pmnc);
	if (ret < 0)
		return ret;

	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
	ret = regmap_write(info->regmap, PPMU_V2_PMNC, pmnc);
	if (ret < 0)
		return ret;

	/* Read cycle count and performance count */
	ret = regmap_read(info->regmap, PPMU_V2_CCNT, &total_count);
	if (ret < 0)
		return ret;
	edata->total_count = total_count;

	switch (id) {
	case PPMU_PMNCNT0:
	case PPMU_PMNCNT1:
	case PPMU_PMNCNT2:
		ret = regmap_read(info->regmap, PPMU_V2_PMNCT(id), &count);
		if (ret < 0)
			return ret;
		load_count = count;
		break;
	case PPMU_PMNCNT3:
		/* PMNCNT3 is split across two registers; combine the halves */
		ret = regmap_read(info->regmap, PPMU_V2_PMCNT3_HIGH,
				  &pmcnt_high);
		if (ret < 0)
			return ret;

		ret = regmap_read(info->regmap, PPMU_V2_PMCNT3_LOW, &pmcnt_low);
		if (ret < 0)
			return ret;

		load_count = ((u64)(pmcnt_high & 0xff) << 32) + (u64)pmcnt_low;
		break;
	}
	edata->load_count = load_count;

	/* Disable all counters */
	ret = regmap_read(info->regmap, PPMU_V2_CNTENC, &cntenc);
	if (ret < 0)
		return ret;

	cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
	ret = regmap_write(info->regmap, PPMU_V2_CNTENC, cntenc);
	if (ret < 0)
		return ret;

	dev_dbg(&edev->dev, "%25s (load: %ld / %ld)\n", edev->desc->name,
		edata->load_count, edata->total_count);
	return 0;
}

static const struct devfreq_event_ops exynos_ppmu_v2_ops = {
	.disable = exynos_ppmu_v2_disable,
	.set_event = exynos_ppmu_v2_set_event,
	.get_event = exynos_ppmu_v2_get_event,
};

static const struct of_device_id exynos_ppmu_id_match[] = {
	{
		.compatible = "samsung,exynos-ppmu",
		.data = (void *)EXYNOS_TYPE_PPMU,
	}, {
		.compatible = "samsung,exynos-ppmu-v2",
		.data = (void *)EXYNOS_TYPE_PPMU_V2,
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, exynos_ppmu_id_match);

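/*
 * Parse the "events" child node of the PPMU devicetree node and build one
 * devfreq-event descriptor per child that matches an entry in ppmu_events[].
 */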
static int of_get_devfreq_events(struct device_node *np,
				 struct exynos_ppmu *info)
{
	struct devfreq_event_desc *desc;
	struct device *dev = info->dev;
	struct device_node *events_np, *node;
	int i, j, count;
	const struct of_device_id *of_id;
	int ret;

	events_np = of_get_child_by_name(np, "events");
	if (!events_np) {
		dev_err(dev,
			"failed to get child node of devfreq-event devices\n");
		return -EINVAL;
	}

	count = of_get_child_count(events_np);
	desc = devm_kcalloc(dev, count, sizeof(*desc), GFP_KERNEL);
	if (!desc) {
		of_node_put(events_np);
		return -ENOMEM;
	}
	info->num_events = count;

	of_id = of_match_device(exynos_ppmu_id_match, dev);
	if (of_id) {
		info->ppmu_type = (enum exynos_ppmu_type)of_id->data;
	} else {
		of_node_put(events_np);
		return -EINVAL;
	}

	j = 0;
	for_each_child_of_node(events_np, node) {
		for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) {
			if (!ppmu_events[i].name)
				continue;

			if (of_node_name_eq(node, ppmu_events[i].name))
				break;
		}

		if (i == ARRAY_SIZE(ppmu_events)) {
			dev_warn(dev,
				 "don't know how to configure events : %pOFn\n",
				 node);
			continue;
		}

		switch (info->ppmu_type) {
		case EXYNOS_TYPE_PPMU:
			desc[j].ops = &exynos_ppmu_ops;
			break;
		case EXYNOS_TYPE_PPMU_V2:
			desc[j].ops = &exynos_ppmu_v2_ops;
			break;
		}

		desc[j].driver_data = info;

		of_property_read_string(node, "event-name", &desc[j].name);
		ret = of_property_read_u32(node, "event-data-type",
					   &desc[j].event_type);
		if (ret) {
			/*
			 * Set the event of proper data type counting.
			 * Check if the data type has been defined in DT,
			 * use default if not.
			 */
			if (info->ppmu_type == EXYNOS_TYPE_PPMU_V2) {
				/*
				 * Not all registers take the same value for
				 * read+write data count.
				 */
				switch (ppmu_events[i].id) {
				case PPMU_PMNCNT0:
				case PPMU_PMNCNT1:
				case PPMU_PMNCNT2:
					desc[j].event_type = PPMU_V2_RO_DATA_CNT
						| PPMU_V2_WO_DATA_CNT;
					break;
				case PPMU_PMNCNT3:
					desc[j].event_type =
						PPMU_V2_EVT3_RW_DATA_CNT;
					break;
				}
			} else {
				desc[j].event_type = PPMU_RO_DATA_CNT |
					PPMU_WO_DATA_CNT;
			}
		}

		j++;
	}
	info->desc = desc;

	of_node_put(events_np);

	return 0;
}

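/*
 * max_register is filled in at probe time from the size of the PPMU MMIO
 * resource, so this regmap_config cannot be const.
 */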
static struct regmap_config exynos_ppmu_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

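/*
 * Map the PPMU register region, acquire the (optional) "ppmu" clock and
 * parse the event descriptions from the devicetree node.
 */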
static int exynos_ppmu_parse_dt(struct platform_device *pdev,
				struct exynos_ppmu *info)
{
	struct device *dev = info->dev;
	struct device_node *np = dev->of_node;
	struct resource *res;
	void __iomem *base;
	int ret = 0;

	if (!np) {
		dev_err(dev, "failed to find devicetree node\n");
		return -EINVAL;
	}

	/* Map the memory-mapped I/O used to control the PPMU registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	exynos_ppmu_regmap_config.max_register = resource_size(res) - 4;
	info->regmap = devm_regmap_init_mmio(dev, base,
					&exynos_ppmu_regmap_config);
	if (IS_ERR(info->regmap)) {
		dev_err(dev, "failed to initialize regmap\n");
		return PTR_ERR(info->regmap);
	}

	info->ppmu.clk = devm_clk_get(dev, "ppmu");
	if (IS_ERR(info->ppmu.clk)) {
		info->ppmu.clk = NULL;
		dev_warn(dev, "cannot get PPMU clock\n");
	}

	ret = of_get_devfreq_events(np, info);
	if (ret < 0) {
		dev_err(dev, "failed to parse exynos ppmu dt node\n");
		return ret;
	}

	return 0;
}

static int exynos_ppmu_probe(struct platform_device *pdev)
{
	struct exynos_ppmu *info;
	struct devfreq_event_dev **edev;
	struct devfreq_event_desc *desc;
	int i, ret = 0, size;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = &pdev->dev;

	/* Parse dt data to get resource */
	ret = exynos_ppmu_parse_dt(pdev, info);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"failed to parse devicetree for resource\n");
		return ret;
	}
	desc = info->desc;

	size = sizeof(struct devfreq_event_dev *) * info->num_events;
	info->edev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!info->edev)
		return -ENOMEM;

	edev = info->edev;
	platform_set_drvdata(pdev, info);

	for (i = 0; i < info->num_events; i++) {
		edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
		if (IS_ERR(edev[i])) {
			dev_err(&pdev->dev,
				"failed to add devfreq-event device\n");
			return PTR_ERR(edev[i]);
		}

		pr_info("exynos-ppmu: new PPMU device registered %s (%s)\n",
			dev_name(&pdev->dev), desc[i].name);
	}

	ret = clk_prepare_enable(info->ppmu.clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to prepare ppmu clock\n");
		return ret;
	}

	return 0;
}

static int exynos_ppmu_remove(struct platform_device *pdev)
{
	struct exynos_ppmu *info = platform_get_drvdata(pdev);

	clk_disable_unprepare(info->ppmu.clk);

	return 0;
}

static struct platform_driver exynos_ppmu_driver = {
	.probe	= exynos_ppmu_probe,
	.remove	= exynos_ppmu_remove,
	.driver = {
		.name	= "exynos-ppmu",
		.of_match_table = exynos_ppmu_id_match,
	},
};
module_platform_driver(exynos_ppmu_driver);

MODULE_DESCRIPTION("Exynos PPMU (Platform Performance Monitoring Unit) driver");
MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
MODULE_LICENSE("GPL");