Linux kernel source listing (web export) — performance event support for the Freescale embedded performance monitor.

Revision shown below: v6.8
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Performance event support - Freescale Embedded Performance Monitor
  4 *
  5 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
  6 * Copyright 2010 Freescale Semiconductor, Inc.
  7 */
  8#include <linux/kernel.h>
  9#include <linux/sched.h>
 10#include <linux/perf_event.h>
 11#include <linux/percpu.h>
 12#include <linux/hardirq.h>
 13#include <asm/reg_fsl_emb.h>
 14#include <asm/pmc.h>
 15#include <asm/machdep.h>
 16#include <asm/firmware.h>
 17#include <asm/ptrace.h>
 18
/*
 * Per-CPU bookkeeping for the embedded PMU.
 */
struct cpu_hw_events {
	int n_events;		/* number of hardware events currently on counters */
	int disabled;		/* non-zero while frozen by fsl_emb_pmu_disable() */
	u8  pmcs_enabled;	/* set once ppc_enable_pmcs() has run on this cpu */
	struct perf_event *event[MAX_HWEVENTS];	/* counter index -> owning event */
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 26
 27static struct fsl_emb_pmu *ppmu;
 28
 29/* Number of perf_events counting hardware events */
 30static atomic_t num_events;
 31/* Used to avoid races in calling reserve/release_pmc_hardware */
 32static DEFINE_MUTEX(pmc_reserve_mutex);
 33
 
 
 
 
 
 
 
 
 
 
 
 
 
 34static void perf_event_interrupt(struct pt_regs *regs);
 35
/*
 * Read one performance monitor counter (PMC).
 *
 * Each counter lives in its own performance monitor register, so the
 * index is mapped to the matching mfpmr() by a switch.  An
 * out-of-range index logs an error and reads as 0.
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 0:
		val = mfpmr(PMRN_PMC0);
		break;
	case 1:
		val = mfpmr(PMRN_PMC1);
		break;
	case 2:
		val = mfpmr(PMRN_PMC2);
		break;
	case 3:
		val = mfpmr(PMRN_PMC3);
		break;
	case 4:
		val = mfpmr(PMRN_PMC4);
		break;
	case 5:
		val = mfpmr(PMRN_PMC5);
		break;
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}
 68
/*
 * Write one PMC.
 *
 * The trailing isync() context-synchronizes so the new counter value
 * is in effect before we return (matches the barrier usage noted in
 * fsl_emb_pmu_disable()).  An out-of-range index only logs an error.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 0:
		mtpmr(PMRN_PMC0, val);
		break;
	case 1:
		mtpmr(PMRN_PMC1, val);
		break;
	case 2:
		mtpmr(PMRN_PMC2, val);
		break;
	case 3:
		mtpmr(PMRN_PMC3, val);
		break;
	case 4:
		mtpmr(PMRN_PMC4, val);
		break;
	case 5:
		mtpmr(PMRN_PMC5, val);
		break;
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}

	isync();
}
 99
/*
 * Write one local control A register (PMLCAn) — per-counter event
 * select and freeze-condition bits.  The isync() makes the update
 * take effect before we return.
 */
static void write_pmlca(int idx, unsigned long val)
{
	switch (idx) {
	case 0:
		mtpmr(PMRN_PMLCA0, val);
		break;
	case 1:
		mtpmr(PMRN_PMLCA1, val);
		break;
	case 2:
		mtpmr(PMRN_PMLCA2, val);
		break;
	case 3:
		mtpmr(PMRN_PMLCA3, val);
		break;
	case 4:
		mtpmr(PMRN_PMLCA4, val);
		break;
	case 5:
		mtpmr(PMRN_PMLCA5, val);
		break;
	default:
		printk(KERN_ERR "oops trying to write PMLCA%d\n", idx);
	}

	isync();
}
130
/*
 * Write one local control B register (PMLCBn) — the per-counter
 * control bits taken from the high half of event->hw.config (see
 * fsl_emb_pmu_add()).  The isync() makes the update take effect
 * before we return.
 */
static void write_pmlcb(int idx, unsigned long val)
{
	switch (idx) {
	case 0:
		mtpmr(PMRN_PMLCB0, val);
		break;
	case 1:
		mtpmr(PMRN_PMLCB1, val);
		break;
	case 2:
		mtpmr(PMRN_PMLCB2, val);
		break;
	case 3:
		mtpmr(PMRN_PMLCB3, val);
		break;
	case 4:
		mtpmr(PMRN_PMLCB4, val);
		break;
	case 5:
		mtpmr(PMRN_PMLCB5, val);
		break;
	default:
		printk(KERN_ERR "oops trying to write PMLCB%d\n", idx);
	}

	isync();
}
161
/*
 * Fold the counter's progress since the last read into event->count
 * and event->hw.period_left.  No-op for a stopped event.
 */
static void fsl_emb_pmu_read(struct perf_event *event)
{
	s64 val, delta, prev;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 *
	 * The cmpxchg loop retries if prev_count was updated (e.g. by
	 * the interrupt handler) between our read of prev_count and
	 * our read of the hardware counter.
	 */
	do {
		prev = local64_read(&event->hw.prev_count);
		barrier();
		val = read_pmc(event->hw.idx);
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	/* The counters are only 32 bits wide */
	delta = (val - prev) & 0xfffffffful;
	local64_add(delta, &event->count);
	local64_sub(delta, &event->hw.period_left);
}
185
/*
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */
static void fsl_emb_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;

	/* Interrupts off so 'disabled' and PMGC0 stay consistent. */
	local_irq_save(flags);
	cpuhw = this_cpu_ptr(&cpu_hw_events);

	if (!cpuhw->disabled) {
		cpuhw->disabled = 1;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			ppc_enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/* Only touch PMGC0 if some event has reserved the PMU. */
		if (atomic_read(&num_events)) {
			/*
			 * Set the 'freeze all counters' bit, and disable
			 * interrupts.  The barrier is to make sure the
			 * mtpmr has been executed and the PMU has frozen
			 * the events before we return.
			 */

			mtpmr(PMRN_PMGC0, PMGC0_FAC);
			isync();
		}
	}
	local_irq_restore(flags);
}
223
/*
 * Re-enable all events if disable == 0.
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */
static void fsl_emb_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;

	local_irq_save(flags);
	cpuhw = this_cpu_ptr(&cpu_hw_events);
	/* Nothing to do unless fsl_emb_pmu_disable() froze us. */
	if (!cpuhw->disabled)
		goto out;

	cpuhw->disabled = 0;
	ppc_set_pmu_inuse(cpuhw->n_events != 0);

	if (cpuhw->n_events > 0) {
		/* Unfreeze: clear FAC by setting PMIE | FCECE only. */
		mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
		isync();
	}

 out:
	local_irq_restore(flags);
}
250
251static int collect_events(struct perf_event *group, int max_count,
252			  struct perf_event *ctrs[])
253{
254	int n = 0;
255	struct perf_event *event;
256
257	if (!is_software_event(group)) {
258		if (n >= max_count)
259			return -1;
260		ctrs[n] = group;
261		n++;
262	}
263	for_each_sibling_event(event, group) {
264		if (!is_software_event(event) &&
265		    event->state != PERF_EVENT_STATE_OFF) {
266			if (n >= max_count)
267				return -1;
268			ctrs[n] = event;
269			n++;
270		}
271	}
272	return n;
273}
274
/* context locked on entry */
static int fsl_emb_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw;
	int ret = -EAGAIN;
	int num_counters = ppmu->n_counter;
	u64 val;
	int i;

	perf_pmu_disable(event->pmu);
	cpuhw = &get_cpu_var(cpu_hw_events);

	/* Restricted events may only use the first n_restricted counters. */
	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
		num_counters = ppmu->n_restricted;

	/*
	 * Allocate counters from top-down, so that restricted-capable
	 * counters are kept free as long as possible.
	 */
	for (i = num_counters - 1; i >= 0; i--) {
		if (cpuhw->event[i])
			continue;

		break;
	}

	/* No usable counter free: -EAGAIN tells the core to retry. */
	if (i < 0)
		goto out;

	event->hw.idx = i;
	cpuhw->event[i] = event;
	++cpuhw->n_events;

	/*
	 * For a sampling event, start the counter so that it overflows
	 * (goes negative, see perf_event_interrupt()) after period_left
	 * more counts.
	 */
	val = 0;
	if (event->hw.sample_period) {
		s64 left = local64_read(&event->hw.period_left);
		if (left < 0x80000000L)
			val = 0x80000000L - left;
	}
	local64_set(&event->hw.prev_count, val);

	if (unlikely(!(flags & PERF_EF_START))) {
		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
		val = 0;
	} else {
		event->hw.state &= ~(PERF_HES_STOPPED | PERF_HES_UPTODATE);
	}

	write_pmc(i, val);
	perf_event_update_userpage(event);

	/* High half of hw.config is the PMLCB value, config_base the PMLCA. */
	write_pmlcb(i, event->hw.config >> 32);
	write_pmlca(i, event->hw.config_base);

	ret = 0;
 out:
	put_cpu_var(cpu_hw_events);
	perf_pmu_enable(event->pmu);
	return ret;
}
335
336/* context locked on entry */
337static void fsl_emb_pmu_del(struct perf_event *event, int flags)
338{
339	struct cpu_hw_events *cpuhw;
340	int i = event->hw.idx;
341
342	perf_pmu_disable(event->pmu);
343	if (i < 0)
344		goto out;
345
346	fsl_emb_pmu_read(event);
347
348	cpuhw = &get_cpu_var(cpu_hw_events);
349
350	WARN_ON(event != cpuhw->event[event->hw.idx]);
351
352	write_pmlca(i, 0);
353	write_pmlcb(i, 0);
354	write_pmc(i, 0);
355
356	cpuhw->event[i] = NULL;
357	event->hw.idx = -1;
358
359	/*
360	 * TODO: if at least one restricted event exists, and we
361	 * just freed up a non-restricted-capable counter, and
362	 * there is a restricted-capable counter occupied by
363	 * a non-restricted event, migrate that event to the
364	 * vacated counter.
365	 */
366
367	cpuhw->n_events--;
368
369 out:
370	perf_pmu_enable(event->pmu);
371	put_cpu_var(cpu_hw_events);
372}
373
/*
 * Restart a stopped sampling event: re-program its counter from
 * period_left and clear the stopped state.
 */
static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
{
	unsigned long flags;
	unsigned long val;
	s64 left;

	/* Only sampling events that own a counter can be started. */
	if (event->hw.idx < 0 || !event->hw.sample_period)
		return;

	/* Already running. */
	if (!(event->hw.state & PERF_HES_STOPPED))
		return;

	if (ef_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	event->hw.state = 0;
	left = local64_read(&event->hw.period_left);
	/* Program the counter to overflow after "left" more counts. */
	val = 0;
	if (left < 0x80000000L)
		val = 0x80000000L - left;
	write_pmc(event->hw.idx, val);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
403
/*
 * Stop a sampling event: fold the current hardware count into the
 * event, mark it stopped/up-to-date and park the counter at 0.
 */
static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
{
	unsigned long flags;

	if (event->hw.idx < 0 || !event->hw.sample_period)
		return;

	/* Already stopped. */
	if (event->hw.state & PERF_HES_STOPPED)
		return;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	/* Update event->count before marking the state up-to-date. */
	fsl_emb_pmu_read(event);
	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	write_pmc(event->hw.idx, 0);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
425
/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
	/*
	 * atomic_add_unless() decrements num_events unless it is 1.
	 * If it was 1, we may be the last user: take the mutex so the
	 * final decrement and release_pmc_hardware() are atomic with
	 * respect to the reserve path in fsl_emb_pmu_event_init().
	 */
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}
438
439/*
440 * Translate a generic cache event_id config to a raw event_id code.
441 */
442static int hw_perf_cache_event(u64 config, u64 *eventp)
443{
444	unsigned long type, op, result;
445	int ev;
446
447	if (!ppmu->cache_events)
448		return -EINVAL;
449
450	/* unpack config */
451	type = config & 0xff;
452	op = (config >> 8) & 0xff;
453	result = (config >> 16) & 0xff;
454
455	if (type >= PERF_COUNT_HW_CACHE_MAX ||
456	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
457	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
458		return -EINVAL;
459
460	ev = (*ppmu->cache_events)[type][op][result];
461	if (ev == 0)
462		return -EOPNOTSUPP;
463	if (ev == -1)
464		return -EINVAL;
465	*eventp = ev;
466	return 0;
467}
468
/*
 * Set up a new event: translate the user-supplied config into a
 * hardware event code, validate group and exclusion constraints, and
 * reserve the PMC hardware if this is the first active event.
 */
static int fsl_emb_pmu_event_init(struct perf_event *event)
{
	u64 ev;
	struct perf_event *events[MAX_HWEVENTS];
	int n;
	int err;
	int num_restricted;
	int i;

	/* Clamp n_counter so the events[] array above cannot overflow. */
	if (ppmu->n_counter > MAX_HWEVENTS) {
		WARN(1, "No. of perf counters (%d) is higher than max array size(%d)\n",
			ppmu->n_counter, MAX_HWEVENTS);
		ppmu->n_counter = MAX_HWEVENTS;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = event->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return -EOPNOTSUPP;
		ev = ppmu->generic_events[ev];
		break;

	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(event->attr.config, &ev);
		if (err)
			return err;
		break;

	case PERF_TYPE_RAW:
		ev = event->attr.config;
		break;

	default:
		/* Not one of ours. */
		return -ENOENT;
	}

	event->hw.config = ppmu->xlate_event(ev);
	if (!(event->hw.config & FSL_EMB_EVENT_VALID))
		return -EINVAL;

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware events in the group.  We assume the event
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
		                   ppmu->n_counter - 1, events);
		if (n < 0)
			return -EINVAL;
	}

	/*
	 * A restricted event needs a restricted-capable counter over
	 * and above those its restricted siblings already require.
	 */
	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
		num_restricted = 0;
		for (i = 0; i < n; i++) {
			if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED)
				num_restricted++;
		}

		if (num_restricted >= ppmu->n_restricted)
			return -EINVAL;
	}

	/* No counter assigned yet; fsl_emb_pmu_add() picks one. */
	event->hw.idx = -1;

	event->hw.config_base = PMLCA_CE | PMLCA_FCM1 |
	                        (u32)((ev << 16) & PMLCA_EVENT_MASK);

	if (event->attr.exclude_user)
		event->hw.config_base |= PMLCA_FCU;
	if (event->attr.exclude_kernel)
		event->hw.config_base |= PMLCA_FCS;
	if (event->attr.exclude_idle)
		return -ENOTSUPP;

	event->hw.last_period = event->hw.sample_period;
	local64_set(&event->hw.period_left, event->hw.last_period);

	/*
	 * See if we need to reserve the PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 &&
		    reserve_pmc_hardware(perf_event_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);

		/* Keep all counters frozen until events are added. */
		mtpmr(PMRN_PMGC0, PMGC0_FAC);
		isync();
	}
	event->destroy = hw_perf_event_destroy;

	return err;
}
572
/*
 * perf_events driver operations for the Freescale embedded PMU.
 */
static struct pmu fsl_emb_pmu = {
	.pmu_enable	= fsl_emb_pmu_enable,
	.pmu_disable	= fsl_emb_pmu_disable,
	.event_init	= fsl_emb_pmu_event_init,
	.add		= fsl_emb_pmu_add,
	.del		= fsl_emb_pmu_del,
	.start		= fsl_emb_pmu_start,
	.stop		= fsl_emb_pmu_stop,
	.read		= fsl_emb_pmu_read,
};
583
/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_event *event, unsigned long val,
			       struct pt_regs *regs)
{
	u64 period = event->hw.sample_period;
	s64 prev, delta, left;
	int record = 0;

	/* A stopped event just gets its counter parked at 0. */
	if (event->hw.state & PERF_HES_STOPPED) {
		write_pmc(event->hw.idx, 0);
		return;
	}

	/* we don't have to worry about interrupts here */
	prev = local64_read(&event->hw.prev_count);
	/* The counters are only 32 bits wide. */
	delta = (val - prev) & 0xfffffffful;
	local64_add(delta, &event->count);

	/*
	 * See if the total period for this event has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = local64_read(&event->hw.period_left) - delta;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = 1;
			event->hw.last_period = event->hw.sample_period;
		}
		/* Re-arm so the counter overflows after "left" counts. */
		if (left < 0x80000000LL)
			val = 0x80000000LL - left;
	}

	write_pmc(event->hw.idx, val);
	local64_set(&event->hw.prev_count, val);
	local64_set(&event->hw.period_left, left);
	perf_event_update_userpage(event);

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data;

		perf_sample_data_init(&data, 0, event->hw.last_period);

		/* Non-zero return means the core wants the event stopped. */
		if (perf_event_overflow(event, &data, regs))
			fsl_emb_pmu_stop(event, 0);
	}
}
641
/*
 * PMU interrupt handler: scan all counters for overflow (top bit
 * set, i.e. (int)val < 0), fold each overflow into its owning event
 * and re-arm it, then unfreeze the counters on the way out.
 */
static void perf_event_interrupt(struct pt_regs *regs)
{
	int i;
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
	struct perf_event *event;
	unsigned long val;

	for (i = 0; i < ppmu->n_counter; ++i) {
		event = cpuhw->event[i];

		val = read_pmc(i);
		if ((int)val < 0) {
			if (event) {
				/* event has overflowed */
				record_and_restart(event, val, regs);
			} else {
				/*
				 * Disabled counter is negative,
				 * reset it just in case.
				 */
				write_pmc(i, 0);
			}
		}
	}

	/* PMM will keep counters frozen until we return from the interrupt. */
	mtmsr(mfmsr() | MSR_PMM);
	mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
	isync();
}
672
673static int fsl_emb_pmu_prepare_cpu(unsigned int cpu)
674{
675	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
676
677	memset(cpuhw, 0, sizeof(*cpuhw));
678
679	return 0;
680}
681
682int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
683{
684	if (ppmu)
685		return -EBUSY;		/* something's already registered */
686
687	ppmu = pmu;
688	pr_info("%s performance monitor hardware support registered\n",
689		pmu->name);
690
691	perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW);
692	cpuhp_setup_state(CPUHP_PERF_POWER, "perf/powerpc:prepare",
693			  fsl_emb_pmu_prepare_cpu, NULL);
694
695	return 0;
696}
Revision shown below: v5.4 (an older revision of the same file follows)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Performance event support - Freescale Embedded Performance Monitor
  4 *
  5 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
  6 * Copyright 2010 Freescale Semiconductor, Inc.
  7 */
  8#include <linux/kernel.h>
  9#include <linux/sched.h>
 10#include <linux/perf_event.h>
 11#include <linux/percpu.h>
 12#include <linux/hardirq.h>
 13#include <asm/reg_fsl_emb.h>
 14#include <asm/pmc.h>
 15#include <asm/machdep.h>
 16#include <asm/firmware.h>
 17#include <asm/ptrace.h>
 18
 19struct cpu_hw_events {
 20	int n_events;
 21	int disabled;
 22	u8  pmcs_enabled;
 23	struct perf_event *event[MAX_HWEVENTS];
 24};
 25static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 26
 27static struct fsl_emb_pmu *ppmu;
 28
 29/* Number of perf_events counting hardware events */
 30static atomic_t num_events;
 31/* Used to avoid races in calling reserve/release_pmc_hardware */
 32static DEFINE_MUTEX(pmc_reserve_mutex);
 33
/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 *
 * Only 64-bit tracks a soft-disable state (regs->softe); on 32-bit a
 * PMU interrupt is never treated as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
#ifdef __powerpc64__
	return (regs->softe & IRQS_DISABLED);
#else
	return 0;
#endif
}
 46
 47static void perf_event_interrupt(struct pt_regs *regs);
 48
 49/*
 50 * Read one performance monitor counter (PMC).
 51 */
 52static unsigned long read_pmc(int idx)
 53{
 54	unsigned long val;
 55
 56	switch (idx) {
 57	case 0:
 58		val = mfpmr(PMRN_PMC0);
 59		break;
 60	case 1:
 61		val = mfpmr(PMRN_PMC1);
 62		break;
 63	case 2:
 64		val = mfpmr(PMRN_PMC2);
 65		break;
 66	case 3:
 67		val = mfpmr(PMRN_PMC3);
 68		break;
 69	case 4:
 70		val = mfpmr(PMRN_PMC4);
 71		break;
 72	case 5:
 73		val = mfpmr(PMRN_PMC5);
 74		break;
 75	default:
 76		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
 77		val = 0;
 78	}
 79	return val;
 80}
 81
 82/*
 83 * Write one PMC.
 84 */
 85static void write_pmc(int idx, unsigned long val)
 86{
 87	switch (idx) {
 88	case 0:
 89		mtpmr(PMRN_PMC0, val);
 90		break;
 91	case 1:
 92		mtpmr(PMRN_PMC1, val);
 93		break;
 94	case 2:
 95		mtpmr(PMRN_PMC2, val);
 96		break;
 97	case 3:
 98		mtpmr(PMRN_PMC3, val);
 99		break;
100	case 4:
101		mtpmr(PMRN_PMC4, val);
102		break;
103	case 5:
104		mtpmr(PMRN_PMC5, val);
105		break;
106	default:
107		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
108	}
109
110	isync();
111}
112
113/*
114 * Write one local control A register
115 */
116static void write_pmlca(int idx, unsigned long val)
117{
118	switch (idx) {
119	case 0:
120		mtpmr(PMRN_PMLCA0, val);
121		break;
122	case 1:
123		mtpmr(PMRN_PMLCA1, val);
124		break;
125	case 2:
126		mtpmr(PMRN_PMLCA2, val);
127		break;
128	case 3:
129		mtpmr(PMRN_PMLCA3, val);
130		break;
131	case 4:
132		mtpmr(PMRN_PMLCA4, val);
133		break;
134	case 5:
135		mtpmr(PMRN_PMLCA5, val);
136		break;
137	default:
138		printk(KERN_ERR "oops trying to write PMLCA%d\n", idx);
139	}
140
141	isync();
142}
143
144/*
145 * Write one local control B register
146 */
147static void write_pmlcb(int idx, unsigned long val)
148{
149	switch (idx) {
150	case 0:
151		mtpmr(PMRN_PMLCB0, val);
152		break;
153	case 1:
154		mtpmr(PMRN_PMLCB1, val);
155		break;
156	case 2:
157		mtpmr(PMRN_PMLCB2, val);
158		break;
159	case 3:
160		mtpmr(PMRN_PMLCB3, val);
161		break;
162	case 4:
163		mtpmr(PMRN_PMLCB4, val);
164		break;
165	case 5:
166		mtpmr(PMRN_PMLCB5, val);
167		break;
168	default:
169		printk(KERN_ERR "oops trying to write PMLCB%d\n", idx);
170	}
171
172	isync();
173}
174
175static void fsl_emb_pmu_read(struct perf_event *event)
176{
177	s64 val, delta, prev;
178
179	if (event->hw.state & PERF_HES_STOPPED)
180		return;
181
182	/*
183	 * Performance monitor interrupts come even when interrupts
184	 * are soft-disabled, as long as interrupts are hard-enabled.
185	 * Therefore we treat them like NMIs.
186	 */
187	do {
188		prev = local64_read(&event->hw.prev_count);
189		barrier();
190		val = read_pmc(event->hw.idx);
191	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
192
193	/* The counters are only 32 bits wide */
194	delta = (val - prev) & 0xfffffffful;
195	local64_add(delta, &event->count);
196	local64_sub(delta, &event->hw.period_left);
197}
198
199/*
200 * Disable all events to prevent PMU interrupts and to allow
201 * events to be added or removed.
202 */
203static void fsl_emb_pmu_disable(struct pmu *pmu)
204{
205	struct cpu_hw_events *cpuhw;
206	unsigned long flags;
207
208	local_irq_save(flags);
209	cpuhw = this_cpu_ptr(&cpu_hw_events);
210
211	if (!cpuhw->disabled) {
212		cpuhw->disabled = 1;
213
214		/*
215		 * Check if we ever enabled the PMU on this cpu.
216		 */
217		if (!cpuhw->pmcs_enabled) {
218			ppc_enable_pmcs();
219			cpuhw->pmcs_enabled = 1;
220		}
221
222		if (atomic_read(&num_events)) {
223			/*
224			 * Set the 'freeze all counters' bit, and disable
225			 * interrupts.  The barrier is to make sure the
226			 * mtpmr has been executed and the PMU has frozen
227			 * the events before we return.
228			 */
229
230			mtpmr(PMRN_PMGC0, PMGC0_FAC);
231			isync();
232		}
233	}
234	local_irq_restore(flags);
235}
236
237/*
238 * Re-enable all events if disable == 0.
239 * If we were previously disabled and events were added, then
240 * put the new config on the PMU.
241 */
242static void fsl_emb_pmu_enable(struct pmu *pmu)
243{
244	struct cpu_hw_events *cpuhw;
245	unsigned long flags;
246
247	local_irq_save(flags);
248	cpuhw = this_cpu_ptr(&cpu_hw_events);
249	if (!cpuhw->disabled)
250		goto out;
251
252	cpuhw->disabled = 0;
253	ppc_set_pmu_inuse(cpuhw->n_events != 0);
254
255	if (cpuhw->n_events > 0) {
256		mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
257		isync();
258	}
259
260 out:
261	local_irq_restore(flags);
262}
263
264static int collect_events(struct perf_event *group, int max_count,
265			  struct perf_event *ctrs[])
266{
267	int n = 0;
268	struct perf_event *event;
269
270	if (!is_software_event(group)) {
271		if (n >= max_count)
272			return -1;
273		ctrs[n] = group;
274		n++;
275	}
276	for_each_sibling_event(event, group) {
277		if (!is_software_event(event) &&
278		    event->state != PERF_EVENT_STATE_OFF) {
279			if (n >= max_count)
280				return -1;
281			ctrs[n] = event;
282			n++;
283		}
284	}
285	return n;
286}
287
288/* context locked on entry */
289static int fsl_emb_pmu_add(struct perf_event *event, int flags)
290{
291	struct cpu_hw_events *cpuhw;
292	int ret = -EAGAIN;
293	int num_counters = ppmu->n_counter;
294	u64 val;
295	int i;
296
297	perf_pmu_disable(event->pmu);
298	cpuhw = &get_cpu_var(cpu_hw_events);
299
300	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
301		num_counters = ppmu->n_restricted;
302
303	/*
304	 * Allocate counters from top-down, so that restricted-capable
305	 * counters are kept free as long as possible.
306	 */
307	for (i = num_counters - 1; i >= 0; i--) {
308		if (cpuhw->event[i])
309			continue;
310
311		break;
312	}
313
314	if (i < 0)
315		goto out;
316
317	event->hw.idx = i;
318	cpuhw->event[i] = event;
319	++cpuhw->n_events;
320
321	val = 0;
322	if (event->hw.sample_period) {
323		s64 left = local64_read(&event->hw.period_left);
324		if (left < 0x80000000L)
325			val = 0x80000000L - left;
326	}
327	local64_set(&event->hw.prev_count, val);
328
329	if (unlikely(!(flags & PERF_EF_START))) {
330		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
331		val = 0;
332	} else {
333		event->hw.state &= ~(PERF_HES_STOPPED | PERF_HES_UPTODATE);
334	}
335
336	write_pmc(i, val);
337	perf_event_update_userpage(event);
338
339	write_pmlcb(i, event->hw.config >> 32);
340	write_pmlca(i, event->hw.config_base);
341
342	ret = 0;
343 out:
344	put_cpu_var(cpu_hw_events);
345	perf_pmu_enable(event->pmu);
346	return ret;
347}
348
349/* context locked on entry */
350static void fsl_emb_pmu_del(struct perf_event *event, int flags)
351{
352	struct cpu_hw_events *cpuhw;
353	int i = event->hw.idx;
354
355	perf_pmu_disable(event->pmu);
356	if (i < 0)
357		goto out;
358
359	fsl_emb_pmu_read(event);
360
361	cpuhw = &get_cpu_var(cpu_hw_events);
362
363	WARN_ON(event != cpuhw->event[event->hw.idx]);
364
365	write_pmlca(i, 0);
366	write_pmlcb(i, 0);
367	write_pmc(i, 0);
368
369	cpuhw->event[i] = NULL;
370	event->hw.idx = -1;
371
372	/*
373	 * TODO: if at least one restricted event exists, and we
374	 * just freed up a non-restricted-capable counter, and
375	 * there is a restricted-capable counter occupied by
376	 * a non-restricted event, migrate that event to the
377	 * vacated counter.
378	 */
379
380	cpuhw->n_events--;
381
382 out:
383	perf_pmu_enable(event->pmu);
384	put_cpu_var(cpu_hw_events);
385}
386
387static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
388{
389	unsigned long flags;
390	unsigned long val;
391	s64 left;
392
393	if (event->hw.idx < 0 || !event->hw.sample_period)
394		return;
395
396	if (!(event->hw.state & PERF_HES_STOPPED))
397		return;
398
399	if (ef_flags & PERF_EF_RELOAD)
400		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
401
402	local_irq_save(flags);
403	perf_pmu_disable(event->pmu);
404
405	event->hw.state = 0;
406	left = local64_read(&event->hw.period_left);
407	val = 0;
408	if (left < 0x80000000L)
409		val = 0x80000000L - left;
410	write_pmc(event->hw.idx, val);
411
412	perf_event_update_userpage(event);
413	perf_pmu_enable(event->pmu);
414	local_irq_restore(flags);
415}
416
417static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
418{
419	unsigned long flags;
420
421	if (event->hw.idx < 0 || !event->hw.sample_period)
422		return;
423
424	if (event->hw.state & PERF_HES_STOPPED)
425		return;
426
427	local_irq_save(flags);
428	perf_pmu_disable(event->pmu);
429
430	fsl_emb_pmu_read(event);
431	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
432	write_pmc(event->hw.idx, 0);
433
434	perf_event_update_userpage(event);
435	perf_pmu_enable(event->pmu);
436	local_irq_restore(flags);
437}
438
439/*
440 * Release the PMU if this is the last perf_event.
441 */
442static void hw_perf_event_destroy(struct perf_event *event)
443{
444	if (!atomic_add_unless(&num_events, -1, 1)) {
445		mutex_lock(&pmc_reserve_mutex);
446		if (atomic_dec_return(&num_events) == 0)
447			release_pmc_hardware();
448		mutex_unlock(&pmc_reserve_mutex);
449	}
450}
451
452/*
453 * Translate a generic cache event_id config to a raw event_id code.
454 */
455static int hw_perf_cache_event(u64 config, u64 *eventp)
456{
457	unsigned long type, op, result;
458	int ev;
459
460	if (!ppmu->cache_events)
461		return -EINVAL;
462
463	/* unpack config */
464	type = config & 0xff;
465	op = (config >> 8) & 0xff;
466	result = (config >> 16) & 0xff;
467
468	if (type >= PERF_COUNT_HW_CACHE_MAX ||
469	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
470	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
471		return -EINVAL;
472
473	ev = (*ppmu->cache_events)[type][op][result];
474	if (ev == 0)
475		return -EOPNOTSUPP;
476	if (ev == -1)
477		return -EINVAL;
478	*eventp = ev;
479	return 0;
480}
481
482static int fsl_emb_pmu_event_init(struct perf_event *event)
483{
484	u64 ev;
485	struct perf_event *events[MAX_HWEVENTS];
486	int n;
487	int err;
488	int num_restricted;
489	int i;
490
491	if (ppmu->n_counter > MAX_HWEVENTS) {
492		WARN(1, "No. of perf counters (%d) is higher than max array size(%d)\n",
493			ppmu->n_counter, MAX_HWEVENTS);
494		ppmu->n_counter = MAX_HWEVENTS;
495	}
496
497	switch (event->attr.type) {
498	case PERF_TYPE_HARDWARE:
499		ev = event->attr.config;
500		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
501			return -EOPNOTSUPP;
502		ev = ppmu->generic_events[ev];
503		break;
504
505	case PERF_TYPE_HW_CACHE:
506		err = hw_perf_cache_event(event->attr.config, &ev);
507		if (err)
508			return err;
509		break;
510
511	case PERF_TYPE_RAW:
512		ev = event->attr.config;
513		break;
514
515	default:
516		return -ENOENT;
517	}
518
519	event->hw.config = ppmu->xlate_event(ev);
520	if (!(event->hw.config & FSL_EMB_EVENT_VALID))
521		return -EINVAL;
522
523	/*
524	 * If this is in a group, check if it can go on with all the
525	 * other hardware events in the group.  We assume the event
526	 * hasn't been linked into its leader's sibling list at this point.
527	 */
528	n = 0;
529	if (event->group_leader != event) {
530		n = collect_events(event->group_leader,
531		                   ppmu->n_counter - 1, events);
532		if (n < 0)
533			return -EINVAL;
534	}
535
536	if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
537		num_restricted = 0;
538		for (i = 0; i < n; i++) {
539			if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED)
540				num_restricted++;
541		}
542
543		if (num_restricted >= ppmu->n_restricted)
544			return -EINVAL;
545	}
546
547	event->hw.idx = -1;
548
549	event->hw.config_base = PMLCA_CE | PMLCA_FCM1 |
550	                        (u32)((ev << 16) & PMLCA_EVENT_MASK);
551
552	if (event->attr.exclude_user)
553		event->hw.config_base |= PMLCA_FCU;
554	if (event->attr.exclude_kernel)
555		event->hw.config_base |= PMLCA_FCS;
556	if (event->attr.exclude_idle)
557		return -ENOTSUPP;
558
559	event->hw.last_period = event->hw.sample_period;
560	local64_set(&event->hw.period_left, event->hw.last_period);
561
562	/*
563	 * See if we need to reserve the PMU.
564	 * If no events are currently in use, then we have to take a
565	 * mutex to ensure that we don't race with another task doing
566	 * reserve_pmc_hardware or release_pmc_hardware.
567	 */
568	err = 0;
569	if (!atomic_inc_not_zero(&num_events)) {
570		mutex_lock(&pmc_reserve_mutex);
571		if (atomic_read(&num_events) == 0 &&
572		    reserve_pmc_hardware(perf_event_interrupt))
573			err = -EBUSY;
574		else
575			atomic_inc(&num_events);
576		mutex_unlock(&pmc_reserve_mutex);
577
578		mtpmr(PMRN_PMGC0, PMGC0_FAC);
579		isync();
580	}
581	event->destroy = hw_perf_event_destroy;
582
583	return err;
584}
585
586static struct pmu fsl_emb_pmu = {
587	.pmu_enable	= fsl_emb_pmu_enable,
588	.pmu_disable	= fsl_emb_pmu_disable,
589	.event_init	= fsl_emb_pmu_event_init,
590	.add		= fsl_emb_pmu_add,
591	.del		= fsl_emb_pmu_del,
592	.start		= fsl_emb_pmu_start,
593	.stop		= fsl_emb_pmu_stop,
594	.read		= fsl_emb_pmu_read,
595};
596
597/*
598 * A counter has overflowed; update its count and record
599 * things if requested.  Note that interrupts are hard-disabled
600 * here so there is no possibility of being interrupted.
601 */
602static void record_and_restart(struct perf_event *event, unsigned long val,
603			       struct pt_regs *regs)
604{
605	u64 period = event->hw.sample_period;
606	s64 prev, delta, left;
607	int record = 0;
608
609	if (event->hw.state & PERF_HES_STOPPED) {
610		write_pmc(event->hw.idx, 0);
611		return;
612	}
613
614	/* we don't have to worry about interrupts here */
615	prev = local64_read(&event->hw.prev_count);
616	delta = (val - prev) & 0xfffffffful;
617	local64_add(delta, &event->count);
618
619	/*
620	 * See if the total period for this event has expired,
621	 * and update for the next period.
622	 */
623	val = 0;
624	left = local64_read(&event->hw.period_left) - delta;
625	if (period) {
626		if (left <= 0) {
627			left += period;
628			if (left <= 0)
629				left = period;
630			record = 1;
631			event->hw.last_period = event->hw.sample_period;
632		}
633		if (left < 0x80000000LL)
634			val = 0x80000000LL - left;
635	}
636
637	write_pmc(event->hw.idx, val);
638	local64_set(&event->hw.prev_count, val);
639	local64_set(&event->hw.period_left, left);
640	perf_event_update_userpage(event);
641
642	/*
643	 * Finally record data if requested.
644	 */
645	if (record) {
646		struct perf_sample_data data;
647
648		perf_sample_data_init(&data, 0, event->hw.last_period);
649
650		if (perf_event_overflow(event, &data, regs))
651			fsl_emb_pmu_stop(event, 0);
652	}
653}
654
655static void perf_event_interrupt(struct pt_regs *regs)
656{
657	int i;
658	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
659	struct perf_event *event;
660	unsigned long val;
661	int found = 0;
662	int nmi;
663
664	nmi = perf_intr_is_nmi(regs);
665	if (nmi)
666		nmi_enter();
667	else
668		irq_enter();
669
670	for (i = 0; i < ppmu->n_counter; ++i) {
671		event = cpuhw->event[i];
672
673		val = read_pmc(i);
674		if ((int)val < 0) {
675			if (event) {
676				/* event has overflowed */
677				found = 1;
678				record_and_restart(event, val, regs);
679			} else {
680				/*
681				 * Disabled counter is negative,
682				 * reset it just in case.
683				 */
684				write_pmc(i, 0);
685			}
686		}
687	}
688
689	/* PMM will keep counters frozen until we return from the interrupt. */
690	mtmsr(mfmsr() | MSR_PMM);
691	mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
692	isync();
693
694	if (nmi)
695		nmi_exit();
696	else
697		irq_exit();
698}
699
700void hw_perf_event_setup(int cpu)
701{
702	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
703
704	memset(cpuhw, 0, sizeof(*cpuhw));
 
 
705}
706
707int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
708{
709	if (ppmu)
710		return -EBUSY;		/* something's already registered */
711
712	ppmu = pmu;
713	pr_info("%s performance monitor hardware support registered\n",
714		pmu->name);
715
716	perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW);
 
 
717
718	return 0;
719}