/*
 * Freescale Embedded oprofile support, based on ppc64 oprofile support
 * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * Copyright (c) 2004, 2010 Freescale Semiconductor, Inc
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/reg_fsl_emb.h>
#include <asm/page.h>
#include <asm/pmc.h>
#include <asm/oprofile_impl.h>

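/*
 * Reload value for each counter; fsl_emb_reg_setup() computes it so that
 * a counter overflows (and raises an interrupt) after ctr[i].count events.
 */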
static unsigned long reset_value[OP_MAX_COUNTER];

static int num_counters;
static int oprofile_running;

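/* Read the local control A register (PMLCA) of performance counter ctr. */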
static inline u32 get_pmlca(int ctr)
{
        u32 pmlca;

        switch (ctr) {
                case 0:
                        pmlca = mfpmr(PMRN_PMLCA0);
                        break;
                case 1:
                        pmlca = mfpmr(PMRN_PMLCA1);
                        break;
                case 2:
                        pmlca = mfpmr(PMRN_PMLCA2);
                        break;
                case 3:
                        pmlca = mfpmr(PMRN_PMLCA3);
                        break;
                default:
                        panic("Bad ctr number\n");
        }

        return pmlca;
}

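/* Write the local control A register (PMLCA) of performance counter ctr. */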
static inline void set_pmlca(int ctr, u32 pmlca)
{
        switch (ctr) {
                case 0:
                        mtpmr(PMRN_PMLCA0, pmlca);
                        break;
                case 1:
                        mtpmr(PMRN_PMLCA1, pmlca);
                        break;
                case 2:
                        mtpmr(PMRN_PMLCA2, pmlca);
                        break;
                case 3:
                        mtpmr(PMRN_PMLCA3, pmlca);
                        break;
                default:
                        panic("Bad ctr number\n");
        }
}

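/* Return the current value of performance counter i, or 0 for an invalid counter. */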
static inline unsigned int ctr_read(unsigned int i)
{
        switch (i) {
                case 0:
                        return mfpmr(PMRN_PMC0);
                case 1:
                        return mfpmr(PMRN_PMC1);
                case 2:
                        return mfpmr(PMRN_PMC2);
                case 3:
                        return mfpmr(PMRN_PMC3);
                default:
                        return 0;
        }
}

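/* Set performance counter i to val; writes to an invalid counter are ignored. */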
static inline void ctr_write(unsigned int i, unsigned int val)
{
        switch (i) {
                case 0:
                        mtpmr(PMRN_PMC0, val);
                        break;
                case 1:
                        mtpmr(PMRN_PMC1, val);
                        break;
                case 2:
                        mtpmr(PMRN_PMC2, val);
                        break;
                case 3:
                        mtpmr(PMRN_PMC3, val);
                        break;
                default:
                        break;
        }
}


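/*
 * Freeze counter ctr in every state (user, supervisor, marked and
 * unmarked) and clear its local control B register.
 */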
static void init_pmc_stop(int ctr)
{
        u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
                        PMLCA_FCM1 | PMLCA_FCM0);
        u32 pmlcb = 0;

        switch (ctr) {
                case 0:
                        mtpmr(PMRN_PMLCA0, pmlca);
                        mtpmr(PMRN_PMLCB0, pmlcb);
                        break;
                case 1:
                        mtpmr(PMRN_PMLCA1, pmlca);
                        mtpmr(PMRN_PMLCB1, pmlcb);
                        break;
                case 2:
                        mtpmr(PMRN_PMLCA2, pmlca);
                        mtpmr(PMRN_PMLCB2, pmlcb);
                        break;
                case 3:
                        mtpmr(PMRN_PMLCA3, pmlca);
                        mtpmr(PMRN_PMLCB3, pmlcb);
                        break;
                default:
                        panic("Bad ctr number!\n");
        }
}

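/* Select the event monitored by counter ctr. */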
static void set_pmc_event(int ctr, int event)
{
        u32 pmlca;

        pmlca = get_pmlca(ctr);

        pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
                ((event << PMLCA_EVENT_SHIFT) &
                 PMLCA_EVENT_MASK);

        set_pmlca(ctr, pmlca);
}

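/*
 * Enable or disable counting in user and supervisor state for counter ctr
 * by clearing or setting the corresponding freeze bits.
 */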
static void set_pmc_user_kernel(int ctr, int user, int kernel)
{
        u32 pmlca;

        pmlca = get_pmlca(ctr);

        if (user)
                pmlca &= ~PMLCA_FCU;
        else
                pmlca |= PMLCA_FCU;

        if (kernel)
                pmlca &= ~PMLCA_FCS;
        else
                pmlca |= PMLCA_FCS;

        set_pmlca(ctr, pmlca);
}

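/*
 * Control whether counter ctr counts while the mark bit is clear (mark0)
 * and/or while it is set (mark1), via the FCM0/FCM1 freeze bits.
 */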
static void set_pmc_marked(int ctr, int mark0, int mark1)
{
        u32 pmlca = get_pmlca(ctr);

        if (mark0)
                pmlca &= ~PMLCA_FCM0;
        else
                pmlca |= PMLCA_FCM0;

        if (mark1)
                pmlca &= ~PMLCA_FCM1;
        else
                pmlca |= PMLCA_FCM1;

        set_pmlca(ctr, pmlca);
}

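/* Unfreeze counter ctr and enable or disable its overflow condition. */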
static void pmc_start_ctr(int ctr, int enable)
{
        u32 pmlca = get_pmlca(ctr);

        pmlca &= ~PMLCA_FC;

        if (enable)
                pmlca |= PMLCA_CE;
        else
                pmlca &= ~PMLCA_CE;

        set_pmlca(ctr, pmlca);
}

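/*
 * Globally unfreeze the counters, have them freeze again on an enabled
 * condition or event, and enable or disable the performance monitor
 * interrupt.
 */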
static void pmc_start_ctrs(int enable)
{
        u32 pmgc0 = mfpmr(PMRN_PMGC0);

        pmgc0 &= ~PMGC0_FAC;
        pmgc0 |= PMGC0_FCECE;

        if (enable)
                pmgc0 |= PMGC0_PMIE;
        else
                pmgc0 &= ~PMGC0_PMIE;

        mtpmr(PMRN_PMGC0, pmgc0);
}

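/* Freeze all counters and disable the performance monitor interrupt. */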
static void pmc_stop_ctrs(void)
{
        u32 pmgc0 = mfpmr(PMRN_PMGC0);

        pmgc0 |= PMGC0_FAC;

        pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);

        mtpmr(PMRN_PMGC0, pmgc0);
}

static int fsl_emb_cpu_setup(struct op_counter_config *ctr)
{
        int i;

        /* freeze all counters */
        pmc_stop_ctrs();

        for (i = 0; i < num_counters; i++) {
                init_pmc_stop(i);

                set_pmc_event(i, ctr[i].event);

                set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
        }

        return 0;
}

static int fsl_emb_reg_setup(struct op_counter_config *ctr,
                             struct op_system_config *sys,
                             int num_ctrs)
{
        int i;

        num_counters = num_ctrs;

        /* Our counters count up, and "count" refers to how many events
         * occur before the next interrupt; we interrupt on overflow.
         * So we calculate the starting value which will give us "count"
         * counts until overflow.  The events themselves are set on the
         * enabled counters in fsl_emb_cpu_setup(). */
        for (i = 0; i < num_counters; ++i)
                reset_value[i] = 0x80000000UL - ctr[i].count;

        return 0;
}

static int fsl_emb_start(struct op_counter_config *ctr)
{
        int i;

        mtmsr(mfmsr() | MSR_PMM);

        for (i = 0; i < num_counters; ++i) {
                if (ctr[i].enabled) {
                        ctr_write(i, reset_value[i]);
                        /* Set each enabled counter to only
                         * count when the Mark bit is *not* set */
                        set_pmc_marked(i, 1, 0);
                        pmc_start_ctr(i, 1);
                } else {
                        ctr_write(i, 0);

                        /* Set the ctr to be stopped */
                        pmc_start_ctr(i, 0);
                }
        }

        /* Clear the freeze bit, and enable the interrupt.
         * The counters won't actually start until the rfi clears
         * the PMM bit */
        pmc_start_ctrs(1);

        oprofile_running = 1;

        pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(),
                        mfpmr(PMRN_PMGC0));

        return 0;
}

static void fsl_emb_stop(void)
{
        /* freeze counters */
        pmc_stop_ctrs();

        oprofile_running = 0;

        pr_debug("stop on cpu %d, pmgc0 %x\n", smp_processor_id(),
                        mfpmr(PMRN_PMGC0));

        mb();
}


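/*
 * Performance monitor exception handler: any counter that has overflowed
 * (bit 31 set, so its value reads back negative) gets a sample recorded
 * and is reloaded with its reset value.
 */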
static void fsl_emb_handle_interrupt(struct pt_regs *regs,
                                     struct op_counter_config *ctr)
{
        unsigned long pc;
        int is_kernel;
        int val;
        int i;

        pc = regs->nip;
        is_kernel = is_kernel_addr(pc);

        for (i = 0; i < num_counters; ++i) {
                val = ctr_read(i);
                if (val < 0) {
                        if (oprofile_running && ctr[i].enabled) {
                                oprofile_add_ext_sample(pc, regs, i, is_kernel);
                                ctr_write(i, reset_value[i]);
                        } else {
                                ctr_write(i, 0);
                        }
                }
        }

        /* The freeze bit was set by the interrupt. */
        /* Clear the freeze bit, and reenable the interrupt.  The
         * counters won't actually start until the rfi clears the PMM
         * bit.  The PMM bit should not be set until after the interrupt
         * is cleared to avoid it getting lost in some hypervisor
         * environments.
         */
        mtmsr(mfmsr() | MSR_PMM);
        pmc_start_ctrs(1);
}

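/* Model hooks registered with the common powerpc oprofile layer. */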
struct op_powerpc_model op_model_fsl_emb = {
        .reg_setup         = fsl_emb_reg_setup,
        .cpu_setup         = fsl_emb_cpu_setup,
        .start             = fsl_emb_start,
        .stop              = fsl_emb_stop,
        .handle_interrupt  = fsl_emb_handle_interrupt,
};