MIPS oprofile performance-counter model source (op_model_mipsxx).
Two kernel versions are shown for comparison: v3.1 first, then v5.4 below.

Version v3.1:
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 2004, 05, 06 by Ralf Baechle
  7 * Copyright (C) 2005 by MIPS Technologies, Inc.
  8 */
  9#include <linux/cpumask.h>
 10#include <linux/oprofile.h>
 11#include <linux/interrupt.h>
 12#include <linux/smp.h>
 13#include <asm/irq_regs.h>
 
 14
 15#include "op_impl.h"
 16
/*
 * Bit definitions for the performance counter control registers.
 * NOTE(review): layout appears to follow the MIPS32 PRA PerfCnt
 * control register — confirm against asm/mipsregs.h for the target core.
 */
#define M_PERFCTL_EXL			(1UL      <<  0)	/* count while Status.EXL set */
#define M_PERFCTL_KERNEL		(1UL      <<  1)	/* count in kernel mode */
#define M_PERFCTL_SUPERVISOR		(1UL      <<  2)	/* count in supervisor mode */
#define M_PERFCTL_USER			(1UL      <<  3)	/* count in user mode */
#define M_PERFCTL_INTERRUPT_ENABLE	(1UL      <<  4)	/* raise interrupt on overflow */
#define M_PERFCTL_EVENT(event)		(((event) & 0x3ff)  << 5)
#define M_PERFCTL_VPEID(vpe)		((vpe)    << 16)
/* MT cores: select which thread contexts contribute to the count. */
#define M_PERFCTL_MT_EN(filter)		((filter) << 20)
#define    M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
#define    M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
#define    M_TC_EN_TC			M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid)		((tcid)   << 22)
#define M_PERFCTL_WIDE			(1UL      << 30)
#define M_PERFCTL_MORE			(1UL      << 31)	/* another counter follows this one */

/* A counter has overflowed when its top bit is set. */
#define M_COUNTER_OVERFLOW		(1UL      << 31)

/* Previous perf_irq handler, saved in init and restored on exit. */
static int (*save_perf_irq)(void);
 
 
 
 
 
 
 
 
 
 
 
 
 35
#ifdef CONFIG_MIPS_MT_SMP
/* Non-zero when each TC has its own counters (Config7 bit, set in init). */
static int cpu_has_mipsmt_pertccounters;
/* Extra control bits on MT: count only this VPE, tagged with its id. */
#define WHAT		(M_TC_EN_VPE | \
			 M_PERFCTL_VPEID(cpu_data[smp_processor_id()].vpe_id))
/* Which per-VPE register bank to address; 0 when counters are per-TC. */
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : cpu_data[smp_processor_id()].vpe_id)

/*
 * The number of bits to shift to convert between counters per core and
 * counters per VPE.  There is no reasonable interface atm to obtain the
 * number of VPEs used by Linux and in the 34K this number is fixed to two
 * anyways so we hardcore a few things here for the moment.  The way it's
 * done here will ensure that oprofile VSMP kernel will run right on a lesser
 * core like a 24K also or with maxcpus=1.
 */
static inline unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

#else

#define WHAT		0
#define vpe_id()	0

/* Single-VPE configuration: no per-VPE conversion needed. */
static inline unsigned int vpe_shift(void)
{
	return 0;
}

#endif
 70
 71static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
 72{
 73	return counters >> vpe_shift();
 74}
 75
 76static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
 77{
 78	return counters << vpe_shift();
 79}
 80
/*
 * Generate r_c0_<reg><n>() / w_c0_<reg><n>() accessors that transparently
 * redirect to the sibling register <np> when running on VPE 1.  The n/np
 * pairings in the instantiations below map VPE 1's counter k onto
 * register (k + 2) % 4 of the core-wide register file.  Any vpe_id()
 * other than 0 or 1 is a hard bug.
 */
#define __define_perf_accessors(r, n, np)				\
									\
static inline unsigned int r_c0_ ## r ## n(void)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		return read_c0_ ## r ## n();				\
	case 1:								\
		return read_c0_ ## r ## np();				\
	default:							\
		BUG();							\
	}								\
	return 0;							\
}									\
									\
static inline void w_c0_ ## r ## n(unsigned int value)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		write_c0_ ## r ## n(value);				\
		return;							\
	case 1:								\
		write_c0_ ## r ## np(value);				\
		return;							\
	default:							\
		BUG();							\
	}								\
	return;								\
}									\

__define_perf_accessors(perfcntr, 0, 2)
__define_perf_accessors(perfcntr, 1, 3)
__define_perf_accessors(perfcntr, 2, 0)
__define_perf_accessors(perfcntr, 3, 1)

__define_perf_accessors(perfctrl, 0, 2)
__define_perf_accessors(perfctrl, 1, 3)
__define_perf_accessors(perfctrl, 2, 0)
__define_perf_accessors(perfctrl, 3, 1)
124
/* Exported model operations; fields are filled in at the bottom of the file. */
struct op_mips_model op_model_mipsxx_ops;

/* Per-counter control words and start values computed by mipsxx_reg_setup(). */
static struct mipsxx_register_config {
	unsigned int control[4];
	unsigned int counter[4];
} reg;
131
132/* Compute all of the registers in preparation for enabling profiling.  */
133
134static void mipsxx_reg_setup(struct op_counter_config *ctr)
135{
136	unsigned int counters = op_model_mipsxx_ops.num_counters;
137	int i;
138
139	/* Compute the performance counter control word.  */
140	for (i = 0; i < counters; i++) {
141		reg.control[i] = 0;
142		reg.counter[i] = 0;
143
144		if (!ctr[i].enabled)
145			continue;
146
147		reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
148		                 M_PERFCTL_INTERRUPT_ENABLE;
149		if (ctr[i].kernel)
150			reg.control[i] |= M_PERFCTL_KERNEL;
151		if (ctr[i].user)
152			reg.control[i] |= M_PERFCTL_USER;
153		if (ctr[i].exl)
154			reg.control[i] |= M_PERFCTL_EXL;
 
 
155		reg.counter[i] = 0x80000000 - ctr[i].count;
156	}
157}
158
159/* Program all of the registers in preparation for enabling profiling.  */
160
161static void mipsxx_cpu_setup(void *args)
162{
163	unsigned int counters = op_model_mipsxx_ops.num_counters;
164
 
 
 
165	switch (counters) {
166	case 4:
167		w_c0_perfctrl3(0);
168		w_c0_perfcntr3(reg.counter[3]);
 
169	case 3:
170		w_c0_perfctrl2(0);
171		w_c0_perfcntr2(reg.counter[2]);
 
172	case 2:
173		w_c0_perfctrl1(0);
174		w_c0_perfcntr1(reg.counter[1]);
 
175	case 1:
176		w_c0_perfctrl0(0);
177		w_c0_perfcntr0(reg.counter[0]);
178	}
179}
180
181/* Start all counters on current CPU */
182static void mipsxx_cpu_start(void *args)
183{
184	unsigned int counters = op_model_mipsxx_ops.num_counters;
185
 
 
 
186	switch (counters) {
187	case 4:
188		w_c0_perfctrl3(WHAT | reg.control[3]);
 
189	case 3:
190		w_c0_perfctrl2(WHAT | reg.control[2]);
 
191	case 2:
192		w_c0_perfctrl1(WHAT | reg.control[1]);
 
193	case 1:
194		w_c0_perfctrl0(WHAT | reg.control[0]);
195	}
196}
197
198/* Stop all counters on current CPU */
199static void mipsxx_cpu_stop(void *args)
200{
201	unsigned int counters = op_model_mipsxx_ops.num_counters;
202
 
 
 
203	switch (counters) {
204	case 4:
205		w_c0_perfctrl3(0);
 
206	case 3:
207		w_c0_perfctrl2(0);
 
208	case 2:
209		w_c0_perfctrl1(0);
 
210	case 1:
211		w_c0_perfctrl0(0);
212	}
213}
214
215static int mipsxx_perfcount_handler(void)
216{
217	unsigned int counters = op_model_mipsxx_ops.num_counters;
218	unsigned int control;
219	unsigned int counter;
220	int handled = IRQ_NONE;
221
222	if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
223		return handled;
224
225	switch (counters) {
226#define HANDLE_COUNTER(n)						\
 
227	case n + 1:							\
228		control = r_c0_perfctrl ## n();				\
229		counter = r_c0_perfcntr ## n();				\
230		if ((control & M_PERFCTL_INTERRUPT_ENABLE) &&		\
231		    (counter & M_COUNTER_OVERFLOW)) {			\
232			oprofile_add_sample(get_irq_regs(), n);		\
233			w_c0_perfcntr ## n(reg.counter[n]);		\
234			handled = IRQ_HANDLED;				\
235		}
236	HANDLE_COUNTER(3)
237	HANDLE_COUNTER(2)
238	HANDLE_COUNTER(1)
239	HANDLE_COUNTER(0)
240	}
241
242	return handled;
243}
244
245#define M_CONFIG1_PC	(1 << 4)
246
247static inline int __n_counters(void)
248{
249	if (!(read_c0_config1() & M_CONFIG1_PC))
250		return 0;
251	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
252		return 1;
253	if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
254		return 2;
255	if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
256		return 3;
257
258	return 4;
259}
260
261static inline int n_counters(void)
262{
263	int counters;
264
265	switch (current_cpu_type()) {
266	case CPU_R10000:
267		counters = 2;
268		break;
269
270	case CPU_R12000:
271	case CPU_R14000:
 
272		counters = 4;
273		break;
274
275	default:
276		counters = __n_counters();
277	}
278
279	return counters;
280}
281
/* Zero every implemented control and count register on this CPU. */
static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;

	/* Each case falls through intentionally to the lower counters. */
	switch (counters) {
	case 4:
		w_c0_perfctrl3(0);
		w_c0_perfcntr3(0);
		/* fall through */
	case 3:
		w_c0_perfctrl2(0);
		w_c0_perfcntr2(0);
		/* fall through */
	case 2:
		w_c0_perfctrl1(0);
		w_c0_perfcntr1(0);
		/* fall through */
	case 1:
		w_c0_perfctrl0(0);
		w_c0_perfcntr0(0);
	}
}
300
 
 
 
 
 
301static int __init mipsxx_init(void)
302{
303	int counters;
304
305	counters = n_counters();
306	if (counters == 0) {
307		printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
308		return -ENODEV;
309	}
310
311#ifdef CONFIG_MIPS_MT_SMP
312	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
313	if (!cpu_has_mipsmt_pertccounters)
314		counters = counters_total_to_per_cpu(counters);
315#endif
316	on_each_cpu(reset_counters, (void *)(long)counters, 1);
317
318	op_model_mipsxx_ops.num_counters = counters;
319	switch (current_cpu_type()) {
 
 
 
 
 
 
 
 
320	case CPU_20KC:
321		op_model_mipsxx_ops.cpu_type = "mips/20K";
322		break;
323
324	case CPU_24K:
325		op_model_mipsxx_ops.cpu_type = "mips/24K";
326		break;
327
328	case CPU_25KF:
329		op_model_mipsxx_ops.cpu_type = "mips/25K";
330		break;
331
332	case CPU_1004K:
333#if 0
334		/* FIXME: report as 34K for now */
335		op_model_mipsxx_ops.cpu_type = "mips/1004K";
336		break;
337#endif
338
339	case CPU_34K:
340		op_model_mipsxx_ops.cpu_type = "mips/34K";
341		break;
342
 
343	case CPU_74K:
344		op_model_mipsxx_ops.cpu_type = "mips/74K";
345		break;
346
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
347	case CPU_5KC:
348		op_model_mipsxx_ops.cpu_type = "mips/5K";
349		break;
350
351	case CPU_R10000:
352		if ((current_cpu_data.processor_id & 0xff) == 0x20)
353			op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
354		else
355			op_model_mipsxx_ops.cpu_type = "mips/r10000";
356		break;
357
358	case CPU_R12000:
359	case CPU_R14000:
360		op_model_mipsxx_ops.cpu_type = "mips/r12000";
361		break;
362
 
 
 
 
363	case CPU_SB1:
364	case CPU_SB1A:
365		op_model_mipsxx_ops.cpu_type = "mips/sb1";
366		break;
367
 
 
 
 
 
 
 
 
368	default:
369		printk(KERN_ERR "Profiling unsupported for this CPU\n");
370
371		return -ENODEV;
372	}
373
374	save_perf_irq = perf_irq;
375	perf_irq = mipsxx_perfcount_handler;
376
 
 
 
 
 
 
 
 
 
 
 
 
 
 
377	return 0;
378}
379
380static void mipsxx_exit(void)
381{
382	int counters = op_model_mipsxx_ops.num_counters;
 
 
 
383
384	counters = counters_per_cpu_to_total(counters);
385	on_each_cpu(reset_counters, (void *)(long)counters, 1);
386
387	perf_irq = save_perf_irq;
388}
389
/* Hook table consumed by the MIPS oprofile common code. */
struct op_mips_model op_model_mipsxx_ops = {
	.reg_setup	= mipsxx_reg_setup,
	.cpu_setup	= mipsxx_cpu_setup,
	.init		= mipsxx_init,
	.exit		= mipsxx_exit,
	.cpu_start	= mipsxx_cpu_start,
	.cpu_stop	= mipsxx_cpu_stop,
};
Version v5.4 of the same file:
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 2004, 05, 06 by Ralf Baechle
  7 * Copyright (C) 2005 by MIPS Technologies, Inc.
  8 */
  9#include <linux/cpumask.h>
 10#include <linux/oprofile.h>
 11#include <linux/interrupt.h>
 12#include <linux/smp.h>
 13#include <asm/irq_regs.h>
 14#include <asm/time.h>
 15
 16#include "op_impl.h"
 17
/* Build the event-select field, masked to the architected field width. */
#define M_PERFCTL_EVENT(event)		(((event) << MIPS_PERFCTRL_EVENT_S) & \
					 MIPS_PERFCTRL_EVENT)
#define M_PERFCTL_VPEID(vpe)		((vpe)	  << MIPS_PERFCTRL_VPEID_S)

/* A counter has overflowed when its top bit is set. */
#define M_COUNTER_OVERFLOW		(1UL	  << 31)

/* Previous perf_irq handler, saved in init and restored on exit. */
static int (*save_perf_irq)(void);
/* IRQ used for counter overflow interrupts; -1 when none is available. */
static int perfcount_irq;

/*
 * XLR has only one set of counters per core. Designate the
 * first hardware thread in the core for setup and init.
 * Skip CPUs with non-zero hardware thread id (4 hwt per core)
 */
#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
#define oprofile_skip_cpu(c)	((cpu_logical_map(c) & 0x3) != 0)
#else
#define oprofile_skip_cpu(c)	0
#endif
 37
#ifdef CONFIG_MIPS_MT_SMP
/* Extra control bits on MT: count only this VPE, tagged with its id. */
#define WHAT		(MIPS_PERFCTRL_MT_EN_VPE | \
			 M_PERFCTL_VPEID(cpu_vpe_id(&current_cpu_data)))
/* Which per-VPE register bank to address; 0 when counters are per-TC. */
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : cpu_vpe_id(&current_cpu_data))

/*
 * The number of bits to shift to convert between counters per core and
 * counters per VPE.  There is no reasonable interface atm to obtain the
 * number of VPEs used by Linux and in the 34K this number is fixed to two
 * anyways so we hardcore a few things here for the moment.  The way it's
 * done here will ensure that oprofile VSMP kernel will run right on a lesser
 * core like a 24K also or with maxcpus=1.
 */
static inline unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

#else

#define WHAT		0
#define vpe_id()	0

/* Single-VPE configuration: no per-VPE conversion needed. */
static inline unsigned int vpe_shift(void)
{
	return 0;
}

#endif
 71
 72static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
 73{
 74	return counters >> vpe_shift();
 75}
 76
 77static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
 78{
 79	return counters << vpe_shift();
 80}
 81
/*
 * Generate r_c0_<reg><n>() / w_c0_<reg><n>() accessors that transparently
 * redirect to the sibling register <np> when running on VPE 1.  The n/np
 * pairings in the instantiations below map VPE 1's counter k onto
 * register (k + 2) % 4 of the core-wide register file.  Any vpe_id()
 * other than 0 or 1 is a hard bug.
 */
#define __define_perf_accessors(r, n, np)				\
									\
static inline unsigned int r_c0_ ## r ## n(void)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		return read_c0_ ## r ## n();				\
	case 1:								\
		return read_c0_ ## r ## np();				\
	default:							\
		BUG();							\
	}								\
	return 0;							\
}									\
									\
static inline void w_c0_ ## r ## n(unsigned int value)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		write_c0_ ## r ## n(value);				\
		return;							\
	case 1:								\
		write_c0_ ## r ## np(value);				\
		return;							\
	default:							\
		BUG();							\
	}								\
	return;								\
}									\

__define_perf_accessors(perfcntr, 0, 2)
__define_perf_accessors(perfcntr, 1, 3)
__define_perf_accessors(perfcntr, 2, 0)
__define_perf_accessors(perfcntr, 3, 1)

__define_perf_accessors(perfctrl, 0, 2)
__define_perf_accessors(perfctrl, 1, 3)
__define_perf_accessors(perfctrl, 2, 0)
__define_perf_accessors(perfctrl, 3, 1)
125
/* Exported model operations; fields are filled in at the bottom of the file. */
struct op_mips_model op_model_mipsxx_ops;

/* Per-counter control words and start values computed by mipsxx_reg_setup(). */
static struct mipsxx_register_config {
	unsigned int control[4];
	unsigned int counter[4];
} reg;
132
133/* Compute all of the registers in preparation for enabling profiling.	*/
134
135static void mipsxx_reg_setup(struct op_counter_config *ctr)
136{
137	unsigned int counters = op_model_mipsxx_ops.num_counters;
138	int i;
139
140	/* Compute the performance counter control word.  */
141	for (i = 0; i < counters; i++) {
142		reg.control[i] = 0;
143		reg.counter[i] = 0;
144
145		if (!ctr[i].enabled)
146			continue;
147
148		reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
149				 MIPS_PERFCTRL_IE;
150		if (ctr[i].kernel)
151			reg.control[i] |= MIPS_PERFCTRL_K;
152		if (ctr[i].user)
153			reg.control[i] |= MIPS_PERFCTRL_U;
154		if (ctr[i].exl)
155			reg.control[i] |= MIPS_PERFCTRL_EXL;
156		if (boot_cpu_type() == CPU_XLR)
157			reg.control[i] |= XLR_PERFCTRL_ALLTHREADS;
158		reg.counter[i] = 0x80000000 - ctr[i].count;
159	}
160}
161
162/* Program all of the registers in preparation for enabling profiling.	*/
163
164static void mipsxx_cpu_setup(void *args)
165{
166	unsigned int counters = op_model_mipsxx_ops.num_counters;
167
168	if (oprofile_skip_cpu(smp_processor_id()))
169		return;
170
171	switch (counters) {
172	case 4:
173		w_c0_perfctrl3(0);
174		w_c0_perfcntr3(reg.counter[3]);
175		/* fall through */
176	case 3:
177		w_c0_perfctrl2(0);
178		w_c0_perfcntr2(reg.counter[2]);
179		/* fall through */
180	case 2:
181		w_c0_perfctrl1(0);
182		w_c0_perfcntr1(reg.counter[1]);
183		/* fall through */
184	case 1:
185		w_c0_perfctrl0(0);
186		w_c0_perfcntr0(reg.counter[0]);
187	}
188}
189
190/* Start all counters on current CPU */
191static void mipsxx_cpu_start(void *args)
192{
193	unsigned int counters = op_model_mipsxx_ops.num_counters;
194
195	if (oprofile_skip_cpu(smp_processor_id()))
196		return;
197
198	switch (counters) {
199	case 4:
200		w_c0_perfctrl3(WHAT | reg.control[3]);
201		/* fall through */
202	case 3:
203		w_c0_perfctrl2(WHAT | reg.control[2]);
204		/* fall through */
205	case 2:
206		w_c0_perfctrl1(WHAT | reg.control[1]);
207		/* fall through */
208	case 1:
209		w_c0_perfctrl0(WHAT | reg.control[0]);
210	}
211}
212
213/* Stop all counters on current CPU */
214static void mipsxx_cpu_stop(void *args)
215{
216	unsigned int counters = op_model_mipsxx_ops.num_counters;
217
218	if (oprofile_skip_cpu(smp_processor_id()))
219		return;
220
221	switch (counters) {
222	case 4:
223		w_c0_perfctrl3(0);
224		/* fall through */
225	case 3:
226		w_c0_perfctrl2(0);
227		/* fall through */
228	case 2:
229		w_c0_perfctrl1(0);
230		/* fall through */
231	case 1:
232		w_c0_perfctrl0(0);
233	}
234}
235
/*
 * Shared overflow handler: scan all implemented counters highest-first,
 * add an oprofile sample for each overflowed one and re-arm it with its
 * start value.  Returns IRQ_HANDLED if any counter had overflowed,
 * IRQ_NONE otherwise.
 */
static int mipsxx_perfcount_handler(void)
{
	unsigned int counters = op_model_mipsxx_ops.num_counters;
	unsigned int control;
	unsigned int counter;
	int handled = IRQ_NONE;

	/* On R2 cores Cause.PCI flags a pending counter interrupt. */
	if (cpu_has_mips_r2 && !(read_c0_cause() & CAUSEF_PCI))
		return handled;

	switch (counters) {
#define HANDLE_COUNTER(n)						\
	/* fall through */						\
	case n + 1:							\
		control = r_c0_perfctrl ## n();				\
		counter = r_c0_perfcntr ## n();				\
		if ((control & MIPS_PERFCTRL_IE) &&			\
		    (counter & M_COUNTER_OVERFLOW)) {			\
			oprofile_add_sample(get_irq_regs(), n);		\
			w_c0_perfcntr ## n(reg.counter[n]);		\
			handled = IRQ_HANDLED;				\
		}
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

	return handled;
}
266
 
 
267static inline int __n_counters(void)
268{
269	if (!cpu_has_perf)
270		return 0;
271	if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
272		return 1;
273	if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
274		return 2;
275	if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
276		return 3;
277
278	return 4;
279}
280
281static inline int n_counters(void)
282{
283	int counters;
284
285	switch (current_cpu_type()) {
286	case CPU_R10000:
287		counters = 2;
288		break;
289
290	case CPU_R12000:
291	case CPU_R14000:
292	case CPU_R16000:
293		counters = 4;
294		break;
295
296	default:
297		counters = __n_counters();
298	}
299
300	return counters;
301}
302
/* Zero every implemented control and count register on this CPU. */
static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;

	if (counters < 1 || counters > 4)
		return;

	if (counters >= 4) {
		w_c0_perfctrl3(0);
		w_c0_perfcntr3(0);
	}
	if (counters >= 3) {
		w_c0_perfctrl2(0);
		w_c0_perfcntr2(0);
	}
	if (counters >= 2) {
		w_c0_perfctrl1(0);
		w_c0_perfcntr1(0);
	}
	w_c0_perfctrl0(0);
	w_c0_perfcntr0(0);
}
324
/* irqaction shim: adapt mipsxx_perfcount_handler() to the IRQ handler prototype. */
static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
{
	return mipsxx_perfcount_handler();
}
329
/*
 * Probe the CPU, size and reset the counter set, install the overflow
 * handler, and claim the counter IRQ when one exists.  Returns 0 on
 * success, -ENODEV for CPUs without counters or unrecognized models,
 * or request_irq()'s error code.
 */
static int __init mipsxx_init(void)
{
	int counters;

	counters = n_counters();
	if (counters == 0) {
		printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_MT_SMP
	/* Core-wide counters are shared between VPEs; scale down the count. */
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif
	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	op_model_mipsxx_ops.num_counters = counters;
	switch (current_cpu_type()) {
	case CPU_M14KC:
		op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
		break;

	case CPU_M14KEC:
		op_model_mipsxx_ops.cpu_type = "mips/M14KEc";
		break;

	case CPU_20KC:
		op_model_mipsxx_ops.cpu_type = "mips/20K";
		break;

	case CPU_24K:
		op_model_mipsxx_ops.cpu_type = "mips/24K";
		break;

	case CPU_25KF:
		op_model_mipsxx_ops.cpu_type = "mips/25K";
		break;

	/* 1004K is reported as a 34K. */
	case CPU_1004K:
	case CPU_34K:
		op_model_mipsxx_ops.cpu_type = "mips/34K";
		break;

	case CPU_1074K:
	case CPU_74K:
		op_model_mipsxx_ops.cpu_type = "mips/74K";
		break;

	case CPU_INTERAPTIV:
		op_model_mipsxx_ops.cpu_type = "mips/interAptiv";
		break;

	case CPU_PROAPTIV:
		op_model_mipsxx_ops.cpu_type = "mips/proAptiv";
		break;

	case CPU_P5600:
		op_model_mipsxx_ops.cpu_type = "mips/P5600";
		break;

	case CPU_I6400:
		op_model_mipsxx_ops.cpu_type = "mips/I6400";
		break;

	case CPU_M5150:
		op_model_mipsxx_ops.cpu_type = "mips/M5150";
		break;

	case CPU_5KC:
		op_model_mipsxx_ops.cpu_type = "mips/5K";
		break;

	case CPU_R10000:
		if ((current_cpu_data.processor_id & 0xff) == 0x20)
			op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
		else
			op_model_mipsxx_ops.cpu_type = "mips/r10000";
		break;

	case CPU_R12000:
	case CPU_R14000:
		op_model_mipsxx_ops.cpu_type = "mips/r12000";
		break;

	case CPU_R16000:
		op_model_mipsxx_ops.cpu_type = "mips/r16000";
		break;

	case CPU_SB1:
	case CPU_SB1A:
		op_model_mipsxx_ops.cpu_type = "mips/sb1";
		break;

	case CPU_LOONGSON1:
		op_model_mipsxx_ops.cpu_type = "mips/loongson1";
		break;

	case CPU_XLR:
		op_model_mipsxx_ops.cpu_type = "mips/xlr";
		break;

	default:
		printk(KERN_ERR "Profiling unsupported for this CPU\n");

		return -ENODEV;
	}

	/* Hook the perf counter handler; the old one is restored in exit. */
	save_perf_irq = perf_irq;
	perf_irq = mipsxx_perfcount_handler;

	/* Pick the counter IRQ: platform hook first, then the CP0 IRQ line. */
	if (get_c0_perfcount_int)
		perfcount_irq = get_c0_perfcount_int();
	else if (cp0_perfcount_irq >= 0)
		perfcount_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	else
		perfcount_irq = -1;

	/* save_perf_irq doubles as the dev_id cookie for the shared IRQ. */
	if (perfcount_irq >= 0)
		return request_irq(perfcount_irq, mipsxx_perfcount_int,
				   IRQF_PERCPU | IRQF_NOBALANCING |
				   IRQF_NO_THREAD | IRQF_NO_SUSPEND |
				   IRQF_SHARED,
				   "Perfcounter", save_perf_irq);

	return 0;
}
456
457static void mipsxx_exit(void)
458{
459	int counters = op_model_mipsxx_ops.num_counters;
460
461	if (perfcount_irq >= 0)
462		free_irq(perfcount_irq, save_perf_irq);
463
464	counters = counters_per_cpu_to_total(counters);
465	on_each_cpu(reset_counters, (void *)(long)counters, 1);
466
467	perf_irq = save_perf_irq;
468}
469
/* Hook table consumed by the MIPS oprofile common code. */
struct op_mips_model op_model_mipsxx_ops = {
	.reg_setup	= mipsxx_reg_setup,
	.cpu_setup	= mipsxx_cpu_setup,
	.init		= mipsxx_init,
	.exit		= mipsxx_exit,
	.cpu_start	= mipsxx_cpu_start,
	.cpu_stop	= mipsxx_cpu_stop,
};