   1/*
   2 * Cell Broadband Engine OProfile Support
   3 *
   4 * (C) Copyright IBM Corporation 2006
   5 *
   6 * Author: David Erb (djerb@us.ibm.com)
   7 * Modifications:
   8 *	   Carl Love <carll@us.ibm.com>
   9 *	   Maynard Johnson <maynardj@us.ibm.com>
  10 *
  11 * This program is free software; you can redistribute it and/or
  12 * modify it under the terms of the GNU General Public License
  13 * as published by the Free Software Foundation; either version
  14 * 2 of the License, or (at your option) any later version.
  15 */
  16
  17#include <linux/cpufreq.h>
  18#include <linux/delay.h>
  19#include <linux/jiffies.h>
  20#include <linux/kthread.h>
  21#include <linux/oprofile.h>
  22#include <linux/percpu.h>
  23#include <linux/smp.h>
  24#include <linux/spinlock.h>
  25#include <linux/timer.h>
  26#include <asm/cell-pmu.h>
  27#include <asm/cputable.h>
  28#include <asm/firmware.h>
  29#include <asm/io.h>
  30#include <asm/oprofile_impl.h>
  31#include <asm/processor.h>
  32#include <asm/prom.h>
  33#include <asm/ptrace.h>
  34#include <asm/reg.h>
  35#include <asm/rtas.h>
  36#include <asm/cell-regs.h>
  37
  38#include "../platforms/cell/interrupt.h"
  39#include "cell/pr_util.h"
  40
  41#define PPU_PROFILING            0
  42#define SPU_PROFILING_CYCLES     1
  43#define SPU_PROFILING_EVENTS     2
  44
  45#define SPU_EVENT_NUM_START      4100
  46#define SPU_EVENT_NUM_STOP       4399
  47#define SPU_PROFILE_EVENT_ADDR          4363  /* spu, address trace, decimal */
  48#define SPU_PROFILE_EVENT_ADDR_MASK_A   0x146 /* sub unit set to zero */
  49#define SPU_PROFILE_EVENT_ADDR_MASK_B   0x186 /* sub unit set to zero */
  50
  51#define NUM_SPUS_PER_NODE    8
  52#define SPU_CYCLES_EVENT_NUM 2	/*  event number for SPU_CYCLES */
  53
  54#define PPU_CYCLES_EVENT_NUM 1	/*  event number for CYCLES */
  55#define PPU_CYCLES_GRP_NUM   1	/* special group number for identifying
  56				 * PPU_CYCLES event
  57				 */
  58#define CBE_COUNT_ALL_CYCLES 0x42800000 /* PPU cycle event specifier */
  59
  60#define NUM_THREADS 2         /* number of physical threads in
  61			       * physical processor
  62			       */
  63#define NUM_DEBUG_BUS_WORDS 4
  64#define NUM_INPUT_BUS_WORDS 2
  65
  66#define MAX_SPU_COUNT 0xFFFFFF	/* maximum 24 bit LFSR value */
  67
   68/* The minimum HW interval timer setting to send a value to the trace
   69 * buffer is 10 cycles.  To configure the counter to send a value every
   70 * N cycles, set the counter to 2^32 - 1 - N.
   71 */
   72#define NUM_INTERVAL_CYC  (0xFFFFFFFF - 10)
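/*
 * Worked example (editor's illustration): for N = 10, the interval
 * timer is loaded with 2^32 - 1 - 10 = 0xFFFFFFF5, which is exactly
 * what NUM_INTERVAL_CYC expands to.
 */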
  73
  74/*
  75 * spu_cycle_reset is the number of cycles between samples.
  76 * This variable is used for SPU profiling and should ONLY be set
  77 * at the beginning of cell_reg_setup; otherwise, it's read-only.
  78 */
  79static unsigned int spu_cycle_reset;
  80static unsigned int profiling_mode;
  81static int spu_evnt_phys_spu_indx;
  82
  83struct pmc_cntrl_data {
  84	unsigned long vcntr;
  85	unsigned long evnts;
  86	unsigned long masks;
  87	unsigned long enabled;
  88};
  89
  90/*
  91 * ibm,cbe-perftools rtas parameters
  92 */
  93struct pm_signal {
  94	u16 cpu;		/* Processor to modify */
  95	u16 sub_unit;		/* hw subunit this applies to (if applicable)*/
  96	short int signal_group; /* Signal Group to Enable/Disable */
  97	u8 bus_word;		/* Enable/Disable on this Trace/Trigger/Event
  98				 * Bus Word(s) (bitmask)
  99				 */
 100	u8 bit;			/* Trigger/Event bit (if applicable) */
 101};
 102
 103/*
 104 * rtas call arguments
 105 */
 106enum {
 107	SUBFUNC_RESET = 1,
 108	SUBFUNC_ACTIVATE = 2,
 109	SUBFUNC_DEACTIVATE = 3,
 110
 111	PASSTHRU_IGNORE = 0,
 112	PASSTHRU_ENABLE = 1,
 113	PASSTHRU_DISABLE = 2,
 114};
 115
 116struct pm_cntrl {
 117	u16 enable;
 118	u16 stop_at_max;
 119	u16 trace_mode;
 120	u16 freeze;
 121	u16 count_mode;
 122	u16 spu_addr_trace;
 123	u8  trace_buf_ovflw;
 124};
 125
 126static struct {
 127	u32 group_control;
 128	u32 debug_bus_control;
 129	struct pm_cntrl pm_cntrl;
 130	u32 pm07_cntrl[NR_PHYS_CTRS];
 131} pm_regs;
 132
 133#define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12)
 134#define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4)
 135#define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8)
 136#define GET_POLARITY(x) ((x & 0x00000002) >> 1)
 137#define GET_COUNT_CYCLES(x) (x & 0x00000001)
 138#define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)
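/*
 * Worked example (editor's illustration): a unit_mask of 0x146 (the
 * SPU_PROFILE_EVENT_ADDR_MASK_A value above) decodes under these
 * extractors as sub_unit 0, bus_type 1, bus_word 0x4, input_control 1,
 * polarity 1 and count_cycles 0.
 */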
 139
 140static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);
 141static unsigned long spu_pm_cnt[MAX_NUMNODES * NUM_SPUS_PER_NODE];
 142static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];
 143
 144/*
 145 * The CELL profiling code makes rtas calls to setup the debug bus to
 146 * route the performance signals.  Additionally, SPU profiling requires
 147 * a second rtas call to setup the hardware to capture the SPU PCs.
 148 * The EIO error value is returned if the token lookups or the rtas
 149 * call fail.  The EIO error number is the best choice of the existing
  150 * error numbers.  The probability of an rtas-related error is very low,
  151 * but by returning EIO and printing additional information to dmesg the
  152 * user will know that OProfile did not start, and dmesg will tell them
  153 * why.  OProfile does not support returning errors on Stop.  Not a huge
  154 * issue, since failure to reset the debug bus or stop the SPU PC
  155 * collection is not a fatal issue.  Chances are that if Stop failed,
  156 * Start doesn't work either.
 157 */
 158
 159/*
  160 * Interpretation of hdw_thread:
 161 * 0 - even virtual cpus 0, 2, 4,...
 162 * 1 - odd virtual cpus 1, 3, 5, ...
 163 *
 164 * FIXME: this is strictly wrong, we need to clean this up in a number
 165 * of places. It works for now. -arnd
 166 */
 167static u32 hdw_thread;
 168
 169static u32 virt_cntr_inter_mask;
 170static struct timer_list timer_virt_cntr;
 171static struct timer_list timer_spu_event_swap;
 172
 173/*
 174 * pm_signal needs to be global since it is initialized in
 175 * cell_reg_setup at the time when the necessary information
 176 * is available.
 177 */
 178static struct pm_signal pm_signal[NR_PHYS_CTRS];
 179static int pm_rtas_token;    /* token for debug bus setup call */
 180static int spu_rtas_token;   /* token for SPU cycle profiling */
 181
 182static u32 reset_value[NR_PHYS_CTRS];
 183static int num_counters;
 184static int oprofile_running;
 185static DEFINE_SPINLOCK(cntr_lock);
 186
 187static u32 ctr_enabled;
 188
 189static unsigned char input_bus[NUM_INPUT_BUS_WORDS];
 190
 191/*
 192 * Firmware interface functions
 193 */
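/*
 * Editor's note: the buffer's physical address is passed to the
 * firmware as two 32-bit arguments (high word, low word) because
 * rtas call arguments are 32 bits wide.
 */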
 194static int
 195rtas_ibm_cbe_perftools(int subfunc, int passthru,
 196		       void *address, unsigned long length)
 197{
 198	u64 paddr = __pa(address);
 199
 200	return rtas_call(pm_rtas_token, 5, 1, NULL, subfunc,
 201			 passthru, paddr >> 32, paddr & 0xffffffff, length);
 202}
 203
 204static void pm_rtas_reset_signals(u32 node)
 205{
 206	int ret;
 207	struct pm_signal pm_signal_local;
 208
 209	/*
 210	 * The debug bus is being set to the passthru disable state.
 211	 * However, the FW still expects at least one legal signal routing
 212	 * entry or it will return an error on the arguments.	If we don't
 213	 * supply a valid entry, we must ignore all return values.  Ignoring
 214	 * all return values means we might miss an error we should be
 215	 * concerned about.
 216	 */
 217
 218	/*  fw expects physical cpu #. */
 219	pm_signal_local.cpu = node;
 220	pm_signal_local.signal_group = 21;
 221	pm_signal_local.bus_word = 1;
 222	pm_signal_local.sub_unit = 0;
 223	pm_signal_local.bit = 0;
 224
 225	ret = rtas_ibm_cbe_perftools(SUBFUNC_RESET, PASSTHRU_DISABLE,
 226				     &pm_signal_local,
 227				     sizeof(struct pm_signal));
 228
 229	if (unlikely(ret))
  230		/*
  231		 * Not a fatal error.  The OProfile stop functions do
  232		 * not support returning an error for a failure to stop
  233		 * OProfile.
  234		 */
 235		printk(KERN_WARNING "%s: rtas returned: %d\n",
 236		       __func__, ret);
 237}
 238
 239static int pm_rtas_activate_signals(u32 node, u32 count)
 240{
 241	int ret;
 242	int i, j;
 243	struct pm_signal pm_signal_local[NR_PHYS_CTRS];
 244
 245	/*
 246	 * There is no debug setup required for the cycles event.
 247	 * Note that only events in the same group can be used.
 248	 * Otherwise, there will be conflicts in correctly routing
 249	 * the signals on the debug bus.  It is the responsibility
 250	 * of the OProfile user tool to check the events are in
 251	 * the same group.
 252	 */
 253	i = 0;
 254	for (j = 0; j < count; j++) {
 255		if (pm_signal[j].signal_group != PPU_CYCLES_GRP_NUM) {
 256
 257			/* fw expects physical cpu # */
 258			pm_signal_local[i].cpu = node;
 259			pm_signal_local[i].signal_group
 260				= pm_signal[j].signal_group;
 261			pm_signal_local[i].bus_word = pm_signal[j].bus_word;
 262			pm_signal_local[i].sub_unit = pm_signal[j].sub_unit;
 263			pm_signal_local[i].bit = pm_signal[j].bit;
 264			i++;
 265		}
 266	}
 267
 268	if (i != 0) {
 269		ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE,
 270					     pm_signal_local,
 271					     i * sizeof(struct pm_signal));
 272
 273		if (unlikely(ret)) {
 274			printk(KERN_WARNING "%s: rtas returned: %d\n",
 275			       __func__, ret);
 276			return -EIO;
 277		}
 278	}
 279
 280	return 0;
 281}
 282
 283/*
 284 * PM Signal functions
 285 */
 286static void set_pm_event(u32 ctr, int event, u32 unit_mask)
 287{
 288	struct pm_signal *p;
 289	u32 signal_bit;
 290	u32 bus_word, bus_type, count_cycles, polarity, input_control;
 291	int j, i;
 292
 293	if (event == PPU_CYCLES_EVENT_NUM) {
 294		/* Special Event: Count all cpu cycles */
 295		pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES;
 296		p = &(pm_signal[ctr]);
 297		p->signal_group = PPU_CYCLES_GRP_NUM;
 298		p->bus_word = 1;
 299		p->sub_unit = 0;
 300		p->bit = 0;
 301		goto out;
 302	} else {
 303		pm_regs.pm07_cntrl[ctr] = 0;
 304	}
 305
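	/*
	 * Editor's note: event numbers encode the debug-bus signal as
	 * (signal_group * 100) + signal_bit, so event 4363
	 * (SPU_PROFILE_EVENT_ADDR) selects bit 63 of signal group 43,
	 * as the modulo and division below show.
	 */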
 306	bus_word = GET_BUS_WORD(unit_mask);
 307	bus_type = GET_BUS_TYPE(unit_mask);
 308	count_cycles = GET_COUNT_CYCLES(unit_mask);
 309	polarity = GET_POLARITY(unit_mask);
 310	input_control = GET_INPUT_CONTROL(unit_mask);
 311	signal_bit = (event % 100);
 312
 313	p = &(pm_signal[ctr]);
 314
 315	p->signal_group = event / 100;
 316	p->bus_word = bus_word;
 317	p->sub_unit = GET_SUB_UNIT(unit_mask);
 318
 319	pm_regs.pm07_cntrl[ctr] = 0;
 320	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
 321	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity);
 322	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control);
 323
  324	/*
  325	 * Some of the islands' signal selection is based on 64 bit words.
  326	 * The debug bus words are 32 bits, and the input words to the
  327	 * performance counters are defined as 32 bits.  We need to convert
  328	 * the 64 bit island specification to the appropriate 32 bit input
  329	 * bit and bus word for the performance counter event selection.
  330	 * See the CELL performance monitoring signals manual and the
  331	 * performance counter hardware descriptions for the details.
  332	 */
 333	if (input_control == 0) {
 334		if (signal_bit > 31) {
 335			signal_bit -= 32;
 336			if (bus_word == 0x3)
 337				bus_word = 0x2;
 338			else if (bus_word == 0xc)
 339				bus_word = 0x8;
 340		}
 341
 342		if ((bus_type == 0) && p->signal_group >= 60)
 343			bus_type = 2;
 344		if ((bus_type == 1) && p->signal_group >= 50)
 345			bus_type = 0;
 346
 347		pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_MUX(signal_bit);
 348	} else {
 349		pm_regs.pm07_cntrl[ctr] = 0;
 350		p->bit = signal_bit;
 351	}
 352
 353	for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) {
 354		if (bus_word & (1 << i)) {
 355			pm_regs.debug_bus_control |=
 356				(bus_type << (30 - (2 * i)));
 357
 358			for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
 359				if (input_bus[j] == 0xff) {
 360					input_bus[j] = i;
 361					pm_regs.group_control |=
 362						(i << (30 - (2 * j)));
 363
 364					break;
 365				}
 366			}
 367		}
 368	}
 369out:
 370	;
 371}
 372
 373static void write_pm_cntrl(int cpu)
 374{
  375	/*
  376	 * OProfile will use 32 bit counters; set bits 7:10 to 0.
  377	 * pm_regs.pm_cntrl is a global.
  378	 */
 379
 380	u32 val = 0;
 381	if (pm_regs.pm_cntrl.enable == 1)
 382		val |= CBE_PM_ENABLE_PERF_MON;
 383
 384	if (pm_regs.pm_cntrl.stop_at_max == 1)
 385		val |= CBE_PM_STOP_AT_MAX;
 386
 387	if (pm_regs.pm_cntrl.trace_mode != 0)
 388		val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);
 389
 390	if (pm_regs.pm_cntrl.trace_buf_ovflw == 1)
 391		val |= CBE_PM_TRACE_BUF_OVFLW(pm_regs.pm_cntrl.trace_buf_ovflw);
 392	if (pm_regs.pm_cntrl.freeze == 1)
 393		val |= CBE_PM_FREEZE_ALL_CTRS;
 394
 395	val |= CBE_PM_SPU_ADDR_TRACE_SET(pm_regs.pm_cntrl.spu_addr_trace);
 396
 397	/*
 398	 * Routine set_count_mode must be called previously to set
 399	 * the count mode based on the user selection of user and kernel.
 400	 */
 401	val |= CBE_PM_COUNT_MODE_SET(pm_regs.pm_cntrl.count_mode);
 402	cbe_write_pm(cpu, pm_control, val);
 403}
 404
 405static inline void
 406set_count_mode(u32 kernel, u32 user)
 407{
  408	/*
  409	 * The user must specify user and kernel if they want them.  If
  410	 * neither is specified, OProfile will count in hypervisor mode.
  411	 * pm_regs.pm_cntrl is a global.
  412	 */
 413	if (kernel) {
 414		if (user)
 415			pm_regs.pm_cntrl.count_mode = CBE_COUNT_ALL_MODES;
 416		else
 417			pm_regs.pm_cntrl.count_mode =
 418				CBE_COUNT_SUPERVISOR_MODE;
 419	} else {
 420		if (user)
 421			pm_regs.pm_cntrl.count_mode = CBE_COUNT_PROBLEM_MODE;
 422		else
 423			pm_regs.pm_cntrl.count_mode =
 424				CBE_COUNT_HYPERVISOR_MODE;
 425	}
 426}
 427
 428static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
 429{
 430
 431	pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
 432	cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);
 433}
 434
 435/*
 436 * Oprofile is expected to collect data on all CPUs simultaneously.
 437 * However, there is one set of performance counters per node.	There are
 438 * two hardware threads or virtual CPUs on each node.  Hence, OProfile must
 439 * multiplex in time the performance counter collection on the two virtual
 440 * CPUs.  The multiplexing of the performance counters is done by this
 441 * virtual counter routine.
 442 *
 443 * The pmc_values used below is defined as 'per-cpu' but its use is
 444 * more akin to 'per-node'.  We need to store two sets of counter
 445 * values per node -- one for the previous run and one for the next.
 446 * The per-cpu[NR_PHYS_CTRS] gives us the storage we need.  Each odd/even
 447 * pair of per-cpu arrays is used for storing the previous and next
 448 * pmc values for a given node.
 449 * NOTE: We use the per-cpu variable to improve cache performance.
 450 *
  451 * This routine alternates loading the virtual counters for the
  452 * two virtual CPUs.
  453 */
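/*
 * Editor's illustration: on a node whose two hardware threads are
 * virtual cpus 4 and 5, per_cpu(pmc_values, 4) holds the saved counts
 * for hdw_thread 0 and per_cpu(pmc_values, 5) the counts for
 * hdw_thread 1; the cpu + prev_hdw_thread and cpu + next_hdw_thread
 * expressions below index into this odd/even pair.
 */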
 454static void cell_virtual_cntr(struct timer_list *unused)
 455{
 456	int i, prev_hdw_thread, next_hdw_thread;
 457	u32 cpu;
 458	unsigned long flags;
 459
 460	/*
  461	 * Make sure that the interrupt handler and the virt counter are
 462	 * not both playing with the counters on the same node.
 463	 */
 464
 465	spin_lock_irqsave(&cntr_lock, flags);
 466
 467	prev_hdw_thread = hdw_thread;
 468
 469	/* switch the cpu handling the interrupts */
 470	hdw_thread = 1 ^ hdw_thread;
 471	next_hdw_thread = hdw_thread;
 472
 473	pm_regs.group_control = 0;
 474	pm_regs.debug_bus_control = 0;
 475
 476	for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
 477		input_bus[i] = 0xff;
 478
  479	/*
  480	 * There are some per-thread events.  Must do the
  481	 * set event for the thread that is being started.
  482	 */
 483	for (i = 0; i < num_counters; i++)
 484		set_pm_event(i,
 485			pmc_cntrl[next_hdw_thread][i].evnts,
 486			pmc_cntrl[next_hdw_thread][i].masks);
 487
 488	/*
 489	 * The following is done only once per each node, but
 490	 * we need cpu #, not node #, to pass to the cbe_xxx functions.
 491	 */
 492	for_each_online_cpu(cpu) {
 493		if (cbe_get_hw_thread_id(cpu))
 494			continue;
 495
 496		/*
 497		 * stop counters, save counter values, restore counts
 498		 * for previous thread
 499		 */
 500		cbe_disable_pm(cpu);
 501		cbe_disable_pm_interrupts(cpu);
 502		for (i = 0; i < num_counters; i++) {
 503			per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
 504				= cbe_read_ctr(cpu, i);
 505
 506			if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
 507			    == 0xFFFFFFFF)
 508				/* If the cntr value is 0xffffffff, we must
 509				 * reset that to 0xfffffff0 when the current
 510				 * thread is restarted.	 This will generate a
 511				 * new interrupt and make sure that we never
 512				 * restore the counters to the max value.  If
 513				 * the counters were restored to the max value,
 514				 * they do not increment and no interrupts are
 515				 * generated.  Hence no more samples will be
 516				 * collected on that cpu.
 517				 */
 518				cbe_write_ctr(cpu, i, 0xFFFFFFF0);
 519			else
 520				cbe_write_ctr(cpu, i,
 521					      per_cpu(pmc_values,
 522						      cpu +
 523						      next_hdw_thread)[i]);
 524		}
 525
 526		/*
 527		 * Switch to the other thread. Change the interrupt
 528		 * and control regs to be scheduled on the CPU
 529		 * corresponding to the thread to execute.
 530		 */
 531		for (i = 0; i < num_counters; i++) {
 532			if (pmc_cntrl[next_hdw_thread][i].enabled) {
 533				/*
 534				 * There are some per thread events.
 535				 * Must do the set event, enable_cntr
 536				 * for each cpu.
 537				 */
 538				enable_ctr(cpu, i,
 539					   pm_regs.pm07_cntrl);
 540			} else {
 541				cbe_write_pm07_control(cpu, i, 0);
 542			}
 543		}
 544
 545		/* Enable interrupts on the CPU thread that is starting */
 546		cbe_enable_pm_interrupts(cpu, next_hdw_thread,
 547					 virt_cntr_inter_mask);
 548		cbe_enable_pm(cpu);
 549	}
 550
 551	spin_unlock_irqrestore(&cntr_lock, flags);
 552
 553	mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
 554}
 555
 556static void start_virt_cntrs(void)
 557{
 558	timer_setup(&timer_virt_cntr, cell_virtual_cntr, 0);
 559	timer_virt_cntr.expires = jiffies + HZ / 10;
 560	add_timer(&timer_virt_cntr);
 561}
 562
 563static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr,
 564			struct op_system_config *sys, int num_ctrs)
 565{
 566	spu_cycle_reset = ctr[0].count;
 567
 568	/*
 569	 * Each node will need to make the rtas call to start
 570	 * and stop SPU profiling.  Get the token once and store it.
 571	 */
 572	spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");
 573
 574	if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
 575		printk(KERN_ERR
 576		       "%s: rtas token ibm,cbe-spu-perftools unknown\n",
 577		       __func__);
 578		return -EIO;
 579	}
 580	return 0;
 581}
 582
  583/* Unfortunately, the hardware only supports event profiling
  584 * on one SPU per node at a time.  Therefore, we must time slice
  585 * the profiling across all SPUs in the node.  Note, we do this
  586 * in parallel for each node.  The following routine is called
  587 * periodically by a kernel timer to switch which SPU is
  588 * being monitored, in round-robin fashion.
  589 */
 590static void spu_evnt_swap(struct timer_list *unused)
 591{
 592	int node;
 593	int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx;
 594	unsigned long flags;
 595	int cpu;
 596	int ret;
 597	u32 interrupt_mask;
 598
 599
 600	/* enable interrupts on cntr 0 */
 601	interrupt_mask = CBE_PM_CTR_OVERFLOW_INTR(0);
 602
 603	hdw_thread = 0;
 604
 605	/* Make sure spu event interrupt handler and spu event swap
 606	 * don't access the counters simultaneously.
 607	 */
 608	spin_lock_irqsave(&cntr_lock, flags);
 609
 610	cur_spu_evnt_phys_spu_indx = spu_evnt_phys_spu_indx;
 611
 612	if (++(spu_evnt_phys_spu_indx) == NUM_SPUS_PER_NODE)
 613		spu_evnt_phys_spu_indx = 0;
 614
 615	pm_signal[0].sub_unit = spu_evnt_phys_spu_indx;
 616	pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
 617	pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;
 618
 619	/* switch the SPU being profiled on each node */
 620	for_each_online_cpu(cpu) {
 621		if (cbe_get_hw_thread_id(cpu))
 622			continue;
 623
 624		node = cbe_cpu_to_node(cpu);
 625		cur_phys_spu = (node * NUM_SPUS_PER_NODE)
 626			+ cur_spu_evnt_phys_spu_indx;
 627		nxt_phys_spu = (node * NUM_SPUS_PER_NODE)
 628			+ spu_evnt_phys_spu_indx;
 629
 630		/*
 631		 * stop counters, save counter values, restore counts
 632		 * for previous physical SPU
 633		 */
 634		cbe_disable_pm(cpu);
 635		cbe_disable_pm_interrupts(cpu);
 636
 637		spu_pm_cnt[cur_phys_spu]
 638			= cbe_read_ctr(cpu, 0);
 639
 640		/* restore previous count for the next spu to sample */
 641		/* NOTE, hardware issue, counter will not start if the
 642		 * counter value is at max (0xFFFFFFFF).
 643		 */
 644		if (spu_pm_cnt[nxt_phys_spu] >= 0xFFFFFFFF)
 645			cbe_write_ctr(cpu, 0, 0xFFFFFFF0);
 646		 else
 647			 cbe_write_ctr(cpu, 0, spu_pm_cnt[nxt_phys_spu]);
 648
 649		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
 650
  651		/* set up the debug bus to measure the one event and
  652		 * the two events that route the next SPU's PC onto
  653		 * the debug bus
  654		 */
 655		ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), 3);
 656		if (ret)
 657			printk(KERN_ERR "%s: pm_rtas_activate_signals failed, "
 658			       "SPU event swap\n", __func__);
 659
  660		/* clear the trace buffer; we don't want to take a PC
  661		 * for the previous SPU */
 662		cbe_write_pm(cpu, trace_address, 0);
 663
 664		enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
 665
 666		/* Enable interrupts on the CPU thread that is starting */
 667		cbe_enable_pm_interrupts(cpu, hdw_thread,
 668					 interrupt_mask);
 669		cbe_enable_pm(cpu);
 670	}
 671
 672	spin_unlock_irqrestore(&cntr_lock, flags);
 673
  674	/* swap approximately every 0.04 seconds (HZ/25) */
 675	mod_timer(&timer_spu_event_swap, jiffies + HZ / 25);
 676}
 677
 678static void start_spu_event_swap(void)
 679{
 680	timer_setup(&timer_spu_event_swap, spu_evnt_swap, 0);
 681	timer_spu_event_swap.expires = jiffies + HZ / 25;
 682	add_timer(&timer_spu_event_swap);
 683}
 684
 685static int cell_reg_setup_spu_events(struct op_counter_config *ctr,
 686			struct op_system_config *sys, int num_ctrs)
 687{
 688	int i;
 689
 690	/* routine is called once for all nodes */
 691
 692	spu_evnt_phys_spu_indx = 0;
 693	/*
 694	 * For all events except PPU CYCLEs, each node will need to make
 695	 * the rtas cbe-perftools call to setup and reset the debug bus.
 696	 * Make the token lookup call once and store it in the global
 697	 * variable pm_rtas_token.
 698	 */
 699	pm_rtas_token = rtas_token("ibm,cbe-perftools");
 700
 701	if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
 702		printk(KERN_ERR
 703		       "%s: rtas token ibm,cbe-perftools unknown\n",
 704		       __func__);
 705		return -EIO;
 706	}
 707
 708	/* setup the pm_control register settings,
 709	 * settings will be written per node by the
 710	 * cell_cpu_setup() function.
 711	 */
 712	pm_regs.pm_cntrl.trace_buf_ovflw = 1;
 713
 714	/* Use the occurrence trace mode to have SPU PC saved
 715	 * to the trace buffer.  Occurrence data in trace buffer
 716	 * is not used.  Bit 2 must be set to store SPU addresses.
 717	 */
 718	pm_regs.pm_cntrl.trace_mode = 2;
 719
 720	pm_regs.pm_cntrl.spu_addr_trace = 0x1;  /* using debug bus
 721						   event 2 & 3 */
 722
  723	/* Set up the debug bus event array with the SPU PC routing events.
  724	 * Note, pm_signal[0] will be filled in by the set_pm_event() call below.
  725	 */
 726	pm_signal[1].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
 727	pm_signal[1].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_A);
 728	pm_signal[1].bit = SPU_PROFILE_EVENT_ADDR % 100;
 729	pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
 730
 731	pm_signal[2].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
 732	pm_signal[2].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_B);
 733	pm_signal[2].bit = SPU_PROFILE_EVENT_ADDR % 100;
 734	pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;
 735
  736	/* Set the user-selected SPU event to profile on.
  737	 * Note, only one SPU profiling event is supported.
  738	 */
 739	num_counters = 1;  /* Only support one SPU event at a time */
 740	set_pm_event(0, ctr[0].event, ctr[0].unit_mask);
 741
 742	reset_value[0] = 0xFFFFFFFF - ctr[0].count;
 743
 744	/* global, used by cell_cpu_setup */
 745	ctr_enabled |= 1;
 746
 747	/* Initialize the count for each SPU to the reset value */
  748	for (i = 0; i < MAX_NUMNODES * NUM_SPUS_PER_NODE; i++)
 749		spu_pm_cnt[i] = reset_value[0];
 750
 751	return 0;
 752}
 753
 754static int cell_reg_setup_ppu(struct op_counter_config *ctr,
 755			struct op_system_config *sys, int num_ctrs)
 756{
 757	/* routine is called once for all nodes */
 758	int i, j, cpu;
 759
 760	num_counters = num_ctrs;
 761
 762	if (unlikely(num_ctrs > NR_PHYS_CTRS)) {
 763		printk(KERN_ERR
 764		       "%s: Oprofile, number of specified events " \
 765		       "exceeds number of physical counters\n",
 766		       __func__);
 767		return -EIO;
 768	}
 769
 770	set_count_mode(sys->enable_kernel, sys->enable_user);
 771
 772	/* Setup the thread 0 events */
 773	for (i = 0; i < num_ctrs; ++i) {
 774
 775		pmc_cntrl[0][i].evnts = ctr[i].event;
 776		pmc_cntrl[0][i].masks = ctr[i].unit_mask;
 777		pmc_cntrl[0][i].enabled = ctr[i].enabled;
 778		pmc_cntrl[0][i].vcntr = i;
 779
 780		for_each_possible_cpu(j)
 781			per_cpu(pmc_values, j)[i] = 0;
 782	}
 783
 784	/*
 785	 * Setup the thread 1 events, map the thread 0 event to the
 786	 * equivalent thread 1 event.
 787	 */
 788	for (i = 0; i < num_ctrs; ++i) {
 789		if ((ctr[i].event >= 2100) && (ctr[i].event <= 2111))
 790			pmc_cntrl[1][i].evnts = ctr[i].event + 19;
 791		else if (ctr[i].event == 2203)
 792			pmc_cntrl[1][i].evnts = ctr[i].event;
 793		else if ((ctr[i].event >= 2200) && (ctr[i].event <= 2215))
 794			pmc_cntrl[1][i].evnts = ctr[i].event + 16;
 795		else
 796			pmc_cntrl[1][i].evnts = ctr[i].event;
 797
 798		pmc_cntrl[1][i].masks = ctr[i].unit_mask;
 799		pmc_cntrl[1][i].enabled = ctr[i].enabled;
 800		pmc_cntrl[1][i].vcntr = i;
 801	}
 802
 803	for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
 804		input_bus[i] = 0xff;
 805
  806	/*
  807	 * Our counters count up, and "count" refers to the number
  808	 * of events before the next interrupt; we interrupt on
  809	 * overflow.  So we calculate the starting value
  810	 * which will give us "count" events until overflow.
  811	 * Then we set the events on the enabled counters.
  812	 */
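	/*
	 * Worked example (editor's illustration): for a requested count
	 * of 100000 events, reset_value = 0xFFFFFFFF - 100000 =
	 * 0xFFFE795F, so the counter overflows after exactly 100000
	 * increments.
	 */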
 813	for (i = 0; i < num_counters; ++i) {
 814		/* start with virtual counter set 0 */
 815		if (pmc_cntrl[0][i].enabled) {
 816			/* Using 32bit counters, reset max - count */
 817			reset_value[i] = 0xFFFFFFFF - ctr[i].count;
 818			set_pm_event(i,
 819				     pmc_cntrl[0][i].evnts,
 820				     pmc_cntrl[0][i].masks);
 821
 822			/* global, used by cell_cpu_setup */
 823			ctr_enabled |= (1 << i);
 824		}
 825	}
 826
 827	/* initialize the previous counts for the virtual cntrs */
 828	for_each_online_cpu(cpu)
 829		for (i = 0; i < num_counters; ++i) {
 830			per_cpu(pmc_values, cpu)[i] = reset_value[i];
 831		}
 832
 833	return 0;
 834}
 835
 836
 837/* This function is called once for all cpus combined */
 838static int cell_reg_setup(struct op_counter_config *ctr,
 839			struct op_system_config *sys, int num_ctrs)
 840{
  841	int ret = 0;
 842	spu_cycle_reset = 0;
 843
  844	/* initialize the spu_addr_trace value; it will be reset if
  845	 * doing SPU event profiling.
  846	 */
 847	pm_regs.group_control = 0;
 848	pm_regs.debug_bus_control = 0;
 849	pm_regs.pm_cntrl.stop_at_max = 1;
 850	pm_regs.pm_cntrl.trace_mode = 0;
 851	pm_regs.pm_cntrl.freeze = 1;
 852	pm_regs.pm_cntrl.trace_buf_ovflw = 0;
 853	pm_regs.pm_cntrl.spu_addr_trace = 0;
 854
 855	/*
 856	 * For all events except PPU CYCLEs, each node will need to make
 857	 * the rtas cbe-perftools call to setup and reset the debug bus.
 858	 * Make the token lookup call once and store it in the global
 859	 * variable pm_rtas_token.
 860	 */
 861	pm_rtas_token = rtas_token("ibm,cbe-perftools");
 862
 863	if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
 864		printk(KERN_ERR
 865		       "%s: rtas token ibm,cbe-perftools unknown\n",
 866		       __func__);
 867		return -EIO;
 868	}
 869
 870	if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
 871		profiling_mode = SPU_PROFILING_CYCLES;
 872		ret = cell_reg_setup_spu_cycles(ctr, sys, num_ctrs);
 873	} else if ((ctr[0].event >= SPU_EVENT_NUM_START) &&
 874		   (ctr[0].event <= SPU_EVENT_NUM_STOP)) {
 875		profiling_mode = SPU_PROFILING_EVENTS;
 876		spu_cycle_reset = ctr[0].count;
 877
  878		/* for SPU event profiling, we need to set up the
  879		 * pm_signal array with the events to route the
  880		 * SPU PC before making the FW call.  Note, only
  881		 * one SPU event for profiling can be specified
  882		 * at a time.
  883		 */
  884		ret = cell_reg_setup_spu_events(ctr, sys, num_ctrs);
 885	} else {
 886		profiling_mode = PPU_PROFILING;
 887		ret = cell_reg_setup_ppu(ctr, sys, num_ctrs);
 888	}
 889
 890	return ret;
 891}
 892
 893
 894
 895/* This function is called once for each cpu */
 896static int cell_cpu_setup(struct op_counter_config *cntr)
 897{
 898	u32 cpu = smp_processor_id();
 899	u32 num_enabled = 0;
 900	int i;
 901	int ret;
 902
 903	/* Cycle based SPU profiling does not use the performance
 904	 * counters.  The trace array is configured to collect
 905	 * the data.
 906	 */
 907	if (profiling_mode == SPU_PROFILING_CYCLES)
 908		return 0;
 909
 910	/* There is one performance monitor per processor chip (i.e. node),
 911	 * so we only need to perform this function once per node.
 912	 */
 913	if (cbe_get_hw_thread_id(cpu))
 914		return 0;
 915
 916	/* Stop all counters */
 917	cbe_disable_pm(cpu);
 918	cbe_disable_pm_interrupts(cpu);
 919
 920	cbe_write_pm(cpu, pm_start_stop, 0);
 921	cbe_write_pm(cpu, group_control, pm_regs.group_control);
 922	cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
 923	write_pm_cntrl(cpu);
 924
 925	for (i = 0; i < num_counters; ++i) {
 926		if (ctr_enabled & (1 << i)) {
 927			pm_signal[num_enabled].cpu = cbe_cpu_to_node(cpu);
 928			num_enabled++;
 929		}
 930	}
 931
 932	/*
 933	 * The pm_rtas_activate_signals will return -EIO if the FW
 934	 * call failed.
 935	 */
 936	if (profiling_mode == SPU_PROFILING_EVENTS) {
 937		/* For SPU event profiling also need to setup the
 938		 * pm interval timer
 939		 */
 940		ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
 941					       num_enabled+2);
 942		/* store PC from debug bus to Trace buffer as often
 943		 * as possible (every 10 cycles)
 944		 */
 945		cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
 946		return ret;
 947	} else
 948		return pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
 949						num_enabled);
 950}
 951
 952#define ENTRIES	 303
 953#define MAXLFSR	 0xFFFFFF
 954
 955/* precomputed table of 24 bit LFSR values */
 956static int initial_lfsr[] = {
 957 8221349, 12579195, 5379618, 10097839, 7512963, 7519310, 3955098, 10753424,
 958 15507573, 7458917, 285419, 2641121, 9780088, 3915503, 6668768, 1548716,
 959 4885000, 8774424, 9650099, 2044357, 2304411, 9326253, 10332526, 4421547,
 960 3440748, 10179459, 13332843, 10375561, 1313462, 8375100, 5198480, 6071392,
 961 9341783, 1526887, 3985002, 1439429, 13923762, 7010104, 11969769, 4547026,
 962 2040072, 4025602, 3437678, 7939992, 11444177, 4496094, 9803157, 10745556,
 963 3671780, 4257846, 5662259, 13196905, 3237343, 12077182, 16222879, 7587769,
 964 14706824, 2184640, 12591135, 10420257, 7406075, 3648978, 11042541, 15906893,
 965 11914928, 4732944, 10695697, 12928164, 11980531, 4430912, 11939291, 2917017,
 966 6119256, 4172004, 9373765, 8410071, 14788383, 5047459, 5474428, 1737756,
 967 15967514, 13351758, 6691285, 8034329, 2856544, 14394753, 11310160, 12149558,
 968 7487528, 7542781, 15668898, 12525138, 12790975, 3707933, 9106617, 1965401,
 969 16219109, 12801644, 2443203, 4909502, 8762329, 3120803, 6360315, 9309720,
 970 15164599, 10844842, 4456529, 6667610, 14924259, 884312, 6234963, 3326042,
 971 15973422, 13919464, 5272099, 6414643, 3909029, 2764324, 5237926, 4774955,
 972 10445906, 4955302, 5203726, 10798229, 11443419, 2303395, 333836, 9646934,
 973 3464726, 4159182, 568492, 995747, 10318756, 13299332, 4836017, 8237783,
 974 3878992, 2581665, 11394667, 5672745, 14412947, 3159169, 9094251, 16467278,
 975 8671392, 15230076, 4843545, 7009238, 15504095, 1494895, 9627886, 14485051,
 976 8304291, 252817, 12421642, 16085736, 4774072, 2456177, 4160695, 15409741,
 977 4902868, 5793091, 13162925, 16039714, 782255, 11347835, 14884586, 366972,
 978 16308990, 11913488, 13390465, 2958444, 10340278, 1177858, 1319431, 10426302,
 979 2868597, 126119, 5784857, 5245324, 10903900, 16436004, 3389013, 1742384,
 980 14674502, 10279218, 8536112, 10364279, 6877778, 14051163, 1025130, 6072469,
 981 1988305, 8354440, 8216060, 16342977, 13112639, 3976679, 5913576, 8816697,
 982 6879995, 14043764, 3339515, 9364420, 15808858, 12261651, 2141560, 5636398,
 983 10345425, 10414756, 781725, 6155650, 4746914, 5078683, 7469001, 6799140,
 984 10156444, 9667150, 10116470, 4133858, 2121972, 1124204, 1003577, 1611214,
 985 14304602, 16221850, 13878465, 13577744, 3629235, 8772583, 10881308, 2410386,
 986 7300044, 5378855, 9301235, 12755149, 4977682, 8083074, 10327581, 6395087,
 987 9155434, 15501696, 7514362, 14520507, 15808945, 3244584, 4741962, 9658130,
 988 14336147, 8654727, 7969093, 15759799, 14029445, 5038459, 9894848, 8659300,
 989 13699287, 8834306, 10712885, 14753895, 10410465, 3373251, 309501, 9561475,
 990 5526688, 14647426, 14209836, 5339224, 207299, 14069911, 8722990, 2290950,
 991 3258216, 12505185, 6007317, 9218111, 14661019, 10537428, 11731949, 9027003,
 992 6641507, 9490160, 200241, 9720425, 16277895, 10816638, 1554761, 10431375,
 993 7467528, 6790302, 3429078, 14633753, 14428997, 11463204, 3576212, 2003426,
 994 6123687, 820520, 9992513, 15784513, 5778891, 6428165, 8388607
 995};
 996
 997/*
 998 * The hardware uses an LFSR counting sequence to determine when to capture
  999 * the SPU PCs.  An LFSR sequence is like a pseudo-random number sequence
1000 * where each number occurs once in the sequence but the sequence is not in
1001 * numerical order. The SPU PC capture is done when the LFSR sequence reaches
1002 * the last value in the sequence.  Hence the user specified value N
1003 * corresponds to the LFSR number that is N from the end of the sequence.
1004 *
1005 * To avoid the time to compute the LFSR, a lookup table is used.  The 24 bit
1006 * LFSR sequence is broken into four ranges.  The spacing of the precomputed
1007 * values is adjusted in each range so the error between the user specified
1008 * number (N) of events between samples and the actual number of events based
 1009 * on the precomputed value will be less than about 6.2%.  Note, if the user
1010 * specifies N < 2^16, the LFSR value that is 2^16 from the end will be used.
1011 * This is to prevent the loss of samples because the trace buffer is full.
1012 *
1013 *	   User specified N		     Step between	   Index in
1014 *					 precomputed values	 precomputed
1015 *								    table
1016 * 0		    to	2^16-1			----		      0
1017 * 2^16	    to	2^16+2^19-1		2^12		    1 to 128
1018 * 2^16+2^19	    to	2^16+2^19+2^22-1	2^15		  129 to 256
1019 * 2^16+2^19+2^22  to	2^24-1			2^18		  257 to 302
1020 *
1021 *
1022 * For example, the LFSR values in the second range are computed for 2^16,
 1023 * 2^16+2^12, ... , 2^19-2^16, 2^19 and stored in the table at indices
1024 * 1, 2,..., 127, 128.
1025 *
1026 * The 24 bit LFSR value for the nth number in the sequence can be
1027 * calculated using the following code:
1028 *
1029 * #define size 24
1030 * int calculate_lfsr(int n)
1031 * {
1032 *	int i;
1033 *	unsigned int newlfsr0;
1034 *	unsigned int lfsr = 0xFFFFFF;
1035 *	unsigned int howmany = n;
1036 *
1037 *	for (i = 2; i < howmany + 2; i++) {
1038 *		newlfsr0 = (((lfsr >> (size - 1 - 0)) & 1) ^
1039 *		((lfsr >> (size - 1 - 1)) & 1) ^
1040 *		(((lfsr >> (size - 1 - 6)) & 1) ^
1041 *		((lfsr >> (size - 1 - 23)) & 1)));
1042 *
1043 *		lfsr >>= 1;
1044 *		lfsr = lfsr | (newlfsr0 << (size - 1));
1045 *	}
1046 *	return lfsr;
1047 * }
1048 */
1049
1050#define V2_16  (0x1 << 16)
1051#define V2_19  (0x1 << 19)
1052#define V2_22  (0x1 << 22)
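/*
 * Worked example (editor's illustration): for a user-specified
 * N = 2^16 + 3*2^12 = 77824, N falls in the second range, so
 * index = ((N - 2^16) >> 12) + 1 = 4 and initial_lfsr[4] (7512963)
 * is used as the starting LFSR value.
 */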
1053
1054static int calculate_lfsr(int n)
1055{
1056	/*
1057	 * The ranges and steps are in powers of 2 so the calculations
 1058	 * can be done using shifts rather than divides.
1059	 */
1060	int index;
1061
1062	if ((n >> 16) == 0)
1063		index = 0;
1064	else if (((n - V2_16) >> 19) == 0)
1065		index = ((n - V2_16) >> 12) + 1;
1066	else if (((n - V2_16 - V2_19) >> 22) == 0)
 1067		index = ((n - V2_16 - V2_19) >> 15) + 1 + 128;
 1068	else if (((n - V2_16 - V2_19 - V2_22) >> 24) == 0)
 1069		index = ((n - V2_16 - V2_19 - V2_22) >> 18) + 1 + 256;
1070	else
1071		index = ENTRIES-1;
1072
1073	/* make sure index is valid */
1074	if ((index >= ENTRIES) || (index < 0))
1075		index = ENTRIES-1;
1076
1077	return initial_lfsr[index];
1078}
1079
1080static int pm_rtas_activate_spu_profiling(u32 node)
1081{
1082	int ret, i;
1083	struct pm_signal pm_signal_local[NUM_SPUS_PER_NODE];
1084
1085	/*
1086	 * Set up the rtas call to configure the debug bus to
1087	 * route the SPU PCs.  Setup the pm_signal for each SPU
1088	 */
1089	for (i = 0; i < ARRAY_SIZE(pm_signal_local); i++) {
1090		pm_signal_local[i].cpu = node;
1091		pm_signal_local[i].signal_group = 41;
1092		/* spu i on word (i/2) */
 1093		pm_signal_local[i].bus_word = 1 << (i / 2);
1094		/* spu i */
1095		pm_signal_local[i].sub_unit = i;
1096		pm_signal_local[i].bit = 63;
1097	}
1098
1099	ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE,
1100				     PASSTHRU_ENABLE, pm_signal_local,
1101				     (ARRAY_SIZE(pm_signal_local)
1102				      * sizeof(struct pm_signal)));
1103
1104	if (unlikely(ret)) {
1105		printk(KERN_WARNING "%s: rtas returned: %d\n",
1106		       __func__, ret);
1107		return -EIO;
1108	}
1109
1110	return 0;
1111}
1112
1113#ifdef CONFIG_CPU_FREQ
1114static int
1115oprof_cpufreq_notify(struct notifier_block *nb, unsigned long val, void *data)
1116{
1117	int ret = 0;
1118	struct cpufreq_freqs *frq = data;
1119	if ((val == CPUFREQ_PRECHANGE && frq->old < frq->new) ||
1120	    (val == CPUFREQ_POSTCHANGE && frq->old > frq->new))
1121		set_spu_profiling_frequency(frq->new, spu_cycle_reset);
1122	return ret;
1123}
1124
1125static struct notifier_block cpu_freq_notifier_block = {
1126	.notifier_call	= oprof_cpufreq_notify
1127};
1128#endif
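/*
 * Editor's note: the notifier above applies the new frequency before
 * an increase (PRECHANGE) and after a decrease (POSTCHANGE), so the
 * value handed to set_spu_profiling_frequency() never understates the
 * clock rate the CPU is actually running at.
 */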
1129
1130/*
1131 * Note the generic OProfile stop calls do not support returning
1132 * an error on stop.  Hence, will not return an error if the FW
1133 * calls fail on stop.	Failure to reset the debug bus is not an issue.
1134 * Failure to disable the SPU profiling is not an issue.  The FW calls
1135 * to enable the performance counters and debug bus will work even if
1136 * the hardware was not cleanly reset.
1137 */
1138static void cell_global_stop_spu_cycles(void)
1139{
1140	int subfunc, rtn_value;
1141	unsigned int lfsr_value;
1142	int cpu;
1143
1144	oprofile_running = 0;
1145	smp_wmb();
1146
1147#ifdef CONFIG_CPU_FREQ
1148	cpufreq_unregister_notifier(&cpu_freq_notifier_block,
1149				    CPUFREQ_TRANSITION_NOTIFIER);
1150#endif
1151
1152	for_each_online_cpu(cpu) {
1153		if (cbe_get_hw_thread_id(cpu))
1154			continue;
1155
1156		subfunc = 3;	/*
1157				 * 2 - activate SPU tracing,
1158				 * 3 - deactivate
1159				 */
1160		lfsr_value = 0x8f100000;
1161
1162		rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
1163				      subfunc, cbe_cpu_to_node(cpu),
1164				      lfsr_value);
1165
1166		if (unlikely(rtn_value != 0)) {
1167			printk(KERN_ERR
1168			       "%s: rtas call ibm,cbe-spu-perftools " \
1169			       "failed, return = %d\n",
1170			       __func__, rtn_value);
1171		}
1172
1173		/* Deactivate the signals */
1174		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
1175	}
1176
1177	stop_spu_profiling_cycles();
1178}
1179
1180static void cell_global_stop_spu_events(void)
1181{
1182	int cpu;
1183	oprofile_running = 0;
1184
1185	stop_spu_profiling_events();
1186	smp_wmb();
1187
1188	for_each_online_cpu(cpu) {
1189		if (cbe_get_hw_thread_id(cpu))
1190			continue;
1191
1192		cbe_sync_irq(cbe_cpu_to_node(cpu));
1193		/* Stop the counters */
1194		cbe_disable_pm(cpu);
1195		cbe_write_pm07_control(cpu, 0, 0);
1196
1197		/* Deactivate the signals */
1198		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
1199
1200		/* Deactivate interrupts */
1201		cbe_disable_pm_interrupts(cpu);
1202	}
1203	del_timer_sync(&timer_spu_event_swap);
1204}
1205
1206static void cell_global_stop_ppu(void)
1207{
1208	int cpu;
1209
1210	/*
1211	 * This routine will be called once for the system.
1212	 * There is one performance monitor per node, so we
1213	 * only need to perform this function once per node.
1214	 */
1215	del_timer_sync(&timer_virt_cntr);
1216	oprofile_running = 0;
1217	smp_wmb();
1218
1219	for_each_online_cpu(cpu) {
1220		if (cbe_get_hw_thread_id(cpu))
1221			continue;
1222
1223		cbe_sync_irq(cbe_cpu_to_node(cpu));
1224		/* Stop the counters */
1225		cbe_disable_pm(cpu);
1226
1227		/* Deactivate the signals */
1228		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
1229
1230		/* Deactivate interrupts */
1231		cbe_disable_pm_interrupts(cpu);
1232	}
1233}
1234
1235static void cell_global_stop(void)
1236{
1237	if (profiling_mode == PPU_PROFILING)
1238		cell_global_stop_ppu();
1239	else if (profiling_mode == SPU_PROFILING_EVENTS)
1240		cell_global_stop_spu_events();
1241	else
1242		cell_global_stop_spu_cycles();
1243}
1244
1245static int cell_global_start_spu_cycles(struct op_counter_config *ctr)
1246{
1247	int subfunc;
1248	unsigned int lfsr_value;
1249	int cpu;
1250	int ret;
1251	int rtas_error;
1252	unsigned int cpu_khzfreq = 0;
1253
1254	/* The SPU profiling uses time-based profiling based on
1255	 * cpu frequency, so if configured with the CPU_FREQ
1256	 * option, we should detect frequency changes and react
1257	 * accordingly.
1258	 */
1259#ifdef CONFIG_CPU_FREQ
1260	ret = cpufreq_register_notifier(&cpu_freq_notifier_block,
1261					CPUFREQ_TRANSITION_NOTIFIER);
1262	if (ret < 0)
1263		/* this is not a fatal error */
1264		printk(KERN_ERR "CPU freq change registration failed: %d\n",
1265		       ret);
1266
1267	else
1268		cpu_khzfreq = cpufreq_quick_get(smp_processor_id());
1269#endif
1270
1271	set_spu_profiling_frequency(cpu_khzfreq, spu_cycle_reset);
1272
1273	for_each_online_cpu(cpu) {
1274		if (cbe_get_hw_thread_id(cpu))
1275			continue;
1276
1277		/*
1278		 * Setup SPU cycle-based profiling.
1279		 * Set perf_mon_control bit 0 to a zero before
1280		 * enabling spu collection hardware.
1281		 */
1282		cbe_write_pm(cpu, pm_control, 0);
1283
1284		if (spu_cycle_reset > MAX_SPU_COUNT)
1285			/* use largest possible value */
1286			lfsr_value = calculate_lfsr(MAX_SPU_COUNT-1);
1287		else
1288			lfsr_value = calculate_lfsr(spu_cycle_reset);
1289
 1290		/* must use a non-zero value.  Zero disables data collection. */
1291		if (lfsr_value == 0)
1292			lfsr_value = calculate_lfsr(1);
1293
1294		lfsr_value = lfsr_value << 8; /* shift lfsr to correct
1295						* register location
1296						*/
1297
1298		/* debug bus setup */
1299		ret = pm_rtas_activate_spu_profiling(cbe_cpu_to_node(cpu));
1300
1301		if (unlikely(ret)) {
1302			rtas_error = ret;
1303			goto out;
1304		}
1305
1306
1307		subfunc = 2;	/* 2 - activate SPU tracing, 3 - deactivate */
1308
1309		/* start profiling */
1310		ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc,
1311				cbe_cpu_to_node(cpu), lfsr_value);
1312
1313		if (unlikely(ret != 0)) {
1314			printk(KERN_ERR
1315			       "%s: rtas call ibm,cbe-spu-perftools failed, " \
1316			       "return = %d\n", __func__, ret);
1317			rtas_error = -EIO;
1318			goto out;
1319		}
1320	}
1321
1322	rtas_error = start_spu_profiling_cycles(spu_cycle_reset);
1323	if (rtas_error)
1324		goto out_stop;
1325
1326	oprofile_running = 1;
1327	return 0;
1328
1329out_stop:
1330	cell_global_stop_spu_cycles();	/* clean up the PMU/debug bus */
1331out:
1332	return rtas_error;
1333}
1334
1335static int cell_global_start_spu_events(struct op_counter_config *ctr)
1336{
1337	int cpu;
1338	u32 interrupt_mask = 0;
1339	int rtn = 0;
1340
1341	hdw_thread = 0;
1342
 1343	/* SPU event profiling uses the performance counters to generate
 1344	 * an interrupt.  The hardware is set up to store the SPU program
 1345	 * counter into the trace array.  The occurrence mode is used to
 1346	 * enable storing data to the trace buffer.  The bits are set
 1347	 * to send/store the SPU address in the trace buffer.  The debug
 1348	 * bus must be set up to route the SPU program counter onto the
 1349	 * debug bus.  The occurrence data in the trace buffer is not used.
 1350	 */
1351
1352	/* This routine gets called once for the system.
1353	 * There is one performance monitor per node, so we
1354	 * only need to perform this function once per node.
1355	 */
1356
1357	for_each_online_cpu(cpu) {
1358		if (cbe_get_hw_thread_id(cpu))
1359			continue;
1360
1361		/*
1362		 * Setup SPU event-based profiling.
1363		 * Set perf_mon_control bit 0 to a zero before
1364		 * enabling spu collection hardware.
1365		 *
1366		 * Only support one SPU event on one SPU per node.
1367		 */
1368		if (ctr_enabled & 1) {
1369			cbe_write_ctr(cpu, 0, reset_value[0]);
1370			enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
1371			interrupt_mask |=
1372				CBE_PM_CTR_OVERFLOW_INTR(0);
1373		} else {
1374			/* Disable counter */
1375			cbe_write_pm07_control(cpu, 0, 0);
1376		}
1377
1378		cbe_get_and_clear_pm_interrupts(cpu);
1379		cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
1380		cbe_enable_pm(cpu);
1381
1382		/* clear the trace buffer */
1383		cbe_write_pm(cpu, trace_address, 0);
1384	}
1385
1386	/* Start the timer to time slice collecting the event profile
1387	 * on each of the SPUs.  Note, can collect profile on one SPU
1388	 * per node at a time.
1389	 */
1390	start_spu_event_swap();
1391	start_spu_profiling_events();
1392	oprofile_running = 1;
1393	smp_wmb();
1394
1395	return rtn;
1396}
1397
1398static int cell_global_start_ppu(struct op_counter_config *ctr)
1399{
1400	u32 cpu, i;
1401	u32 interrupt_mask = 0;
1402
1403	/* This routine gets called once for the system.
1404	 * There is one performance monitor per node, so we
1405	 * only need to perform this function once per node.
1406	 */
1407	for_each_online_cpu(cpu) {
1408		if (cbe_get_hw_thread_id(cpu))
1409			continue;
1410
1411		interrupt_mask = 0;
1412
1413		for (i = 0; i < num_counters; ++i) {
1414			if (ctr_enabled & (1 << i)) {
1415				cbe_write_ctr(cpu, i, reset_value[i]);
1416				enable_ctr(cpu, i, pm_regs.pm07_cntrl);
1417				interrupt_mask |= CBE_PM_CTR_OVERFLOW_INTR(i);
1418			} else {
1419				/* Disable counter */
1420				cbe_write_pm07_control(cpu, i, 0);
1421			}
1422		}
1423
1424		cbe_get_and_clear_pm_interrupts(cpu);
1425		cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
1426		cbe_enable_pm(cpu);
1427	}
1428
1429	virt_cntr_inter_mask = interrupt_mask;
1430	oprofile_running = 1;
1431	smp_wmb();
1432
1433	/*
1434	 * NOTE: start_virt_cntrs will result in cell_virtual_cntr() being
1435	 * executed which manipulates the PMU.	We start the "virtual counter"
1436	 * here so that we do not need to synchronize access to the PMU in
1437	 * the above for-loop.
1438	 */
1439	start_virt_cntrs();
1440
1441	return 0;
1442}
1443
1444static int cell_global_start(struct op_counter_config *ctr)
1445{
1446	if (profiling_mode == SPU_PROFILING_CYCLES)
1447		return cell_global_start_spu_cycles(ctr);
1448	else if (profiling_mode == SPU_PROFILING_EVENTS)
1449		return cell_global_start_spu_events(ctr);
1450	else
1451		return cell_global_start_ppu(ctr);
1452}
1453
1454
1455/* The SPU interrupt handler
1456 *
1457 * SPU event profiling works as follows:
1458 * The pm_signal[0] holds the one SPU event to be measured.  It is routed on
1459 * the debug bus using word 0 or 1.  The value of pm_signal[1] and
1460 * pm_signal[2] contain the necessary events to route the SPU program
1461 * counter for the selected SPU onto the debug bus using words 2 and 3.
1462 * The pm_interval register is setup to write the SPU PC value into the
1463 * trace buffer at the maximum rate possible.  The trace buffer is configured
1464 * to store the PCs, wrapping when it is full.  The performance counter is
1465 * initialized to the max hardware count minus the number of events, N, between
1466 * samples.  Once the N events have occurred, a HW counter overflow occurs
1467 * causing the generation of a HW counter interrupt which also stops the
1468 * writing of the SPU PC values to the trace buffer.  Hence the last PC
 1469 * written to the trace buffer is the SPU PC that we want.  Unfortunately,
 1470 * we have to read from the beginning of the trace buffer to get to the
 1471 * last value written.  We just hope the PPU has nothing better to do than
 1472 * to service this interrupt.  The PC for the specific SPU being profiled
 1473 * is extracted from the trace buffer, processed and stored.  The trace
 1474 * buffer is cleared, interrupts are cleared, and the counter is reset to
 1475 * max - N.  A kernel timer periodically calls spu_evnt_swap() to switch
 1476 * to the next physical SPU in the node to profile, in round-robin order,
 1477 * so data is collected for all SPUs on the node.  It does mean that we
 1478 * need a relatively small value of N to ensure enough samples are
 1479 * collected on each SPU, since each SPU is only profiled 1/8 of the
 1480 * time.  It may also be necessary to use a longer sample collection period.
1481 */
1482static void cell_handle_interrupt_spu(struct pt_regs *regs,
1483				      struct op_counter_config *ctr)
1484{
1485	u32 cpu, cpu_tmp;
1486	u64 trace_entry;
1487	u32 interrupt_mask;
1488	u64 trace_buffer[2];
1489	u64 last_trace_buffer;
1490	u32 sample;
1491	u32 trace_addr;
1492	unsigned long sample_array_lock_flags;
1493	int spu_num;
1494	unsigned long flags;
1495
1496	/* Make sure spu event interrupt handler and spu event swap
1497	 * don't access the counters simultaneously.
1498	 */
1499	cpu = smp_processor_id();
1500	spin_lock_irqsave(&cntr_lock, flags);
1501
1502	cpu_tmp = cpu;
1503	cbe_disable_pm(cpu);
1504
1505	interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
1506
1507	sample = 0xABCDEF;
1508	trace_entry = 0xfedcba;
 1509	last_trace_buffer = 0xdeadbeef;
1510
1511	if ((oprofile_running == 1) && (interrupt_mask != 0)) {
1512		/* disable writes to trace buff */
1513		cbe_write_pm(cpu, pm_interval, 0);
1514
1515		/* only have one perf cntr being used, cntr 0 */
1516		if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(0))
1517		    && ctr[0].enabled)
1518			/* The SPU PC values will be read
1519			 * from the trace buffer, reset counter
1520			 */
1521
1522			cbe_write_ctr(cpu, 0, reset_value[0]);
1523
1524		trace_addr = cbe_read_pm(cpu, trace_address);
1525
1526		while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
1527			/* There is data in the trace buffer to process
1528			 * Read the buffer until you get to the last
1529			 * entry.  This is the value we want.
1530			 */
1531
1532			cbe_read_trace_buffer(cpu, trace_buffer);
1533			trace_addr = cbe_read_pm(cpu, trace_address);
1534		}
1535
1536		/* SPU Address 16 bit count format for 128 bit
1537		 * HW trace buffer is used for the SPU PC storage
1538		 *    HDR bits          0:15
1539		 *    SPU Addr 0 bits   16:31
1540		 *    SPU Addr 1 bits   32:47
1541		 *    unused bits       48:127
1542		 *
1543		 * HDR: bit4 = 1 SPU Address 0 valid
1544		 * HDR: bit5 = 1 SPU Address 1 valid
1545		 *  - unfortunately, the valid bits don't seem to work
1546		 *
1547		 * Note trace_buffer[0] holds bits 0:63 of the HW
1548		 * trace buffer, trace_buffer[1] holds bits 64:127
1549		 */
1550
1551		trace_entry = trace_buffer[0]
1552			& 0x00000000FFFF0000;
1553
 1554		/* only the top 16 bits of the 18 bit SPU PC address
 1555		 * are stored in the trace buffer, hence shift right
 1556		 * by 16 - 2 = 14 bits to recover the PC */
1557		sample = trace_entry >> 14;
1558		last_trace_buffer = trace_buffer[0];
1559
1560		spu_num = spu_evnt_phys_spu_indx
1561			+ (cbe_cpu_to_node(cpu) * NUM_SPUS_PER_NODE);
1562
1563		/* make sure only one process at a time is calling
1564		 * spu_sync_buffer()
1565		 */
1566		spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
1567				  sample_array_lock_flags);
1568		spu_sync_buffer(spu_num, &sample, 1);
1569		spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
1570				       sample_array_lock_flags);
1571
 1572		smp_wmb();    /* ensure spu event buffer updates are written;
 1573			       * we don't want events intermingled... */
1574
1575		/* The counters were frozen by the interrupt.
1576		 * Reenable the interrupt and restart the counters.
1577		 */
1578		cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
1579		cbe_enable_pm_interrupts(cpu, hdw_thread,
1580					 virt_cntr_inter_mask);
1581
1582		/* clear the trace buffer, re-enable writes to trace buff */
1583		cbe_write_pm(cpu, trace_address, 0);
1584		cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
1585
 1586		/* The writes to the various performance counters only write
1587		 * to a latch.  The new values (interrupt setting bits, reset
1588		 * counter value etc.) are not copied to the actual registers
1589		 * until the performance monitor is enabled.  In order to get
1590		 * this to work as desired, the performance monitor needs to
1591		 * be disabled while writing to the latches.  This is a
1592		 * HW design issue.
1593		 */
1594		write_pm_cntrl(cpu);
1595		cbe_enable_pm(cpu);
1596	}
1597	spin_unlock_irqrestore(&cntr_lock, flags);
1598}
1599
1600static void cell_handle_interrupt_ppu(struct pt_regs *regs,
1601				      struct op_counter_config *ctr)
1602{
1603	u32 cpu;
1604	u64 pc;
1605	int is_kernel;
1606	unsigned long flags = 0;
1607	u32 interrupt_mask;
1608	int i;
1609
1610	cpu = smp_processor_id();
1611
1612	/*
1613	 * Need to make sure the interrupt handler and the virt counter
1614	 * routine are not running at the same time. See the
1615	 * cell_virtual_cntr() routine for additional comments.
1616	 */
1617	spin_lock_irqsave(&cntr_lock, flags);
1618
1619	/*
1620	 * Need to disable and reenable the performance counters
1621	 * to get the desired behavior from the hardware.  This
1622	 * is hardware specific.
1623	 */
1624
1625	cbe_disable_pm(cpu);
1626
1627	interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
1628
1629	/*
1630	 * If the interrupt mask has been cleared, then the virt cntr
1631	 * has cleared the interrupt.  When the thread that generated
1632	 * the interrupt is restored, the data count will be restored to
 1633	 * 0xFFFFFFF0 to cause the interrupt to be regenerated.
1634	 */
1635
1636	if ((oprofile_running == 1) && (interrupt_mask != 0)) {
1637		pc = regs->nip;
1638		is_kernel = is_kernel_addr(pc);
1639
1640		for (i = 0; i < num_counters; ++i) {
1641			if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
1642			    && ctr[i].enabled) {
1643				oprofile_add_ext_sample(pc, regs, i, is_kernel);
1644				cbe_write_ctr(cpu, i, reset_value[i]);
1645			}
1646		}
1647
1648		/*
1649		 * The counters were frozen by the interrupt.
1650		 * Reenable the interrupt and restart the counters.
 1651		 * If there was a race between the interrupt handler and
 1652		 * the virtual counter routine, the virtual counter
 1653		 * routine may have cleared the interrupts.  Hence we must
 1654		 * use virt_cntr_inter_mask to re-enable the interrupts.
1655		 */
1656		cbe_enable_pm_interrupts(cpu, hdw_thread,
1657					 virt_cntr_inter_mask);
1658
1659		/*
 1660		 * The writes to the various performance counters only write
1661		 * to a latch.	The new values (interrupt setting bits, reset
1662		 * counter value etc.) are not copied to the actual registers
1663		 * until the performance monitor is enabled.  In order to get
1664		 * this to work as desired, the performance monitor needs to
1665		 * be disabled while writing to the latches.  This is a
1666		 * HW design issue.
1667		 */
1668		cbe_enable_pm(cpu);
1669	}
1670	spin_unlock_irqrestore(&cntr_lock, flags);
1671}
1672
1673static void cell_handle_interrupt(struct pt_regs *regs,
1674				  struct op_counter_config *ctr)
1675{
1676	if (profiling_mode == PPU_PROFILING)
1677		cell_handle_interrupt_ppu(regs, ctr);
1678	else
1679		cell_handle_interrupt_spu(regs, ctr);
1680}
1681
1682/*
1683 * This function is called from the generic OProfile
1684 * driver.  When profiling PPUs, we need to do the
1685 * generic sync start; otherwise, do spu_sync_start.
1686 */
1687static int cell_sync_start(void)
1688{
1689	if ((profiling_mode == SPU_PROFILING_CYCLES) ||
1690	    (profiling_mode == SPU_PROFILING_EVENTS))
1691		return spu_sync_start();
1692	else
1693		return DO_GENERIC_SYNC;
1694}
1695
1696static int cell_sync_stop(void)
1697{
1698	if ((profiling_mode == SPU_PROFILING_CYCLES) ||
1699	    (profiling_mode == SPU_PROFILING_EVENTS))
1700		return spu_sync_stop();
1701	else
1702		return 1;
1703}
1704
1705struct op_powerpc_model op_model_cell = {
1706	.reg_setup = cell_reg_setup,
1707	.cpu_setup = cell_cpu_setup,
1708	.global_start = cell_global_start,
1709	.global_stop = cell_global_stop,
1710	.sync_start = cell_sync_start,
1711	.sync_stop = cell_sync_stop,
1712	.handle_interrupt = cell_handle_interrupt,
1713};