arch/parisc/kernel/processor.c (Linux v5.4)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *    Initial setup-routines for HP 9000 based hardware.
 *
 *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
 *    Modifications for PA-RISC (C) 1999-2008 Helge Deller <deller@gmx.de>
 *    Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
 *    Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
 *    Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
 *    Modifications copyright 2001 Ryan Bradetich <rbradetich@uswest.net>
 *
 *    Initial PA-RISC Version: 04-23-1999 by Helge Deller
 */
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/param.h>
#include <asm/cache.h>
#include <asm/hardware.h>	/* for register_parisc_driver() stuff */
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/irq.h>		/* for struct irq_region */
#include <asm/parisc-device.h>

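/*
 * boot_cpu_data holds the system-wide, PDC-derived processor information
 * collected once by collect_boot_cpu_data(); the per-CPU cpu_data entries
 * are filled in by processor_probe() for each CPU that is discovered.
 */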
struct system_cpuinfo_parisc boot_cpu_data __ro_after_init;
EXPORT_SYMBOL(boot_cpu_data);
#ifdef CONFIG_PA8X00
int _parisc_requires_coherency __ro_after_init;
EXPORT_SYMBOL(_parisc_requires_coherency);
#endif

DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);

/*
**	PARISC CPU driver - claim "device" and initialize CPU data structures.
**
** Consolidate per CPU initialization into (mostly) one module.
** Monarch CPU will initialize boot_cpu_data which shouldn't
** change once the system has booted.
**
** The callback *should* do per-instance initialization of
** everything including the monarch. "Per CPU" init code in
** setup.c:start_parisc() has migrated here and start_parisc()
** will call register_parisc_driver(&cpu_driver) before calling do_inventory().
**
** The goal of consolidating CPU initialization into one place is
** to make sure all CPUs get initialized the same way.
** The code path not shared is how PDC hands control of the CPU to the OS.
** The initialization of OS data structures is the same (done below).
*/

/**
 * init_percpu_prof - enable/setup per cpu profiling hooks.
 * @cpunum: The processor instance.
 *
 * FIXME: doesn't do much yet...
 */
static void
init_percpu_prof(unsigned long cpunum)
{
}


/**
 * processor_probe - Determine if processor driver should claim this device.
 * @dev: The device which has been found.
 *
 * Determine if processor driver should claim this chip (return 0) or not
 * (return 1).  If so, initialize the chip and tell other partners in crime
 * they have work to do.
 */
static int __init processor_probe(struct parisc_device *dev)
{
	unsigned long txn_addr;
	unsigned long cpuid;
	struct cpuinfo_parisc *p;
	struct pdc_pat_cpu_num cpu_info = { };

#ifdef CONFIG_SMP
	if (num_online_cpus() >= nr_cpu_ids) {
		printk(KERN_INFO "num_online_cpus() >= nr_cpu_ids\n");
		return 1;
	}
#else
	if (boot_cpu_data.cpu_count > 0) {
		printk(KERN_INFO "CONFIG_SMP=n  ignoring additional CPUs\n");
		return 1;
	}
#endif

	/* Assign the logical CPU ID (may get overwritten by the PAT code below);
	 * the global CPU counter is updated further down.
	 */
	cpuid = boot_cpu_data.cpu_count;
	txn_addr = dev->hpa.start;	/* for legacy PDC */
	cpu_info.cpu_num = cpu_info.cpu_loc = cpuid;

#ifdef CONFIG_64BIT
	if (is_pdc_pat()) {
		ulong status;
		unsigned long bytecnt;
		pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;

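		/*
		 * PAT firmware: look up this CPU's cell module to get its
		 * interrupt transaction address and physical CPU number.
		 */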
		pa_pdc_cell = kmalloc(sizeof(*pa_pdc_cell), GFP_KERNEL);
		if (!pa_pdc_cell)
			panic("couldn't allocate memory for PDC_PAT_CELL!");

		status = pdc_pat_cell_module(&bytecnt, dev->pcell_loc,
			dev->mod_index, PA_VIEW, pa_pdc_cell);

		BUG_ON(PDC_OK != status);

		/* verify it's the same as what do_pat_inventory() found */
		BUG_ON(dev->mod_info != pa_pdc_cell->mod_info);
		BUG_ON(dev->pmod_loc != pa_pdc_cell->mod_location);

		txn_addr = pa_pdc_cell->mod[0];   /* id_eid for IO sapic */

		kfree(pa_pdc_cell);

		/* get the cpu number */
		status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa.start);
		BUG_ON(PDC_OK != status);

		pr_info("Logical CPU #%lu is physical cpu #%lu at location "
			"0x%lx with hpa %pa\n",
			cpuid, cpu_info.cpu_num, cpu_info.cpu_loc,
			&dev->hpa.start);

#undef USE_PAT_CPUID
#ifdef USE_PAT_CPUID
/* We need contiguous numbers for cpuid. Firmware's notion
 * of cpuid is for physical CPUs and we just don't care yet.
 * We'll care when we need to query PAT PDC about a CPU *after*
 * boot time (ie shutdown a CPU from an OS perspective).
 */
		if (cpu_info.cpu_num >= NR_CPUS) {
			printk(KERN_WARNING "IGNORING CPU at %pa,"
				" cpu_slot_id > NR_CPUS"
				" (%ld > %d)\n",
				&dev->hpa.start, cpu_info.cpu_num, NR_CPUS);
			/* Ignore CPU since it will only crash */
			boot_cpu_data.cpu_count--;
			return 1;
		} else {
			cpuid = cpu_info.cpu_num;
		}
#endif
	}
#endif

	p = &per_cpu(cpu_data, cpuid);
	boot_cpu_data.cpu_count++;

	/* initialize counters - CPU 0 gets it_value set in time_init() */
	if (cpuid)
		memset(p, 0, sizeof(struct cpuinfo_parisc));

	p->loops_per_jiffy = loops_per_jiffy;
	p->dev = dev;		/* Save IODC data in case we need it */
	p->hpa = dev->hpa.start;	/* save CPU hpa */
	p->cpuid = cpuid;	/* save CPU id */
	p->txn_addr = txn_addr;	/* save CPU IRQ address */
	p->cpu_num = cpu_info.cpu_num;
	p->cpu_loc = cpu_info.cpu_loc;

	store_cpu_topology(cpuid);

#ifdef CONFIG_SMP
	/*
	** FIXME: review if any other initialization is clobbered
	**	  for boot_cpu by the above memset().
	*/
	init_percpu_prof(cpuid);
#endif

	/*
	** CONFIG_SMP: init_smp_config() will attempt to get CPUs into
	** OS control. RENDEZVOUS is the default state - see the memset() above.
	**	p->state = STATE_RENDEZVOUS;
	*/

#if 0
	/* CPU 0 IRQ table is statically allocated/initialized */
	if (cpuid) {
		struct irqaction actions[];

		/*
		** itimer and ipi IRQ handlers are statically initialized in
		** arch/parisc/kernel/irq.c. ie Don't need to register them.
		*/
		actions = kmalloc(sizeof(struct irqaction)*MAX_CPU_IRQ, GFP_ATOMIC);
		if (!actions) {
			/* not getting its own table, share with monarch */
			actions = cpu_irq_actions[0];
		}

		cpu_irq_actions[cpuid] = actions;
	}
#endif

	/*
	 * Bring this CPU up now! (ignore bootstrap cpuid == 0)
	 */
#ifdef CONFIG_SMP
	if (cpuid) {
		set_cpu_present(cpuid, true);
		cpu_up(cpuid);
	}
#endif

	return 0;
}

/**
 * collect_boot_cpu_data - Fill the boot_cpu_data structure.
 *
 * This function collects and stores the generic processor information
 * in the boot_cpu_data structure.
 */
void __init collect_boot_cpu_data(void)
{
	unsigned long cr16_seed;
	char orig_prod_num[64], current_prod_num[64], serial_no[64];

	memset(&boot_cpu_data, 0, sizeof(boot_cpu_data));

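	/* Mix the CR16 cycle counter into the random pool as early boot entropy. */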
	cr16_seed = get_cycles();
	add_device_randomness(&cr16_seed, sizeof(cr16_seed));

	boot_cpu_data.cpu_hz = 100 * PAGE0->mem_10msec; /* Hz of this PARISC */

	/* get CPU-Model Information... */
#define p ((unsigned long *)&boot_cpu_data.pdc.model)
	if (pdc_model_info(&boot_cpu_data.pdc.model) == PDC_OK) {
		printk(KERN_INFO
			"model %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
			p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);

		add_device_randomness(&boot_cpu_data.pdc.model,
			sizeof(boot_cpu_data.pdc.model));
	}
#undef p

	if (pdc_model_versions(&boot_cpu_data.pdc.versions, 0) == PDC_OK) {
		printk(KERN_INFO "vers  %08lx\n",
			boot_cpu_data.pdc.versions);

		add_device_randomness(&boot_cpu_data.pdc.versions,
			sizeof(boot_cpu_data.pdc.versions));
	}

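	/* PDC cpuid word: bits 0-4 hold the revision, bits 5-11 the version. */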
	if (pdc_model_cpuid(&boot_cpu_data.pdc.cpuid) == PDC_OK) {
		printk(KERN_INFO "CPUID vers %ld rev %ld (0x%08lx)\n",
			(boot_cpu_data.pdc.cpuid >> 5) & 127,
			boot_cpu_data.pdc.cpuid & 31,
			boot_cpu_data.pdc.cpuid);

		add_device_randomness(&boot_cpu_data.pdc.cpuid,
			sizeof(boot_cpu_data.pdc.cpuid));
	}

	if (pdc_model_capabilities(&boot_cpu_data.pdc.capabilities) == PDC_OK)
		printk(KERN_INFO "capabilities 0x%lx\n",
			boot_cpu_data.pdc.capabilities);

	if (pdc_model_sysmodel(boot_cpu_data.pdc.sys_model_name) == PDC_OK)
		printk(KERN_INFO "model %s\n",
			boot_cpu_data.pdc.sys_model_name);

	dump_stack_set_arch_desc("%s", boot_cpu_data.pdc.sys_model_name);

	boot_cpu_data.hversion = boot_cpu_data.pdc.model.hversion;
	boot_cpu_data.sversion = boot_cpu_data.pdc.model.sversion;

	boot_cpu_data.cpu_type = parisc_get_cpu_type(boot_cpu_data.hversion);
	boot_cpu_data.cpu_name = cpu_name_version[boot_cpu_data.cpu_type][0];
	boot_cpu_data.family_name = cpu_name_version[boot_cpu_data.cpu_type][1];

#ifdef CONFIG_PA8X00
	_parisc_requires_coherency = (boot_cpu_data.cpu_type == mako) ||
				(boot_cpu_data.cpu_type == mako2);
#endif

	if (pdc_model_platform_info(orig_prod_num, current_prod_num, serial_no) == PDC_OK) {
		printk(KERN_INFO "product %s, original product %s, S/N: %s\n",
			current_prod_num[0] ? current_prod_num : "n/a",
			orig_prod_num, serial_no);
		add_device_randomness(orig_prod_num, strlen(orig_prod_num));
		add_device_randomness(current_prod_num, strlen(current_prod_num));
		add_device_randomness(serial_no, strlen(serial_no));
	}
}


/**
 * init_per_cpu - Handle individual processor initializations.
 * @cpunum: logical processor number.
 *
 * This function handles initialization for *every* CPU
 * in the system:
 *
 * o Set "default" CPU width for trap handlers
 *
 * o Enable FP coprocessor
 *   REVISIT: this could be done in the "code 22" trap handler.
 *	(frowand's idea - that way we know which processes need FP
 *	registers saved on the interrupt stack.)
 *   NEWS FLASH: wide kernels need the FP coprocessor enabled to handle
 *	formatted printing of %lx, for example (double divides, I think).
 *
 * o Enable CPU profiling hooks.
 */
int __init init_per_cpu(int cpunum)
{
	int ret;
	struct pdc_coproc_cfg coproc_cfg;

	set_firmware_width();
	ret = pdc_coproc_cfg(&coproc_cfg);

	store_cpu_topology(cpunum);

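	/* If PDC reports a functional FP coprocessor, enable it via the CCR. */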
	if (ret >= 0 && coproc_cfg.ccr_functional) {
		mtctl(coproc_cfg.ccr_functional, 10);  /* 10 == Coprocessor Control Reg */

		/* FWIW, FP rev/model is a more accurate way to determine
		** CPU type. CPU rev/model has some ambiguous cases.
		*/
		per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
		per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;

		if (cpunum == 0)
			printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n",
				cpunum, coproc_cfg.revision, coproc_cfg.model);

		/*
		** store status register to stack (hopefully aligned)
		** and clear the T-bit.
		*/
		asm volatile ("fstd    %fr0,8(%sp)");

	} else {
		printk(KERN_WARNING "WARNING: No FP CoProcessor?!"
			" (coproc_cfg.ccr_functional == 0x%lx, expected 0xc0)\n"
#ifdef CONFIG_64BIT
			"Halting Machine - FP required\n"
#endif
			, coproc_cfg.ccr_functional);
#ifdef CONFIG_64BIT
		mdelay(100);	/* previous chars get pushed to console */
		panic("FP CoProc not reported");
#endif
	}

	/* FUTURE: Enable Performance Monitor : ccr bit 0x20 */
	init_percpu_prof(cpunum);

	return ret;
}

/*
 * Display CPU info for all CPUs.
 */
int
show_cpuinfo (struct seq_file *m, void *v)
{
	unsigned long cpu;

	for_each_online_cpu(cpu) {
		const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
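		/* Skip per-CPU slots that processor_probe() never filled in (hpa == 0). */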
#ifdef CONFIG_SMP
		if (0 == cpuinfo->hpa)
			continue;
#endif
		seq_printf(m, "processor\t: %lu\n"
				"cpu family\t: PA-RISC %s\n",
				cpu, boot_cpu_data.family_name);

		seq_printf(m, "cpu\t\t: %s\n", boot_cpu_data.cpu_name);

		/* cpu MHz */
		seq_printf(m, "cpu MHz\t\t: %d.%06d\n",
				boot_cpu_data.cpu_hz / 1000000,
				boot_cpu_data.cpu_hz % 1000000);

#ifdef CONFIG_PARISC_CPU_TOPOLOGY
		seq_printf(m, "physical id\t: %d\n",
				topology_physical_package_id(cpu));
		seq_printf(m, "siblings\t: %d\n",
				cpumask_weight(topology_core_cpumask(cpu)));
		seq_printf(m, "core id\t\t: %d\n", topology_core_id(cpu));
#endif

		seq_printf(m, "capabilities\t:");
		if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS32)
			seq_puts(m, " os32");
		if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS64)
			seq_puts(m, " os64");
		if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)
			seq_puts(m, " iopdir_fdc");
		switch (boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) {
		case PDC_MODEL_NVA_SUPPORTED:
			seq_puts(m, " nva_supported");
			break;
		case PDC_MODEL_NVA_SLOW:
			seq_puts(m, " nva_slow");
			break;
		case PDC_MODEL_NVA_UNSUPPORTED:
			seq_puts(m, " needs_equivalent_aliasing");
			break;
		}
		seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities);

		seq_printf(m, "model\t\t: %s\n"
				"model name\t: %s\n",
				boot_cpu_data.pdc.sys_model_name,
				cpuinfo->dev ?
				cpuinfo->dev->name : "Unknown");

		seq_printf(m, "hversion\t: 0x%08x\n"
				"sversion\t: 0x%08x\n",
				boot_cpu_data.hversion,
				boot_cpu_data.sversion);

		/* print cachesize info */
		show_cache_info(m);

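		/* bogomips = loops_per_jiffy * HZ / 500000, printed with two decimal places */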
		seq_printf(m, "bogomips\t: %lu.%02lu\n",
			     cpuinfo->loops_per_jiffy / (500000 / HZ),
			     (cpuinfo->loops_per_jiffy / (5000 / HZ)) % 100);

		seq_printf(m, "software id\t: %ld\n\n",
				boot_cpu_data.pdc.model.sw_id);
	}
	return 0;
}

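/* Claim any PA-RISC processor module (HPHW_NPROC), regardless of hversion/sversion. */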
static const struct parisc_device_id processor_tbl[] __initconst = {
	{ HPHW_NPROC, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID },
	{ 0, }
};

static struct parisc_driver cpu_driver __refdata = {
	.name		= "CPU",
	.id_table	= processor_tbl,
	.probe		= processor_probe
};

/**
 * processor_init - Processor initialization procedure.
 *
 * Register this driver.
 */
void __init processor_init(void)
{
	register_parisc_driver(&cpu_driver);
}