// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *    Initial setup-routines for HP 9000 based hardware.
 *
 *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
 *    Modifications for PA-RISC (C) 1999-2008 Helge Deller <deller@gmx.de>
 *    Modifications copyright 1999 SuSE GmbH (Philipp Rumpf)
 *    Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net>
 *    Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org>
 *    Modifications copyright 2001 Ryan Bradetich <rbradetich@uswest.net>
 *
 *    Initial PA-RISC Version: 04-23-1999 by Helge Deller
 */
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/param.h>
#include <asm/cache.h>
#include <asm/hardware.h>	/* for register_parisc_driver() stuff */
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/irq.h>		/* for struct irq_region */
#include <asm/parisc-device.h>

struct system_cpuinfo_parisc boot_cpu_data __ro_after_init;
EXPORT_SYMBOL(boot_cpu_data);
#ifdef CONFIG_PA8X00
int _parisc_requires_coherency __ro_after_init;
EXPORT_SYMBOL(_parisc_requires_coherency);
#endif

DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);

/*
**  	PARISC CPU driver - claim "device" and initialize CPU data structures.
**
** Consolidate per CPU initialization into (mostly) one module.
** Monarch CPU will initialize boot_cpu_data which shouldn't
** change once the system has booted.
**
** The callback *should* do per-instance initialization of
** everything including the monarch. "Per CPU" init code in
** setup.c:start_parisc() has migrated here and start_parisc()
** will call register_parisc_driver(&cpu_driver) before calling do_inventory().
**
** The goal of consolidating CPU initialization into one place is
** to make sure all CPUs get initialized the same way.
** The code path not shared is how PDC hands control of the CPU to the OS.
** The initialization of OS data structures is the same (done below).
*/

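/*
** Editorial sketch of the boot-time flow implied by the comment above
** (not verified against every tree): start_parisc() registers cpu_driver
** via processor_init() and then runs do_inventory(); the firmware device
** walk matches each HPHW_NPROC ("CPU") module against processor_tbl at
** the bottom of this file, which calls processor_probe() once per CPU.
*/
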
/**
 * init_percpu_prof - enable/setup per cpu profiling hooks.
 * @cpunum: The processor instance.
 *
 * FIXME: doesn't do much yet...
 */
static void
init_percpu_prof(unsigned long cpunum)
{
}


/**
 * processor_probe - Determine if processor driver should claim this device.
 * @dev: The device which has been found.
 *
 * Determine if processor driver should claim this chip (return 0) or not
 * (return 1).  If so, initialize the chip and tell other partners in crime
 * they have work to do.
 */
static int __init processor_probe(struct parisc_device *dev)
{
	unsigned long txn_addr;
	unsigned long cpuid;
	struct cpuinfo_parisc *p;
	struct pdc_pat_cpu_num cpu_info = { };

#ifdef CONFIG_SMP
	if (num_online_cpus() >= nr_cpu_ids) {
		printk(KERN_INFO "num_online_cpus() >= nr_cpu_ids\n");
		return 1;
	}
#else
	if (boot_cpu_data.cpu_count > 0) {
		printk(KERN_INFO "CONFIG_SMP=n  ignoring additional CPUs\n");
		return 1;
	}
#endif

	/* logical CPU ID and update global counter
	 * May get overwritten by PAT code.
	 */
	cpuid = boot_cpu_data.cpu_count;
	txn_addr = dev->hpa.start;	/* for legacy PDC */
	cpu_info.cpu_num = cpu_info.cpu_loc = cpuid;

#ifdef CONFIG_64BIT
	if (is_pdc_pat()) {
		ulong status;
		unsigned long bytecnt;
		pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;

		pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
		if (!pa_pdc_cell)
			panic("couldn't allocate memory for PDC_PAT_CELL!");

		status = pdc_pat_cell_module(&bytecnt, dev->pcell_loc,
			dev->mod_index, PA_VIEW, pa_pdc_cell);

		BUG_ON(PDC_OK != status);

		/* verify it's the same as what do_pat_inventory() found */
		BUG_ON(dev->mod_info != pa_pdc_cell->mod_info);
		BUG_ON(dev->pmod_loc != pa_pdc_cell->mod_location);

		txn_addr = pa_pdc_cell->mod[0];   /* id_eid for IO sapic */

		kfree(pa_pdc_cell);

		/* get the cpu number */
		status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa.start);
		BUG_ON(PDC_OK != status);

		pr_info("Logical CPU #%lu is physical cpu #%lu at location "
			"0x%lx with hpa %pa\n",
			cpuid, cpu_info.cpu_num, cpu_info.cpu_loc,
			&dev->hpa.start);

#undef USE_PAT_CPUID
#ifdef USE_PAT_CPUID
/* We need contiguous numbers for cpuid. Firmware's notion
 * of cpuid is for physical CPUs and we just don't care yet.
 * We'll care when we need to query PAT PDC about a CPU *after*
 * boot time (ie shutdown a CPU from an OS perspective).
 */
		if (cpu_info.cpu_num >= NR_CPUS) {
			printk(KERN_WARNING "IGNORING CPU at %pa,"
				" cpu_slot_id > NR_CPUS"
				" (%ld > %d)\n",
				&dev->hpa.start, cpu_info.cpu_num, NR_CPUS);
			/* Ignore CPU since it will only crash */
			boot_cpu_data.cpu_count--;
			return 1;
		} else {
			cpuid = cpu_info.cpu_num;
		}
#endif
	}
#endif

	p = &per_cpu(cpu_data, cpuid);
	boot_cpu_data.cpu_count++;

	/* initialize counters - CPU 0 gets it_value set in time_init() */
	if (cpuid)
		memset(p, 0, sizeof(struct cpuinfo_parisc));

	p->loops_per_jiffy = loops_per_jiffy;
	p->dev = dev;		/* Save IODC data in case we need it */
	p->hpa = dev->hpa.start;	/* save CPU hpa */
	p->cpuid = cpuid;	/* save CPU id */
	p->txn_addr = txn_addr;	/* save CPU IRQ address */
	p->cpu_num = cpu_info.cpu_num;
	p->cpu_loc = cpu_info.cpu_loc;

	store_cpu_topology(cpuid);

#ifdef CONFIG_SMP
	/*
	** FIXME: review if any other initialization is clobbered
	**	  for boot_cpu by the above memset().
	*/
	init_percpu_prof(cpuid);
#endif

	/*
	** CONFIG_SMP: init_smp_config() will attempt to get CPUs into
	** OS control. RENDEZVOUS is the default state - see mem_set above.
	**	p->state = STATE_RENDEZVOUS;
	*/

#if 0
	/* CPU 0 IRQ table is statically allocated/initialized */
	if (cpuid) {
		struct irqaction actions[];

		/*
		** itimer and ipi IRQ handlers are statically initialized in
		** arch/parisc/kernel/irq.c. ie Don't need to register them.
		*/
		actions = kmalloc(sizeof(struct irqaction)*MAX_CPU_IRQ, GFP_ATOMIC);
		if (!actions) {
			/* not getting its own table, share with monarch */
			actions = cpu_irq_actions[0];
		}

		cpu_irq_actions[cpuid] = actions;
	}
#endif

	/*
	 * Bring this CPU up now! (ignore bootstrap cpuid == 0)
	 */
#ifdef CONFIG_SMP
	if (cpuid) {
		set_cpu_present(cpuid, true);
		cpu_up(cpuid);
	}
#endif

	return 0;
}

/**
 * collect_boot_cpu_data - Fill the boot_cpu_data structure.
 *
 * This function collects and stores the generic processor information
 * in the boot_cpu_data structure.
 */
void __init collect_boot_cpu_data(void)
{
	unsigned long cr16_seed;
	char orig_prod_num[64], current_prod_num[64], serial_no[64];

	memset(&boot_cpu_data, 0, sizeof(boot_cpu_data));

	cr16_seed = get_cycles();
	add_device_randomness(&cr16_seed, sizeof(cr16_seed));

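	/*
	 * Editorial note (assumption): PAGE0->mem_10msec is firmware's
	 * boot-time calibration value, i.e. the number of processor cycles
	 * in 10 ms, so scaling by 100 yields the clock rate in Hz.
	 */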
	boot_cpu_data.cpu_hz = 100 * PAGE0->mem_10msec; /* Hz of this PARISC */

	/* get CPU-Model Information... */
#define p ((unsigned long *)&boot_cpu_data.pdc.model)
	if (pdc_model_info(&boot_cpu_data.pdc.model) == PDC_OK) {
		printk(KERN_INFO
			"model %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
			p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);

		add_device_randomness(&boot_cpu_data.pdc.model,
			sizeof(boot_cpu_data.pdc.model));
	}
#undef p

	if (pdc_model_versions(&boot_cpu_data.pdc.versions, 0) == PDC_OK) {
		printk(KERN_INFO "vers  %08lx\n",
			boot_cpu_data.pdc.versions);

		add_device_randomness(&boot_cpu_data.pdc.versions,
			sizeof(boot_cpu_data.pdc.versions));
	}

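	/*
	 * The PDC cpuid word is decoded below as revision in bits [4:0]
	 * and version in bits [11:5], matching the shift/mask in the
	 * "CPUID vers/rev" printout.
	 */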
	if (pdc_model_cpuid(&boot_cpu_data.pdc.cpuid) == PDC_OK) {
		printk(KERN_INFO "CPUID vers %ld rev %ld (0x%08lx)\n",
			(boot_cpu_data.pdc.cpuid >> 5) & 127,
			boot_cpu_data.pdc.cpuid & 31,
			boot_cpu_data.pdc.cpuid);

		add_device_randomness(&boot_cpu_data.pdc.cpuid,
			sizeof(boot_cpu_data.pdc.cpuid));
	}

	if (pdc_model_capabilities(&boot_cpu_data.pdc.capabilities) == PDC_OK)
		printk(KERN_INFO "capabilities 0x%lx\n",
			boot_cpu_data.pdc.capabilities);

	if (pdc_model_sysmodel(boot_cpu_data.pdc.sys_model_name) == PDC_OK)
		printk(KERN_INFO "model %s\n",
			boot_cpu_data.pdc.sys_model_name);

	dump_stack_set_arch_desc("%s", boot_cpu_data.pdc.sys_model_name);

	boot_cpu_data.hversion =  boot_cpu_data.pdc.model.hversion;
	boot_cpu_data.sversion =  boot_cpu_data.pdc.model.sversion;

	boot_cpu_data.cpu_type = parisc_get_cpu_type(boot_cpu_data.hversion);
	boot_cpu_data.cpu_name = cpu_name_version[boot_cpu_data.cpu_type][0];
	boot_cpu_data.family_name = cpu_name_version[boot_cpu_data.cpu_type][1];

#ifdef CONFIG_PA8X00
	_parisc_requires_coherency = (boot_cpu_data.cpu_type == mako) ||
				(boot_cpu_data.cpu_type == mako2);
#endif

	if (pdc_model_platform_info(orig_prod_num, current_prod_num, serial_no) == PDC_OK) {
		printk(KERN_INFO "product %s, original product %s, S/N: %s\n",
			current_prod_num[0] ? current_prod_num : "n/a",
			orig_prod_num, serial_no);
		add_device_randomness(orig_prod_num, strlen(orig_prod_num));
		add_device_randomness(current_prod_num, strlen(current_prod_num));
		add_device_randomness(serial_no, strlen(serial_no));
	}
}


/**
 * init_per_cpu - Handle individual processor initializations.
 * @cpunum: logical processor number.
 *
 * This function handles initialization for *every* CPU
 * in the system:
 *
 * o Set "default" CPU width for trap handlers
 *
 * o Enable FP coprocessor
 *   REVISIT: this could be done in the "code 22" trap handler.
 *	(frowands idea - that way we know which processes need FP
 *	registers saved on the interrupt stack.)
 *   NEWS FLASH: wide kernels need FP coprocessor enabled to handle
 *	formatted printing of %lx for example (double divides I think)
 *
 * o Enable CPU profiling hooks.
 */
int __init init_per_cpu(int cpunum)
{
	int ret;
	struct pdc_coproc_cfg coproc_cfg;

	set_firmware_width();
	ret = pdc_coproc_cfg(&coproc_cfg);

	store_cpu_topology(cpunum);

	if (ret >= 0 && coproc_cfg.ccr_functional) {
		mtctl(coproc_cfg.ccr_functional, 10);  /* 10 == Coprocessor Control Reg */

		/* FWIW, FP rev/model is a more accurate way to determine
		** CPU type. CPU rev/model has some ambiguous cases.
		*/
		per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
		per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;

		if (cpunum == 0)
			printk(KERN_INFO  "FP[%d] enabled: Rev %ld Model %ld\n",
				cpunum, coproc_cfg.revision, coproc_cfg.model);

		/*
		** store status register to stack (hopefully aligned)
		** and clear the T-bit.
		*/
		asm volatile ("fstd    %fr0,8(%sp)");

	} else {
		printk(KERN_WARNING  "WARNING: No FP CoProcessor?!"
			" (coproc_cfg.ccr_functional == 0x%lx, expected 0xc0)\n"
#ifdef CONFIG_64BIT
			"Halting Machine - FP required\n"
#endif
			, coproc_cfg.ccr_functional);
#ifdef CONFIG_64BIT
		mdelay(100);	/* previous chars get pushed to console */
		panic("FP CoProc not reported");
#endif
	}

	/* FUTURE: Enable Performance Monitor : ccr bit 0x20 */
	init_percpu_prof(cpunum);

	return ret;
}

/*
 * Display CPU info for all CPUs.
 */
int
show_cpuinfo (struct seq_file *m, void *v)
{
	unsigned long cpu;

	for_each_online_cpu(cpu) {
		const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
#ifdef CONFIG_SMP
		if (0 == cpuinfo->hpa)
			continue;
#endif
		seq_printf(m, "processor\t: %lu\n"
				"cpu family\t: PA-RISC %s\n",
				 cpu, boot_cpu_data.family_name);

		seq_printf(m, "cpu\t\t: %s\n",  boot_cpu_data.cpu_name );

		/* cpu MHz */
		seq_printf(m, "cpu MHz\t\t: %d.%06d\n",
				 boot_cpu_data.cpu_hz / 1000000,
				 boot_cpu_data.cpu_hz % 1000000  );

#ifdef CONFIG_PARISC_CPU_TOPOLOGY
		seq_printf(m, "physical id\t: %d\n",
				topology_physical_package_id(cpu));
		seq_printf(m, "siblings\t: %d\n",
				cpumask_weight(topology_core_cpumask(cpu)));
		seq_printf(m, "core id\t\t: %d\n", topology_core_id(cpu));
#endif

		seq_printf(m, "capabilities\t:");
		if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS32)
			seq_puts(m, " os32");
		if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS64)
			seq_puts(m, " os64");
		if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)
			seq_puts(m, " iopdir_fdc");
		switch (boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) {
		case PDC_MODEL_NVA_SUPPORTED:
			seq_puts(m, " nva_supported");
			break;
		case PDC_MODEL_NVA_SLOW:
			seq_puts(m, " nva_slow");
			break;
		case PDC_MODEL_NVA_UNSUPPORTED:
			seq_puts(m, " needs_equivalent_aliasing");
			break;
		}
		seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities);

		seq_printf(m, "model\t\t: %s\n"
				"model name\t: %s\n",
				 boot_cpu_data.pdc.sys_model_name,
				 cpuinfo->dev ?
				 cpuinfo->dev->name : "Unknown");

		seq_printf(m, "hversion\t: 0x%08x\n"
			        "sversion\t: 0x%08x\n",
				 boot_cpu_data.hversion,
				 boot_cpu_data.sversion );

		/* print cachesize info */
		show_cache_info(m);

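		/*
		 * bogomips = loops_per_jiffy * HZ / 500000; the second
		 * argument below recovers the first two decimal places
		 * of that same value.
		 */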
		seq_printf(m, "bogomips\t: %lu.%02lu\n",
			     cpuinfo->loops_per_jiffy / (500000 / HZ),
			     (cpuinfo->loops_per_jiffy / (5000 / HZ)) % 100);

		seq_printf(m, "software id\t: %ld\n\n",
				boot_cpu_data.pdc.model.sw_id);
	}
	return 0;
}

static const struct parisc_device_id processor_tbl[] __initconst = {
	{ HPHW_NPROC, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID },
	{ 0, }
};

static struct parisc_driver cpu_driver __refdata = {
	.name		= "CPU",
	.id_table	= processor_tbl,
	.probe		= processor_probe
};

/**
 * processor_init - Processor initialization procedure.
 *
 * Register this driver.
 */
void __init processor_init(void)
{
	register_parisc_driver(&cpu_driver);
}