Loading...
1/*
2 * linux/arch/alpha/kernel/setup.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 */
6
7/* 2.3.x bootmem, 1999 Andrea Arcangeli <andrea@suse.de> */
8
9/*
10 * Bootup setup stuff.
11 */
12
13#include <linux/sched.h>
14#include <linux/kernel.h>
15#include <linux/mm.h>
16#include <linux/stddef.h>
17#include <linux/unistd.h>
18#include <linux/ptrace.h>
19#include <linux/slab.h>
20#include <linux/user.h>
21#include <linux/screen_info.h>
22#include <linux/delay.h>
23#include <linux/mc146818rtc.h>
24#include <linux/console.h>
25#include <linux/cpu.h>
26#include <linux/errno.h>
27#include <linux/init.h>
28#include <linux/string.h>
29#include <linux/ioport.h>
30#include <linux/platform_device.h>
31#include <linux/bootmem.h>
32#include <linux/pci.h>
33#include <linux/seq_file.h>
34#include <linux/root_dev.h>
35#include <linux/initrd.h>
36#include <linux/eisa.h>
37#include <linux/pfn.h>
38#ifdef CONFIG_MAGIC_SYSRQ
39#include <linux/sysrq.h>
40#include <linux/reboot.h>
41#endif
42#include <linux/notifier.h>
43#include <asm/setup.h>
44#include <asm/io.h>
45#include <linux/log2.h>
46
47extern struct atomic_notifier_head panic_notifier_list;
48static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
49static struct notifier_block alpha_panic_block = {
50 alpha_panic_event,
51 NULL,
52 INT_MAX /* try to do it first */
53};
54
55#include <asm/uaccess.h>
56#include <asm/pgtable.h>
57#include <asm/system.h>
58#include <asm/hwrpb.h>
59#include <asm/dma.h>
60#include <asm/mmu_context.h>
61#include <asm/console.h>
62
63#include "proto.h"
64#include "pci_impl.h"
65
66
67struct hwrpb_struct *hwrpb;
68EXPORT_SYMBOL(hwrpb);
69unsigned long srm_hae;
70
71int alpha_l1i_cacheshape;
72int alpha_l1d_cacheshape;
73int alpha_l2_cacheshape;
74int alpha_l3_cacheshape;
75
76#ifdef CONFIG_VERBOSE_MCHECK
77/* 0=minimum, 1=verbose, 2=all */
78/* These can be overridden via the command line, ie "verbose_mcheck=2") */
79unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
80#endif
81
82#ifdef CONFIG_NUMA
83struct cpumask node_to_cpumask_map[MAX_NUMNODES] __read_mostly;
84EXPORT_SYMBOL(node_to_cpumask_map);
85#endif
86
87/* Which processor we booted from. */
88int boot_cpuid;
89
90/*
91 * Using SRM callbacks for initial console output. This works from
92 * setup_arch() time through the end of time_init(), as those places
93 * are under our (Alpha) control.
94
95 * "srmcons" specified in the boot command arguments allows us to
96 * see kernel messages during the period of time before the true
97 * console device is "registered" during console_init().
98 * As of this version (2.5.59), console_init() will call
99 * disable_early_printk() as the last action before initializing
100 * the console drivers. That's the last possible time srmcons can be
101 * unregistered without interfering with console behavior.
102 *
103 * By default, OFF; set it with a bootcommand arg of "srmcons" or
104 * "console=srm". The meaning of these two args is:
105 * "srmcons" - early callback prints
106 * "console=srm" - full callback based console, including early prints
107 */
108int srmcons_output = 0;
109
110/* Enforce a memory size limit; useful for testing. By default, none. */
111unsigned long mem_size_limit = 0;
112
113/* Set AGP GART window size (0 means disabled). */
114unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;
115
116#ifdef CONFIG_ALPHA_GENERIC
117struct alpha_machine_vector alpha_mv;
118int alpha_using_srm;
119EXPORT_SYMBOL(alpha_using_srm);
120#endif
121
122static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
123 unsigned long);
124static struct alpha_machine_vector *get_sysvec_byname(const char *);
125static void get_sysnames(unsigned long, unsigned long, unsigned long,
126 char **, char **);
127static void determine_cpu_caches (unsigned int);
128
129static char __initdata command_line[COMMAND_LINE_SIZE];
130
131/*
132 * The format of "screen_info" is strange, and due to early
133 * i386-setup code. This is just enough to make the console
134 * code think we're on a VGA color display.
135 */
136
137struct screen_info screen_info = {
138 .orig_x = 0,
139 .orig_y = 25,
140 .orig_video_cols = 80,
141 .orig_video_lines = 25,
142 .orig_video_isVGA = 1,
143 .orig_video_points = 16
144};
145
146EXPORT_SYMBOL(screen_info);
147
148/*
149 * The direct map I/O window, if any. This should be the same
150 * for all busses, since it's used by virt_to_bus.
151 */
152
153unsigned long __direct_map_base;
154unsigned long __direct_map_size;
155EXPORT_SYMBOL(__direct_map_base);
156EXPORT_SYMBOL(__direct_map_size);
157
158/*
159 * Declare all of the machine vectors.
160 */
161
162/* GCC 2.7.2 (on alpha at least) is lame. It does not support either
163 __attribute__((weak)) or #pragma weak. Bypass it and talk directly
164 to the assembler. */
165
166#define WEAK(X) \
167 extern struct alpha_machine_vector X; \
168 asm(".weak "#X)
169
170WEAK(alcor_mv);
171WEAK(alphabook1_mv);
172WEAK(avanti_mv);
173WEAK(cabriolet_mv);
174WEAK(clipper_mv);
175WEAK(dp264_mv);
176WEAK(eb164_mv);
177WEAK(eb64p_mv);
178WEAK(eb66_mv);
179WEAK(eb66p_mv);
180WEAK(eiger_mv);
181WEAK(jensen_mv);
182WEAK(lx164_mv);
183WEAK(lynx_mv);
184WEAK(marvel_ev7_mv);
185WEAK(miata_mv);
186WEAK(mikasa_mv);
187WEAK(mikasa_primo_mv);
188WEAK(monet_mv);
189WEAK(nautilus_mv);
190WEAK(noname_mv);
191WEAK(noritake_mv);
192WEAK(noritake_primo_mv);
193WEAK(p2k_mv);
194WEAK(pc164_mv);
195WEAK(privateer_mv);
196WEAK(rawhide_mv);
197WEAK(ruffian_mv);
198WEAK(rx164_mv);
199WEAK(sable_mv);
200WEAK(sable_gamma_mv);
201WEAK(shark_mv);
202WEAK(sx164_mv);
203WEAK(takara_mv);
204WEAK(titan_mv);
205WEAK(webbrick_mv);
206WEAK(wildfire_mv);
207WEAK(xl_mv);
208WEAK(xlt_mv);
209
210#undef WEAK
211
212/*
213 * I/O resources inherited from PeeCees. Except for perhaps the
214 * turbochannel alphas, everyone has these on some sort of SuperIO chip.
215 *
216 * ??? If this becomes less standard, move the struct out into the
217 * machine vector.
218 */
219
220static void __init
221reserve_std_resources(void)
222{
223 static struct resource standard_io_resources[] = {
224 { .name = "rtc", .start = -1, .end = -1 },
225 { .name = "dma1", .start = 0x00, .end = 0x1f },
226 { .name = "pic1", .start = 0x20, .end = 0x3f },
227 { .name = "timer", .start = 0x40, .end = 0x5f },
228 { .name = "keyboard", .start = 0x60, .end = 0x6f },
229 { .name = "dma page reg", .start = 0x80, .end = 0x8f },
230 { .name = "pic2", .start = 0xa0, .end = 0xbf },
231 { .name = "dma2", .start = 0xc0, .end = 0xdf },
232 };
233
234 struct resource *io = &ioport_resource;
235 size_t i;
236
237 if (hose_head) {
238 struct pci_controller *hose;
239 for (hose = hose_head; hose; hose = hose->next)
240 if (hose->index == 0) {
241 io = hose->io_space;
242 break;
243 }
244 }
245
246 /* Fix up for the Jensen's queer RTC placement. */
247 standard_io_resources[0].start = RTC_PORT(0);
248 standard_io_resources[0].end = RTC_PORT(0) + 0x10;
249
250 for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i)
251 request_resource(io, standard_io_resources+i);
252}
253
254#define PFN_MAX PFN_DOWN(0x80000000)
255#define for_each_mem_cluster(memdesc, _cluster, i) \
256 for ((_cluster) = (memdesc)->cluster, (i) = 0; \
257 (i) < (memdesc)->numclusters; (i)++, (_cluster)++)
258
259static unsigned long __init
260get_mem_size_limit(char *s)
261{
262 unsigned long end = 0;
263 char *from = s;
264
265 end = simple_strtoul(from, &from, 0);
266 if ( *from == 'K' || *from == 'k' ) {
267 end = end << 10;
268 from++;
269 } else if ( *from == 'M' || *from == 'm' ) {
270 end = end << 20;
271 from++;
272 } else if ( *from == 'G' || *from == 'g' ) {
273 end = end << 30;
274 from++;
275 }
276 return end >> PAGE_SHIFT; /* Return the PFN of the limit. */
277}
278
279#ifdef CONFIG_BLK_DEV_INITRD
280void * __init
281move_initrd(unsigned long mem_limit)
282{
283 void *start;
284 unsigned long size;
285
286 size = initrd_end - initrd_start;
287 start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0);
288 if (!start || __pa(start) + size > mem_limit) {
289 initrd_start = initrd_end = 0;
290 return NULL;
291 }
292 memmove(start, (void *)initrd_start, size);
293 initrd_start = (unsigned long)start;
294 initrd_end = initrd_start + size;
295 printk("initrd moved to %p\n", start);
296 return start;
297}
298#endif
299
300#ifndef CONFIG_DISCONTIGMEM
301static void __init
302setup_memory(void *kernel_end)
303{
304 struct memclust_struct * cluster;
305 struct memdesc_struct * memdesc;
306 unsigned long start_kernel_pfn, end_kernel_pfn;
307 unsigned long bootmap_size, bootmap_pages, bootmap_start;
308 unsigned long start, end;
309 unsigned long i;
310
311 /* Find free clusters, and init and free the bootmem accordingly. */
312 memdesc = (struct memdesc_struct *)
313 (hwrpb->mddt_offset + (unsigned long) hwrpb);
314
315 for_each_mem_cluster(memdesc, cluster, i) {
316 printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n",
317 i, cluster->usage, cluster->start_pfn,
318 cluster->start_pfn + cluster->numpages);
319
320 /* Bit 0 is console/PALcode reserved. Bit 1 is
321 non-volatile memory -- we might want to mark
322 this for later. */
323 if (cluster->usage & 3)
324 continue;
325
326 end = cluster->start_pfn + cluster->numpages;
327 if (end > max_low_pfn)
328 max_low_pfn = end;
329 }
330
331 /*
332 * Except for the NUMA systems (wildfire, marvel) all of the
333 * Alpha systems we run on support 32GB of memory or less.
334 * Since the NUMA systems introduce large holes in memory addressing,
335 * we can get into a situation where there is not enough contiguous
336 * memory for the memory map.
337 *
338 * Limit memory to the first 32GB to limit the NUMA systems to
339 * memory on their first node (wildfire) or 2 (marvel) to avoid
340 * not being able to produce the memory map. In order to access
341 * all of the memory on the NUMA systems, build with discontiguous
342 * memory support.
343 *
344 * If the user specified a memory limit, let that memory limit stand.
345 */
346 if (!mem_size_limit)
347 mem_size_limit = (32ul * 1024 * 1024 * 1024) >> PAGE_SHIFT;
348
349 if (mem_size_limit && max_low_pfn >= mem_size_limit)
350 {
351 printk("setup: forcing memory size to %ldK (from %ldK).\n",
352 mem_size_limit << (PAGE_SHIFT - 10),
353 max_low_pfn << (PAGE_SHIFT - 10));
354 max_low_pfn = mem_size_limit;
355 }
356
357 /* Find the bounds of kernel memory. */
358 start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
359 end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
360 bootmap_start = -1;
361
362 try_again:
363 if (max_low_pfn <= end_kernel_pfn)
364 panic("not enough memory to boot");
365
366 /* We need to know how many physically contiguous pages
367 we'll need for the bootmap. */
368 bootmap_pages = bootmem_bootmap_pages(max_low_pfn);
369
370 /* Now find a good region where to allocate the bootmap. */
371 for_each_mem_cluster(memdesc, cluster, i) {
372 if (cluster->usage & 3)
373 continue;
374
375 start = cluster->start_pfn;
376 end = start + cluster->numpages;
377 if (start >= max_low_pfn)
378 continue;
379 if (end > max_low_pfn)
380 end = max_low_pfn;
381 if (start < start_kernel_pfn) {
382 if (end > end_kernel_pfn
383 && end - end_kernel_pfn >= bootmap_pages) {
384 bootmap_start = end_kernel_pfn;
385 break;
386 } else if (end > start_kernel_pfn)
387 end = start_kernel_pfn;
388 } else if (start < end_kernel_pfn)
389 start = end_kernel_pfn;
390 if (end - start >= bootmap_pages) {
391 bootmap_start = start;
392 break;
393 }
394 }
395
396 if (bootmap_start == ~0UL) {
397 max_low_pfn >>= 1;
398 goto try_again;
399 }
400
401 /* Allocate the bootmap and mark the whole MM as reserved. */
402 bootmap_size = init_bootmem(bootmap_start, max_low_pfn);
403
404 /* Mark the free regions. */
405 for_each_mem_cluster(memdesc, cluster, i) {
406 if (cluster->usage & 3)
407 continue;
408
409 start = cluster->start_pfn;
410 end = cluster->start_pfn + cluster->numpages;
411 if (start >= max_low_pfn)
412 continue;
413 if (end > max_low_pfn)
414 end = max_low_pfn;
415 if (start < start_kernel_pfn) {
416 if (end > end_kernel_pfn) {
417 free_bootmem(PFN_PHYS(start),
418 (PFN_PHYS(start_kernel_pfn)
419 - PFN_PHYS(start)));
420 printk("freeing pages %ld:%ld\n",
421 start, start_kernel_pfn);
422 start = end_kernel_pfn;
423 } else if (end > start_kernel_pfn)
424 end = start_kernel_pfn;
425 } else if (start < end_kernel_pfn)
426 start = end_kernel_pfn;
427 if (start >= end)
428 continue;
429
430 free_bootmem(PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start));
431 printk("freeing pages %ld:%ld\n", start, end);
432 }
433
434 /* Reserve the bootmap memory. */
435 reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size,
436 BOOTMEM_DEFAULT);
437 printk("reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));
438
439#ifdef CONFIG_BLK_DEV_INITRD
440 initrd_start = INITRD_START;
441 if (initrd_start) {
442 initrd_end = initrd_start+INITRD_SIZE;
443 printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
444 (void *) initrd_start, INITRD_SIZE);
445
446 if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
447 if (!move_initrd(PFN_PHYS(max_low_pfn)))
448 printk("initrd extends beyond end of memory "
449 "(0x%08lx > 0x%p)\ndisabling initrd\n",
450 initrd_end,
451 phys_to_virt(PFN_PHYS(max_low_pfn)));
452 } else {
453 reserve_bootmem(virt_to_phys((void *)initrd_start),
454 INITRD_SIZE, BOOTMEM_DEFAULT);
455 }
456 }
457#endif /* CONFIG_BLK_DEV_INITRD */
458}
459#else
460extern void setup_memory(void *);
461#endif /* !CONFIG_DISCONTIGMEM */
462
463int __init
464page_is_ram(unsigned long pfn)
465{
466 struct memclust_struct * cluster;
467 struct memdesc_struct * memdesc;
468 unsigned long i;
469
470 memdesc = (struct memdesc_struct *)
471 (hwrpb->mddt_offset + (unsigned long) hwrpb);
472 for_each_mem_cluster(memdesc, cluster, i)
473 {
474 if (pfn >= cluster->start_pfn &&
475 pfn < cluster->start_pfn + cluster->numpages) {
476 return (cluster->usage & 3) ? 0 : 1;
477 }
478 }
479
480 return 0;
481}
482
483static int __init
484register_cpus(void)
485{
486 int i;
487
488 for_each_possible_cpu(i) {
489 struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
490 if (!p)
491 return -ENOMEM;
492 register_cpu(p, i);
493 }
494 return 0;
495}
496
497arch_initcall(register_cpus);
498
499void __init
500setup_arch(char **cmdline_p)
501{
502 extern char _end[];
503
504 struct alpha_machine_vector *vec = NULL;
505 struct percpu_struct *cpu;
506 char *type_name, *var_name, *p;
507 void *kernel_end = _end; /* end of kernel */
508 char *args = command_line;
509
510 hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr);
511 boot_cpuid = hard_smp_processor_id();
512
513 /*
514 * Pre-process the system type to make sure it will be valid.
515 *
516 * This may restore real CABRIO and EB66+ family names, ie
517 * EB64+ and EB66.
518 *
519 * Oh, and "white box" AS800 (aka DIGITAL Server 3000 series)
520 * and AS1200 (DIGITAL Server 5000 series) have the type as
521 * the negative of the real one.
522 */
523 if ((long)hwrpb->sys_type < 0) {
524 hwrpb->sys_type = -((long)hwrpb->sys_type);
525 hwrpb_update_checksum(hwrpb);
526 }
527
528 /* Register a call for panic conditions. */
529 atomic_notifier_chain_register(&panic_notifier_list,
530 &alpha_panic_block);
531
532#ifdef CONFIG_ALPHA_GENERIC
533 /* Assume that we've booted from SRM if we haven't booted from MILO.
534 Detect the later by looking for "MILO" in the system serial nr. */
535 alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0;
536#endif
537
538 /* If we are using SRM, we want to allow callbacks
539 as early as possible, so do this NOW, and then
540 they should work immediately thereafter.
541 */
542 kernel_end = callback_init(kernel_end);
543
544 /*
545 * Locate the command line.
546 */
547 /* Hack for Jensen... since we're restricted to 8 or 16 chars for
548 boot flags depending on the boot mode, we need some shorthand.
549 This should do for installation. */
550 if (strcmp(COMMAND_LINE, "INSTALL") == 0) {
551 strlcpy(command_line, "root=/dev/fd0 load_ramdisk=1", sizeof command_line);
552 } else {
553 strlcpy(command_line, COMMAND_LINE, sizeof command_line);
554 }
555 strcpy(boot_command_line, command_line);
556 *cmdline_p = command_line;
557
558 /*
559 * Process command-line arguments.
560 */
561 while ((p = strsep(&args, " \t")) != NULL) {
562 if (!*p) continue;
563 if (strncmp(p, "alpha_mv=", 9) == 0) {
564 vec = get_sysvec_byname(p+9);
565 continue;
566 }
567 if (strncmp(p, "cycle=", 6) == 0) {
568 est_cycle_freq = simple_strtol(p+6, NULL, 0);
569 continue;
570 }
571 if (strncmp(p, "mem=", 4) == 0) {
572 mem_size_limit = get_mem_size_limit(p+4);
573 continue;
574 }
575 if (strncmp(p, "srmcons", 7) == 0) {
576 srmcons_output |= 1;
577 continue;
578 }
579 if (strncmp(p, "console=srm", 11) == 0) {
580 srmcons_output |= 2;
581 continue;
582 }
583 if (strncmp(p, "gartsize=", 9) == 0) {
584 alpha_agpgart_size =
585 get_mem_size_limit(p+9) << PAGE_SHIFT;
586 continue;
587 }
588#ifdef CONFIG_VERBOSE_MCHECK
589 if (strncmp(p, "verbose_mcheck=", 15) == 0) {
590 alpha_verbose_mcheck = simple_strtol(p+15, NULL, 0);
591 continue;
592 }
593#endif
594 }
595
596 /* Replace the command line, now that we've killed it with strsep. */
597 strcpy(command_line, boot_command_line);
598
599 /* If we want SRM console printk echoing early, do it now. */
600 if (alpha_using_srm && srmcons_output) {
601 register_srm_console();
602
603 /*
604 * If "console=srm" was specified, clear the srmcons_output
605 * flag now so that time.c won't unregister_srm_console
606 */
607 if (srmcons_output & 2)
608 srmcons_output = 0;
609 }
610
611#ifdef CONFIG_MAGIC_SYSRQ
612 /* If we're using SRM, make sysrq-b halt back to the prom,
613 not auto-reboot. */
614 if (alpha_using_srm) {
615 struct sysrq_key_op *op = __sysrq_get_key_op('b');
616 op->handler = (void *) machine_halt;
617 }
618#endif
619
620 /*
621 * Identify and reconfigure for the current system.
622 */
623 cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
624
625 get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
626 cpu->type, &type_name, &var_name);
627 if (*var_name == '0')
628 var_name = "";
629
630 if (!vec) {
631 vec = get_sysvec(hwrpb->sys_type, hwrpb->sys_variation,
632 cpu->type);
633 }
634
635 if (!vec) {
636 panic("Unsupported system type: %s%s%s (%ld %ld)\n",
637 type_name, (*var_name ? " variation " : ""), var_name,
638 hwrpb->sys_type, hwrpb->sys_variation);
639 }
640 if (vec != &alpha_mv) {
641 alpha_mv = *vec;
642 }
643
644 printk("Booting "
645#ifdef CONFIG_ALPHA_GENERIC
646 "GENERIC "
647#endif
648 "on %s%s%s using machine vector %s from %s\n",
649 type_name, (*var_name ? " variation " : ""),
650 var_name, alpha_mv.vector_name,
651 (alpha_using_srm ? "SRM" : "MILO"));
652
653 printk("Major Options: "
654#ifdef CONFIG_SMP
655 "SMP "
656#endif
657#ifdef CONFIG_ALPHA_EV56
658 "EV56 "
659#endif
660#ifdef CONFIG_ALPHA_EV67
661 "EV67 "
662#endif
663#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
664 "LEGACY_START "
665#endif
666#ifdef CONFIG_VERBOSE_MCHECK
667 "VERBOSE_MCHECK "
668#endif
669
670#ifdef CONFIG_DISCONTIGMEM
671 "DISCONTIGMEM "
672#ifdef CONFIG_NUMA
673 "NUMA "
674#endif
675#endif
676
677#ifdef CONFIG_DEBUG_SPINLOCK
678 "DEBUG_SPINLOCK "
679#endif
680#ifdef CONFIG_MAGIC_SYSRQ
681 "MAGIC_SYSRQ "
682#endif
683 "\n");
684
685 printk("Command line: %s\n", command_line);
686
687 /*
688 * Sync up the HAE.
689 * Save the SRM's current value for restoration.
690 */
691 srm_hae = *alpha_mv.hae_register;
692 __set_hae(alpha_mv.hae_cache);
693
694 /* Reset enable correctable error reports. */
695 wrmces(0x7);
696
697 /* Find our memory. */
698 setup_memory(kernel_end);
699
700 /* First guess at cpu cache sizes. Do this before init_arch. */
701 determine_cpu_caches(cpu->type);
702
703 /* Initialize the machine. Usually has to do with setting up
704 DMA windows and the like. */
705 if (alpha_mv.init_arch)
706 alpha_mv.init_arch();
707
708 /* Reserve standard resources. */
709 reserve_std_resources();
710
711 /*
712 * Give us a default console. TGA users will see nothing until
713 * chr_dev_init is called, rather late in the boot sequence.
714 */
715
716#ifdef CONFIG_VT
717#if defined(CONFIG_VGA_CONSOLE)
718 conswitchp = &vga_con;
719#elif defined(CONFIG_DUMMY_CONSOLE)
720 conswitchp = &dummy_con;
721#endif
722#endif
723
724 /* Default root filesystem to sda2. */
725 ROOT_DEV = Root_SDA2;
726
727#ifdef CONFIG_EISA
728 /* FIXME: only set this when we actually have EISA in this box? */
729 EISA_bus = 1;
730#endif
731
732 /*
733 * Check ASN in HWRPB for validity, report if bad.
734 * FIXME: how was this failing? Should we trust it instead,
735 * and copy the value into alpha_mv.max_asn?
736 */
737
738 if (hwrpb->max_asn != MAX_ASN) {
739 printk("Max ASN from HWRPB is bad (0x%lx)\n", hwrpb->max_asn);
740 }
741
742 /*
743 * Identify the flock of penguins.
744 */
745
746#ifdef CONFIG_SMP
747 setup_smp();
748#endif
749 paging_init();
750}
751
752static char sys_unknown[] = "Unknown";
753static char systype_names[][16] = {
754 "0",
755 "ADU", "Cobra", "Ruby", "Flamingo", "Mannequin", "Jensen",
756 "Pelican", "Morgan", "Sable", "Medulla", "Noname",
757 "Turbolaser", "Avanti", "Mustang", "Alcor", "Tradewind",
758 "Mikasa", "EB64", "EB66", "EB64+", "AlphaBook1",
759 "Rawhide", "K2", "Lynx", "XL", "EB164", "Noritake",
760 "Cortex", "29", "Miata", "XXM", "Takara", "Yukon",
761 "Tsunami", "Wildfire", "CUSCO", "Eiger", "Titan", "Marvel"
762};
763
764static char unofficial_names[][8] = {"100", "Ruffian"};
765
766static char api_names[][16] = {"200", "Nautilus"};
767
768static char eb164_names[][8] = {"EB164", "PC164", "LX164", "SX164", "RX164"};
769static int eb164_indices[] = {0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,4};
770
771static char alcor_names[][16] = {"Alcor", "Maverick", "Bret"};
772static int alcor_indices[] = {0,0,0,1,1,1,0,0,0,0,0,0,2,2,2,2,2,2};
773
774static char eb64p_names[][16] = {"EB64+", "Cabriolet", "AlphaPCI64"};
775static int eb64p_indices[] = {0,0,1,2};
776
777static char eb66_names[][8] = {"EB66", "EB66+"};
778static int eb66_indices[] = {0,0,1};
779
780static char marvel_names[][16] = {
781 "Marvel/EV7"
782};
783static int marvel_indices[] = { 0 };
784
785static char rawhide_names[][16] = {
786 "Dodge", "Wrangler", "Durango", "Tincup", "DaVinci"
787};
788static int rawhide_indices[] = {0,0,0,1,1,2,2,3,3,4,4};
789
790static char titan_names[][16] = {
791 "DEFAULT", "Privateer", "Falcon", "Granite"
792};
793static int titan_indices[] = {0,1,2,2,3};
794
795static char tsunami_names[][16] = {
796 "0", "DP264", "Warhol", "Windjammer", "Monet", "Clipper",
797 "Goldrush", "Webbrick", "Catamaran", "Brisbane", "Melbourne",
798 "Flying Clipper", "Shark"
799};
800static int tsunami_indices[] = {0,1,2,3,4,5,6,7,8,9,10,11,12};
801
802static struct alpha_machine_vector * __init
803get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
804{
805 static struct alpha_machine_vector *systype_vecs[] __initdata =
806 {
807 NULL, /* 0 */
808 NULL, /* ADU */
809 NULL, /* Cobra */
810 NULL, /* Ruby */
811 NULL, /* Flamingo */
812 NULL, /* Mannequin */
813 &jensen_mv,
814 NULL, /* Pelican */
815 NULL, /* Morgan */
816 NULL, /* Sable -- see below. */
817 NULL, /* Medulla */
818 &noname_mv,
819 NULL, /* Turbolaser */
820 &avanti_mv,
821 NULL, /* Mustang */
822 NULL, /* Alcor, Bret, Maverick. HWRPB inaccurate? */
823 NULL, /* Tradewind */
824 NULL, /* Mikasa -- see below. */
825 NULL, /* EB64 */
826 NULL, /* EB66 -- see variation. */
827 NULL, /* EB64+ -- see variation. */
828 &alphabook1_mv,
829 &rawhide_mv,
830 NULL, /* K2 */
831 &lynx_mv, /* Lynx */
832 &xl_mv,
833 NULL, /* EB164 -- see variation. */
834 NULL, /* Noritake -- see below. */
835 NULL, /* Cortex */
836 NULL, /* 29 */
837 &miata_mv,
838 NULL, /* XXM */
839 &takara_mv,
840 NULL, /* Yukon */
841 NULL, /* Tsunami -- see variation. */
842 &wildfire_mv, /* Wildfire */
843 NULL, /* CUSCO */
844 &eiger_mv, /* Eiger */
845 NULL, /* Titan */
846 NULL, /* Marvel */
847 };
848
849 static struct alpha_machine_vector *unofficial_vecs[] __initdata =
850 {
851 NULL, /* 100 */
852 &ruffian_mv,
853 };
854
855 static struct alpha_machine_vector *api_vecs[] __initdata =
856 {
857 NULL, /* 200 */
858 &nautilus_mv,
859 };
860
861 static struct alpha_machine_vector *alcor_vecs[] __initdata =
862 {
863 &alcor_mv, &xlt_mv, &xlt_mv
864 };
865
866 static struct alpha_machine_vector *eb164_vecs[] __initdata =
867 {
868 &eb164_mv, &pc164_mv, &lx164_mv, &sx164_mv, &rx164_mv
869 };
870
871 static struct alpha_machine_vector *eb64p_vecs[] __initdata =
872 {
873 &eb64p_mv,
874 &cabriolet_mv,
875 &cabriolet_mv /* AlphaPCI64 */
876 };
877
878 static struct alpha_machine_vector *eb66_vecs[] __initdata =
879 {
880 &eb66_mv,
881 &eb66p_mv
882 };
883
884 static struct alpha_machine_vector *marvel_vecs[] __initdata =
885 {
886 &marvel_ev7_mv,
887 };
888
889 static struct alpha_machine_vector *titan_vecs[] __initdata =
890 {
891 &titan_mv, /* default */
892 &privateer_mv, /* privateer */
893 &titan_mv, /* falcon */
894 &privateer_mv, /* granite */
895 };
896
897 static struct alpha_machine_vector *tsunami_vecs[] __initdata =
898 {
899 NULL,
900 &dp264_mv, /* dp264 */
901 &dp264_mv, /* warhol */
902 &dp264_mv, /* windjammer */
903 &monet_mv, /* monet */
904 &clipper_mv, /* clipper */
905 &dp264_mv, /* goldrush */
906 &webbrick_mv, /* webbrick */
907 &dp264_mv, /* catamaran */
908 NULL, /* brisbane? */
909 NULL, /* melbourne? */
910 NULL, /* flying clipper? */
911 &shark_mv, /* shark */
912 };
913
914 /* ??? Do we need to distinguish between Rawhides? */
915
916 struct alpha_machine_vector *vec;
917
918 /* Search the system tables first... */
919 vec = NULL;
920 if (type < ARRAY_SIZE(systype_vecs)) {
921 vec = systype_vecs[type];
922 } else if ((type > ST_API_BIAS) &&
923 (type - ST_API_BIAS) < ARRAY_SIZE(api_vecs)) {
924 vec = api_vecs[type - ST_API_BIAS];
925 } else if ((type > ST_UNOFFICIAL_BIAS) &&
926 (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_vecs)) {
927 vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS];
928 }
929
930 /* If we've not found one, try for a variation. */
931
932 if (!vec) {
933 /* Member ID is a bit-field. */
934 unsigned long member = (variation >> 10) & 0x3f;
935
936 cpu &= 0xffffffff; /* make it usable */
937
938 switch (type) {
939 case ST_DEC_ALCOR:
940 if (member < ARRAY_SIZE(alcor_indices))
941 vec = alcor_vecs[alcor_indices[member]];
942 break;
943 case ST_DEC_EB164:
944 if (member < ARRAY_SIZE(eb164_indices))
945 vec = eb164_vecs[eb164_indices[member]];
946 /* PC164 may show as EB164 variation with EV56 CPU,
947 but, since no true EB164 had anything but EV5... */
948 if (vec == &eb164_mv && cpu == EV56_CPU)
949 vec = &pc164_mv;
950 break;
951 case ST_DEC_EB64P:
952 if (member < ARRAY_SIZE(eb64p_indices))
953 vec = eb64p_vecs[eb64p_indices[member]];
954 break;
955 case ST_DEC_EB66:
956 if (member < ARRAY_SIZE(eb66_indices))
957 vec = eb66_vecs[eb66_indices[member]];
958 break;
959 case ST_DEC_MARVEL:
960 if (member < ARRAY_SIZE(marvel_indices))
961 vec = marvel_vecs[marvel_indices[member]];
962 break;
963 case ST_DEC_TITAN:
964 vec = titan_vecs[0]; /* default */
965 if (member < ARRAY_SIZE(titan_indices))
966 vec = titan_vecs[titan_indices[member]];
967 break;
968 case ST_DEC_TSUNAMI:
969 if (member < ARRAY_SIZE(tsunami_indices))
970 vec = tsunami_vecs[tsunami_indices[member]];
971 break;
972 case ST_DEC_1000:
973 if (cpu == EV5_CPU || cpu == EV56_CPU)
974 vec = &mikasa_primo_mv;
975 else
976 vec = &mikasa_mv;
977 break;
978 case ST_DEC_NORITAKE:
979 if (cpu == EV5_CPU || cpu == EV56_CPU)
980 vec = &noritake_primo_mv;
981 else
982 vec = &noritake_mv;
983 break;
984 case ST_DEC_2100_A500:
985 if (cpu == EV5_CPU || cpu == EV56_CPU)
986 vec = &sable_gamma_mv;
987 else
988 vec = &sable_mv;
989 break;
990 }
991 }
992 return vec;
993}
994
995static struct alpha_machine_vector * __init
996get_sysvec_byname(const char *name)
997{
998 static struct alpha_machine_vector *all_vecs[] __initdata =
999 {
1000 &alcor_mv,
1001 &alphabook1_mv,
1002 &avanti_mv,
1003 &cabriolet_mv,
1004 &clipper_mv,
1005 &dp264_mv,
1006 &eb164_mv,
1007 &eb64p_mv,
1008 &eb66_mv,
1009 &eb66p_mv,
1010 &eiger_mv,
1011 &jensen_mv,
1012 &lx164_mv,
1013 &lynx_mv,
1014 &miata_mv,
1015 &mikasa_mv,
1016 &mikasa_primo_mv,
1017 &monet_mv,
1018 &nautilus_mv,
1019 &noname_mv,
1020 &noritake_mv,
1021 &noritake_primo_mv,
1022 &p2k_mv,
1023 &pc164_mv,
1024 &privateer_mv,
1025 &rawhide_mv,
1026 &ruffian_mv,
1027 &rx164_mv,
1028 &sable_mv,
1029 &sable_gamma_mv,
1030 &shark_mv,
1031 &sx164_mv,
1032 &takara_mv,
1033 &webbrick_mv,
1034 &wildfire_mv,
1035 &xl_mv,
1036 &xlt_mv
1037 };
1038
1039 size_t i;
1040
1041 for (i = 0; i < ARRAY_SIZE(all_vecs); ++i) {
1042 struct alpha_machine_vector *mv = all_vecs[i];
1043 if (strcasecmp(mv->vector_name, name) == 0)
1044 return mv;
1045 }
1046 return NULL;
1047}
1048
1049static void
1050get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
1051 char **type_name, char **variation_name)
1052{
1053 unsigned long member;
1054
1055 /* If not in the tables, make it UNKNOWN,
1056 else set type name to family */
1057 if (type < ARRAY_SIZE(systype_names)) {
1058 *type_name = systype_names[type];
1059 } else if ((type > ST_API_BIAS) &&
1060 (type - ST_API_BIAS) < ARRAY_SIZE(api_names)) {
1061 *type_name = api_names[type - ST_API_BIAS];
1062 } else if ((type > ST_UNOFFICIAL_BIAS) &&
1063 (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_names)) {
1064 *type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS];
1065 } else {
1066 *type_name = sys_unknown;
1067 *variation_name = sys_unknown;
1068 return;
1069 }
1070
1071 /* Set variation to "0"; if variation is zero, done. */
1072 *variation_name = systype_names[0];
1073 if (variation == 0) {
1074 return;
1075 }
1076
1077 member = (variation >> 10) & 0x3f; /* member ID is a bit-field */
1078
1079 cpu &= 0xffffffff; /* make it usable */
1080
1081 switch (type) { /* select by family */
1082 default: /* default to variation "0" for now */
1083 break;
1084 case ST_DEC_EB164:
1085 if (member < ARRAY_SIZE(eb164_indices))
1086 *variation_name = eb164_names[eb164_indices[member]];
1087 /* PC164 may show as EB164 variation, but with EV56 CPU,
1088 so, since no true EB164 had anything but EV5... */
1089 if (eb164_indices[member] == 0 && cpu == EV56_CPU)
1090 *variation_name = eb164_names[1]; /* make it PC164 */
1091 break;
1092 case ST_DEC_ALCOR:
1093 if (member < ARRAY_SIZE(alcor_indices))
1094 *variation_name = alcor_names[alcor_indices[member]];
1095 break;
1096 case ST_DEC_EB64P:
1097 if (member < ARRAY_SIZE(eb64p_indices))
1098 *variation_name = eb64p_names[eb64p_indices[member]];
1099 break;
1100 case ST_DEC_EB66:
1101 if (member < ARRAY_SIZE(eb66_indices))
1102 *variation_name = eb66_names[eb66_indices[member]];
1103 break;
1104 case ST_DEC_MARVEL:
1105 if (member < ARRAY_SIZE(marvel_indices))
1106 *variation_name = marvel_names[marvel_indices[member]];
1107 break;
1108 case ST_DEC_RAWHIDE:
1109 if (member < ARRAY_SIZE(rawhide_indices))
1110 *variation_name = rawhide_names[rawhide_indices[member]];
1111 break;
1112 case ST_DEC_TITAN:
1113 *variation_name = titan_names[0]; /* default */
1114 if (member < ARRAY_SIZE(titan_indices))
1115 *variation_name = titan_names[titan_indices[member]];
1116 break;
1117 case ST_DEC_TSUNAMI:
1118 if (member < ARRAY_SIZE(tsunami_indices))
1119 *variation_name = tsunami_names[tsunami_indices[member]];
1120 break;
1121 }
1122}
1123
1124/*
1125 * A change was made to the HWRPB via an ECO and the following code
1126 * tracks a part of the ECO. In HWRPB versions less than 5, the ECO
1127 * was not implemented in the console firmware. If it's revision 5 or
1128 * greater we can get the name of the platform as an ASCII string from
1129 * the HWRPB. That's what this function does. It checks the revision
1130 * level and if the string is in the HWRPB it returns the address of
1131 * the string--a pointer to the name of the platform.
1132 *
1133 * Returns:
1134 * - Pointer to a ASCII string if it's in the HWRPB
1135 * - Pointer to a blank string if the data is not in the HWRPB.
1136 */
1137
1138static char *
1139platform_string(void)
1140{
1141 struct dsr_struct *dsr;
1142 static char unk_system_string[] = "N/A";
1143
1144 /* Go to the console for the string pointer.
1145 * If the rpb_vers is not 5 or greater the rpb
1146 * is old and does not have this data in it.
1147 */
1148 if (hwrpb->revision < 5)
1149 return (unk_system_string);
1150 else {
1151 /* The Dynamic System Recognition struct
1152 * has the system platform name starting
1153 * after the character count of the string.
1154 */
1155 dsr = ((struct dsr_struct *)
1156 ((char *)hwrpb + hwrpb->dsr_offset));
1157 return ((char *)dsr + (dsr->sysname_off +
1158 sizeof(long)));
1159 }
1160}
1161
1162static int
1163get_nr_processors(struct percpu_struct *cpubase, unsigned long num)
1164{
1165 struct percpu_struct *cpu;
1166 unsigned long i;
1167 int count = 0;
1168
1169 for (i = 0; i < num; i++) {
1170 cpu = (struct percpu_struct *)
1171 ((char *)cpubase + i*hwrpb->processor_size);
1172 if ((cpu->flags & 0x1cc) == 0x1cc)
1173 count++;
1174 }
1175 return count;
1176}
1177
1178static void
1179show_cache_size (struct seq_file *f, const char *which, int shape)
1180{
1181 if (shape == -1)
1182 seq_printf (f, "%s\t\t: n/a\n", which);
1183 else if (shape == 0)
1184 seq_printf (f, "%s\t\t: unknown\n", which);
1185 else
1186 seq_printf (f, "%s\t\t: %dK, %d-way, %db line\n",
1187 which, shape >> 10, shape & 15,
1188 1 << ((shape >> 4) & 15));
1189}
1190
1191static int
1192show_cpuinfo(struct seq_file *f, void *slot)
1193{
1194 extern struct unaligned_stat {
1195 unsigned long count, va, pc;
1196 } unaligned[2];
1197
1198 static char cpu_names[][8] = {
1199 "EV3", "EV4", "Simulate", "LCA4", "EV5", "EV45", "EV56",
1200 "EV6", "PCA56", "PCA57", "EV67", "EV68CB", "EV68AL",
1201 "EV68CX", "EV7", "EV79", "EV69"
1202 };
1203
1204 struct percpu_struct *cpu = slot;
1205 unsigned int cpu_index;
1206 char *cpu_name;
1207 char *systype_name;
1208 char *sysvariation_name;
1209 int nr_processors;
1210
1211 cpu_index = (unsigned) (cpu->type - 1);
1212 cpu_name = "Unknown";
1213 if (cpu_index < ARRAY_SIZE(cpu_names))
1214 cpu_name = cpu_names[cpu_index];
1215
1216 get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
1217 cpu->type, &systype_name, &sysvariation_name);
1218
1219 nr_processors = get_nr_processors(cpu, hwrpb->nr_processors);
1220
1221 seq_printf(f, "cpu\t\t\t: Alpha\n"
1222 "cpu model\t\t: %s\n"
1223 "cpu variation\t\t: %ld\n"
1224 "cpu revision\t\t: %ld\n"
1225 "cpu serial number\t: %s\n"
1226 "system type\t\t: %s\n"
1227 "system variation\t: %s\n"
1228 "system revision\t\t: %ld\n"
1229 "system serial number\t: %s\n"
1230 "cycle frequency [Hz]\t: %lu %s\n"
1231 "timer frequency [Hz]\t: %lu.%02lu\n"
1232 "page size [bytes]\t: %ld\n"
1233 "phys. address bits\t: %ld\n"
1234 "max. addr. space #\t: %ld\n"
1235 "BogoMIPS\t\t: %lu.%02lu\n"
1236 "kernel unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
1237 "user unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
1238 "platform string\t\t: %s\n"
1239 "cpus detected\t\t: %d\n",
1240 cpu_name, cpu->variation, cpu->revision,
1241 (char*)cpu->serial_no,
1242 systype_name, sysvariation_name, hwrpb->sys_revision,
1243 (char*)hwrpb->ssn,
1244 est_cycle_freq ? : hwrpb->cycle_freq,
1245 est_cycle_freq ? "est." : "",
1246 hwrpb->intr_freq / 4096,
1247 (100 * hwrpb->intr_freq / 4096) % 100,
1248 hwrpb->pagesize,
1249 hwrpb->pa_bits,
1250 hwrpb->max_asn,
1251 loops_per_jiffy / (500000/HZ),
1252 (loops_per_jiffy / (5000/HZ)) % 100,
1253 unaligned[0].count, unaligned[0].pc, unaligned[0].va,
1254 unaligned[1].count, unaligned[1].pc, unaligned[1].va,
1255 platform_string(), nr_processors);
1256
1257#ifdef CONFIG_SMP
1258 seq_printf(f, "cpus active\t\t: %u\n"
1259 "cpu active mask\t\t: %016lx\n",
1260 num_online_cpus(), cpumask_bits(cpu_possible_mask)[0]);
1261#endif
1262
1263 show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape);
1264 show_cache_size (f, "L1 Dcache", alpha_l1d_cacheshape);
1265 show_cache_size (f, "L2 cache", alpha_l2_cacheshape);
1266 show_cache_size (f, "L3 cache", alpha_l3_cacheshape);
1267
1268 return 0;
1269}
1270
1271static int __init
1272read_mem_block(int *addr, int stride, int size)
1273{
1274 long nloads = size / stride, cnt, tmp;
1275
1276 __asm__ __volatile__(
1277 " rpcc %0\n"
1278 "1: ldl %3,0(%2)\n"
1279 " subq %1,1,%1\n"
1280 /* Next two XORs introduce an explicit data dependency between
1281 consecutive loads in the loop, which will give us true load
1282 latency. */
1283 " xor %3,%2,%2\n"
1284 " xor %3,%2,%2\n"
1285 " addq %2,%4,%2\n"
1286 " bne %1,1b\n"
1287 " rpcc %3\n"
1288 " subl %3,%0,%0\n"
1289 : "=&r" (cnt), "=&r" (nloads), "=&r" (addr), "=&r" (tmp)
1290 : "r" (stride), "1" (nloads), "2" (addr));
1291
1292 return cnt / (size / stride);
1293}
1294
1295#define CSHAPE(totalsize, linesize, assoc) \
1296 ((totalsize & ~0xff) | (linesize << 4) | assoc)
1297
1298/* ??? EV5 supports up to 64M, but did the systems with more than
1299 16M of BCACHE ever exist? */
1300#define MAX_BCACHE_SIZE 16*1024*1024
1301
1302/* Note that the offchip caches are direct mapped on all Alphas. */
1303static int __init
1304external_cache_probe(int minsize, int width)
1305{
1306 int cycles, prev_cycles = 1000000;
1307 int stride = 1 << width;
1308 long size = minsize, maxsize = MAX_BCACHE_SIZE * 2;
1309
1310 if (maxsize > (max_low_pfn + 1) << PAGE_SHIFT)
1311 maxsize = 1 << (ilog2(max_low_pfn + 1) + PAGE_SHIFT);
1312
1313 /* Get the first block cached. */
1314 read_mem_block(__va(0), stride, size);
1315
1316 while (size < maxsize) {
1317 /* Get an average load latency in cycles. */
1318 cycles = read_mem_block(__va(0), stride, size);
1319 if (cycles > prev_cycles * 2) {
1320 /* Fine, we exceed the cache. */
1321 printk("%ldK Bcache detected; load hit latency %d "
1322 "cycles, load miss latency %d cycles\n",
1323 size >> 11, prev_cycles, cycles);
1324 return CSHAPE(size >> 1, width, 1);
1325 }
1326 /* Try to get the next block cached. */
1327 read_mem_block(__va(size), stride, size);
1328 prev_cycles = cycles;
1329 size <<= 1;
1330 }
1331 return -1; /* No BCACHE found. */
1332}
1333
1334static void __init
1335determine_cpu_caches (unsigned int cpu_type)
1336{
1337 int L1I, L1D, L2, L3;
1338
1339 switch (cpu_type) {
1340 case EV4_CPU:
1341 case EV45_CPU:
1342 {
1343 if (cpu_type == EV4_CPU)
1344 L1I = CSHAPE(8*1024, 5, 1);
1345 else
1346 L1I = CSHAPE(16*1024, 5, 1);
1347 L1D = L1I;
1348 L3 = -1;
1349
1350 /* BIU_CTL is a write-only Abox register. PALcode has a
1351 shadow copy, and may be available from some versions
1352 of the CSERVE PALcall. If we can get it, then
1353
1354 unsigned long biu_ctl, size;
1355 size = 128*1024 * (1 << ((biu_ctl >> 28) & 7));
1356 L2 = CSHAPE (size, 5, 1);
1357
1358 Unfortunately, we can't rely on that.
1359 */
1360 L2 = external_cache_probe(128*1024, 5);
1361 break;
1362 }
1363
1364 case LCA4_CPU:
1365 {
1366 unsigned long car, size;
1367
1368 L1I = L1D = CSHAPE(8*1024, 5, 1);
1369 L3 = -1;
1370
1371 car = *(vuip) phys_to_virt (0x120000078UL);
1372 size = 64*1024 * (1 << ((car >> 5) & 7));
1373 /* No typo -- 8 byte cacheline size. Whodathunk. */
1374 L2 = (car & 1 ? CSHAPE (size, 3, 1) : -1);
1375 break;
1376 }
1377
1378 case EV5_CPU:
1379 case EV56_CPU:
1380 {
1381 unsigned long sc_ctl, width;
1382
1383 L1I = L1D = CSHAPE(8*1024, 5, 1);
1384
1385 /* Check the line size of the Scache. */
1386 sc_ctl = *(vulp) phys_to_virt (0xfffff000a8UL);
1387 width = sc_ctl & 0x1000 ? 6 : 5;
1388 L2 = CSHAPE (96*1024, width, 3);
1389
1390 /* BC_CONTROL and BC_CONFIG are write-only IPRs. PALcode
1391 has a shadow copy, and may be available from some versions
1392 of the CSERVE PALcall. If we can get it, then
1393
1394 unsigned long bc_control, bc_config, size;
1395 size = 1024*1024 * (1 << ((bc_config & 7) - 1));
1396 L3 = (bc_control & 1 ? CSHAPE (size, width, 1) : -1);
1397
1398 Unfortunately, we can't rely on that.
1399 */
1400 L3 = external_cache_probe(1024*1024, width);
1401 break;
1402 }
1403
1404 case PCA56_CPU:
1405 case PCA57_CPU:
1406 {
1407 if (cpu_type == PCA56_CPU) {
1408 L1I = CSHAPE(16*1024, 6, 1);
1409 L1D = CSHAPE(8*1024, 5, 1);
1410 } else {
1411 L1I = CSHAPE(32*1024, 6, 2);
1412 L1D = CSHAPE(16*1024, 5, 1);
1413 }
1414 L3 = -1;
1415
1416#if 0
1417 unsigned long cbox_config, size;
1418
1419 cbox_config = *(vulp) phys_to_virt (0xfffff00008UL);
1420 size = 512*1024 * (1 << ((cbox_config >> 12) & 3));
1421
1422 L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1);
1423#else
1424 L2 = external_cache_probe(512*1024, 6);
1425#endif
1426 break;
1427 }
1428
1429 case EV6_CPU:
1430 case EV67_CPU:
1431 case EV68CB_CPU:
1432 case EV68AL_CPU:
1433 case EV68CX_CPU:
1434 case EV69_CPU:
1435 L1I = L1D = CSHAPE(64*1024, 6, 2);
1436 L2 = external_cache_probe(1024*1024, 6);
1437 L3 = -1;
1438 break;
1439
1440 case EV7_CPU:
1441 case EV79_CPU:
1442 L1I = L1D = CSHAPE(64*1024, 6, 2);
1443 L2 = CSHAPE(7*1024*1024/4, 6, 7);
1444 L3 = -1;
1445 break;
1446
1447 default:
1448 /* Nothing known about this cpu type. */
1449 L1I = L1D = L2 = L3 = 0;
1450 break;
1451 }
1452
1453 alpha_l1i_cacheshape = L1I;
1454 alpha_l1d_cacheshape = L1D;
1455 alpha_l2_cacheshape = L2;
1456 alpha_l3_cacheshape = L3;
1457}
1458
1459/*
1460 * We show only CPU #0 info.
1461 */
1462static void *
1463c_start(struct seq_file *f, loff_t *pos)
1464{
1465 return *pos ? NULL : (char *)hwrpb + hwrpb->processor_offset;
1466}
1467
1468static void *
1469c_next(struct seq_file *f, void *v, loff_t *pos)
1470{
1471 return NULL;
1472}
1473
1474static void
1475c_stop(struct seq_file *f, void *v)
1476{
1477}
1478
1479const struct seq_operations cpuinfo_op = {
1480 .start = c_start,
1481 .next = c_next,
1482 .stop = c_stop,
1483 .show = show_cpuinfo,
1484};
1485
1486
1487static int
1488alpha_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
1489{
1490#if 1
1491 /* FIXME FIXME FIXME */
1492 /* If we are using SRM and serial console, just hard halt here. */
1493 if (alpha_using_srm && srmcons_output)
1494 __halt();
1495#endif
1496 return NOTIFY_DONE;
1497}
1498
1499static __init int add_pcspkr(void)
1500{
1501 struct platform_device *pd;
1502 int ret;
1503
1504 pd = platform_device_alloc("pcspkr", -1);
1505 if (!pd)
1506 return -ENOMEM;
1507
1508 ret = platform_device_add(pd);
1509 if (ret)
1510 platform_device_put(pd);
1511
1512 return ret;
1513}
1514device_initcall(add_pcspkr);
1/*
2 * linux/arch/alpha/kernel/setup.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 */
6
7/* 2.3.x bootmem, 1999 Andrea Arcangeli <andrea@suse.de> */
8
9/*
10 * Bootup setup stuff.
11 */
12
13#include <linux/sched.h>
14#include <linux/kernel.h>
15#include <linux/mm.h>
16#include <linux/stddef.h>
17#include <linux/unistd.h>
18#include <linux/ptrace.h>
19#include <linux/slab.h>
20#include <linux/user.h>
21#include <linux/screen_info.h>
22#include <linux/delay.h>
23#include <linux/mc146818rtc.h>
24#include <linux/console.h>
25#include <linux/cpu.h>
26#include <linux/errno.h>
27#include <linux/init.h>
28#include <linux/string.h>
29#include <linux/ioport.h>
30#include <linux/platform_device.h>
31#include <linux/bootmem.h>
32#include <linux/pci.h>
33#include <linux/seq_file.h>
34#include <linux/root_dev.h>
35#include <linux/initrd.h>
36#include <linux/eisa.h>
37#include <linux/pfn.h>
38#ifdef CONFIG_MAGIC_SYSRQ
39#include <linux/sysrq.h>
40#include <linux/reboot.h>
41#endif
42#include <linux/notifier.h>
43#include <asm/setup.h>
44#include <asm/io.h>
45#include <linux/log2.h>
46#include <linux/export.h>
47
48extern struct atomic_notifier_head panic_notifier_list;
49static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
50static struct notifier_block alpha_panic_block = {
51 alpha_panic_event,
52 NULL,
53 INT_MAX /* try to do it first */
54};
55
56#include <asm/uaccess.h>
57#include <asm/pgtable.h>
58#include <asm/hwrpb.h>
59#include <asm/dma.h>
60#include <asm/mmu_context.h>
61#include <asm/console.h>
62
63#include "proto.h"
64#include "pci_impl.h"
65
66
67struct hwrpb_struct *hwrpb;
68EXPORT_SYMBOL(hwrpb);
69unsigned long srm_hae;
70
71int alpha_l1i_cacheshape;
72int alpha_l1d_cacheshape;
73int alpha_l2_cacheshape;
74int alpha_l3_cacheshape;
75
76#ifdef CONFIG_VERBOSE_MCHECK
77/* 0=minimum, 1=verbose, 2=all */
78/* These can be overridden via the command line, ie "verbose_mcheck=2") */
79unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
80#endif
81
82#ifdef CONFIG_NUMA
83struct cpumask node_to_cpumask_map[MAX_NUMNODES] __read_mostly;
84EXPORT_SYMBOL(node_to_cpumask_map);
85#endif
86
87/* Which processor we booted from. */
88int boot_cpuid;
89
90/*
91 * Using SRM callbacks for initial console output. This works from
92 * setup_arch() time through the end of time_init(), as those places
93 * are under our (Alpha) control.
94
95 * "srmcons" specified in the boot command arguments allows us to
96 * see kernel messages during the period of time before the true
97 * console device is "registered" during console_init().
98 * As of this version (2.5.59), console_init() will call
99 * disable_early_printk() as the last action before initializing
100 * the console drivers. That's the last possible time srmcons can be
101 * unregistered without interfering with console behavior.
102 *
103 * By default, OFF; set it with a bootcommand arg of "srmcons" or
104 * "console=srm". The meaning of these two args is:
105 * "srmcons" - early callback prints
106 * "console=srm" - full callback based console, including early prints
107 */
108int srmcons_output = 0;
109
110/* Enforce a memory size limit; useful for testing. By default, none. */
111unsigned long mem_size_limit = 0;
112
113/* Set AGP GART window size (0 means disabled). */
114unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;
115
116#ifdef CONFIG_ALPHA_GENERIC
117struct alpha_machine_vector alpha_mv;
118#endif
119
120#ifndef alpha_using_srm
121int alpha_using_srm;
122EXPORT_SYMBOL(alpha_using_srm);
123#endif
124
125#ifndef alpha_using_qemu
126int alpha_using_qemu;
127#endif
128
129static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
130 unsigned long);
131static struct alpha_machine_vector *get_sysvec_byname(const char *);
132static void get_sysnames(unsigned long, unsigned long, unsigned long,
133 char **, char **);
134static void determine_cpu_caches (unsigned int);
135
136static char __initdata command_line[COMMAND_LINE_SIZE];
137
138/*
139 * The format of "screen_info" is strange, and due to early
140 * i386-setup code. This is just enough to make the console
141 * code think we're on a VGA color display.
142 */
143
144struct screen_info screen_info = {
145 .orig_x = 0,
146 .orig_y = 25,
147 .orig_video_cols = 80,
148 .orig_video_lines = 25,
149 .orig_video_isVGA = 1,
150 .orig_video_points = 16
151};
152
153EXPORT_SYMBOL(screen_info);
154
155/*
156 * The direct map I/O window, if any. This should be the same
157 * for all busses, since it's used by virt_to_bus.
158 */
159
160unsigned long __direct_map_base;
161unsigned long __direct_map_size;
162EXPORT_SYMBOL(__direct_map_base);
163EXPORT_SYMBOL(__direct_map_size);
164
165/*
166 * Declare all of the machine vectors.
167 */
168
169/* GCC 2.7.2 (on alpha at least) is lame. It does not support either
170 __attribute__((weak)) or #pragma weak. Bypass it and talk directly
171 to the assembler. */
172
173#define WEAK(X) \
174 extern struct alpha_machine_vector X; \
175 asm(".weak "#X)
176
177WEAK(alcor_mv);
178WEAK(alphabook1_mv);
179WEAK(avanti_mv);
180WEAK(cabriolet_mv);
181WEAK(clipper_mv);
182WEAK(dp264_mv);
183WEAK(eb164_mv);
184WEAK(eb64p_mv);
185WEAK(eb66_mv);
186WEAK(eb66p_mv);
187WEAK(eiger_mv);
188WEAK(jensen_mv);
189WEAK(lx164_mv);
190WEAK(lynx_mv);
191WEAK(marvel_ev7_mv);
192WEAK(miata_mv);
193WEAK(mikasa_mv);
194WEAK(mikasa_primo_mv);
195WEAK(monet_mv);
196WEAK(nautilus_mv);
197WEAK(noname_mv);
198WEAK(noritake_mv);
199WEAK(noritake_primo_mv);
200WEAK(p2k_mv);
201WEAK(pc164_mv);
202WEAK(privateer_mv);
203WEAK(rawhide_mv);
204WEAK(ruffian_mv);
205WEAK(rx164_mv);
206WEAK(sable_mv);
207WEAK(sable_gamma_mv);
208WEAK(shark_mv);
209WEAK(sx164_mv);
210WEAK(takara_mv);
211WEAK(titan_mv);
212WEAK(webbrick_mv);
213WEAK(wildfire_mv);
214WEAK(xl_mv);
215WEAK(xlt_mv);
216
217#undef WEAK
218
219/*
220 * I/O resources inherited from PeeCees. Except for perhaps the
221 * turbochannel alphas, everyone has these on some sort of SuperIO chip.
222 *
223 * ??? If this becomes less standard, move the struct out into the
224 * machine vector.
225 */
226
227static void __init
228reserve_std_resources(void)
229{
230 static struct resource standard_io_resources[] = {
231 { .name = "rtc", .start = -1, .end = -1 },
232 { .name = "dma1", .start = 0x00, .end = 0x1f },
233 { .name = "pic1", .start = 0x20, .end = 0x3f },
234 { .name = "timer", .start = 0x40, .end = 0x5f },
235 { .name = "keyboard", .start = 0x60, .end = 0x6f },
236 { .name = "dma page reg", .start = 0x80, .end = 0x8f },
237 { .name = "pic2", .start = 0xa0, .end = 0xbf },
238 { .name = "dma2", .start = 0xc0, .end = 0xdf },
239 };
240
241 struct resource *io = &ioport_resource;
242 size_t i;
243
244 if (hose_head) {
245 struct pci_controller *hose;
246 for (hose = hose_head; hose; hose = hose->next)
247 if (hose->index == 0) {
248 io = hose->io_space;
249 break;
250 }
251 }
252
253 /* Fix up for the Jensen's queer RTC placement. */
254 standard_io_resources[0].start = RTC_PORT(0);
255 standard_io_resources[0].end = RTC_PORT(0) + 0x10;
256
257 for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i)
258 request_resource(io, standard_io_resources+i);
259}
260
261#define PFN_MAX PFN_DOWN(0x80000000)
262#define for_each_mem_cluster(memdesc, _cluster, i) \
263 for ((_cluster) = (memdesc)->cluster, (i) = 0; \
264 (i) < (memdesc)->numclusters; (i)++, (_cluster)++)
265
266static unsigned long __init
267get_mem_size_limit(char *s)
268{
269 unsigned long end = 0;
270 char *from = s;
271
272 end = simple_strtoul(from, &from, 0);
273 if ( *from == 'K' || *from == 'k' ) {
274 end = end << 10;
275 from++;
276 } else if ( *from == 'M' || *from == 'm' ) {
277 end = end << 20;
278 from++;
279 } else if ( *from == 'G' || *from == 'g' ) {
280 end = end << 30;
281 from++;
282 }
283 return end >> PAGE_SHIFT; /* Return the PFN of the limit. */
284}
285
286#ifdef CONFIG_BLK_DEV_INITRD
287void * __init
288move_initrd(unsigned long mem_limit)
289{
290 void *start;
291 unsigned long size;
292
293 size = initrd_end - initrd_start;
294 start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0);
295 if (!start || __pa(start) + size > mem_limit) {
296 initrd_start = initrd_end = 0;
297 return NULL;
298 }
299 memmove(start, (void *)initrd_start, size);
300 initrd_start = (unsigned long)start;
301 initrd_end = initrd_start + size;
302 printk("initrd moved to %p\n", start);
303 return start;
304}
305#endif
306
307#ifndef CONFIG_DISCONTIGMEM
308static void __init
309setup_memory(void *kernel_end)
310{
311 struct memclust_struct * cluster;
312 struct memdesc_struct * memdesc;
313 unsigned long start_kernel_pfn, end_kernel_pfn;
314 unsigned long bootmap_size, bootmap_pages, bootmap_start;
315 unsigned long start, end;
316 unsigned long i;
317
318 /* Find free clusters, and init and free the bootmem accordingly. */
319 memdesc = (struct memdesc_struct *)
320 (hwrpb->mddt_offset + (unsigned long) hwrpb);
321
322 for_each_mem_cluster(memdesc, cluster, i) {
323 printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n",
324 i, cluster->usage, cluster->start_pfn,
325 cluster->start_pfn + cluster->numpages);
326
327 /* Bit 0 is console/PALcode reserved. Bit 1 is
328 non-volatile memory -- we might want to mark
329 this for later. */
330 if (cluster->usage & 3)
331 continue;
332
333 end = cluster->start_pfn + cluster->numpages;
334 if (end > max_low_pfn)
335 max_low_pfn = end;
336 }
337
338 /*
339 * Except for the NUMA systems (wildfire, marvel) all of the
340 * Alpha systems we run on support 32GB of memory or less.
341 * Since the NUMA systems introduce large holes in memory addressing,
342 * we can get into a situation where there is not enough contiguous
343 * memory for the memory map.
344 *
345 * Limit memory to the first 32GB to limit the NUMA systems to
346 * memory on their first node (wildfire) or 2 (marvel) to avoid
347 * not being able to produce the memory map. In order to access
348 * all of the memory on the NUMA systems, build with discontiguous
349 * memory support.
350 *
351 * If the user specified a memory limit, let that memory limit stand.
352 */
353 if (!mem_size_limit)
354 mem_size_limit = (32ul * 1024 * 1024 * 1024) >> PAGE_SHIFT;
355
356 if (mem_size_limit && max_low_pfn >= mem_size_limit)
357 {
358 printk("setup: forcing memory size to %ldK (from %ldK).\n",
359 mem_size_limit << (PAGE_SHIFT - 10),
360 max_low_pfn << (PAGE_SHIFT - 10));
361 max_low_pfn = mem_size_limit;
362 }
363
364 /* Find the bounds of kernel memory. */
365 start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
366 end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
367 bootmap_start = -1;
368
369 try_again:
370 if (max_low_pfn <= end_kernel_pfn)
371 panic("not enough memory to boot");
372
373 /* We need to know how many physically contiguous pages
374 we'll need for the bootmap. */
375 bootmap_pages = bootmem_bootmap_pages(max_low_pfn);
376
377 /* Now find a good region where to allocate the bootmap. */
378 for_each_mem_cluster(memdesc, cluster, i) {
379 if (cluster->usage & 3)
380 continue;
381
382 start = cluster->start_pfn;
383 end = start + cluster->numpages;
384 if (start >= max_low_pfn)
385 continue;
386 if (end > max_low_pfn)
387 end = max_low_pfn;
388 if (start < start_kernel_pfn) {
389 if (end > end_kernel_pfn
390 && end - end_kernel_pfn >= bootmap_pages) {
391 bootmap_start = end_kernel_pfn;
392 break;
393 } else if (end > start_kernel_pfn)
394 end = start_kernel_pfn;
395 } else if (start < end_kernel_pfn)
396 start = end_kernel_pfn;
397 if (end - start >= bootmap_pages) {
398 bootmap_start = start;
399 break;
400 }
401 }
402
403 if (bootmap_start == ~0UL) {
404 max_low_pfn >>= 1;
405 goto try_again;
406 }
407
408 /* Allocate the bootmap and mark the whole MM as reserved. */
409 bootmap_size = init_bootmem(bootmap_start, max_low_pfn);
410
411 /* Mark the free regions. */
412 for_each_mem_cluster(memdesc, cluster, i) {
413 if (cluster->usage & 3)
414 continue;
415
416 start = cluster->start_pfn;
417 end = cluster->start_pfn + cluster->numpages;
418 if (start >= max_low_pfn)
419 continue;
420 if (end > max_low_pfn)
421 end = max_low_pfn;
422 if (start < start_kernel_pfn) {
423 if (end > end_kernel_pfn) {
424 free_bootmem(PFN_PHYS(start),
425 (PFN_PHYS(start_kernel_pfn)
426 - PFN_PHYS(start)));
427 printk("freeing pages %ld:%ld\n",
428 start, start_kernel_pfn);
429 start = end_kernel_pfn;
430 } else if (end > start_kernel_pfn)
431 end = start_kernel_pfn;
432 } else if (start < end_kernel_pfn)
433 start = end_kernel_pfn;
434 if (start >= end)
435 continue;
436
437 free_bootmem(PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start));
438 printk("freeing pages %ld:%ld\n", start, end);
439 }
440
441 /* Reserve the bootmap memory. */
442 reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size,
443 BOOTMEM_DEFAULT);
444 printk("reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));
445
446#ifdef CONFIG_BLK_DEV_INITRD
447 initrd_start = INITRD_START;
448 if (initrd_start) {
449 initrd_end = initrd_start+INITRD_SIZE;
450 printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
451 (void *) initrd_start, INITRD_SIZE);
452
453 if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
454 if (!move_initrd(PFN_PHYS(max_low_pfn)))
455 printk("initrd extends beyond end of memory "
456 "(0x%08lx > 0x%p)\ndisabling initrd\n",
457 initrd_end,
458 phys_to_virt(PFN_PHYS(max_low_pfn)));
459 } else {
460 reserve_bootmem(virt_to_phys((void *)initrd_start),
461 INITRD_SIZE, BOOTMEM_DEFAULT);
462 }
463 }
464#endif /* CONFIG_BLK_DEV_INITRD */
465}
466#else
467extern void setup_memory(void *);
468#endif /* !CONFIG_DISCONTIGMEM */
469
470int __init
471page_is_ram(unsigned long pfn)
472{
473 struct memclust_struct * cluster;
474 struct memdesc_struct * memdesc;
475 unsigned long i;
476
477 memdesc = (struct memdesc_struct *)
478 (hwrpb->mddt_offset + (unsigned long) hwrpb);
479 for_each_mem_cluster(memdesc, cluster, i)
480 {
481 if (pfn >= cluster->start_pfn &&
482 pfn < cluster->start_pfn + cluster->numpages) {
483 return (cluster->usage & 3) ? 0 : 1;
484 }
485 }
486
487 return 0;
488}
489
490static int __init
491register_cpus(void)
492{
493 int i;
494
495 for_each_possible_cpu(i) {
496 struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
497 if (!p)
498 return -ENOMEM;
499 register_cpu(p, i);
500 }
501 return 0;
502}
503
504arch_initcall(register_cpus);
505
506void __init
507setup_arch(char **cmdline_p)
508{
509 extern char _end[];
510
511 struct alpha_machine_vector *vec = NULL;
512 struct percpu_struct *cpu;
513 char *type_name, *var_name, *p;
514 void *kernel_end = _end; /* end of kernel */
515 char *args = command_line;
516
517 hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr);
518 boot_cpuid = hard_smp_processor_id();
519
520 /*
521 * Pre-process the system type to make sure it will be valid.
522 *
523 * This may restore the real CABRIO and EB66+ family names,
524 * i.e. EB64+ and EB66.
525 *
526 * Oh, and "white box" AS800 (aka DIGITAL Server 3000 series)
527 * and AS1200 (DIGITAL Server 5000 series) have the type as
528 * the negative of the real one.
529 */
530 if ((long)hwrpb->sys_type < 0) {
531 hwrpb->sys_type = -((long)hwrpb->sys_type);
532 hwrpb_update_checksum(hwrpb);
533 }
534
535 /* Register a call for panic conditions. */
536 atomic_notifier_chain_register(&panic_notifier_list,
537 &alpha_panic_block);
538
539#ifndef alpha_using_srm
540 /* Assume that we've booted from SRM if we haven't booted from MILO.
541 Detect the latter by looking for "MILO" in the system serial number. */
542 alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0;
543#endif
544#ifndef alpha_using_qemu
545 /* Similarly, look for QEMU. */
546 alpha_using_qemu = strstr((const char *)hwrpb->ssn, "QEMU") != 0;
547#endif
548
549 /* If we are using SRM, we want to allow callbacks
550 as early as possible, so do this NOW, and then
551 they should work immediately thereafter.
552 */
553 kernel_end = callback_init(kernel_end);
554
555 /*
556 * Locate the command line.
557 */
558 /* Hack for Jensen... since we're restricted to 8 or 16 chars for
559 boot flags depending on the boot mode, we need some shorthand.
560 This should do for installation. */
561 if (strcmp(COMMAND_LINE, "INSTALL") == 0) {
562 strlcpy(command_line, "root=/dev/fd0 load_ramdisk=1", sizeof command_line);
563 } else {
564 strlcpy(command_line, COMMAND_LINE, sizeof command_line);
565 }
566 strcpy(boot_command_line, command_line);
567 *cmdline_p = command_line;
568
569 /*
570 * Process command-line arguments.
571 */
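/*
 * The recognized options are handled below; for illustration, booting
 * with "srmcons verbose_mcheck=1" would enable early SRM console
 * callbacks and raise the machine-check verbosity, and "alpha_mv=" can
 * force a specific machine vector by name.
 */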
572 while ((p = strsep(&args, " \t")) != NULL) {
573 if (!*p) continue;
574 if (strncmp(p, "alpha_mv=", 9) == 0) {
575 vec = get_sysvec_byname(p+9);
576 continue;
577 }
578 if (strncmp(p, "cycle=", 6) == 0) {
579 est_cycle_freq = simple_strtol(p+6, NULL, 0);
580 continue;
581 }
582 if (strncmp(p, "mem=", 4) == 0) {
583 mem_size_limit = get_mem_size_limit(p+4);
584 continue;
585 }
586 if (strncmp(p, "srmcons", 7) == 0) {
587 srmcons_output |= 1;
588 continue;
589 }
590 if (strncmp(p, "console=srm", 11) == 0) {
591 srmcons_output |= 2;
592 continue;
593 }
594 if (strncmp(p, "gartsize=", 9) == 0) {
595 alpha_agpgart_size =
596 get_mem_size_limit(p+9) << PAGE_SHIFT;
597 continue;
598 }
599#ifdef CONFIG_VERBOSE_MCHECK
600 if (strncmp(p, "verbose_mcheck=", 15) == 0) {
601 alpha_verbose_mcheck = simple_strtol(p+15, NULL, 0);
602 continue;
603 }
604#endif
605 }
606
607 /* Replace the command line, now that we've killed it with strsep. */
608 strcpy(command_line, boot_command_line);
609
610 /* If we want SRM console printk echoing early, do it now. */
611 if (alpha_using_srm && srmcons_output) {
612 register_srm_console();
613
614 /*
615 * If "console=srm" was specified, clear the srmcons_output
616 * flag now so that time.c won't unregister_srm_console
617 */
618 if (srmcons_output & 2)
619 srmcons_output = 0;
620 }
621
622#ifdef CONFIG_MAGIC_SYSRQ
623 /* If we're using SRM, make sysrq-b halt back to the prom,
624 not auto-reboot. */
625 if (alpha_using_srm) {
626 struct sysrq_key_op *op = __sysrq_get_key_op('b');
627 op->handler = (void *) machine_halt;
628 }
629#endif
630
631 /*
632 * Identify and reconfigure for the current system.
633 */
634 cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
635
636 get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
637 cpu->type, &type_name, &var_name);
638 if (*var_name == '0')
639 var_name = "";
640
641 if (!vec) {
642 vec = get_sysvec(hwrpb->sys_type, hwrpb->sys_variation,
643 cpu->type);
644 }
645
646 if (!vec) {
647 panic("Unsupported system type: %s%s%s (%ld %ld)\n",
648 type_name, (*var_name ? " variation " : ""), var_name,
649 hwrpb->sys_type, hwrpb->sys_variation);
650 }
651 if (vec != &alpha_mv) {
652 alpha_mv = *vec;
653 }
654
655 printk("Booting "
656#ifdef CONFIG_ALPHA_GENERIC
657 "GENERIC "
658#endif
659 "on %s%s%s using machine vector %s from %s\n",
660 type_name, (*var_name ? " variation " : ""),
661 var_name, alpha_mv.vector_name,
662 (alpha_using_srm ? "SRM" : "MILO"));
663
664 printk("Major Options: "
665#ifdef CONFIG_SMP
666 "SMP "
667#endif
668#ifdef CONFIG_ALPHA_EV56
669 "EV56 "
670#endif
671#ifdef CONFIG_ALPHA_EV67
672 "EV67 "
673#endif
674#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
675 "LEGACY_START "
676#endif
677#ifdef CONFIG_VERBOSE_MCHECK
678 "VERBOSE_MCHECK "
679#endif
680
681#ifdef CONFIG_DISCONTIGMEM
682 "DISCONTIGMEM "
683#ifdef CONFIG_NUMA
684 "NUMA "
685#endif
686#endif
687
688#ifdef CONFIG_DEBUG_SPINLOCK
689 "DEBUG_SPINLOCK "
690#endif
691#ifdef CONFIG_MAGIC_SYSRQ
692 "MAGIC_SYSRQ "
693#endif
694 "\n");
695
696 printk("Command line: %s\n", command_line);
697
698 /*
699 * Sync up the HAE.
700 * Save the SRM's current value for restoration.
701 */
702 srm_hae = *alpha_mv.hae_register;
703 __set_hae(alpha_mv.hae_cache);
704
705 /* Clear pending machine checks and enable correctable error reports. */
706 wrmces(0x7);
707
708 /* Find our memory. */
709 setup_memory(kernel_end);
710
711 /* First guess at cpu cache sizes. Do this before init_arch. */
712 determine_cpu_caches(cpu->type);
713
714 /* Initialize the machine. Usually has to do with setting up
715 DMA windows and the like. */
716 if (alpha_mv.init_arch)
717 alpha_mv.init_arch();
718
719 /* Reserve standard resources. */
720 reserve_std_resources();
721
722 /*
723 * Give us a default console. TGA users will see nothing until
724 * chr_dev_init is called, rather late in the boot sequence.
725 */
726
727#ifdef CONFIG_VT
728#if defined(CONFIG_VGA_CONSOLE)
729 conswitchp = &vga_con;
730#elif defined(CONFIG_DUMMY_CONSOLE)
731 conswitchp = &dummy_con;
732#endif
733#endif
734
735 /* Default root filesystem to sda2. */
736 ROOT_DEV = Root_SDA2;
737
738#ifdef CONFIG_EISA
739 /* FIXME: only set this when we actually have EISA in this box? */
740 EISA_bus = 1;
741#endif
742
743 /*
744 * Check ASN in HWRPB for validity, report if bad.
745 * FIXME: how was this failing? Should we trust it instead,
746 * and copy the value into alpha_mv.max_asn?
747 */
748
749 if (hwrpb->max_asn != MAX_ASN) {
750 printk("Max ASN from HWRPB is bad (0x%lx)\n", hwrpb->max_asn);
751 }
752
753 /*
754 * Identify the flock of penguins.
755 */
756
757#ifdef CONFIG_SMP
758 setup_smp();
759#endif
760 paging_init();
761}
762
763static char sys_unknown[] = "Unknown";
764static char systype_names[][16] = {
765 "0",
766 "ADU", "Cobra", "Ruby", "Flamingo", "Mannequin", "Jensen",
767 "Pelican", "Morgan", "Sable", "Medulla", "Noname",
768 "Turbolaser", "Avanti", "Mustang", "Alcor", "Tradewind",
769 "Mikasa", "EB64", "EB66", "EB64+", "AlphaBook1",
770 "Rawhide", "K2", "Lynx", "XL", "EB164", "Noritake",
771 "Cortex", "29", "Miata", "XXM", "Takara", "Yukon",
772 "Tsunami", "Wildfire", "CUSCO", "Eiger", "Titan", "Marvel"
773};
774
775static char unofficial_names[][8] = {"100", "Ruffian"};
776
777static char api_names[][16] = {"200", "Nautilus"};
778
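/*
 * The *_names/*_indices pairs below map the member ID field of
 * hwrpb->sys_variation (bits 10-15, extracted in get_sysvec() and
 * get_sysnames() below) to a printable name and a table slot.
 */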
779static char eb164_names[][8] = {"EB164", "PC164", "LX164", "SX164", "RX164"};
780static int eb164_indices[] = {0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,4};
781
782static char alcor_names[][16] = {"Alcor", "Maverick", "Bret"};
783static int alcor_indices[] = {0,0,0,1,1,1,0,0,0,0,0,0,2,2,2,2,2,2};
784
785static char eb64p_names[][16] = {"EB64+", "Cabriolet", "AlphaPCI64"};
786static int eb64p_indices[] = {0,0,1,2};
787
788static char eb66_names[][8] = {"EB66", "EB66+"};
789static int eb66_indices[] = {0,0,1};
790
791static char marvel_names[][16] = {
792 "Marvel/EV7"
793};
794static int marvel_indices[] = { 0 };
795
796static char rawhide_names[][16] = {
797 "Dodge", "Wrangler", "Durango", "Tincup", "DaVinci"
798};
799static int rawhide_indices[] = {0,0,0,1,1,2,2,3,3,4,4};
800
801static char titan_names[][16] = {
802 "DEFAULT", "Privateer", "Falcon", "Granite"
803};
804static int titan_indices[] = {0,1,2,2,3};
805
806static char tsunami_names[][16] = {
807 "0", "DP264", "Warhol", "Windjammer", "Monet", "Clipper",
808 "Goldrush", "Webbrick", "Catamaran", "Brisbane", "Melbourne",
809 "Flying Clipper", "Shark"
810};
811static int tsunami_indices[] = {0,1,2,3,4,5,6,7,8,9,10,11,12};
812
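/*
 * Map the HWRPB system type, variation, and cpu type to a machine
 * vector. A NULL return means the system is unsupported, and
 * setup_arch() panics accordingly.
 */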
813static struct alpha_machine_vector * __init
814get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
815{
816 static struct alpha_machine_vector *systype_vecs[] __initdata =
817 {
818 NULL, /* 0 */
819 NULL, /* ADU */
820 NULL, /* Cobra */
821 NULL, /* Ruby */
822 NULL, /* Flamingo */
823 NULL, /* Mannequin */
824 &jensen_mv,
825 NULL, /* Pelican */
826 NULL, /* Morgan */
827 NULL, /* Sable -- see below. */
828 NULL, /* Medulla */
829 &noname_mv,
830 NULL, /* Turbolaser */
831 &avanti_mv,
832 NULL, /* Mustang */
833 NULL, /* Alcor, Bret, Maverick. HWRPB inaccurate? */
834 NULL, /* Tradewind */
835 NULL, /* Mikasa -- see below. */
836 NULL, /* EB64 */
837 NULL, /* EB66 -- see variation. */
838 NULL, /* EB64+ -- see variation. */
839 &alphabook1_mv,
840 &rawhide_mv,
841 NULL, /* K2 */
842 &lynx_mv, /* Lynx */
843 &xl_mv,
844 NULL, /* EB164 -- see variation. */
845 NULL, /* Noritake -- see below. */
846 NULL, /* Cortex */
847 NULL, /* 29 */
848 &miata_mv,
849 NULL, /* XXM */
850 &takara_mv,
851 NULL, /* Yukon */
852 NULL, /* Tsunami -- see variation. */
853 &wildfire_mv, /* Wildfire */
854 NULL, /* CUSCO */
855 &eiger_mv, /* Eiger */
856 NULL, /* Titan */
857 NULL, /* Marvel */
858 };
859
860 static struct alpha_machine_vector *unofficial_vecs[] __initdata =
861 {
862 NULL, /* 100 */
863 &ruffian_mv,
864 };
865
866 static struct alpha_machine_vector *api_vecs[] __initdata =
867 {
868 NULL, /* 200 */
869 &nautilus_mv,
870 };
871
872 static struct alpha_machine_vector *alcor_vecs[] __initdata =
873 {
874 &alcor_mv, &xlt_mv, &xlt_mv
875 };
876
877 static struct alpha_machine_vector *eb164_vecs[] __initdata =
878 {
879 &eb164_mv, &pc164_mv, &lx164_mv, &sx164_mv, &rx164_mv
880 };
881
882 static struct alpha_machine_vector *eb64p_vecs[] __initdata =
883 {
884 &eb64p_mv,
885 &cabriolet_mv,
886 &cabriolet_mv /* AlphaPCI64 */
887 };
888
889 static struct alpha_machine_vector *eb66_vecs[] __initdata =
890 {
891 &eb66_mv,
892 &eb66p_mv
893 };
894
895 static struct alpha_machine_vector *marvel_vecs[] __initdata =
896 {
897 &marvel_ev7_mv,
898 };
899
900 static struct alpha_machine_vector *titan_vecs[] __initdata =
901 {
902 &titan_mv, /* default */
903 &privateer_mv, /* privateer */
904 &titan_mv, /* falcon */
905 &privateer_mv, /* granite */
906 };
907
908 static struct alpha_machine_vector *tsunami_vecs[] __initdata =
909 {
910 NULL,
911 &dp264_mv, /* dp264 */
912 &dp264_mv, /* warhol */
913 &dp264_mv, /* windjammer */
914 &monet_mv, /* monet */
915 &clipper_mv, /* clipper */
916 &dp264_mv, /* goldrush */
917 &webbrick_mv, /* webbrick */
918 &dp264_mv, /* catamaran */
919 NULL, /* brisbane? */
920 NULL, /* melbourne? */
921 NULL, /* flying clipper? */
922 &shark_mv, /* shark */
923 };
924
925 /* ??? Do we need to distinguish between Rawhides? */
926
927 struct alpha_machine_vector *vec;
928
929 /* Search the system tables first... */
930 vec = NULL;
931 if (type < ARRAY_SIZE(systype_vecs)) {
932 vec = systype_vecs[type];
933 } else if ((type > ST_API_BIAS) &&
934 (type - ST_API_BIAS) < ARRAY_SIZE(api_vecs)) {
935 vec = api_vecs[type - ST_API_BIAS];
936 } else if ((type > ST_UNOFFICIAL_BIAS) &&
937 (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_vecs)) {
938 vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS];
939 }
940
941 /* If we've not found one, try for a variation. */
942
943 if (!vec) {
944 /* Member ID is a bit-field. */
945 unsigned long member = (variation >> 10) & 0x3f;
946
947 cpu &= 0xffffffff; /* make it usable */
948
949 switch (type) {
950 case ST_DEC_ALCOR:
951 if (member < ARRAY_SIZE(alcor_indices))
952 vec = alcor_vecs[alcor_indices[member]];
953 break;
954 case ST_DEC_EB164:
955 if (member < ARRAY_SIZE(eb164_indices))
956 vec = eb164_vecs[eb164_indices[member]];
957 /* A PC164 may show up as an EB164 variation with an EV56 CPU;
958 since no true EB164 ever had anything but an EV5, treat it as a PC164. */
959 if (vec == &eb164_mv && cpu == EV56_CPU)
960 vec = &pc164_mv;
961 break;
962 case ST_DEC_EB64P:
963 if (member < ARRAY_SIZE(eb64p_indices))
964 vec = eb64p_vecs[eb64p_indices[member]];
965 break;
966 case ST_DEC_EB66:
967 if (member < ARRAY_SIZE(eb66_indices))
968 vec = eb66_vecs[eb66_indices[member]];
969 break;
970 case ST_DEC_MARVEL:
971 if (member < ARRAY_SIZE(marvel_indices))
972 vec = marvel_vecs[marvel_indices[member]];
973 break;
974 case ST_DEC_TITAN:
975 vec = titan_vecs[0]; /* default */
976 if (member < ARRAY_SIZE(titan_indices))
977 vec = titan_vecs[titan_indices[member]];
978 break;
979 case ST_DEC_TSUNAMI:
980 if (member < ARRAY_SIZE(tsunami_indices))
981 vec = tsunami_vecs[tsunami_indices[member]];
982 break;
983 case ST_DEC_1000:
984 if (cpu == EV5_CPU || cpu == EV56_CPU)
985 vec = &mikasa_primo_mv;
986 else
987 vec = &mikasa_mv;
988 break;
989 case ST_DEC_NORITAKE:
990 if (cpu == EV5_CPU || cpu == EV56_CPU)
991 vec = &noritake_primo_mv;
992 else
993 vec = &noritake_mv;
994 break;
995 case ST_DEC_2100_A500:
996 if (cpu == EV5_CPU || cpu == EV56_CPU)
997 vec = &sable_gamma_mv;
998 else
999 vec = &sable_mv;
1000 break;
1001 }
1002 }
1003 return vec;
1004}
1005
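/* Look up a machine vector by name, for the "alpha_mv=" boot argument. */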
1006static struct alpha_machine_vector * __init
1007get_sysvec_byname(const char *name)
1008{
1009 static struct alpha_machine_vector *all_vecs[] __initdata =
1010 {
1011 &alcor_mv,
1012 &alphabook1_mv,
1013 &avanti_mv,
1014 &cabriolet_mv,
1015 &clipper_mv,
1016 &dp264_mv,
1017 &eb164_mv,
1018 &eb64p_mv,
1019 &eb66_mv,
1020 &eb66p_mv,
1021 &eiger_mv,
1022 &jensen_mv,
1023 &lx164_mv,
1024 &lynx_mv,
1025 &miata_mv,
1026 &mikasa_mv,
1027 &mikasa_primo_mv,
1028 &monet_mv,
1029 &nautilus_mv,
1030 &noname_mv,
1031 &noritake_mv,
1032 &noritake_primo_mv,
1033 &p2k_mv,
1034 &pc164_mv,
1035 &privateer_mv,
1036 &rawhide_mv,
1037 &ruffian_mv,
1038 &rx164_mv,
1039 &sable_mv,
1040 &sable_gamma_mv,
1041 &shark_mv,
1042 &sx164_mv,
1043 &takara_mv,
1044 &webbrick_mv,
1045 &wildfire_mv,
1046 &xl_mv,
1047 &xlt_mv
1048 };
1049
1050 size_t i;
1051
1052 for (i = 0; i < ARRAY_SIZE(all_vecs); ++i) {
1053 struct alpha_machine_vector *mv = all_vecs[i];
1054 if (strcasecmp(mv->vector_name, name) == 0)
1055 return mv;
1056 }
1057 return NULL;
1058}
1059
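/*
 * Translate the HWRPB type/variation codes into printable system and
 * variation names for the boot banner and /proc/cpuinfo.
 */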
1060static void
1061get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
1062 char **type_name, char **variation_name)
1063{
1064 unsigned long member;
1065
1066 /* If the type is not in the tables, report it as UNKNOWN;
1067 otherwise set the type name to the family name. */
1068 if (type < ARRAY_SIZE(systype_names)) {
1069 *type_name = systype_names[type];
1070 } else if ((type > ST_API_BIAS) &&
1071 (type - ST_API_BIAS) < ARRAY_SIZE(api_names)) {
1072 *type_name = api_names[type - ST_API_BIAS];
1073 } else if ((type > ST_UNOFFICIAL_BIAS) &&
1074 (type - ST_UNOFFICIAL_BIAS) < ARRAY_SIZE(unofficial_names)) {
1075 *type_name = unofficial_names[type - ST_UNOFFICIAL_BIAS];
1076 } else {
1077 *type_name = sys_unknown;
1078 *variation_name = sys_unknown;
1079 return;
1080 }
1081
1082 /* Set variation to "0"; if variation is zero, done. */
1083 *variation_name = systype_names[0];
1084 if (variation == 0) {
1085 return;
1086 }
1087
1088 member = (variation >> 10) & 0x3f; /* member ID is a bit-field */
1089
1090 cpu &= 0xffffffff; /* make it usable */
1091
1092 switch (type) { /* select by family */
1093 default: /* default to variation "0" for now */
1094 break;
1095 case ST_DEC_EB164:
1096 if (member < ARRAY_SIZE(eb164_indices))
1097 *variation_name = eb164_names[eb164_indices[member]];
1098 /* A PC164 may show up as an EB164 variation, but with an EV56 CPU;
1099 since no true EB164 ever had anything but an EV5, call it a PC164. */
1100 if (eb164_indices[member] == 0 && cpu == EV56_CPU)
1101 *variation_name = eb164_names[1]; /* make it PC164 */
1102 break;
1103 case ST_DEC_ALCOR:
1104 if (member < ARRAY_SIZE(alcor_indices))
1105 *variation_name = alcor_names[alcor_indices[member]];
1106 break;
1107 case ST_DEC_EB64P:
1108 if (member < ARRAY_SIZE(eb64p_indices))
1109 *variation_name = eb64p_names[eb64p_indices[member]];
1110 break;
1111 case ST_DEC_EB66:
1112 if (member < ARRAY_SIZE(eb66_indices))
1113 *variation_name = eb66_names[eb66_indices[member]];
1114 break;
1115 case ST_DEC_MARVEL:
1116 if (member < ARRAY_SIZE(marvel_indices))
1117 *variation_name = marvel_names[marvel_indices[member]];
1118 break;
1119 case ST_DEC_RAWHIDE:
1120 if (member < ARRAY_SIZE(rawhide_indices))
1121 *variation_name = rawhide_names[rawhide_indices[member]];
1122 break;
1123 case ST_DEC_TITAN:
1124 *variation_name = titan_names[0]; /* default */
1125 if (member < ARRAY_SIZE(titan_indices))
1126 *variation_name = titan_names[titan_indices[member]];
1127 break;
1128 case ST_DEC_TSUNAMI:
1129 if (member < ARRAY_SIZE(tsunami_indices))
1130 *variation_name = tsunami_names[tsunami_indices[member]];
1131 break;
1132 }
1133}
1134
1135/*
1136 * A change was made to the HWRPB via an ECO, and the following code
1137 * tracks part of that ECO. In HWRPB revisions below 5 the ECO was
1138 * not implemented in the console firmware. If the revision is 5 or
1139 * greater, we can get the name of the platform as an ASCII string
1140 * from the HWRPB, which is what this function does: it checks the
1141 * revision level and, if the string is present in the HWRPB, returns
1142 * its address--a pointer to the name of the platform.
1143 *
1144 * Returns:
1145 * - Pointer to an ASCII string if it is in the HWRPB.
1146 * - Pointer to the placeholder "N/A" if the data is not in the HWRPB.
1147 */
1148
1149static char *
1150platform_string(void)
1151{
1152 struct dsr_struct *dsr;
1153 static char unk_system_string[] = "N/A";
1154
1155 /* Go to the console for the string pointer.
1156 * If the rpb_vers is not 5 or greater the rpb
1157 * is old and does not have this data in it.
1158 */
1159 if (hwrpb->revision < 5)
1160 return (unk_system_string);
1161 else {
1162 /* The Dynamic System Recognition struct
1163 * has the system platform name starting
1164 * after the character count of the string.
1165 */
1166 dsr = ((struct dsr_struct *)
1167 ((char *)hwrpb + hwrpb->dsr_offset));
1168 return ((char *)dsr + (dsr->sysname_off +
1169 sizeof(long)));
1170 }
1171}
1172
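/*
 * Count the processors the console reports as usable. The 0x1cc mask
 * appears to require the "processor available", "processor present"
 * and PALcode valid/loaded flag bits of the per-cpu slot, so absent
 * or halted CPUs are not counted.
 */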
1173static int
1174get_nr_processors(struct percpu_struct *cpubase, unsigned long num)
1175{
1176 struct percpu_struct *cpu;
1177 unsigned long i;
1178 int count = 0;
1179
1180 for (i = 0; i < num; i++) {
1181 cpu = (struct percpu_struct *)
1182 ((char *)cpubase + i*hwrpb->processor_size);
1183 if ((cpu->flags & 0x1cc) == 0x1cc)
1184 count++;
1185 }
1186 return count;
1187}
1188
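/*
 * Print one cache description for /proc/cpuinfo. The "shape" value is
 * the packed encoding produced by CSHAPE() below; for example, 0x18063
 * decodes as "96K, 3-way, 64b line".
 */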
1189static void
1190show_cache_size (struct seq_file *f, const char *which, int shape)
1191{
1192 if (shape == -1)
1193 seq_printf (f, "%s\t\t: n/a\n", which);
1194 else if (shape == 0)
1195 seq_printf (f, "%s\t\t: unknown\n", which);
1196 else
1197 seq_printf (f, "%s\t\t: %dK, %d-way, %db line\n",
1198 which, shape >> 10, shape & 15,
1199 1 << ((shape >> 4) & 15));
1200}
1201
1202static int
1203show_cpuinfo(struct seq_file *f, void *slot)
1204{
1205 extern struct unaligned_stat {
1206 unsigned long count, va, pc;
1207 } unaligned[2];
1208
1209 static char cpu_names[][8] = {
1210 "EV3", "EV4", "Simulate", "LCA4", "EV5", "EV45", "EV56",
1211 "EV6", "PCA56", "PCA57", "EV67", "EV68CB", "EV68AL",
1212 "EV68CX", "EV7", "EV79", "EV69"
1213 };
1214
1215 struct percpu_struct *cpu = slot;
1216 unsigned int cpu_index;
1217 char *cpu_name;
1218 char *systype_name;
1219 char *sysvariation_name;
1220 int nr_processors;
1221 unsigned long timer_freq;
1222
1223 cpu_index = (unsigned) (cpu->type - 1);
1224 cpu_name = "Unknown";
1225 if (cpu_index < ARRAY_SIZE(cpu_names))
1226 cpu_name = cpu_names[cpu_index];
1227
1228 get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
1229 cpu->type, &systype_name, &sysvariation_name);
1230
1231 nr_processors = get_nr_processors(cpu, hwrpb->nr_processors);
1232
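/*
 * hwrpb->intr_freq appears to be the interval timer frequency scaled
 * by 4096; the factor of 100 keeps two decimal places for the
 * "timer frequency" line printed below.
 */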
1233#if CONFIG_HZ == 1024 || CONFIG_HZ == 1200
1234 timer_freq = (100UL * hwrpb->intr_freq) / 4096;
1235#else
1236 timer_freq = 100UL * CONFIG_HZ;
1237#endif
1238
1239 seq_printf(f, "cpu\t\t\t: Alpha\n"
1240 "cpu model\t\t: %s\n"
1241 "cpu variation\t\t: %ld\n"
1242 "cpu revision\t\t: %ld\n"
1243 "cpu serial number\t: %s\n"
1244 "system type\t\t: %s\n"
1245 "system variation\t: %s\n"
1246 "system revision\t\t: %ld\n"
1247 "system serial number\t: %s\n"
1248 "cycle frequency [Hz]\t: %lu %s\n"
1249 "timer frequency [Hz]\t: %lu.%02lu\n"
1250 "page size [bytes]\t: %ld\n"
1251 "phys. address bits\t: %ld\n"
1252 "max. addr. space #\t: %ld\n"
1253 "BogoMIPS\t\t: %lu.%02lu\n"
1254 "kernel unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
1255 "user unaligned acc\t: %ld (pc=%lx,va=%lx)\n"
1256 "platform string\t\t: %s\n"
1257 "cpus detected\t\t: %d\n",
1258 cpu_name, cpu->variation, cpu->revision,
1259 (char*)cpu->serial_no,
1260 systype_name, sysvariation_name, hwrpb->sys_revision,
1261 (char*)hwrpb->ssn,
1262 est_cycle_freq ? : hwrpb->cycle_freq,
1263 est_cycle_freq ? "est." : "",
1264 timer_freq / 100, timer_freq % 100,
1265 hwrpb->pagesize,
1266 hwrpb->pa_bits,
1267 hwrpb->max_asn,
1268 loops_per_jiffy / (500000/HZ),
1269 (loops_per_jiffy / (5000/HZ)) % 100,
1270 unaligned[0].count, unaligned[0].pc, unaligned[0].va,
1271 unaligned[1].count, unaligned[1].pc, unaligned[1].va,
1272 platform_string(), nr_processors);
1273
1274#ifdef CONFIG_SMP
1275 seq_printf(f, "cpus active\t\t: %u\n"
1276 "cpu active mask\t\t: %016lx\n",
1277 num_online_cpus(), cpumask_bits(cpu_possible_mask)[0]);
1278#endif
1279
1280 show_cache_size (f, "L1 Icache", alpha_l1i_cacheshape);
1281 show_cache_size (f, "L1 Dcache", alpha_l1d_cacheshape);
1282 show_cache_size (f, "L2 cache", alpha_l2_cacheshape);
1283 show_cache_size (f, "L3 cache", alpha_l3_cacheshape);
1284
1285 return 0;
1286}
1287
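/*
 * Time size/stride dependent loads against the cycle counter (rpcc)
 * and return the average number of cycles per load. Used by
 * external_cache_probe() below to spot when a block no longer fits
 * in the off-chip cache.
 */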
1288static int __init
1289read_mem_block(int *addr, int stride, int size)
1290{
1291 long nloads = size / stride, cnt, tmp;
1292
1293 __asm__ __volatile__(
1294 " rpcc %0\n"
1295 "1: ldl %3,0(%2)\n"
1296 " subq %1,1,%1\n"
1297 /* Next two XORs introduce an explicit data dependency between
1298 consecutive loads in the loop, which will give us true load
1299 latency. */
1300 " xor %3,%2,%2\n"
1301 " xor %3,%2,%2\n"
1302 " addq %2,%4,%2\n"
1303 " bne %1,1b\n"
1304 " rpcc %3\n"
1305 " subl %3,%0,%0\n"
1306 : "=&r" (cnt), "=&r" (nloads), "=&r" (addr), "=&r" (tmp)
1307 : "r" (stride), "1" (nloads), "2" (addr));
1308
1309 return cnt / (size / stride);
1310}
1311
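/*
 * Pack a cache description into a single int: total size in bytes with
 * the low byte cleared, log2 of the line size in bits 4-7, and the
 * associativity in bits 0-3. show_cache_size() unpacks this format.
 */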
1312#define CSHAPE(totalsize, linesize, assoc) \
1313 ((totalsize & ~0xff) | (linesize << 4) | assoc)
1314
1315/* ??? EV5 supports up to 64M, but did the systems with more than
1316 16M of BCACHE ever exist? */
1317#define MAX_BCACHE_SIZE 16*1024*1024
1318
1319/* Note that the offchip caches are direct mapped on all Alphas. */
1320static int __init
1321external_cache_probe(int minsize, int width)
1322{
1323 int cycles, prev_cycles = 1000000;
1324 int stride = 1 << width;
1325 long size = minsize, maxsize = MAX_BCACHE_SIZE * 2;
1326
1327 if (maxsize > (max_low_pfn + 1) << PAGE_SHIFT)
1328 maxsize = 1 << (ilog2(max_low_pfn + 1) + PAGE_SHIFT);
1329
1330 /* Get the first block cached. */
1331 read_mem_block(__va(0), stride, size);
1332
1333 while (size < maxsize) {
1334 /* Get an average load latency in cycles. */
1335 cycles = read_mem_block(__va(0), stride, size);
1336 if (cycles > prev_cycles * 2) {
1337 /* Fine, we exceed the cache. */
1338 printk("%ldK Bcache detected; load hit latency %d "
1339 "cycles, load miss latency %d cycles\n",
1340 size >> 11, prev_cycles, cycles);
1341 return CSHAPE(size >> 1, width, 1);
1342 }
1343 /* Try to get the next block cached. */
1344 read_mem_block(__va(size), stride, size);
1345 prev_cycles = cycles;
1346 size <<= 1;
1347 }
1348 return -1; /* No BCACHE found. */
1349}
1350
1351static void __init
1352determine_cpu_caches (unsigned int cpu_type)
1353{
1354 int L1I, L1D, L2, L3;
1355
1356 switch (cpu_type) {
1357 case EV4_CPU:
1358 case EV45_CPU:
1359 {
1360 if (cpu_type == EV4_CPU)
1361 L1I = CSHAPE(8*1024, 5, 1);
1362 else
1363 L1I = CSHAPE(16*1024, 5, 1);
1364 L1D = L1I;
1365 L3 = -1;
1366
1367 /* BIU_CTL is a write-only Abox register. PALcode has a
1368 shadow copy, and may be available from some versions
1369 of the CSERVE PALcall. If we can get it, then
1370
1371 unsigned long biu_ctl, size;
1372 size = 128*1024 * (1 << ((biu_ctl >> 28) & 7));
1373 L2 = CSHAPE (size, 5, 1);
1374
1375 Unfortunately, we can't rely on that.
1376 */
1377 L2 = external_cache_probe(128*1024, 5);
1378 break;
1379 }
1380
1381 case LCA4_CPU:
1382 {
1383 unsigned long car, size;
1384
1385 L1I = L1D = CSHAPE(8*1024, 5, 1);
1386 L3 = -1;
1387
1388 car = *(vuip) phys_to_virt (0x120000078UL);
1389 size = 64*1024 * (1 << ((car >> 5) & 7));
1390 /* No typo -- 8 byte cacheline size. Whodathunk. */
1391 L2 = (car & 1 ? CSHAPE (size, 3, 1) : -1);
1392 break;
1393 }
1394
1395 case EV5_CPU:
1396 case EV56_CPU:
1397 {
1398 unsigned long sc_ctl, width;
1399
1400 L1I = L1D = CSHAPE(8*1024, 5, 1);
1401
1402 /* Check the line size of the Scache. */
1403 sc_ctl = *(vulp) phys_to_virt (0xfffff000a8UL);
1404 width = sc_ctl & 0x1000 ? 6 : 5;
1405 L2 = CSHAPE (96*1024, width, 3);
1406
1407 /* BC_CONTROL and BC_CONFIG are write-only IPRs. PALcode
1408 has a shadow copy, and may be available from some versions
1409 of the CSERVE PALcall. If we can get it, then
1410
1411 unsigned long bc_control, bc_config, size;
1412 size = 1024*1024 * (1 << ((bc_config & 7) - 1));
1413 L3 = (bc_control & 1 ? CSHAPE (size, width, 1) : -1);
1414
1415 Unfortunately, we can't rely on that.
1416 */
1417 L3 = external_cache_probe(1024*1024, width);
1418 break;
1419 }
1420
1421 case PCA56_CPU:
1422 case PCA57_CPU:
1423 {
1424 if (cpu_type == PCA56_CPU) {
1425 L1I = CSHAPE(16*1024, 6, 1);
1426 L1D = CSHAPE(8*1024, 5, 1);
1427 } else {
1428 L1I = CSHAPE(32*1024, 6, 2);
1429 L1D = CSHAPE(16*1024, 5, 1);
1430 }
1431 L3 = -1;
1432
1433#if 0
1434 unsigned long cbox_config, size;
1435
1436 cbox_config = *(vulp) phys_to_virt (0xfffff00008UL);
1437 size = 512*1024 * (1 << ((cbox_config >> 12) & 3));
1438
1439 L2 = ((cbox_config >> 31) & 1 ? CSHAPE (size, 6, 1) : -1);
1440#else
1441 L2 = external_cache_probe(512*1024, 6);
1442#endif
1443 break;
1444 }
1445
1446 case EV6_CPU:
1447 case EV67_CPU:
1448 case EV68CB_CPU:
1449 case EV68AL_CPU:
1450 case EV68CX_CPU:
1451 case EV69_CPU:
1452 L1I = L1D = CSHAPE(64*1024, 6, 2);
1453 L2 = external_cache_probe(1024*1024, 6);
1454 L3 = -1;
1455 break;
1456
1457 case EV7_CPU:
1458 case EV79_CPU:
1459 L1I = L1D = CSHAPE(64*1024, 6, 2);
1460 L2 = CSHAPE(7*1024*1024/4, 6, 7);
1461 L3 = -1;
1462 break;
1463
1464 default:
1465 /* Nothing known about this cpu type. */
1466 L1I = L1D = L2 = L3 = 0;
1467 break;
1468 }
1469
1470 alpha_l1i_cacheshape = L1I;
1471 alpha_l1d_cacheshape = L1D;
1472 alpha_l2_cacheshape = L2;
1473 alpha_l3_cacheshape = L3;
1474}
1475
1476/*
1477 * We show only CPU #0 info.
1478 */
1479static void *
1480c_start(struct seq_file *f, loff_t *pos)
1481{
1482 return *pos ? NULL : (char *)hwrpb + hwrpb->processor_offset;
1483}
1484
1485static void *
1486c_next(struct seq_file *f, void *v, loff_t *pos)
1487{
1488 return NULL;
1489}
1490
1491static void
1492c_stop(struct seq_file *f, void *v)
1493{
1494}
1495
1496const struct seq_operations cpuinfo_op = {
1497 .start = c_start,
1498 .next = c_next,
1499 .stop = c_stop,
1500 .show = show_cpuinfo,
1501};
1502
1503
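/*
 * On panic, if we are running on SRM with the callback console, halt
 * straight back to the firmware, presumably so the panic output stays
 * visible at the SRM prompt.
 */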
1504static int
1505alpha_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
1506{
1507#if 1
1508 /* FIXME FIXME FIXME */
1509 /* If we are using SRM and serial console, just hard halt here. */
1510 if (alpha_using_srm && srmcons_output)
1511 __halt();
1512#endif
1513 return NOTIFY_DONE;
1514}
1515
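/* Register a "pcspkr" platform device so the PC speaker driver can attach. */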
1516static __init int add_pcspkr(void)
1517{
1518 struct platform_device *pd;
1519 int ret;
1520
1521 pd = platform_device_alloc("pcspkr", -1);
1522 if (!pd)
1523 return -ENOMEM;
1524
1525 ret = platform_device_add(pd);
1526 if (ret)
1527 platform_device_put(pd);
1528
1529 return ret;
1530}
1531device_initcall(add_pcspkr);