// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/sparc64/kernel/setup.c
 *
 * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <asm/smp.h>
#include <linux/user.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/inet.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/start_kernel.h>
#include <linux/memblock.h>
#include <uapi/linux/mount.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/idprom.h>
#include <asm/head.h>
#include <asm/starfire.h>
#include <asm/mmu_context.h>
#include <asm/timer.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/mmu.h>
#include <asm/ns87303.h>
#include <asm/btext.h>
#include <asm/elf.h>
#include <asm/mdesc.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/irq.h>

#ifdef CONFIG_IP_PNP
#include <net/ipconfig.h>
#endif

#include "entry.h"
#include "kernel.h"

/* Used to synchronize accesses to the NatSemi SUPER I/O chip configuration
 * operations in asm/ns87303.h.
 */
DEFINE_SPINLOCK(ns87303_lock);
EXPORT_SYMBOL(ns87303_lock);

struct screen_info screen_info = {
        0, 0,                   /* orig-x, orig-y */
        0,                      /* unused */
        0,                      /* orig-video-page */
        0,                      /* orig-video-mode */
        128,                    /* orig-video-cols */
        0, 0, 0,                /* unused, ega_bx, unused */
        54,                     /* orig-video-lines */
        0,                      /* orig-video-isVGA */
        16                      /* orig-video-points */
};

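/* Early boot console write hook: hand the buffer straight to the PROM. */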
static void
prom_console_write(struct console *con, const char *s, unsigned int n)
{
        prom_write(s, n);
}

/* Exported for mm/init.c:paging_init. */
unsigned long cmdline_memory_size = 0;

static struct console prom_early_console = {
        .name =         "earlyprom",
        .write =        prom_console_write,
        .flags =        CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
        .index =        -1,
};

/*
 * Process kernel command line switches that are specific to the
 * SPARC or that require special low-level processing.
 */
static void __init process_switch(char c)
{
        switch (c) {
        case 'd':
        case 's':
                break;
        case 'h':
                prom_printf("boot_flags_init: Halt!\n");
                prom_halt();
                break;
        case 'p':
                prom_early_console.flags &= ~CON_BOOT;
                break;
        case 'P':
                /* Force UltraSPARC-III P-Cache on. */
                if (tlb_type != cheetah) {
                        printk("BOOT: Ignoring P-Cache force option.\n");
                        break;
                }
                cheetah_pcache_forced_on = 1;
                add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
                cheetah_enable_pcache();
                break;

        default:
                printk("Unknown boot switch (-%c)\n", c);
                break;
        }
}

static void __init boot_flags_init(char *commands)
{
        while (*commands) {
                /* Move to the start of the next "argument". */
                while (*commands == ' ')
                        commands++;

                /* Process any command switches, otherwise skip it. */
                if (*commands == '\0')
                        break;
                if (*commands == '-') {
                        commands++;
                        while (*commands && *commands != ' ')
                                process_switch(*commands++);
                        continue;
                }
                if (!strncmp(commands, "mem=", 4))
                        cmdline_memory_size = memparse(commands + 4, &commands);

                while (*commands && *commands != ' ')
                        commands++;
        }
}

extern unsigned short root_flags;
extern unsigned short root_dev;
extern unsigned short ram_flags;
#define RAMDISK_IMAGE_START_MASK        0x07FF
#define RAMDISK_PROMPT_FLAG             0x8000
#define RAMDISK_LOAD_FLAG               0x4000

extern int root_mountflags;

char reboot_command[COMMAND_LINE_SIZE];

static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };

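/* Rewrite the cpuid-lookup patch sites with the four-instruction sequence
 * that matches this machine: Starfire, Cheetah (Safari or JBUS bus), or
 * sun4v, flushing each patched word from the instruction cache.
 */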
static void __init per_cpu_patch(void)
{
        struct cpuid_patch_entry *p;
        unsigned long ver;
        int is_jbus;

        if (tlb_type == spitfire && !this_is_starfire)
                return;

        is_jbus = 0;
        if (tlb_type != hypervisor) {
                __asm__ ("rdpr %%ver, %0" : "=r" (ver));
                is_jbus = ((ver >> 32UL) == __JALAPENO_ID ||
                           (ver >> 32UL) == __SERRANO_ID);
        }

        p = &__cpuid_patch;
        while (p < &__cpuid_patch_end) {
                unsigned long addr = p->addr;
                unsigned int *insns;

                switch (tlb_type) {
                case spitfire:
                        insns = &p->starfire[0];
                        break;
                case cheetah:
                case cheetah_plus:
                        if (is_jbus)
                                insns = &p->cheetah_jbus[0];
                        else
                                insns = &p->cheetah_safari[0];
                        break;
                case hypervisor:
                        insns = &p->sun4v[0];
                        break;
                default:
                        prom_printf("Unknown cpu type, halting.\n");
                        prom_halt();
                }

                *(unsigned int *) (addr + 0) = insns[0];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 0));

                *(unsigned int *) (addr + 4) = insns[1];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 4));

                *(unsigned int *) (addr + 8) = insns[2];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 8));

                *(unsigned int *) (addr + 12) = insns[3];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 12));

                p++;
        }
}

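/* Patch helpers for one- and two-instruction sun4v sequences: each entry in
 * the patch table names an address and the replacement instruction word(s),
 * which are written in place and then flushed from the instruction cache.
 */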
void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
                             struct sun4v_1insn_patch_entry *end)
{
        while (start < end) {
                unsigned long addr = start->addr;

                *(unsigned int *) (addr + 0) = start->insn;
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 0));

                start++;
        }
}

void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
                             struct sun4v_2insn_patch_entry *end)
{
        while (start < end) {
                unsigned long addr = start->addr;

                *(unsigned int *) (addr + 0) = start->insns[0];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 0));

                *(unsigned int *) (addr + 4) = start->insns[1];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 4));

                start++;
        }
}

void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
                              struct sun4v_2insn_patch_entry *end)
{
        while (start < end) {
                unsigned long addr = start->addr;

                *(unsigned int *) (addr + 0) = start->insns[0];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 0));

                *(unsigned int *) (addr + 4) = start->insns[1];
                wmb();
                __asm__ __volatile__("flush %0" : : "r" (addr + 4));

                start++;
        }
}

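/* Apply the sun4v instruction patches when running on a hypervisor, plus the
 * M7/M8/SN-specific and fast-window-control variants where appropriate, and
 * then initialize the hypervisor API.
 */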
static void __init sun4v_patch(void)
{
        extern void sun4v_hvapi_init(void);

        if (tlb_type != hypervisor)
                return;

        sun4v_patch_1insn_range(&__sun4v_1insn_patch,
                                &__sun4v_1insn_patch_end);

        sun4v_patch_2insn_range(&__sun4v_2insn_patch,
                                &__sun4v_2insn_patch_end);

        switch (sun4v_chip_type) {
        case SUN4V_CHIP_SPARC_M7:
        case SUN4V_CHIP_SPARC_M8:
        case SUN4V_CHIP_SPARC_SN:
                sun4v_patch_1insn_range(&__sun_m7_1insn_patch,
                                        &__sun_m7_1insn_patch_end);
                sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
                                         &__sun_m7_2insn_patch_end);
                break;
        default:
                break;
        }

        if (sun4v_chip_type != SUN4V_CHIP_NIAGARA1) {
                sun4v_patch_1insn_range(&__fast_win_ctrl_1insn_patch,
                                        &__fast_win_ctrl_1insn_patch_end);
        }

        sun4v_hvapi_init();
}

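/* Apply the popc-based replacement sequences recorded in the three- and
 * six-instruction patch tables; only called once the cpu is known to
 * advertise hardware popc (see init_sparc64_elf_hwcap below).
 */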
static void __init popc_patch(void)
{
        struct popc_3insn_patch_entry *p3;
        struct popc_6insn_patch_entry *p6;

        p3 = &__popc_3insn_patch;
        while (p3 < &__popc_3insn_patch_end) {
                unsigned long i, addr = p3->addr;

                for (i = 0; i < 3; i++) {
                        *(unsigned int *) (addr + (i * 4)) = p3->insns[i];
                        wmb();
                        __asm__ __volatile__("flush %0"
                                             : : "r" (addr + (i * 4)));
                }

                p3++;
        }

        p6 = &__popc_6insn_patch;
        while (p6 < &__popc_6insn_patch_end) {
                unsigned long i, addr = p6->addr;

                for (i = 0; i < 6; i++) {
                        *(unsigned int *) (addr + (i * 4)) = p6->insns[i];
                        wmb();
                        __asm__ __volatile__("flush %0"
                                             : : "r" (addr + (i * 4)));
                }

                p6++;
        }
}

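/* Apply the three-instruction 'pause' replacement sequences; only called when
 * the cpu advertises the pause capability (see init_sparc64_elf_hwcap below).
 */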
static void __init pause_patch(void)
{
        struct pause_patch_entry *p;

        p = &__pause_3insn_patch;
        while (p < &__pause_3insn_patch_end) {
                unsigned long i, addr = p->addr;

                for (i = 0; i < 3; i++) {
                        *(unsigned int *) (addr + (i * 4)) = p->insns[i];
                        wmb();
                        __asm__ __volatile__("flush %0"
                                             : : "r" (addr + (i * 4)));
                }

                p++;
        }
}

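/* First C entry point on the boot cpu: apply the per-cpu and sun4v patches,
 * validate and record the boot cpu id, do early time initialization, and
 * hand off to the generic start_kernel().
 */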
void __init start_early_boot(void)
{
        int cpu;

        check_if_starfire();
        per_cpu_patch();
        sun4v_patch();
        smp_init_cpu_poke();

        cpu = hard_smp_processor_id();
        if (cpu >= NR_CPUS) {
                prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
                            cpu, NR_CPUS);
                prom_halt();
        }
        current_thread_info()->cpu = cpu;

        time_init_early();
        prom_init_report();
        start_kernel();
}

/* On Ultra, we support all of the v8 capabilities. */
unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
                                   HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
                                   HWCAP_SPARC_V9);
EXPORT_SYMBOL(sparc64_elf_hwcap);

static const char *hwcaps[] = {
        "flush", "stbar", "swap", "muldiv", "v9",
        "ultra3", "blkinit", "n2",

        /* These strings are as they appear in the machine description
         * 'hwcap-list' property for cpu nodes.
         */
        "mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
        "ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
        "ima", "cspare", "pause", "cbcond", NULL /* reserved for crypto */,
        "adp",
};

static const char *crypto_hwcaps[] = {
        "aes", "des", "kasumi", "camellia", "md5", "sha1", "sha256",
        "sha512", "mpmul", "montmul", "montsqr", "crc32c",
};

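/* seq_file helper that emits the "cpucaps" line: the name of every set bit in
 * sparc64_elf_hwcap, plus the crypto capability names selected by the feature
 * bits read from %asr26 when HWCAP_SPARC_CRYPTO is present.
 */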
void cpucap_info(struct seq_file *m)
{
        unsigned long caps = sparc64_elf_hwcap;
        int i, printed = 0;

        seq_puts(m, "cpucaps\t\t: ");
        for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
                unsigned long bit = 1UL << i;
                if (hwcaps[i] && (caps & bit)) {
                        seq_printf(m, "%s%s",
                                   printed ? "," : "", hwcaps[i]);
                        printed++;
                }
        }
        if (caps & HWCAP_SPARC_CRYPTO) {
                unsigned long cfr;

                __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
                for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
                        unsigned long bit = 1UL << i;
                        if (cfr & bit) {
                                seq_printf(m, "%s%s",
                                           printed ? "," : "", crypto_hwcaps[i]);
                                printed++;
                        }
                }
        }
        seq_putc(m, '\n');
}

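/* Boot-time logging helpers: print the detected capabilities as
 * "CPU CAPS: [...]" lines, eight names per line.
 */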
static void __init report_one_hwcap(int *printed, const char *name)
{
        if ((*printed) == 0)
                printk(KERN_INFO "CPU CAPS: [");
        printk(KERN_CONT "%s%s",
               (*printed) ? "," : "", name);
        if (++(*printed) == 8) {
                printk(KERN_CONT "]\n");
                *printed = 0;
        }
}

static void __init report_crypto_hwcaps(int *printed)
{
        unsigned long cfr;
        int i;

        __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));

        for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
                unsigned long bit = 1UL << i;
                if (cfr & bit)
                        report_one_hwcap(printed, crypto_hwcaps[i]);
        }
}

static void __init report_hwcaps(unsigned long caps)
{
        int i, printed = 0;

        for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
                unsigned long bit = 1UL << i;
                if (hwcaps[i] && (caps & bit))
                        report_one_hwcap(&printed, hwcaps[i]);
        }
        if (caps & HWCAP_SPARC_CRYPTO)
                report_crypto_hwcaps(&printed);
        if (printed != 0)
                printk(KERN_CONT "]\n");
}

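/* Build a capability mask from the machine description: walk the strings in
 * the 'hwcap-list' property of the first cpu node and translate each one into
 * its hwcap bit (any crypto name maps to HWCAP_SPARC_CRYPTO).
 */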
static unsigned long __init mdesc_cpu_hwcap_list(void)
{
        struct mdesc_handle *hp;
        unsigned long caps = 0;
        const char *prop;
        int len;
        u64 pn;

        hp = mdesc_grab();
        if (!hp)
                return 0;

        pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
        if (pn == MDESC_NODE_NULL)
                goto out;

        prop = mdesc_get_property(hp, pn, "hwcap-list", &len);
        if (!prop)
                goto out;

        while (len) {
                int i, plen;

                for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
                        unsigned long bit = 1UL << i;

                        if (hwcaps[i] && !strcmp(prop, hwcaps[i])) {
                                caps |= bit;
                                break;
                        }
                }
                for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
                        if (!strcmp(prop, crypto_hwcaps[i]))
                                caps |= HWCAP_SPARC_CRYPTO;
                }

                plen = strlen(prop) + 1;
                prop += plen;
                len -= plen;
        }

out:
        mdesc_release(hp);
        return caps;
}

/* This yields a mask that user programs can use to figure out what
 * instruction set this cpu supports.
 */
static void __init init_sparc64_elf_hwcap(void)
{
        unsigned long cap = sparc64_elf_hwcap;
        unsigned long mdesc_caps;

        if (tlb_type == cheetah || tlb_type == cheetah_plus)
                cap |= HWCAP_SPARC_ULTRA3;
        else if (tlb_type == hypervisor) {
                if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_BLKINIT;
                if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_N2;
        }

        cap |= (AV_SPARC_MUL32 | AV_SPARC_DIV32 | AV_SPARC_V8PLUS);

        mdesc_caps = mdesc_cpu_hwcap_list();
        if (!mdesc_caps) {
                if (tlb_type == spitfire)
                        cap |= AV_SPARC_VIS;
                if (tlb_type == cheetah || tlb_type == cheetah_plus)
                        cap |= AV_SPARC_VIS | AV_SPARC_VIS2;
                if (tlb_type == cheetah_plus) {
                        unsigned long impl, ver;

                        __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
                        impl = ((ver >> 32) & 0xffff);
                        if (impl == PANTHER_IMPL)
                                cap |= AV_SPARC_POPC;
                }
                if (tlb_type == hypervisor) {
                        if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
                                cap |= AV_SPARC_ASI_BLK_INIT;
                        if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
                                        AV_SPARC_ASI_BLK_INIT |
                                        AV_SPARC_POPC);
                        if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M8 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
                                        AV_SPARC_FMAF);
                }
        }
        sparc64_elf_hwcap = cap | mdesc_caps;

        report_hwcaps(sparc64_elf_hwcap);

        if (sparc64_elf_hwcap & AV_SPARC_POPC)
                popc_patch();
        if (sparc64_elf_hwcap & AV_SPARC_PAUSE)
                pause_patch();
}

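/* Allocate the per-cpu soft and hard IRQ stacks from memblock, one
 * THREAD_SIZE-sized and THREAD_SIZE-aligned block each, on the cpu's node.
 */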
void __init alloc_irqstack_bootmem(void)
{
        unsigned int i, node;

        for_each_possible_cpu(i) {
                node = cpu_to_node(i);

                softirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
                                                       THREAD_SIZE, node);
                if (!softirq_stack[i])
                        panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
                              __func__, THREAD_SIZE, THREAD_SIZE, node);
                hardirq_stack[i] = memblock_alloc_node(THREAD_SIZE,
                                                       THREAD_SIZE, node);
                if (!hardirq_stack[i])
                        panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n",
                              __func__, THREAD_SIZE, THREAD_SIZE, node);
        }
}

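/* Main architecture setup: fetch and parse the PROM boot arguments, register
 * the early console, set up the IDPROM, root device, ramdisk and IP
 * autoconfiguration hints, then bring up paging, the ELF hwcaps and the
 * per-cpu IRQ stacks.
 */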
void __init setup_arch(char **cmdline_p)
{
        /* Initialize PROM console and command line. */
        *cmdline_p = prom_getbootargs();
        strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
        parse_early_param();

        boot_flags_init(*cmdline_p);
#ifdef CONFIG_EARLYFB
        if (btext_find_display())
#endif
                register_console(&prom_early_console);

        if (tlb_type == hypervisor)
                pr_info("ARCH: SUN4V\n");
        else
                pr_info("ARCH: SUN4U\n");

#ifdef CONFIG_DUMMY_CONSOLE
        conswitchp = &dummy_con;
#endif

        idprom_init();

        if (!root_flags)
                root_mountflags &= ~MS_RDONLY;
        ROOT_DEV = old_decode_dev(root_dev);
#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif

        task_thread_info(&init_task)->kregs = &fake_swapper_regs;

#ifdef CONFIG_IP_PNP
        if (!ic_set_manually) {
                phandle chosen = prom_finddevice("/chosen");
                u32 cl, sv, gw;

                cl = prom_getintdefault (chosen, "client-ip", 0);
                sv = prom_getintdefault (chosen, "server-ip", 0);
                gw = prom_getintdefault (chosen, "gateway-ip", 0);
                if (cl && sv) {
                        ic_myaddr = cl;
                        ic_servaddr = sv;
                        if (gw)
                                ic_gateway = gw;
#if defined(CONFIG_IP_PNP_BOOTP) || defined(CONFIG_IP_PNP_RARP)
                        ic_proto_enabled = 0;
#endif
                }
        }
#endif

        /* Get boot processor trap_block[] setup. */
        init_cur_cpu_trap(current_thread_info());

        paging_init();
        init_sparc64_elf_hwcap();
        smp_fill_in_cpu_possible_map();
        /*
         * Once the OF device tree and MDESC have been setup and nr_cpus has
         * been parsed, we know the list of possible cpus. Therefore we can
         * allocate the IRQ stacks.
         */
        alloc_irqstack_bootmem();
}

extern int stop_a_enabled;

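/* Drop into the PROM command line (Stop-A/break handling), unless it has
 * been disabled by clearing stop_a_enabled.
 */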
void sun_do_break(void)
{
        if (!stop_a_enabled)
                return;

        prom_printf("\n");
        flush_user_windows();

        prom_cmdline();
}
EXPORT_SYMBOL(sun_do_break);

int stop_a_enabled = 1;
EXPORT_SYMBOL(stop_a_enabled);