1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Procedures for interfacing to Open Firmware.
4 *
5 * Paul Mackerras August 1996.
6 * Copyright (C) 1996-2005 Paul Mackerras.
7 *
8 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
9 * {engebret|bergner}@us.ibm.com
10 */
11
12#undef DEBUG_PROM
13
14/* we cannot use FORTIFY as it brings in new symbols */
15#define __NO_FORTIFY
16
17#include <stdarg.h>
18#include <linux/kernel.h>
19#include <linux/string.h>
20#include <linux/init.h>
21#include <linux/threads.h>
22#include <linux/spinlock.h>
23#include <linux/types.h>
24#include <linux/pci.h>
25#include <linux/proc_fs.h>
26#include <linux/delay.h>
27#include <linux/initrd.h>
28#include <linux/bitops.h>
29#include <linux/pgtable.h>
30#include <linux/printk.h>
31#include <asm/prom.h>
32#include <asm/rtas.h>
33#include <asm/page.h>
34#include <asm/processor.h>
35#include <asm/interrupt.h>
36#include <asm/irq.h>
37#include <asm/io.h>
38#include <asm/smp.h>
39#include <asm/mmu.h>
40#include <asm/iommu.h>
41#include <asm/btext.h>
42#include <asm/sections.h>
43#include <asm/machdep.h>
44#include <asm/asm-prototypes.h>
45#include <asm/ultravisor-api.h>
46
47#include <linux/linux_logo.h>
48
49/* All of prom_init bss lives here */
50#define __prombss __section(".bss.prominit")
51
52/*
53 * Chunk size used when claiming memory for the flattened device-tree; eventually bump this up.
54 */
55#define DEVTREE_CHUNK_SIZE 0x100000
56
57/*
58 * This is the size of the local memory reserve map that gets copied
59 * into the boot params passed to the kernel. That size is totally
60 * flexible as the kernel just reads the list until it encounters an
61 * entry with size 0, so it can be changed without breaking binary
62 * compatibility
63 */
64#define MEM_RESERVE_MAP_SIZE 8
65
66/*
67 * prom_init() is called very early on, before the kernel text
68 * and data have been mapped to KERNELBASE. At this point the code
69 * is running at whatever address it has been loaded at.
70 * On ppc32 we compile with -mrelocatable, which means that references
71 * to extern and static variables get relocated automatically.
72 * ppc64 objects are always relocatable, we just need to relocate the
73 * TOC.
74 *
75 * Because OF may have mapped I/O devices into the area starting at
76 * KERNELBASE, particularly on CHRP machines, we can't safely call
77 * OF once the kernel has been mapped to KERNELBASE. Therefore all
78 * OF calls must be done within prom_init().
79 *
80 * ADDR is used in calls to call_prom. The 4th and following
81 * arguments to call_prom should be 32-bit values.
82 * On ppc64, 64 bit values are truncated to 32 bits (and
83 * fortunately don't get interpreted as two arguments).
84 */
85#define ADDR(x) (u32)(unsigned long)(x)
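/*
 * Illustrative use (not taken verbatim from this file): pointer arguments
 * are narrowed with ADDR() before being handed to call_prom(), e.g.
 *
 *	call_prom("getprop", 4, 1, node, ADDR("device_type"),
 *		  ADDR(type), (u32) sizeof(type));
 */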
86
87#ifdef CONFIG_PPC64
88#define OF_WORKAROUNDS 0
89#else
90#define OF_WORKAROUNDS of_workarounds
91static int of_workarounds __prombss;
92#endif
93
94#define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
95#define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
96
97#define PROM_BUG() do { \
98 prom_printf("kernel BUG at %s line 0x%x!\n", \
99 __FILE__, __LINE__); \
100 __builtin_trap(); \
101} while (0)
102
103#ifdef DEBUG_PROM
104#define prom_debug(x...) prom_printf(x)
105#else
106#define prom_debug(x...) do { } while (0)
107#endif
108
109
110typedef u32 prom_arg_t;
111
112struct prom_args {
113 __be32 service;
114 __be32 nargs;
115 __be32 nret;
116 __be32 args[10];
117};
118
119struct prom_t {
120 ihandle root;
121 phandle chosen;
122 int cpu;
123 ihandle stdout;
124 ihandle mmumap;
125 ihandle memory;
126};
127
128struct mem_map_entry {
129 __be64 base;
130 __be64 size;
131};
132
133typedef __be32 cell_t;
134
135extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
136 unsigned long r6, unsigned long r7, unsigned long r8,
137 unsigned long r9);
138
139#ifdef CONFIG_PPC64
140extern int enter_prom(struct prom_args *args, unsigned long entry);
141#else
142static inline int enter_prom(struct prom_args *args, unsigned long entry)
143{
144 return ((int (*)(struct prom_args *))entry)(args);
145}
146#endif
147
148extern void copy_and_flush(unsigned long dest, unsigned long src,
149 unsigned long size, unsigned long offset);
150
151/* prom structure */
152static struct prom_t __prombss prom;
153
154static unsigned long __prombss prom_entry;
155
156static char __prombss of_stdout_device[256];
157static char __prombss prom_scratch[256];
158
159static unsigned long __prombss dt_header_start;
160static unsigned long __prombss dt_struct_start, dt_struct_end;
161static unsigned long __prombss dt_string_start, dt_string_end;
162
163static unsigned long __prombss prom_initrd_start, prom_initrd_end;
164
165#ifdef CONFIG_PPC64
166static int __prombss prom_iommu_force_on;
167static int __prombss prom_iommu_off;
168static unsigned long __prombss prom_tce_alloc_start;
169static unsigned long __prombss prom_tce_alloc_end;
170#endif
171
172#ifdef CONFIG_PPC_PSERIES
173static bool __prombss prom_radix_disable;
174static bool __prombss prom_radix_gtse_disable;
175static bool __prombss prom_xive_disable;
176#endif
177
178#ifdef CONFIG_PPC_SVM
179static bool __prombss prom_svm_enable;
180#endif
181
182struct platform_support {
183 bool hash_mmu;
184 bool radix_mmu;
185 bool radix_gtse;
186 bool xive;
187};
188
189/* Platform codes are now obsolete in the kernel and are only used within
190 * this file; ultimately they will go away too. Feel free to change them if
191 * you need to, they are not shared with anything outside of this file anymore.
192 */
193#define PLATFORM_PSERIES 0x0100
194#define PLATFORM_PSERIES_LPAR 0x0101
195#define PLATFORM_LPAR 0x0001
196#define PLATFORM_POWERMAC 0x0400
197#define PLATFORM_GENERIC 0x0500
198
199static int __prombss of_platform;
200
201static char __prombss prom_cmd_line[COMMAND_LINE_SIZE];
202
203static unsigned long __prombss prom_memory_limit;
204
205static unsigned long __prombss alloc_top;
206static unsigned long __prombss alloc_top_high;
207static unsigned long __prombss alloc_bottom;
208static unsigned long __prombss rmo_top;
209static unsigned long __prombss ram_top;
210
211static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE];
212static int __prombss mem_reserve_cnt;
213
214static cell_t __prombss regbuf[1024];
215
216static bool __prombss rtas_has_query_cpu_stopped;
217
218
219/*
220 * Error results ... some OF calls will return "-1" on error, some
221 * will return 0, some will return either. To simplify, here are
222 * macros to use with any ihandle or phandle return value to check if
223 * it is valid
224 */
225
226#define PROM_ERROR (-1u)
227#define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
228#define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
229
230/* Copied from lib/string.c and lib/kstrtox.c */
231
232static int __init prom_strcmp(const char *cs, const char *ct)
233{
234 unsigned char c1, c2;
235
236 while (1) {
237 c1 = *cs++;
238 c2 = *ct++;
239 if (c1 != c2)
240 return c1 < c2 ? -1 : 1;
241 if (!c1)
242 break;
243 }
244 return 0;
245}
246
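/*
 * Copy src into dest (of size n), always NUL terminating dest and padding
 * any remaining space with NULs. Returns the number of bytes copied, or
 * -E2BIG if src did not fit; a local copy of strscpy_pad() semantics, since
 * the normal string helpers cannot be used this early.
 */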
247static ssize_t __init prom_strscpy_pad(char *dest, const char *src, size_t n)
248{
249 ssize_t rc;
250 size_t i;
251
252 if (n == 0 || n > INT_MAX)
253 return -E2BIG;
254
255 // Copy up to n bytes
256 for (i = 0; i < n && src[i] != '\0'; i++)
257 dest[i] = src[i];
258
259 rc = i;
260
261 // If we copied all n then we have run out of space for the nul
262 if (rc == n) {
263 // Rewind by one character to ensure nul termination
264 i--;
265 rc = -E2BIG;
266 }
267
268 for (; i < n; i++)
269 dest[i] = '\0';
270
271 return rc;
272}
273
274static int __init prom_strncmp(const char *cs, const char *ct, size_t count)
275{
276 unsigned char c1, c2;
277
278 while (count) {
279 c1 = *cs++;
280 c2 = *ct++;
281 if (c1 != c2)
282 return c1 < c2 ? -1 : 1;
283 if (!c1)
284 break;
285 count--;
286 }
287 return 0;
288}
289
290static size_t __init prom_strlen(const char *s)
291{
292 const char *sc;
293
294 for (sc = s; *sc != '\0'; ++sc)
295 /* nothing */;
296 return sc - s;
297}
298
299static int __init prom_memcmp(const void *cs, const void *ct, size_t count)
300{
301 const unsigned char *su1, *su2;
302 int res = 0;
303
304 for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
305 if ((res = *su1 - *su2) != 0)
306 break;
307 return res;
308}
309
310static char __init *prom_strstr(const char *s1, const char *s2)
311{
312 size_t l1, l2;
313
314 l2 = prom_strlen(s2);
315 if (!l2)
316 return (char *)s1;
317 l1 = prom_strlen(s1);
318 while (l1 >= l2) {
319 l1--;
320 if (!prom_memcmp(s1, s2, l2))
321 return (char *)s1;
322 s1++;
323 }
324 return NULL;
325}
326
327static size_t __init prom_strlcat(char *dest, const char *src, size_t count)
328{
329 size_t dsize = prom_strlen(dest);
330 size_t len = prom_strlen(src);
331 size_t res = dsize + len;
332
333 /* This would be a bug */
334 if (dsize >= count)
335 return count;
336
337 dest += dsize;
338 count -= dsize;
339 if (len >= count)
340 len = count-1;
341 memcpy(dest, src, len);
342 dest[len] = 0;
343 return res;
344
345}
346
347#ifdef CONFIG_PPC_PSERIES
348static int __init prom_strtobool(const char *s, bool *res)
349{
350 if (!s)
351 return -EINVAL;
352
353 switch (s[0]) {
354 case 'y':
355 case 'Y':
356 case '1':
357 *res = true;
358 return 0;
359 case 'n':
360 case 'N':
361 case '0':
362 *res = false;
363 return 0;
364 case 'o':
365 case 'O':
366 switch (s[1]) {
367 case 'n':
368 case 'N':
369 *res = true;
370 return 0;
371 case 'f':
372 case 'F':
373 *res = false;
374 return 0;
375 default:
376 break;
377 }
378 break;
379 default:
380 break;
381 }
382
383 return -EINVAL;
384}
385#endif
386
387/* This is the one and *ONLY* place where we actually call open
388 * firmware.
389 */
390
391static int __init call_prom(const char *service, int nargs, int nret, ...)
392{
393 int i;
394 struct prom_args args;
395 va_list list;
396
397 args.service = cpu_to_be32(ADDR(service));
398 args.nargs = cpu_to_be32(nargs);
399 args.nret = cpu_to_be32(nret);
400
401 va_start(list, nret);
402 for (i = 0; i < nargs; i++)
403 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
404 va_end(list);
405
406 for (i = 0; i < nret; i++)
407 args.args[nargs+i] = 0;
408
409 if (enter_prom(&args, prom_entry) < 0)
410 return PROM_ERROR;
411
412 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
413}
414
415static int __init call_prom_ret(const char *service, int nargs, int nret,
416 prom_arg_t *rets, ...)
417{
418 int i;
419 struct prom_args args;
420 va_list list;
421
422 args.service = cpu_to_be32(ADDR(service));
423 args.nargs = cpu_to_be32(nargs);
424 args.nret = cpu_to_be32(nret);
425
426 va_start(list, rets);
427 for (i = 0; i < nargs; i++)
428 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
429 va_end(list);
430
431 for (i = 0; i < nret; i++)
432 args.args[nargs+i] = 0;
433
434 if (enter_prom(&args, prom_entry) < 0)
435 return PROM_ERROR;
436
437 if (rets != NULL)
438 for (i = 1; i < nret; ++i)
439 rets[i-1] = be32_to_cpu(args.args[nargs+i]);
440
441 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
442}
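/*
 * Illustrative use (mirroring prom_instantiate_rtas() below): the first
 * return word comes back as the function result and acts as a status,
 * while the remaining return words land in rets[]:
 *
 *	if (call_prom_ret("call-method", 3, 2, &entry,
 *			  ADDR("instantiate-rtas"), rtas_inst, base) != 0
 *	    || entry == 0)
 *		...	(instantiation failed)
 */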
443
444
445static void __init prom_print(const char *msg)
446{
447 const char *p, *q;
448
449 if (prom.stdout == 0)
450 return;
451
452 for (p = msg; *p != 0; p = q) {
453 for (q = p; *q != 0 && *q != '\n'; ++q)
454 ;
455 if (q > p)
456 call_prom("write", 3, 1, prom.stdout, p, q - p);
457 if (*q == 0)
458 break;
459 ++q;
460 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
461 }
462}
463
464
465/*
466 * Both prom_print_hex & prom_print_dec take an unsigned long as input so that
467 * we do not need __udivdi3 or __umoddi3 on 32-bit.
468 */
469static void __init prom_print_hex(unsigned long val)
470{
471 int i, nibbles = sizeof(val)*2;
472 char buf[sizeof(val)*2+1];
473
474 for (i = nibbles-1; i >= 0; i--) {
475 buf[i] = (val & 0xf) + '0';
476 if (buf[i] > '9')
477 buf[i] += ('a'-'0'-10);
478 val >>= 4;
479 }
480 buf[nibbles] = '\0';
481 call_prom("write", 3, 1, prom.stdout, buf, nibbles);
482}
483
484/* max number of decimal digits in an unsigned long */
485#define UL_DIGITS 21
486static void __init prom_print_dec(unsigned long val)
487{
488 int i, size;
489 char buf[UL_DIGITS+1];
490
491 for (i = UL_DIGITS-1; i >= 0; i--) {
492 buf[i] = (val % 10) + '0';
493 val = val/10;
494 if (val == 0)
495 break;
496 }
497 /* shift stuff down */
498 size = UL_DIGITS - i;
499 call_prom("write", 3, 1, prom.stdout, buf+i, size);
500}
501
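/*
 * Minimal printf: only %s, %x, %u and %d are understood (with optional
 * 'l'/'ll' length modifiers), which is all this early boot path needs.
 */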
502__printf(1, 2)
503static void __init prom_printf(const char *format, ...)
504{
505 const char *p, *q, *s;
506 va_list args;
507 unsigned long v;
508 long vs;
509 int n = 0;
510
511 va_start(args, format);
512 for (p = format; *p != 0; p = q) {
513 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
514 ;
515 if (q > p)
516 call_prom("write", 3, 1, prom.stdout, p, q - p);
517 if (*q == 0)
518 break;
519 if (*q == '\n') {
520 ++q;
521 call_prom("write", 3, 1, prom.stdout,
522 ADDR("\r\n"), 2);
523 continue;
524 }
525 ++q;
526 if (*q == 0)
527 break;
528 while (*q == 'l') {
529 ++q;
530 ++n;
531 }
532 switch (*q) {
533 case 's':
534 ++q;
535 s = va_arg(args, const char *);
536 prom_print(s);
537 break;
538 case 'x':
539 ++q;
540 switch (n) {
541 case 0:
542 v = va_arg(args, unsigned int);
543 break;
544 case 1:
545 v = va_arg(args, unsigned long);
546 break;
547 case 2:
548 default:
549 v = va_arg(args, unsigned long long);
550 break;
551 }
552 prom_print_hex(v);
553 break;
554 case 'u':
555 ++q;
556 switch (n) {
557 case 0:
558 v = va_arg(args, unsigned int);
559 break;
560 case 1:
561 v = va_arg(args, unsigned long);
562 break;
563 case 2:
564 default:
565 v = va_arg(args, unsigned long long);
566 break;
567 }
568 prom_print_dec(v);
569 break;
570 case 'd':
571 ++q;
572 switch (n) {
573 case 0:
574 vs = va_arg(args, int);
575 break;
576 case 1:
577 vs = va_arg(args, long);
578 break;
579 case 2:
580 default:
581 vs = va_arg(args, long long);
582 break;
583 }
584 if (vs < 0) {
585 prom_print("-");
586 vs = -vs;
587 }
588 prom_print_dec(vs);
589 break;
590 }
591 }
592 va_end(args);
593}
594
595
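/*
 * Claim a chunk of memory from OF. On most machines a single "claim" call
 * is enough; the OF_WA_CLAIM workaround path claims physical and virtual
 * space separately and then maps them explicitly for old Open Firmware
 * versions.
 */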
596static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
597 unsigned long align)
598{
599
600 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
601 /*
602 * Old OF requires we claim physical and virtual separately
603 * and then map explicitly (assuming virtual mode)
604 */
605 int ret;
606 prom_arg_t result;
607
608 ret = call_prom_ret("call-method", 5, 2, &result,
609 ADDR("claim"), prom.memory,
610 align, size, virt);
611 if (ret != 0 || result == -1)
612 return -1;
613 ret = call_prom_ret("call-method", 5, 2, &result,
614 ADDR("claim"), prom.mmumap,
615 align, size, virt);
616 if (ret != 0) {
617 call_prom("call-method", 4, 1, ADDR("release"),
618 prom.memory, size, virt);
619 return -1;
620 }
621 /* the 0x12 is M (coherence) + PP == read/write */
622 call_prom("call-method", 6, 1,
623 ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
624 return virt;
625 }
626 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
627 (prom_arg_t)align);
628}
629
630static void __init __attribute__((noreturn)) prom_panic(const char *reason)
631{
632 prom_print(reason);
633	/* Do not call exit because it clears the screen on pmac;
634	 * it also causes some sort of double-fault on early pmacs. */
635 if (of_platform == PLATFORM_POWERMAC)
636 asm("trap\n");
637
638 /* ToDo: should put up an SRC here on pSeries */
639 call_prom("exit", 0, 0);
640
641 for (;;) /* should never get here */
642 ;
643}
644
645
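/*
 * Advance *nodep to the next node of a depth-first walk of the device
 * tree: first child, then next sibling, then the next sibling of the
 * nearest ancestor that has one. Returns 0 once the whole tree has been
 * visited.
 */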
646static int __init prom_next_node(phandle *nodep)
647{
648 phandle node;
649
650 if ((node = *nodep) != 0
651 && (*nodep = call_prom("child", 1, 1, node)) != 0)
652 return 1;
653 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
654 return 1;
655 for (;;) {
656 if ((node = call_prom("parent", 1, 1, node)) == 0)
657 return 0;
658 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
659 return 1;
660 }
661}
662
663static inline int __init prom_getprop(phandle node, const char *pname,
664 void *value, size_t valuelen)
665{
666 return call_prom("getprop", 4, 1, node, ADDR(pname),
667 (u32)(unsigned long) value, (u32) valuelen);
668}
669
670static inline int __init prom_getproplen(phandle node, const char *pname)
671{
672 return call_prom("getproplen", 2, 1, node, ADDR(pname));
673}
674
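/*
 * Helpers for the longtrail setprop workaround below: add_string() appends
 * a space-terminated word to the Forth command being built, and tohex()
 * formats a value the way the "interpret" service expects it.
 */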
675static void add_string(char **str, const char *q)
676{
677 char *p = *str;
678
679 while (*q)
680 *p++ = *q++;
681 *p++ = ' ';
682 *str = p;
683}
684
685static char *tohex(unsigned int x)
686{
687 static const char digits[] __initconst = "0123456789abcdef";
688 static char result[9] __prombss;
689 int i;
690
691 result[8] = 0;
692 i = 8;
693 do {
694 --i;
695 result[i] = digits[x & 0xf];
696 x >>= 4;
697 } while (x != 0 && i > 0);
698 return &result[i];
699}
700
701static int __init prom_setprop(phandle node, const char *nodename,
702 const char *pname, void *value, size_t valuelen)
703{
704 char cmd[256], *p;
705
706 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
707 return call_prom("setprop", 4, 1, node, ADDR(pname),
708 (u32)(unsigned long) value, (u32) valuelen);
709
710 /* gah... setprop doesn't work on longtrail, have to use interpret */
711 p = cmd;
712 add_string(&p, "dev");
713 add_string(&p, nodename);
714 add_string(&p, tohex((u32)(unsigned long) value));
715 add_string(&p, tohex(valuelen));
716 add_string(&p, tohex(ADDR(pname)));
717 add_string(&p, tohex(prom_strlen(pname)));
718 add_string(&p, "property");
719 *p = 0;
720 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
721}
722
723/* We can't use the standard versions because of relocation headaches. */
724#define prom_isxdigit(c) \
725 (('0' <= (c) && (c) <= '9') || ('a' <= (c) && (c) <= 'f') || ('A' <= (c) && (c) <= 'F'))
726
727#define prom_isdigit(c) ('0' <= (c) && (c) <= '9')
728#define prom_islower(c) ('a' <= (c) && (c) <= 'z')
729#define prom_toupper(c) (prom_islower(c) ? ((c) - 'a' + 'A') : (c))
730
731static unsigned long prom_strtoul(const char *cp, const char **endp)
732{
733 unsigned long result = 0, base = 10, value;
734
735 if (*cp == '0') {
736 base = 8;
737 cp++;
738 if (prom_toupper(*cp) == 'X') {
739 cp++;
740 base = 16;
741 }
742 }
743
744 while (prom_isxdigit(*cp) &&
745 (value = prom_isdigit(*cp) ? *cp - '0' : prom_toupper(*cp) - 'A' + 10) < base) {
746 result = result * base + value;
747 cp++;
748 }
749
750 if (endp)
751 *endp = cp;
752
753 return result;
754}
755
756static unsigned long prom_memparse(const char *ptr, const char **retptr)
757{
758 unsigned long ret = prom_strtoul(ptr, retptr);
759 int shift = 0;
760
761 /*
762 * We can't use a switch here because GCC *may* generate a
763 * jump table which won't work, because we're not running at
764 * the address we're linked at.
765 */
766 if ('G' == **retptr || 'g' == **retptr)
767 shift = 30;
768
769 if ('M' == **retptr || 'm' == **retptr)
770 shift = 20;
771
772 if ('K' == **retptr || 'k' == **retptr)
773 shift = 10;
774
775 if (shift) {
776 ret <<= shift;
777 (*retptr)++;
778 }
779
780 return ret;
781}
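/*
 * Illustrative example (assumption): prom_memparse("768M", &p) returns
 * 768 << 20 == 0x30000000 and leaves p pointing just past the 'M'.
 */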
782
783/*
784 * Early parsing of the command line passed to the kernel, used for
785 * "mem=x" and the options that affect the iommu
786 */
787static void __init early_cmdline_parse(void)
788{
789 const char *opt;
790
791 char *p;
792 int l = 0;
793
794 prom_cmd_line[0] = 0;
795 p = prom_cmd_line;
796
797 if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && (long)prom.chosen > 0)
798 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
799
800 if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) || l <= 0 || p[0] == '\0')
801 prom_strlcat(prom_cmd_line, " " CONFIG_CMDLINE,
802 sizeof(prom_cmd_line));
803
804 prom_printf("command line: %s\n", prom_cmd_line);
805
806#ifdef CONFIG_PPC64
807 opt = prom_strstr(prom_cmd_line, "iommu=");
808 if (opt) {
809 prom_printf("iommu opt is: %s\n", opt);
810 opt += 6;
811 while (*opt && *opt == ' ')
812 opt++;
813 if (!prom_strncmp(opt, "off", 3))
814 prom_iommu_off = 1;
815 else if (!prom_strncmp(opt, "force", 5))
816 prom_iommu_force_on = 1;
817 }
818#endif
819 opt = prom_strstr(prom_cmd_line, "mem=");
820 if (opt) {
821 opt += 4;
822 prom_memory_limit = prom_memparse(opt, (const char **)&opt);
823#ifdef CONFIG_PPC64
824 /* Align to 16 MB == size of ppc64 large page */
825 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
826#endif
827 }
828
829#ifdef CONFIG_PPC_PSERIES
830 prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
831 opt = prom_strstr(prom_cmd_line, "disable_radix");
832 if (opt) {
833 opt += 13;
834 if (*opt && *opt == '=') {
835 bool val;
836
837 if (prom_strtobool(++opt, &val))
838 prom_radix_disable = false;
839 else
840 prom_radix_disable = val;
841 } else
842 prom_radix_disable = true;
843 }
844 if (prom_radix_disable)
845 prom_debug("Radix disabled from cmdline\n");
846
847 opt = prom_strstr(prom_cmd_line, "radix_hcall_invalidate=on");
848 if (opt) {
849 prom_radix_gtse_disable = true;
850 prom_debug("Radix GTSE disabled from cmdline\n");
851 }
852
853 opt = prom_strstr(prom_cmd_line, "xive=off");
854 if (opt) {
855 prom_xive_disable = true;
856 prom_debug("XIVE disabled from cmdline\n");
857 }
858#endif /* CONFIG_PPC_PSERIES */
859
860#ifdef CONFIG_PPC_SVM
861 opt = prom_strstr(prom_cmd_line, "svm=");
862 if (opt) {
863 bool val;
864
865 opt += sizeof("svm=") - 1;
866 if (!prom_strtobool(opt, &val))
867 prom_svm_enable = val;
868 }
869#endif /* CONFIG_PPC_SVM */
870}
871
872#ifdef CONFIG_PPC_PSERIES
873/*
874 * The architecture vector has an array of PVR mask/value pairs,
875 * followed by # option vectors - 1, followed by the option vectors.
876 *
877 * See prom.h for the definition of the bits specified in the
878 * architecture vector.
879 */
880
881/* Firmware expects the value to be n - 1, where n is the # of vectors */
882#define NUM_VECTORS(n) ((n) - 1)
883
884/*
885 * Firmware expects 1 + n - 2, where n is the length of the option vector in
886 * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
887 */
888#define VECTOR_LENGTH(n) (1 + (n) - 2)
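/*
 * Worked example (illustrative): option vector 3 below is two bytes long,
 * so VECTOR_LENGTH(sizeof(struct option_vector3)) == 1 + 2 - 2 == 1.
 */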
889
890struct option_vector1 {
891 u8 byte1;
892 u8 arch_versions;
893 u8 arch_versions3;
894} __packed;
895
896struct option_vector2 {
897 u8 byte1;
898 __be16 reserved;
899 __be32 real_base;
900 __be32 real_size;
901 __be32 virt_base;
902 __be32 virt_size;
903 __be32 load_base;
904 __be32 min_rma;
905 __be32 min_load;
906 u8 min_rma_percent;
907 u8 max_pft_size;
908} __packed;
909
910struct option_vector3 {
911 u8 byte1;
912 u8 byte2;
913} __packed;
914
915struct option_vector4 {
916 u8 byte1;
917 u8 min_vp_cap;
918} __packed;
919
920struct option_vector5 {
921 u8 byte1;
922 u8 byte2;
923 u8 byte3;
924 u8 cmo;
925 u8 associativity;
926 u8 bin_opts;
927 u8 micro_checkpoint;
928 u8 reserved0;
929 __be32 max_cpus;
930 __be16 papr_level;
931 __be16 reserved1;
932 u8 platform_facilities;
933 u8 reserved2;
934 __be16 reserved3;
935 u8 subprocessors;
936 u8 byte22;
937 u8 intarch;
938 u8 mmu;
939 u8 hash_ext;
940 u8 radix_ext;
941} __packed;
942
943struct option_vector6 {
944 u8 reserved;
945 u8 secondary_pteg;
946 u8 os_name;
947} __packed;
948
949struct option_vector7 {
950 u8 os_id[256];
951} __packed;
952
953struct ibm_arch_vec {
954 struct { u32 mask, val; } pvrs[14];
955
956 u8 num_vectors;
957
958 u8 vec1_len;
959 struct option_vector1 vec1;
960
961 u8 vec2_len;
962 struct option_vector2 vec2;
963
964 u8 vec3_len;
965 struct option_vector3 vec3;
966
967 u8 vec4_len;
968 struct option_vector4 vec4;
969
970 u8 vec5_len;
971 struct option_vector5 vec5;
972
973 u8 vec6_len;
974 struct option_vector6 vec6;
975
976 u8 vec7_len;
977 struct option_vector7 vec7;
978} __packed;
979
980static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
981 .pvrs = {
982 {
983 .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
984 .val = cpu_to_be32(0x003a0000),
985 },
986 {
987 .mask = cpu_to_be32(0xffff0000), /* POWER6 */
988 .val = cpu_to_be32(0x003e0000),
989 },
990 {
991 .mask = cpu_to_be32(0xffff0000), /* POWER7 */
992 .val = cpu_to_be32(0x003f0000),
993 },
994 {
995 .mask = cpu_to_be32(0xffff0000), /* POWER8E */
996 .val = cpu_to_be32(0x004b0000),
997 },
998 {
999 .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
1000 .val = cpu_to_be32(0x004c0000),
1001 },
1002 {
1003 .mask = cpu_to_be32(0xffff0000), /* POWER8 */
1004 .val = cpu_to_be32(0x004d0000),
1005 },
1006 {
1007 .mask = cpu_to_be32(0xffff0000), /* POWER9 */
1008 .val = cpu_to_be32(0x004e0000),
1009 },
1010 {
1011 .mask = cpu_to_be32(0xffff0000), /* POWER10 */
1012 .val = cpu_to_be32(0x00800000),
1013 },
1014 {
1015 .mask = cpu_to_be32(0xffffffff), /* all 3.1-compliant */
1016 .val = cpu_to_be32(0x0f000006),
1017 },
1018 {
1019 .mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
1020 .val = cpu_to_be32(0x0f000005),
1021 },
1022 {
1023 .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
1024 .val = cpu_to_be32(0x0f000004),
1025 },
1026 {
1027 .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
1028 .val = cpu_to_be32(0x0f000003),
1029 },
1030 {
1031 .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
1032 .val = cpu_to_be32(0x0f000002),
1033 },
1034 {
1035 .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
1036 .val = cpu_to_be32(0x0f000001),
1037 },
1038 },
1039
1040 .num_vectors = NUM_VECTORS(6),
1041
1042 .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
1043 .vec1 = {
1044 .byte1 = 0,
1045 .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
1046 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
1047 .arch_versions3 = OV1_PPC_3_00 | OV1_PPC_3_1,
1048 },
1049
1050 .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
1051 /* option vector 2: Open Firmware options supported */
1052 .vec2 = {
1053 .byte1 = OV2_REAL_MODE,
1054 .reserved = 0,
1055 .real_base = cpu_to_be32(0xffffffff),
1056 .real_size = cpu_to_be32(0xffffffff),
1057 .virt_base = cpu_to_be32(0xffffffff),
1058 .virt_size = cpu_to_be32(0xffffffff),
1059 .load_base = cpu_to_be32(0xffffffff),
1060 .min_rma = cpu_to_be32(512), /* 512MB min RMA */
1061 .min_load = cpu_to_be32(0xffffffff), /* full client load */
1062 .min_rma_percent = 0, /* min RMA percentage of total RAM */
1063 .max_pft_size = 48, /* max log_2(hash table size) */
1064 },
1065
1066 .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
1067 /* option vector 3: processor options supported */
1068 .vec3 = {
1069 .byte1 = 0, /* don't ignore, don't halt */
1070 .byte2 = OV3_FP | OV3_VMX | OV3_DFP,
1071 },
1072
1073 .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
1074 /* option vector 4: IBM PAPR implementation */
1075 .vec4 = {
1076 .byte1 = 0, /* don't halt */
1077 .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
1078 },
1079
1080 .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
1081 /* option vector 5: PAPR/OF options */
1082 .vec5 = {
1083 .byte1 = 0, /* don't ignore, don't halt */
1084 .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
1085 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
1086#ifdef CONFIG_PCI_MSI
1087 /* PCIe/MSI support. Without MSI full PCIe is not supported */
1088 OV5_FEAT(OV5_MSI),
1089#else
1090 0,
1091#endif
1092 .byte3 = 0,
1093 .cmo =
1094#ifdef CONFIG_PPC_SMLPAR
1095 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
1096#else
1097 0,
1098#endif
1099 .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
1100 .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
1101 .micro_checkpoint = 0,
1102 .reserved0 = 0,
1103 .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */
1104 .papr_level = 0,
1105 .reserved1 = 0,
1106 .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
1107 .reserved2 = 0,
1108 .reserved3 = 0,
1109 .subprocessors = 1,
1110 .byte22 = OV5_FEAT(OV5_DRMEM_V2) | OV5_FEAT(OV5_DRC_INFO),
1111 .intarch = 0,
1112 .mmu = 0,
1113 .hash_ext = 0,
1114 .radix_ext = 0,
1115 },
1116
1117 /* option vector 6: IBM PAPR hints */
1118 .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
1119 .vec6 = {
1120 .reserved = 0,
1121 .secondary_pteg = 0,
1122 .os_name = OV6_LINUX,
1123 },
1124
1125 /* option vector 7: OS Identification */
1126 .vec7_len = VECTOR_LENGTH(sizeof(struct option_vector7)),
1127};
1128
1129static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned;
1130
1131/* Old method - ELF header with PT_NOTE sections only works on BE */
1132#ifdef __BIG_ENDIAN__
1133static const struct fake_elf {
1134 Elf32_Ehdr elfhdr;
1135 Elf32_Phdr phdr[2];
1136 struct chrpnote {
1137 u32 namesz;
1138 u32 descsz;
1139 u32 type;
1140 char name[8]; /* "PowerPC" */
1141 struct chrpdesc {
1142 u32 real_mode;
1143 u32 real_base;
1144 u32 real_size;
1145 u32 virt_base;
1146 u32 virt_size;
1147 u32 load_base;
1148 } chrpdesc;
1149 } chrpnote;
1150 struct rpanote {
1151 u32 namesz;
1152 u32 descsz;
1153 u32 type;
1154 char name[24]; /* "IBM,RPA-Client-Config" */
1155 struct rpadesc {
1156 u32 lpar_affinity;
1157 u32 min_rmo_size;
1158 u32 min_rmo_percent;
1159 u32 max_pft_size;
1160 u32 splpar;
1161 u32 min_load;
1162 u32 new_mem_def;
1163 u32 ignore_me;
1164 } rpadesc;
1165 } rpanote;
1166} fake_elf __initconst = {
1167 .elfhdr = {
1168 .e_ident = { 0x7f, 'E', 'L', 'F',
1169 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
1170 .e_type = ET_EXEC, /* yeah right */
1171 .e_machine = EM_PPC,
1172 .e_version = EV_CURRENT,
1173 .e_phoff = offsetof(struct fake_elf, phdr),
1174 .e_phentsize = sizeof(Elf32_Phdr),
1175 .e_phnum = 2
1176 },
1177 .phdr = {
1178 [0] = {
1179 .p_type = PT_NOTE,
1180 .p_offset = offsetof(struct fake_elf, chrpnote),
1181 .p_filesz = sizeof(struct chrpnote)
1182 }, [1] = {
1183 .p_type = PT_NOTE,
1184 .p_offset = offsetof(struct fake_elf, rpanote),
1185 .p_filesz = sizeof(struct rpanote)
1186 }
1187 },
1188 .chrpnote = {
1189 .namesz = sizeof("PowerPC"),
1190 .descsz = sizeof(struct chrpdesc),
1191 .type = 0x1275,
1192 .name = "PowerPC",
1193 .chrpdesc = {
1194 .real_mode = ~0U, /* ~0 means "don't care" */
1195 .real_base = ~0U,
1196 .real_size = ~0U,
1197 .virt_base = ~0U,
1198 .virt_size = ~0U,
1199 .load_base = ~0U
1200 },
1201 },
1202 .rpanote = {
1203 .namesz = sizeof("IBM,RPA-Client-Config"),
1204 .descsz = sizeof(struct rpadesc),
1205 .type = 0x12759999,
1206 .name = "IBM,RPA-Client-Config",
1207 .rpadesc = {
1208 .lpar_affinity = 0,
1209 .min_rmo_size = 64, /* in megabytes */
1210 .min_rmo_percent = 0,
1211 .max_pft_size = 48, /* 2^48 bytes max PFT size */
1212 .splpar = 1,
1213 .min_load = ~0U,
1214 .new_mem_def = 0
1215 }
1216 }
1217};
1218#endif /* __BIG_ENDIAN__ */
1219
1220static int __init prom_count_smt_threads(void)
1221{
1222 phandle node;
1223 char type[64];
1224 unsigned int plen;
1225
1226	/* Pick up the first CPU node we can find */
1227 for (node = 0; prom_next_node(&node); ) {
1228 type[0] = 0;
1229 prom_getprop(node, "device_type", type, sizeof(type));
1230
1231 if (prom_strcmp(type, "cpu"))
1232 continue;
1233 /*
1234 * There is an entry for each smt thread, each entry being
1235 * 4 bytes long. All cpus should have the same number of
1236 * smt threads, so return after finding the first.
1237 */
1238 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
1239 if (plen == PROM_ERROR)
1240 break;
1241 plen >>= 2;
1242 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
1243
1244 /* Sanity check */
1245 if (plen < 1 || plen > 64) {
1246 prom_printf("Threads per core %lu out of bounds, assuming 1\n",
1247 (unsigned long)plen);
1248 return 1;
1249 }
1250 return plen;
1251 }
1252 prom_debug("No threads found, assuming 1 per core\n");
1253
1254 return 1;
1255
1256}
1257
1258static void __init prom_parse_mmu_model(u8 val,
1259 struct platform_support *support)
1260{
1261 switch (val) {
1262 case OV5_FEAT(OV5_MMU_DYNAMIC):
1263 case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
1264 prom_debug("MMU - either supported\n");
1265 support->radix_mmu = !prom_radix_disable;
1266 support->hash_mmu = true;
1267 break;
1268 case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
1269 prom_debug("MMU - radix only\n");
1270 if (prom_radix_disable) {
1271 /*
1272 * If we __have__ to do radix, we're better off ignoring
1273 * the command line rather than not booting.
1274 */
1275 prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
1276 }
1277 support->radix_mmu = true;
1278 break;
1279 case OV5_FEAT(OV5_MMU_HASH):
1280 prom_debug("MMU - hash only\n");
1281 support->hash_mmu = true;
1282 break;
1283 default:
1284 prom_debug("Unknown mmu support option: 0x%x\n", val);
1285 break;
1286 }
1287}
1288
1289static void __init prom_parse_xive_model(u8 val,
1290 struct platform_support *support)
1291{
1292 switch (val) {
1293 case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
1294 prom_debug("XIVE - either mode supported\n");
1295 support->xive = !prom_xive_disable;
1296 break;
1297 case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
1298 prom_debug("XIVE - exploitation mode supported\n");
1299 if (prom_xive_disable) {
1300 /*
1301 * If we __have__ to do XIVE, we're better off ignoring
1302 * the command line rather than not booting.
1303 */
1304 prom_printf("WARNING: Ignoring cmdline option xive=off\n");
1305 }
1306 support->xive = true;
1307 break;
1308 case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
1309 prom_debug("XIVE - legacy mode supported\n");
1310 break;
1311 default:
1312 prom_debug("Unknown xive support option: 0x%x\n", val);
1313 break;
1314 }
1315}
1316
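/*
 * "ibm,arch-vec-5-platform-support" is a list of (option vector 5 index,
 * supported value) byte pairs; each pair is passed through here to decide
 * which MMU, GTSE and interrupt-controller modes to request via vector 5.
 */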
1317static void __init prom_parse_platform_support(u8 index, u8 val,
1318 struct platform_support *support)
1319{
1320 switch (index) {
1321 case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
1322 prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
1323 break;
1324 case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
1325 if (val & OV5_FEAT(OV5_RADIX_GTSE))
1326 support->radix_gtse = !prom_radix_gtse_disable;
1327 break;
1328 case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */
1329 prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT),
1330 support);
1331 break;
1332 }
1333}
1334
1335static void __init prom_check_platform_support(void)
1336{
1337 struct platform_support supported = {
1338 .hash_mmu = false,
1339 .radix_mmu = false,
1340 .radix_gtse = false,
1341 .xive = false
1342 };
1343 int prop_len = prom_getproplen(prom.chosen,
1344 "ibm,arch-vec-5-platform-support");
1345
1346 /*
1347 * First copy the architecture vec template
1348 *
1349 * use memcpy() instead of *vec = *vec_template so that GCC replaces it
1350 * by __memcpy() when KASAN is active
1351 */
1352 memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template,
1353 sizeof(ibm_architecture_vec));
1354
1355 prom_strscpy_pad(ibm_architecture_vec.vec7.os_id, linux_banner, 256);
1356
1357 if (prop_len > 1) {
1358 int i;
1359 u8 vec[8];
1360 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
1361 prop_len);
1362 if (prop_len > sizeof(vec))
1363 prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
1364 prop_len);
1365 prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec));
1366		for (i = 0; i < prop_len && i + 1 < (int)sizeof(vec); i += 2) {
1367 prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
1368 prom_parse_platform_support(vec[i], vec[i + 1], &supported);
1369 }
1370 }
1371
1372 if (supported.radix_mmu && IS_ENABLED(CONFIG_PPC_RADIX_MMU)) {
1373 /* Radix preferred - Check if GTSE is also supported */
1374 prom_debug("Asking for radix\n");
1375 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
1376 if (supported.radix_gtse)
1377 ibm_architecture_vec.vec5.radix_ext =
1378 OV5_FEAT(OV5_RADIX_GTSE);
1379 else
1380 prom_debug("Radix GTSE isn't supported\n");
1381 } else if (supported.hash_mmu) {
1382 /* Default to hash mmu (if we can) */
1383 prom_debug("Asking for hash\n");
1384 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
1385 } else {
1386 /* We're probably on a legacy hypervisor */
1387 prom_debug("Assuming legacy hash support\n");
1388 }
1389
1390 if (supported.xive) {
1391 prom_debug("Asking for XIVE\n");
1392 ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT);
1393 }
1394}
1395
1396static void __init prom_send_capabilities(void)
1397{
1398 ihandle root;
1399 prom_arg_t ret;
1400 u32 cores;
1401
1402 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
1403 prom_check_platform_support();
1404
1405 root = call_prom("open", 1, 1, ADDR("/"));
1406 if (root != 0) {
1407 /* We need to tell the FW about the number of cores we support.
1408 *
1409 * To do that, we count the number of threads on the first core
1410 * (we assume this is the same for all cores) and use it to
1411 * divide NR_CPUS.
1412 */
1413
1414 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
1415 prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n",
1416 cores, NR_CPUS);
1417
1418 ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
1419
1420 /* try calling the ibm,client-architecture-support method */
1421 prom_printf("Calling ibm,client-architecture-support...");
1422 if (call_prom_ret("call-method", 3, 2, &ret,
1423 ADDR("ibm,client-architecture-support"),
1424 root,
1425 ADDR(&ibm_architecture_vec)) == 0) {
1426 /* the call exists... */
1427 if (ret)
1428 prom_printf("\nWARNING: ibm,client-architecture"
1429 "-support call FAILED!\n");
1430 call_prom("close", 1, 0, root);
1431 prom_printf(" done\n");
1432 return;
1433 }
1434 call_prom("close", 1, 0, root);
1435 prom_printf(" not implemented\n");
1436 }
1437
1438#ifdef __BIG_ENDIAN__
1439 {
1440 ihandle elfloader;
1441
1442 /* no ibm,client-architecture-support call, try the old way */
1443 elfloader = call_prom("open", 1, 1,
1444 ADDR("/packages/elf-loader"));
1445 if (elfloader == 0) {
1446 prom_printf("couldn't open /packages/elf-loader\n");
1447 return;
1448 }
1449 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
1450 elfloader, ADDR(&fake_elf));
1451 call_prom("close", 1, 0, elfloader);
1452 }
1453#endif /* __BIG_ENDIAN__ */
1454}
1455#endif /* CONFIG_PPC_PSERIES */
1456
1457/*
1458 * Memory allocation strategy... our layout is normally:
1459 *
1460 * at 14MB or more we have vmlinux, then a gap and initrd. In some
1461 * rare cases, initrd might end up being before the kernel though.
1462 * We assume this won't overwrite the final kernel at 0; we have no
1463 * provision to handle that in this version, but it should hopefully
1464 * never happen.
1465 *
1466 * alloc_top is set to the top of RMO, and is eventually shrunk down
1467 * if the TCE tables overlap it.
1468 *
1469 * alloc_bottom is set to the top of kernel/initrd.
1470 *
1471 * From there, allocations are done this way: RTAS is allocated
1472 * topmost, and the device-tree is allocated from the bottom. We try
1473 * to grow the device-tree allocation as we progress. If we can't,
1474 * then we fail; we don't currently have a facility to restart
1475 * elsewhere, but that shouldn't be necessary.
1476 *
1477 * Note that calls to reserve_mem have to be done explicitly; memory
1478 * allocated with either alloc_up or alloc_down isn't automatically
1479 * reserved.
1480 */
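/*
 * Rough picture of the RMO while prom_init runs (illustrative only):
 *
 *	0 .. kernel .. initrd .. alloc_bottom --> flattened DT grows upward
 *	   RTAS etc. grow downward <-- alloc_top .................. rmo_top
 */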
1481
1482
1483/*
1484 * Allocates memory in the RMO upward from the kernel/initrd
1485 *
1486 * When align is 0 this is a special case: it means to allocate in place
1487 * at the current location of alloc_bottom or fail (that is, basically
1488 * extending the previous allocation). Used for the device-tree flattening.
1489 */
1490static unsigned long __init alloc_up(unsigned long size, unsigned long align)
1491{
1492 unsigned long base = alloc_bottom;
1493 unsigned long addr = 0;
1494
1495 if (align)
1496 base = ALIGN(base, align);
1497 prom_debug("%s(%lx, %lx)\n", __func__, size, align);
1498 if (ram_top == 0)
1499 prom_panic("alloc_up() called with mem not initialized\n");
1500
1501 if (align)
1502 base = ALIGN(alloc_bottom, align);
1503 else
1504 base = alloc_bottom;
1505
1506 for(; (base + size) <= alloc_top;
1507 base = ALIGN(base + 0x100000, align)) {
1508 prom_debug(" trying: 0x%lx\n\r", base);
1509 addr = (unsigned long)prom_claim(base, size, 0);
1510 if (addr != PROM_ERROR && addr != 0)
1511 break;
1512 addr = 0;
1513 if (align == 0)
1514 break;
1515 }
1516 if (addr == 0)
1517 return 0;
1518 alloc_bottom = addr + size;
1519
1520 prom_debug(" -> %lx\n", addr);
1521 prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
1522 prom_debug(" alloc_top : %lx\n", alloc_top);
1523 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
1524 prom_debug(" rmo_top : %lx\n", rmo_top);
1525 prom_debug(" ram_top : %lx\n", ram_top);
1526
1527 return addr;
1528}
1529
1530/*
1531 * Allocates memory downward, either from top of RMO, or if highmem
1532 * is set, from the top of RAM. Note that this one doesn't handle
1533 * failures. It does claim memory if highmem is not set.
1534 */
1535static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1536 int highmem)
1537{
1538 unsigned long base, addr = 0;
1539
1540 prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
1541 highmem ? "(high)" : "(low)");
1542 if (ram_top == 0)
1543 prom_panic("alloc_down() called with mem not initialized\n");
1544
1545 if (highmem) {
1546 /* Carve out storage for the TCE table. */
1547 addr = ALIGN_DOWN(alloc_top_high - size, align);
1548 if (addr <= alloc_bottom)
1549 return 0;
1550		/* Will we bump into the RMO? If yes, check that we didn't
1551		 * overlap existing allocations there; if we did, we are
1552		 * dead, we must be the first in town!
1553 */
1554 if (addr < rmo_top) {
1555 /* Good, we are first */
1556 if (alloc_top == rmo_top)
1557 alloc_top = rmo_top = addr;
1558 else
1559 return 0;
1560 }
1561 alloc_top_high = addr;
1562 goto bail;
1563 }
1564
1565 base = ALIGN_DOWN(alloc_top - size, align);
1566 for (; base > alloc_bottom;
1567 base = ALIGN_DOWN(base - 0x100000, align)) {
1568 prom_debug(" trying: 0x%lx\n\r", base);
1569 addr = (unsigned long)prom_claim(base, size, 0);
1570 if (addr != PROM_ERROR && addr != 0)
1571 break;
1572 addr = 0;
1573 }
1574 if (addr == 0)
1575 return 0;
1576 alloc_top = addr;
1577
1578 bail:
1579 prom_debug(" -> %lx\n", addr);
1580 prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
1581 prom_debug(" alloc_top : %lx\n", alloc_top);
1582 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
1583 prom_debug(" rmo_top : %lx\n", rmo_top);
1584 prom_debug(" ram_top : %lx\n", ram_top);
1585
1586 return addr;
1587}
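/*
 * Typical use (matching the callers below): RTAS and the SML are placed
 * with alloc_down(size, PAGE_SIZE, 0) just under alloc_top, while the TCE
 * tables pass highmem == 1 so they end up at the very top of RAM.
 */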
1588
1589/*
1590 * Parse a "reg" cell
1591 */
1592static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1593{
1594 cell_t *p = *cellp;
1595 unsigned long r = 0;
1596
1597	/* Skip leading cells that won't fit in an unsigned long */
1598 while (s > sizeof(unsigned long) / 4) {
1599 p++;
1600 s--;
1601 }
1602 r = be32_to_cpu(*p++);
1603#ifdef CONFIG_PPC64
1604 if (s > 1) {
1605 r <<= 32;
1606 r |= be32_to_cpu(*(p++));
1607 }
1608#endif
1609 *cellp = p;
1610 return r;
1611}
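/*
 * Illustrative example (assumption): with two address cells, the pair
 * { 0x1, 0x02000000 } is combined into 0x102000000 on ppc64; any extra
 * leading cells beyond what fits in an unsigned long are skipped.
 */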
1612
1613/*
1614 * Very dumb function for adding to the memory reserve list, but
1615 * we don't need anything smarter at this point
1616 *
1617 * XXX Eventually check for collisions. They should NEVER happen.
1618 * If problems seem to show up, it would be a good start to track
1619 * them down.
1620 */
1621static void __init reserve_mem(u64 base, u64 size)
1622{
1623 u64 top = base + size;
1624 unsigned long cnt = mem_reserve_cnt;
1625
1626 if (size == 0)
1627 return;
1628
1629 /* We need to always keep one empty entry so that we
1630 * have our terminator with "size" set to 0 since we are
1631 * dumb and just copy this entire array to the boot params
1632 */
1633 base = ALIGN_DOWN(base, PAGE_SIZE);
1634 top = ALIGN(top, PAGE_SIZE);
1635 size = top - base;
1636
1637 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1638 prom_panic("Memory reserve map exhausted !\n");
1639 mem_reserve_map[cnt].base = cpu_to_be64(base);
1640 mem_reserve_map[cnt].size = cpu_to_be64(size);
1641 mem_reserve_cnt = cnt + 1;
1642}
1643
1644/*
1645 * Initialize the memory allocation mechanism: parse "memory" nodes to
1646 * obtain the top of memory and of the RMO, and set up our local allocator
1647 */
1648static void __init prom_init_mem(void)
1649{
1650 phandle node;
1651 char type[64];
1652 unsigned int plen;
1653 cell_t *p, *endp;
1654 __be32 val;
1655 u32 rac, rsc;
1656
1657 /*
1658 * We iterate the memory nodes to find
1659 * 1) top of RMO (first node)
1660 * 2) top of memory
1661 */
1662 val = cpu_to_be32(2);
1663 prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1664 rac = be32_to_cpu(val);
1665 val = cpu_to_be32(1);
1666	prom_getprop(prom.root, "#size-cells", &val, sizeof(val));
1667 rsc = be32_to_cpu(val);
1668 prom_debug("root_addr_cells: %x\n", rac);
1669 prom_debug("root_size_cells: %x\n", rsc);
1670
1671 prom_debug("scanning memory:\n");
1672
1673 for (node = 0; prom_next_node(&node); ) {
1674 type[0] = 0;
1675 prom_getprop(node, "device_type", type, sizeof(type));
1676
1677 if (type[0] == 0) {
1678 /*
1679 * CHRP Longtrail machines have no device_type
1680 * on the memory node, so check the name instead...
1681 */
1682 prom_getprop(node, "name", type, sizeof(type));
1683 }
1684 if (prom_strcmp(type, "memory"))
1685 continue;
1686
1687 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1688 if (plen > sizeof(regbuf)) {
1689 prom_printf("memory node too large for buffer !\n");
1690 plen = sizeof(regbuf);
1691 }
1692 p = regbuf;
1693 endp = p + (plen / sizeof(cell_t));
1694
1695#ifdef DEBUG_PROM
1696 memset(prom_scratch, 0, sizeof(prom_scratch));
1697 call_prom("package-to-path", 3, 1, node, prom_scratch,
1698 sizeof(prom_scratch) - 1);
1699 prom_debug(" node %s :\n", prom_scratch);
1700#endif /* DEBUG_PROM */
1701
1702 while ((endp - p) >= (rac + rsc)) {
1703 unsigned long base, size;
1704
1705 base = prom_next_cell(rac, &p);
1706 size = prom_next_cell(rsc, &p);
1707
1708 if (size == 0)
1709 continue;
1710 prom_debug(" %lx %lx\n", base, size);
1711 if (base == 0 && (of_platform & PLATFORM_LPAR))
1712 rmo_top = size;
1713 if ((base + size) > ram_top)
1714 ram_top = base + size;
1715 }
1716 }
1717
1718 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1719
1720 /*
1721 * If prom_memory_limit is set we reduce the upper limits *except* for
1722 * alloc_top_high. This must be the real top of RAM so we can put
1723 * TCE's up there.
1724 */
1725
1726 alloc_top_high = ram_top;
1727
1728 if (prom_memory_limit) {
1729 if (prom_memory_limit <= alloc_bottom) {
1730 prom_printf("Ignoring mem=%lx <= alloc_bottom.\n",
1731 prom_memory_limit);
1732 prom_memory_limit = 0;
1733 } else if (prom_memory_limit >= ram_top) {
1734 prom_printf("Ignoring mem=%lx >= ram_top.\n",
1735 prom_memory_limit);
1736 prom_memory_limit = 0;
1737 } else {
1738 ram_top = prom_memory_limit;
1739 rmo_top = min(rmo_top, prom_memory_limit);
1740 }
1741 }
1742
1743 /*
1744	 * Set up our top alloc point, that is the top of RMO or the top of
1745	 * segment 0 when running non-LPAR.
1746 * Some RS64 machines have buggy firmware where claims up at
1747 * 1GB fail. Cap at 768MB as a workaround.
1748 * Since 768MB is plenty of room, and we need to cap to something
1749 * reasonable on 32-bit, cap at 768MB on all machines.
1750 */
1751 if (!rmo_top)
1752 rmo_top = ram_top;
1753 rmo_top = min(0x30000000ul, rmo_top);
1754 alloc_top = rmo_top;
1755 alloc_top_high = ram_top;
1756
1757 /*
1758 * Check if we have an initrd after the kernel but still inside
1759	 * the RMO. If we do, move our bottom point to after it.
1760 */
1761 if (prom_initrd_start &&
1762 prom_initrd_start < rmo_top &&
1763 prom_initrd_end > alloc_bottom)
1764 alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1765
1766 prom_printf("memory layout at init:\n");
1767 prom_printf(" memory_limit : %lx (16 MB aligned)\n",
1768 prom_memory_limit);
1769 prom_printf(" alloc_bottom : %lx\n", alloc_bottom);
1770 prom_printf(" alloc_top : %lx\n", alloc_top);
1771 prom_printf(" alloc_top_hi : %lx\n", alloc_top_high);
1772 prom_printf(" rmo_top : %lx\n", rmo_top);
1773 prom_printf(" ram_top : %lx\n", ram_top);
1774}
1775
1776static void __init prom_close_stdin(void)
1777{
1778 __be32 val;
1779 ihandle stdin;
1780
1781 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1782 stdin = be32_to_cpu(val);
1783 call_prom("close", 1, 0, stdin);
1784 }
1785}
1786
1787#ifdef CONFIG_PPC_SVM
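/*
 * Enter RTAS through the H_RTAS hypercall: r3 carries the hcall number and
 * r4 the real address of the struct rtas_args buffer. Used here (under
 * CONFIG_PPC_SVM) so that a secure guest can still issue ibm,os-term.
 */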
1788static int prom_rtas_hcall(uint64_t args)
1789{
1790 register uint64_t arg1 asm("r3") = H_RTAS;
1791 register uint64_t arg2 asm("r4") = args;
1792
1793 asm volatile("sc 1\n" : "=r" (arg1) :
1794 "r" (arg1),
1795 "r" (arg2) :);
1796 srr_regs_clobbered();
1797
1798 return arg1;
1799}
1800
1801static struct rtas_args __prombss os_term_args;
1802
1803static void __init prom_rtas_os_term(char *str)
1804{
1805 phandle rtas_node;
1806 __be32 val;
1807 u32 token;
1808
1809 prom_debug("%s: start...\n", __func__);
1810 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1811 prom_debug("rtas_node: %x\n", rtas_node);
1812 if (!PHANDLE_VALID(rtas_node))
1813 return;
1814
1815 val = 0;
1816 prom_getprop(rtas_node, "ibm,os-term", &val, sizeof(val));
1817 token = be32_to_cpu(val);
1818 prom_debug("ibm,os-term: %x\n", token);
1819 if (token == 0)
1820 prom_panic("Could not get token for ibm,os-term\n");
1821 os_term_args.token = cpu_to_be32(token);
1822 os_term_args.nargs = cpu_to_be32(1);
1823 os_term_args.nret = cpu_to_be32(1);
1824 os_term_args.args[0] = cpu_to_be32(__pa(str));
1825 prom_rtas_hcall((uint64_t)&os_term_args);
1826}
1827#endif /* CONFIG_PPC_SVM */
1828
1829/*
1830 * Allocate room for and instantiate RTAS
1831 */
1832static void __init prom_instantiate_rtas(void)
1833{
1834 phandle rtas_node;
1835 ihandle rtas_inst;
1836 u32 base, entry = 0;
1837 __be32 val;
1838 u32 size = 0;
1839
1840 prom_debug("prom_instantiate_rtas: start...\n");
1841
1842 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1843 prom_debug("rtas_node: %x\n", rtas_node);
1844 if (!PHANDLE_VALID(rtas_node))
1845 return;
1846
1847 val = 0;
1848	prom_getprop(rtas_node, "rtas-size", &val, sizeof(val));
1849 size = be32_to_cpu(val);
1850 if (size == 0)
1851 return;
1852
1853 base = alloc_down(size, PAGE_SIZE, 0);
1854 if (base == 0)
1855 prom_panic("Could not allocate memory for RTAS\n");
1856
1857 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1858 if (!IHANDLE_VALID(rtas_inst)) {
1859 prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1860 return;
1861 }
1862
1863 prom_printf("instantiating rtas at 0x%x...", base);
1864
1865 if (call_prom_ret("call-method", 3, 2, &entry,
1866 ADDR("instantiate-rtas"),
1867 rtas_inst, base) != 0
1868 || entry == 0) {
1869 prom_printf(" failed\n");
1870 return;
1871 }
1872 prom_printf(" done\n");
1873
1874 reserve_mem(base, size);
1875
1876 val = cpu_to_be32(base);
1877 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1878 &val, sizeof(val));
1879 val = cpu_to_be32(entry);
1880 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1881 &val, sizeof(val));
1882
1883 /* Check if it supports "query-cpu-stopped-state" */
1884 if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1885 &val, sizeof(val)) != PROM_ERROR)
1886 rtas_has_query_cpu_stopped = true;
1887
1888 prom_debug("rtas base = 0x%x\n", base);
1889 prom_debug("rtas entry = 0x%x\n", entry);
1890 prom_debug("rtas size = 0x%x\n", size);
1891
1892 prom_debug("prom_instantiate_rtas: end...\n");
1893}
1894
1895#ifdef CONFIG_PPC64
1896/*
1897 * Allocate room for and instantiate Stored Measurement Log (SML)
1898 */
1899static void __init prom_instantiate_sml(void)
1900{
1901 phandle ibmvtpm_node;
1902 ihandle ibmvtpm_inst;
1903 u32 entry = 0, size = 0, succ = 0;
1904 u64 base;
1905 __be32 val;
1906
1907 prom_debug("prom_instantiate_sml: start...\n");
1908
1909 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
1910 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1911 if (!PHANDLE_VALID(ibmvtpm_node))
1912 return;
1913
1914 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
1915 if (!IHANDLE_VALID(ibmvtpm_inst)) {
1916 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1917 return;
1918 }
1919
1920 if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
1921 &val, sizeof(val)) != PROM_ERROR) {
1922 if (call_prom_ret("call-method", 2, 2, &succ,
1923 ADDR("reformat-sml-to-efi-alignment"),
1924 ibmvtpm_inst) != 0 || succ == 0) {
1925 prom_printf("Reformat SML to EFI alignment failed\n");
1926 return;
1927 }
1928
1929 if (call_prom_ret("call-method", 2, 2, &size,
1930 ADDR("sml-get-allocated-size"),
1931 ibmvtpm_inst) != 0 || size == 0) {
1932 prom_printf("SML get allocated size failed\n");
1933 return;
1934 }
1935 } else {
1936 if (call_prom_ret("call-method", 2, 2, &size,
1937 ADDR("sml-get-handover-size"),
1938 ibmvtpm_inst) != 0 || size == 0) {
1939 prom_printf("SML get handover size failed\n");
1940 return;
1941 }
1942 }
1943
1944 base = alloc_down(size, PAGE_SIZE, 0);
1945 if (base == 0)
1946 prom_panic("Could not allocate memory for sml\n");
1947
1948 prom_printf("instantiating sml at 0x%llx...", base);
1949
1950 memset((void *)base, 0, size);
1951
1952 if (call_prom_ret("call-method", 4, 2, &entry,
1953 ADDR("sml-handover"),
1954 ibmvtpm_inst, size, base) != 0 || entry == 0) {
1955 prom_printf("SML handover failed\n");
1956 return;
1957 }
1958 prom_printf(" done\n");
1959
1960 reserve_mem(base, size);
1961
1962 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
1963 &base, sizeof(base));
1964 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
1965 &size, sizeof(size));
1966
1967 prom_debug("sml base = 0x%llx\n", base);
1968 prom_debug("sml size = 0x%x\n", size);
1969
1970 prom_debug("prom_instantiate_sml: end...\n");
1971}
1972
1973/*
1974 * Allocate room for and initialize TCE tables
1975 */
1976#ifdef __BIG_ENDIAN__
1977static void __init prom_initialize_tce_table(void)
1978{
1979 phandle node;
1980 ihandle phb_node;
1981 char compatible[64], type[64], model[64];
1982 char *path = prom_scratch;
1983 u64 base, align;
1984 u32 minalign, minsize;
1985 u64 tce_entry, *tce_entryp;
1986 u64 local_alloc_top, local_alloc_bottom;
1987 u64 i;
1988
1989 if (prom_iommu_off)
1990 return;
1991
1992 prom_debug("starting prom_initialize_tce_table\n");
1993
1994 /* Cache current top of allocs so we reserve a single block */
1995 local_alloc_top = alloc_top_high;
1996 local_alloc_bottom = local_alloc_top;
1997
1998 /* Search all nodes looking for PHBs. */
1999 for (node = 0; prom_next_node(&node); ) {
2000 compatible[0] = 0;
2001 type[0] = 0;
2002 model[0] = 0;
2003 prom_getprop(node, "compatible",
2004 compatible, sizeof(compatible));
2005 prom_getprop(node, "device_type", type, sizeof(type));
2006 prom_getprop(node, "model", model, sizeof(model));
2007
2008 if ((type[0] == 0) || (prom_strstr(type, "pci") == NULL))
2009 continue;
2010
2011 /* Keep the old logic intact to avoid regression. */
2012 if (compatible[0] != 0) {
2013 if ((prom_strstr(compatible, "python") == NULL) &&
2014 (prom_strstr(compatible, "Speedwagon") == NULL) &&
2015 (prom_strstr(compatible, "Winnipeg") == NULL))
2016 continue;
2017 } else if (model[0] != 0) {
2018 if ((prom_strstr(model, "ython") == NULL) &&
2019 (prom_strstr(model, "peedwagon") == NULL) &&
2020 (prom_strstr(model, "innipeg") == NULL))
2021 continue;
2022 }
2023
2024 if (prom_getprop(node, "tce-table-minalign", &minalign,
2025 sizeof(minalign)) == PROM_ERROR)
2026 minalign = 0;
2027 if (prom_getprop(node, "tce-table-minsize", &minsize,
2028 sizeof(minsize)) == PROM_ERROR)
2029 minsize = 4UL << 20;
2030
2031 /*
2032 * Even though we read what OF wants, we just set the table
2033 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
2034 * By doing this, we avoid the pitfalls of trying to DMA to
2035 * MMIO space and the DMA alias hole.
2036 */
2037 minsize = 4UL << 20;
2038
2039 /* Align to the greater of the align or size */
2040 align = max(minalign, minsize);
2041 base = alloc_down(minsize, align, 1);
2042 if (base == 0)
2043 prom_panic("ERROR, cannot find space for TCE table.\n");
2044 if (base < local_alloc_bottom)
2045 local_alloc_bottom = base;
2046
2047 /* It seems OF doesn't null-terminate the path :-( */
2048 memset(path, 0, sizeof(prom_scratch));
2049 /* Call OF to setup the TCE hardware */
2050 if (call_prom("package-to-path", 3, 1, node,
2051 path, sizeof(prom_scratch) - 1) == PROM_ERROR) {
2052 prom_printf("package-to-path failed\n");
2053 }
2054
2055 /* Save away the TCE table attributes for later use. */
2056 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
2057 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
2058
2059 prom_debug("TCE table: %s\n", path);
2060 prom_debug("\tnode = 0x%x\n", node);
2061 prom_debug("\tbase = 0x%llx\n", base);
2062 prom_debug("\tsize = 0x%x\n", minsize);
2063
2064 /* Initialize the table to have a one-to-one mapping
2065 * over the allocated size.
2066 */
2067 tce_entryp = (u64 *)base;
2068 for (i = 0; i < (minsize >> 3); tce_entryp++, i++) {
2069 tce_entry = (i << PAGE_SHIFT);
2070 tce_entry |= 0x3; /* read and write permitted */
2071 *tce_entryp = tce_entry;
2072 }
2073
2074 prom_printf("opening PHB %s", path);
2075 phb_node = call_prom("open", 1, 1, path);
2076 if (phb_node == 0)
2077 prom_printf("... failed\n");
2078 else
2079 prom_printf("... done\n");
2080
2081 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
2082 phb_node, -1, minsize,
2083 (u32) base, (u32) (base >> 32));
2084 call_prom("close", 1, 0, phb_node);
2085 }
2086
2087 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
2088
2089 /* These are only really needed if there is a memory limit in
2090 * effect, but we don't know that yet, so export them always. */
2091 prom_tce_alloc_start = local_alloc_bottom;
2092 prom_tce_alloc_end = local_alloc_top;
2093
2094 /* Flag the first invalid entry */
2095 prom_debug("ending prom_initialize_tce_table\n");
2096}
2097#endif /* __BIG_ENDIAN__ */
2098#endif /* CONFIG_PPC64 */
2099
2100/*
2101 * With CHRP SMP we need to use the OF to start the other processors.
2102 * We can't wait until smp_boot_cpus (the OF is trashed by then)
2103 * so we have to put the processors into a holding pattern controlled
2104 * by the kernel (not OF) before we destroy the OF.
2105 *
2106 * This uses a chunk of low memory, puts some holding pattern
2107 * code there and sends the other processors off to there until
2108 * smp_boot_cpus tells them to do something. The holding pattern
2109 * checks that address until its cpu # is there, when it is that
2110 * checks that address until its cpu # is written there; when it is, that
2111 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
2112 *
2113 * We also use physical address 0x4 here to tell when a cpu
2114 * is in its holding pattern code.
2115 *
2116 * -- Cort
2117 */
2118/*
2119 * We want to reference the copy of __secondary_hold_* in the
2120 * 0 - 0x100 address range
2121 */
2122#define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
2123
2124static void __init prom_hold_cpus(void)
2125{
2126 unsigned long i;
2127 phandle node;
2128 char type[64];
2129 unsigned long *spinloop
2130 = (void *) LOW_ADDR(__secondary_hold_spinloop);
2131 unsigned long *acknowledge
2132 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
2133 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
2134
2135 /*
2136 * On pseries, if RTAS supports "query-cpu-stopped-state",
2137 * we skip this stage; the CPUs will be started by the
2138 * kernel using RTAS.
2139 */
2140 if ((of_platform == PLATFORM_PSERIES ||
2141 of_platform == PLATFORM_PSERIES_LPAR) &&
2142 rtas_has_query_cpu_stopped) {
2143 prom_printf("prom_hold_cpus: skipped\n");
2144 return;
2145 }
2146
2147 prom_debug("prom_hold_cpus: start...\n");
2148 prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop);
2149 prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop);
2150 prom_debug(" 1) acknowledge = 0x%lx\n",
2151 (unsigned long)acknowledge);
2152 prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge);
2153 prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold);
2154
2155 /* Set the common spinloop variable, so all of the secondary cpus
2156 * will block when they are awakened from their OF spinloop.
2157 * This must occur for both SMP and non-SMP kernels, since OF will
2158 * be trashed when we move the kernel.
2159 */
2160 *spinloop = 0;
2161
2162 /* look for cpus */
2163 for (node = 0; prom_next_node(&node); ) {
2164 unsigned int cpu_no;
2165 __be32 reg;
2166
2167 type[0] = 0;
2168 prom_getprop(node, "device_type", type, sizeof(type));
2169 if (prom_strcmp(type, "cpu") != 0)
2170 continue;
2171
2172 /* Skip non-configured cpus. */
2173 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
2174 if (prom_strcmp(type, "okay") != 0)
2175 continue;
2176
2177 reg = cpu_to_be32(-1); /* make sparse happy */
2178 prom_getprop(node, "reg", &reg, sizeof(reg));
2179 cpu_no = be32_to_cpu(reg);
2180
2181 prom_debug("cpu hw idx = %u\n", cpu_no);
2182
2183 /* Init the acknowledge var which will be reset by
2184 * the secondary cpu when it awakens from its OF
2185 * spinloop.
2186 */
2187 *acknowledge = (unsigned long)-1;
2188
2189 if (cpu_no != prom.cpu) {
2190 /* Primary Thread of non-boot cpu or any thread */
2191 prom_printf("starting cpu hw idx %u... ", cpu_no);
2192 call_prom("start-cpu", 3, 0, node,
2193 secondary_hold, cpu_no);
2194
2195 for (i = 0; (i < 100000000) &&
2196 (*acknowledge == ((unsigned long)-1)); i++)
2197 mb();
2198
2199 if (*acknowledge == cpu_no)
2200 prom_printf("done\n");
2201 else
2202 prom_printf("failed: %lx\n", *acknowledge);
2203 }
2204#ifdef CONFIG_SMP
2205 else
2206 prom_printf("boot cpu hw idx %u\n", cpu_no);
2207#endif /* CONFIG_SMP */
2208 }
2209
2210 prom_debug("prom_hold_cpus: end...\n");
2211}
2212
2213
2214static void __init prom_init_client_services(unsigned long pp)
2215{
2216 /* Get a handle to the prom entry point before anything else */
2217 prom_entry = pp;
2218
2219 /* get a handle for the stdout device */
2220 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
2221 if (!PHANDLE_VALID(prom.chosen))
2222 prom_panic("cannot find chosen"); /* msg won't be printed :( */
2223
2224 /* get device tree root */
2225 prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
2226 if (!PHANDLE_VALID(prom.root))
2227 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
2228
2229 prom.mmumap = 0;
2230}
2231
2232#ifdef CONFIG_PPC32
2233/*
2234 * For really old powermacs, we need to map things we claim.
2235 * For that, we need the ihandle of the mmu.
2236 * Also, on the longtrail, we need to work around other bugs.
2237 */
2238static void __init prom_find_mmu(void)
2239{
2240 phandle oprom;
2241 char version[64];
2242
2243 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
2244 if (!PHANDLE_VALID(oprom))
2245 return;
2246 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
2247 return;
2248 version[sizeof(version) - 1] = 0;
2249 /* XXX might need to add other versions here */
2250 if (prom_strcmp(version, "Open Firmware, 1.0.5") == 0)
2251 of_workarounds = OF_WA_CLAIM;
2252 else if (prom_strncmp(version, "FirmWorks,3.", 12) == 0) {
2253 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
2254 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
2255 } else
2256 return;
2257 prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
2258 prom_getprop(prom.chosen, "mmu", &prom.mmumap,
2259 sizeof(prom.mmumap));
2260 prom.mmumap = be32_to_cpu(prom.mmumap);
2261 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
2262 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
2263}
2264#else
2265#define prom_find_mmu()
2266#endif
2267
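/*
 * Resolve the "stdout" ihandle from /chosen, record its full path in
 * /chosen as "linux,stdout-path", and note it as the boot display if it
 * turns out to be one.
 */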
2268static void __init prom_init_stdout(void)
2269{
2270 char *path = of_stdout_device;
2271 char type[16];
2272 phandle stdout_node;
2273 __be32 val;
2274
2275 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
2276 prom_panic("cannot find stdout");
2277
2278 prom.stdout = be32_to_cpu(val);
2279
2280 /* Get the full OF pathname of the stdout device */
2281 memset(path, 0, 256);
2282 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
2283 prom_printf("OF stdout device is: %s\n", of_stdout_device);
2284 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
2285 path, prom_strlen(path) + 1);
2286
2287 /* instance-to-package fails on PA-Semi */
2288 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
2289 if (stdout_node != PROM_ERROR) {
2290 val = cpu_to_be32(stdout_node);
2291
2292 /* If it's a display, note it */
2293 memset(type, 0, sizeof(type));
2294 prom_getprop(stdout_node, "device_type", type, sizeof(type));
2295 if (prom_strcmp(type, "display") == 0)
2296 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
2297 }
2298}
2299
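/*
 * Work out which platform family we are booting on, based on the root
 * node's "compatible" and "device_type" properties (and, on ppc64, the
 * presence of /rtas and ibm,hypertas-functions).
 */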
2300static int __init prom_find_machine_type(void)
2301{
2302 char compat[256];
2303 int len, i = 0;
2304#ifdef CONFIG_PPC64
2305 phandle rtas;
2306 int x;
2307#endif
2308
2309 /* Look for a PowerMac or a Cell */
2310 len = prom_getprop(prom.root, "compatible",
2311 compat, sizeof(compat)-1);
2312 if (len > 0) {
2313 compat[len] = 0;
2314 while (i < len) {
2315 char *p = &compat[i];
2316 int sl = prom_strlen(p);
2317 if (sl == 0)
2318 break;
2319 if (prom_strstr(p, "Power Macintosh") ||
2320 prom_strstr(p, "MacRISC"))
2321 return PLATFORM_POWERMAC;
2322#ifdef CONFIG_PPC64
2323 /* We must make sure we don't detect the IBM Cell
2324 * blades as pSeries due to some firmware issues,
2325 * so we check for them here.
2326 */
2327 if (prom_strstr(p, "IBM,CBEA") ||
2328 prom_strstr(p, "IBM,CPBW-1.0"))
2329 return PLATFORM_GENERIC;
2330#endif /* CONFIG_PPC64 */
2331 i += sl + 1;
2332 }
2333 }
2334#ifdef CONFIG_PPC64
2335 /* Try to figure out if it's an IBM pSeries or any other
2336 * PAPR-compliant platform. We assume it is if:
2337 * - /device_type is "chrp" (please, do NOT use that for future
2338 * non-IBM designs!)
2339 * - it has /rtas
2340 */
2341 len = prom_getprop(prom.root, "device_type",
2342 compat, sizeof(compat)-1);
2343 if (len <= 0)
2344 return PLATFORM_GENERIC;
2345 if (prom_strcmp(compat, "chrp"))
2346 return PLATFORM_GENERIC;
2347
2348 /* Default to pSeries. We need to know if we are running LPAR */
2349 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2350 if (!PHANDLE_VALID(rtas))
2351 return PLATFORM_GENERIC;
2352 x = prom_getproplen(rtas, "ibm,hypertas-functions");
2353 if (x != PROM_ERROR) {
2354 prom_debug("Hypertas detected, assuming LPAR !\n");
2355 return PLATFORM_PSERIES_LPAR;
2356 }
2357 return PLATFORM_PSERIES;
2358#else
2359 return PLATFORM_GENERIC;
2360#endif
2361}
2362
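/* Set colour index i of a display to (r, g, b) using the OF "color!" method. */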
2363static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2364{
2365 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2366}
2367
2368/*
2369 * If we have a display that we don't know how to drive,
2370 * we will want to try to execute OF's open method for it
2371 * later. However, OF will probably fall over if we do that
2372 * after we've taken over the MMU.
2373 * So we check whether we will need to open the display,
2374 * and if so, open it now.
2375 */
2376static void __init prom_check_displays(void)
2377{
2378 char type[16], *path;
2379 phandle node;
2380 ihandle ih;
2381 int i;
2382
2383 static const unsigned char default_colors[] __initconst = {
2384 0x00, 0x00, 0x00,
2385 0x00, 0x00, 0xaa,
2386 0x00, 0xaa, 0x00,
2387 0x00, 0xaa, 0xaa,
2388 0xaa, 0x00, 0x00,
2389 0xaa, 0x00, 0xaa,
2390 0xaa, 0xaa, 0x00,
2391 0xaa, 0xaa, 0xaa,
2392 0x55, 0x55, 0x55,
2393 0x55, 0x55, 0xff,
2394 0x55, 0xff, 0x55,
2395 0x55, 0xff, 0xff,
2396 0xff, 0x55, 0x55,
2397 0xff, 0x55, 0xff,
2398 0xff, 0xff, 0x55,
2399 0xff, 0xff, 0xff
2400 };
2401 const unsigned char *clut;
2402
2403 prom_debug("Looking for displays\n");
2404 for (node = 0; prom_next_node(&node); ) {
2405 memset(type, 0, sizeof(type));
2406 prom_getprop(node, "device_type", type, sizeof(type));
2407 if (prom_strcmp(type, "display") != 0)
2408 continue;
2409
2410 /* It seems OF doesn't null-terminate the path :-( */
2411 path = prom_scratch;
2412 memset(path, 0, sizeof(prom_scratch));
2413
2414 /*
2415 * leave some room at the end of the path for appending extra
2416 * arguments
2417 */
2418 if (call_prom("package-to-path", 3, 1, node, path,
2419 sizeof(prom_scratch) - 10) == PROM_ERROR)
2420 continue;
2421 prom_printf("found display : %s, opening... ", path);
2422
2423 ih = call_prom("open", 1, 1, path);
2424 if (ih == 0) {
2425 prom_printf("failed\n");
2426 continue;
2427 }
2428
2429 /* Success */
2430 prom_printf("done\n");
2431 prom_setprop(node, path, "linux,opened", NULL, 0);
2432
2433 /* Set up a usable color table when the appropriate
2434 * method is available. Should update this to set-colors */
2435 clut = default_colors;
2436 for (i = 0; i < 16; i++, clut += 3)
2437 if (prom_set_color(ih, i, clut[0], clut[1],
2438 clut[2]) != 0)
2439 break;
2440
2441#ifdef CONFIG_LOGO_LINUX_CLUT224
2442 clut = PTRRELOC(logo_linux_clut224.clut);
2443 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2444 if (prom_set_color(ih, i + 32, clut[0], clut[1],
2445 clut[2]) != 0)
2446 break;
2447#endif /* CONFIG_LOGO_LINUX_CLUT224 */
2448
2449#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2450 if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2451 PROM_ERROR) {
2452 u32 width, height, pitch, addr;
2453
2454 prom_printf("Setting btext !\n");
2455
2456 if (prom_getprop(node, "width", &width, 4) == PROM_ERROR)
2457 return;
2458
2459 if (prom_getprop(node, "height", &height, 4) == PROM_ERROR)
2460 return;
2461
2462 if (prom_getprop(node, "linebytes", &pitch, 4) == PROM_ERROR)
2463 return;
2464
2465 if (prom_getprop(node, "address", &addr, 4) == PROM_ERROR)
2466 return;
2467
2468 prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2469 width, height, pitch, addr);
2470 btext_setup_display(width, height, 8, pitch, addr);
2471 btext_prepare_BAT();
2472 }
2473#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2474 }
2475}
2476
2477
2478/* Return a pointer to the requested amount of memory, claiming a further chunk if required. */
2479static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2480 unsigned long needed, unsigned long align)
2481{
2482 void *ret;
2483
2484 *mem_start = ALIGN(*mem_start, align);
2485 while ((*mem_start + needed) > *mem_end) {
2486 unsigned long room, chunk;
2487
2488 prom_debug("Chunk exhausted, claiming more at %lx...\n",
2489 alloc_bottom);
2490 room = alloc_top - alloc_bottom;
2491 if (room > DEVTREE_CHUNK_SIZE)
2492 room = DEVTREE_CHUNK_SIZE;
2493 if (room < PAGE_SIZE)
2494 prom_panic("No memory for flatten_device_tree "
2495 "(no room)\n");
2496 chunk = alloc_up(room, 0);
2497 if (chunk == 0)
2498 prom_panic("No memory for flatten_device_tree "
2499 "(claim failed)\n");
2500 *mem_end = chunk + room;
2501 }
2502
2503 ret = (void *)*mem_start;
2504 *mem_start += needed;
2505
2506 return ret;
2507}
2508
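/* Append a single 32-bit big-endian token to the flattened tree being built. */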
2509#define dt_push_token(token, mem_start, mem_end) do { \
2510 void *room = make_room(mem_start, mem_end, 4, 4); \
2511 *(__be32 *)room = cpu_to_be32(token); \
2512 } while(0)
2513
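/*
 * Look up a property name in the strings block built so far and return its
 * offset from dt_string_start, or 0 if it has not been stored yet.
 */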
2514static unsigned long __init dt_find_string(char *str)
2515{
2516 char *s, *os;
2517
2518 s = os = (char *)dt_string_start;
2519 s += 4;
2520 while (s < (char *)dt_string_end) {
2521 if (prom_strcmp(s, str) == 0)
2522 return s - os;
2523 s += prom_strlen(s) + 1;
2524 }
2525 return 0;
2526}
2527
2528/*
2529 * The Open Firmware 1275 specification states properties must be 31 bytes or
2530 * less; however, not all firmware obeys this. Make it 64 bytes to be safe.
2531 */
2532#define MAX_PROPERTY_NAME 64
2533
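/*
 * Walk the device tree recursively and copy every property name into the
 * strings block, reusing an existing entry when dt_find_string() already
 * knows the name.
 */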
2534static void __init scan_dt_build_strings(phandle node,
2535 unsigned long *mem_start,
2536 unsigned long *mem_end)
2537{
2538 char *prev_name, *namep, *sstart;
2539 unsigned long soff;
2540 phandle child;
2541
2542 sstart = (char *)dt_string_start;
2543
2544 /* get and store all property names */
2545 prev_name = "";
2546 for (;;) {
2547 /* 64 is max len of name including nul. */
2548 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2549 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2550 /* No more properties: unwind alloc */
2551 *mem_start = (unsigned long)namep;
2552 break;
2553 }
2554
2555 /* skip "name" */
2556 if (prom_strcmp(namep, "name") == 0) {
2557 *mem_start = (unsigned long)namep;
2558 prev_name = "name";
2559 continue;
2560 }
2561 /* get/create string entry */
2562 soff = dt_find_string(namep);
2563 if (soff != 0) {
2564 *mem_start = (unsigned long)namep;
2565 namep = sstart + soff;
2566 } else {
2567 /* New name: keep it, trimming the allocation to its actual length */
2568 *mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
2569 dt_string_end = *mem_start;
2570 }
2571 prev_name = namep;
2572 }
2573
2574 /* do all our children */
2575 child = call_prom("child", 1, 1, node);
2576 while (child != 0) {
2577 scan_dt_build_strings(child, mem_start, mem_end);
2578 child = call_prom("peer", 1, 1, child);
2579 }
2580}
2581
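/*
 * Emit the structure block for one node: an OF_DT_BEGIN_NODE token with the
 * unit name, one OF_DT_PROP record per property (adding a "phandle" property
 * if the node has none), the same for all children, then OF_DT_END_NODE.
 */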
2582static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2583 unsigned long *mem_end)
2584{
2585 phandle child;
2586 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2587 unsigned long soff;
2588 unsigned char *valp;
2589 static char pname[MAX_PROPERTY_NAME] __prombss;
2590 int l, room, has_phandle = 0;
2591
2592 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2593
2594 /* get the node's full name */
2595 namep = (char *)*mem_start;
2596 room = *mem_end - *mem_start;
2597 if (room > 255)
2598 room = 255;
2599 l = call_prom("package-to-path", 3, 1, node, namep, room);
2600 if (l >= 0) {
2601 /* Didn't fit? Get more room. */
2602 if (l >= room) {
2603 if (l >= *mem_end - *mem_start)
2604 namep = make_room(mem_start, mem_end, l+1, 1);
2605 call_prom("package-to-path", 3, 1, node, namep, l);
2606 }
2607 namep[l] = '\0';
2608
2609 /* Fixup an Apple bug where they have bogus \0 chars in the
2610 * middle of the path in some properties, and extract
2611 * the unit name (everything after the last '/').
2612 */
2613 for (lp = p = namep, ep = namep + l; p < ep; p++) {
2614 if (*p == '/')
2615 lp = namep;
2616 else if (*p != 0)
2617 *lp++ = *p;
2618 }
2619 *lp = 0;
2620 *mem_start = ALIGN((unsigned long)lp + 1, 4);
2621 }
2622
2623 /* get it again for debugging */
2624 path = prom_scratch;
2625 memset(path, 0, sizeof(prom_scratch));
2626 call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1);
2627
2628 /* get and store all properties */
2629 prev_name = "";
2630 sstart = (char *)dt_string_start;
2631 for (;;) {
2632 if (call_prom("nextprop", 3, 1, node, prev_name,
2633 pname) != 1)
2634 break;
2635
2636 /* skip "name" */
2637 if (prom_strcmp(pname, "name") == 0) {
2638 prev_name = "name";
2639 continue;
2640 }
2641
2642 /* find string offset */
2643 soff = dt_find_string(pname);
2644 if (soff == 0) {
2645 prom_printf("WARNING: Can't find string index for"
2646 " <%s>, node %s\n", pname, path);
2647 break;
2648 }
2649 prev_name = sstart + soff;
2650
2651 /* get length */
2652 l = call_prom("getproplen", 2, 1, node, pname);
2653
2654 /* sanity checks */
2655 if (l == PROM_ERROR)
2656 continue;
2657
2658 /* push property head */
2659 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2660 dt_push_token(l, mem_start, mem_end);
2661 dt_push_token(soff, mem_start, mem_end);
2662
2663 /* push property content */
2664 valp = make_room(mem_start, mem_end, l, 4);
2665 call_prom("getprop", 4, 1, node, pname, valp, l);
2666 *mem_start = ALIGN(*mem_start, 4);
2667
2668 if (!prom_strcmp(pname, "phandle"))
2669 has_phandle = 1;
2670 }
2671
2672 /* Add a "phandle" property if none already exists */
2673 if (!has_phandle) {
2674 soff = dt_find_string("phandle");
2675 if (soff == 0)
2676 prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path);
2677 else {
2678 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2679 dt_push_token(4, mem_start, mem_end);
2680 dt_push_token(soff, mem_start, mem_end);
2681 valp = make_room(mem_start, mem_end, 4, 4);
2682 *(__be32 *)valp = cpu_to_be32(node);
2683 }
2684 }
2685
2686 /* do all our children */
2687 child = call_prom("child", 1, 1, node);
2688 while (child != 0) {
2689 scan_dt_build_struct(child, mem_start, mem_end);
2690 child = call_prom("peer", 1, 1, child);
2691 }
2692
2693 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2694}
2695
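/*
 * Build the flattened device tree that is handed to the kernel: a
 * boot_param_header, the memory reserve map, the strings block and the
 * structure block, with the offset of each recorded in the header.
 */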
2696static void __init flatten_device_tree(void)
2697{
2698 phandle root;
2699 unsigned long mem_start, mem_end, room;
2700 struct boot_param_header *hdr;
2701 char *namep;
2702 u64 *rsvmap;
2703
2704 /*
2705 * Check how much room we have between alloc top & bottom (+/- a
2706 * few pages), crop to 1MB, as this is our "chunk" size
2707 */
2708 room = alloc_top - alloc_bottom - 0x4000;
2709 if (room > DEVTREE_CHUNK_SIZE)
2710 room = DEVTREE_CHUNK_SIZE;
2711 prom_debug("starting device tree allocs at %lx\n", alloc_bottom);
2712
2713 /* Now try to claim that */
2714 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2715 if (mem_start == 0)
2716 prom_panic("Can't allocate initial device-tree chunk\n");
2717 mem_end = mem_start + room;
2718
2719 /* Get root of tree */
2720 root = call_prom("peer", 1, 1, (phandle)0);
2721 if (root == (phandle)0)
2722 prom_panic ("couldn't get device tree root\n");
2723
2724 /* Build header and make room for mem rsv map */
2725 mem_start = ALIGN(mem_start, 4);
2726 hdr = make_room(&mem_start, &mem_end,
2727 sizeof(struct boot_param_header), 4);
2728 dt_header_start = (unsigned long)hdr;
2729 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2730
2731 /* Start of strings */
2732 mem_start = PAGE_ALIGN(mem_start);
2733 dt_string_start = mem_start;
2734 mem_start += 4; /* hole */
2735
2736 /* Add "phandle" in there, we'll need it */
2737 namep = make_room(&mem_start, &mem_end, 16, 1);
2738 prom_strscpy_pad(namep, "phandle", sizeof("phandle"));
2739 mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
2740
2741 /* Build string array */
2742 prom_printf("Building dt strings...\n");
2743 scan_dt_build_strings(root, &mem_start, &mem_end);
2744 dt_string_end = mem_start;
2745
2746 /* Build structure */
2747 mem_start = PAGE_ALIGN(mem_start);
2748 dt_struct_start = mem_start;
2749 prom_printf("Building dt structure...\n");
2750 scan_dt_build_struct(root, &mem_start, &mem_end);
2751 dt_push_token(OF_DT_END, &mem_start, &mem_end);
2752 dt_struct_end = PAGE_ALIGN(mem_start);
2753
2754 /* Finish header */
2755 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2756 hdr->magic = cpu_to_be32(OF_DT_HEADER);
2757 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2758 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2759 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2760 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2761 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2762 hdr->version = cpu_to_be32(OF_DT_VERSION);
2763 /* Version 16 is not backward compatible */
2764 hdr->last_comp_version = cpu_to_be32(0x10);
2765
2766 /* Copy the reserve map in */
2767 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2768
2769#ifdef DEBUG_PROM
2770 {
2771 int i;
2772 prom_printf("reserved memory map:\n");
2773 for (i = 0; i < mem_reserve_cnt; i++)
2774 prom_printf(" %llx - %llx\n",
2775 be64_to_cpu(mem_reserve_map[i].base),
2776 be64_to_cpu(mem_reserve_map[i].size));
2777 }
2778#endif
2779 /* Bump mem_reserve_cnt to cause further reservations to fail
2780 * since it's too late.
2781 */
2782 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2783
2784 prom_printf("Device tree strings 0x%lx -> 0x%lx\n",
2785 dt_string_start, dt_string_end);
2786 prom_printf("Device tree struct 0x%lx -> 0x%lx\n",
2787 dt_struct_start, dt_struct_end);
2788}
2789
2790#ifdef CONFIG_PPC_MAPLE
2791/* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2792 * The values are bad, and it doesn't even have the right number of cells. */
2793static void __init fixup_device_tree_maple(void)
2794{
2795 phandle isa;
2796 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2797 u32 isa_ranges[6];
2798 char *name;
2799
2800 name = "/ht@0/isa@4";
2801 isa = call_prom("finddevice", 1, 1, ADDR(name));
2802 if (!PHANDLE_VALID(isa)) {
2803 name = "/ht@0/isa@6";
2804 isa = call_prom("finddevice", 1, 1, ADDR(name));
2805 rloc = 0x01003000; /* IO space; PCI device = 6 */
2806 }
2807 if (!PHANDLE_VALID(isa))
2808 return;
2809
2810 if (prom_getproplen(isa, "ranges") != 12)
2811 return;
2812 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2813 == PROM_ERROR)
2814 return;
2815
2816 if (isa_ranges[0] != 0x1 ||
2817 isa_ranges[1] != 0xf4000000 ||
2818 isa_ranges[2] != 0x00010000)
2819 return;
2820
2821 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2822
2823 isa_ranges[0] = 0x1;
2824 isa_ranges[1] = 0x0;
2825 isa_ranges[2] = rloc;
2826 isa_ranges[3] = 0x0;
2827 isa_ranges[4] = 0x0;
2828 isa_ranges[5] = 0x00010000;
2829 prom_setprop(isa, name, "ranges",
2830 isa_ranges, sizeof(isa_ranges));
2831}
2832
2833#define CPC925_MC_START 0xf8000000
2834#define CPC925_MC_LENGTH 0x1000000
2835/* The values for the memory-controller don't have the right number of cells */
2836static void __init fixup_device_tree_maple_memory_controller(void)
2837{
2838 phandle mc;
2839 u32 mc_reg[4];
2840 char *name = "/hostbridge@f8000000";
2841 u32 ac, sc;
2842
2843 mc = call_prom("finddevice", 1, 1, ADDR(name));
2844 if (!PHANDLE_VALID(mc))
2845 return;
2846
2847 if (prom_getproplen(mc, "reg") != 8)
2848 return;
2849
2850 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2851 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2852 if ((ac != 2) || (sc != 2))
2853 return;
2854
2855 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2856 return;
2857
2858 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2859 return;
2860
2861 prom_printf("Fixing up bogus hostbridge on Maple...\n");
2862
2863 mc_reg[0] = 0x0;
2864 mc_reg[1] = CPC925_MC_START;
2865 mc_reg[2] = 0x0;
2866 mc_reg[3] = CPC925_MC_LENGTH;
2867 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2868}
2869#else
2870#define fixup_device_tree_maple()
2871#define fixup_device_tree_maple_memory_controller()
2872#endif
2873
2874#ifdef CONFIG_PPC_CHRP
2875/*
2876 * Pegasos and BriQ lack the "ranges" property in the isa node
2877 * Pegasos needs decimal IRQ 14/15, not hexadecimal
2878 * Pegasos has the IDE configured in legacy mode, but advertised as native
2879 */
2880static void __init fixup_device_tree_chrp(void)
2881{
2882 phandle ph;
2883 u32 prop[6];
2884 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2885 char *name;
2886 int rc;
2887
2888 name = "/pci@80000000/isa@c";
2889 ph = call_prom("finddevice", 1, 1, ADDR(name));
2890 if (!PHANDLE_VALID(ph)) {
2891 name = "/pci@ff500000/isa@6";
2892 ph = call_prom("finddevice", 1, 1, ADDR(name));
2893 rloc = 0x01003000; /* IO space; PCI device = 6 */
2894 }
2895 if (PHANDLE_VALID(ph)) {
2896 rc = prom_getproplen(ph, "ranges");
2897 if (rc == 0 || rc == PROM_ERROR) {
2898 prom_printf("Fixing up missing ISA range on Pegasos...\n");
2899
2900 prop[0] = 0x1;
2901 prop[1] = 0x0;
2902 prop[2] = rloc;
2903 prop[3] = 0x0;
2904 prop[4] = 0x0;
2905 prop[5] = 0x00010000;
2906 prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2907 }
2908 }
2909
2910 name = "/pci@80000000/ide@C,1";
2911 ph = call_prom("finddevice", 1, 1, ADDR(name));
2912 if (PHANDLE_VALID(ph)) {
2913 prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2914 prop[0] = 14;
2915 prop[1] = 0x0;
2916 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2917 prom_printf("Fixing up IDE class-code on Pegasos...\n");
2918 rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2919 if (rc == sizeof(u32)) {
2920 prop[0] &= ~0x5;
2921 prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2922 }
2923 }
2924}
2925#else
2926#define fixup_device_tree_chrp()
2927#endif
2928
2929#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
2930static void __init fixup_device_tree_pmac(void)
2931{
2932 phandle u3, i2c, mpic;
2933 u32 u3_rev;
2934 u32 interrupts[2];
2935 u32 parent;
2936
2937 /* Some G5s have a missing interrupt definition; fix it up here */
2938 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2939 if (!PHANDLE_VALID(u3))
2940 return;
2941 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2942 if (!PHANDLE_VALID(i2c))
2943 return;
2944 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2945 if (!PHANDLE_VALID(mpic))
2946 return;
2947
2948 /* check if proper rev of u3 */
2949 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2950 == PROM_ERROR)
2951 return;
2952 if (u3_rev < 0x35 || u3_rev > 0x39)
2953 return;
2954 /* does it need fixup? */
2955 if (prom_getproplen(i2c, "interrupts") > 0)
2956 return;
2957
2958 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2959
2960 /* interrupt on this revision of u3 is number 0 and level sensitive */
2961 interrupts[0] = 0;
2962 interrupts[1] = 1;
2963 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2964 &interrupts, sizeof(interrupts));
2965 parent = (u32)mpic;
2966 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2967 &parent, sizeof(parent));
2968}
2969#else
2970#define fixup_device_tree_pmac()
2971#endif
2972
2973#ifdef CONFIG_PPC_EFIKA
2974/*
2975 * The MPC5200 FEC driver requires a phy-handle property to tell it how
2976 * to talk to the phy. If the phy-handle property is missing, then this
2977 * function is called to add the appropriate nodes and link it to the
2978 * ethernet node.
2979 */
2980static void __init fixup_device_tree_efika_add_phy(void)
2981{
2982 u32 node;
2983 char prop[64];
2984 int rv;
2985
2986 /* Check if /builtin/ethernet exists - bail if it doesn't */
2987 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2988 if (!PHANDLE_VALID(node))
2989 return;
2990
2991 /* Check if the phy-handle property exists - bail if it does */
2992 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2993 if (!rv)
2994 return;
2995
2996 /*
2997 * At this point the ethernet device doesn't have a phy described.
2998 * Now we need to add the missing phy node and linkage
2999 */
3000
3001 /* Check for an MDIO bus node - if missing then create one */
3002 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
3003 if (!PHANDLE_VALID(node)) {
3004 prom_printf("Adding Ethernet MDIO node\n");
3005 call_prom("interpret", 1, 1,
3006 " s\" /builtin\" find-device"
3007 " new-device"
3008 " 1 encode-int s\" #address-cells\" property"
3009 " 0 encode-int s\" #size-cells\" property"
3010 " s\" mdio\" device-name"
3011 " s\" fsl,mpc5200b-mdio\" encode-string"
3012 " s\" compatible\" property"
3013 " 0xf0003000 0x400 reg"
3014 " 0x2 encode-int"
3015 " 0x5 encode-int encode+"
3016 " 0x3 encode-int encode+"
3017 " s\" interrupts\" property"
3018 " finish-device");
3019 }
3020
3021 /* Check for a PHY device node - if missing then create one and
3022 * give its phandle to the ethernet node */
3023 node = call_prom("finddevice", 1, 1,
3024 ADDR("/builtin/mdio/ethernet-phy"));
3025 if (!PHANDLE_VALID(node)) {
3026 prom_printf("Adding Ethernet PHY node\n");
3027 call_prom("interpret", 1, 1,
3028 " s\" /builtin/mdio\" find-device"
3029 " new-device"
3030 " s\" ethernet-phy\" device-name"
3031 " 0x10 encode-int s\" reg\" property"
3032 " my-self"
3033 " ihandle>phandle"
3034 " finish-device"
3035 " s\" /builtin/ethernet\" find-device"
3036 " encode-int"
3037 " s\" phy-handle\" property"
3038 " device-end");
3039 }
3040}
3041
3042static void __init fixup_device_tree_efika(void)
3043{
3044 int sound_irq[3] = { 2, 2, 0 };
3045 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
3046 3,4,0, 3,5,0, 3,6,0, 3,7,0,
3047 3,8,0, 3,9,0, 3,10,0, 3,11,0,
3048 3,12,0, 3,13,0, 3,14,0, 3,15,0 };
3049 u32 node;
3050 char prop[64];
3051 int rv, len;
3052
3053 /* Check if we're really running on an EFIKA */
3054 node = call_prom("finddevice", 1, 1, ADDR("/"));
3055 if (!PHANDLE_VALID(node))
3056 return;
3057
3058 rv = prom_getprop(node, "model", prop, sizeof(prop));
3059 if (rv == PROM_ERROR)
3060 return;
3061 if (prom_strcmp(prop, "EFIKA5K2"))
3062 return;
3063
3064 prom_printf("Applying EFIKA device tree fixups\n");
3065
3066 /* Claiming to be 'chrp' is death */
3067 node = call_prom("finddevice", 1, 1, ADDR("/"));
3068 rv = prom_getprop(node, "device_type", prop, sizeof(prop));
3069 if (rv != PROM_ERROR && (prom_strcmp(prop, "chrp") == 0))
3070 prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
3071
3072 /* CODEGEN,description is exposed in /proc/cpuinfo so
3073 fix that too */
3074 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
3075 if (rv != PROM_ERROR && (prom_strstr(prop, "CHRP")))
3076 prom_setprop(node, "/", "CODEGEN,description",
3077 "Efika 5200B PowerPC System",
3078 sizeof("Efika 5200B PowerPC System"));
3079
3080 /* Fixup bestcomm interrupts property */
3081 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
3082 if (PHANDLE_VALID(node)) {
3083 len = prom_getproplen(node, "interrupts");
3084 if (len == 12) {
3085 prom_printf("Fixing bestcomm interrupts property\n");
3086 prom_setprop(node, "/builtin/bestcom", "interrupts",
3087 bcomm_irq, sizeof(bcomm_irq));
3088 }
3089 }
3090
3091 /* Fixup sound interrupts property */
3092 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
3093 if (PHANDLE_VALID(node)) {
3094 rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
3095 if (rv == PROM_ERROR) {
3096 prom_printf("Adding sound interrupts property\n");
3097 prom_setprop(node, "/builtin/sound", "interrupts",
3098 sound_irq, sizeof(sound_irq));
3099 }
3100 }
3101
3102 /* Make sure ethernet phy-handle property exists */
3103 fixup_device_tree_efika_add_phy();
3104}
3105#else
3106#define fixup_device_tree_efika()
3107#endif
3108
3109#ifdef CONFIG_PPC_PASEMI_NEMO
3110/*
3111 * CFE as supplied on Nemo is broken in several ways; the biggest
3112 * problem is that it reassigns ISA interrupts to unused mpic ints.
3113 * Add an interrupt-controller property for the io-bridge to use
3114 * and correct the ints so we can attach them to an irq_domain.
3115 */
3116static void __init fixup_device_tree_pasemi(void)
3117{
3118 u32 interrupts[2], parent, rval, val = 0;
3119 char *name, *pci_name;
3120 phandle iob, node;
3121
3122 /* Find the root pci node */
3123 name = "/pxp@0,e0000000";
3124 iob = call_prom("finddevice", 1, 1, ADDR(name));
3125 if (!PHANDLE_VALID(iob))
3126 return;
3127
3128 /* check if the interrupt-controller property has been set yet */
3129 if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR)
3130 return;
3131
3132 prom_printf("adding interrupt-controller property for SB600...\n");
3133
3134 prom_setprop(iob, name, "interrupt-controller", &val, 0);
3135
3136 pci_name = "/pxp@0,e0000000/pci@11";
3137 node = call_prom("finddevice", 1, 1, ADDR(pci_name));
3138 parent = ADDR(iob);
3139
3140 for ( ; prom_next_node(&node); ) {
3141 /* scan each node for one with an interrupt */
3142 if (!PHANDLE_VALID(node))
3143 continue;
3144
3145 rval = prom_getproplen(node, "interrupts");
3146 if (rval == 0 || rval == PROM_ERROR)
3147 continue;
3148
3149 prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
3150 if ((interrupts[0] < 212) || (interrupts[0] > 222))
3151 continue;
3152
3153 /* found a node, update both interrupts and interrupt-parent */
3154 if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
3155 interrupts[0] -= 203;
3156 if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
3157 interrupts[0] -= 213;
3158 if (interrupts[0] == 221)
3159 interrupts[0] = 14;
3160 if (interrupts[0] == 222)
3161 interrupts[0] = 8;
3162
3163 prom_setprop(node, pci_name, "interrupts", interrupts,
3164 sizeof(interrupts));
3165 prom_setprop(node, pci_name, "interrupt-parent", &parent,
3166 sizeof(parent));
3167 }
3168
3169 /*
3170 * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
3171 * so that generic isa-bridge code can add the SB600 and its on-board
3172 * peripherals.
3173 */
3174 name = "/pxp@0,e0000000/io-bridge@0";
3175 iob = call_prom("finddevice", 1, 1, ADDR(name));
3176 if (!PHANDLE_VALID(iob))
3177 return;
3178
3179 /* device_type is already set, just change it. */
3180
3181 prom_printf("Changing device_type of SB600 node...\n");
3182
3183 prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
3184}
3185#else /* !CONFIG_PPC_PASEMI_NEMO */
3186static inline void fixup_device_tree_pasemi(void) { }
3187#endif
3188
3189static void __init fixup_device_tree(void)
3190{
3191 fixup_device_tree_maple();
3192 fixup_device_tree_maple_memory_controller();
3193 fixup_device_tree_chrp();
3194 fixup_device_tree_pmac();
3195 fixup_device_tree_efika();
3196 fixup_device_tree_pasemi();
3197}
3198
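/*
 * Record the hardware index of the CPU we are booting on, taken from the
 * "reg" property of the package behind /chosen's "cpu" ihandle. It ends up
 * in the flattened tree header as boot_cpuid_phys.
 */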
3199static void __init prom_find_boot_cpu(void)
3200{
3201 __be32 rval;
3202 ihandle prom_cpu;
3203 phandle cpu_pkg;
3204
3205 rval = 0;
3206 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
3207 return;
3208 prom_cpu = be32_to_cpu(rval);
3209
3210 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
3211
3212 if (!PHANDLE_VALID(cpu_pkg))
3213 return;
3214
3215 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
3216 prom.cpu = be32_to_cpu(rval);
3217
3218 prom_debug("Booting CPU hw index = %d\n", prom.cpu);
3219}
3220
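/*
 * If the boot loader passed an initrd in r3/r4, publish its location in
 * /chosen as linux,initrd-start / linux,initrd-end and reserve the memory
 * so it is not reused by early allocations.
 */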
3221static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
3222{
3223#ifdef CONFIG_BLK_DEV_INITRD
3224 if (r3 && r4 && r4 != 0xdeadbeef) {
3225 __be64 val;
3226
3227 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
3228 prom_initrd_end = prom_initrd_start + r4;
3229
3230 val = cpu_to_be64(prom_initrd_start);
3231 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
3232 &val, sizeof(val));
3233 val = cpu_to_be64(prom_initrd_end);
3234 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
3235 &val, sizeof(val));
3236
3237 reserve_mem(prom_initrd_start,
3238 prom_initrd_end - prom_initrd_start);
3239
3240 prom_debug("initrd_start=0x%lx\n", prom_initrd_start);
3241 prom_debug("initrd_end=0x%lx\n", prom_initrd_end);
3242 }
3243#endif /* CONFIG_BLK_DEV_INITRD */
3244}
3245
3246#ifdef CONFIG_PPC_SVM
3247/*
3248 * Perform the Enter Secure Mode ultracall.
3249 */
3250static int enter_secure_mode(unsigned long kbase, unsigned long fdt)
3251{
3252 register unsigned long r3 asm("r3") = UV_ESM;
3253 register unsigned long r4 asm("r4") = kbase;
3254 register unsigned long r5 asm("r5") = fdt;
3255
3256 asm volatile("sc 2" : "+r"(r3) : "r"(r4), "r"(r5));
3257
3258 return r3;
3259}
3260
3261/*
3262 * Call the Ultravisor to transfer us to secure memory if we have an ESM blob.
3263 */
3264static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
3265{
3266 int ret;
3267
3268 if (!prom_svm_enable)
3269 return;
3270
3271 /* Switch to secure mode. */
3272 prom_printf("Switching to secure mode.\n");
3273
3274 /*
3275 * The ultravisor will do an integrity check of the kernel image, but we
3276 * relocated it, so the check will fail. Restore the original image by
3277 * relocating it back to the kernel virtual base address.
3278 */
3279 relocate(KERNELBASE);
3280
3281 ret = enter_secure_mode(kbase, fdt);
3282
3283 /* Relocate the kernel again. */
3284 relocate(kbase);
3285
3286 if (ret != U_SUCCESS) {
3287 prom_printf("Returned %d from switching to secure mode.\n", ret);
3288 prom_rtas_os_term("Switch to secure mode failed.\n");
3289 }
3290}
3291#else
3292static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
3293{
3294}
3295#endif /* CONFIG_PPC_SVM */
3296
3297/*
3298 * We enter here early on, when the Open Firmware prom is still
3299 * handling exceptions and managing the MMU hash table for us.
3300 */
3301
3302unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3303 unsigned long pp,
3304 unsigned long r6, unsigned long r7,
3305 unsigned long kbase)
3306{
3307 unsigned long hdr;
3308
3309#ifdef CONFIG_PPC32
3310 unsigned long offset = reloc_offset();
3311 reloc_got2(offset);
3312#endif
3313
3314 /*
3315 * First zero the BSS
3316 */
3317 memset(&__bss_start, 0, __bss_stop - __bss_start);
3318
3319 /*
3320 * Init interface to Open Firmware, get some node references,
3321 * like /chosen
3322 */
3323 prom_init_client_services(pp);
3324
3325 /*
3326 * See if this OF is old enough that we need to do explicit maps
3327 * and other workarounds
3328 */
3329 prom_find_mmu();
3330
3331 /*
3332 * Init prom stdout device
3333 */
3334 prom_init_stdout();
3335
3336 prom_printf("Preparing to boot %s", linux_banner);
3337
3338 /*
3339 * Get default machine type. At this point, we do not differentiate
3340 * between pSeries SMP and pSeries LPAR
3341 */
3342 of_platform = prom_find_machine_type();
3343 prom_printf("Detected machine type: %x\n", of_platform);
3344
3345#ifndef CONFIG_NONSTATIC_KERNEL
3346 /* Bail if this is a kdump kernel. */
3347 if (PHYSICAL_START > 0)
3348 prom_panic("Error: You can't boot a kdump kernel from OF!\n");
3349#endif
3350
3351 /*
3352 * Check for an initrd
3353 */
3354 prom_check_initrd(r3, r4);
3355
3356 /*
3357 * Do early parsing of command line
3358 */
3359 early_cmdline_parse();
3360
3361#ifdef CONFIG_PPC_PSERIES
3362 /*
3363 * On pSeries, inform the firmware about our capabilities
3364 */
3365 if (of_platform == PLATFORM_PSERIES ||
3366 of_platform == PLATFORM_PSERIES_LPAR)
3367 prom_send_capabilities();
3368#endif
3369
3370 /*
3371 * Copy the CPU hold code
3372 */
3373 if (of_platform != PLATFORM_POWERMAC)
3374 copy_and_flush(0, kbase, 0x100, 0);
3375
3376 /*
3377 * Initialize memory management within prom_init
3378 */
3379 prom_init_mem();
3380
3381 /*
3382 * Determine which cpu is actually running right _now_
3383 */
3384 prom_find_boot_cpu();
3385
3386 /*
3387 * Initialize display devices
3388 */
3389 prom_check_displays();
3390
3391#if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
3392 /*
3393 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
3394 * that uses the allocator; we need to make sure we get the top of memory
3395 * available for us here...
3396 */
3397 if (of_platform == PLATFORM_PSERIES)
3398 prom_initialize_tce_table();
3399#endif
3400
3401 /*
3402 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
3403 * have a usable RTAS implementation.
3404 */
3405 if (of_platform != PLATFORM_POWERMAC)
3406 prom_instantiate_rtas();
3407
3408#ifdef CONFIG_PPC64
3409 /* instantiate sml */
3410 prom_instantiate_sml();
3411#endif
3412
3413 /*
3414 * On non-powermacs, put all CPUs in spin-loops.
3415 *
3416 * PowerMacs use a different mechanism to spin CPUs
3417 *
3418 * (This must be done after instantiating RTAS.)
3419 */
3420 if (of_platform != PLATFORM_POWERMAC)
3421 prom_hold_cpus();
3422
3423 /*
3424 * Fill in some info for use by the kernel later on
3425 */
3426 if (prom_memory_limit) {
3427 __be64 val = cpu_to_be64(prom_memory_limit);
3428 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3429 &val, sizeof(val));
3430 }
3431#ifdef CONFIG_PPC64
3432 if (prom_iommu_off)
3433 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3434 NULL, 0);
3435
3436 if (prom_iommu_force_on)
3437 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
3438 NULL, 0);
3439
3440 if (prom_tce_alloc_start) {
3441 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3442 &prom_tce_alloc_start,
3443 sizeof(prom_tce_alloc_start));
3444 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3445 &prom_tce_alloc_end,
3446 sizeof(prom_tce_alloc_end));
3447 }
3448#endif
3449
3450 /*
3451 * Fixup any known bugs in the device-tree
3452 */
3453 fixup_device_tree();
3454
3455 /*
3456 * Now finally create the flattened device-tree
3457 */
3458 prom_printf("copying OF device tree...\n");
3459 flatten_device_tree();
3460
3461 /*
3462 * Close stdin now, in case it is USB and still active on IBM machines.
3463 * Unfortunately, quiesce crashes on some powermacs if we have
3464 * closed stdin already (in particular the powerbook 101).
3465 */
3466 if (of_platform != PLATFORM_POWERMAC)
3467 prom_close_stdin();
3468
3469 /*
3470 * Call OF "quiesce" method to shut down pending DMA's from
3471 * devices etc...
3472 */
3473 prom_printf("Quiescing Open Firmware ...\n");
3474 call_prom("quiesce", 0, 0);
3475
3476 /*
3477 * And finally, call the kernel passing it the flattened device
3478 * tree and NULL as r5, thus triggering the new entry point which
3479 * is common to us and kexec
3480 */
3481 hdr = dt_header_start;
3482
3483 prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
3484 prom_debug("->dt_header_start=0x%lx\n", hdr);
3485
3486#ifdef CONFIG_PPC32
3487 reloc_got2(-offset);
3488#endif
3489
3490 /* Move to secure memory if we're supposed to be secure guests. */
3491 setup_secure_guest(kbase, hdr);
3492
3493 __start(hdr, kbase, 0, 0, 0, 0, 0);
3494
3495 return 0;
3496}
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Procedures for interfacing to Open Firmware.
4 *
5 * Paul Mackerras August 1996.
6 * Copyright (C) 1996-2005 Paul Mackerras.
7 *
8 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
9 * {engebret|bergner}@us.ibm.com
10 */
11
12#undef DEBUG_PROM
13
14/* we cannot use FORTIFY as it brings in new symbols */
15#define __NO_FORTIFY
16
17#include <linux/stdarg.h>
18#include <linux/kernel.h>
19#include <linux/string.h>
20#include <linux/init.h>
21#include <linux/threads.h>
22#include <linux/spinlock.h>
23#include <linux/types.h>
24#include <linux/pci.h>
25#include <linux/proc_fs.h>
26#include <linux/delay.h>
27#include <linux/initrd.h>
28#include <linux/bitops.h>
29#include <linux/pgtable.h>
30#include <linux/printk.h>
31#include <linux/of.h>
32#include <linux/of_fdt.h>
33#include <asm/prom.h>
34#include <asm/rtas.h>
35#include <asm/page.h>
36#include <asm/processor.h>
37#include <asm/interrupt.h>
38#include <asm/irq.h>
39#include <asm/io.h>
40#include <asm/smp.h>
41#include <asm/mmu.h>
42#include <asm/iommu.h>
43#include <asm/btext.h>
44#include <asm/sections.h>
45#include <asm/setup.h>
46#include <asm/asm-prototypes.h>
47#include <asm/ultravisor-api.h>
48
49#include <linux/linux_logo.h>
50
51/* All of prom_init bss lives here */
52#define __prombss __section(".bss.prominit")
53
54/*
55 * Eventually bump that one up
56 */
57#define DEVTREE_CHUNK_SIZE 0x100000
58
59/*
60 * This is the size of the local memory reserve map that gets copied
61 * into the boot params passed to the kernel. That size is totally
62 * flexible as the kernel just reads the list until it encounters an
63 * entry with size 0, so it can be changed without breaking binary
64 * compatibility
65 */
66#define MEM_RESERVE_MAP_SIZE 8
67
68/*
69 * prom_init() is called very early on, before the kernel text
70 * and data have been mapped to KERNELBASE. At this point the code
71 * is running at whatever address it has been loaded at.
72 * On ppc32 we compile with -mrelocatable, which means that references
73 * to extern and static variables get relocated automatically.
74 * ppc64 objects are always relocatable, we just need to relocate the
75 * TOC.
76 *
77 * Because OF may have mapped I/O devices into the area starting at
78 * KERNELBASE, particularly on CHRP machines, we can't safely call
79 * OF once the kernel has been mapped to KERNELBASE. Therefore all
80 * OF calls must be done within prom_init().
81 *
82 * ADDR is used in calls to call_prom. The 4th and following
83 * arguments to call_prom should be 32-bit values.
84 * On ppc64, 64 bit values are truncated to 32 bits (and
85 * fortunately don't get interpreted as two arguments).
86 */
87#define ADDR(x) (u32)(unsigned long)(x)
88
89#ifdef CONFIG_PPC64
90#define OF_WORKAROUNDS 0
91#else
92#define OF_WORKAROUNDS of_workarounds
93static int of_workarounds __prombss;
94#endif
95
96#define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
97#define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
98
99#ifdef DEBUG_PROM
100#define prom_debug(x...) prom_printf(x)
101#else
102#define prom_debug(x...) do { } while (0)
103#endif
104
105
106typedef u32 prom_arg_t;
107
108struct prom_args {
109 __be32 service;
110 __be32 nargs;
111 __be32 nret;
112 __be32 args[10];
113};
114
115struct prom_t {
116 ihandle root;
117 phandle chosen;
118 int cpu;
119 ihandle stdout;
120 ihandle mmumap;
121 ihandle memory;
122};
123
124struct mem_map_entry {
125 __be64 base;
126 __be64 size;
127};
128
129typedef __be32 cell_t;
130
131extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
132 unsigned long r6, unsigned long r7, unsigned long r8,
133 unsigned long r9);
134
135#ifdef CONFIG_PPC64
136extern int enter_prom(struct prom_args *args, unsigned long entry);
137#else
138static inline int enter_prom(struct prom_args *args, unsigned long entry)
139{
140 return ((int (*)(struct prom_args *))entry)(args);
141}
142#endif
143
144extern void copy_and_flush(unsigned long dest, unsigned long src,
145 unsigned long size, unsigned long offset);
146
147/* prom structure */
148static struct prom_t __prombss prom;
149
150static unsigned long __prombss prom_entry;
151
152static char __prombss of_stdout_device[256];
153static char __prombss prom_scratch[256];
154
155static unsigned long __prombss dt_header_start;
156static unsigned long __prombss dt_struct_start, dt_struct_end;
157static unsigned long __prombss dt_string_start, dt_string_end;
158
159static unsigned long __prombss prom_initrd_start, prom_initrd_end;
160
161#ifdef CONFIG_PPC64
162static int __prombss prom_iommu_force_on;
163static int __prombss prom_iommu_off;
164static unsigned long __prombss prom_tce_alloc_start;
165static unsigned long __prombss prom_tce_alloc_end;
166#endif
167
168#ifdef CONFIG_PPC_PSERIES
169static bool __prombss prom_radix_disable;
170static bool __prombss prom_radix_gtse_disable;
171static bool __prombss prom_xive_disable;
172#endif
173
174#ifdef CONFIG_PPC_SVM
175static bool __prombss prom_svm_enable;
176#endif
177
178struct platform_support {
179 bool hash_mmu;
180 bool radix_mmu;
181 bool radix_gtse;
182 bool xive;
183};
184
185/* Platforms codes are now obsolete in the kernel. Now only used within this
186 * file and ultimately gone too. Feel free to change them if you need, they
187 * are not shared with anything outside of this file anymore
188 */
189#define PLATFORM_PSERIES 0x0100
190#define PLATFORM_PSERIES_LPAR 0x0101
191#define PLATFORM_LPAR 0x0001
192#define PLATFORM_POWERMAC 0x0400
193#define PLATFORM_GENERIC 0x0500
194
195static int __prombss of_platform;
196
197static char __prombss prom_cmd_line[COMMAND_LINE_SIZE];
198
199static unsigned long __prombss prom_memory_limit;
200
201static unsigned long __prombss alloc_top;
202static unsigned long __prombss alloc_top_high;
203static unsigned long __prombss alloc_bottom;
204static unsigned long __prombss rmo_top;
205static unsigned long __prombss ram_top;
206
207static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE];
208static int __prombss mem_reserve_cnt;
209
210static cell_t __prombss regbuf[1024];
211
212static bool __prombss rtas_has_query_cpu_stopped;
213
214
215/*
216 * Error results ... some OF calls will return "-1" on error, some
217 * will return 0, some will return either. To simplify, here are
218 * macros to use with any ihandle or phandle return value to check if
219 * it is valid
220 */
221
222#define PROM_ERROR (-1u)
223#define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
224#define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
225
226/* Copied from lib/string.c and lib/kstrtox.c */
227
228static int __init prom_strcmp(const char *cs, const char *ct)
229{
230 unsigned char c1, c2;
231
232 while (1) {
233 c1 = *cs++;
234 c2 = *ct++;
235 if (c1 != c2)
236 return c1 < c2 ? -1 : 1;
237 if (!c1)
238 break;
239 }
240 return 0;
241}
242
243static ssize_t __init prom_strscpy_pad(char *dest, const char *src, size_t n)
244{
245 ssize_t rc;
246 size_t i;
247
248 if (n == 0 || n > INT_MAX)
249 return -E2BIG;
250
251 // Copy up to n bytes
252 for (i = 0; i < n && src[i] != '\0'; i++)
253 dest[i] = src[i];
254
255 rc = i;
256
257 // If we copied all n then we have run out of space for the nul
258 if (rc == n) {
259 // Rewind by one character to ensure nul termination
260 i--;
261 rc = -E2BIG;
262 }
263
264 for (; i < n; i++)
265 dest[i] = '\0';
266
267 return rc;
268}
269
270static int __init prom_strncmp(const char *cs, const char *ct, size_t count)
271{
272 unsigned char c1, c2;
273
274 while (count) {
275 c1 = *cs++;
276 c2 = *ct++;
277 if (c1 != c2)
278 return c1 < c2 ? -1 : 1;
279 if (!c1)
280 break;
281 count--;
282 }
283 return 0;
284}
285
286static size_t __init prom_strlen(const char *s)
287{
288 const char *sc;
289
290 for (sc = s; *sc != '\0'; ++sc)
291 /* nothing */;
292 return sc - s;
293}
294
295static int __init prom_memcmp(const void *cs, const void *ct, size_t count)
296{
297 const unsigned char *su1, *su2;
298 int res = 0;
299
300 for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
301 if ((res = *su1 - *su2) != 0)
302 break;
303 return res;
304}
305
306static char __init *prom_strstr(const char *s1, const char *s2)
307{
308 size_t l1, l2;
309
310 l2 = prom_strlen(s2);
311 if (!l2)
312 return (char *)s1;
313 l1 = prom_strlen(s1);
314 while (l1 >= l2) {
315 l1--;
316 if (!prom_memcmp(s1, s2, l2))
317 return (char *)s1;
318 s1++;
319 }
320 return NULL;
321}
322
323static size_t __init prom_strlcat(char *dest, const char *src, size_t count)
324{
325 size_t dsize = prom_strlen(dest);
326 size_t len = prom_strlen(src);
327 size_t res = dsize + len;
328
329 /* This would be a bug */
330 if (dsize >= count)
331 return count;
332
333 dest += dsize;
334 count -= dsize;
335 if (len >= count)
336 len = count-1;
337 memcpy(dest, src, len);
338 dest[len] = 0;
339 return res;
340
341}
342
343#ifdef CONFIG_PPC_PSERIES
344static int __init prom_strtobool(const char *s, bool *res)
345{
346 if (!s)
347 return -EINVAL;
348
349 switch (s[0]) {
350 case 'y':
351 case 'Y':
352 case '1':
353 *res = true;
354 return 0;
355 case 'n':
356 case 'N':
357 case '0':
358 *res = false;
359 return 0;
360 case 'o':
361 case 'O':
362 switch (s[1]) {
363 case 'n':
364 case 'N':
365 *res = true;
366 return 0;
367 case 'f':
368 case 'F':
369 *res = false;
370 return 0;
371 default:
372 break;
373 }
374 break;
375 default:
376 break;
377 }
378
379 return -EINVAL;
380}
381#endif
382
383/* This is the one and *ONLY* place where we actually call open
384 * firmware.
385 */
386
387static int __init call_prom(const char *service, int nargs, int nret, ...)
388{
389 int i;
390 struct prom_args args;
391 va_list list;
392
393 args.service = cpu_to_be32(ADDR(service));
394 args.nargs = cpu_to_be32(nargs);
395 args.nret = cpu_to_be32(nret);
396
397 va_start(list, nret);
398 for (i = 0; i < nargs; i++)
399 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
400 va_end(list);
401
402 for (i = 0; i < nret; i++)
403 args.args[nargs+i] = 0;
404
405 if (enter_prom(&args, prom_entry) < 0)
406 return PROM_ERROR;
407
408 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
409}
410
411static int __init call_prom_ret(const char *service, int nargs, int nret,
412 prom_arg_t *rets, ...)
413{
414 int i;
415 struct prom_args args;
416 va_list list;
417
418 args.service = cpu_to_be32(ADDR(service));
419 args.nargs = cpu_to_be32(nargs);
420 args.nret = cpu_to_be32(nret);
421
422 va_start(list, rets);
423 for (i = 0; i < nargs; i++)
424 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
425 va_end(list);
426
427 for (i = 0; i < nret; i++)
428 args.args[nargs+i] = 0;
429
430 if (enter_prom(&args, prom_entry) < 0)
431 return PROM_ERROR;
432
433 if (rets != NULL)
434 for (i = 1; i < nret; ++i)
435 rets[i-1] = be32_to_cpu(args.args[nargs+i]);
436
437 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
438}
439
440
441static void __init prom_print(const char *msg)
442{
443 const char *p, *q;
444
445 if (prom.stdout == 0)
446 return;
447
448 for (p = msg; *p != 0; p = q) {
449 for (q = p; *q != 0 && *q != '\n'; ++q)
450 ;
451 if (q > p)
452 call_prom("write", 3, 1, prom.stdout, p, q - p);
453 if (*q == 0)
454 break;
455 ++q;
456 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
457 }
458}
459
460
461/*
462 * Both prom_print_hex & prom_print_dec takes an unsigned long as input so that
463 * we do not need __udivdi3 or __umoddi3 on 32bits.
464 */
465static void __init prom_print_hex(unsigned long val)
466{
467 int i, nibbles = sizeof(val)*2;
468 char buf[sizeof(val)*2+1];
469
470 for (i = nibbles-1; i >= 0; i--) {
471 buf[i] = (val & 0xf) + '0';
472 if (buf[i] > '9')
473 buf[i] += ('a'-'0'-10);
474 val >>= 4;
475 }
476 buf[nibbles] = '\0';
477 call_prom("write", 3, 1, prom.stdout, buf, nibbles);
478}
479
480/* max number of decimal digits in an unsigned long */
481#define UL_DIGITS 21
482static void __init prom_print_dec(unsigned long val)
483{
484 int i, size;
485 char buf[UL_DIGITS+1];
486
487 for (i = UL_DIGITS-1; i >= 0; i--) {
488 buf[i] = (val % 10) + '0';
489 val = val/10;
490 if (val == 0)
491 break;
492 }
493 /* shift stuff down */
494 size = UL_DIGITS - i;
495 call_prom("write", 3, 1, prom.stdout, buf+i, size);
496}
497
498__printf(1, 2)
499static void __init prom_printf(const char *format, ...)
500{
501 const char *p, *q, *s;
502 va_list args;
503 unsigned long v;
504 long vs;
505 int n = 0;
506
507 va_start(args, format);
508 for (p = format; *p != 0; p = q) {
509 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
510 ;
511 if (q > p)
512 call_prom("write", 3, 1, prom.stdout, p, q - p);
513 if (*q == 0)
514 break;
515 if (*q == '\n') {
516 ++q;
517 call_prom("write", 3, 1, prom.stdout,
518 ADDR("\r\n"), 2);
519 continue;
520 }
521 ++q;
522 if (*q == 0)
523 break;
524 while (*q == 'l') {
525 ++q;
526 ++n;
527 }
528 switch (*q) {
529 case 's':
530 ++q;
531 s = va_arg(args, const char *);
532 prom_print(s);
533 break;
534 case 'x':
535 ++q;
536 switch (n) {
537 case 0:
538 v = va_arg(args, unsigned int);
539 break;
540 case 1:
541 v = va_arg(args, unsigned long);
542 break;
543 case 2:
544 default:
545 v = va_arg(args, unsigned long long);
546 break;
547 }
548 prom_print_hex(v);
549 break;
550 case 'u':
551 ++q;
552 switch (n) {
553 case 0:
554 v = va_arg(args, unsigned int);
555 break;
556 case 1:
557 v = va_arg(args, unsigned long);
558 break;
559 case 2:
560 default:
561 v = va_arg(args, unsigned long long);
562 break;
563 }
564 prom_print_dec(v);
565 break;
566 case 'd':
567 ++q;
568 switch (n) {
569 case 0:
570 vs = va_arg(args, int);
571 break;
572 case 1:
573 vs = va_arg(args, long);
574 break;
575 case 2:
576 default:
577 vs = va_arg(args, long long);
578 break;
579 }
580 if (vs < 0) {
581 prom_print("-");
582 vs = -vs;
583 }
584 prom_print_dec(vs);
585 break;
586 }
587 }
588 va_end(args);
589}
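
/*
 * Only a small set of conversions is handled here: %s, %x, %u and %d,
 * each optionally prefixed by 'l' or 'll', e.g.
 *
 *	prom_printf("base = 0x%lx, count = %u\n", base, count);
 *
 * A '\n' in the format string is expanded to "\r\n" for the OF console.
 */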
590
591
592static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
593 unsigned long align)
594{
595
596 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
597 /*
598 * Old OF requires we claim physical and virtual separately
599 * and then map explicitly (assuming virtual mode)
600 */
601 int ret;
602 prom_arg_t result;
603
604 ret = call_prom_ret("call-method", 5, 2, &result,
605 ADDR("claim"), prom.memory,
606 align, size, virt);
607 if (ret != 0 || result == -1)
608 return -1;
609 ret = call_prom_ret("call-method", 5, 2, &result,
610 ADDR("claim"), prom.mmumap,
611 align, size, virt);
612 if (ret != 0) {
613 call_prom("call-method", 4, 1, ADDR("release"),
614 prom.memory, size, virt);
615 return -1;
616 }
617 /* the 0x12 is M (coherence) + PP == read/write */
618 call_prom("call-method", 6, 1,
619 ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
620 return virt;
621 }
622 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
623 (prom_arg_t)align);
624}
625
626static void __init __attribute__((noreturn)) prom_panic(const char *reason)
627{
628 prom_print(reason);
629 /* Do not call exit because it clears the screen on pmac
630 * it also causes some sort of double-fault on early pmacs */
631 if (of_platform == PLATFORM_POWERMAC)
632 asm("trap\n");
633
634 /* ToDo: should put up an SRC here on pSeries */
635 call_prom("exit", 0, 0);
636
637 for (;;) /* should never get here */
638 ;
639}
640
641
642static int __init prom_next_node(phandle *nodep)
643{
644 phandle node;
645
646 if ((node = *nodep) != 0
647 && (*nodep = call_prom("child", 1, 1, node)) != 0)
648 return 1;
649 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
650 return 1;
651 for (;;) {
652 if ((node = call_prom("parent", 1, 1, node)) == 0)
653 return 0;
654 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
655 return 1;
656 }
657}
658
659static inline int __init prom_getprop(phandle node, const char *pname,
660 void *value, size_t valuelen)
661{
662 return call_prom("getprop", 4, 1, node, ADDR(pname),
663 (u32)(unsigned long) value, (u32) valuelen);
664}
665
666static inline int __init prom_getproplen(phandle node, const char *pname)
667{
668 return call_prom("getproplen", 2, 1, node, ADDR(pname));
669}
670
671static void __init add_string(char **str, const char *q)
672{
673 char *p = *str;
674
675 while (*q)
676 *p++ = *q++;
677 *p++ = ' ';
678 *str = p;
679}
680
681static char *__init tohex(unsigned int x)
682{
683 static const char digits[] __initconst = "0123456789abcdef";
684 static char result[9] __prombss;
685 int i;
686
687 result[8] = 0;
688 i = 8;
689 do {
690 --i;
691 result[i] = digits[x & 0xf];
692 x >>= 4;
693 } while (x != 0 && i > 0);
694 return &result[i];
695}
696
697static int __init prom_setprop(phandle node, const char *nodename,
698 const char *pname, void *value, size_t valuelen)
699{
700 char cmd[256], *p;
701
702 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
703 return call_prom("setprop", 4, 1, node, ADDR(pname),
704 (u32)(unsigned long) value, (u32) valuelen);
705
706 /* gah... setprop doesn't work on longtrail, have to use interpret */
707 p = cmd;
708 add_string(&p, "dev");
709 add_string(&p, nodename);
710 add_string(&p, tohex((u32)(unsigned long) value));
711 add_string(&p, tohex(valuelen));
712 add_string(&p, tohex(ADDR(pname)));
713 add_string(&p, tohex(prom_strlen(pname)));
714 add_string(&p, "property");
715 *p = 0;
716 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
717}
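
/*
 * On Longtrail the command string built above ends up looking roughly
 * like (the hex numbers being illustrative addresses and lengths):
 *
 *	dev /chosen 01f04000 12 01f05000 11 property
 *
 * i.e. select the node, push the value address and length, then the
 * property-name address and length, and run "property".
 */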
718
719/* We can't use the standard versions because of relocation headaches. */
720#define prom_isxdigit(c) \
721 (('0' <= (c) && (c) <= '9') || ('a' <= (c) && (c) <= 'f') || ('A' <= (c) && (c) <= 'F'))
722
723#define prom_isdigit(c) ('0' <= (c) && (c) <= '9')
724#define prom_islower(c) ('a' <= (c) && (c) <= 'z')
725#define prom_toupper(c) (prom_islower(c) ? ((c) - 'a' + 'A') : (c))
726
727static unsigned long __init prom_strtoul(const char *cp, const char **endp)
728{
729 unsigned long result = 0, base = 10, value;
730
731 if (*cp == '0') {
732 base = 8;
733 cp++;
734 if (prom_toupper(*cp) == 'X') {
735 cp++;
736 base = 16;
737 }
738 }
739
740 while (prom_isxdigit(*cp) &&
741 (value = prom_isdigit(*cp) ? *cp - '0' : prom_toupper(*cp) - 'A' + 10) < base) {
742 result = result * base + value;
743 cp++;
744 }
745
746 if (endp)
747 *endp = cp;
748
749 return result;
750}
751
752static unsigned long __init prom_memparse(const char *ptr, const char **retptr)
753{
754 unsigned long ret = prom_strtoul(ptr, retptr);
755 int shift = 0;
756
757 /*
758 * We can't use a switch here because GCC *may* generate a
759 * jump table which won't work, because we're not running at
760 * the address we're linked at.
761 */
762 if ('G' == **retptr || 'g' == **retptr)
763 shift = 30;
764
765 if ('M' == **retptr || 'm' == **retptr)
766 shift = 20;
767
768 if ('K' == **retptr || 'k' == **retptr)
769 shift = 10;
770
771 if (shift) {
772 ret <<= shift;
773 (*retptr)++;
774 }
775
776 return ret;
777}
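
/*
 * For example, prom_memparse("512M", &p) returns 512 << 20 = 0x20000000
 * and leaves p pointing just past the 'M'; a value with no suffix, such
 * as "0x4000", is returned unshifted.
 */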
778
779/*
780 * Early parsing of the command line passed to the kernel, used for
781 * "mem=x" and the options that affect the iommu
782 */
783static void __init early_cmdline_parse(void)
784{
785 const char *opt;
786
787 char *p;
788 int l = 0;
789
790 prom_cmd_line[0] = 0;
791 p = prom_cmd_line;
792
793 if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && (long)prom.chosen > 0)
794 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
795
796 if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) || l <= 0 || p[0] == '\0')
797 prom_strlcat(prom_cmd_line, " " CONFIG_CMDLINE,
798 sizeof(prom_cmd_line));
799
800 prom_printf("command line: %s\n", prom_cmd_line);
801
802#ifdef CONFIG_PPC64
803 opt = prom_strstr(prom_cmd_line, "iommu=");
804 if (opt) {
805 prom_printf("iommu opt is: %s\n", opt);
806 opt += 6;
807 while (*opt && *opt == ' ')
808 opt++;
809 if (!prom_strncmp(opt, "off", 3))
810 prom_iommu_off = 1;
811 else if (!prom_strncmp(opt, "force", 5))
812 prom_iommu_force_on = 1;
813 }
814#endif
815 opt = prom_strstr(prom_cmd_line, "mem=");
816 if (opt) {
817 opt += 4;
818 prom_memory_limit = prom_memparse(opt, (const char **)&opt);
819#ifdef CONFIG_PPC64
820 /* Align down to 16 MB which is large page size with hash page translation */
821 prom_memory_limit = ALIGN_DOWN(prom_memory_limit, SZ_16M);
822#endif
823 }
824
825#ifdef CONFIG_PPC_PSERIES
826 prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
827 opt = prom_strstr(prom_cmd_line, "disable_radix");
828 if (opt) {
829 opt += 13;
830 if (*opt && *opt == '=') {
831 bool val;
832
833 if (prom_strtobool(++opt, &val))
834 prom_radix_disable = false;
835 else
836 prom_radix_disable = val;
837 } else
838 prom_radix_disable = true;
839 }
840 if (prom_radix_disable)
841 prom_debug("Radix disabled from cmdline\n");
842
843 opt = prom_strstr(prom_cmd_line, "radix_hcall_invalidate=on");
844 if (opt) {
845 prom_radix_gtse_disable = true;
846 prom_debug("Radix GTSE disabled from cmdline\n");
847 }
848
849 opt = prom_strstr(prom_cmd_line, "xive=off");
850 if (opt) {
851 prom_xive_disable = true;
852 prom_debug("XIVE disabled from cmdline\n");
853 }
854#endif /* CONFIG_PPC_PSERIES */
855
856#ifdef CONFIG_PPC_SVM
857 opt = prom_strstr(prom_cmd_line, "svm=");
858 if (opt) {
859 bool val;
860
861 opt += sizeof("svm=") - 1;
862 if (!prom_strtobool(opt, &val))
863 prom_svm_enable = val;
864 }
865#endif /* CONFIG_PPC_SVM */
866}
867
868#ifdef CONFIG_PPC_PSERIES
869/*
870 * The architecture vector has an array of PVR mask/value pairs,
871 * followed by # option vectors - 1, followed by the option vectors.
872 *
873 * See prom.h for the definition of the bits specified in the
874 * architecture vector.
875 */
876
877/* Firmware expects the value to be n - 1, where n is the # of vectors */
878#define NUM_VECTORS(n) ((n) - 1)
879
880/*
881 * Firmware expects 1 + n - 2, where n is the length of the option vector in
882 * bytes. The 1 accounts for the length byte itself; the need for the - 2 is less obvious.
883 */
884#define VECTOR_LENGTH(n) (1 + (n) - 2)
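
/*
 * For example, struct option_vector1 below is 3 bytes long, so
 * VECTOR_LENGTH(sizeof(struct option_vector1)) = 1 + 3 - 2 = 2 is what
 * gets stored in vec1_len, and NUM_VECTORS(n) is simply n - 1.
 */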
885
886struct option_vector1 {
887 u8 byte1;
888 u8 arch_versions;
889 u8 arch_versions3;
890} __packed;
891
892struct option_vector2 {
893 u8 byte1;
894 __be16 reserved;
895 __be32 real_base;
896 __be32 real_size;
897 __be32 virt_base;
898 __be32 virt_size;
899 __be32 load_base;
900 __be32 min_rma;
901 __be32 min_load;
902 u8 min_rma_percent;
903 u8 max_pft_size;
904} __packed;
905
906struct option_vector3 {
907 u8 byte1;
908 u8 byte2;
909} __packed;
910
911struct option_vector4 {
912 u8 byte1;
913 u8 min_vp_cap;
914} __packed;
915
916struct option_vector5 {
917 u8 byte1;
918 u8 byte2;
919 u8 byte3;
920 u8 cmo;
921 u8 associativity;
922 u8 bin_opts;
923 u8 micro_checkpoint;
924 u8 reserved0;
925 __be32 max_cpus;
926 __be16 papr_level;
927 __be16 reserved1;
928 u8 platform_facilities;
929 u8 reserved2;
930 __be16 reserved3;
931 u8 subprocessors;
932 u8 byte22;
933 u8 intarch;
934 u8 mmu;
935 u8 hash_ext;
936 u8 radix_ext;
937} __packed;
938
939struct option_vector6 {
940 u8 reserved;
941 u8 secondary_pteg;
942 u8 os_name;
943} __packed;
944
945struct option_vector7 {
946 u8 os_id[256];
947} __packed;
948
949struct ibm_arch_vec {
950 struct { __be32 mask, val; } pvrs[16];
951
952 u8 num_vectors;
953
954 u8 vec1_len;
955 struct option_vector1 vec1;
956
957 u8 vec2_len;
958 struct option_vector2 vec2;
959
960 u8 vec3_len;
961 struct option_vector3 vec3;
962
963 u8 vec4_len;
964 struct option_vector4 vec4;
965
966 u8 vec5_len;
967 struct option_vector5 vec5;
968
969 u8 vec6_len;
970 struct option_vector6 vec6;
971
972 u8 vec7_len;
973 struct option_vector7 vec7;
974} __packed;
975
976static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
977 .pvrs = {
978 {
979 .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
980 .val = cpu_to_be32(0x003a0000),
981 },
982 {
983 .mask = cpu_to_be32(0xffff0000), /* POWER6 */
984 .val = cpu_to_be32(0x003e0000),
985 },
986 {
987 .mask = cpu_to_be32(0xffff0000), /* POWER7 */
988 .val = cpu_to_be32(0x003f0000),
989 },
990 {
991 .mask = cpu_to_be32(0xffff0000), /* POWER8E */
992 .val = cpu_to_be32(0x004b0000),
993 },
994 {
995 .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
996 .val = cpu_to_be32(0x004c0000),
997 },
998 {
999 .mask = cpu_to_be32(0xffff0000), /* POWER8 */
1000 .val = cpu_to_be32(0x004d0000),
1001 },
1002 {
1003 .mask = cpu_to_be32(0xffff0000), /* POWER9 */
1004 .val = cpu_to_be32(0x004e0000),
1005 },
1006 {
1007 .mask = cpu_to_be32(0xffff0000), /* POWER10 */
1008 .val = cpu_to_be32(0x00800000),
1009 },
1010 {
1011 .mask = cpu_to_be32(0xffff0000), /* POWER11 */
1012 .val = cpu_to_be32(0x00820000),
1013 },
1014 {
1015 .mask = cpu_to_be32(0xffffffff), /* P11 compliant */
1016 .val = cpu_to_be32(0x0f000007),
1017 },
1018 {
1019 .mask = cpu_to_be32(0xffffffff), /* all 3.1-compliant */
1020 .val = cpu_to_be32(0x0f000006),
1021 },
1022 {
1023 .mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
1024 .val = cpu_to_be32(0x0f000005),
1025 },
1026 {
1027 .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
1028 .val = cpu_to_be32(0x0f000004),
1029 },
1030 {
1031 .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
1032 .val = cpu_to_be32(0x0f000003),
1033 },
1034 {
1035 .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
1036 .val = cpu_to_be32(0x0f000002),
1037 },
1038 {
1039 .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
1040 .val = cpu_to_be32(0x0f000001),
1041 },
1042 },
1043
1044	.num_vectors = NUM_VECTORS(7),
1045
1046 .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
1047 .vec1 = {
1048 .byte1 = 0,
1049 .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
1050 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
1051 .arch_versions3 = OV1_PPC_3_00 | OV1_PPC_3_1,
1052 },
1053
1054 .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
1055 /* option vector 2: Open Firmware options supported */
1056 .vec2 = {
1057 .byte1 = OV2_REAL_MODE,
1058 .reserved = 0,
1059 .real_base = cpu_to_be32(0xffffffff),
1060 .real_size = cpu_to_be32(0xffffffff),
1061 .virt_base = cpu_to_be32(0xffffffff),
1062 .virt_size = cpu_to_be32(0xffffffff),
1063 .load_base = cpu_to_be32(0xffffffff),
1064 .min_rma = cpu_to_be32(512), /* 512MB min RMA */
1065 .min_load = cpu_to_be32(0xffffffff), /* full client load */
1066 .min_rma_percent = 0, /* min RMA percentage of total RAM */
1067 .max_pft_size = 48, /* max log_2(hash table size) */
1068 },
1069
1070 .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
1071 /* option vector 3: processor options supported */
1072 .vec3 = {
1073 .byte1 = 0, /* don't ignore, don't halt */
1074 .byte2 = OV3_FP | OV3_VMX | OV3_DFP,
1075 },
1076
1077 .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
1078 /* option vector 4: IBM PAPR implementation */
1079 .vec4 = {
1080 .byte1 = 0, /* don't halt */
1081 .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
1082 },
1083
1084 .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
1085 /* option vector 5: PAPR/OF options */
1086 .vec5 = {
1087 .byte1 = 0, /* don't ignore, don't halt */
1088 .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
1089 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
1090#ifdef CONFIG_PCI_MSI
1091 /* PCIe/MSI support. Without MSI full PCIe is not supported */
1092 OV5_FEAT(OV5_MSI),
1093#else
1094 0,
1095#endif
1096 .byte3 = 0,
1097 .cmo =
1098#ifdef CONFIG_PPC_SMLPAR
1099 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
1100#else
1101 0,
1102#endif
1103 .associativity = OV5_FEAT(OV5_FORM1_AFFINITY) | OV5_FEAT(OV5_PRRN) |
1104 OV5_FEAT(OV5_FORM2_AFFINITY),
1105 .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
1106 .micro_checkpoint = 0,
1107 .reserved0 = 0,
1108 .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */
1109 .papr_level = 0,
1110 .reserved1 = 0,
1111 .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
1112 .reserved2 = 0,
1113 .reserved3 = 0,
1114 .subprocessors = 1,
1115 .byte22 = OV5_FEAT(OV5_DRMEM_V2) | OV5_FEAT(OV5_DRC_INFO),
1116 .intarch = 0,
1117 .mmu = 0,
1118 .hash_ext = 0,
1119 .radix_ext = 0,
1120 },
1121
1122 /* option vector 6: IBM PAPR hints */
1123 .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
1124 .vec6 = {
1125 .reserved = 0,
1126 .secondary_pteg = 0,
1127 .os_name = OV6_LINUX,
1128 },
1129
1130 /* option vector 7: OS Identification */
1131 .vec7_len = VECTOR_LENGTH(sizeof(struct option_vector7)),
1132};
1133
1134static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned;
1135
1136/* Old method - ELF header with PT_NOTE sections only works on BE */
1137#ifdef __BIG_ENDIAN__
1138static const struct fake_elf {
1139 Elf32_Ehdr elfhdr;
1140 Elf32_Phdr phdr[2];
1141 struct chrpnote {
1142 u32 namesz;
1143 u32 descsz;
1144 u32 type;
1145 char name[8]; /* "PowerPC" */
1146 struct chrpdesc {
1147 u32 real_mode;
1148 u32 real_base;
1149 u32 real_size;
1150 u32 virt_base;
1151 u32 virt_size;
1152 u32 load_base;
1153 } chrpdesc;
1154 } chrpnote;
1155 struct rpanote {
1156 u32 namesz;
1157 u32 descsz;
1158 u32 type;
1159 char name[24]; /* "IBM,RPA-Client-Config" */
1160 struct rpadesc {
1161 u32 lpar_affinity;
1162 u32 min_rmo_size;
1163 u32 min_rmo_percent;
1164 u32 max_pft_size;
1165 u32 splpar;
1166 u32 min_load;
1167 u32 new_mem_def;
1168 u32 ignore_me;
1169 } rpadesc;
1170 } rpanote;
1171} fake_elf __initconst = {
1172 .elfhdr = {
1173 .e_ident = { 0x7f, 'E', 'L', 'F',
1174 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
1175 .e_type = ET_EXEC, /* yeah right */
1176 .e_machine = EM_PPC,
1177 .e_version = EV_CURRENT,
1178 .e_phoff = offsetof(struct fake_elf, phdr),
1179 .e_phentsize = sizeof(Elf32_Phdr),
1180 .e_phnum = 2
1181 },
1182 .phdr = {
1183 [0] = {
1184 .p_type = PT_NOTE,
1185 .p_offset = offsetof(struct fake_elf, chrpnote),
1186 .p_filesz = sizeof(struct chrpnote)
1187 }, [1] = {
1188 .p_type = PT_NOTE,
1189 .p_offset = offsetof(struct fake_elf, rpanote),
1190 .p_filesz = sizeof(struct rpanote)
1191 }
1192 },
1193 .chrpnote = {
1194 .namesz = sizeof("PowerPC"),
1195 .descsz = sizeof(struct chrpdesc),
1196 .type = 0x1275,
1197 .name = "PowerPC",
1198 .chrpdesc = {
1199 .real_mode = ~0U, /* ~0 means "don't care" */
1200 .real_base = ~0U,
1201 .real_size = ~0U,
1202 .virt_base = ~0U,
1203 .virt_size = ~0U,
1204 .load_base = ~0U
1205 },
1206 },
1207 .rpanote = {
1208 .namesz = sizeof("IBM,RPA-Client-Config"),
1209 .descsz = sizeof(struct rpadesc),
1210 .type = 0x12759999,
1211 .name = "IBM,RPA-Client-Config",
1212 .rpadesc = {
1213 .lpar_affinity = 0,
1214 .min_rmo_size = 64, /* in megabytes */
1215 .min_rmo_percent = 0,
1216 .max_pft_size = 48, /* 2^48 bytes max PFT size */
1217 .splpar = 1,
1218 .min_load = ~0U,
1219 .new_mem_def = 0
1220 }
1221 }
1222};
1223#endif /* __BIG_ENDIAN__ */
1224
1225static int __init prom_count_smt_threads(void)
1226{
1227 phandle node;
1228 char type[64];
1229 unsigned int plen;
1230
1231	/* Pick up the first CPU node we can find */
1232 for (node = 0; prom_next_node(&node); ) {
1233 type[0] = 0;
1234 prom_getprop(node, "device_type", type, sizeof(type));
1235
1236 if (prom_strcmp(type, "cpu"))
1237 continue;
1238 /*
1239 * There is an entry for each smt thread, each entry being
1240 * 4 bytes long. All cpus should have the same number of
1241 * smt threads, so return after finding the first.
1242 */
1243 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
1244 if (plen == PROM_ERROR)
1245 break;
1246 plen >>= 2;
1247 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
1248
1249 /* Sanity check */
1250 if (plen < 1 || plen > 64) {
1251 prom_printf("Threads per core %lu out of bounds, assuming 1\n",
1252 (unsigned long)plen);
1253 return 1;
1254 }
1255 return plen;
1256 }
1257 prom_debug("No threads found, assuming 1 per core\n");
1258
1259 return 1;
1260
1261}
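
/*
 * For example, a core with 8 SMT threads has an 8 * 4 = 32 byte
 * "ibm,ppc-interrupt-server#s" property, so plen >> 2 yields 8.
 */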
1262
1263static void __init prom_parse_mmu_model(u8 val,
1264 struct platform_support *support)
1265{
1266 switch (val) {
1267 case OV5_FEAT(OV5_MMU_DYNAMIC):
1268 case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
1269 prom_debug("MMU - either supported\n");
1270 support->radix_mmu = !prom_radix_disable;
1271 support->hash_mmu = true;
1272 break;
1273 case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
1274 prom_debug("MMU - radix only\n");
1275 if (prom_radix_disable) {
1276 /*
1277 * If we __have__ to do radix, we're better off ignoring
1278 * the command line rather than not booting.
1279 */
1280 prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
1281 }
1282 support->radix_mmu = true;
1283 break;
1284 case OV5_FEAT(OV5_MMU_HASH):
1285 prom_debug("MMU - hash only\n");
1286 support->hash_mmu = true;
1287 break;
1288 default:
1289 prom_debug("Unknown mmu support option: 0x%x\n", val);
1290 break;
1291 }
1292}
1293
1294static void __init prom_parse_xive_model(u8 val,
1295 struct platform_support *support)
1296{
1297 switch (val) {
1298 case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
1299 prom_debug("XIVE - either mode supported\n");
1300 support->xive = !prom_xive_disable;
1301 break;
1302 case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
1303 prom_debug("XIVE - exploitation mode supported\n");
1304 if (prom_xive_disable) {
1305 /*
1306 * If we __have__ to do XIVE, we're better off ignoring
1307 * the command line rather than not booting.
1308 */
1309 prom_printf("WARNING: Ignoring cmdline option xive=off\n");
1310 }
1311 support->xive = true;
1312 break;
1313 case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
1314 prom_debug("XIVE - legacy mode supported\n");
1315 break;
1316 default:
1317 prom_debug("Unknown xive support option: 0x%x\n", val);
1318 break;
1319 }
1320}
1321
1322static void __init prom_parse_platform_support(u8 index, u8 val,
1323 struct platform_support *support)
1324{
1325 switch (index) {
1326 case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
1327 prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
1328 break;
1329 case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
1330 if (val & OV5_FEAT(OV5_RADIX_GTSE))
1331 support->radix_gtse = !prom_radix_gtse_disable;
1332 break;
1333 case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */
1334 prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT),
1335 support);
1336 break;
1337 }
1338}
1339
1340static void __init prom_check_platform_support(void)
1341{
1342 struct platform_support supported = {
1343 .hash_mmu = false,
1344 .radix_mmu = false,
1345 .radix_gtse = false,
1346 .xive = false
1347 };
1348 int prop_len = prom_getproplen(prom.chosen,
1349 "ibm,arch-vec-5-platform-support");
1350
1351 /*
1352 * First copy the architecture vec template
1353 *
1354 * use memcpy() instead of *vec = *vec_template so that GCC replaces it
1355 * by __memcpy() when KASAN is active
1356 */
1357 memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template,
1358 sizeof(ibm_architecture_vec));
1359
1360 prom_strscpy_pad(ibm_architecture_vec.vec7.os_id, linux_banner, 256);
1361
1362 if (prop_len > 1) {
1363 int i;
1364 u8 vec[8];
1365 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
1366 prop_len);
1367 if (prop_len > sizeof(vec))
1368 prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
1369 prop_len);
1370 prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec));
1371 for (i = 0; i < prop_len; i += 2) {
1372 prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
1373 prom_parse_platform_support(vec[i], vec[i + 1], &supported);
1374 }
1375 }
1376
1377 if (supported.radix_mmu && IS_ENABLED(CONFIG_PPC_RADIX_MMU)) {
1378 /* Radix preferred - Check if GTSE is also supported */
1379 prom_debug("Asking for radix\n");
1380 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
1381 if (supported.radix_gtse)
1382 ibm_architecture_vec.vec5.radix_ext =
1383 OV5_FEAT(OV5_RADIX_GTSE);
1384 else
1385 prom_debug("Radix GTSE isn't supported\n");
1386 } else if (supported.hash_mmu) {
1387 /* Default to hash mmu (if we can) */
1388 prom_debug("Asking for hash\n");
1389 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
1390 } else {
1391 /* We're probably on a legacy hypervisor */
1392 prom_debug("Assuming legacy hash support\n");
1393 }
1394
1395 if (supported.xive) {
1396 prom_debug("Asking for XIVE\n");
1397 ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT);
1398 }
1399}
1400
1401static void __init prom_send_capabilities(void)
1402{
1403 ihandle root;
1404 prom_arg_t ret;
1405 u32 cores;
1406
1407 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
1408 prom_check_platform_support();
1409
1410 root = call_prom("open", 1, 1, ADDR("/"));
1411 if (root != 0) {
1412 /* We need to tell the FW about the number of cores we support.
1413 *
1414 * To do that, we count the number of threads on the first core
1415 * (we assume this is the same for all cores) and use it to
1416 * divide NR_CPUS.
1417 */
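		/*
		 * For example (illustrative numbers): with NR_CPUS = 2048 and
		 * 8 threads per core we report 2048 / 8 = 256 cores.
		 */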
1418
1419 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
1420 prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n",
1421 cores, NR_CPUS);
1422
1423 ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
1424
1425 /* try calling the ibm,client-architecture-support method */
1426 prom_printf("Calling ibm,client-architecture-support...");
1427 if (call_prom_ret("call-method", 3, 2, &ret,
1428 ADDR("ibm,client-architecture-support"),
1429 root,
1430 ADDR(&ibm_architecture_vec)) == 0) {
1431 /* the call exists... */
1432 if (ret)
1433 prom_printf("\nWARNING: ibm,client-architecture"
1434 "-support call FAILED!\n");
1435 call_prom("close", 1, 0, root);
1436 prom_printf(" done\n");
1437 return;
1438 }
1439 call_prom("close", 1, 0, root);
1440 prom_printf(" not implemented\n");
1441 }
1442
1443#ifdef __BIG_ENDIAN__
1444 {
1445 ihandle elfloader;
1446
1447 /* no ibm,client-architecture-support call, try the old way */
1448 elfloader = call_prom("open", 1, 1,
1449 ADDR("/packages/elf-loader"));
1450 if (elfloader == 0) {
1451 prom_printf("couldn't open /packages/elf-loader\n");
1452 return;
1453 }
1454 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
1455 elfloader, ADDR(&fake_elf));
1456 call_prom("close", 1, 0, elfloader);
1457 }
1458#endif /* __BIG_ENDIAN__ */
1459}
1460#endif /* CONFIG_PPC_PSERIES */
1461
1462/*
1463 * Memory allocation strategy... our layout is normally:
1464 *
1465 * at 14Mb or more we have vmlinux, then a gap and initrd. In some
1466 * rare cases, initrd might end up being before the kernel though.
1467 * We assume this won't overwrite the final kernel at 0, we have no
1468 * provision to handle that in this version, but it should hopefully
1469 * never happen.
1470 *
1471 * alloc_top is set to the top of RMO, eventually shrunk down if the
1472 * TCEs overlap
1473 *
1474 * alloc_bottom is set to the top of kernel/initrd
1475 *
1476 * from there, allocations are done this way : rtas is allocated
1477 * topmost, and the device-tree is allocated from the bottom. We try
1478 * to grow the device-tree allocation as we progress. If we can't,
1479 * then we fail, we don't currently have a facility to restart
1480 * elsewhere, but that shouldn't be necessary.
1481 *
1482 * Note that calls to reserve_mem have to be done explicitly, memory
1483 * allocated with either alloc_up or alloc_down isn't automatically
1484 * reserved.
1485 */
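
/*
 * An illustrative sketch of the resulting layout (addresses and order
 * are typical, not guaranteed):
 *
 *	0		where the final kernel will end up (untouched here)
 *	alloc_bottom	just above _end of this image, moved up past the
 *			initrd if the initrd sits inside the RMO
 *	    ...		device-tree chunks are claimed upward from here
 *	    ...		RTAS (and the SML) are claimed downward from here
 *	alloc_top	top of the RMO, capped at 768MB further down
 *	alloc_top_high	top of RAM, used only for the TCE tables
 */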
1486
1487
1488/*
1489 * Allocates memory in the RMO upward from the kernel/initrd
1490 *
1491 * When align is 0, this is a special case, it means to allocate in place
1492 * at the current location of alloc_bottom or fail (that is basically
1493 * extending the previous allocation). Used for the device-tree flattening
1494 */
1495static unsigned long __init alloc_up(unsigned long size, unsigned long align)
1496{
1497 unsigned long base = alloc_bottom;
1498 unsigned long addr = 0;
1499
1500 if (align)
1501 base = ALIGN(base, align);
1502 prom_debug("%s(%lx, %lx)\n", __func__, size, align);
1503 if (ram_top == 0)
1504 prom_panic("alloc_up() called with mem not initialized\n");
1505
1506 if (align)
1507 base = ALIGN(alloc_bottom, align);
1508 else
1509 base = alloc_bottom;
1510
1511 for(; (base + size) <= alloc_top;
1512 base = ALIGN(base + 0x100000, align)) {
1513 prom_debug(" trying: 0x%lx\n\r", base);
1514 addr = (unsigned long)prom_claim(base, size, 0);
1515 if (addr != PROM_ERROR && addr != 0)
1516 break;
1517 addr = 0;
1518 if (align == 0)
1519 break;
1520 }
1521 if (addr == 0)
1522 return 0;
1523 alloc_bottom = addr + size;
1524
1525 prom_debug(" -> %lx\n", addr);
1526 prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
1527 prom_debug(" alloc_top : %lx\n", alloc_top);
1528 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
1529 prom_debug(" rmo_top : %lx\n", rmo_top);
1530 prom_debug(" ram_top : %lx\n", ram_top);
1531
1532 return addr;
1533}
1534
1535/*
1536 * Allocates memory downward, either from top of RMO, or if highmem
1537 * is set, from the top of RAM. Note that this one doesn't handle
1538 * failures. It does claim memory if highmem is not set.
1539 */
1540static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1541 int highmem)
1542{
1543 unsigned long base, addr = 0;
1544
1545 prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
1546 highmem ? "(high)" : "(low)");
1547 if (ram_top == 0)
1548 prom_panic("alloc_down() called with mem not initialized\n");
1549
1550 if (highmem) {
1551 /* Carve out storage for the TCE table. */
1552 addr = ALIGN_DOWN(alloc_top_high - size, align);
1553 if (addr <= alloc_bottom)
1554 return 0;
1555 /* Will we bump into the RMO ? If yes, check out that we
1556 * didn't overlap existing allocations there, if we did,
1557 * we are dead, we must be the first in town !
1558 */
1559 if (addr < rmo_top) {
1560 /* Good, we are first */
1561 if (alloc_top == rmo_top)
1562 alloc_top = rmo_top = addr;
1563 else
1564 return 0;
1565 }
1566 alloc_top_high = addr;
1567 goto bail;
1568 }
1569
1570 base = ALIGN_DOWN(alloc_top - size, align);
1571 for (; base > alloc_bottom;
1572 base = ALIGN_DOWN(base - 0x100000, align)) {
1573 prom_debug(" trying: 0x%lx\n\r", base);
1574 addr = (unsigned long)prom_claim(base, size, 0);
1575 if (addr != PROM_ERROR && addr != 0)
1576 break;
1577 addr = 0;
1578 }
1579 if (addr == 0)
1580 return 0;
1581 alloc_top = addr;
1582
1583 bail:
1584 prom_debug(" -> %lx\n", addr);
1585 prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
1586 prom_debug(" alloc_top : %lx\n", alloc_top);
1587 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
1588 prom_debug(" rmo_top : %lx\n", rmo_top);
1589 prom_debug(" ram_top : %lx\n", ram_top);
1590
1591 return addr;
1592}
1593
1594/*
1595 * Parse a "reg" cell
1596 */
1597static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1598{
1599 cell_t *p = *cellp;
1600 unsigned long r = 0;
1601
1602 /* Ignore more than 2 cells */
1603 while (s > sizeof(unsigned long) / 4) {
1604 p++;
1605 s--;
1606 }
1607 r = be32_to_cpu(*p++);
1608#ifdef CONFIG_PPC64
1609 if (s > 1) {
1610 r <<= 32;
1611 r |= be32_to_cpu(*(p++));
1612 }
1613#endif
1614 *cellp = p;
1615 return r;
1616}
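
/*
 * For example, with #address-cells = 2 the cells { 0x1, 0x20000000 }
 * combine into 0x120000000 on ppc64; ppc32 keeps only the least
 * significant cell (0x20000000 here) and drops the rest.
 */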
1617
1618/*
1619 * Very dumb function for adding to the memory reserve list, but
1620 * we don't need anything smarter at this point
1621 *
1622 * XXX Eventually check for collisions. They should NEVER happen.
1623 * If problems seem to show up, it would be a good start to track
1624 * them down.
1625 */
1626static void __init reserve_mem(u64 base, u64 size)
1627{
1628 u64 top = base + size;
1629 unsigned long cnt = mem_reserve_cnt;
1630
1631 if (size == 0)
1632 return;
1633
1634 /* We need to always keep one empty entry so that we
1635 * have our terminator with "size" set to 0 since we are
1636 * dumb and just copy this entire array to the boot params
1637 */
1638 base = ALIGN_DOWN(base, PAGE_SIZE);
1639 top = ALIGN(top, PAGE_SIZE);
1640 size = top - base;
1641
1642 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1643 prom_panic("Memory reserve map exhausted !\n");
1644 mem_reserve_map[cnt].base = cpu_to_be64(base);
1645 mem_reserve_map[cnt].size = cpu_to_be64(size);
1646 mem_reserve_cnt = cnt + 1;
1647}
1648
1649/*
1650 * Initialize memory allocation mechanism, parse "memory" nodes and
1651 * obtain that way the top of memory and RMO to set up our local allocator
1652 */
1653static void __init prom_init_mem(void)
1654{
1655 phandle node;
1656 char type[64];
1657 unsigned int plen;
1658 cell_t *p, *endp;
1659 __be32 val;
1660 u32 rac, rsc;
1661
1662 /*
1663 * We iterate the memory nodes to find
1664 * 1) top of RMO (first node)
1665 * 2) top of memory
1666 */
1667 val = cpu_to_be32(2);
1668 prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1669 rac = be32_to_cpu(val);
1670 val = cpu_to_be32(1);
1671	prom_getprop(prom.root, "#size-cells", &val, sizeof(val));
1672 rsc = be32_to_cpu(val);
1673 prom_debug("root_addr_cells: %x\n", rac);
1674 prom_debug("root_size_cells: %x\n", rsc);
1675
1676 prom_debug("scanning memory:\n");
1677
1678 for (node = 0; prom_next_node(&node); ) {
1679 type[0] = 0;
1680 prom_getprop(node, "device_type", type, sizeof(type));
1681
1682 if (type[0] == 0) {
1683 /*
1684 * CHRP Longtrail machines have no device_type
1685 * on the memory node, so check the name instead...
1686 */
1687 prom_getprop(node, "name", type, sizeof(type));
1688 }
1689 if (prom_strcmp(type, "memory"))
1690 continue;
1691
1692 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1693 if (plen > sizeof(regbuf)) {
1694 prom_printf("memory node too large for buffer !\n");
1695 plen = sizeof(regbuf);
1696 }
1697 p = regbuf;
1698 endp = p + (plen / sizeof(cell_t));
1699
1700#ifdef DEBUG_PROM
1701 memset(prom_scratch, 0, sizeof(prom_scratch));
1702 call_prom("package-to-path", 3, 1, node, prom_scratch,
1703 sizeof(prom_scratch) - 1);
1704 prom_debug(" node %s :\n", prom_scratch);
1705#endif /* DEBUG_PROM */
1706
1707 while ((endp - p) >= (rac + rsc)) {
1708 unsigned long base, size;
1709
1710 base = prom_next_cell(rac, &p);
1711 size = prom_next_cell(rsc, &p);
1712
1713 if (size == 0)
1714 continue;
1715 prom_debug(" %lx %lx\n", base, size);
1716 if (base == 0 && (of_platform & PLATFORM_LPAR))
1717 rmo_top = size;
1718 if ((base + size) > ram_top)
1719 ram_top = base + size;
1720 }
1721 }
1722
1723 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1724
1725 /*
1726 * If prom_memory_limit is set we reduce the upper limits *except* for
1727 * alloc_top_high. This must be the real top of RAM so we can put
1728 * TCE's up there.
1729 */
1730
1731 alloc_top_high = ram_top;
1732
1733 if (prom_memory_limit) {
1734 if (prom_memory_limit <= alloc_bottom) {
1735 prom_printf("Ignoring mem=%lx <= alloc_bottom.\n",
1736 prom_memory_limit);
1737 prom_memory_limit = 0;
1738 } else if (prom_memory_limit >= ram_top) {
1739 prom_printf("Ignoring mem=%lx >= ram_top.\n",
1740 prom_memory_limit);
1741 prom_memory_limit = 0;
1742 } else {
1743 ram_top = prom_memory_limit;
1744 rmo_top = min(rmo_top, prom_memory_limit);
1745 }
1746 }
1747
1748 /*
1749 * Setup our top alloc point, that is top of RMO or top of
1750 * segment 0 when running non-LPAR.
1751 * Some RS64 machines have buggy firmware where claims up at
1752 * 1GB fail. Cap at 768MB as a workaround.
1753 * Since 768MB is plenty of room, and we need to cap to something
1754 * reasonable on 32-bit, cap at 768MB on all machines.
1755 */
1756 if (!rmo_top)
1757 rmo_top = ram_top;
1758 rmo_top = min(0x30000000ul, rmo_top);
1759 alloc_top = rmo_top;
1760 alloc_top_high = ram_top;
1761
1762 /*
1763 * Check if we have an initrd after the kernel but still inside
1764 * the RMO. If we do, move our bottom point to after it.
1765 */
1766 if (prom_initrd_start &&
1767 prom_initrd_start < rmo_top &&
1768 prom_initrd_end > alloc_bottom)
1769 alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1770
1771 prom_printf("memory layout at init:\n");
1772 prom_printf(" memory_limit : %lx (16 MB aligned)\n",
1773 prom_memory_limit);
1774 prom_printf(" alloc_bottom : %lx\n", alloc_bottom);
1775 prom_printf(" alloc_top : %lx\n", alloc_top);
1776 prom_printf(" alloc_top_hi : %lx\n", alloc_top_high);
1777 prom_printf(" rmo_top : %lx\n", rmo_top);
1778 prom_printf(" ram_top : %lx\n", ram_top);
1779}
1780
1781static void __init prom_close_stdin(void)
1782{
1783 __be32 val;
1784 ihandle stdin;
1785
1786 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1787 stdin = be32_to_cpu(val);
1788 call_prom("close", 1, 0, stdin);
1789 }
1790}
1791
1792#ifdef CONFIG_PPC_SVM
1793static int __init prom_rtas_hcall(uint64_t args)
1794{
1795 register uint64_t arg1 asm("r3") = H_RTAS;
1796 register uint64_t arg2 asm("r4") = args;
1797
1798 asm volatile("sc 1\n" : "=r" (arg1) :
1799 "r" (arg1),
1800 "r" (arg2) :);
1801 srr_regs_clobbered();
1802
1803 return arg1;
1804}
1805
1806static struct rtas_args __prombss os_term_args;
1807
1808static void __init prom_rtas_os_term(char *str)
1809{
1810 phandle rtas_node;
1811 __be32 val;
1812 u32 token;
1813
1814 prom_debug("%s: start...\n", __func__);
1815 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1816 prom_debug("rtas_node: %x\n", rtas_node);
1817 if (!PHANDLE_VALID(rtas_node))
1818 return;
1819
1820 val = 0;
1821 prom_getprop(rtas_node, "ibm,os-term", &val, sizeof(val));
1822 token = be32_to_cpu(val);
1823 prom_debug("ibm,os-term: %x\n", token);
1824 if (token == 0)
1825 prom_panic("Could not get token for ibm,os-term\n");
1826 os_term_args.token = cpu_to_be32(token);
1827 os_term_args.nargs = cpu_to_be32(1);
1828 os_term_args.nret = cpu_to_be32(1);
1829 os_term_args.args[0] = cpu_to_be32(__pa(str));
1830 prom_rtas_hcall((uint64_t)&os_term_args);
1831}
1832#endif /* CONFIG_PPC_SVM */
1833
1834/*
1835 * Allocate room for and instantiate RTAS
1836 */
1837static void __init prom_instantiate_rtas(void)
1838{
1839 phandle rtas_node;
1840 ihandle rtas_inst;
1841 u32 base, entry = 0;
1842 __be32 val;
1843 u32 size = 0;
1844
1845 prom_debug("prom_instantiate_rtas: start...\n");
1846
1847 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1848 prom_debug("rtas_node: %x\n", rtas_node);
1849 if (!PHANDLE_VALID(rtas_node))
1850 return;
1851
1852 val = 0;
1853 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
1854 size = be32_to_cpu(val);
1855 if (size == 0)
1856 return;
1857
1858 base = alloc_down(size, PAGE_SIZE, 0);
1859 if (base == 0)
1860 prom_panic("Could not allocate memory for RTAS\n");
1861
1862 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1863 if (!IHANDLE_VALID(rtas_inst)) {
1864 prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1865 return;
1866 }
1867
1868 prom_printf("instantiating rtas at 0x%x...", base);
1869
1870 if (call_prom_ret("call-method", 3, 2, &entry,
1871 ADDR("instantiate-rtas"),
1872 rtas_inst, base) != 0
1873 || entry == 0) {
1874 prom_printf(" failed\n");
1875 return;
1876 }
1877 prom_printf(" done\n");
1878
1879 reserve_mem(base, size);
1880
1881 val = cpu_to_be32(base);
1882 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1883 &val, sizeof(val));
1884 val = cpu_to_be32(entry);
1885 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1886 &val, sizeof(val));
1887
1888 /* Check if it supports "query-cpu-stopped-state" */
1889 if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1890 &val, sizeof(val)) != PROM_ERROR)
1891 rtas_has_query_cpu_stopped = true;
1892
1893 prom_debug("rtas base = 0x%x\n", base);
1894 prom_debug("rtas entry = 0x%x\n", entry);
1895 prom_debug("rtas size = 0x%x\n", size);
1896
1897 prom_debug("prom_instantiate_rtas: end...\n");
1898}
1899
1900#ifdef CONFIG_PPC64
1901/*
1902 * Allocate room for and instantiate Stored Measurement Log (SML)
1903 */
1904static void __init prom_instantiate_sml(void)
1905{
1906 phandle ibmvtpm_node;
1907 ihandle ibmvtpm_inst;
1908 u32 entry = 0, size = 0, succ = 0;
1909 u64 base;
1910 __be32 val;
1911
1912 prom_debug("prom_instantiate_sml: start...\n");
1913
1914 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
1915 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1916 if (!PHANDLE_VALID(ibmvtpm_node))
1917 return;
1918
1919 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
1920 if (!IHANDLE_VALID(ibmvtpm_inst)) {
1921 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1922 return;
1923 }
1924
1925 if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
1926 &val, sizeof(val)) != PROM_ERROR) {
1927 if (call_prom_ret("call-method", 2, 2, &succ,
1928 ADDR("reformat-sml-to-efi-alignment"),
1929 ibmvtpm_inst) != 0 || succ == 0) {
1930 prom_printf("Reformat SML to EFI alignment failed\n");
1931 return;
1932 }
1933
1934 if (call_prom_ret("call-method", 2, 2, &size,
1935 ADDR("sml-get-allocated-size"),
1936 ibmvtpm_inst) != 0 || size == 0) {
1937 prom_printf("SML get allocated size failed\n");
1938 return;
1939 }
1940 } else {
1941 if (call_prom_ret("call-method", 2, 2, &size,
1942 ADDR("sml-get-handover-size"),
1943 ibmvtpm_inst) != 0 || size == 0) {
1944 prom_printf("SML get handover size failed\n");
1945 return;
1946 }
1947 }
1948
1949 base = alloc_down(size, PAGE_SIZE, 0);
1950 if (base == 0)
1951 prom_panic("Could not allocate memory for sml\n");
1952
1953 prom_printf("instantiating sml at 0x%llx...", base);
1954
1955 memset((void *)base, 0, size);
1956
1957 if (call_prom_ret("call-method", 4, 2, &entry,
1958 ADDR("sml-handover"),
1959 ibmvtpm_inst, size, base) != 0 || entry == 0) {
1960 prom_printf("SML handover failed\n");
1961 return;
1962 }
1963 prom_printf(" done\n");
1964
1965 reserve_mem(base, size);
1966
1967 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
1968 &base, sizeof(base));
1969 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
1970 &size, sizeof(size));
1971
1972 prom_debug("sml base = 0x%llx\n", base);
1973 prom_debug("sml size = 0x%x\n", size);
1974
1975 prom_debug("prom_instantiate_sml: end...\n");
1976}
1977
1978/*
1979 * Allocate room for and initialize TCE tables
1980 */
1981#ifdef __BIG_ENDIAN__
1982static void __init prom_initialize_tce_table(void)
1983{
1984 phandle node;
1985 ihandle phb_node;
1986 char compatible[64], type[64], model[64];
1987 char *path = prom_scratch;
1988 u64 base, align;
1989 u32 minalign, minsize;
1990 u64 tce_entry, *tce_entryp;
1991 u64 local_alloc_top, local_alloc_bottom;
1992 u64 i;
1993
1994 if (prom_iommu_off)
1995 return;
1996
1997 prom_debug("starting prom_initialize_tce_table\n");
1998
1999 /* Cache current top of allocs so we reserve a single block */
2000 local_alloc_top = alloc_top_high;
2001 local_alloc_bottom = local_alloc_top;
2002
2003 /* Search all nodes looking for PHBs. */
2004 for (node = 0; prom_next_node(&node); ) {
2005 compatible[0] = 0;
2006 type[0] = 0;
2007 model[0] = 0;
2008 prom_getprop(node, "compatible",
2009 compatible, sizeof(compatible));
2010 prom_getprop(node, "device_type", type, sizeof(type));
2011 prom_getprop(node, "model", model, sizeof(model));
2012
2013 if ((type[0] == 0) || (prom_strstr(type, "pci") == NULL))
2014 continue;
2015
2016 /* Keep the old logic intact to avoid regression. */
2017 if (compatible[0] != 0) {
2018 if ((prom_strstr(compatible, "python") == NULL) &&
2019 (prom_strstr(compatible, "Speedwagon") == NULL) &&
2020 (prom_strstr(compatible, "Winnipeg") == NULL))
2021 continue;
2022 } else if (model[0] != 0) {
2023 if ((prom_strstr(model, "ython") == NULL) &&
2024 (prom_strstr(model, "peedwagon") == NULL) &&
2025 (prom_strstr(model, "innipeg") == NULL))
2026 continue;
2027 }
2028
2029 if (prom_getprop(node, "tce-table-minalign", &minalign,
2030 sizeof(minalign)) == PROM_ERROR)
2031 minalign = 0;
2032 if (prom_getprop(node, "tce-table-minsize", &minsize,
2033 sizeof(minsize)) == PROM_ERROR)
2034 minsize = 4UL << 20;
2035
2036 /*
2037 * Even though we read what OF wants, we just set the table
2038 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
2039 * By doing this, we avoid the pitfalls of trying to DMA to
2040 * MMIO space and the DMA alias hole.
2041 */
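		/*
		 * Each TCE is 8 bytes and, assuming 4KB IOMMU pages, maps one
		 * page, so a 4MB table holds 4MB / 8 = 512K entries covering
		 * 512K * 4KB = 2GB of DMA space.
		 */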
2042 minsize = 4UL << 20;
2043
2044 /* Align to the greater of the align or size */
2045 align = max(minalign, minsize);
2046 base = alloc_down(minsize, align, 1);
2047 if (base == 0)
2048 prom_panic("ERROR, cannot find space for TCE table.\n");
2049 if (base < local_alloc_bottom)
2050 local_alloc_bottom = base;
2051
2052 /* It seems OF doesn't null-terminate the path :-( */
2053 memset(path, 0, sizeof(prom_scratch));
2054 /* Call OF to setup the TCE hardware */
2055 if (call_prom("package-to-path", 3, 1, node,
2056 path, sizeof(prom_scratch) - 1) == PROM_ERROR) {
2057 prom_printf("package-to-path failed\n");
2058 }
2059
2060 /* Save away the TCE table attributes for later use. */
2061 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
2062 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
2063
2064 prom_debug("TCE table: %s\n", path);
2065 prom_debug("\tnode = 0x%x\n", node);
2066 prom_debug("\tbase = 0x%llx\n", base);
2067 prom_debug("\tsize = 0x%x\n", minsize);
2068
2069 /* Initialize the table to have a one-to-one mapping
2070 * over the allocated size.
2071 */
2072 tce_entryp = (u64 *)base;
2073		for (i = 0; i < (minsize >> 3); tce_entryp++, i++) {
2074 tce_entry = (i << PAGE_SHIFT);
2075 tce_entry |= 0x3;
2076 *tce_entryp = tce_entry;
2077 }
2078
2079 prom_printf("opening PHB %s", path);
2080 phb_node = call_prom("open", 1, 1, path);
2081 if (phb_node == 0)
2082 prom_printf("... failed\n");
2083 else
2084 prom_printf("... done\n");
2085
2086 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
2087 phb_node, -1, minsize,
2088 (u32) base, (u32) (base >> 32));
2089 call_prom("close", 1, 0, phb_node);
2090 }
2091
2092 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
2093
2094 /* These are only really needed if there is a memory limit in
2095 * effect, but we don't know so export them always. */
2096 prom_tce_alloc_start = local_alloc_bottom;
2097 prom_tce_alloc_end = local_alloc_top;
2098
2099 /* Flag the first invalid entry */
2100 prom_debug("ending prom_initialize_tce_table\n");
2101}
2102#endif /* __BIG_ENDIAN__ */
2103#endif /* CONFIG_PPC64 */
2104
2105/*
2106 * With CHRP SMP we need to use the OF to start the other processors.
2107 * We can't wait until smp_boot_cpus (the OF is trashed by then)
2108 * so we have to put the processors into a holding pattern controlled
2109 * by the kernel (not OF) before we destroy the OF.
2110 *
2111 * This uses a chunk of low memory, puts some holding pattern
2112 * code there and sends the other processors off to there until
2113 * smp_boot_cpus tells them to do something. The holding pattern
2114 * checks that address until its cpu # is there, when it is that
2115 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
2116 * of setting those values.
2117 *
2118 * We also use physical address 0x4 here to tell when a cpu
2119 * is in its holding pattern code.
2120 *
2121 * -- Cort
2122 */
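
/*
 * Seen from this side, the handshake with one secondary is roughly
 * (error handling and the iteration limit omitted):
 *
 *	*acknowledge = (unsigned long)-1;
 *	call_prom("start-cpu", 3, 0, node, secondary_hold, cpu_no);
 *	while (*acknowledge == (unsigned long)-1)
 *		mb();
 *
 * The secondary, started at __secondary_hold, writes its cpu_no into
 * *acknowledge and then spins until *spinloop becomes non-zero, which
 * only happens once the kernel releases it later in boot.
 */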
2123/*
2124 * We want to reference the copy of __secondary_hold_* in the
2125 * 0 - 0x100 address range
2126 */
2127#define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
2128
2129static void __init prom_hold_cpus(void)
2130{
2131 unsigned long i;
2132 phandle node;
2133 char type[64];
2134 unsigned long *spinloop
2135 = (void *) LOW_ADDR(__secondary_hold_spinloop);
2136 unsigned long *acknowledge
2137 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
2138 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
2139
2140 /*
2141 * On pseries, if RTAS supports "query-cpu-stopped-state",
2142 * we skip this stage, the CPUs will be started by the
2143 * kernel using RTAS.
2144 */
2145 if ((of_platform == PLATFORM_PSERIES ||
2146 of_platform == PLATFORM_PSERIES_LPAR) &&
2147 rtas_has_query_cpu_stopped) {
2148 prom_printf("prom_hold_cpus: skipped\n");
2149 return;
2150 }
2151
2152 prom_debug("prom_hold_cpus: start...\n");
2153 prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop);
2154 prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop);
2155 prom_debug(" 1) acknowledge = 0x%lx\n",
2156 (unsigned long)acknowledge);
2157 prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge);
2158 prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold);
2159
2160 /* Set the common spinloop variable, so all of the secondary cpus
2161 * will block when they are awakened from their OF spinloop.
2162 * This must occur for both SMP and non SMP kernels, since OF will
2163 * be trashed when we move the kernel.
2164 */
2165 *spinloop = 0;
2166
2167 /* look for cpus */
2168 for (node = 0; prom_next_node(&node); ) {
2169 unsigned int cpu_no;
2170 __be32 reg;
2171
2172 type[0] = 0;
2173 prom_getprop(node, "device_type", type, sizeof(type));
2174 if (prom_strcmp(type, "cpu") != 0)
2175 continue;
2176
2177 /* Skip non-configured cpus. */
2178 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
2179 if (prom_strcmp(type, "okay") != 0)
2180 continue;
2181
2182 reg = cpu_to_be32(-1); /* make sparse happy */
2183		prom_getprop(node, "reg", &reg, sizeof(reg));
2184 cpu_no = be32_to_cpu(reg);
2185
2186 prom_debug("cpu hw idx = %u\n", cpu_no);
2187
2188 /* Init the acknowledge var which will be reset by
2189 * the secondary cpu when it awakens from its OF
2190 * spinloop.
2191 */
2192 *acknowledge = (unsigned long)-1;
2193
2194 if (cpu_no != prom.cpu) {
2195 /* Primary Thread of non-boot cpu or any thread */
2196 prom_printf("starting cpu hw idx %u... ", cpu_no);
2197 call_prom("start-cpu", 3, 0, node,
2198 secondary_hold, cpu_no);
2199
2200 for (i = 0; (i < 100000000) &&
2201 (*acknowledge == ((unsigned long)-1)); i++ )
2202 mb();
2203
2204 if (*acknowledge == cpu_no)
2205 prom_printf("done\n");
2206 else
2207 prom_printf("failed: %lx\n", *acknowledge);
2208 }
2209#ifdef CONFIG_SMP
2210 else
2211 prom_printf("boot cpu hw idx %u\n", cpu_no);
2212#endif /* CONFIG_SMP */
2213 }
2214
2215 prom_debug("prom_hold_cpus: end...\n");
2216}
2217
2218
2219static void __init prom_init_client_services(unsigned long pp)
2220{
2221 /* Get a handle to the prom entry point before anything else */
2222 prom_entry = pp;
2223
2224 /* get a handle for the stdout device */
2225 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
2226 if (!PHANDLE_VALID(prom.chosen))
2227 prom_panic("cannot find chosen"); /* msg won't be printed :( */
2228
2229 /* get device tree root */
2230 prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
2231 if (!PHANDLE_VALID(prom.root))
2232 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
2233
2234 prom.mmumap = 0;
2235}
2236
2237#ifdef CONFIG_PPC32
2238/*
2239 * For really old powermacs, we need to map things we claim.
2240 * For that, we need the ihandle of the mmu.
2241 * Also, on the longtrail, we need to work around other bugs.
2242 */
2243static void __init prom_find_mmu(void)
2244{
2245 phandle oprom;
2246 char version[64];
2247
2248 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
2249 if (!PHANDLE_VALID(oprom))
2250 return;
2251 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
2252 return;
2253 version[sizeof(version) - 1] = 0;
2254 /* XXX might need to add other versions here */
2255 if (prom_strcmp(version, "Open Firmware, 1.0.5") == 0)
2256 of_workarounds = OF_WA_CLAIM;
2257 else if (prom_strncmp(version, "FirmWorks,3.", 12) == 0) {
2258 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
2259 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
2260 } else
2261 return;
2262 prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
2263 prom_getprop(prom.chosen, "mmu", &prom.mmumap,
2264 sizeof(prom.mmumap));
2265 prom.mmumap = be32_to_cpu(prom.mmumap);
2266 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
2267 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
2268}
2269#else
2270#define prom_find_mmu()
2271#endif
2272
2273static void __init prom_init_stdout(void)
2274{
2275 char *path = of_stdout_device;
2276 char type[16];
2277 phandle stdout_node;
2278 __be32 val;
2279
2280 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
2281 prom_panic("cannot find stdout");
2282
2283 prom.stdout = be32_to_cpu(val);
2284
2285 /* Get the full OF pathname of the stdout device */
2286 memset(path, 0, 256);
2287 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
2288 prom_printf("OF stdout device is: %s\n", of_stdout_device);
2289 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
2290 path, prom_strlen(path) + 1);
2291
2292 /* instance-to-package fails on PA-Semi */
2293 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
2294 if (stdout_node != PROM_ERROR) {
2295 val = cpu_to_be32(stdout_node);
2296
2297 /* If it's a display, note it */
2298 memset(type, 0, sizeof(type));
2299 prom_getprop(stdout_node, "device_type", type, sizeof(type));
2300 if (prom_strcmp(type, "display") == 0)
2301 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
2302 }
2303}
2304
2305static int __init prom_find_machine_type(void)
2306{
2307 static char compat[256] __prombss;
2308 int len, i = 0;
2309#ifdef CONFIG_PPC64
2310 phandle rtas;
2311 int x;
2312#endif
2313
2314 /* Look for a PowerMac or a Cell */
2315 len = prom_getprop(prom.root, "compatible",
2316 compat, sizeof(compat)-1);
2317 if (len > 0) {
2318 compat[len] = 0;
2319 while (i < len) {
2320 char *p = &compat[i];
2321 int sl = prom_strlen(p);
2322 if (sl == 0)
2323 break;
2324 if (prom_strstr(p, "Power Macintosh") ||
2325 prom_strstr(p, "MacRISC"))
2326 return PLATFORM_POWERMAC;
2327#ifdef CONFIG_PPC64
2328 /* We must make sure we don't detect the IBM Cell
2329 * blades as pSeries due to some firmware issues,
2330 * so we do it here.
2331 */
2332 if (prom_strstr(p, "IBM,CBEA") ||
2333 prom_strstr(p, "IBM,CPBW-1.0"))
2334 return PLATFORM_GENERIC;
2335#endif /* CONFIG_PPC64 */
2336 i += sl + 1;
2337 }
2338 }
2339#ifdef CONFIG_PPC64
2340 /* Try to figure out if it's an IBM pSeries or any other
2341 * PAPR compliant platform. We assume it is if :
2342 * - /device_type is "chrp" (please, do NOT use that for future
2343 * non-IBM designs !
2344 * - it has /rtas
2345 */
2346 len = prom_getprop(prom.root, "device_type",
2347 compat, sizeof(compat)-1);
2348 if (len <= 0)
2349 return PLATFORM_GENERIC;
2350 if (prom_strcmp(compat, "chrp"))
2351 return PLATFORM_GENERIC;
2352
2353 /* Default to pSeries. We need to know if we are running LPAR */
2354 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2355 if (!PHANDLE_VALID(rtas))
2356 return PLATFORM_GENERIC;
2357 x = prom_getproplen(rtas, "ibm,hypertas-functions");
2358 if (x != PROM_ERROR) {
2359 prom_debug("Hypertas detected, assuming LPAR !\n");
2360 return PLATFORM_PSERIES_LPAR;
2361 }
2362 return PLATFORM_PSERIES;
2363#else
2364 return PLATFORM_GENERIC;
2365#endif
2366}
2367
2368static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2369{
2370 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2371}
2372
2373/*
2374 * If we have a display that we don't know how to drive,
2375 * we will want to try to execute OF's open method for it
2376 * later. However, OF will probably fall over if we do that
2377 * after we've taken over the MMU.
2378 * So we check whether we will need to open the display,
2379 * and if so, open it now.
2380 */
2381static void __init prom_check_displays(void)
2382{
2383 char type[16], *path;
2384 phandle node;
2385 ihandle ih;
2386 int i;
2387
2388 static const unsigned char default_colors[] __initconst = {
2389 0x00, 0x00, 0x00,
2390 0x00, 0x00, 0xaa,
2391 0x00, 0xaa, 0x00,
2392 0x00, 0xaa, 0xaa,
2393 0xaa, 0x00, 0x00,
2394 0xaa, 0x00, 0xaa,
2395 0xaa, 0xaa, 0x00,
2396 0xaa, 0xaa, 0xaa,
2397 0x55, 0x55, 0x55,
2398 0x55, 0x55, 0xff,
2399 0x55, 0xff, 0x55,
2400 0x55, 0xff, 0xff,
2401 0xff, 0x55, 0x55,
2402 0xff, 0x55, 0xff,
2403 0xff, 0xff, 0x55,
2404 0xff, 0xff, 0xff
2405 };
2406 const unsigned char *clut;
2407
2408 prom_debug("Looking for displays\n");
2409 for (node = 0; prom_next_node(&node); ) {
2410 memset(type, 0, sizeof(type));
2411 prom_getprop(node, "device_type", type, sizeof(type));
2412 if (prom_strcmp(type, "display") != 0)
2413 continue;
2414
2415 /* It seems OF doesn't null-terminate the path :-( */
2416 path = prom_scratch;
2417 memset(path, 0, sizeof(prom_scratch));
2418
2419 /*
2420 * leave some room at the end of the path for appending extra
2421 * arguments
2422 */
2423 if (call_prom("package-to-path", 3, 1, node, path,
2424 sizeof(prom_scratch) - 10) == PROM_ERROR)
2425 continue;
2426 prom_printf("found display : %s, opening... ", path);
2427
2428 ih = call_prom("open", 1, 1, path);
2429 if (ih == 0) {
2430 prom_printf("failed\n");
2431 continue;
2432 }
2433
2434 /* Success */
2435 prom_printf("done\n");
2436 prom_setprop(node, path, "linux,opened", NULL, 0);
2437
2438 /* Setup a usable color table when the appropriate
2439 * method is available. Should update this to set-colors */
2440 clut = default_colors;
2441 for (i = 0; i < 16; i++, clut += 3)
2442 if (prom_set_color(ih, i, clut[0], clut[1],
2443 clut[2]) != 0)
2444 break;
2445
2446#ifdef CONFIG_LOGO_LINUX_CLUT224
2447 clut = PTRRELOC(logo_linux_clut224.clut);
2448 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2449 if (prom_set_color(ih, i + 32, clut[0], clut[1],
2450 clut[2]) != 0)
2451 break;
2452#endif /* CONFIG_LOGO_LINUX_CLUT224 */
2453
2454#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2455 if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2456 PROM_ERROR) {
2457 u32 width, height, pitch, addr;
2458
2459 prom_printf("Setting btext !\n");
2460
2461 if (prom_getprop(node, "width", &width, 4) == PROM_ERROR)
2462 return;
2463
2464 if (prom_getprop(node, "height", &height, 4) == PROM_ERROR)
2465 return;
2466
2467 if (prom_getprop(node, "linebytes", &pitch, 4) == PROM_ERROR)
2468 return;
2469
2470 if (prom_getprop(node, "address", &addr, 4) == PROM_ERROR)
2471 return;
2472
2473 prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2474 width, height, pitch, addr);
2475 btext_setup_display(width, height, 8, pitch, addr);
2476 btext_prepare_BAT();
2477 }
2478#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2479 }
2480}
2481
2482
2483/* Return (relocated) pointer to this much memory: moves initrd if reqd. */
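/*
 * Memory for the flattened tree is handed out from the current chunk
 * bounded by *mem_start/*mem_end; once the chunk is exhausted, another
 * chunk (up to DEVTREE_CHUNK_SIZE) is claimed from OF via alloc_up()
 * and allocation continues from there.
 */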
2484static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2485 unsigned long needed, unsigned long align)
2486{
2487 void *ret;
2488
2489 *mem_start = ALIGN(*mem_start, align);
2490 while ((*mem_start + needed) > *mem_end) {
2491 unsigned long room, chunk;
2492
2493 prom_debug("Chunk exhausted, claiming more at %lx...\n",
2494 alloc_bottom);
2495 room = alloc_top - alloc_bottom;
2496 if (room > DEVTREE_CHUNK_SIZE)
2497 room = DEVTREE_CHUNK_SIZE;
2498 if (room < PAGE_SIZE)
2499 prom_panic("No memory for flatten_device_tree "
2500 "(no room)\n");
2501 chunk = alloc_up(room, 0);
2502 if (chunk == 0)
2503 prom_panic("No memory for flatten_device_tree "
2504 "(claim failed)\n");
2505 *mem_end = chunk + room;
2506 }
2507
2508 ret = (void *)*mem_start;
2509 *mem_start += needed;
2510
2511 return ret;
2512}
2513
2514#define dt_push_token(token, mem_start, mem_end) do { \
2515 void *room = make_room(mem_start, mem_end, 4, 4); \
2516 *(__be32 *)room = cpu_to_be32(token); \
2517 } while(0)
2518
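/*
 * Look up a property name in the strings block being built.  Returns the
 * byte offset of the string relative to dt_string_start, or 0 if it is
 * not there yet; offset 0 itself is never valid because the block starts
 * with a 4-byte hole (see flatten_device_tree()).
 */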
2519static unsigned long __init dt_find_string(char *str)
2520{
2521 char *s, *os;
2522
2523 s = os = (char *)dt_string_start;
2524 s += 4;
2525 while (s < (char *)dt_string_end) {
2526 if (prom_strcmp(s, str) == 0)
2527 return s - os;
2528 s += prom_strlen(s) + 1;
2529 }
2530 return 0;
2531}
2532
2533/*
2534 * The Open Firmware 1275 specification states property names must be 31 bytes or
2535 * less; however, not all firmware obeys this. Make it 64 bytes to be safe.
2536 */
2537#define MAX_PROPERTY_NAME 64
2538
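/*
 * Walk the tree and add every property name to the strings block, each
 * name stored only once; names already present are reused via
 * dt_find_string().
 */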
2539static void __init scan_dt_build_strings(phandle node,
2540 unsigned long *mem_start,
2541 unsigned long *mem_end)
2542{
2543 char *prev_name, *namep, *sstart;
2544 unsigned long soff;
2545 phandle child;
2546
2547 sstart = (char *)dt_string_start;
2548
2549 /* get and store all property names */
2550 prev_name = "";
2551 for (;;) {
2552		/* MAX_PROPERTY_NAME is the max name length, including the NUL. */
2553 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2554 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2555			/* No more properties: unwind alloc */
2556 *mem_start = (unsigned long)namep;
2557 break;
2558 }
2559
2560 /* skip "name" */
2561 if (prom_strcmp(namep, "name") == 0) {
2562 *mem_start = (unsigned long)namep;
2563 prev_name = "name";
2564 continue;
2565 }
2566 /* get/create string entry */
2567 soff = dt_find_string(namep);
2568 if (soff != 0) {
2569 *mem_start = (unsigned long)namep;
2570 namep = sstart + soff;
2571 } else {
2572 /* Trim off some if we can */
2573 *mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
2574 dt_string_end = *mem_start;
2575 }
2576 prev_name = namep;
2577 }
2578
2579 /* do all our children */
2580 child = call_prom("child", 1, 1, node);
2581 while (child != 0) {
2582 scan_dt_build_strings(child, mem_start, mem_end);
2583 child = call_prom("peer", 1, 1, child);
2584 }
2585}
2586
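/*
 * Emit the structure block for one node: OF_DT_BEGIN_NODE and the unit
 * name, then one OF_DT_PROP record (length, string-block offset, value)
 * per property, a "phandle" property if the node doesn't provide one,
 * the children recursively, and finally OF_DT_END_NODE.
 */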
2587static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2588 unsigned long *mem_end)
2589{
2590 phandle child;
2591 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2592 unsigned long soff;
2593 unsigned char *valp;
2594 static char pname[MAX_PROPERTY_NAME] __prombss;
2595 int l, room, has_phandle = 0;
2596
2597 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2598
2599 /* get the node's full name */
2600 namep = (char *)*mem_start;
2601 room = *mem_end - *mem_start;
2602 if (room > 255)
2603 room = 255;
2604 l = call_prom("package-to-path", 3, 1, node, namep, room);
2605 if (l >= 0) {
2606 /* Didn't fit? Get more room. */
2607 if (l >= room) {
2608 if (l >= *mem_end - *mem_start)
2609 namep = make_room(mem_start, mem_end, l+1, 1);
2610 call_prom("package-to-path", 3, 1, node, namep, l);
2611 }
2612 namep[l] = '\0';
2613
2614 /* Fixup an Apple bug where they have bogus \0 chars in the
2615 * middle of the path in some properties, and extract
2616 * the unit name (everything after the last '/').
2617 */
2618 for (lp = p = namep, ep = namep + l; p < ep; p++) {
2619 if (*p == '/')
2620 lp = namep;
2621 else if (*p != 0)
2622 *lp++ = *p;
2623 }
2624 *lp = 0;
2625 *mem_start = ALIGN((unsigned long)lp + 1, 4);
2626 }
2627
2628 /* get it again for debugging */
2629 path = prom_scratch;
2630 memset(path, 0, sizeof(prom_scratch));
2631 call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1);
2632
2633 /* get and store all properties */
2634 prev_name = "";
2635 sstart = (char *)dt_string_start;
2636 for (;;) {
2637 if (call_prom("nextprop", 3, 1, node, prev_name,
2638 pname) != 1)
2639 break;
2640
2641 /* skip "name" */
2642 if (prom_strcmp(pname, "name") == 0) {
2643 prev_name = "name";
2644 continue;
2645 }
2646
2647 /* find string offset */
2648 soff = dt_find_string(pname);
2649 if (soff == 0) {
2650 prom_printf("WARNING: Can't find string index for"
2651 " <%s>, node %s\n", pname, path);
2652 break;
2653 }
2654 prev_name = sstart + soff;
2655
2656 /* get length */
2657 l = call_prom("getproplen", 2, 1, node, pname);
2658
2659 /* sanity checks */
2660 if (l == PROM_ERROR)
2661 continue;
2662
2663 /* push property head */
2664 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2665 dt_push_token(l, mem_start, mem_end);
2666 dt_push_token(soff, mem_start, mem_end);
2667
2668 /* push property content */
2669 valp = make_room(mem_start, mem_end, l, 4);
2670 call_prom("getprop", 4, 1, node, pname, valp, l);
2671 *mem_start = ALIGN(*mem_start, 4);
2672
2673 if (!prom_strcmp(pname, "phandle"))
2674 has_phandle = 1;
2675 }
2676
2677	/* Add a "phandle" property if none already exists */
2678 if (!has_phandle) {
2679 soff = dt_find_string("phandle");
2680 if (soff == 0)
2681 prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path);
2682 else {
2683 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2684 dt_push_token(4, mem_start, mem_end);
2685 dt_push_token(soff, mem_start, mem_end);
2686 valp = make_room(mem_start, mem_end, 4, 4);
2687 *(__be32 *)valp = cpu_to_be32(node);
2688 }
2689 }
2690
2691 /* do all our children */
2692 child = call_prom("child", 1, 1, node);
2693 while (child != 0) {
2694 scan_dt_build_struct(child, mem_start, mem_end);
2695 child = call_prom("peer", 1, 1, child);
2696 }
2697
2698 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2699}
2700
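/*
 * Build the flattened device-tree blob that will be handed to the kernel:
 * a boot_param_header, the memory reserve map, the strings block and the
 * structure block, in that order.  All offsets stored in the header are
 * relative to dt_header_start.
 */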
2701static void __init flatten_device_tree(void)
2702{
2703 phandle root;
2704 unsigned long mem_start, mem_end, room;
2705 struct boot_param_header *hdr;
2706 char *namep;
2707 u64 *rsvmap;
2708
2709 /*
2710 * Check how much room we have between alloc top & bottom (+/- a
2711 * few pages), crop to 1MB, as this is our "chunk" size
2712 */
2713 room = alloc_top - alloc_bottom - 0x4000;
2714 if (room > DEVTREE_CHUNK_SIZE)
2715 room = DEVTREE_CHUNK_SIZE;
2716 prom_debug("starting device tree allocs at %lx\n", alloc_bottom);
2717
2718 /* Now try to claim that */
2719 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2720 if (mem_start == 0)
2721 prom_panic("Can't allocate initial device-tree chunk\n");
2722 mem_end = mem_start + room;
2723
2724 /* Get root of tree */
2725 root = call_prom("peer", 1, 1, (phandle)0);
2726 if (root == (phandle)0)
2727 prom_panic ("couldn't get device tree root\n");
2728
2729 /* Build header and make room for mem rsv map */
2730 mem_start = ALIGN(mem_start, 4);
2731 hdr = make_room(&mem_start, &mem_end,
2732 sizeof(struct boot_param_header), 4);
2733 dt_header_start = (unsigned long)hdr;
2734 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2735
2736 /* Start of strings */
2737 mem_start = PAGE_ALIGN(mem_start);
2738 dt_string_start = mem_start;
2739 mem_start += 4; /* hole */
2740
2741 /* Add "phandle" in there, we'll need it */
2742 namep = make_room(&mem_start, &mem_end, 16, 1);
2743 prom_strscpy_pad(namep, "phandle", sizeof("phandle"));
2744 mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
2745
2746 /* Build string array */
2747 prom_printf("Building dt strings...\n");
2748 scan_dt_build_strings(root, &mem_start, &mem_end);
2749 dt_string_end = mem_start;
2750
2751 /* Build structure */
2752 mem_start = PAGE_ALIGN(mem_start);
2753 dt_struct_start = mem_start;
2754 prom_printf("Building dt structure...\n");
2755 scan_dt_build_struct(root, &mem_start, &mem_end);
2756 dt_push_token(OF_DT_END, &mem_start, &mem_end);
2757 dt_struct_end = PAGE_ALIGN(mem_start);
2758
2759 /* Finish header */
2760 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2761 hdr->magic = cpu_to_be32(OF_DT_HEADER);
2762 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2763 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2764 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2765 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2766 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2767 hdr->version = cpu_to_be32(OF_DT_VERSION);
2768 /* Version 16 is not backward compatible */
2769 hdr->last_comp_version = cpu_to_be32(0x10);
2770
2771 /* Copy the reserve map in */
2772 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2773
2774#ifdef DEBUG_PROM
2775 {
2776 int i;
2777 prom_printf("reserved memory map:\n");
2778 for (i = 0; i < mem_reserve_cnt; i++)
2779 prom_printf(" %llx - %llx\n",
2780 be64_to_cpu(mem_reserve_map[i].base),
2781 be64_to_cpu(mem_reserve_map[i].size));
2782 }
2783#endif
2784 /* Bump mem_reserve_cnt to cause further reservations to fail
2785 * since it's too late.
2786 */
2787 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2788
2789 prom_printf("Device tree strings 0x%lx -> 0x%lx\n",
2790 dt_string_start, dt_string_end);
2791 prom_printf("Device tree struct 0x%lx -> 0x%lx\n",
2792 dt_struct_start, dt_struct_end);
2793}
2794
2795#ifdef CONFIG_PPC_CHRP
2796/*
2797 * Pegasos and BriQ lack the "ranges" property in the isa node
2798 * Pegasos needs decimal IRQ 14/15, not hexadecimal
2799 * Pegasos has the IDE configured in legacy mode, but advertised as native
2800 */
2801static void __init fixup_device_tree_chrp(void)
2802{
2803 phandle ph;
2804 u32 prop[6];
2805 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2806 char *name;
2807 int rc;
2808
2809 name = "/pci@80000000/isa@c";
2810 ph = call_prom("finddevice", 1, 1, ADDR(name));
2811 if (!PHANDLE_VALID(ph)) {
2812 name = "/pci@ff500000/isa@6";
2813 ph = call_prom("finddevice", 1, 1, ADDR(name));
2814 rloc = 0x01003000; /* IO space; PCI device = 6 */
2815 }
2816 if (PHANDLE_VALID(ph)) {
2817 rc = prom_getproplen(ph, "ranges");
2818 if (rc == 0 || rc == PROM_ERROR) {
2819 prom_printf("Fixing up missing ISA range on Pegasos...\n");
2820
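			/*
			 * Single "ranges" entry, presumably laid out as the
			 * usual 6-cell ISA->PCI mapping: 2-cell ISA address
			 * (I/O space, offset 0), 3-cell PCI address with
			 * phys.hi = rloc, and a 64k size.
			 */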
2821 prop[0] = 0x1;
2822 prop[1] = 0x0;
2823 prop[2] = rloc;
2824 prop[3] = 0x0;
2825 prop[4] = 0x0;
2826 prop[5] = 0x00010000;
2827 prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2828 }
2829 }
2830
2831 name = "/pci@80000000/ide@C,1";
2832 ph = call_prom("finddevice", 1, 1, ADDR(name));
2833 if (PHANDLE_VALID(ph)) {
2834 prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2835 prop[0] = 14;
2836 prop[1] = 0x0;
2837 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2838 prom_printf("Fixing up IDE class-code on Pegasos...\n");
2839 rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2840 if (rc == sizeof(u32)) {
2841 prop[0] &= ~0x5;
2842 prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2843 }
2844 }
2845}
2846#else
2847#define fixup_device_tree_chrp()
2848#endif
2849
2850#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
2851static void __init fixup_device_tree_pmac64(void)
2852{
2853 phandle u3, i2c, mpic;
2854 u32 u3_rev;
2855 u32 interrupts[2];
2856 u32 parent;
2857
2858 /* Some G5s have a missing interrupt definition, fix it up here */
2859 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2860 if (!PHANDLE_VALID(u3))
2861 return;
2862 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2863 if (!PHANDLE_VALID(i2c))
2864 return;
2865 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2866 if (!PHANDLE_VALID(mpic))
2867 return;
2868
2869 /* check if proper rev of u3 */
2870 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2871 == PROM_ERROR)
2872 return;
2873 if (u3_rev < 0x35 || u3_rev > 0x39)
2874 return;
2875 /* does it need fixup ? */
2876 if (prom_getproplen(i2c, "interrupts") > 0)
2877 return;
2878
2879 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2880
2881	/* the interrupt on this revision of u3 is number 0 and level triggered */
2882 interrupts[0] = 0;
2883 interrupts[1] = 1;
2884 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2885 &interrupts, sizeof(interrupts));
2886 parent = (u32)mpic;
2887 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2888 &parent, sizeof(parent));
2889}
2890#else
2891#define fixup_device_tree_pmac64()
2892#endif
2893
2894#ifdef CONFIG_PPC_PMAC
2895static void __init fixup_device_tree_pmac(void)
2896{
2897 __be32 val = 1;
2898 char type[8];
2899 phandle node;
2900
2901 // Some pmacs are missing #size-cells on escc nodes
2902 for (node = 0; prom_next_node(&node); ) {
2903 type[0] = '\0';
2904 prom_getprop(node, "device_type", type, sizeof(type));
2905 if (prom_strcmp(type, "escc"))
2906 continue;
2907
2908 if (prom_getproplen(node, "#size-cells") != PROM_ERROR)
2909 continue;
2910
2911 prom_setprop(node, NULL, "#size-cells", &val, sizeof(val));
2912 }
2913}
2914#else
2915static inline void fixup_device_tree_pmac(void) { }
2916#endif
2917
2918#ifdef CONFIG_PPC_EFIKA
2919/*
2920 * The MPC5200 FEC driver requires a phy-handle property to tell it how
2921 * to talk to the phy. If the phy-handle property is missing, then this
2922 * function is called to add the appropriate nodes and link them to the
2923 * ethernet node.
2924 */
2925static void __init fixup_device_tree_efika_add_phy(void)
2926{
2927 u32 node;
2928 char prop[64];
2929 int rv;
2930
2931 /* Check if /builtin/ethernet exists - bail if it doesn't */
2932 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2933 if (!PHANDLE_VALID(node))
2934 return;
2935
2936 /* Check if the phy-handle property exists - bail if it does */
2937 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2938 if (rv <= 0)
2939 return;
2940
2941 /*
2942 * At this point the ethernet device doesn't have a phy described.
2943 * Now we need to add the missing phy node and linkage
2944 */
2945
2946 /* Check for an MDIO bus node - if missing then create one */
2947 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
2948 if (!PHANDLE_VALID(node)) {
2949 prom_printf("Adding Ethernet MDIO node\n");
2950 call_prom("interpret", 1, 1,
2951 " s\" /builtin\" find-device"
2952 " new-device"
2953 " 1 encode-int s\" #address-cells\" property"
2954 " 0 encode-int s\" #size-cells\" property"
2955 " s\" mdio\" device-name"
2956 " s\" fsl,mpc5200b-mdio\" encode-string"
2957 " s\" compatible\" property"
2958 " 0xf0003000 0x400 reg"
2959 " 0x2 encode-int"
2960 " 0x5 encode-int encode+"
2961 " 0x3 encode-int encode+"
2962 " s\" interrupts\" property"
2963 " finish-device");
2964 }
2965
2966	/* Check for a PHY device node - if missing then create one and
2967	 * give its phandle to the ethernet node */
2968 node = call_prom("finddevice", 1, 1,
2969 ADDR("/builtin/mdio/ethernet-phy"));
2970 if (!PHANDLE_VALID(node)) {
2971 prom_printf("Adding Ethernet PHY node\n");
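		/*
		 * The Forth below creates an ethernet-phy node at reg 0x10
		 * under /builtin/mdio, converts the new device's ihandle to a
		 * phandle, and stores that as the "phy-handle" property of
		 * /builtin/ethernet.
		 */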
2972 call_prom("interpret", 1, 1,
2973 " s\" /builtin/mdio\" find-device"
2974 " new-device"
2975 " s\" ethernet-phy\" device-name"
2976 " 0x10 encode-int s\" reg\" property"
2977 " my-self"
2978 " ihandle>phandle"
2979 " finish-device"
2980 " s\" /builtin/ethernet\" find-device"
2981 " encode-int"
2982 " s\" phy-handle\" property"
2983 " device-end");
2984 }
2985}
2986
2987static void __init fixup_device_tree_efika(void)
2988{
2989 int sound_irq[3] = { 2, 2, 0 };
2990 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
2991 3,4,0, 3,5,0, 3,6,0, 3,7,0,
2992 3,8,0, 3,9,0, 3,10,0, 3,11,0,
2993 3,12,0, 3,13,0, 3,14,0, 3,15,0 };
2994 u32 node;
2995 char prop[64];
2996 int rv, len;
2997
2998	/* Check if we're really running on an EFIKA */
2999 node = call_prom("finddevice", 1, 1, ADDR("/"));
3000 if (!PHANDLE_VALID(node))
3001 return;
3002
3003 rv = prom_getprop(node, "model", prop, sizeof(prop));
3004 if (rv == PROM_ERROR)
3005 return;
3006 if (prom_strcmp(prop, "EFIKA5K2"))
3007 return;
3008
3009 prom_printf("Applying EFIKA device tree fixups\n");
3010
3011 /* Claiming to be 'chrp' is death */
3012 node = call_prom("finddevice", 1, 1, ADDR("/"));
3013 rv = prom_getprop(node, "device_type", prop, sizeof(prop));
3014 if (rv != PROM_ERROR && (prom_strcmp(prop, "chrp") == 0))
3015 prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
3016
3017	/* CODEGEN,description is exposed in /proc/cpuinfo so
3018	 * fix that too */
3019 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
3020 if (rv != PROM_ERROR && (prom_strstr(prop, "CHRP")))
3021 prom_setprop(node, "/", "CODEGEN,description",
3022 "Efika 5200B PowerPC System",
3023 sizeof("Efika 5200B PowerPC System"));
3024
3025 /* Fixup bestcomm interrupts property */
3026 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
3027 if (PHANDLE_VALID(node)) {
3028 len = prom_getproplen(node, "interrupts");
3029 if (len == 12) {
3030 prom_printf("Fixing bestcomm interrupts property\n");
3031			prom_setprop(node, "/builtin/bestcomm", "interrupts",
3032 bcomm_irq, sizeof(bcomm_irq));
3033 }
3034 }
3035
3036 /* Fixup sound interrupts property */
3037 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
3038 if (PHANDLE_VALID(node)) {
3039 rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
3040 if (rv == PROM_ERROR) {
3041 prom_printf("Adding sound interrupts property\n");
3042 prom_setprop(node, "/builtin/sound", "interrupts",
3043 sound_irq, sizeof(sound_irq));
3044 }
3045 }
3046
3047 /* Make sure ethernet phy-handle property exists */
3048 fixup_device_tree_efika_add_phy();
3049}
3050#else
3051#define fixup_device_tree_efika()
3052#endif
3053
3054#ifdef CONFIG_PPC_PASEMI_NEMO
3055/*
3056 * The CFE supplied on Nemo is broken in several ways; the biggest
3057 * problem is that it reassigns ISA interrupts to unused MPIC ints.
3058 * Add an interrupt-controller property for the io-bridge to use
3059 * and correct the ints so we can attach them to an irq_domain.
3060 */
3061static void __init fixup_device_tree_pasemi(void)
3062{
3063 u32 interrupts[2], parent, rval, val = 0;
3064 char *name, *pci_name;
3065 phandle iob, node;
3066
3067 /* Find the root pci node */
3068 name = "/pxp@0,e0000000";
3069 iob = call_prom("finddevice", 1, 1, ADDR(name));
3070 if (!PHANDLE_VALID(iob))
3071 return;
3072
3073 /* check if interrupt-controller node set yet */
3074	if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR)
3075 return;
3076
3077 prom_printf("adding interrupt-controller property for SB600...\n");
3078
3079 prom_setprop(iob, name, "interrupt-controller", &val, 0);
3080
3081 pci_name = "/pxp@0,e0000000/pci@11";
3082 node = call_prom("finddevice", 1, 1, ADDR(pci_name));
3083 parent = ADDR(iob);
3084
3085	for (; prom_next_node(&node); ) {
3086 /* scan each node for one with an interrupt */
3087 if (!PHANDLE_VALID(node))
3088 continue;
3089
3090 rval = prom_getproplen(node, "interrupts");
3091 if (rval == 0 || rval == PROM_ERROR)
3092 continue;
3093
3094 prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
3095 if ((interrupts[0] < 212) || (interrupts[0] > 222))
3096 continue;
3097
3098 /* found a node, update both interrupts and interrupt-parent */
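		/* i.e. 212-215 -> 9-12, 216-220 -> 3-7, 221 -> 14, 222 -> 8 */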
3099 if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
3100 interrupts[0] -= 203;
3101 if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
3102 interrupts[0] -= 213;
3103 if (interrupts[0] == 221)
3104 interrupts[0] = 14;
3105 if (interrupts[0] == 222)
3106 interrupts[0] = 8;
3107
3108 prom_setprop(node, pci_name, "interrupts", interrupts,
3109 sizeof(interrupts));
3110 prom_setprop(node, pci_name, "interrupt-parent", &parent,
3111 sizeof(parent));
3112 }
3113
3114 /*
3115	 * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
3116 * so that generic isa-bridge code can add the SB600 and its on-board
3117 * peripherals.
3118 */
3119 name = "/pxp@0,e0000000/io-bridge@0";
3120 iob = call_prom("finddevice", 1, 1, ADDR(name));
3121 if (!PHANDLE_VALID(iob))
3122 return;
3123
3124 /* device_type is already set, just change it. */
3125
3126 prom_printf("Changing device_type of SB600 node...\n");
3127
3128 prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
3129}
3130#else /* !CONFIG_PPC_PASEMI_NEMO */
3131static inline void fixup_device_tree_pasemi(void) { }
3132#endif
3133
3134static void __init fixup_device_tree(void)
3135{
3136 fixup_device_tree_chrp();
3137 fixup_device_tree_pmac();
3138 fixup_device_tree_pmac64();
3139 fixup_device_tree_efika();
3140 fixup_device_tree_pasemi();
3141}
3142
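/*
 * /chosen/cpu holds the ihandle of the CPU we were booted on; turn it
 * into a package and read its "reg" property to get the hardware CPU id.
 */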
3143static void __init prom_find_boot_cpu(void)
3144{
3145 __be32 rval;
3146 ihandle prom_cpu;
3147 phandle cpu_pkg;
3148
3149 rval = 0;
3150 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
3151 return;
3152 prom_cpu = be32_to_cpu(rval);
3153
3154 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
3155
3156 if (!PHANDLE_VALID(cpu_pkg))
3157 return;
3158
3159 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
3160 prom.cpu = be32_to_cpu(rval);
3161
3162 prom_debug("Booting CPU hw index = %d\n", prom.cpu);
3163}
3164
3165static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
3166{
3167#ifdef CONFIG_BLK_DEV_INITRD
3168 if (r3 && r4 && r4 != 0xdeadbeef) {
3169 __be64 val;
3170
3171 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
3172 prom_initrd_end = prom_initrd_start + r4;
3173
3174 val = cpu_to_be64(prom_initrd_start);
3175 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
3176 &val, sizeof(val));
3177 val = cpu_to_be64(prom_initrd_end);
3178 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
3179 &val, sizeof(val));
3180
3181 reserve_mem(prom_initrd_start,
3182 prom_initrd_end - prom_initrd_start);
3183
3184 prom_debug("initrd_start=0x%lx\n", prom_initrd_start);
3185 prom_debug("initrd_end=0x%lx\n", prom_initrd_end);
3186 }
3187#endif /* CONFIG_BLK_DEV_INITRD */
3188}
3189
3190#ifdef CONFIG_PPC_SVM
3191/*
3192 * Perform the Enter Secure Mode ultracall.
3193 */
3194static int __init enter_secure_mode(unsigned long kbase, unsigned long fdt)
3195{
3196 register unsigned long r3 asm("r3") = UV_ESM;
3197 register unsigned long r4 asm("r4") = kbase;
3198 register unsigned long r5 asm("r5") = fdt;
3199
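	/*
	 * Ultracall convention: function number in r3 (UV_ESM here),
	 * arguments in r4/r5; "sc 2" traps to the ultravisor and the
	 * return status comes back in r3.
	 */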
3200 asm volatile("sc 2" : "+r"(r3) : "r"(r4), "r"(r5));
3201
3202 return r3;
3203}
3204
3205/*
3206 * Call the Ultravisor to transfer us to secure memory if we have an ESM blob.
3207 */
3208static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
3209{
3210 int ret;
3211
3212 if (!prom_svm_enable)
3213 return;
3214
3215 /* Switch to secure mode. */
3216 prom_printf("Switching to secure mode.\n");
3217
3218 /*
3219 * The ultravisor will do an integrity check of the kernel image but we
3220 * relocated it so the check will fail. Restore the original image by
3221 * relocating it back to the kernel virtual base address.
3222 */
3223 relocate(KERNELBASE);
3224
3225 ret = enter_secure_mode(kbase, fdt);
3226
3227 /* Relocate the kernel again. */
3228 relocate(kbase);
3229
3230 if (ret != U_SUCCESS) {
3231 prom_printf("Returned %d from switching to secure mode.\n", ret);
3232 prom_rtas_os_term("Switch to secure mode failed.\n");
3233 }
3234}
3235#else
3236static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
3237{
3238}
3239#endif /* CONFIG_PPC_SVM */
3240
3241/*
3242 * We enter here early on, when the Open Firmware prom is still
3243 * handling exceptions and managing the MMU hash table for us.
3244 */
3245
3246unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3247 unsigned long pp,
3248 unsigned long r6, unsigned long r7,
3249 unsigned long kbase)
3250{
3251 unsigned long hdr;
3252
3253#ifdef CONFIG_PPC32
3254 unsigned long offset = reloc_offset();
3255 reloc_got2(offset);
3256#endif
3257
3258 /*
3259 * First zero the BSS
3260 */
3261 memset(&__bss_start, 0, __bss_stop - __bss_start);
3262
3263 /*
3264 * Init interface to Open Firmware, get some node references,
3265 * like /chosen
3266 */
3267 prom_init_client_services(pp);
3268
3269 /*
3270 * See if this OF is old enough that we need to do explicit maps
3271 * and other workarounds
3272 */
3273 prom_find_mmu();
3274
3275 /*
3276 * Init prom stdout device
3277 */
3278 prom_init_stdout();
3279
3280 prom_printf("Preparing to boot %s", linux_banner);
3281
3282 /*
3283 * Get default machine type. At this point, we do not differentiate
3284 * between pSeries SMP and pSeries LPAR
3285 */
3286 of_platform = prom_find_machine_type();
3287 prom_printf("Detected machine type: %x\n", of_platform);
3288
3289#ifndef CONFIG_NONSTATIC_KERNEL
3290 /* Bail if this is a kdump kernel. */
3291 if (PHYSICAL_START > 0)
3292 prom_panic("Error: You can't boot a kdump kernel from OF!\n");
3293#endif
3294
3295 /*
3296 * Check for an initrd
3297 */
3298 prom_check_initrd(r3, r4);
3299
3300 /*
3301 * Do early parsing of command line
3302 */
3303 early_cmdline_parse();
3304
3305#ifdef CONFIG_PPC_PSERIES
3306 /*
3307 * On pSeries, inform the firmware about our capabilities
3308 */
3309 if (of_platform == PLATFORM_PSERIES ||
3310 of_platform == PLATFORM_PSERIES_LPAR)
3311 prom_send_capabilities();
3312#endif
3313
3314 /*
3315 * Copy the CPU hold code
3316 */
3317 if (of_platform != PLATFORM_POWERMAC)
3318 copy_and_flush(0, kbase, 0x100, 0);
3319
3320 /*
3321 * Initialize memory management within prom_init
3322 */
3323 prom_init_mem();
3324
3325 /*
3326 * Determine which cpu is actually running right _now_
3327 */
3328 prom_find_boot_cpu();
3329
3330 /*
3331 * Initialize display devices
3332 */
3333 prom_check_displays();
3334
3335#if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
3336 /*
3337 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
3338	 * that uses the allocator, as we need to make sure we get the top of memory
3339 * available for us here...
3340 */
3341 if (of_platform == PLATFORM_PSERIES)
3342 prom_initialize_tce_table();
3343#endif
3344
3345 /*
3346 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
3347 * have a usable RTAS implementation.
3348 */
3349 if (of_platform != PLATFORM_POWERMAC)
3350 prom_instantiate_rtas();
3351
3352#ifdef CONFIG_PPC64
3353 /* instantiate sml */
3354 prom_instantiate_sml();
3355#endif
3356
3357 /*
3358 * On non-powermacs, put all CPUs in spin-loops.
3359 *
3360	 * PowerMacs use a different mechanism to spin CPUs.
3361 *
3362 * (This must be done after instantiating RTAS)
3363 */
3364 if (of_platform != PLATFORM_POWERMAC)
3365 prom_hold_cpus();
3366
3367 /*
3368 * Fill in some infos for use by the kernel later on
3369 */
3370 if (prom_memory_limit) {
3371 __be64 val = cpu_to_be64(prom_memory_limit);
3372 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3373 &val, sizeof(val));
3374 }
3375#ifdef CONFIG_PPC64
3376 if (prom_iommu_off)
3377 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3378 NULL, 0);
3379
3380 if (prom_iommu_force_on)
3381 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
3382 NULL, 0);
3383
3384 if (prom_tce_alloc_start) {
3385 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3386 &prom_tce_alloc_start,
3387 sizeof(prom_tce_alloc_start));
3388 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3389 &prom_tce_alloc_end,
3390 sizeof(prom_tce_alloc_end));
3391 }
3392#endif
3393
3394 /*
3395 * Fixup any known bugs in the device-tree
3396 */
3397 fixup_device_tree();
3398
3399 /*
3400 * Now finally create the flattened device-tree
3401 */
3402 prom_printf("copying OF device tree...\n");
3403 flatten_device_tree();
3404
3405 /*
3406	 * Close stdin in case it is USB and still active on IBM machines...
3407	 * Unfortunately quiesce crashes on some powermacs if we have
3408	 * closed stdin already (in particular the powerbook 101).
3409 */
3410 if (of_platform != PLATFORM_POWERMAC)
3411 prom_close_stdin();
3412
3413 /*
3414 * Call OF "quiesce" method to shut down pending DMA's from
3415 * devices etc...
3416 */
3417 prom_printf("Quiescing Open Firmware ...\n");
3418 call_prom("quiesce", 0, 0);
3419
3420 /*
3421 * And finally, call the kernel passing it the flattened device
3422 * tree and NULL as r5, thus triggering the new entry point which
3423 * is common to us and kexec
3424 */
3425 hdr = dt_header_start;
3426
3427 prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
3428 prom_debug("->dt_header_start=0x%lx\n", hdr);
3429
3430#ifdef CONFIG_PPC32
3431 reloc_got2(-offset);
3432#endif
3433
3434 /* Move to secure memory if we're supposed to be secure guests. */
3435 setup_secure_guest(kbase, hdr);
3436
3437 __start(hdr, kbase, 0, 0, 0, 0, 0);
3438
3439 return 0;
3440}