1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Procedures for interfacing to Open Firmware.
4 *
5 * Paul Mackerras August 1996.
6 * Copyright (C) 1996-2005 Paul Mackerras.
7 *
8 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
9 * {engebret|bergner}@us.ibm.com
10 */
11
12#undef DEBUG_PROM
13
14/* we cannot use FORTIFY as it brings in new symbols */
15#define __NO_FORTIFY
16
17#include <stdarg.h>
18#include <linux/kernel.h>
19#include <linux/string.h>
20#include <linux/init.h>
21#include <linux/threads.h>
22#include <linux/spinlock.h>
23#include <linux/types.h>
24#include <linux/pci.h>
25#include <linux/proc_fs.h>
26#include <linux/delay.h>
27#include <linux/initrd.h>
28#include <linux/bitops.h>
29#include <linux/pgtable.h>
30#include <asm/prom.h>
31#include <asm/rtas.h>
32#include <asm/page.h>
33#include <asm/processor.h>
34#include <asm/irq.h>
35#include <asm/io.h>
36#include <asm/smp.h>
37#include <asm/mmu.h>
38#include <asm/iommu.h>
39#include <asm/btext.h>
40#include <asm/sections.h>
41#include <asm/machdep.h>
42#include <asm/asm-prototypes.h>
43#include <asm/ultravisor-api.h>
44
45#include <linux/linux_logo.h>
46
47/* All of prom_init bss lives here */
48#define __prombss __section(.bss.prominit)
49
50/*
51 * Allocation chunk size for the flattened device-tree. Eventually bump that one up.
52 */
53#define DEVTREE_CHUNK_SIZE 0x100000
54
55/*
56 * This is the size of the local memory reserve map that gets copied
57 * into the boot params passed to the kernel. That size is totally
58 * flexible as the kernel just reads the list until it encounters an
59 * entry with size 0, so it can be changed without breaking binary
60 * compatibility
61 */
62#define MEM_RESERVE_MAP_SIZE 8
63
64/*
65 * prom_init() is called very early on, before the kernel text
66 * and data have been mapped to KERNELBASE. At this point the code
67 * is running at whatever address it has been loaded at.
68 * On ppc32 we compile with -mrelocatable, which means that references
69 * to extern and static variables get relocated automatically.
70 * ppc64 objects are always relocatable, we just need to relocate the
71 * TOC.
72 *
73 * Because OF may have mapped I/O devices into the area starting at
74 * KERNELBASE, particularly on CHRP machines, we can't safely call
75 * OF once the kernel has been mapped to KERNELBASE. Therefore all
76 * OF calls must be done within prom_init().
77 *
78 * ADDR is used in calls to call_prom. The 4th and following
79 * arguments to call_prom should be 32-bit values.
80 * On ppc64, 64 bit values are truncated to 32 bits (and
81 * fortunately don't get interpreted as two arguments).
82 */
83#define ADDR(x) (u32)(unsigned long)(x)
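/*
 * A minimal usage sketch, mirroring calls made later in this file: the
 * buffer pointer is narrowed through ADDR() so that OF only ever sees a
 * 32-bit cell, e.g.
 *
 *	call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
 */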
84
85#ifdef CONFIG_PPC64
86#define OF_WORKAROUNDS 0
87#else
88#define OF_WORKAROUNDS of_workarounds
89static int of_workarounds __prombss;
90#endif
91
92#define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
93#define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
94
95#define PROM_BUG() do { \
96 prom_printf("kernel BUG at %s line 0x%x!\n", \
97 __FILE__, __LINE__); \
98 __builtin_trap(); \
99} while (0)
100
101#ifdef DEBUG_PROM
102#define prom_debug(x...) prom_printf(x)
103#else
104#define prom_debug(x...) do { } while (0)
105#endif
106
107
108typedef u32 prom_arg_t;
109
110struct prom_args {
111 __be32 service;
112 __be32 nargs;
113 __be32 nret;
114 __be32 args[10];
115};
116
117struct prom_t {
118 ihandle root;
119 phandle chosen;
120 int cpu;
121 ihandle stdout;
122 ihandle mmumap;
123 ihandle memory;
124};
125
126struct mem_map_entry {
127 __be64 base;
128 __be64 size;
129};
130
131typedef __be32 cell_t;
132
133extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
134 unsigned long r6, unsigned long r7, unsigned long r8,
135 unsigned long r9);
136
137#ifdef CONFIG_PPC64
138extern int enter_prom(struct prom_args *args, unsigned long entry);
139#else
140static inline int enter_prom(struct prom_args *args, unsigned long entry)
141{
142 return ((int (*)(struct prom_args *))entry)(args);
143}
144#endif
145
146extern void copy_and_flush(unsigned long dest, unsigned long src,
147 unsigned long size, unsigned long offset);
148
149/* prom structure */
150static struct prom_t __prombss prom;
151
152static unsigned long __prombss prom_entry;
153
154static char __prombss of_stdout_device[256];
155static char __prombss prom_scratch[256];
156
157static unsigned long __prombss dt_header_start;
158static unsigned long __prombss dt_struct_start, dt_struct_end;
159static unsigned long __prombss dt_string_start, dt_string_end;
160
161static unsigned long __prombss prom_initrd_start, prom_initrd_end;
162
163#ifdef CONFIG_PPC64
164static int __prombss prom_iommu_force_on;
165static int __prombss prom_iommu_off;
166static unsigned long __prombss prom_tce_alloc_start;
167static unsigned long __prombss prom_tce_alloc_end;
168#endif
169
170#ifdef CONFIG_PPC_PSERIES
171static bool __prombss prom_radix_disable;
172static bool __prombss prom_radix_gtse_disable;
173static bool __prombss prom_xive_disable;
174#endif
175
176#ifdef CONFIG_PPC_SVM
177static bool __prombss prom_svm_enable;
178#endif
179
180struct platform_support {
181 bool hash_mmu;
182 bool radix_mmu;
183 bool radix_gtse;
184 bool xive;
185};
186
187/* Platform codes are now obsolete in the kernel; they are only used within
188 * this file and will eventually go away too. Feel free to change them if you
189 * need to, they are not shared with anything outside of this file anymore.
190 */
191#define PLATFORM_PSERIES 0x0100
192#define PLATFORM_PSERIES_LPAR 0x0101
193#define PLATFORM_LPAR 0x0001
194#define PLATFORM_POWERMAC 0x0400
195#define PLATFORM_GENERIC 0x0500
196
197static int __prombss of_platform;
198
199static char __prombss prom_cmd_line[COMMAND_LINE_SIZE];
200
201static unsigned long __prombss prom_memory_limit;
202
203static unsigned long __prombss alloc_top;
204static unsigned long __prombss alloc_top_high;
205static unsigned long __prombss alloc_bottom;
206static unsigned long __prombss rmo_top;
207static unsigned long __prombss ram_top;
208
209static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE];
210static int __prombss mem_reserve_cnt;
211
212static cell_t __prombss regbuf[1024];
213
214static bool __prombss rtas_has_query_cpu_stopped;
215
216
217/*
218 * Error results ... some OF calls will return "-1" on error, some
219 * will return 0, some will return either. To simplify, here are
220 * macros to use with any ihandle or phandle return value to check if
221 * it is valid
222 */
223
224#define PROM_ERROR (-1u)
225#define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
226#define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
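/*
 * A minimal usage sketch, mirroring the pattern used later in this file:
 * a phandle returned by "finddevice" must be validated before use, e.g.
 *
 *	phandle rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
 *	if (!PHANDLE_VALID(rtas_node))
 *		return;
 */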
227
228/* Copied from lib/string.c and lib/kstrtox.c */
229
230static int __init prom_strcmp(const char *cs, const char *ct)
231{
232 unsigned char c1, c2;
233
234 while (1) {
235 c1 = *cs++;
236 c2 = *ct++;
237 if (c1 != c2)
238 return c1 < c2 ? -1 : 1;
239 if (!c1)
240 break;
241 }
242 return 0;
243}
244
245static char __init *prom_strcpy(char *dest, const char *src)
246{
247 char *tmp = dest;
248
249 while ((*dest++ = *src++) != '\0')
250 /* nothing */;
251 return tmp;
252}
253
254static int __init prom_strncmp(const char *cs, const char *ct, size_t count)
255{
256 unsigned char c1, c2;
257
258 while (count) {
259 c1 = *cs++;
260 c2 = *ct++;
261 if (c1 != c2)
262 return c1 < c2 ? -1 : 1;
263 if (!c1)
264 break;
265 count--;
266 }
267 return 0;
268}
269
270static size_t __init prom_strlen(const char *s)
271{
272 const char *sc;
273
274 for (sc = s; *sc != '\0'; ++sc)
275 /* nothing */;
276 return sc - s;
277}
278
279static int __init prom_memcmp(const void *cs, const void *ct, size_t count)
280{
281 const unsigned char *su1, *su2;
282 int res = 0;
283
284 for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
285 if ((res = *su1 - *su2) != 0)
286 break;
287 return res;
288}
289
290static char __init *prom_strstr(const char *s1, const char *s2)
291{
292 size_t l1, l2;
293
294 l2 = prom_strlen(s2);
295 if (!l2)
296 return (char *)s1;
297 l1 = prom_strlen(s1);
298 while (l1 >= l2) {
299 l1--;
300 if (!prom_memcmp(s1, s2, l2))
301 return (char *)s1;
302 s1++;
303 }
304 return NULL;
305}
306
307static size_t __init prom_strlcat(char *dest, const char *src, size_t count)
308{
309 size_t dsize = prom_strlen(dest);
310 size_t len = prom_strlen(src);
311 size_t res = dsize + len;
312
313 /* This would be a bug */
314 if (dsize >= count)
315 return count;
316
317 dest += dsize;
318 count -= dsize;
319 if (len >= count)
320 len = count-1;
321 memcpy(dest, src, len);
322 dest[len] = 0;
323 return res;
324
325}
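/*
 * prom_strlcat() keeps the usual strlcat() semantics: it appends at most
 * count - strlen(dest) - 1 bytes and returns the total length it tried to
 * create. Illustrative sketch (hypothetical values): with dest =
 * "root=/dev/sda", src = " quiet" and count = 16, dest becomes
 * "root=/dev/sda q" and the return value is 19, signalling truncation.
 */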
326
327#ifdef CONFIG_PPC_PSERIES
328static int __init prom_strtobool(const char *s, bool *res)
329{
330 if (!s)
331 return -EINVAL;
332
333 switch (s[0]) {
334 case 'y':
335 case 'Y':
336 case '1':
337 *res = true;
338 return 0;
339 case 'n':
340 case 'N':
341 case '0':
342 *res = false;
343 return 0;
344 case 'o':
345 case 'O':
346 switch (s[1]) {
347 case 'n':
348 case 'N':
349 *res = true;
350 return 0;
351 case 'f':
352 case 'F':
353 *res = false;
354 return 0;
355 default:
356 break;
357 }
358 default:
359 break;
360 }
361
362 return -EINVAL;
363}
364#endif
365
366/* This is the one and *ONLY* place where we actually call Open
367 * Firmware.
368 */
369
370static int __init call_prom(const char *service, int nargs, int nret, ...)
371{
372 int i;
373 struct prom_args args;
374 va_list list;
375
376 args.service = cpu_to_be32(ADDR(service));
377 args.nargs = cpu_to_be32(nargs);
378 args.nret = cpu_to_be32(nret);
379
380 va_start(list, nret);
381 for (i = 0; i < nargs; i++)
382 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
383 va_end(list);
384
385 for (i = 0; i < nret; i++)
386 args.args[nargs+i] = 0;
387
388 if (enter_prom(&args, prom_entry) < 0)
389 return PROM_ERROR;
390
391 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
392}
393
394static int __init call_prom_ret(const char *service, int nargs, int nret,
395 prom_arg_t *rets, ...)
396{
397 int i;
398 struct prom_args args;
399 va_list list;
400
401 args.service = cpu_to_be32(ADDR(service));
402 args.nargs = cpu_to_be32(nargs);
403 args.nret = cpu_to_be32(nret);
404
405 va_start(list, rets);
406 for (i = 0; i < nargs; i++)
407 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
408 va_end(list);
409
410 for (i = 0; i < nret; i++)
411 args.args[nargs+i] = 0;
412
413 if (enter_prom(&args, prom_entry) < 0)
414 return PROM_ERROR;
415
416 if (rets != NULL)
417 for (i = 1; i < nret; ++i)
418 rets[i-1] = be32_to_cpu(args.args[nargs+i]);
419
420 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
421}
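/*
 * A usage sketch of the two wrappers above, mirroring the RTAS
 * instantiation later in this file: call_prom() returns only the first
 * result cell, while call_prom_ret() additionally copies the remaining
 * result cells into "rets":
 *
 *	u32 entry = 0;
 *
 *	if (call_prom_ret("call-method", 3, 2, &entry,
 *			  ADDR("instantiate-rtas"), rtas_inst, base) != 0
 *	    || entry == 0)
 *		prom_printf(" failed\n");
 */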
422
423
424static void __init prom_print(const char *msg)
425{
426 const char *p, *q;
427
428 if (prom.stdout == 0)
429 return;
430
431 for (p = msg; *p != 0; p = q) {
432 for (q = p; *q != 0 && *q != '\n'; ++q)
433 ;
434 if (q > p)
435 call_prom("write", 3, 1, prom.stdout, p, q - p);
436 if (*q == 0)
437 break;
438 ++q;
439 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
440 }
441}
442
443
444/*
445 * Both prom_print_hex & prom_print_dec take an unsigned long as input so that
446 * we do not need __udivdi3 or __umoddi3 on 32-bit.
447 */
448static void __init prom_print_hex(unsigned long val)
449{
450 int i, nibbles = sizeof(val)*2;
451 char buf[sizeof(val)*2+1];
452
453 for (i = nibbles-1; i >= 0; i--) {
454 buf[i] = (val & 0xf) + '0';
455 if (buf[i] > '9')
456 buf[i] += ('a'-'0'-10);
457 val >>= 4;
458 }
459 buf[nibbles] = '\0';
460 call_prom("write", 3, 1, prom.stdout, buf, nibbles);
461}
462
463/* max number of decimal digits in an unsigned long */
464#define UL_DIGITS 21
465static void __init prom_print_dec(unsigned long val)
466{
467 int i, size;
468 char buf[UL_DIGITS+1];
469
470 for (i = UL_DIGITS-1; i >= 0; i--) {
471 buf[i] = (val % 10) + '0';
472 val = val/10;
473 if (val == 0)
474 break;
475 }
 476 /* write out only the significant digits (from buf+i) */
477 size = UL_DIGITS - i;
478 call_prom("write", 3, 1, prom.stdout, buf+i, size);
479}
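/*
 * Output sketch for the two helpers above: prom_print_hex() always writes
 * sizeof(unsigned long) * 2 zero-padded nibbles, so on ppc64
 * prom_print_hex(0x3f) prints "000000000000003f", while prom_print_dec()
 * writes only the significant digits, e.g. prom_print_dec(1024) prints
 * "1024".
 */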
480
481__printf(1, 2)
482static void __init prom_printf(const char *format, ...)
483{
484 const char *p, *q, *s;
485 va_list args;
486 unsigned long v;
487 long vs;
488 int n = 0;
489
490 va_start(args, format);
491 for (p = format; *p != 0; p = q) {
492 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
493 ;
494 if (q > p)
495 call_prom("write", 3, 1, prom.stdout, p, q - p);
496 if (*q == 0)
497 break;
498 if (*q == '\n') {
499 ++q;
500 call_prom("write", 3, 1, prom.stdout,
501 ADDR("\r\n"), 2);
502 continue;
503 }
504 ++q;
505 if (*q == 0)
506 break;
507 while (*q == 'l') {
508 ++q;
509 ++n;
510 }
511 switch (*q) {
512 case 's':
513 ++q;
514 s = va_arg(args, const char *);
515 prom_print(s);
516 break;
517 case 'x':
518 ++q;
519 switch (n) {
520 case 0:
521 v = va_arg(args, unsigned int);
522 break;
523 case 1:
524 v = va_arg(args, unsigned long);
525 break;
526 case 2:
527 default:
528 v = va_arg(args, unsigned long long);
529 break;
530 }
531 prom_print_hex(v);
532 break;
533 case 'u':
534 ++q;
535 switch (n) {
536 case 0:
537 v = va_arg(args, unsigned int);
538 break;
539 case 1:
540 v = va_arg(args, unsigned long);
541 break;
542 case 2:
543 default:
544 v = va_arg(args, unsigned long long);
545 break;
546 }
547 prom_print_dec(v);
548 break;
549 case 'd':
550 ++q;
551 switch (n) {
552 case 0:
553 vs = va_arg(args, int);
554 break;
555 case 1:
556 vs = va_arg(args, long);
557 break;
558 case 2:
559 default:
560 vs = va_arg(args, long long);
561 break;
562 }
563 if (vs < 0) {
564 prom_print("-");
565 vs = -vs;
566 }
567 prom_print_dec(vs);
568 break;
569 }
570 }
571 va_end(args);
572}
573
574
575static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
576 unsigned long align)
577{
578
579 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
580 /*
581 * Old OF requires we claim physical and virtual separately
582 * and then map explicitly (assuming virtual mode)
583 */
584 int ret;
585 prom_arg_t result;
586
587 ret = call_prom_ret("call-method", 5, 2, &result,
588 ADDR("claim"), prom.memory,
589 align, size, virt);
590 if (ret != 0 || result == -1)
591 return -1;
592 ret = call_prom_ret("call-method", 5, 2, &result,
593 ADDR("claim"), prom.mmumap,
594 align, size, virt);
595 if (ret != 0) {
596 call_prom("call-method", 4, 1, ADDR("release"),
597 prom.memory, size, virt);
598 return -1;
599 }
600 /* the 0x12 is M (coherence) + PP == read/write */
601 call_prom("call-method", 6, 1,
602 ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
603 return virt;
604 }
605 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
606 (prom_arg_t)align);
607}
608
609static void __init __attribute__((noreturn)) prom_panic(const char *reason)
610{
611 prom_print(reason);
 612 /* Do not call exit because it clears the screen on pmac;
 613 * it also causes some sort of double fault on early pmacs */
614 if (of_platform == PLATFORM_POWERMAC)
615 asm("trap\n");
616
617 /* ToDo: should put up an SRC here on pSeries */
618 call_prom("exit", 0, 0);
619
620 for (;;) /* should never get here */
621 ;
622}
623
624
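/*
 * Walk the device tree depth-first: descend to the first child if there is
 * one, otherwise move on to the next peer, otherwise climb back up until a
 * parent with a remaining peer is found. Starting with *nodep == 0 yields
 * the root node first; the function returns 0 once the whole tree has been
 * visited.
 */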
625static int __init prom_next_node(phandle *nodep)
626{
627 phandle node;
628
629 if ((node = *nodep) != 0
630 && (*nodep = call_prom("child", 1, 1, node)) != 0)
631 return 1;
632 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
633 return 1;
634 for (;;) {
635 if ((node = call_prom("parent", 1, 1, node)) == 0)
636 return 0;
637 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
638 return 1;
639 }
640}
641
642static inline int __init prom_getprop(phandle node, const char *pname,
643 void *value, size_t valuelen)
644{
645 return call_prom("getprop", 4, 1, node, ADDR(pname),
646 (u32)(unsigned long) value, (u32) valuelen);
647}
648
649static inline int __init prom_getproplen(phandle node, const char *pname)
650{
651 return call_prom("getproplen", 2, 1, node, ADDR(pname));
652}
653
654static void add_string(char **str, const char *q)
655{
656 char *p = *str;
657
658 while (*q)
659 *p++ = *q++;
660 *p++ = ' ';
661 *str = p;
662}
663
664static char *tohex(unsigned int x)
665{
666 static const char digits[] __initconst = "0123456789abcdef";
667 static char result[9] __prombss;
668 int i;
669
670 result[8] = 0;
671 i = 8;
672 do {
673 --i;
674 result[i] = digits[x & 0xf];
675 x >>= 4;
676 } while (x != 0 && i > 0);
677 return &result[i];
678}
679
680static int __init prom_setprop(phandle node, const char *nodename,
681 const char *pname, void *value, size_t valuelen)
682{
683 char cmd[256], *p;
684
685 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
686 return call_prom("setprop", 4, 1, node, ADDR(pname),
687 (u32)(unsigned long) value, (u32) valuelen);
688
689 /* gah... setprop doesn't work on longtrail, have to use interpret */
690 p = cmd;
691 add_string(&p, "dev");
692 add_string(&p, nodename);
693 add_string(&p, tohex((u32)(unsigned long) value));
694 add_string(&p, tohex(valuelen));
695 add_string(&p, tohex(ADDR(pname)));
696 add_string(&p, tohex(prom_strlen(pname)));
697 add_string(&p, "property");
698 *p = 0;
699 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
700}
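/*
 * Sketch of the Forth command built by the Longtrail fallback above
 * (illustrative addresses): for prom_setprop(node, "/rtas",
 * "linux,rtas-base", &val, 4) the interpreted string looks like
 *
 *	dev /rtas 1234abcd 4 5678ef00 f property
 *
 * i.e. value address, value length, property-name address and name length,
 * all in hex, followed by the "property" word.
 */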
701
702/* We can't use the standard versions because of relocation headaches. */
703#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
704 || ('a' <= (c) && (c) <= 'f') \
705 || ('A' <= (c) && (c) <= 'F'))
706
707#define isdigit(c) ('0' <= (c) && (c) <= '9')
708#define islower(c) ('a' <= (c) && (c) <= 'z')
709#define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
710
711static unsigned long prom_strtoul(const char *cp, const char **endp)
712{
713 unsigned long result = 0, base = 10, value;
714
715 if (*cp == '0') {
716 base = 8;
717 cp++;
718 if (toupper(*cp) == 'X') {
719 cp++;
720 base = 16;
721 }
722 }
723
724 while (isxdigit(*cp) &&
725 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
726 result = result * base + value;
727 cp++;
728 }
729
730 if (endp)
731 *endp = cp;
732
733 return result;
734}
735
736static unsigned long prom_memparse(const char *ptr, const char **retptr)
737{
738 unsigned long ret = prom_strtoul(ptr, retptr);
739 int shift = 0;
740
741 /*
742 * We can't use a switch here because GCC *may* generate a
743 * jump table which won't work, because we're not running at
744 * the address we're linked at.
745 */
746 if ('G' == **retptr || 'g' == **retptr)
747 shift = 30;
748
749 if ('M' == **retptr || 'm' == **retptr)
750 shift = 20;
751
752 if ('K' == **retptr || 'k' == **retptr)
753 shift = 10;
754
755 if (shift) {
756 ret <<= shift;
757 (*retptr)++;
758 }
759
760 return ret;
761}
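/*
 * A worked example of the parser above (hypothetical input): for the
 * command-line fragment "mem=512M", prom_memparse("512M", &p) returns
 * 512UL << 20 == 0x20000000 and leaves p pointing just past the 'M'.
 */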
762
763/*
764 * Early parsing of the command line passed to the kernel, used for
765 * "mem=x" and the options that affect the iommu
766 */
767static void __init early_cmdline_parse(void)
768{
769 const char *opt;
770
771 char *p;
772 int l = 0;
773
774 prom_cmd_line[0] = 0;
775 p = prom_cmd_line;
776
777 if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && (long)prom.chosen > 0)
778 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
779
780 if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) || l <= 0 || p[0] == '\0')
781 prom_strlcat(prom_cmd_line, " " CONFIG_CMDLINE,
782 sizeof(prom_cmd_line));
783
784 prom_printf("command line: %s\n", prom_cmd_line);
785
786#ifdef CONFIG_PPC64
787 opt = prom_strstr(prom_cmd_line, "iommu=");
788 if (opt) {
789 prom_printf("iommu opt is: %s\n", opt);
790 opt += 6;
791 while (*opt && *opt == ' ')
792 opt++;
793 if (!prom_strncmp(opt, "off", 3))
794 prom_iommu_off = 1;
795 else if (!prom_strncmp(opt, "force", 5))
796 prom_iommu_force_on = 1;
797 }
798#endif
799 opt = prom_strstr(prom_cmd_line, "mem=");
800 if (opt) {
801 opt += 4;
802 prom_memory_limit = prom_memparse(opt, (const char **)&opt);
803#ifdef CONFIG_PPC64
804 /* Align to 16 MB == size of ppc64 large page */
805 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
806#endif
807 }
808
809#ifdef CONFIG_PPC_PSERIES
810 prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
811 opt = prom_strstr(prom_cmd_line, "disable_radix");
812 if (opt) {
813 opt += 13;
814 if (*opt && *opt == '=') {
815 bool val;
816
817 if (prom_strtobool(++opt, &val))
818 prom_radix_disable = false;
819 else
820 prom_radix_disable = val;
821 } else
822 prom_radix_disable = true;
823 }
824 if (prom_radix_disable)
825 prom_debug("Radix disabled from cmdline\n");
826
827 opt = prom_strstr(prom_cmd_line, "radix_hcall_invalidate=on");
828 if (opt) {
829 prom_radix_gtse_disable = true;
830 prom_debug("Radix GTSE disabled from cmdline\n");
831 }
832
833 opt = prom_strstr(prom_cmd_line, "xive=off");
834 if (opt) {
835 prom_xive_disable = true;
836 prom_debug("XIVE disabled from cmdline\n");
837 }
838#endif /* CONFIG_PPC_PSERIES */
839
840#ifdef CONFIG_PPC_SVM
841 opt = prom_strstr(prom_cmd_line, "svm=");
842 if (opt) {
843 bool val;
844
845 opt += sizeof("svm=") - 1;
846 if (!prom_strtobool(opt, &val))
847 prom_svm_enable = val;
848 }
849#endif /* CONFIG_PPC_SVM */
850}
851
852#ifdef CONFIG_PPC_PSERIES
853/*
854 * The architecture vector has an array of PVR mask/value pairs,
855 * followed by # option vectors - 1, followed by the option vectors.
856 *
857 * See prom.h for the definition of the bits specified in the
858 * architecture vector.
859 */
860
861/* Firmware expects the value to be n - 1, where n is the # of vectors */
862#define NUM_VECTORS(n) ((n) - 1)
863
864/*
865 * Firmware expects 1 + n - 2, where n is the length of the option vector in
866 * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
867 */
868#define VECTOR_LENGTH(n) (1 + (n) - 2)
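/*
 * Worked examples of the two encodings above, matching the template below:
 * with six option vectors, num_vectors = NUM_VECTORS(6) == 5, and for a
 * two-byte vector such as option_vector3, vec3_len = VECTOR_LENGTH(2) == 1.
 */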
869
870struct option_vector1 {
871 u8 byte1;
872 u8 arch_versions;
873 u8 arch_versions3;
874} __packed;
875
876struct option_vector2 {
877 u8 byte1;
878 __be16 reserved;
879 __be32 real_base;
880 __be32 real_size;
881 __be32 virt_base;
882 __be32 virt_size;
883 __be32 load_base;
884 __be32 min_rma;
885 __be32 min_load;
886 u8 min_rma_percent;
887 u8 max_pft_size;
888} __packed;
889
890struct option_vector3 {
891 u8 byte1;
892 u8 byte2;
893} __packed;
894
895struct option_vector4 {
896 u8 byte1;
897 u8 min_vp_cap;
898} __packed;
899
900struct option_vector5 {
901 u8 byte1;
902 u8 byte2;
903 u8 byte3;
904 u8 cmo;
905 u8 associativity;
906 u8 bin_opts;
907 u8 micro_checkpoint;
908 u8 reserved0;
909 __be32 max_cpus;
910 __be16 papr_level;
911 __be16 reserved1;
912 u8 platform_facilities;
913 u8 reserved2;
914 __be16 reserved3;
915 u8 subprocessors;
916 u8 byte22;
917 u8 intarch;
918 u8 mmu;
919 u8 hash_ext;
920 u8 radix_ext;
921} __packed;
922
923struct option_vector6 {
924 u8 reserved;
925 u8 secondary_pteg;
926 u8 os_name;
927} __packed;
928
929struct ibm_arch_vec {
930 struct { u32 mask, val; } pvrs[14];
931
932 u8 num_vectors;
933
934 u8 vec1_len;
935 struct option_vector1 vec1;
936
937 u8 vec2_len;
938 struct option_vector2 vec2;
939
940 u8 vec3_len;
941 struct option_vector3 vec3;
942
943 u8 vec4_len;
944 struct option_vector4 vec4;
945
946 u8 vec5_len;
947 struct option_vector5 vec5;
948
949 u8 vec6_len;
950 struct option_vector6 vec6;
951} __packed;
952
953static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
954 .pvrs = {
955 {
956 .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
957 .val = cpu_to_be32(0x003a0000),
958 },
959 {
960 .mask = cpu_to_be32(0xffff0000), /* POWER6 */
961 .val = cpu_to_be32(0x003e0000),
962 },
963 {
964 .mask = cpu_to_be32(0xffff0000), /* POWER7 */
965 .val = cpu_to_be32(0x003f0000),
966 },
967 {
968 .mask = cpu_to_be32(0xffff0000), /* POWER8E */
969 .val = cpu_to_be32(0x004b0000),
970 },
971 {
972 .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
973 .val = cpu_to_be32(0x004c0000),
974 },
975 {
976 .mask = cpu_to_be32(0xffff0000), /* POWER8 */
977 .val = cpu_to_be32(0x004d0000),
978 },
979 {
980 .mask = cpu_to_be32(0xffff0000), /* POWER9 */
981 .val = cpu_to_be32(0x004e0000),
982 },
983 {
984 .mask = cpu_to_be32(0xffff0000), /* POWER10 */
985 .val = cpu_to_be32(0x00800000),
986 },
987 {
988 .mask = cpu_to_be32(0xffffffff), /* all 3.1-compliant */
989 .val = cpu_to_be32(0x0f000006),
990 },
991 {
992 .mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
993 .val = cpu_to_be32(0x0f000005),
994 },
995 {
996 .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
997 .val = cpu_to_be32(0x0f000004),
998 },
999 {
1000 .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
1001 .val = cpu_to_be32(0x0f000003),
1002 },
1003 {
1004 .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
1005 .val = cpu_to_be32(0x0f000002),
1006 },
1007 {
1008 .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
1009 .val = cpu_to_be32(0x0f000001),
1010 },
1011 },
1012
1013 .num_vectors = NUM_VECTORS(6),
1014
1015 .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
1016 .vec1 = {
1017 .byte1 = 0,
1018 .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
1019 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
1020 .arch_versions3 = OV1_PPC_3_00 | OV1_PPC_3_1,
1021 },
1022
1023 .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
1024 /* option vector 2: Open Firmware options supported */
1025 .vec2 = {
1026 .byte1 = OV2_REAL_MODE,
1027 .reserved = 0,
1028 .real_base = cpu_to_be32(0xffffffff),
1029 .real_size = cpu_to_be32(0xffffffff),
1030 .virt_base = cpu_to_be32(0xffffffff),
1031 .virt_size = cpu_to_be32(0xffffffff),
1032 .load_base = cpu_to_be32(0xffffffff),
1033 .min_rma = cpu_to_be32(512), /* 512MB min RMA */
1034 .min_load = cpu_to_be32(0xffffffff), /* full client load */
1035 .min_rma_percent = 0, /* min RMA percentage of total RAM */
1036 .max_pft_size = 48, /* max log_2(hash table size) */
1037 },
1038
1039 .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
1040 /* option vector 3: processor options supported */
1041 .vec3 = {
1042 .byte1 = 0, /* don't ignore, don't halt */
1043 .byte2 = OV3_FP | OV3_VMX | OV3_DFP,
1044 },
1045
1046 .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
1047 /* option vector 4: IBM PAPR implementation */
1048 .vec4 = {
1049 .byte1 = 0, /* don't halt */
1050 .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
1051 },
1052
1053 .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
1054 /* option vector 5: PAPR/OF options */
1055 .vec5 = {
1056 .byte1 = 0, /* don't ignore, don't halt */
1057 .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
1058 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
1059#ifdef CONFIG_PCI_MSI
1060 /* PCIe/MSI support. Without MSI full PCIe is not supported */
1061 OV5_FEAT(OV5_MSI),
1062#else
1063 0,
1064#endif
1065 .byte3 = 0,
1066 .cmo =
1067#ifdef CONFIG_PPC_SMLPAR
1068 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
1069#else
1070 0,
1071#endif
1072 .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
1073 .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
1074 .micro_checkpoint = 0,
1075 .reserved0 = 0,
1076 .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */
1077 .papr_level = 0,
1078 .reserved1 = 0,
1079 .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
1080 .reserved2 = 0,
1081 .reserved3 = 0,
1082 .subprocessors = 1,
1083 .byte22 = OV5_FEAT(OV5_DRMEM_V2) | OV5_FEAT(OV5_DRC_INFO),
1084 .intarch = 0,
1085 .mmu = 0,
1086 .hash_ext = 0,
1087 .radix_ext = 0,
1088 },
1089
1090 /* option vector 6: IBM PAPR hints */
1091 .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
1092 .vec6 = {
1093 .reserved = 0,
1094 .secondary_pteg = 0,
1095 .os_name = OV6_LINUX,
1096 },
1097};
1098
1099static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned;
1100
1101/* Old method - ELF header with PT_NOTE sections only works on BE */
1102#ifdef __BIG_ENDIAN__
1103static const struct fake_elf {
1104 Elf32_Ehdr elfhdr;
1105 Elf32_Phdr phdr[2];
1106 struct chrpnote {
1107 u32 namesz;
1108 u32 descsz;
1109 u32 type;
1110 char name[8]; /* "PowerPC" */
1111 struct chrpdesc {
1112 u32 real_mode;
1113 u32 real_base;
1114 u32 real_size;
1115 u32 virt_base;
1116 u32 virt_size;
1117 u32 load_base;
1118 } chrpdesc;
1119 } chrpnote;
1120 struct rpanote {
1121 u32 namesz;
1122 u32 descsz;
1123 u32 type;
1124 char name[24]; /* "IBM,RPA-Client-Config" */
1125 struct rpadesc {
1126 u32 lpar_affinity;
1127 u32 min_rmo_size;
1128 u32 min_rmo_percent;
1129 u32 max_pft_size;
1130 u32 splpar;
1131 u32 min_load;
1132 u32 new_mem_def;
1133 u32 ignore_me;
1134 } rpadesc;
1135 } rpanote;
1136} fake_elf __initconst = {
1137 .elfhdr = {
1138 .e_ident = { 0x7f, 'E', 'L', 'F',
1139 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
1140 .e_type = ET_EXEC, /* yeah right */
1141 .e_machine = EM_PPC,
1142 .e_version = EV_CURRENT,
1143 .e_phoff = offsetof(struct fake_elf, phdr),
1144 .e_phentsize = sizeof(Elf32_Phdr),
1145 .e_phnum = 2
1146 },
1147 .phdr = {
1148 [0] = {
1149 .p_type = PT_NOTE,
1150 .p_offset = offsetof(struct fake_elf, chrpnote),
1151 .p_filesz = sizeof(struct chrpnote)
1152 }, [1] = {
1153 .p_type = PT_NOTE,
1154 .p_offset = offsetof(struct fake_elf, rpanote),
1155 .p_filesz = sizeof(struct rpanote)
1156 }
1157 },
1158 .chrpnote = {
1159 .namesz = sizeof("PowerPC"),
1160 .descsz = sizeof(struct chrpdesc),
1161 .type = 0x1275,
1162 .name = "PowerPC",
1163 .chrpdesc = {
1164 .real_mode = ~0U, /* ~0 means "don't care" */
1165 .real_base = ~0U,
1166 .real_size = ~0U,
1167 .virt_base = ~0U,
1168 .virt_size = ~0U,
1169 .load_base = ~0U
1170 },
1171 },
1172 .rpanote = {
1173 .namesz = sizeof("IBM,RPA-Client-Config"),
1174 .descsz = sizeof(struct rpadesc),
1175 .type = 0x12759999,
1176 .name = "IBM,RPA-Client-Config",
1177 .rpadesc = {
1178 .lpar_affinity = 0,
1179 .min_rmo_size = 64, /* in megabytes */
1180 .min_rmo_percent = 0,
1181 .max_pft_size = 48, /* 2^48 bytes max PFT size */
1182 .splpar = 1,
1183 .min_load = ~0U,
1184 .new_mem_def = 0
1185 }
1186 }
1187};
1188#endif /* __BIG_ENDIAN__ */
1189
1190static int __init prom_count_smt_threads(void)
1191{
1192 phandle node;
1193 char type[64];
1194 unsigned int plen;
1195
 1196 /* Pick up the first CPU node we can find */
1197 for (node = 0; prom_next_node(&node); ) {
1198 type[0] = 0;
1199 prom_getprop(node, "device_type", type, sizeof(type));
1200
1201 if (prom_strcmp(type, "cpu"))
1202 continue;
1203 /*
1204 * There is an entry for each smt thread, each entry being
1205 * 4 bytes long. All cpus should have the same number of
1206 * smt threads, so return after finding the first.
1207 */
1208 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
1209 if (plen == PROM_ERROR)
1210 break;
1211 plen >>= 2;
1212 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
1213
1214 /* Sanity check */
1215 if (plen < 1 || plen > 64) {
1216 prom_printf("Threads per core %lu out of bounds, assuming 1\n",
1217 (unsigned long)plen);
1218 return 1;
1219 }
1220 return plen;
1221 }
1222 prom_debug("No threads found, assuming 1 per core\n");
1223
1224 return 1;
1225
1226}
1227
1228static void __init prom_parse_mmu_model(u8 val,
1229 struct platform_support *support)
1230{
1231 switch (val) {
1232 case OV5_FEAT(OV5_MMU_DYNAMIC):
1233 case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
1234 prom_debug("MMU - either supported\n");
1235 support->radix_mmu = !prom_radix_disable;
1236 support->hash_mmu = true;
1237 break;
1238 case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
1239 prom_debug("MMU - radix only\n");
1240 if (prom_radix_disable) {
1241 /*
1242 * If we __have__ to do radix, we're better off ignoring
1243 * the command line rather than not booting.
1244 */
1245 prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
1246 }
1247 support->radix_mmu = true;
1248 break;
1249 case OV5_FEAT(OV5_MMU_HASH):
1250 prom_debug("MMU - hash only\n");
1251 support->hash_mmu = true;
1252 break;
1253 default:
1254 prom_debug("Unknown mmu support option: 0x%x\n", val);
1255 break;
1256 }
1257}
1258
1259static void __init prom_parse_xive_model(u8 val,
1260 struct platform_support *support)
1261{
1262 switch (val) {
1263 case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
1264 prom_debug("XIVE - either mode supported\n");
1265 support->xive = !prom_xive_disable;
1266 break;
1267 case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
1268 prom_debug("XIVE - exploitation mode supported\n");
1269 if (prom_xive_disable) {
1270 /*
1271 * If we __have__ to do XIVE, we're better off ignoring
1272 * the command line rather than not booting.
1273 */
1274 prom_printf("WARNING: Ignoring cmdline option xive=off\n");
1275 }
1276 support->xive = true;
1277 break;
1278 case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
1279 prom_debug("XIVE - legacy mode supported\n");
1280 break;
1281 default:
1282 prom_debug("Unknown xive support option: 0x%x\n", val);
1283 break;
1284 }
1285}
1286
1287static void __init prom_parse_platform_support(u8 index, u8 val,
1288 struct platform_support *support)
1289{
1290 switch (index) {
1291 case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
1292 prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
1293 break;
1294 case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
1295 if (val & OV5_FEAT(OV5_RADIX_GTSE))
1296 support->radix_gtse = !prom_radix_gtse_disable;
1297 break;
1298 case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */
1299 prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT),
1300 support);
1301 break;
1302 }
1303}
1304
1305static void __init prom_check_platform_support(void)
1306{
1307 struct platform_support supported = {
1308 .hash_mmu = false,
1309 .radix_mmu = false,
1310 .radix_gtse = false,
1311 .xive = false
1312 };
1313 int prop_len = prom_getproplen(prom.chosen,
1314 "ibm,arch-vec-5-platform-support");
1315
1316 /*
1317 * First copy the architecture vec template
1318 *
1319 * use memcpy() instead of *vec = *vec_template so that GCC replaces it
1320 * by __memcpy() when KASAN is active
1321 */
1322 memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template,
1323 sizeof(ibm_architecture_vec));
1324
1325 if (prop_len > 1) {
1326 int i;
1327 u8 vec[8];
1328 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
1329 prop_len);
1330 if (prop_len > sizeof(vec))
1331 prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
1332 prop_len);
1333 prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
1334 &vec, sizeof(vec));
1335 for (i = 0; i < sizeof(vec); i += 2) {
1336 prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
1337 , vec[i]
1338 , vec[i + 1]);
1339 prom_parse_platform_support(vec[i], vec[i + 1],
1340 &supported);
1341 }
1342 }
1343
1344 if (supported.radix_mmu && IS_ENABLED(CONFIG_PPC_RADIX_MMU)) {
1345 /* Radix preferred - Check if GTSE is also supported */
1346 prom_debug("Asking for radix\n");
1347 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
1348 if (supported.radix_gtse)
1349 ibm_architecture_vec.vec5.radix_ext =
1350 OV5_FEAT(OV5_RADIX_GTSE);
1351 else
1352 prom_debug("Radix GTSE isn't supported\n");
1353 } else if (supported.hash_mmu) {
1354 /* Default to hash mmu (if we can) */
1355 prom_debug("Asking for hash\n");
1356 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
1357 } else {
1358 /* We're probably on a legacy hypervisor */
1359 prom_debug("Assuming legacy hash support\n");
1360 }
1361
1362 if (supported.xive) {
1363 prom_debug("Asking for XIVE\n");
1364 ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT);
1365 }
1366}
1367
1368static void __init prom_send_capabilities(void)
1369{
1370 ihandle root;
1371 prom_arg_t ret;
1372 u32 cores;
1373
1374 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
1375 prom_check_platform_support();
1376
1377 root = call_prom("open", 1, 1, ADDR("/"));
1378 if (root != 0) {
1379 /* We need to tell the FW about the number of cores we support.
1380 *
1381 * To do that, we count the number of threads on the first core
1382 * (we assume this is the same for all cores) and use it to
1383 * divide NR_CPUS.
1384 */
1385
1386 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
1387 prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n",
1388 cores, NR_CPUS);
1389
1390 ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
1391
1392 /* try calling the ibm,client-architecture-support method */
1393 prom_printf("Calling ibm,client-architecture-support...");
1394 if (call_prom_ret("call-method", 3, 2, &ret,
1395 ADDR("ibm,client-architecture-support"),
1396 root,
1397 ADDR(&ibm_architecture_vec)) == 0) {
1398 /* the call exists... */
1399 if (ret)
1400 prom_printf("\nWARNING: ibm,client-architecture"
1401 "-support call FAILED!\n");
1402 call_prom("close", 1, 0, root);
1403 prom_printf(" done\n");
1404 return;
1405 }
1406 call_prom("close", 1, 0, root);
1407 prom_printf(" not implemented\n");
1408 }
1409
1410#ifdef __BIG_ENDIAN__
1411 {
1412 ihandle elfloader;
1413
1414 /* no ibm,client-architecture-support call, try the old way */
1415 elfloader = call_prom("open", 1, 1,
1416 ADDR("/packages/elf-loader"));
1417 if (elfloader == 0) {
1418 prom_printf("couldn't open /packages/elf-loader\n");
1419 return;
1420 }
1421 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
1422 elfloader, ADDR(&fake_elf));
1423 call_prom("close", 1, 0, elfloader);
1424 }
1425#endif /* __BIG_ENDIAN__ */
1426}
1427#endif /* CONFIG_PPC_PSERIES */
1428
1429/*
1430 * Memory allocation strategy... our layout is normally:
1431 *
1432 * at 14Mb or more we have vmlinux, then a gap and initrd. In some
1433 * rare cases, initrd might end up being before the kernel though.
 1434 * We assume this won't overwrite the final kernel at 0; we have no
1435 * provision to handle that in this version, but it should hopefully
1436 * never happen.
1437 *
1438 * alloc_top is set to the top of RMO, eventually shrink down if the
1439 * TCEs overlap
1440 *
1441 * alloc_bottom is set to the top of kernel/initrd
1442 *
 1443 * From there, allocations are done this way: RTAS is allocated
1444 * topmost, and the device-tree is allocated from the bottom. We try
1445 * to grow the device-tree allocation as we progress. If we can't,
 1446 * then we fail; we don't currently have a facility to restart
1447 * elsewhere, but that shouldn't be necessary.
1448 *
1449 * Note that calls to reserve_mem have to be done explicitly, memory
1450 * allocated with either alloc_up or alloc_down isn't automatically
1451 * reserved.
1452 */
1453
1454
1455/*
1456 * Allocates memory in the RMO upward from the kernel/initrd
1457 *
 1458 * When align is 0, this is a special case: it means allocate in place
 1459 * at the current location of alloc_bottom or fail (that is basically
 1460 * extending the previous allocation). Used for device-tree flattening.
1461 */
1462static unsigned long __init alloc_up(unsigned long size, unsigned long align)
1463{
1464 unsigned long base = alloc_bottom;
1465 unsigned long addr = 0;
1466
1467 if (align)
1468 base = ALIGN(base, align);
1469 prom_debug("%s(%lx, %lx)\n", __func__, size, align);
1470 if (ram_top == 0)
1471 prom_panic("alloc_up() called with mem not initialized\n");
1472
1473 if (align)
1474 base = ALIGN(alloc_bottom, align);
1475 else
1476 base = alloc_bottom;
1477
1478 for(; (base + size) <= alloc_top;
1479 base = ALIGN(base + 0x100000, align)) {
1480 prom_debug(" trying: 0x%lx\n\r", base);
1481 addr = (unsigned long)prom_claim(base, size, 0);
1482 if (addr != PROM_ERROR && addr != 0)
1483 break;
1484 addr = 0;
1485 if (align == 0)
1486 break;
1487 }
1488 if (addr == 0)
1489 return 0;
1490 alloc_bottom = addr + size;
1491
1492 prom_debug(" -> %lx\n", addr);
1493 prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
1494 prom_debug(" alloc_top : %lx\n", alloc_top);
1495 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
1496 prom_debug(" rmo_top : %lx\n", rmo_top);
1497 prom_debug(" ram_top : %lx\n", ram_top);
1498
1499 return addr;
1500}
1501
1502/*
1503 * Allocates memory downward, either from top of RMO, or if highmem
1504 * is set, from the top of RAM. Note that this one doesn't handle
1505 * failures. It does claim memory if highmem is not set.
1506 */
1507static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1508 int highmem)
1509{
1510 unsigned long base, addr = 0;
1511
1512 prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
1513 highmem ? "(high)" : "(low)");
1514 if (ram_top == 0)
1515 prom_panic("alloc_down() called with mem not initialized\n");
1516
1517 if (highmem) {
1518 /* Carve out storage for the TCE table. */
1519 addr = ALIGN_DOWN(alloc_top_high - size, align);
1520 if (addr <= alloc_bottom)
1521 return 0;
 1522 /* Will we bump into the RMO? If yes, check that we
 1523 * didn't overlap existing allocations there; if we did,
 1524 * we are dead: we must be the first in town!
1525 */
1526 if (addr < rmo_top) {
1527 /* Good, we are first */
1528 if (alloc_top == rmo_top)
1529 alloc_top = rmo_top = addr;
1530 else
1531 return 0;
1532 }
1533 alloc_top_high = addr;
1534 goto bail;
1535 }
1536
1537 base = ALIGN_DOWN(alloc_top - size, align);
1538 for (; base > alloc_bottom;
1539 base = ALIGN_DOWN(base - 0x100000, align)) {
1540 prom_debug(" trying: 0x%lx\n\r", base);
1541 addr = (unsigned long)prom_claim(base, size, 0);
1542 if (addr != PROM_ERROR && addr != 0)
1543 break;
1544 addr = 0;
1545 }
1546 if (addr == 0)
1547 return 0;
1548 alloc_top = addr;
1549
1550 bail:
1551 prom_debug(" -> %lx\n", addr);
1552 prom_debug(" alloc_bottom : %lx\n", alloc_bottom);
1553 prom_debug(" alloc_top : %lx\n", alloc_top);
1554 prom_debug(" alloc_top_hi : %lx\n", alloc_top_high);
1555 prom_debug(" rmo_top : %lx\n", rmo_top);
1556 prom_debug(" ram_top : %lx\n", ram_top);
1557
1558 return addr;
1559}
1560
1561/*
1562 * Parse a "reg" cell
1563 */
1564static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1565{
1566 cell_t *p = *cellp;
1567 unsigned long r = 0;
1568
1569 /* Ignore more than 2 cells */
1570 while (s > sizeof(unsigned long) / 4) {
1571 p++;
1572 s--;
1573 }
1574 r = be32_to_cpu(*p++);
1575#ifdef CONFIG_PPC64
1576 if (s > 1) {
1577 r <<= 32;
1578 r |= be32_to_cpu(*(p++));
1579 }
1580#endif
1581 *cellp = p;
1582 return r;
1583}
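/*
 * A worked example of the cell parser above (hypothetical reg value): with
 * #address-cells == 2, the big-endian cells { 0x00000001, 0x00000000 }
 * decode on ppc64 to the 64-bit value 0x100000000; on ppc32 the extra
 * (high) cell is skipped and only the low word is kept.
 */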
1584
1585/*
1586 * Very dumb function for adding to the memory reserve list, but
1587 * we don't need anything smarter at this point
1588 *
1589 * XXX Eventually check for collisions. They should NEVER happen.
1590 * If problems seem to show up, it would be a good start to track
1591 * them down.
1592 */
1593static void __init reserve_mem(u64 base, u64 size)
1594{
1595 u64 top = base + size;
1596 unsigned long cnt = mem_reserve_cnt;
1597
1598 if (size == 0)
1599 return;
1600
1601 /* We need to always keep one empty entry so that we
1602 * have our terminator with "size" set to 0 since we are
1603 * dumb and just copy this entire array to the boot params
1604 */
1605 base = ALIGN_DOWN(base, PAGE_SIZE);
1606 top = ALIGN(top, PAGE_SIZE);
1607 size = top - base;
1608
1609 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1610 prom_panic("Memory reserve map exhausted !\n");
1611 mem_reserve_map[cnt].base = cpu_to_be64(base);
1612 mem_reserve_map[cnt].size = cpu_to_be64(size);
1613 mem_reserve_cnt = cnt + 1;
1614}
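/*
 * A worked example of the rounding above (hypothetical values, 4K pages):
 * reserve_mem(0x3ff800, 0x1000) records base 0x3ff000 and size 0x2000,
 * since the range is grown outward to page boundaries before being stored.
 */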
1615
1616/*
 1617 * Initialize the memory allocation mechanism: parse "memory" nodes and
 1618 * obtain from them the top of memory and of the RMO to set up our local allocator
1619 */
1620static void __init prom_init_mem(void)
1621{
1622 phandle node;
1623 char type[64];
1624 unsigned int plen;
1625 cell_t *p, *endp;
1626 __be32 val;
1627 u32 rac, rsc;
1628
1629 /*
1630 * We iterate the memory nodes to find
1631 * 1) top of RMO (first node)
1632 * 2) top of memory
1633 */
1634 val = cpu_to_be32(2);
1635 prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1636 rac = be32_to_cpu(val);
1637 val = cpu_to_be32(1);
 1638 prom_getprop(prom.root, "#size-cells", &val, sizeof(val));
1639 rsc = be32_to_cpu(val);
1640 prom_debug("root_addr_cells: %x\n", rac);
1641 prom_debug("root_size_cells: %x\n", rsc);
1642
1643 prom_debug("scanning memory:\n");
1644
1645 for (node = 0; prom_next_node(&node); ) {
1646 type[0] = 0;
1647 prom_getprop(node, "device_type", type, sizeof(type));
1648
1649 if (type[0] == 0) {
1650 /*
1651 * CHRP Longtrail machines have no device_type
1652 * on the memory node, so check the name instead...
1653 */
1654 prom_getprop(node, "name", type, sizeof(type));
1655 }
1656 if (prom_strcmp(type, "memory"))
1657 continue;
1658
1659 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1660 if (plen > sizeof(regbuf)) {
1661 prom_printf("memory node too large for buffer !\n");
1662 plen = sizeof(regbuf);
1663 }
1664 p = regbuf;
1665 endp = p + (plen / sizeof(cell_t));
1666
1667#ifdef DEBUG_PROM
1668 memset(prom_scratch, 0, sizeof(prom_scratch));
1669 call_prom("package-to-path", 3, 1, node, prom_scratch,
1670 sizeof(prom_scratch) - 1);
1671 prom_debug(" node %s :\n", prom_scratch);
1672#endif /* DEBUG_PROM */
1673
1674 while ((endp - p) >= (rac + rsc)) {
1675 unsigned long base, size;
1676
1677 base = prom_next_cell(rac, &p);
1678 size = prom_next_cell(rsc, &p);
1679
1680 if (size == 0)
1681 continue;
1682 prom_debug(" %lx %lx\n", base, size);
1683 if (base == 0 && (of_platform & PLATFORM_LPAR))
1684 rmo_top = size;
1685 if ((base + size) > ram_top)
1686 ram_top = base + size;
1687 }
1688 }
1689
1690 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1691
1692 /*
1693 * If prom_memory_limit is set we reduce the upper limits *except* for
1694 * alloc_top_high. This must be the real top of RAM so we can put
1695 * TCE's up there.
1696 */
1697
1698 alloc_top_high = ram_top;
1699
1700 if (prom_memory_limit) {
1701 if (prom_memory_limit <= alloc_bottom) {
1702 prom_printf("Ignoring mem=%lx <= alloc_bottom.\n",
1703 prom_memory_limit);
1704 prom_memory_limit = 0;
1705 } else if (prom_memory_limit >= ram_top) {
1706 prom_printf("Ignoring mem=%lx >= ram_top.\n",
1707 prom_memory_limit);
1708 prom_memory_limit = 0;
1709 } else {
1710 ram_top = prom_memory_limit;
1711 rmo_top = min(rmo_top, prom_memory_limit);
1712 }
1713 }
1714
1715 /*
1716 * Setup our top alloc point, that is top of RMO or top of
1717 * segment 0 when running non-LPAR.
1718 * Some RS64 machines have buggy firmware where claims up at
1719 * 1GB fail. Cap at 768MB as a workaround.
1720 * Since 768MB is plenty of room, and we need to cap to something
1721 * reasonable on 32-bit, cap at 768MB on all machines.
1722 */
1723 if (!rmo_top)
1724 rmo_top = ram_top;
1725 rmo_top = min(0x30000000ul, rmo_top);
1726 alloc_top = rmo_top;
1727 alloc_top_high = ram_top;
1728
1729 /*
1730 * Check if we have an initrd after the kernel but still inside
 1731 * the RMO. If we do, move our bottom point to after it.
1732 */
1733 if (prom_initrd_start &&
1734 prom_initrd_start < rmo_top &&
1735 prom_initrd_end > alloc_bottom)
1736 alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1737
1738 prom_printf("memory layout at init:\n");
1739 prom_printf(" memory_limit : %lx (16 MB aligned)\n",
1740 prom_memory_limit);
1741 prom_printf(" alloc_bottom : %lx\n", alloc_bottom);
1742 prom_printf(" alloc_top : %lx\n", alloc_top);
1743 prom_printf(" alloc_top_hi : %lx\n", alloc_top_high);
1744 prom_printf(" rmo_top : %lx\n", rmo_top);
1745 prom_printf(" ram_top : %lx\n", ram_top);
1746}
1747
1748static void __init prom_close_stdin(void)
1749{
1750 __be32 val;
1751 ihandle stdin;
1752
1753 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1754 stdin = be32_to_cpu(val);
1755 call_prom("close", 1, 0, stdin);
1756 }
1757}
1758
1759#ifdef CONFIG_PPC_SVM
1760static int prom_rtas_hcall(uint64_t args)
1761{
1762 register uint64_t arg1 asm("r3") = H_RTAS;
1763 register uint64_t arg2 asm("r4") = args;
1764
1765 asm volatile("sc 1\n" : "=r" (arg1) :
1766 "r" (arg1),
1767 "r" (arg2) :);
1768 return arg1;
1769}
1770
1771static struct rtas_args __prombss os_term_args;
1772
1773static void __init prom_rtas_os_term(char *str)
1774{
1775 phandle rtas_node;
1776 __be32 val;
1777 u32 token;
1778
1779 prom_debug("%s: start...\n", __func__);
1780 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1781 prom_debug("rtas_node: %x\n", rtas_node);
1782 if (!PHANDLE_VALID(rtas_node))
1783 return;
1784
1785 val = 0;
1786 prom_getprop(rtas_node, "ibm,os-term", &val, sizeof(val));
1787 token = be32_to_cpu(val);
1788 prom_debug("ibm,os-term: %x\n", token);
1789 if (token == 0)
1790 prom_panic("Could not get token for ibm,os-term\n");
1791 os_term_args.token = cpu_to_be32(token);
1792 os_term_args.nargs = cpu_to_be32(1);
1793 os_term_args.nret = cpu_to_be32(1);
1794 os_term_args.args[0] = cpu_to_be32(__pa(str));
1795 prom_rtas_hcall((uint64_t)&os_term_args);
1796}
1797#endif /* CONFIG_PPC_SVM */
1798
1799/*
1800 * Allocate room for and instantiate RTAS
1801 */
1802static void __init prom_instantiate_rtas(void)
1803{
1804 phandle rtas_node;
1805 ihandle rtas_inst;
1806 u32 base, entry = 0;
1807 __be32 val;
1808 u32 size = 0;
1809
1810 prom_debug("prom_instantiate_rtas: start...\n");
1811
1812 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1813 prom_debug("rtas_node: %x\n", rtas_node);
1814 if (!PHANDLE_VALID(rtas_node))
1815 return;
1816
1817 val = 0;
1818 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
1819 size = be32_to_cpu(val);
1820 if (size == 0)
1821 return;
1822
1823 base = alloc_down(size, PAGE_SIZE, 0);
1824 if (base == 0)
1825 prom_panic("Could not allocate memory for RTAS\n");
1826
1827 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1828 if (!IHANDLE_VALID(rtas_inst)) {
1829 prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1830 return;
1831 }
1832
1833 prom_printf("instantiating rtas at 0x%x...", base);
1834
1835 if (call_prom_ret("call-method", 3, 2, &entry,
1836 ADDR("instantiate-rtas"),
1837 rtas_inst, base) != 0
1838 || entry == 0) {
1839 prom_printf(" failed\n");
1840 return;
1841 }
1842 prom_printf(" done\n");
1843
1844 reserve_mem(base, size);
1845
1846 val = cpu_to_be32(base);
1847 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1848 &val, sizeof(val));
1849 val = cpu_to_be32(entry);
1850 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1851 &val, sizeof(val));
1852
1853 /* Check if it supports "query-cpu-stopped-state" */
1854 if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1855 &val, sizeof(val)) != PROM_ERROR)
1856 rtas_has_query_cpu_stopped = true;
1857
1858 prom_debug("rtas base = 0x%x\n", base);
1859 prom_debug("rtas entry = 0x%x\n", entry);
1860 prom_debug("rtas size = 0x%x\n", size);
1861
1862 prom_debug("prom_instantiate_rtas: end...\n");
1863}
1864
1865#ifdef CONFIG_PPC64
1866/*
1867 * Allocate room for and instantiate Stored Measurement Log (SML)
1868 */
1869static void __init prom_instantiate_sml(void)
1870{
1871 phandle ibmvtpm_node;
1872 ihandle ibmvtpm_inst;
1873 u32 entry = 0, size = 0, succ = 0;
1874 u64 base;
1875 __be32 val;
1876
1877 prom_debug("prom_instantiate_sml: start...\n");
1878
1879 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
1880 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1881 if (!PHANDLE_VALID(ibmvtpm_node))
1882 return;
1883
1884 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
1885 if (!IHANDLE_VALID(ibmvtpm_inst)) {
1886 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1887 return;
1888 }
1889
1890 if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
1891 &val, sizeof(val)) != PROM_ERROR) {
1892 if (call_prom_ret("call-method", 2, 2, &succ,
1893 ADDR("reformat-sml-to-efi-alignment"),
1894 ibmvtpm_inst) != 0 || succ == 0) {
1895 prom_printf("Reformat SML to EFI alignment failed\n");
1896 return;
1897 }
1898
1899 if (call_prom_ret("call-method", 2, 2, &size,
1900 ADDR("sml-get-allocated-size"),
1901 ibmvtpm_inst) != 0 || size == 0) {
1902 prom_printf("SML get allocated size failed\n");
1903 return;
1904 }
1905 } else {
1906 if (call_prom_ret("call-method", 2, 2, &size,
1907 ADDR("sml-get-handover-size"),
1908 ibmvtpm_inst) != 0 || size == 0) {
1909 prom_printf("SML get handover size failed\n");
1910 return;
1911 }
1912 }
1913
1914 base = alloc_down(size, PAGE_SIZE, 0);
1915 if (base == 0)
1916 prom_panic("Could not allocate memory for sml\n");
1917
1918 prom_printf("instantiating sml at 0x%llx...", base);
1919
1920 memset((void *)base, 0, size);
1921
1922 if (call_prom_ret("call-method", 4, 2, &entry,
1923 ADDR("sml-handover"),
1924 ibmvtpm_inst, size, base) != 0 || entry == 0) {
1925 prom_printf("SML handover failed\n");
1926 return;
1927 }
1928 prom_printf(" done\n");
1929
1930 reserve_mem(base, size);
1931
1932 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
1933 &base, sizeof(base));
1934 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
1935 &size, sizeof(size));
1936
1937 prom_debug("sml base = 0x%llx\n", base);
1938 prom_debug("sml size = 0x%x\n", size);
1939
1940 prom_debug("prom_instantiate_sml: end...\n");
1941}
1942
1943/*
1944 * Allocate room for and initialize TCE tables
1945 */
1946#ifdef __BIG_ENDIAN__
1947static void __init prom_initialize_tce_table(void)
1948{
1949 phandle node;
1950 ihandle phb_node;
1951 char compatible[64], type[64], model[64];
1952 char *path = prom_scratch;
1953 u64 base, align;
1954 u32 minalign, minsize;
1955 u64 tce_entry, *tce_entryp;
1956 u64 local_alloc_top, local_alloc_bottom;
1957 u64 i;
1958
1959 if (prom_iommu_off)
1960 return;
1961
1962 prom_debug("starting prom_initialize_tce_table\n");
1963
1964 /* Cache current top of allocs so we reserve a single block */
1965 local_alloc_top = alloc_top_high;
1966 local_alloc_bottom = local_alloc_top;
1967
1968 /* Search all nodes looking for PHBs. */
1969 for (node = 0; prom_next_node(&node); ) {
1970 compatible[0] = 0;
1971 type[0] = 0;
1972 model[0] = 0;
1973 prom_getprop(node, "compatible",
1974 compatible, sizeof(compatible));
1975 prom_getprop(node, "device_type", type, sizeof(type));
1976 prom_getprop(node, "model", model, sizeof(model));
1977
1978 if ((type[0] == 0) || (prom_strstr(type, "pci") == NULL))
1979 continue;
1980
1981 /* Keep the old logic intact to avoid regression. */
1982 if (compatible[0] != 0) {
1983 if ((prom_strstr(compatible, "python") == NULL) &&
1984 (prom_strstr(compatible, "Speedwagon") == NULL) &&
1985 (prom_strstr(compatible, "Winnipeg") == NULL))
1986 continue;
1987 } else if (model[0] != 0) {
1988 if ((prom_strstr(model, "ython") == NULL) &&
1989 (prom_strstr(model, "peedwagon") == NULL) &&
1990 (prom_strstr(model, "innipeg") == NULL))
1991 continue;
1992 }
1993
1994 if (prom_getprop(node, "tce-table-minalign", &minalign,
1995 sizeof(minalign)) == PROM_ERROR)
1996 minalign = 0;
1997 if (prom_getprop(node, "tce-table-minsize", &minsize,
1998 sizeof(minsize)) == PROM_ERROR)
1999 minsize = 4UL << 20;
2000
2001 /*
2002 * Even though we read what OF wants, we just set the table
2003 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
2004 * By doing this, we avoid the pitfalls of trying to DMA to
2005 * MMIO space and the DMA alias hole.
2006 */
2007 minsize = 4UL << 20;
2008
2009 /* Align to the greater of the align or size */
2010 align = max(minalign, minsize);
2011 base = alloc_down(minsize, align, 1);
2012 if (base == 0)
2013 prom_panic("ERROR, cannot find space for TCE table.\n");
2014 if (base < local_alloc_bottom)
2015 local_alloc_bottom = base;
2016
2017 /* It seems OF doesn't null-terminate the path :-( */
2018 memset(path, 0, sizeof(prom_scratch));
2019 /* Call OF to setup the TCE hardware */
2020 if (call_prom("package-to-path", 3, 1, node,
2021 path, sizeof(prom_scratch) - 1) == PROM_ERROR) {
2022 prom_printf("package-to-path failed\n");
2023 }
2024
2025 /* Save away the TCE table attributes for later use. */
2026 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
2027 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
2028
2029 prom_debug("TCE table: %s\n", path);
2030 prom_debug("\tnode = 0x%x\n", node);
2031 prom_debug("\tbase = 0x%llx\n", base);
2032 prom_debug("\tsize = 0x%x\n", minsize);
2033
2034 /* Initialize the table to have a one-to-one mapping
2035 * over the allocated size.
2036 */
2037 tce_entryp = (u64 *)base;
2038 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
2039 tce_entry = (i << PAGE_SHIFT);
2040 tce_entry |= 0x3;
2041 *tce_entryp = tce_entry;
2042 }
2043
2044 prom_printf("opening PHB %s", path);
2045 phb_node = call_prom("open", 1, 1, path);
2046 if (phb_node == 0)
2047 prom_printf("... failed\n");
2048 else
2049 prom_printf("... done\n");
2050
2051 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
2052 phb_node, -1, minsize,
2053 (u32) base, (u32) (base >> 32));
2054 call_prom("close", 1, 0, phb_node);
2055 }
2056
2057 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
2058
2059 /* These are only really needed if there is a memory limit in
2060 * effect, but we don't know so export them always. */
2061 prom_tce_alloc_start = local_alloc_bottom;
2062 prom_tce_alloc_end = local_alloc_top;
2063
2064 /* Flag the first invalid entry */
2065 prom_debug("ending prom_initialize_tce_table\n");
2066}
2067#endif /* __BIG_ENDIAN__ */
2068#endif /* CONFIG_PPC64 */
2069
2070/*
2071 * With CHRP SMP we need to use the OF to start the other processors.
2072 * We can't wait until smp_boot_cpus (the OF is trashed by then)
2073 * so we have to put the processors into a holding pattern controlled
2074 * by the kernel (not OF) before we destroy the OF.
2075 *
2076 * This uses a chunk of low memory, puts some holding pattern
2077 * code there and sends the other processors off to there until
2078 * smp_boot_cpus tells them to do something. The holding pattern
2079 * checks that address until its cpu # is there; when it is, that
2080 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
2081 * of setting those values.
2082 *
2083 * We also use physical address 0x4 here to tell when a cpu
2084 * is in its holding pattern code.
2085 *
2086 * -- Cort
2087 */
2088/*
2089 * We want to reference the copy of __secondary_hold_* in the
2090 * 0 - 0x100 address range
2091 */
2092#define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
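
/*
 * Illustrative sketch (added, not part of the original file): roughly what
 * the holding-pattern code described above does once "start-cpu" drops a
 * secondary cpu into __secondary_hold.  The real implementation is the
 * assembly copied to low memory by copy_and_flush(); this C rendering only
 * exists to make the handshake with prom_hold_cpus() easier to follow, and
 * the exact release condition is an assumption.
 */
#if 0
static void __secondary_hold_sketch(unsigned long my_hw_id)
{
	/* tell the boot cpu this cpu reached the holding pattern */
	*(volatile unsigned long *)LOW_ADDR(__secondary_hold_acknowledge) = my_hw_id;

	/* spin until smp_boot_cpus() tells this cpu to proceed... */
	while (*(volatile unsigned long *)LOW_ADDR(__secondary_hold_spinloop) == 0)
		barrier();

	/* ...then jump to __secondary_start() */
}
#endif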
2093
2094static void __init prom_hold_cpus(void)
2095{
2096 unsigned long i;
2097 phandle node;
2098 char type[64];
2099 unsigned long *spinloop
2100 = (void *) LOW_ADDR(__secondary_hold_spinloop);
2101 unsigned long *acknowledge
2102 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
2103 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
2104
2105 /*
2106 * On pseries, if RTAS supports "query-cpu-stopped-state",
2107 * we skip this stage, the CPUs will be started by the
2108 * kernel using RTAS.
2109 */
2110 if ((of_platform == PLATFORM_PSERIES ||
2111 of_platform == PLATFORM_PSERIES_LPAR) &&
2112 rtas_has_query_cpu_stopped) {
2113 prom_printf("prom_hold_cpus: skipped\n");
2114 return;
2115 }
2116
2117 prom_debug("prom_hold_cpus: start...\n");
2118 prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop);
2119 prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop);
2120 prom_debug(" 1) acknowledge = 0x%lx\n",
2121 (unsigned long)acknowledge);
2122 prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge);
2123 prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold);
2124
2125 /* Set the common spinloop variable, so all of the secondary cpus
2126 * will block when they are awakened from their OF spinloop.
2127 * This must occur for both SMP and non SMP kernels, since OF will
2128 * be trashed when we move the kernel.
2129 */
2130 *spinloop = 0;
2131
2132 /* look for cpus */
2133 for (node = 0; prom_next_node(&node); ) {
2134 unsigned int cpu_no;
2135 __be32 reg;
2136
2137 type[0] = 0;
2138 prom_getprop(node, "device_type", type, sizeof(type));
2139 if (prom_strcmp(type, "cpu") != 0)
2140 continue;
2141
2142 /* Skip non-configured cpus. */
2143 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
2144 if (prom_strcmp(type, "okay") != 0)
2145 continue;
2146
2147 reg = cpu_to_be32(-1); /* make sparse happy */
2148 prom_getprop(node, "reg", &reg, sizeof(reg));
2149 cpu_no = be32_to_cpu(reg);
2150
2151 prom_debug("cpu hw idx = %u\n", cpu_no);
2152
2153 /* Init the acknowledge var which will be reset by
2154 * the secondary cpu when it awakens from its OF
2155 * spinloop.
2156 */
2157 *acknowledge = (unsigned long)-1;
2158
2159 if (cpu_no != prom.cpu) {
2160 /* Primary Thread of non-boot cpu or any thread */
2161 prom_printf("starting cpu hw idx %u... ", cpu_no);
2162 call_prom("start-cpu", 3, 0, node,
2163 secondary_hold, cpu_no);
2164
2165 for (i = 0; (i < 100000000) &&
2166 (*acknowledge == ((unsigned long)-1)); i++ )
2167 mb();
2168
2169 if (*acknowledge == cpu_no)
2170 prom_printf("done\n");
2171 else
2172 prom_printf("failed: %lx\n", *acknowledge);
2173 }
2174#ifdef CONFIG_SMP
2175 else
2176 prom_printf("boot cpu hw idx %u\n", cpu_no);
2177#endif /* CONFIG_SMP */
2178 }
2179
2180 prom_debug("prom_hold_cpus: end...\n");
2181}
2182
2183
2184static void __init prom_init_client_services(unsigned long pp)
2185{
2186 /* Get a handle to the prom entry point before anything else */
2187 prom_entry = pp;
2188
2189 /* get a handle for the stdout device */
2190 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
2191 if (!PHANDLE_VALID(prom.chosen))
2192 prom_panic("cannot find chosen"); /* msg won't be printed :( */
2193
2194 /* get device tree root */
2195 prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
2196 if (!PHANDLE_VALID(prom.root))
2197 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
2198
2199 prom.mmumap = 0;
2200}
2201
2202#ifdef CONFIG_PPC32
2203/*
2204 * For really old powermacs, we need to map things we claim.
2205 * For that, we need the ihandle of the mmu.
2206 * Also, on the longtrail, we need to work around other bugs.
2207 */
2208static void __init prom_find_mmu(void)
2209{
2210 phandle oprom;
2211 char version[64];
2212
2213 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
2214 if (!PHANDLE_VALID(oprom))
2215 return;
2216 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
2217 return;
2218 version[sizeof(version) - 1] = 0;
2219 /* XXX might need to add other versions here */
2220 if (prom_strcmp(version, "Open Firmware, 1.0.5") == 0)
2221 of_workarounds = OF_WA_CLAIM;
2222 else if (prom_strncmp(version, "FirmWorks,3.", 12) == 0) {
2223 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
2224 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
2225 } else
2226 return;
2227 prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
2228 prom_getprop(prom.chosen, "mmu", &prom.mmumap,
2229 sizeof(prom.mmumap));
2230 prom.mmumap = be32_to_cpu(prom.mmumap);
2231 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
2232 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
2233}
2234#else
2235#define prom_find_mmu()
2236#endif
2237
2238static void __init prom_init_stdout(void)
2239{
2240 char *path = of_stdout_device;
2241 char type[16];
2242 phandle stdout_node;
2243 __be32 val;
2244
2245 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
2246 prom_panic("cannot find stdout");
2247
2248 prom.stdout = be32_to_cpu(val);
2249
2250 /* Get the full OF pathname of the stdout device */
2251 memset(path, 0, 256);
2252 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
2253 prom_printf("OF stdout device is: %s\n", of_stdout_device);
2254 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
2255 path, prom_strlen(path) + 1);
2256
2257 /* instance-to-package fails on PA-Semi */
2258 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
2259 if (stdout_node != PROM_ERROR) {
2260 val = cpu_to_be32(stdout_node);
2261
2262 /* If it's a display, note it */
2263 memset(type, 0, sizeof(type));
2264 prom_getprop(stdout_node, "device_type", type, sizeof(type));
2265 if (prom_strcmp(type, "display") == 0)
2266 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
2267 }
2268}
2269
2270static int __init prom_find_machine_type(void)
2271{
2272 char compat[256];
2273 int len, i = 0;
2274#ifdef CONFIG_PPC64
2275 phandle rtas;
2276 int x;
2277#endif
2278
2279 /* Look for a PowerMac or a Cell */
2280 len = prom_getprop(prom.root, "compatible",
2281 compat, sizeof(compat)-1);
2282 if (len > 0) {
2283 compat[len] = 0;
2284 while (i < len) {
2285 char *p = &compat[i];
2286 int sl = prom_strlen(p);
2287 if (sl == 0)
2288 break;
2289 if (prom_strstr(p, "Power Macintosh") ||
2290 prom_strstr(p, "MacRISC"))
2291 return PLATFORM_POWERMAC;
2292#ifdef CONFIG_PPC64
2293 /* We must make sure we don't detect the IBM Cell
2294 * blades as pSeries due to some firmware issues,
2295 * so we do it here.
2296 */
2297 if (prom_strstr(p, "IBM,CBEA") ||
2298 prom_strstr(p, "IBM,CPBW-1.0"))
2299 return PLATFORM_GENERIC;
2300#endif /* CONFIG_PPC64 */
2301 i += sl + 1;
2302 }
2303 }
2304#ifdef CONFIG_PPC64
2305 /* Try to figure out if it's an IBM pSeries or any other
2306 * PAPR compliant platform. We assume it is if :
2307 * - /device_type is "chrp" (please, do NOT use that for future
2308 * non-IBM designs !)
2309 * - it has /rtas
2310 */
2311 len = prom_getprop(prom.root, "device_type",
2312 compat, sizeof(compat)-1);
2313 if (len <= 0)
2314 return PLATFORM_GENERIC;
2315 if (prom_strcmp(compat, "chrp"))
2316 return PLATFORM_GENERIC;
2317
2318 /* Default to pSeries. We need to know if we are running LPAR */
2319 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2320 if (!PHANDLE_VALID(rtas))
2321 return PLATFORM_GENERIC;
2322 x = prom_getproplen(rtas, "ibm,hypertas-functions");
2323 if (x != PROM_ERROR) {
2324 prom_debug("Hypertas detected, assuming LPAR !\n");
2325 return PLATFORM_PSERIES_LPAR;
2326 }
2327 return PLATFORM_PSERIES;
2328#else
2329 return PLATFORM_GENERIC;
2330#endif
2331}
2332
2333static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2334{
2335 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2336}
2337
2338/*
2339 * If we have a display that we don't know how to drive,
2340 * we will want to try to execute OF's open method for it
2341 * later. However, OF will probably fall over if we do that
2342 * after we've taken over the MMU.
2343 * So we check whether we will need to open the display,
2344 * and if so, open it now.
2345 */
2346static void __init prom_check_displays(void)
2347{
2348 char type[16], *path;
2349 phandle node;
2350 ihandle ih;
2351 int i;
2352
2353 static const unsigned char default_colors[] __initconst = {
2354 0x00, 0x00, 0x00,
2355 0x00, 0x00, 0xaa,
2356 0x00, 0xaa, 0x00,
2357 0x00, 0xaa, 0xaa,
2358 0xaa, 0x00, 0x00,
2359 0xaa, 0x00, 0xaa,
2360 0xaa, 0xaa, 0x00,
2361 0xaa, 0xaa, 0xaa,
2362 0x55, 0x55, 0x55,
2363 0x55, 0x55, 0xff,
2364 0x55, 0xff, 0x55,
2365 0x55, 0xff, 0xff,
2366 0xff, 0x55, 0x55,
2367 0xff, 0x55, 0xff,
2368 0xff, 0xff, 0x55,
2369 0xff, 0xff, 0xff
2370 };
2371 const unsigned char *clut;
2372
2373 prom_debug("Looking for displays\n");
2374 for (node = 0; prom_next_node(&node); ) {
2375 memset(type, 0, sizeof(type));
2376 prom_getprop(node, "device_type", type, sizeof(type));
2377 if (prom_strcmp(type, "display") != 0)
2378 continue;
2379
2380 /* It seems OF doesn't null-terminate the path :-( */
2381 path = prom_scratch;
2382 memset(path, 0, sizeof(prom_scratch));
2383
2384 /*
2385 * leave some room at the end of the path for appending extra
2386 * arguments
2387 */
2388 if (call_prom("package-to-path", 3, 1, node, path,
2389 sizeof(prom_scratch) - 10) == PROM_ERROR)
2390 continue;
2391 prom_printf("found display : %s, opening... ", path);
2392
2393 ih = call_prom("open", 1, 1, path);
2394 if (ih == 0) {
2395 prom_printf("failed\n");
2396 continue;
2397 }
2398
2399 /* Success */
2400 prom_printf("done\n");
2401 prom_setprop(node, path, "linux,opened", NULL, 0);
2402
2403 /* Setup a usable color table when the appropriate
2404 * method is available. Should update this to use set-colors */
2405 clut = default_colors;
2406 for (i = 0; i < 16; i++, clut += 3)
2407 if (prom_set_color(ih, i, clut[0], clut[1],
2408 clut[2]) != 0)
2409 break;
2410
2411#ifdef CONFIG_LOGO_LINUX_CLUT224
2412 clut = PTRRELOC(logo_linux_clut224.clut);
2413 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2414 if (prom_set_color(ih, i + 32, clut[0], clut[1],
2415 clut[2]) != 0)
2416 break;
2417#endif /* CONFIG_LOGO_LINUX_CLUT224 */
2418
2419#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2420 if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2421 PROM_ERROR) {
2422 u32 width, height, pitch, addr;
2423
2424 prom_printf("Setting btext !\n");
2425 prom_getprop(node, "width", &width, 4);
2426 prom_getprop(node, "height", &height, 4);
2427 prom_getprop(node, "linebytes", &pitch, 4);
2428 prom_getprop(node, "address", &addr, 4);
2429 prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2430 width, height, pitch, addr);
2431 btext_setup_display(width, height, 8, pitch, addr);
2432 btext_prepare_BAT();
2433 }
2434#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2435 }
2436}
2437
2438
2439/* Return (relocated) pointer to this much memory: moves initrd if reqd. */
2440static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2441 unsigned long needed, unsigned long align)
2442{
2443 void *ret;
2444
2445 *mem_start = ALIGN(*mem_start, align);
2446 while ((*mem_start + needed) > *mem_end) {
2447 unsigned long room, chunk;
2448
2449 prom_debug("Chunk exhausted, claiming more at %lx...\n",
2450 alloc_bottom);
2451 room = alloc_top - alloc_bottom;
2452 if (room > DEVTREE_CHUNK_SIZE)
2453 room = DEVTREE_CHUNK_SIZE;
2454 if (room < PAGE_SIZE)
2455 prom_panic("No memory for flatten_device_tree "
2456 "(no room)\n");
2457 chunk = alloc_up(room, 0);
2458 if (chunk == 0)
2459 prom_panic("No memory for flatten_device_tree "
2460 "(claim failed)\n");
2461 *mem_end = chunk + room;
2462 }
2463
2464 ret = (void *)*mem_start;
2465 *mem_start += needed;
2466
2467 return ret;
2468}
2469
2470#define dt_push_token(token, mem_start, mem_end) do { \
2471 void *room = make_room(mem_start, mem_end, 4, 4); \
2472 *(__be32 *)room = cpu_to_be32(token); \
2473 } while(0)
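
/*
 * Added illustration (not in the original): scan_dt_build_struct() below
 * uses dt_push_token() to emit the flattened device tree structure block
 * one 32-bit big-endian cell at a time.  A node with a single 4-byte
 * property comes out roughly like this, "soff" being the offset of the
 * property name in the string block:
 */
#if 0
	dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
	/* ...unit name string, NUL terminated, padded to 4 bytes... */
	dt_push_token(OF_DT_PROP, mem_start, mem_end);
	dt_push_token(4, mem_start, mem_end);		/* property length */
	dt_push_token(soff, mem_start, mem_end);	/* name offset */
	/* ...4 bytes of property value, padded to 4 bytes... */
	dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
#endif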
2474
2475static unsigned long __init dt_find_string(char *str)
2476{
2477 char *s, *os;
2478
2479 s = os = (char *)dt_string_start;
2480 s += 4;
2481 while (s < (char *)dt_string_end) {
2482 if (prom_strcmp(s, str) == 0)
2483 return s - os;
2484 s += prom_strlen(s) + 1;
2485 }
2486 return 0;
2487}
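
/*
 * Added note: dt_find_string() returns the offset of an already-stored
 * property name within the string block, with 0 meaning "not found".
 * The initial "s += 4" skips the 4-byte hole that flatten_device_tree()
 * leaves at dt_string_start, so a real offset is always >= 4 and 0 can
 * safely double as the "missing" return value.
 */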
2488
2489/*
2490 * The Open Firmware 1275 specification states properties must be 31 bytes or
2491 * less, however not all firmwares obey this. Make it 64 bytes to be safe.
2492 */
2493#define MAX_PROPERTY_NAME 64
2494
2495static void __init scan_dt_build_strings(phandle node,
2496 unsigned long *mem_start,
2497 unsigned long *mem_end)
2498{
2499 char *prev_name, *namep, *sstart;
2500 unsigned long soff;
2501 phandle child;
2502
2503 sstart = (char *)dt_string_start;
2504
2505 /* get and store all property names */
2506 prev_name = "";
2507 for (;;) {
2508 /* 64 is max len of name including nul. */
2509 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2510 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2511 /* No more nodes: unwind alloc */
2512 *mem_start = (unsigned long)namep;
2513 break;
2514 }
2515
2516 /* skip "name" */
2517 if (prom_strcmp(namep, "name") == 0) {
2518 *mem_start = (unsigned long)namep;
2519 prev_name = "name";
2520 continue;
2521 }
2522 /* get/create string entry */
2523 soff = dt_find_string(namep);
2524 if (soff != 0) {
2525 *mem_start = (unsigned long)namep;
2526 namep = sstart + soff;
2527 } else {
2528 /* Trim off some if we can */
2529 *mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
2530 dt_string_end = *mem_start;
2531 }
2532 prev_name = namep;
2533 }
2534
2535 /* do all our children */
2536 child = call_prom("child", 1, 1, node);
2537 while (child != 0) {
2538 scan_dt_build_strings(child, mem_start, mem_end);
2539 child = call_prom("peer", 1, 1, child);
2540 }
2541}
2542
2543static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2544 unsigned long *mem_end)
2545{
2546 phandle child;
2547 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2548 unsigned long soff;
2549 unsigned char *valp;
2550 static char pname[MAX_PROPERTY_NAME] __prombss;
2551 int l, room, has_phandle = 0;
2552
2553 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2554
2555 /* get the node's full name */
2556 namep = (char *)*mem_start;
2557 room = *mem_end - *mem_start;
2558 if (room > 255)
2559 room = 255;
2560 l = call_prom("package-to-path", 3, 1, node, namep, room);
2561 if (l >= 0) {
2562 /* Didn't fit? Get more room. */
2563 if (l >= room) {
2564 if (l >= *mem_end - *mem_start)
2565 namep = make_room(mem_start, mem_end, l+1, 1);
2566 call_prom("package-to-path", 3, 1, node, namep, l);
2567 }
2568 namep[l] = '\0';
2569
2570 /* Fixup an Apple bug where they have bogus \0 chars in the
2571 * middle of the path in some properties, and extract
2572 * the unit name (everything after the last '/').
2573 */
2574 for (lp = p = namep, ep = namep + l; p < ep; p++) {
2575 if (*p == '/')
2576 lp = namep;
2577 else if (*p != 0)
2578 *lp++ = *p;
2579 }
2580 *lp = 0;
2581 *mem_start = ALIGN((unsigned long)lp + 1, 4);
2582 }
2583
2584 /* get it again for debugging */
2585 path = prom_scratch;
2586 memset(path, 0, sizeof(prom_scratch));
2587 call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 1);
2588
2589 /* get and store all properties */
2590 prev_name = "";
2591 sstart = (char *)dt_string_start;
2592 for (;;) {
2593 if (call_prom("nextprop", 3, 1, node, prev_name,
2594 pname) != 1)
2595 break;
2596
2597 /* skip "name" */
2598 if (prom_strcmp(pname, "name") == 0) {
2599 prev_name = "name";
2600 continue;
2601 }
2602
2603 /* find string offset */
2604 soff = dt_find_string(pname);
2605 if (soff == 0) {
2606 prom_printf("WARNING: Can't find string index for"
2607 " <%s>, node %s\n", pname, path);
2608 break;
2609 }
2610 prev_name = sstart + soff;
2611
2612 /* get length */
2613 l = call_prom("getproplen", 2, 1, node, pname);
2614
2615 /* sanity checks */
2616 if (l == PROM_ERROR)
2617 continue;
2618
2619 /* push property head */
2620 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2621 dt_push_token(l, mem_start, mem_end);
2622 dt_push_token(soff, mem_start, mem_end);
2623
2624 /* push property content */
2625 valp = make_room(mem_start, mem_end, l, 4);
2626 call_prom("getprop", 4, 1, node, pname, valp, l);
2627 *mem_start = ALIGN(*mem_start, 4);
2628
2629 if (!prom_strcmp(pname, "phandle"))
2630 has_phandle = 1;
2631 }
2632
2633 /* Add a "phandle" property if none already exist */
2634 if (!has_phandle) {
2635 soff = dt_find_string("phandle");
2636 if (soff == 0)
2637 prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path);
2638 else {
2639 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2640 dt_push_token(4, mem_start, mem_end);
2641 dt_push_token(soff, mem_start, mem_end);
2642 valp = make_room(mem_start, mem_end, 4, 4);
2643 *(__be32 *)valp = cpu_to_be32(node);
2644 }
2645 }
2646
2647 /* do all our children */
2648 child = call_prom("child", 1, 1, node);
2649 while (child != 0) {
2650 scan_dt_build_struct(child, mem_start, mem_end);
2651 child = call_prom("peer", 1, 1, child);
2652 }
2653
2654 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2655}
2656
2657static void __init flatten_device_tree(void)
2658{
2659 phandle root;
2660 unsigned long mem_start, mem_end, room;
2661 struct boot_param_header *hdr;
2662 char *namep;
2663 u64 *rsvmap;
2664
2665 /*
2666 * Check how much room we have between alloc top & bottom (+/- a
2667 * few pages), crop to 1MB, as this is our "chunk" size
2668 */
2669 room = alloc_top - alloc_bottom - 0x4000;
2670 if (room > DEVTREE_CHUNK_SIZE)
2671 room = DEVTREE_CHUNK_SIZE;
2672 prom_debug("starting device tree allocs at %lx\n", alloc_bottom);
2673
2674 /* Now try to claim that */
2675 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2676 if (mem_start == 0)
2677 prom_panic("Can't allocate initial device-tree chunk\n");
2678 mem_end = mem_start + room;
2679
2680 /* Get root of tree */
2681 root = call_prom("peer", 1, 1, (phandle)0);
2682 if (root == (phandle)0)
2683 prom_panic ("couldn't get device tree root\n");
2684
2685 /* Build header and make room for mem rsv map */
2686 mem_start = ALIGN(mem_start, 4);
2687 hdr = make_room(&mem_start, &mem_end,
2688 sizeof(struct boot_param_header), 4);
2689 dt_header_start = (unsigned long)hdr;
2690 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2691
2692 /* Start of strings */
2693 mem_start = PAGE_ALIGN(mem_start);
2694 dt_string_start = mem_start;
2695 mem_start += 4; /* hole */
2696
2697 /* Add "phandle" in there, we'll need it */
2698 namep = make_room(&mem_start, &mem_end, 16, 1);
2699 prom_strcpy(namep, "phandle");
2700 mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
2701
2702 /* Build string array */
2703 prom_printf("Building dt strings...\n");
2704 scan_dt_build_strings(root, &mem_start, &mem_end);
2705 dt_string_end = mem_start;
2706
2707 /* Build structure */
2708 mem_start = PAGE_ALIGN(mem_start);
2709 dt_struct_start = mem_start;
2710 prom_printf("Building dt structure...\n");
2711 scan_dt_build_struct(root, &mem_start, &mem_end);
2712 dt_push_token(OF_DT_END, &mem_start, &mem_end);
2713 dt_struct_end = PAGE_ALIGN(mem_start);
2714
2715 /* Finish header */
2716 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2717 hdr->magic = cpu_to_be32(OF_DT_HEADER);
2718 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2719 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2720 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2721 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2722 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2723 hdr->version = cpu_to_be32(OF_DT_VERSION);
2724 /* Version 16 is not backward compatible */
2725 hdr->last_comp_version = cpu_to_be32(0x10);
2726
2727 /* Copy the reserve map in */
2728 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2729
2730#ifdef DEBUG_PROM
2731 {
2732 int i;
2733 prom_printf("reserved memory map:\n");
2734 for (i = 0; i < mem_reserve_cnt; i++)
2735 prom_printf(" %llx - %llx\n",
2736 be64_to_cpu(mem_reserve_map[i].base),
2737 be64_to_cpu(mem_reserve_map[i].size));
2738 }
2739#endif
2740 /* Bump mem_reserve_cnt to cause further reservations to fail
2741 * since it's too late.
2742 */
2743 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2744
2745 prom_printf("Device tree strings 0x%lx -> 0x%lx\n",
2746 dt_string_start, dt_string_end);
2747 prom_printf("Device tree struct 0x%lx -> 0x%lx\n",
2748 dt_struct_start, dt_struct_end);
2749}
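
/*
 * Added illustration (not in the original): the blob built above is laid
 * out, from dt_header_start upwards, roughly as
 *
 *	struct boot_param_header	(magic, offsets, sizes, version)
 *	memory reserve map		(copy of mem_reserve_map[])
 *	strings block			(page aligned: 4-byte hole, then the
 *					 de-duplicated property names)
 *	structure block			(page aligned: OF_DT_BEGIN_NODE /
 *					 OF_DT_PROP / OF_DT_END_NODE tokens,
 *					 terminated by OF_DT_END)
 *
 * matching the off_mem_rsvmap / off_dt_strings / off_dt_struct fields
 * filled in just before the reserve map is copied in.
 */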
2750
2751#ifdef CONFIG_PPC_MAPLE
2752/* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2753 * The values are bad, and it doesn't even have the right number of cells. */
2754static void __init fixup_device_tree_maple(void)
2755{
2756 phandle isa;
2757 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2758 u32 isa_ranges[6];
2759 char *name;
2760
2761 name = "/ht@0/isa@4";
2762 isa = call_prom("finddevice", 1, 1, ADDR(name));
2763 if (!PHANDLE_VALID(isa)) {
2764 name = "/ht@0/isa@6";
2765 isa = call_prom("finddevice", 1, 1, ADDR(name));
2766 rloc = 0x01003000; /* IO space; PCI device = 6 */
2767 }
2768 if (!PHANDLE_VALID(isa))
2769 return;
2770
2771 if (prom_getproplen(isa, "ranges") != 12)
2772 return;
2773 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2774 == PROM_ERROR)
2775 return;
2776
2777 if (isa_ranges[0] != 0x1 ||
2778 isa_ranges[1] != 0xf4000000 ||
2779 isa_ranges[2] != 0x00010000)
2780 return;
2781
2782 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2783
2784 isa_ranges[0] = 0x1;
2785 isa_ranges[1] = 0x0;
2786 isa_ranges[2] = rloc;
2787 isa_ranges[3] = 0x0;
2788 isa_ranges[4] = 0x0;
2789 isa_ranges[5] = 0x00010000;
2790 prom_setprop(isa, name, "ranges",
2791 isa_ranges, sizeof(isa_ranges));
2792}
2793
2794#define CPC925_MC_START 0xf8000000
2795#define CPC925_MC_LENGTH 0x1000000
2796/* The values for memory-controller don't have right number of cells */
2797static void __init fixup_device_tree_maple_memory_controller(void)
2798{
2799 phandle mc;
2800 u32 mc_reg[4];
2801 char *name = "/hostbridge@f8000000";
2802 u32 ac, sc;
2803
2804 mc = call_prom("finddevice", 1, 1, ADDR(name));
2805 if (!PHANDLE_VALID(mc))
2806 return;
2807
2808 if (prom_getproplen(mc, "reg") != 8)
2809 return;
2810
2811 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2812 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2813 if ((ac != 2) || (sc != 2))
2814 return;
2815
2816 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2817 return;
2818
2819 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2820 return;
2821
2822 prom_printf("Fixing up bogus hostbridge on Maple...\n");
2823
2824 mc_reg[0] = 0x0;
2825 mc_reg[1] = CPC925_MC_START;
2826 mc_reg[2] = 0x0;
2827 mc_reg[3] = CPC925_MC_LENGTH;
2828 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2829}
2830#else
2831#define fixup_device_tree_maple()
2832#define fixup_device_tree_maple_memory_controller()
2833#endif
2834
2835#ifdef CONFIG_PPC_CHRP
2836/*
2837 * Pegasos and BriQ lack the "ranges" property in the isa node
2838 * Pegasos needs decimal IRQ 14/15, not hexadecimal
2839 * Pegasos has the IDE configured in legacy mode, but advertised as native
2840 */
2841static void __init fixup_device_tree_chrp(void)
2842{
2843 phandle ph;
2844 u32 prop[6];
2845 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2846 char *name;
2847 int rc;
2848
2849 name = "/pci@80000000/isa@c";
2850 ph = call_prom("finddevice", 1, 1, ADDR(name));
2851 if (!PHANDLE_VALID(ph)) {
2852 name = "/pci@ff500000/isa@6";
2853 ph = call_prom("finddevice", 1, 1, ADDR(name));
2854 rloc = 0x01003000; /* IO space; PCI device = 6 */
2855 }
2856 if (PHANDLE_VALID(ph)) {
2857 rc = prom_getproplen(ph, "ranges");
2858 if (rc == 0 || rc == PROM_ERROR) {
2859 prom_printf("Fixing up missing ISA range on Pegasos...\n");
2860
2861 prop[0] = 0x1;
2862 prop[1] = 0x0;
2863 prop[2] = rloc;
2864 prop[3] = 0x0;
2865 prop[4] = 0x0;
2866 prop[5] = 0x00010000;
2867 prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2868 }
2869 }
2870
2871 name = "/pci@80000000/ide@C,1";
2872 ph = call_prom("finddevice", 1, 1, ADDR(name));
2873 if (PHANDLE_VALID(ph)) {
2874 prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2875 prop[0] = 14;
2876 prop[1] = 0x0;
2877 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2878 prom_printf("Fixing up IDE class-code on Pegasos...\n");
2879 rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2880 if (rc == sizeof(u32)) {
2881 prop[0] &= ~0x5;
2882 prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2883 }
2884 }
2885}
2886#else
2887#define fixup_device_tree_chrp()
2888#endif
2889
2890#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
2891static void __init fixup_device_tree_pmac(void)
2892{
2893 phandle u3, i2c, mpic;
2894 u32 u3_rev;
2895 u32 interrupts[2];
2896 u32 parent;
2897
2898 /* Some G5s have a missing interrupt definition, fix it up here */
2899 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2900 if (!PHANDLE_VALID(u3))
2901 return;
2902 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2903 if (!PHANDLE_VALID(i2c))
2904 return;
2905 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2906 if (!PHANDLE_VALID(mpic))
2907 return;
2908
2909 /* check if proper rev of u3 */
2910 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2911 == PROM_ERROR)
2912 return;
2913 if (u3_rev < 0x35 || u3_rev > 0x39)
2914 return;
2915 /* does it need fixup ? */
2916 if (prom_getproplen(i2c, "interrupts") > 0)
2917 return;
2918
2919 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2920
2921 /* interrupt on this revision of u3 is number 0 and level */
2922 interrupts[0] = 0;
2923 interrupts[1] = 1;
2924 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2925 &interrupts, sizeof(interrupts));
2926 parent = (u32)mpic;
2927 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2928 &parent, sizeof(parent));
2929}
2930#else
2931#define fixup_device_tree_pmac()
2932#endif
2933
2934#ifdef CONFIG_PPC_EFIKA
2935/*
2936 * The MPC5200 FEC driver requires a phy-handle property to tell it how
2937 * to talk to the phy. If the phy-handle property is missing, then this
2938 * function is called to add the appropriate nodes and link it to the
2939 * ethernet node.
2940 */
2941static void __init fixup_device_tree_efika_add_phy(void)
2942{
2943 u32 node;
2944 char prop[64];
2945 int rv;
2946
2947 /* Check if /builtin/ethernet exists - bail if it doesn't */
2948 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2949 if (!PHANDLE_VALID(node))
2950 return;
2951
2952 /* Check if the phy-handle property exists - bail if it does */
2953 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2954 if (!rv)
2955 return;
2956
2957 /*
2958 * At this point the ethernet device doesn't have a phy described.
2959 * Now we need to add the missing phy node and linkage
2960 */
2961
2962 /* Check for an MDIO bus node - if missing then create one */
2963 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
2964 if (!PHANDLE_VALID(node)) {
2965 prom_printf("Adding Ethernet MDIO node\n");
2966 call_prom("interpret", 1, 1,
2967 " s\" /builtin\" find-device"
2968 " new-device"
2969 " 1 encode-int s\" #address-cells\" property"
2970 " 0 encode-int s\" #size-cells\" property"
2971 " s\" mdio\" device-name"
2972 " s\" fsl,mpc5200b-mdio\" encode-string"
2973 " s\" compatible\" property"
2974 " 0xf0003000 0x400 reg"
2975 " 0x2 encode-int"
2976 " 0x5 encode-int encode+"
2977 " 0x3 encode-int encode+"
2978 " s\" interrupts\" property"
2979 " finish-device");
2980 };
2981
2982 /* Check for a PHY device node - if missing then create one and
2983 * give its phandle to the ethernet node */
2984 node = call_prom("finddevice", 1, 1,
2985 ADDR("/builtin/mdio/ethernet-phy"));
2986 if (!PHANDLE_VALID(node)) {
2987 prom_printf("Adding Ethernet PHY node\n");
2988 call_prom("interpret", 1, 1,
2989 " s\" /builtin/mdio\" find-device"
2990 " new-device"
2991 " s\" ethernet-phy\" device-name"
2992 " 0x10 encode-int s\" reg\" property"
2993 " my-self"
2994 " ihandle>phandle"
2995 " finish-device"
2996 " s\" /builtin/ethernet\" find-device"
2997 " encode-int"
2998 " s\" phy-handle\" property"
2999 " device-end");
3000 }
3001}
3002
3003static void __init fixup_device_tree_efika(void)
3004{
3005 int sound_irq[3] = { 2, 2, 0 };
3006 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
3007 3,4,0, 3,5,0, 3,6,0, 3,7,0,
3008 3,8,0, 3,9,0, 3,10,0, 3,11,0,
3009 3,12,0, 3,13,0, 3,14,0, 3,15,0 };
3010 u32 node;
3011 char prop[64];
3012 int rv, len;
3013
3014 /* Check if we're really running on a EFIKA */
3015 node = call_prom("finddevice", 1, 1, ADDR("/"));
3016 if (!PHANDLE_VALID(node))
3017 return;
3018
3019 rv = prom_getprop(node, "model", prop, sizeof(prop));
3020 if (rv == PROM_ERROR)
3021 return;
3022 if (prom_strcmp(prop, "EFIKA5K2"))
3023 return;
3024
3025 prom_printf("Applying EFIKA device tree fixups\n");
3026
3027 /* Claiming to be 'chrp' is death */
3028 node = call_prom("finddevice", 1, 1, ADDR("/"));
3029 rv = prom_getprop(node, "device_type", prop, sizeof(prop));
3030 if (rv != PROM_ERROR && (prom_strcmp(prop, "chrp") == 0))
3031 prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
3032
3033 /* CODEGEN,description is exposed in /proc/cpuinfo so
3034 fix that too */
3035 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
3036 if (rv != PROM_ERROR && (prom_strstr(prop, "CHRP")))
3037 prom_setprop(node, "/", "CODEGEN,description",
3038 "Efika 5200B PowerPC System",
3039 sizeof("Efika 5200B PowerPC System"));
3040
3041 /* Fixup bestcomm interrupts property */
3042 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
3043 if (PHANDLE_VALID(node)) {
3044 len = prom_getproplen(node, "interrupts");
3045 if (len == 12) {
3046 prom_printf("Fixing bestcomm interrupts property\n");
3047 prom_setprop(node, "/builtin/bestcom", "interrupts",
3048 bcomm_irq, sizeof(bcomm_irq));
3049 }
3050 }
3051
3052 /* Fixup sound interrupts property */
3053 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
3054 if (PHANDLE_VALID(node)) {
3055 rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
3056 if (rv == PROM_ERROR) {
3057 prom_printf("Adding sound interrupts property\n");
3058 prom_setprop(node, "/builtin/sound", "interrupts",
3059 sound_irq, sizeof(sound_irq));
3060 }
3061 }
3062
3063 /* Make sure ethernet phy-handle property exists */
3064 fixup_device_tree_efika_add_phy();
3065}
3066#else
3067#define fixup_device_tree_efika()
3068#endif
3069
3070#ifdef CONFIG_PPC_PASEMI_NEMO
3071/*
3072 * CFE supplied on Nemo is broken in several ways; the biggest
3073 * problem is that it reassigns ISA interrupts to unused mpic ints.
3074 * Add an interrupt-controller property for the io-bridge to use
3075 * and correct the ints so we can attach them to an irq_domain
3076 */
3077static void __init fixup_device_tree_pasemi(void)
3078{
3079 u32 interrupts[2], parent, rval, val = 0;
3080 char *name, *pci_name;
3081 phandle iob, node;
3082
3083 /* Find the root pci node */
3084 name = "/pxp@0,e0000000";
3085 iob = call_prom("finddevice", 1, 1, ADDR(name));
3086 if (!PHANDLE_VALID(iob))
3087 return;
3088
3089 /* check if interrupt-controller node set yet */
3090 if (prom_getproplen(iob, "interrupt-controller") !=PROM_ERROR)
3091 return;
3092
3093 prom_printf("adding interrupt-controller property for SB600...\n");
3094
3095 prom_setprop(iob, name, "interrupt-controller", &val, 0);
3096
3097 pci_name = "/pxp@0,e0000000/pci@11";
3098 node = call_prom("finddevice", 1, 1, ADDR(pci_name));
3099 parent = ADDR(iob);
3100
3101 for( ; prom_next_node(&node); ) {
3102 /* scan each node for one with an interrupt */
3103 if (!PHANDLE_VALID(node))
3104 continue;
3105
3106 rval = prom_getproplen(node, "interrupts");
3107 if (rval == 0 || rval == PROM_ERROR)
3108 continue;
3109
3110 prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
3111 if ((interrupts[0] < 212) || (interrupts[0] > 222))
3112 continue;
3113
3114 /* found a node, update both interrupts and interrupt-parent */
3115 if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
3116 interrupts[0] -= 203;
3117 if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
3118 interrupts[0] -= 213;
3119 if (interrupts[0] == 221)
3120 interrupts[0] = 14;
3121 if (interrupts[0] == 222)
3122 interrupts[0] = 8;
3123
3124 prom_setprop(node, pci_name, "interrupts", interrupts,
3125 sizeof(interrupts));
3126 prom_setprop(node, pci_name, "interrupt-parent", &parent,
3127 sizeof(parent));
3128 }
3129
3130 /*
3131 * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
3132 * so that generic isa-bridge code can add the SB600 and its on-board
3133 * peripherals.
3134 */
3135 name = "/pxp@0,e0000000/io-bridge@0";
3136 iob = call_prom("finddevice", 1, 1, ADDR(name));
3137 if (!PHANDLE_VALID(iob))
3138 return;
3139
3140 /* device_type is already set, just change it. */
3141
3142 prom_printf("Changing device_type of SB600 node...\n");
3143
3144 prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
3145}
3146#else /* !CONFIG_PPC_PASEMI_NEMO */
3147static inline void fixup_device_tree_pasemi(void) { }
3148#endif
3149
3150static void __init fixup_device_tree(void)
3151{
3152 fixup_device_tree_maple();
3153 fixup_device_tree_maple_memory_controller();
3154 fixup_device_tree_chrp();
3155 fixup_device_tree_pmac();
3156 fixup_device_tree_efika();
3157 fixup_device_tree_pasemi();
3158}
3159
3160static void __init prom_find_boot_cpu(void)
3161{
3162 __be32 rval;
3163 ihandle prom_cpu;
3164 phandle cpu_pkg;
3165
3166 rval = 0;
3167 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
3168 return;
3169 prom_cpu = be32_to_cpu(rval);
3170
3171 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
3172
3173 if (!PHANDLE_VALID(cpu_pkg))
3174 return;
3175
3176 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
3177 prom.cpu = be32_to_cpu(rval);
3178
3179 prom_debug("Booting CPU hw index = %d\n", prom.cpu);
3180}
3181
3182static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
3183{
3184#ifdef CONFIG_BLK_DEV_INITRD
3185 if (r3 && r4 && r4 != 0xdeadbeef) {
3186 __be64 val;
3187
3188 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
3189 prom_initrd_end = prom_initrd_start + r4;
3190
3191 val = cpu_to_be64(prom_initrd_start);
3192 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
3193 &val, sizeof(val));
3194 val = cpu_to_be64(prom_initrd_end);
3195 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
3196 &val, sizeof(val));
3197
3198 reserve_mem(prom_initrd_start,
3199 prom_initrd_end - prom_initrd_start);
3200
3201 prom_debug("initrd_start=0x%lx\n", prom_initrd_start);
3202 prom_debug("initrd_end=0x%lx\n", prom_initrd_end);
3203 }
3204#endif /* CONFIG_BLK_DEV_INITRD */
3205}
3206
3207#ifdef CONFIG_PPC64
3208#ifdef CONFIG_RELOCATABLE
3209static void reloc_toc(void)
3210{
3211}
3212
3213static void unreloc_toc(void)
3214{
3215}
3216#else
3217static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
3218{
3219 unsigned long i;
3220 unsigned long *toc_entry;
3221
3222 /* Get the start of the TOC by using r2 directly. */
3223 asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
3224
3225 for (i = 0; i < nr_entries; i++) {
3226 *toc_entry = *toc_entry + offset;
3227 toc_entry++;
3228 }
3229}
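
/*
 * Added note: every TOC slot between __prom_init_toc_start and
 * __prom_init_toc_end holds an address computed at link time, so adding
 * the current load offset (reloc_offset()) makes those entries usable
 * while we run at the address we were loaded at; unreloc_toc() subtracts
 * the same offset again before control leaves prom_init().
 */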
3230
3231static void reloc_toc(void)
3232{
3233 unsigned long offset = reloc_offset();
3234 unsigned long nr_entries =
3235 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
3236
3237 __reloc_toc(offset, nr_entries);
3238
3239 mb();
3240}
3241
3242static void unreloc_toc(void)
3243{
3244 unsigned long offset = reloc_offset();
3245 unsigned long nr_entries =
3246 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
3247
3248 mb();
3249
3250 __reloc_toc(-offset, nr_entries);
3251}
3252#endif
3253#endif
3254
3255#ifdef CONFIG_PPC_SVM
3256/*
3257 * Perform the Enter Secure Mode ultracall.
3258 */
3259static int enter_secure_mode(unsigned long kbase, unsigned long fdt)
3260{
3261 register unsigned long r3 asm("r3") = UV_ESM;
3262 register unsigned long r4 asm("r4") = kbase;
3263 register unsigned long r5 asm("r5") = fdt;
3264
3265 asm volatile("sc 2" : "+r"(r3) : "r"(r4), "r"(r5));
3266
3267 return r3;
3268}
3269
3270/*
3271 * Call the Ultravisor to transfer us to secure memory if we have an ESM blob.
3272 */
3273static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
3274{
3275 int ret;
3276
3277 if (!prom_svm_enable)
3278 return;
3279
3280 /* Switch to secure mode. */
3281 prom_printf("Switching to secure mode.\n");
3282
3283 /*
3284 * The ultravisor will do an integrity check of the kernel image but we
3285 * relocated it so the check will fail. Restore the original image by
3286 * relocating it back to the kernel virtual base address.
3287 */
3288 if (IS_ENABLED(CONFIG_RELOCATABLE))
3289 relocate(KERNELBASE);
3290
3291 ret = enter_secure_mode(kbase, fdt);
3292
3293 /* Relocate the kernel again. */
3294 if (IS_ENABLED(CONFIG_RELOCATABLE))
3295 relocate(kbase);
3296
3297 if (ret != U_SUCCESS) {
3298 prom_printf("Returned %d from switching to secure mode.\n", ret);
3299 prom_rtas_os_term("Switch to secure mode failed.\n");
3300 }
3301}
3302#else
3303static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
3304{
3305}
3306#endif /* CONFIG_PPC_SVM */
3307
3308/*
3309 * We enter here early on, when the Open Firmware prom is still
3310 * handling exceptions and the MMU hash table for us.
3311 */
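
/*
 * Added note on the arguments as used below: r3/r4 carry the initrd start
 * and size handed over by the boot loader (see prom_check_initrd()), pp is
 * the Open Firmware client-interface entry point saved by
 * prom_init_client_services(), and kbase is the address the kernel image
 * was loaded at, used by copy_and_flush() and for the final jump to
 * __start().
 */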
3312
3313unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3314 unsigned long pp,
3315 unsigned long r6, unsigned long r7,
3316 unsigned long kbase)
3317{
3318 unsigned long hdr;
3319
3320#ifdef CONFIG_PPC32
3321 unsigned long offset = reloc_offset();
3322 reloc_got2(offset);
3323#else
3324 reloc_toc();
3325#endif
3326
3327 /*
3328 * First zero the BSS
3329 */
3330 memset(&__bss_start, 0, __bss_stop - __bss_start);
3331
3332 /*
3333 * Init interface to Open Firmware, get some node references,
3334 * like /chosen
3335 */
3336 prom_init_client_services(pp);
3337
3338 /*
3339 * See if this OF is old enough that we need to do explicit maps
3340 * and other workarounds
3341 */
3342 prom_find_mmu();
3343
3344 /*
3345 * Init prom stdout device
3346 */
3347 prom_init_stdout();
3348
3349 prom_printf("Preparing to boot %s", linux_banner);
3350
3351 /*
3352 * Get default machine type. At this point, we do not differentiate
3353 * between pSeries SMP and pSeries LPAR
3354 */
3355 of_platform = prom_find_machine_type();
3356 prom_printf("Detected machine type: %x\n", of_platform);
3357
3358#ifndef CONFIG_NONSTATIC_KERNEL
3359 /* Bail if this is a kdump kernel. */
3360 if (PHYSICAL_START > 0)
3361 prom_panic("Error: You can't boot a kdump kernel from OF!\n");
3362#endif
3363
3364 /*
3365 * Check for an initrd
3366 */
3367 prom_check_initrd(r3, r4);
3368
3369 /*
3370 * Do early parsing of command line
3371 */
3372 early_cmdline_parse();
3373
3374#ifdef CONFIG_PPC_PSERIES
3375 /*
3376 * On pSeries, inform the firmware about our capabilities
3377 */
3378 if (of_platform == PLATFORM_PSERIES ||
3379 of_platform == PLATFORM_PSERIES_LPAR)
3380 prom_send_capabilities();
3381#endif
3382
3383 /*
3384 * Copy the CPU hold code
3385 */
3386 if (of_platform != PLATFORM_POWERMAC)
3387 copy_and_flush(0, kbase, 0x100, 0);
3388
3389 /*
3390 * Initialize memory management within prom_init
3391 */
3392 prom_init_mem();
3393
3394 /*
3395 * Determine which cpu is actually running right _now_
3396 */
3397 prom_find_boot_cpu();
3398
3399 /*
3400 * Initialize display devices
3401 */
3402 prom_check_displays();
3403
3404#if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
3405 /*
3406 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
3407 * that uses the allocator; we need to make sure we get the top of memory
3408 * available for us here...
3409 */
3410 if (of_platform == PLATFORM_PSERIES)
3411 prom_initialize_tce_table();
3412#endif
3413
3414 /*
3415 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
3416 * have a usable RTAS implementation.
3417 */
3418 if (of_platform != PLATFORM_POWERMAC)
3419 prom_instantiate_rtas();
3420
3421#ifdef CONFIG_PPC64
3422 /* instantiate sml */
3423 prom_instantiate_sml();
3424#endif
3425
3426 /*
3427 * On non-powermacs, put all CPUs in spin-loops.
3428 *
3429 * PowerMacs use a different mechanism to spin CPUs
3430 *
3431 * (This must be done after instantiating RTAS)
3432 */
3433 if (of_platform != PLATFORM_POWERMAC)
3434 prom_hold_cpus();
3435
3436 /*
3437 * Fill in some infos for use by the kernel later on
3438 */
3439 if (prom_memory_limit) {
3440 __be64 val = cpu_to_be64(prom_memory_limit);
3441 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3442 &val, sizeof(val));
3443 }
3444#ifdef CONFIG_PPC64
3445 if (prom_iommu_off)
3446 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3447 NULL, 0);
3448
3449 if (prom_iommu_force_on)
3450 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
3451 NULL, 0);
3452
3453 if (prom_tce_alloc_start) {
3454 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3455 &prom_tce_alloc_start,
3456 sizeof(prom_tce_alloc_start));
3457 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3458 &prom_tce_alloc_end,
3459 sizeof(prom_tce_alloc_end));
3460 }
3461#endif
3462
3463 /*
3464 * Fixup any known bugs in the device-tree
3465 */
3466 fixup_device_tree();
3467
3468 /*
3469 * Now finally create the flattened device-tree
3470 */
3471 prom_printf("copying OF device tree...\n");
3472 flatten_device_tree();
3473
3474 /*
3475 * in case stdin is USB and still active on IBM machines...
3476 * Unfortunately quiesce crashes on some powermacs if we have
3477 * closed stdin already (in particular the powerbook 101).
3478 */
3479 if (of_platform != PLATFORM_POWERMAC)
3480 prom_close_stdin();
3481
3482 /*
3483 * Call OF "quiesce" method to shut down pending DMA's from
3484 * devices etc...
3485 */
3486 prom_printf("Quiescing Open Firmware ...\n");
3487 call_prom("quiesce", 0, 0);
3488
3489 /*
3490 * And finally, call the kernel passing it the flattened device
3491 * tree and NULL as r5, thus triggering the new entry point which
3492 * is common to us and kexec
3493 */
3494 hdr = dt_header_start;
3495
3496 prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
3497 prom_debug("->dt_header_start=0x%lx\n", hdr);
3498
3499#ifdef CONFIG_PPC32
3500 reloc_got2(-offset);
3501#else
3502 unreloc_toc();
3503#endif
3504
3505 /* Move to secure memory if we're supposed to be secure guests. */
3506 setup_secure_guest(kbase, hdr);
3507
3508 __start(hdr, kbase, 0, 0, 0, 0, 0);
3509
3510 return 0;
3511}
1/*
2 * Procedures for interfacing to Open Firmware.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#undef DEBUG_PROM
17
18#include <stdarg.h>
19#include <linux/kernel.h>
20#include <linux/string.h>
21#include <linux/init.h>
22#include <linux/threads.h>
23#include <linux/spinlock.h>
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/proc_fs.h>
27#include <linux/stringify.h>
28#include <linux/delay.h>
29#include <linux/initrd.h>
30#include <linux/bitops.h>
31#include <asm/prom.h>
32#include <asm/rtas.h>
33#include <asm/page.h>
34#include <asm/processor.h>
35#include <asm/irq.h>
36#include <asm/io.h>
37#include <asm/smp.h>
38#include <asm/mmu.h>
39#include <asm/pgtable.h>
40#include <asm/iommu.h>
41#include <asm/btext.h>
42#include <asm/sections.h>
43#include <asm/machdep.h>
44#include <asm/opal.h>
45#include <asm/asm-prototypes.h>
46
47#include <linux/linux_logo.h>
48
49/*
50 * Eventually bump that one up
51 */
52#define DEVTREE_CHUNK_SIZE 0x100000
53
54/*
55 * This is the size of the local memory reserve map that gets copied
56 * into the boot params passed to the kernel. That size is totally
57 * flexible as the kernel just reads the list until it encounters an
58 * entry with size 0, so it can be changed without breaking binary
59 * compatibility
60 */
61#define MEM_RESERVE_MAP_SIZE 8
62
63/*
64 * prom_init() is called very early on, before the kernel text
65 * and data have been mapped to KERNELBASE. At this point the code
66 * is running at whatever address it has been loaded at.
67 * On ppc32 we compile with -mrelocatable, which means that references
68 * to extern and static variables get relocated automatically.
69 * ppc64 objects are always relocatable, we just need to relocate the
70 * TOC.
71 *
72 * Because OF may have mapped I/O devices into the area starting at
73 * KERNELBASE, particularly on CHRP machines, we can't safely call
74 * OF once the kernel has been mapped to KERNELBASE. Therefore all
75 * OF calls must be done within prom_init().
76 *
77 * ADDR is used in calls to call_prom. The 4th and following
78 * arguments to call_prom should be 32-bit values.
79 * On ppc64, 64 bit values are truncated to 32 bits (and
80 * fortunately don't get interpreted as two arguments).
81 */
82#define ADDR(x) (u32)(unsigned long)(x)
83
84#ifdef CONFIG_PPC64
85#define OF_WORKAROUNDS 0
86#else
87#define OF_WORKAROUNDS of_workarounds
88int of_workarounds;
89#endif
90
91#define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
92#define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
93
94#define PROM_BUG() do { \
95 prom_printf("kernel BUG at %s line 0x%x!\n", \
96 __FILE__, __LINE__); \
97 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
98} while (0)
99
100#ifdef DEBUG_PROM
101#define prom_debug(x...) prom_printf(x)
102#else
103#define prom_debug(x...)
104#endif
105
106
107typedef u32 prom_arg_t;
108
109struct prom_args {
110 __be32 service;
111 __be32 nargs;
112 __be32 nret;
113 __be32 args[10];
114};
115
116struct prom_t {
117 ihandle root;
118 phandle chosen;
119 int cpu;
120 ihandle stdout;
121 ihandle mmumap;
122 ihandle memory;
123};
124
125struct mem_map_entry {
126 __be64 base;
127 __be64 size;
128};
129
130typedef __be32 cell_t;
131
132extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
133 unsigned long r6, unsigned long r7, unsigned long r8,
134 unsigned long r9);
135
136#ifdef CONFIG_PPC64
137extern int enter_prom(struct prom_args *args, unsigned long entry);
138#else
139static inline int enter_prom(struct prom_args *args, unsigned long entry)
140{
141 return ((int (*)(struct prom_args *))entry)(args);
142}
143#endif
144
145extern void copy_and_flush(unsigned long dest, unsigned long src,
146 unsigned long size, unsigned long offset);
147
148/* prom structure */
149static struct prom_t __initdata prom;
150
151static unsigned long prom_entry __initdata;
152
153#define PROM_SCRATCH_SIZE 256
154
155static char __initdata of_stdout_device[256];
156static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
157
158static unsigned long __initdata dt_header_start;
159static unsigned long __initdata dt_struct_start, dt_struct_end;
160static unsigned long __initdata dt_string_start, dt_string_end;
161
162static unsigned long __initdata prom_initrd_start, prom_initrd_end;
163
164#ifdef CONFIG_PPC64
165static int __initdata prom_iommu_force_on;
166static int __initdata prom_iommu_off;
167static unsigned long __initdata prom_tce_alloc_start;
168static unsigned long __initdata prom_tce_alloc_end;
169#endif
170
171/* Platforms codes are now obsolete in the kernel. Now only used within this
172 * file and ultimately gone too. Feel free to change them if you need, they
173 * are not shared with anything outside of this file anymore
174 */
175#define PLATFORM_PSERIES 0x0100
176#define PLATFORM_PSERIES_LPAR 0x0101
177#define PLATFORM_LPAR 0x0001
178#define PLATFORM_POWERMAC 0x0400
179#define PLATFORM_GENERIC 0x0500
180#define PLATFORM_OPAL 0x0600
181
182static int __initdata of_platform;
183
184static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
185
186static unsigned long __initdata prom_memory_limit;
187
188static unsigned long __initdata alloc_top;
189static unsigned long __initdata alloc_top_high;
190static unsigned long __initdata alloc_bottom;
191static unsigned long __initdata rmo_top;
192static unsigned long __initdata ram_top;
193
194static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
195static int __initdata mem_reserve_cnt;
196
197static cell_t __initdata regbuf[1024];
198
199static bool rtas_has_query_cpu_stopped;
200
201
202/*
203 * Error results ... some OF calls will return "-1" on error, some
204 * will return 0, some will return either. To simplify, here are
205 * macros to use with any ihandle or phandle return value to check if
206 * it is valid
207 */
208
209#define PROM_ERROR (-1u)
210#define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
211#define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
212
213
214/* This is the one and *ONLY* place where we actually call open
215 * firmware.
216 */
217
218static int __init call_prom(const char *service, int nargs, int nret, ...)
219{
220 int i;
221 struct prom_args args;
222 va_list list;
223
224 args.service = cpu_to_be32(ADDR(service));
225 args.nargs = cpu_to_be32(nargs);
226 args.nret = cpu_to_be32(nret);
227
228 va_start(list, nret);
229 for (i = 0; i < nargs; i++)
230 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
231 va_end(list);
232
233 for (i = 0; i < nret; i++)
234 args.args[nargs+i] = 0;
235
236 if (enter_prom(&args, prom_entry) < 0)
237 return PROM_ERROR;
238
239 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
240}
241
242static int __init call_prom_ret(const char *service, int nargs, int nret,
243 prom_arg_t *rets, ...)
244{
245 int i;
246 struct prom_args args;
247 va_list list;
248
249 args.service = cpu_to_be32(ADDR(service));
250 args.nargs = cpu_to_be32(nargs);
251 args.nret = cpu_to_be32(nret);
252
253 va_start(list, rets);
254 for (i = 0; i < nargs; i++)
255 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
256 va_end(list);
257
258 for (i = 0; i < nret; i++)
259 args.args[nargs+i] = 0;
260
261 if (enter_prom(&args, prom_entry) < 0)
262 return PROM_ERROR;
263
264 if (rets != NULL)
265 for (i = 1; i < nret; ++i)
266 rets[i-1] = be32_to_cpu(args.args[nargs+i]);
267
268 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
269}
270
271
272static void __init prom_print(const char *msg)
273{
274 const char *p, *q;
275
276 if (prom.stdout == 0)
277 return;
278
279 for (p = msg; *p != 0; p = q) {
280 for (q = p; *q != 0 && *q != '\n'; ++q)
281 ;
282 if (q > p)
283 call_prom("write", 3, 1, prom.stdout, p, q - p);
284 if (*q == 0)
285 break;
286 ++q;
287 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
288 }
289}
290
291
292static void __init prom_print_hex(unsigned long val)
293{
294 int i, nibbles = sizeof(val)*2;
295 char buf[sizeof(val)*2+1];
296
297 for (i = nibbles-1; i >= 0; i--) {
298 buf[i] = (val & 0xf) + '0';
299 if (buf[i] > '9')
300 buf[i] += ('a'-'0'-10);
301 val >>= 4;
302 }
303 buf[nibbles] = '\0';
304 call_prom("write", 3, 1, prom.stdout, buf, nibbles);
305}
306
307/* max number of decimal digits in an unsigned long */
308#define UL_DIGITS 21
309static void __init prom_print_dec(unsigned long val)
310{
311 int i, size;
312 char buf[UL_DIGITS+1];
313
314 for (i = UL_DIGITS-1; i >= 0; i--) {
315 buf[i] = (val % 10) + '0';
316 val = val/10;
317 if (val == 0)
318 break;
319 }
320 /* shift stuff down */
321 size = UL_DIGITS - i;
322 call_prom("write", 3, 1, prom.stdout, buf+i, size);
323}
324
325static void __init prom_printf(const char *format, ...)
326{
327 const char *p, *q, *s;
328 va_list args;
329 unsigned long v;
330 long vs;
331
332 va_start(args, format);
333 for (p = format; *p != 0; p = q) {
334 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
335 ;
336 if (q > p)
337 call_prom("write", 3, 1, prom.stdout, p, q - p);
338 if (*q == 0)
339 break;
340 if (*q == '\n') {
341 ++q;
342 call_prom("write", 3, 1, prom.stdout,
343 ADDR("\r\n"), 2);
344 continue;
345 }
346 ++q;
347 if (*q == 0)
348 break;
349 switch (*q) {
350 case 's':
351 ++q;
352 s = va_arg(args, const char *);
353 prom_print(s);
354 break;
355 case 'x':
356 ++q;
357 v = va_arg(args, unsigned long);
358 prom_print_hex(v);
359 break;
360 case 'd':
361 ++q;
362 vs = va_arg(args, int);
363 if (vs < 0) {
364 prom_print("-");
365 vs = -vs;
366 }
367 prom_print_dec(vs);
368 break;
369 case 'l':
370 ++q;
371 if (*q == 0)
372 break;
373 else if (*q == 'x') {
374 ++q;
375 v = va_arg(args, unsigned long);
376 prom_print_hex(v);
377 } else if (*q == 'u') { /* '%lu' */
378 ++q;
379 v = va_arg(args, unsigned long);
380 prom_print_dec(v);
381 } else if (*q == 'd') { /* %ld */
382 ++q;
383 vs = va_arg(args, long);
384 if (vs < 0) {
385 prom_print("-");
386 vs = -vs;
387 }
388 prom_print_dec(vs);
389 }
390 break;
391 }
392 }
393 va_end(args);
394}
395
396
397static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
398 unsigned long align)
399{
400
401 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
402 /*
403 * Old OF requires we claim physical and virtual separately
404 * and then map explicitly (assuming virtual mode)
405 */
406 int ret;
407 prom_arg_t result;
408
409 ret = call_prom_ret("call-method", 5, 2, &result,
410 ADDR("claim"), prom.memory,
411 align, size, virt);
412 if (ret != 0 || result == -1)
413 return -1;
414 ret = call_prom_ret("call-method", 5, 2, &result,
415 ADDR("claim"), prom.mmumap,
416 align, size, virt);
417 if (ret != 0) {
418 call_prom("call-method", 4, 1, ADDR("release"),
419 prom.memory, size, virt);
420 return -1;
421 }
422 /* the 0x12 is M (coherence) + PP == read/write */
423 call_prom("call-method", 6, 1,
424 ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
425 return virt;
426 }
427 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
428 (prom_arg_t)align);
429}
430
431static void __init __attribute__((noreturn)) prom_panic(const char *reason)
432{
433 prom_print(reason);
434 /* Do not call exit because it clears the screen on pmac
435 * it also causes some sort of double-fault on early pmacs */
436 if (of_platform == PLATFORM_POWERMAC)
437 asm("trap\n");
438
439 /* ToDo: should put up an SRC here on pSeries */
440 call_prom("exit", 0, 0);
441
442 for (;;) /* should never get here */
443 ;
444}
445
446
447static int __init prom_next_node(phandle *nodep)
448{
449 phandle node;
450
451 if ((node = *nodep) != 0
452 && (*nodep = call_prom("child", 1, 1, node)) != 0)
453 return 1;
454 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
455 return 1;
456 for (;;) {
457 if ((node = call_prom("parent", 1, 1, node)) == 0)
458 return 0;
459 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
460 return 1;
461 }
462}
463
464static inline int prom_getprop(phandle node, const char *pname,
465 void *value, size_t valuelen)
466{
467 return call_prom("getprop", 4, 1, node, ADDR(pname),
468 (u32)(unsigned long) value, (u32) valuelen);
469}
470
471static inline int prom_getproplen(phandle node, const char *pname)
472{
473 return call_prom("getproplen", 2, 1, node, ADDR(pname));
474}
475
476static void add_string(char **str, const char *q)
477{
478 char *p = *str;
479
480 while (*q)
481 *p++ = *q++;
482 *p++ = ' ';
483 *str = p;
484}
485
486static char *tohex(unsigned int x)
487{
488 static char digits[] = "0123456789abcdef";
489 static char result[9];
490 int i;
491
492 result[8] = 0;
493 i = 8;
494 do {
495 --i;
496 result[i] = digits[x & 0xf];
497 x >>= 4;
498 } while (x != 0 && i > 0);
499 return &result[i];
500}
501
502static int __init prom_setprop(phandle node, const char *nodename,
503 const char *pname, void *value, size_t valuelen)
504{
505 char cmd[256], *p;
506
507 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
508 return call_prom("setprop", 4, 1, node, ADDR(pname),
509 (u32)(unsigned long) value, (u32) valuelen);
510
511 /* gah... setprop doesn't work on longtrail, have to use interpret */
512 p = cmd;
513 add_string(&p, "dev");
514 add_string(&p, nodename);
515 add_string(&p, tohex((u32)(unsigned long) value));
516 add_string(&p, tohex(valuelen));
517 add_string(&p, tohex(ADDR(pname)));
518 add_string(&p, tohex(strlen(pname)));
519 add_string(&p, "property");
520 *p = 0;
521 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
522}
523
524/* We can't use the standard versions because of relocation headaches. */
525#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
526 || ('a' <= (c) && (c) <= 'f') \
527 || ('A' <= (c) && (c) <= 'F'))
528
529#define isdigit(c) ('0' <= (c) && (c) <= '9')
530#define islower(c) ('a' <= (c) && (c) <= 'z')
531#define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
532
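/*
 * Minimal strtoul: a leading "0" selects octal and "0x"/"0X" selects
 * hex, otherwise the string is parsed as decimal.
 */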
533static unsigned long prom_strtoul(const char *cp, const char **endp)
534{
535 unsigned long result = 0, base = 10, value;
536
537 if (*cp == '0') {
538 base = 8;
539 cp++;
540 if (toupper(*cp) == 'X') {
541 cp++;
542 base = 16;
543 }
544 }
545
546 while (isxdigit(*cp) &&
547 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
548 result = result * base + value;
549 cp++;
550 }
551
552 if (endp)
553 *endp = cp;
554
555 return result;
556}
557
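/*
 * Parse a size argument with an optional K/M/G suffix, e.g. "mem=512M"
 * yields 512 << 20.
 */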
558static unsigned long prom_memparse(const char *ptr, const char **retptr)
559{
560 unsigned long ret = prom_strtoul(ptr, retptr);
561 int shift = 0;
562
563 /*
564 * We can't use a switch here because GCC *may* generate a
565 * jump table which won't work, because we're not running at
566 * the address we're linked at.
567 */
568 if ('G' == **retptr || 'g' == **retptr)
569 shift = 30;
570
571 if ('M' == **retptr || 'm' == **retptr)
572 shift = 20;
573
574 if ('K' == **retptr || 'k' == **retptr)
575 shift = 10;
576
577 if (shift) {
578 ret <<= shift;
579 (*retptr)++;
580 }
581
582 return ret;
583}
584
585/*
586 * Early parsing of the command line passed to the kernel, used for
587 * "mem=x" and the options that affect the iommu
588 */
589static void __init early_cmdline_parse(void)
590{
591 const char *opt;
592
593 char *p;
594 int l = 0;
595
596 prom_cmd_line[0] = 0;
597 p = prom_cmd_line;
598 if ((long)prom.chosen > 0)
599 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
600#ifdef CONFIG_CMDLINE
601 if (l <= 0 || p[0] == '\0') /* dbl check */
602 strlcpy(prom_cmd_line,
603 CONFIG_CMDLINE, sizeof(prom_cmd_line));
604#endif /* CONFIG_CMDLINE */
605 prom_printf("command line: %s\n", prom_cmd_line);
606
607#ifdef CONFIG_PPC64
608 opt = strstr(prom_cmd_line, "iommu=");
609 if (opt) {
610 prom_printf("iommu opt is: %s\n", opt);
611 opt += 6;
612 while (*opt && *opt == ' ')
613 opt++;
614 if (!strncmp(opt, "off", 3))
615 prom_iommu_off = 1;
616 else if (!strncmp(opt, "force", 5))
617 prom_iommu_force_on = 1;
618 }
619#endif
620 opt = strstr(prom_cmd_line, "mem=");
621 if (opt) {
622 opt += 4;
623 prom_memory_limit = prom_memparse(opt, (const char **)&opt);
624#ifdef CONFIG_PPC64
625 /* Align to 16 MB == size of ppc64 large page */
626 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
627#endif
628 }
629}
630
631#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
632/*
633 * The architecture vector has an array of PVR mask/value pairs,
634 * followed by # option vectors - 1, followed by the option vectors.
635 *
636 * See prom.h for the definition of the bits specified in the
637 * architecture vector.
638 */
639
640/* Firmware expects the value to be n - 1, where n is the # of vectors */
641#define NUM_VECTORS(n) ((n) - 1)
642
643/*
644 * Firmware expects 1 + n - 2, where n is the length of the option vector in
645 * bytes. The 1 accounts for the length byte itself; the origin of the - 2 is unclear.
646 */
647#define VECTOR_LENGTH(n) (1 + (n) - 2)
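/* e.g. a 33-byte option vector struct gets a length byte of VECTOR_LENGTH(33) == 32 */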
648
649struct option_vector1 {
650 u8 byte1;
651 u8 arch_versions;
652} __packed;
653
654struct option_vector2 {
655 u8 byte1;
656 __be16 reserved;
657 __be32 real_base;
658 __be32 real_size;
659 __be32 virt_base;
660 __be32 virt_size;
661 __be32 load_base;
662 __be32 min_rma;
663 __be32 min_load;
664 u8 min_rma_percent;
665 u8 max_pft_size;
666} __packed;
667
668struct option_vector3 {
669 u8 byte1;
670 u8 byte2;
671} __packed;
672
673struct option_vector4 {
674 u8 byte1;
675 u8 min_vp_cap;
676} __packed;
677
678struct option_vector5 {
679 u8 byte1;
680 u8 byte2;
681 u8 byte3;
682 u8 cmo;
683 u8 associativity;
684 u8 bin_opts;
685 u8 micro_checkpoint;
686 u8 reserved0;
687 __be32 max_cpus;
688 __be16 papr_level;
689 __be16 reserved1;
690 u8 platform_facilities;
691 u8 reserved2;
692 __be16 reserved3;
693 u8 subprocessors;
694} __packed;
695
696struct option_vector6 {
697 u8 reserved;
698 u8 secondary_pteg;
699 u8 os_name;
700} __packed;
701
702struct ibm_arch_vec {
703 struct { u32 mask, val; } pvrs[10];
704
705 u8 num_vectors;
706
707 u8 vec1_len;
708 struct option_vector1 vec1;
709
710 u8 vec2_len;
711 struct option_vector2 vec2;
712
713 u8 vec3_len;
714 struct option_vector3 vec3;
715
716 u8 vec4_len;
717 struct option_vector4 vec4;
718
719 u8 vec5_len;
720 struct option_vector5 vec5;
721
722 u8 vec6_len;
723 struct option_vector6 vec6;
724} __packed;
725
726struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
727 .pvrs = {
728 {
729 .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
730 .val = cpu_to_be32(0x003a0000),
731 },
732 {
733 .mask = cpu_to_be32(0xffff0000), /* POWER6 */
734 .val = cpu_to_be32(0x003e0000),
735 },
736 {
737 .mask = cpu_to_be32(0xffff0000), /* POWER7 */
738 .val = cpu_to_be32(0x003f0000),
739 },
740 {
741 .mask = cpu_to_be32(0xffff0000), /* POWER8E */
742 .val = cpu_to_be32(0x004b0000),
743 },
744 {
745 .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
746 .val = cpu_to_be32(0x004c0000),
747 },
748 {
749 .mask = cpu_to_be32(0xffff0000), /* POWER8 */
750 .val = cpu_to_be32(0x004d0000),
751 },
752 {
753 .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
754 .val = cpu_to_be32(0x0f000004),
755 },
756 {
757 .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
758 .val = cpu_to_be32(0x0f000003),
759 },
760 {
761 .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
762 .val = cpu_to_be32(0x0f000002),
763 },
764 {
765 .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
766 .val = cpu_to_be32(0x0f000001),
767 },
768 },
769
770 .num_vectors = NUM_VECTORS(6),
771
772 .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
773 .vec1 = {
774 .byte1 = 0,
775 .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
776 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
777 },
778
779 .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
780 /* option vector 2: Open Firmware options supported */
781 .vec2 = {
782 .byte1 = OV2_REAL_MODE,
783 .reserved = 0,
784 .real_base = cpu_to_be32(0xffffffff),
785 .real_size = cpu_to_be32(0xffffffff),
786 .virt_base = cpu_to_be32(0xffffffff),
787 .virt_size = cpu_to_be32(0xffffffff),
788 .load_base = cpu_to_be32(0xffffffff),
789 .min_rma = cpu_to_be32(256), /* 256MB min RMA */
790 .min_load = cpu_to_be32(0xffffffff), /* full client load */
791 .min_rma_percent = 0, /* min RMA percentage of total RAM */
792 .max_pft_size = 48, /* max log_2(hash table size) */
793 },
794
795 .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
796 /* option vector 3: processor options supported */
797 .vec3 = {
798 .byte1 = 0, /* don't ignore, don't halt */
799 .byte2 = OV3_FP | OV3_VMX | OV3_DFP,
800 },
801
802 .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
803 /* option vector 4: IBM PAPR implementation */
804 .vec4 = {
805 .byte1 = 0, /* don't halt */
806 .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
807 },
808
809 .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
810 /* option vector 5: PAPR/OF options */
811 .vec5 = {
812 .byte1 = 0, /* don't ignore, don't halt */
813 .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
814 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
815#ifdef CONFIG_PCI_MSI
816 /* PCIe/MSI support. Without MSI full PCIe is not supported */
817 OV5_FEAT(OV5_MSI),
818#else
819 0,
820#endif
821 .byte3 = 0,
822 .cmo =
823#ifdef CONFIG_PPC_SMLPAR
824 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
825#else
826 0,
827#endif
828 .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
829 .bin_opts = 0,
830 .micro_checkpoint = 0,
831 .reserved0 = 0,
832 .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */
833 .papr_level = 0,
834 .reserved1 = 0,
835 .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
836 .reserved2 = 0,
837 .reserved3 = 0,
838 .subprocessors = 1,
839 },
840
841 /* option vector 6: IBM PAPR hints */
842 .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
843 .vec6 = {
844 .reserved = 0,
845 .secondary_pteg = 0,
846 .os_name = OV6_LINUX,
847 },
848};
849
850/* Old method - ELF header with PT_NOTE sections only works on BE */
851#ifdef __BIG_ENDIAN__
852static struct fake_elf {
853 Elf32_Ehdr elfhdr;
854 Elf32_Phdr phdr[2];
855 struct chrpnote {
856 u32 namesz;
857 u32 descsz;
858 u32 type;
859 char name[8]; /* "PowerPC" */
860 struct chrpdesc {
861 u32 real_mode;
862 u32 real_base;
863 u32 real_size;
864 u32 virt_base;
865 u32 virt_size;
866 u32 load_base;
867 } chrpdesc;
868 } chrpnote;
869 struct rpanote {
870 u32 namesz;
871 u32 descsz;
872 u32 type;
873 char name[24]; /* "IBM,RPA-Client-Config" */
874 struct rpadesc {
875 u32 lpar_affinity;
876 u32 min_rmo_size;
877 u32 min_rmo_percent;
878 u32 max_pft_size;
879 u32 splpar;
880 u32 min_load;
881 u32 new_mem_def;
882 u32 ignore_me;
883 } rpadesc;
884 } rpanote;
885} fake_elf = {
886 .elfhdr = {
887 .e_ident = { 0x7f, 'E', 'L', 'F',
888 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
889 .e_type = ET_EXEC, /* yeah right */
890 .e_machine = EM_PPC,
891 .e_version = EV_CURRENT,
892 .e_phoff = offsetof(struct fake_elf, phdr),
893 .e_phentsize = sizeof(Elf32_Phdr),
894 .e_phnum = 2
895 },
896 .phdr = {
897 [0] = {
898 .p_type = PT_NOTE,
899 .p_offset = offsetof(struct fake_elf, chrpnote),
900 .p_filesz = sizeof(struct chrpnote)
901 }, [1] = {
902 .p_type = PT_NOTE,
903 .p_offset = offsetof(struct fake_elf, rpanote),
904 .p_filesz = sizeof(struct rpanote)
905 }
906 },
907 .chrpnote = {
908 .namesz = sizeof("PowerPC"),
909 .descsz = sizeof(struct chrpdesc),
910 .type = 0x1275,
911 .name = "PowerPC",
912 .chrpdesc = {
913 .real_mode = ~0U, /* ~0 means "don't care" */
914 .real_base = ~0U,
915 .real_size = ~0U,
916 .virt_base = ~0U,
917 .virt_size = ~0U,
918 .load_base = ~0U
919 },
920 },
921 .rpanote = {
922 .namesz = sizeof("IBM,RPA-Client-Config"),
923 .descsz = sizeof(struct rpadesc),
924 .type = 0x12759999,
925 .name = "IBM,RPA-Client-Config",
926 .rpadesc = {
927 .lpar_affinity = 0,
928 .min_rmo_size = 64, /* in megabytes */
929 .min_rmo_percent = 0,
930 .max_pft_size = 48, /* 2^48 bytes max PFT size */
931 .splpar = 1,
932 .min_load = ~0U,
933 .new_mem_def = 0
934 }
935 }
936};
937#endif /* __BIG_ENDIAN__ */
938
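/*
 * Return the number of SMT threads per core as reported by the first
 * CPU node found (all CPUs are assumed identical), defaulting to 1.
 */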
939static int __init prom_count_smt_threads(void)
940{
941 phandle node;
942 char type[64];
943 unsigned int plen;
944
945 /* Pick up the first CPU node we can find */
946 for (node = 0; prom_next_node(&node); ) {
947 type[0] = 0;
948 prom_getprop(node, "device_type", type, sizeof(type));
949
950 if (strcmp(type, "cpu"))
951 continue;
952 /*
953 * There is an entry for each smt thread, each entry being
954 * 4 bytes long. All cpus should have the same number of
955 * smt threads, so return after finding the first.
956 */
957 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
958 if (plen == PROM_ERROR)
959 break;
960 plen >>= 2;
961 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
962
963 /* Sanity check */
964 if (plen < 1 || plen > 64) {
965 prom_printf("Threads per core %lu out of bounds, assuming 1\n",
966 (unsigned long)plen);
967 return 1;
968 }
969 return plen;
970 }
971 prom_debug("No threads found, assuming 1 per core\n");
972
973 return 1;
974
975}
976
977
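/*
 * Tell firmware which architecture levels and features this kernel
 * supports, preferably via the ibm,client-architecture-support method,
 * falling back to the old fake ELF note on big-endian builds.
 */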
978static void __init prom_send_capabilities(void)
979{
980 ihandle root;
981 prom_arg_t ret;
982 u32 cores;
983
984 root = call_prom("open", 1, 1, ADDR("/"));
985 if (root != 0) {
986 /* We need to tell the FW about the number of cores we support.
987 *
988 * To do that, we count the number of threads on the first core
989 * (we assume this is the same for all cores) and use it to
990 * divide NR_CPUS.
991 */
992
993 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
994 prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
995 cores, NR_CPUS);
996
997 ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
998
999 /* try calling the ibm,client-architecture-support method */
1000 prom_printf("Calling ibm,client-architecture-support...");
1001 if (call_prom_ret("call-method", 3, 2, &ret,
1002 ADDR("ibm,client-architecture-support"),
1003 root,
1004 ADDR(&ibm_architecture_vec)) == 0) {
1005 /* the call exists... */
1006 if (ret)
1007 prom_printf("\nWARNING: ibm,client-architecture"
1008 "-support call FAILED!\n");
1009 call_prom("close", 1, 0, root);
1010 prom_printf(" done\n");
1011 return;
1012 }
1013 call_prom("close", 1, 0, root);
1014 prom_printf(" not implemented\n");
1015 }
1016
1017#ifdef __BIG_ENDIAN__
1018 {
1019 ihandle elfloader;
1020
1021 /* no ibm,client-architecture-support call, try the old way */
1022 elfloader = call_prom("open", 1, 1,
1023 ADDR("/packages/elf-loader"));
1024 if (elfloader == 0) {
1025 prom_printf("couldn't open /packages/elf-loader\n");
1026 return;
1027 }
1028 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
1029 elfloader, ADDR(&fake_elf));
1030 call_prom("close", 1, 0, elfloader);
1031 }
1032#endif /* __BIG_ENDIAN__ */
1033}
1034#endif /* #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1035
1036/*
1037 * Memory allocation strategy... our layout is normally:
1038 *
1039 * at 14Mb or more we have vmlinux, then a gap and initrd. In some
1040 * rare cases, initrd might end up being before the kernel though.
1041 * We assume this won't overwrite the final kernel at 0; we have no
1042 * provision to handle that in this version, but it should hopefully
1043 * never happen.
1044 *
1045 * alloc_top is set to the top of the RMO, and is shrunk down later if
1046 * the TCEs overlap
1047 *
1048 * alloc_bottom is set to the top of kernel/initrd
1049 *
1050 * from there, allocations are done this way : rtas is allocated
1051 * topmost, and the device-tree is allocated from the bottom. We try
1052 * to grow the device-tree allocation as we progress. If we can't,
1053 * then we fail; we don't currently have a facility to restart
1054 * elsewhere, but that shouldn't be necessary.
1055 *
1056 * Note that calls to reserve_mem have to be done explicitly, memory
1057 * allocated with either alloc_up or alloc_down isn't automatically
1058 * reserved.
1059 */
1060
1061
1062/*
1063 * Allocates memory in the RMO upward from the kernel/initrd
1064 *
1065 * When align is 0, this is a special case, it means to allocate in place
1066 * at the current location of alloc_bottom or fail (that is basically
1067 * extending the previous allocation). Used for the device-tree flattening
1068 */
1069static unsigned long __init alloc_up(unsigned long size, unsigned long align)
1070{
1071 unsigned long base = alloc_bottom;
1072 unsigned long addr = 0;
1073
1074 if (align)
1075 base = _ALIGN_UP(base, align);
1076 prom_debug("alloc_up(%x, %x)\n", size, align);
1077 if (ram_top == 0)
1078 prom_panic("alloc_up() called with mem not initialized\n");
1079
1080 if (align)
1081 base = _ALIGN_UP(alloc_bottom, align);
1082 else
1083 base = alloc_bottom;
1084
1085 for(; (base + size) <= alloc_top;
1086 base = _ALIGN_UP(base + 0x100000, align)) {
1087 prom_debug(" trying: 0x%x\n\r", base);
1088 addr = (unsigned long)prom_claim(base, size, 0);
1089 if (addr != PROM_ERROR && addr != 0)
1090 break;
1091 addr = 0;
1092 if (align == 0)
1093 break;
1094 }
1095 if (addr == 0)
1096 return 0;
1097 alloc_bottom = addr + size;
1098
1099 prom_debug(" -> %x\n", addr);
1100 prom_debug(" alloc_bottom : %x\n", alloc_bottom);
1101 prom_debug(" alloc_top : %x\n", alloc_top);
1102 prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
1103 prom_debug(" rmo_top : %x\n", rmo_top);
1104 prom_debug(" ram_top : %x\n", ram_top);
1105
1106 return addr;
1107}
1108
1109/*
1110 * Allocates memory downward, either from top of RMO, or if highmem
1111 * is set, from the top of RAM. Note that this one doesn't handle
1112 * failures. It does claim memory if highmem is not set.
1113 */
1114static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1115 int highmem)
1116{
1117 unsigned long base, addr = 0;
1118
1119 prom_debug("alloc_down(%x, %x, %s)\n", size, align,
1120 highmem ? "(high)" : "(low)");
1121 if (ram_top == 0)
1122 prom_panic("alloc_down() called with mem not initialized\n");
1123
1124 if (highmem) {
1125 /* Carve out storage for the TCE table. */
1126 addr = _ALIGN_DOWN(alloc_top_high - size, align);
1127 if (addr <= alloc_bottom)
1128 return 0;
1129 /* Will we bump into the RMO ? If yes, check out that we
1130 * didn't overlap existing allocations there, if we did,
1131 * we are dead, we must be the first in town !
1132 */
1133 if (addr < rmo_top) {
1134 /* Good, we are first */
1135 if (alloc_top == rmo_top)
1136 alloc_top = rmo_top = addr;
1137 else
1138 return 0;
1139 }
1140 alloc_top_high = addr;
1141 goto bail;
1142 }
1143
1144 base = _ALIGN_DOWN(alloc_top - size, align);
1145 for (; base > alloc_bottom;
1146 base = _ALIGN_DOWN(base - 0x100000, align)) {
1147 prom_debug(" trying: 0x%x\n\r", base);
1148 addr = (unsigned long)prom_claim(base, size, 0);
1149 if (addr != PROM_ERROR && addr != 0)
1150 break;
1151 addr = 0;
1152 }
1153 if (addr == 0)
1154 return 0;
1155 alloc_top = addr;
1156
1157 bail:
1158 prom_debug(" -> %x\n", addr);
1159 prom_debug(" alloc_bottom : %x\n", alloc_bottom);
1160 prom_debug(" alloc_top : %x\n", alloc_top);
1161 prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
1162 prom_debug(" rmo_top : %x\n", rmo_top);
1163 prom_debug(" ram_top : %x\n", ram_top);
1164
1165 return addr;
1166}
1167
1168/*
1169 * Parse a "reg" cell
1170 */
1171static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1172{
1173 cell_t *p = *cellp;
1174 unsigned long r = 0;
1175
1176 /* Ignore more than 2 cells */
1177 while (s > sizeof(unsigned long) / 4) {
1178 p++;
1179 s--;
1180 }
1181 r = be32_to_cpu(*p++);
1182#ifdef CONFIG_PPC64
1183 if (s > 1) {
1184 r <<= 32;
1185 r |= be32_to_cpu(*(p++));
1186 }
1187#endif
1188 *cellp = p;
1189 return r;
1190}
1191
1192/*
1193 * Very dumb function for adding to the memory reserve list, but
1194 * we don't need anything smarter at this point
1195 *
1196 * XXX Eventually check for collisions. They should NEVER happen.
1197 * If problems seem to show up, it would be a good start to track
1198 * them down.
1199 */
1200static void __init reserve_mem(u64 base, u64 size)
1201{
1202 u64 top = base + size;
1203 unsigned long cnt = mem_reserve_cnt;
1204
1205 if (size == 0)
1206 return;
1207
1208 /* We need to always keep one empty entry so that we
1209 * have our terminator with "size" set to 0 since we are
1210 * dumb and just copy this entire array to the boot params
1211 */
1212 base = _ALIGN_DOWN(base, PAGE_SIZE);
1213 top = _ALIGN_UP(top, PAGE_SIZE);
1214 size = top - base;
1215
1216 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1217 prom_panic("Memory reserve map exhausted !\n");
1218 mem_reserve_map[cnt].base = cpu_to_be64(base);
1219 mem_reserve_map[cnt].size = cpu_to_be64(size);
1220 mem_reserve_cnt = cnt + 1;
1221}
1222
1223/*
1224 * Initialize the memory allocation mechanism: parse the "memory" nodes to
1225 * obtain the top of memory and of the RMO, and set up our local allocator
1226 */
1227static void __init prom_init_mem(void)
1228{
1229 phandle node;
1230 char *path, type[64];
1231 unsigned int plen;
1232 cell_t *p, *endp;
1233 __be32 val;
1234 u32 rac, rsc;
1235
1236 /*
1237 * We iterate the memory nodes to find
1238 * 1) top of RMO (first node)
1239 * 2) top of memory
1240 */
1241 val = cpu_to_be32(2);
1242 prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1243 rac = be32_to_cpu(val);
1244 val = cpu_to_be32(1);
1245 prom_getprop(prom.root, "#size-cells", &val, sizeof(val));
1246 rsc = be32_to_cpu(val);
1247 prom_debug("root_addr_cells: %x\n", rac);
1248 prom_debug("root_size_cells: %x\n", rsc);
1249
1250 prom_debug("scanning memory:\n");
1251 path = prom_scratch;
1252
1253 for (node = 0; prom_next_node(&node); ) {
1254 type[0] = 0;
1255 prom_getprop(node, "device_type", type, sizeof(type));
1256
1257 if (type[0] == 0) {
1258 /*
1259 * CHRP Longtrail machines have no device_type
1260 * on the memory node, so check the name instead...
1261 */
1262 prom_getprop(node, "name", type, sizeof(type));
1263 }
1264 if (strcmp(type, "memory"))
1265 continue;
1266
1267 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1268 if (plen > sizeof(regbuf)) {
1269 prom_printf("memory node too large for buffer !\n");
1270 plen = sizeof(regbuf);
1271 }
1272 p = regbuf;
1273 endp = p + (plen / sizeof(cell_t));
1274
1275#ifdef DEBUG_PROM
1276 memset(path, 0, PROM_SCRATCH_SIZE);
1277 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1278 prom_debug(" node %s :\n", path);
1279#endif /* DEBUG_PROM */
1280
1281 while ((endp - p) >= (rac + rsc)) {
1282 unsigned long base, size;
1283
1284 base = prom_next_cell(rac, &p);
1285 size = prom_next_cell(rsc, &p);
1286
1287 if (size == 0)
1288 continue;
1289 prom_debug(" %x %x\n", base, size);
1290 if (base == 0 && (of_platform & PLATFORM_LPAR))
1291 rmo_top = size;
1292 if ((base + size) > ram_top)
1293 ram_top = base + size;
1294 }
1295 }
1296
1297 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1298
1299 /*
1300 * If prom_memory_limit is set we reduce the upper limits *except* for
1301 * alloc_top_high. This must be the real top of RAM so we can put
1302 * TCE's up there.
1303 */
1304
1305 alloc_top_high = ram_top;
1306
1307 if (prom_memory_limit) {
1308 if (prom_memory_limit <= alloc_bottom) {
1309 prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
1310 prom_memory_limit);
1311 prom_memory_limit = 0;
1312 } else if (prom_memory_limit >= ram_top) {
1313 prom_printf("Ignoring mem=%x >= ram_top.\n",
1314 prom_memory_limit);
1315 prom_memory_limit = 0;
1316 } else {
1317 ram_top = prom_memory_limit;
1318 rmo_top = min(rmo_top, prom_memory_limit);
1319 }
1320 }
1321
1322 /*
1323 * Setup our top alloc point, that is top of RMO or top of
1324 * segment 0 when running non-LPAR.
1325 * Some RS64 machines have buggy firmware where claims up at
1326 * 1GB fail. Cap at 768MB as a workaround.
1327 * Since 768MB is plenty of room, and we need to cap to something
1328 * reasonable on 32-bit, cap at 768MB on all machines.
1329 */
1330 if (!rmo_top)
1331 rmo_top = ram_top;
1332 rmo_top = min(0x30000000ul, rmo_top);
1333 alloc_top = rmo_top;
1334 alloc_top_high = ram_top;
1335
1336 /*
1337 * Check if we have an initrd after the kernel but still inside
1338 * the RMO. If we do move our bottom point to after it.
1339 */
1340 if (prom_initrd_start &&
1341 prom_initrd_start < rmo_top &&
1342 prom_initrd_end > alloc_bottom)
1343 alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1344
1345 prom_printf("memory layout at init:\n");
1346 prom_printf(" memory_limit : %x (16 MB aligned)\n", prom_memory_limit);
1347 prom_printf(" alloc_bottom : %x\n", alloc_bottom);
1348 prom_printf(" alloc_top : %x\n", alloc_top);
1349 prom_printf(" alloc_top_hi : %x\n", alloc_top_high);
1350 prom_printf(" rmo_top : %x\n", rmo_top);
1351 prom_printf(" ram_top : %x\n", ram_top);
1352}
1353
1354static void __init prom_close_stdin(void)
1355{
1356 __be32 val;
1357 ihandle stdin;
1358
1359 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1360 stdin = be32_to_cpu(val);
1361 call_prom("close", 1, 0, stdin);
1362 }
1363}
1364
1365#ifdef CONFIG_PPC_POWERNV
1366
1367#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1368static u64 __initdata prom_opal_base;
1369static u64 __initdata prom_opal_entry;
1370#endif
1371
1372/*
1373 * Allocate room for and instantiate OPAL
1374 */
1375static void __init prom_instantiate_opal(void)
1376{
1377 phandle opal_node;
1378 ihandle opal_inst;
1379 u64 base, entry;
1380 u64 size = 0, align = 0x10000;
1381 __be64 val64;
1382 u32 rets[2];
1383
1384 prom_debug("prom_instantiate_opal: start...\n");
1385
1386 opal_node = call_prom("finddevice", 1, 1, ADDR("/ibm,opal"));
1387 prom_debug("opal_node: %x\n", opal_node);
1388 if (!PHANDLE_VALID(opal_node))
1389 return;
1390
1391 val64 = 0;
1392 prom_getprop(opal_node, "opal-runtime-size", &val64, sizeof(val64));
1393 size = be64_to_cpu(val64);
1394 if (size == 0)
1395 return;
1396 val64 = 0;
1397 prom_getprop(opal_node, "opal-runtime-alignment", &val64, sizeof(val64));
1398 align = be64_to_cpu(val64);
1399
1400 base = alloc_down(size, align, 0);
1401 if (base == 0) {
1402 prom_printf("OPAL allocation failed !\n");
1403 return;
1404 }
1405
1406 opal_inst = call_prom("open", 1, 1, ADDR("/ibm,opal"));
1407 if (!IHANDLE_VALID(opal_inst)) {
1408 prom_printf("opening opal package failed (%x)\n", opal_inst);
1409 return;
1410 }
1411
1412 prom_printf("instantiating opal at 0x%x...", base);
1413
1414 if (call_prom_ret("call-method", 4, 3, rets,
1415 ADDR("load-opal-runtime"),
1416 opal_inst,
1417 base >> 32, base & 0xffffffff) != 0
1418 || (rets[0] == 0 && rets[1] == 0)) {
1419 prom_printf(" failed\n");
1420 return;
1421 }
1422 entry = (((u64)rets[0]) << 32) | rets[1];
1423
1424 prom_printf(" done\n");
1425
1426 reserve_mem(base, size);
1427
1428 prom_debug("opal base = 0x%x\n", base);
1429 prom_debug("opal align = 0x%x\n", align);
1430 prom_debug("opal entry = 0x%x\n", entry);
1431 prom_debug("opal size = 0x%x\n", (long)size);
1432
1433 prom_setprop(opal_node, "/ibm,opal", "opal-base-address",
1434 &base, sizeof(base));
1435 prom_setprop(opal_node, "/ibm,opal", "opal-entry-address",
1436 &entry, sizeof(entry));
1437
1438#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1439 prom_opal_base = base;
1440 prom_opal_entry = entry;
1441#endif
1442 prom_debug("prom_instantiate_opal: end...\n");
1443}
1444
1445#endif /* CONFIG_PPC_POWERNV */
1446
1447/*
1448 * Allocate room for and instantiate RTAS
1449 */
1450static void __init prom_instantiate_rtas(void)
1451{
1452 phandle rtas_node;
1453 ihandle rtas_inst;
1454 u32 base, entry = 0;
1455 __be32 val;
1456 u32 size = 0;
1457
1458 prom_debug("prom_instantiate_rtas: start...\n");
1459
1460 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1461 prom_debug("rtas_node: %x\n", rtas_node);
1462 if (!PHANDLE_VALID(rtas_node))
1463 return;
1464
1465 val = 0;
1466 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
1467 size = be32_to_cpu(val);
1468 if (size == 0)
1469 return;
1470
1471 base = alloc_down(size, PAGE_SIZE, 0);
1472 if (base == 0)
1473 prom_panic("Could not allocate memory for RTAS\n");
1474
1475 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1476 if (!IHANDLE_VALID(rtas_inst)) {
1477 prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1478 return;
1479 }
1480
1481 prom_printf("instantiating rtas at 0x%x...", base);
1482
1483 if (call_prom_ret("call-method", 3, 2, &entry,
1484 ADDR("instantiate-rtas"),
1485 rtas_inst, base) != 0
1486 || entry == 0) {
1487 prom_printf(" failed\n");
1488 return;
1489 }
1490 prom_printf(" done\n");
1491
1492 reserve_mem(base, size);
1493
1494 val = cpu_to_be32(base);
1495 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1496 &val, sizeof(val));
1497 val = cpu_to_be32(entry);
1498 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1499 &val, sizeof(val));
1500
1501 /* Check if it supports "query-cpu-stopped-state" */
1502 if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1503 &val, sizeof(val)) != PROM_ERROR)
1504 rtas_has_query_cpu_stopped = true;
1505
1506 prom_debug("rtas base = 0x%x\n", base);
1507 prom_debug("rtas entry = 0x%x\n", entry);
1508 prom_debug("rtas size = 0x%x\n", (long)size);
1509
1510 prom_debug("prom_instantiate_rtas: end...\n");
1511}
1512
1513#ifdef CONFIG_PPC64
1514/*
1515 * Allocate room for and instantiate Stored Measurement Log (SML)
1516 */
1517static void __init prom_instantiate_sml(void)
1518{
1519 phandle ibmvtpm_node;
1520 ihandle ibmvtpm_inst;
1521 u32 entry = 0, size = 0, succ = 0;
1522 u64 base;
1523 __be32 val;
1524
1525 prom_debug("prom_instantiate_sml: start...\n");
1526
1527 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
1528 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1529 if (!PHANDLE_VALID(ibmvtpm_node))
1530 return;
1531
1532 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
1533 if (!IHANDLE_VALID(ibmvtpm_inst)) {
1534 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1535 return;
1536 }
1537
1538 if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
1539 &val, sizeof(val)) != PROM_ERROR) {
1540 if (call_prom_ret("call-method", 2, 2, &succ,
1541 ADDR("reformat-sml-to-efi-alignment"),
1542 ibmvtpm_inst) != 0 || succ == 0) {
1543 prom_printf("Reformat SML to EFI alignment failed\n");
1544 return;
1545 }
1546
1547 if (call_prom_ret("call-method", 2, 2, &size,
1548 ADDR("sml-get-allocated-size"),
1549 ibmvtpm_inst) != 0 || size == 0) {
1550 prom_printf("SML get allocated size failed\n");
1551 return;
1552 }
1553 } else {
1554 if (call_prom_ret("call-method", 2, 2, &size,
1555 ADDR("sml-get-handover-size"),
1556 ibmvtpm_inst) != 0 || size == 0) {
1557 prom_printf("SML get handover size failed\n");
1558 return;
1559 }
1560 }
1561
1562 base = alloc_down(size, PAGE_SIZE, 0);
1563 if (base == 0)
1564 prom_panic("Could not allocate memory for sml\n");
1565
1566 prom_printf("instantiating sml at 0x%x...", base);
1567
1568 memset((void *)base, 0, size);
1569
1570 if (call_prom_ret("call-method", 4, 2, &entry,
1571 ADDR("sml-handover"),
1572 ibmvtpm_inst, size, base) != 0 || entry == 0) {
1573 prom_printf("SML handover failed\n");
1574 return;
1575 }
1576 prom_printf(" done\n");
1577
1578 reserve_mem(base, size);
1579
1580 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
1581 &base, sizeof(base));
1582 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
1583 &size, sizeof(size));
1584
1585 prom_debug("sml base = 0x%x\n", base);
1586 prom_debug("sml size = 0x%x\n", (long)size);
1587
1588 prom_debug("prom_instantiate_sml: end...\n");
1589}
1590
1591/*
1592 * Allocate room for and initialize TCE tables
1593 */
1594#ifdef __BIG_ENDIAN__
1595static void __init prom_initialize_tce_table(void)
1596{
1597 phandle node;
1598 ihandle phb_node;
1599 char compatible[64], type[64], model[64];
1600 char *path = prom_scratch;
1601 u64 base, align;
1602 u32 minalign, minsize;
1603 u64 tce_entry, *tce_entryp;
1604 u64 local_alloc_top, local_alloc_bottom;
1605 u64 i;
1606
1607 if (prom_iommu_off)
1608 return;
1609
1610 prom_debug("starting prom_initialize_tce_table\n");
1611
1612 /* Cache current top of allocs so we reserve a single block */
1613 local_alloc_top = alloc_top_high;
1614 local_alloc_bottom = local_alloc_top;
1615
1616 /* Search all nodes looking for PHBs. */
1617 for (node = 0; prom_next_node(&node); ) {
1618 compatible[0] = 0;
1619 type[0] = 0;
1620 model[0] = 0;
1621 prom_getprop(node, "compatible",
1622 compatible, sizeof(compatible));
1623 prom_getprop(node, "device_type", type, sizeof(type));
1624 prom_getprop(node, "model", model, sizeof(model));
1625
1626 if ((type[0] == 0) || (strstr(type, "pci") == NULL))
1627 continue;
1628
1629 /* Keep the old logic intact to avoid regression. */
1630 if (compatible[0] != 0) {
1631 if ((strstr(compatible, "python") == NULL) &&
1632 (strstr(compatible, "Speedwagon") == NULL) &&
1633 (strstr(compatible, "Winnipeg") == NULL))
1634 continue;
1635 } else if (model[0] != 0) {
1636 if ((strstr(model, "ython") == NULL) &&
1637 (strstr(model, "peedwagon") == NULL) &&
1638 (strstr(model, "innipeg") == NULL))
1639 continue;
1640 }
1641
1642 if (prom_getprop(node, "tce-table-minalign", &minalign,
1643 sizeof(minalign)) == PROM_ERROR)
1644 minalign = 0;
1645 if (prom_getprop(node, "tce-table-minsize", &minsize,
1646 sizeof(minsize)) == PROM_ERROR)
1647 minsize = 4UL << 20;
1648
1649 /*
1650 * Even though we read what OF wants, we just set the table
1651 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
1652 * By doing this, we avoid the pitfalls of trying to DMA to
1653 * MMIO space and the DMA alias hole.
1654 *
1655 * On POWER4, firmware sets the TCE region by assuming
1656 * each TCE table is 8MB. Using this memory for anything
1657 * else will impact performance, so we always allocate 8MB.
1658 * Anton
1659 */
1660 if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p))
1661 minsize = 8UL << 20;
1662 else
1663 minsize = 4UL << 20;
1664
1665 /* Align to the greater of the align or size */
1666 align = max(minalign, minsize);
1667 base = alloc_down(minsize, align, 1);
1668 if (base == 0)
1669 prom_panic("ERROR, cannot find space for TCE table.\n");
1670 if (base < local_alloc_bottom)
1671 local_alloc_bottom = base;
1672
1673 /* It seems OF doesn't null-terminate the path :-( */
1674 memset(path, 0, PROM_SCRATCH_SIZE);
1675 /* Call OF to setup the TCE hardware */
1676 if (call_prom("package-to-path", 3, 1, node,
1677 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1678 prom_printf("package-to-path failed\n");
1679 }
1680
1681 /* Save away the TCE table attributes for later use. */
1682 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
1683 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
1684
1685 prom_debug("TCE table: %s\n", path);
1686 prom_debug("\tnode = 0x%x\n", node);
1687 prom_debug("\tbase = 0x%x\n", base);
1688 prom_debug("\tsize = 0x%x\n", minsize);
1689
1690 /* Initialize the table to have a one-to-one mapping
1691 * over the allocated size.
1692 */
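		/* The low two bits (0x3) grant read and write access. */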
1693 tce_entryp = (u64 *)base;
1694 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
1695 tce_entry = (i << PAGE_SHIFT);
1696 tce_entry |= 0x3;
1697 *tce_entryp = tce_entry;
1698 }
1699
1700 prom_printf("opening PHB %s", path);
1701 phb_node = call_prom("open", 1, 1, path);
1702 if (phb_node == 0)
1703 prom_printf("... failed\n");
1704 else
1705 prom_printf("... done\n");
1706
1707 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1708 phb_node, -1, minsize,
1709 (u32) base, (u32) (base >> 32));
1710 call_prom("close", 1, 0, phb_node);
1711 }
1712
1713 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1714
1715 /* These are only really needed if there is a memory limit in
1716 * effect, but we don't know so export them always. */
1717 prom_tce_alloc_start = local_alloc_bottom;
1718 prom_tce_alloc_end = local_alloc_top;
1719
1720 /* Flag the first invalid entry */
1721 prom_debug("ending prom_initialize_tce_table\n");
1722}
1723#endif /* __BIG_ENDIAN__ */
1724#endif /* CONFIG_PPC64 */
1725
1726/*
1727 * With CHRP SMP we need to use the OF to start the other processors.
1728 * We can't wait until smp_boot_cpus (the OF is trashed by then)
1729 * so we have to put the processors into a holding pattern controlled
1730 * by the kernel (not OF) before we destroy the OF.
1731 *
1732 * This uses a chunk of low memory, puts some holding pattern
1733 * code there and sends the other processors off to there until
1734 * smp_boot_cpus tells them to do something. The holding pattern
1735 * checks that address until its cpu # is there; when it is, that
1736 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
1737 * of setting those values.
1738 *
1739 * We also use physical address 0x4 here to tell when a cpu
1740 * is in its holding pattern code.
1741 *
1742 * -- Cort
1743 */
1744/*
1745 * We want to reference the copy of __secondary_hold_* in the
1746 * 0 - 0x100 address range
1747 */
1748#define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
1749
1750static void __init prom_hold_cpus(void)
1751{
1752 unsigned long i;
1753 phandle node;
1754 char type[64];
1755 unsigned long *spinloop
1756 = (void *) LOW_ADDR(__secondary_hold_spinloop);
1757 unsigned long *acknowledge
1758 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
1759 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1760
1761 /*
1762 * On pseries, if RTAS supports "query-cpu-stopped-state",
1763 * we skip this stage, the CPUs will be started by the
1764 * kernel using RTAS.
1765 */
1766 if ((of_platform == PLATFORM_PSERIES ||
1767 of_platform == PLATFORM_PSERIES_LPAR) &&
1768 rtas_has_query_cpu_stopped) {
1769 prom_printf("prom_hold_cpus: skipped\n");
1770 return;
1771 }
1772
1773 prom_debug("prom_hold_cpus: start...\n");
1774 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
1775 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
1776 prom_debug(" 1) acknowledge = 0x%x\n",
1777 (unsigned long)acknowledge);
1778 prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
1779 prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);
1780
1781 /* Set the common spinloop variable, so all of the secondary cpus
1782 * will block when they are awakened from their OF spinloop.
1783 * This must occur for both SMP and non SMP kernels, since OF will
1784 * be trashed when we move the kernel.
1785 */
1786 *spinloop = 0;
1787
1788 /* look for cpus */
1789 for (node = 0; prom_next_node(&node); ) {
1790 unsigned int cpu_no;
1791 __be32 reg;
1792
1793 type[0] = 0;
1794 prom_getprop(node, "device_type", type, sizeof(type));
1795 if (strcmp(type, "cpu") != 0)
1796 continue;
1797
1798 /* Skip non-configured cpus. */
1799 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1800 if (strcmp(type, "okay") != 0)
1801 continue;
1802
1803 reg = cpu_to_be32(-1); /* make sparse happy */
1804 prom_getprop(node, "reg", &reg, sizeof(reg));
1805 cpu_no = be32_to_cpu(reg);
1806
1807 prom_debug("cpu hw idx = %lu\n", cpu_no);
1808
1809 /* Init the acknowledge var which will be reset by
1810 * the secondary cpu when it awakens from its OF
1811 * spinloop.
1812 */
1813 *acknowledge = (unsigned long)-1;
1814
1815 if (cpu_no != prom.cpu) {
1816 /* Primary Thread of non-boot cpu or any thread */
1817 prom_printf("starting cpu hw idx %lu... ", cpu_no);
1818 call_prom("start-cpu", 3, 0, node,
1819 secondary_hold, cpu_no);
1820
1821 for (i = 0; (i < 100000000) &&
1822 (*acknowledge == ((unsigned long)-1)); i++ )
1823 mb();
1824
1825 if (*acknowledge == cpu_no)
1826 prom_printf("done\n");
1827 else
1828 prom_printf("failed: %x\n", *acknowledge);
1829 }
1830#ifdef CONFIG_SMP
1831 else
1832 prom_printf("boot cpu hw idx %lu\n", cpu_no);
1833#endif /* CONFIG_SMP */
1834 }
1835
1836 prom_debug("prom_hold_cpus: end...\n");
1837}
1838
1839
1840static void __init prom_init_client_services(unsigned long pp)
1841{
1842 /* Get a handle to the prom entry point before anything else */
1843 prom_entry = pp;
1844
1845 /* get a handle for the stdout device */
1846 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
1847 if (!PHANDLE_VALID(prom.chosen))
1848 prom_panic("cannot find chosen"); /* msg won't be printed :( */
1849
1850 /* get device tree root */
1851 prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
1852 if (!PHANDLE_VALID(prom.root))
1853 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
1854
1855 prom.mmumap = 0;
1856}
1857
1858#ifdef CONFIG_PPC32
1859/*
1860 * For really old powermacs, we need to map things we claim.
1861 * For that, we need the ihandle of the mmu.
1862 * Also, on the longtrail, we need to work around other bugs.
1863 */
1864static void __init prom_find_mmu(void)
1865{
1866 phandle oprom;
1867 char version[64];
1868
1869 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
1870 if (!PHANDLE_VALID(oprom))
1871 return;
1872 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
1873 return;
1874 version[sizeof(version) - 1] = 0;
1875 /* XXX might need to add other versions here */
1876 if (strcmp(version, "Open Firmware, 1.0.5") == 0)
1877 of_workarounds = OF_WA_CLAIM;
1878 else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
1879 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
1880 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
1881 } else
1882 return;
1883 prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
1884 prom_getprop(prom.chosen, "mmu", &prom.mmumap,
1885 sizeof(prom.mmumap));
1886 prom.mmumap = be32_to_cpu(prom.mmumap);
1887 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
1888 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
1889}
1890#else
1891#define prom_find_mmu()
1892#endif
1893
1894static void __init prom_init_stdout(void)
1895{
1896 char *path = of_stdout_device;
1897 char type[16];
1898 phandle stdout_node;
1899 __be32 val;
1900
1901 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
1902 prom_panic("cannot find stdout");
1903
1904 prom.stdout = be32_to_cpu(val);
1905
1906 /* Get the full OF pathname of the stdout device */
1907 memset(path, 0, 256);
1908 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
1909 prom_printf("OF stdout device is: %s\n", of_stdout_device);
1910 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
1911 path, strlen(path) + 1);
1912
1913 /* instance-to-package fails on PA-Semi */
1914 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
1915 if (stdout_node != PROM_ERROR) {
1916 val = cpu_to_be32(stdout_node);
1917 prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
1918 &val, sizeof(val));
1919
1920 /* If it's a display, note it */
1921 memset(type, 0, sizeof(type));
1922 prom_getprop(stdout_node, "device_type", type, sizeof(type));
1923 if (strcmp(type, "display") == 0)
1924 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
1925 }
1926}
1927
1928static int __init prom_find_machine_type(void)
1929{
1930 char compat[256];
1931 int len, i = 0;
1932#ifdef CONFIG_PPC64
1933 phandle rtas;
1934 int x;
1935#endif
1936
1937 /* Look for a PowerMac or a Cell */
1938 len = prom_getprop(prom.root, "compatible",
1939 compat, sizeof(compat)-1);
1940 if (len > 0) {
1941 compat[len] = 0;
1942 while (i < len) {
1943 char *p = &compat[i];
1944 int sl = strlen(p);
1945 if (sl == 0)
1946 break;
1947 if (strstr(p, "Power Macintosh") ||
1948 strstr(p, "MacRISC"))
1949 return PLATFORM_POWERMAC;
1950#ifdef CONFIG_PPC64
1951 /* We must make sure we don't detect the IBM Cell
1952 * blades as pSeries due to some firmware issues,
1953 * so we do it here.
1954 */
1955 if (strstr(p, "IBM,CBEA") ||
1956 strstr(p, "IBM,CPBW-1.0"))
1957 return PLATFORM_GENERIC;
1958#endif /* CONFIG_PPC64 */
1959 i += sl + 1;
1960 }
1961 }
1962#ifdef CONFIG_PPC64
1963 /* Try to detect OPAL */
1964 if (PHANDLE_VALID(call_prom("finddevice", 1, 1, ADDR("/ibm,opal"))))
1965 return PLATFORM_OPAL;
1966
1967 /* Try to figure out if it's an IBM pSeries or any other
1968 * PAPR compliant platform. We assume it is if :
1969 * - /device_type is "chrp" (please, do NOT use that for future
1970 * non-IBM designs!)
1971 * - it has /rtas
1972 */
1973 len = prom_getprop(prom.root, "device_type",
1974 compat, sizeof(compat)-1);
1975 if (len <= 0)
1976 return PLATFORM_GENERIC;
1977 if (strcmp(compat, "chrp"))
1978 return PLATFORM_GENERIC;
1979
1980 /* Default to pSeries. We need to know if we are running LPAR */
1981 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1982 if (!PHANDLE_VALID(rtas))
1983 return PLATFORM_GENERIC;
1984 x = prom_getproplen(rtas, "ibm,hypertas-functions");
1985 if (x != PROM_ERROR) {
1986 prom_debug("Hypertas detected, assuming LPAR !\n");
1987 return PLATFORM_PSERIES_LPAR;
1988 }
1989 return PLATFORM_PSERIES;
1990#else
1991 return PLATFORM_GENERIC;
1992#endif
1993}
1994
1995static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
1996{
1997 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
1998}
1999
2000/*
2001 * If we have a display that we don't know how to drive,
2002 * we will want to try to execute OF's open method for it
2003 * later. However, OF will probably fall over if we do that once
2004 * we've taken over the MMU.
2005 * So we check whether we will need to open the display,
2006 * and if so, open it now.
2007 */
2008static void __init prom_check_displays(void)
2009{
2010 char type[16], *path;
2011 phandle node;
2012 ihandle ih;
2013 int i;
2014
2015 static unsigned char default_colors[] = {
2016 0x00, 0x00, 0x00,
2017 0x00, 0x00, 0xaa,
2018 0x00, 0xaa, 0x00,
2019 0x00, 0xaa, 0xaa,
2020 0xaa, 0x00, 0x00,
2021 0xaa, 0x00, 0xaa,
2022 0xaa, 0xaa, 0x00,
2023 0xaa, 0xaa, 0xaa,
2024 0x55, 0x55, 0x55,
2025 0x55, 0x55, 0xff,
2026 0x55, 0xff, 0x55,
2027 0x55, 0xff, 0xff,
2028 0xff, 0x55, 0x55,
2029 0xff, 0x55, 0xff,
2030 0xff, 0xff, 0x55,
2031 0xff, 0xff, 0xff
2032 };
2033 const unsigned char *clut;
2034
2035 prom_debug("Looking for displays\n");
2036 for (node = 0; prom_next_node(&node); ) {
2037 memset(type, 0, sizeof(type));
2038 prom_getprop(node, "device_type", type, sizeof(type));
2039 if (strcmp(type, "display") != 0)
2040 continue;
2041
2042 /* It seems OF doesn't null-terminate the path :-( */
2043 path = prom_scratch;
2044 memset(path, 0, PROM_SCRATCH_SIZE);
2045
2046 /*
2047 * leave some room at the end of the path for appending extra
2048 * arguments
2049 */
2050 if (call_prom("package-to-path", 3, 1, node, path,
2051 PROM_SCRATCH_SIZE-10) == PROM_ERROR)
2052 continue;
2053 prom_printf("found display : %s, opening... ", path);
2054
2055 ih = call_prom("open", 1, 1, path);
2056 if (ih == 0) {
2057 prom_printf("failed\n");
2058 continue;
2059 }
2060
2061 /* Success */
2062 prom_printf("done\n");
2063 prom_setprop(node, path, "linux,opened", NULL, 0);
2064
2065 /* Setup a usable color table when the appropriate
2066 * method is available. Should update this to set-colors */
2067 clut = default_colors;
2068 for (i = 0; i < 16; i++, clut += 3)
2069 if (prom_set_color(ih, i, clut[0], clut[1],
2070 clut[2]) != 0)
2071 break;
2072
2073#ifdef CONFIG_LOGO_LINUX_CLUT224
2074 clut = PTRRELOC(logo_linux_clut224.clut);
2075 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2076 if (prom_set_color(ih, i + 32, clut[0], clut[1],
2077 clut[2]) != 0)
2078 break;
2079#endif /* CONFIG_LOGO_LINUX_CLUT224 */
2080
2081#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2082 if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2083 PROM_ERROR) {
2084 u32 width, height, pitch, addr;
2085
2086 prom_printf("Setting btext !\n");
2087 prom_getprop(node, "width", &width, 4);
2088 prom_getprop(node, "height", &height, 4);
2089 prom_getprop(node, "linebytes", &pitch, 4);
2090 prom_getprop(node, "address", &addr, 4);
2091 prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2092 width, height, pitch, addr);
2093 btext_setup_display(width, height, 8, pitch, addr);
2094 }
2095#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2096 }
2097}
2098
2099
2100/* Return (relocated) pointer to this much memory: moves initrd if reqd. */
2101static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2102 unsigned long needed, unsigned long align)
2103{
2104 void *ret;
2105
2106 *mem_start = _ALIGN(*mem_start, align);
2107 while ((*mem_start + needed) > *mem_end) {
2108 unsigned long room, chunk;
2109
2110 prom_debug("Chunk exhausted, claiming more at %x...\n",
2111 alloc_bottom);
2112 room = alloc_top - alloc_bottom;
2113 if (room > DEVTREE_CHUNK_SIZE)
2114 room = DEVTREE_CHUNK_SIZE;
2115 if (room < PAGE_SIZE)
2116 prom_panic("No memory for flatten_device_tree "
2117 "(no room)\n");
2118 chunk = alloc_up(room, 0);
2119 if (chunk == 0)
2120 prom_panic("No memory for flatten_device_tree "
2121 "(claim failed)\n");
2122 *mem_end = chunk + room;
2123 }
2124
2125 ret = (void *)*mem_start;
2126 *mem_start += needed;
2127
2128 return ret;
2129}
2130
2131#define dt_push_token(token, mem_start, mem_end) do { \
2132 void *room = make_room(mem_start, mem_end, 4, 4); \
2133 *(__be32 *)room = cpu_to_be32(token); \
2134 } while(0)
2135
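/*
 * Look str up in the strings block built so far; return its offset from
 * dt_string_start, or 0 if it has not been stored yet.
 */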
2136static unsigned long __init dt_find_string(char *str)
2137{
2138 char *s, *os;
2139
2140 s = os = (char *)dt_string_start;
2141 s += 4;
2142 while (s < (char *)dt_string_end) {
2143 if (strcmp(s, str) == 0)
2144 return s - os;
2145 s += strlen(s) + 1;
2146 }
2147 return 0;
2148}
2149
2150/*
2151 * The Open Firmware 1275 specification states properties must be 31 bytes or
2152 * less; however, not all firmware obeys this. Make it 64 bytes to be safe.
2153 */
2154#define MAX_PROPERTY_NAME 64
2155
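/*
 * First pass of the flattening: walk the whole tree and store each
 * distinct property name once in the strings block.
 */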
2156static void __init scan_dt_build_strings(phandle node,
2157 unsigned long *mem_start,
2158 unsigned long *mem_end)
2159{
2160 char *prev_name, *namep, *sstart;
2161 unsigned long soff;
2162 phandle child;
2163
2164 sstart = (char *)dt_string_start;
2165
2166 /* get and store all property names */
2167 prev_name = "";
2168 for (;;) {
2169 /* 64 is max len of name including nul. */
2170 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2171 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2172 /* No more nodes: unwind alloc */
2173 *mem_start = (unsigned long)namep;
2174 break;
2175 }
2176
2177 /* skip "name" */
2178 if (strcmp(namep, "name") == 0) {
2179 *mem_start = (unsigned long)namep;
2180 prev_name = "name";
2181 continue;
2182 }
2183 /* get/create string entry */
2184 soff = dt_find_string(namep);
2185 if (soff != 0) {
2186 *mem_start = (unsigned long)namep;
2187 namep = sstart + soff;
2188 } else {
2189 /* Trim off some if we can */
2190 *mem_start = (unsigned long)namep + strlen(namep) + 1;
2191 dt_string_end = *mem_start;
2192 }
2193 prev_name = namep;
2194 }
2195
2196 /* do all our children */
2197 child = call_prom("child", 1, 1, node);
2198 while (child != 0) {
2199 scan_dt_build_strings(child, mem_start, mem_end);
2200 child = call_prom("peer", 1, 1, child);
2201 }
2202}
2203
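/*
 * Second pass: emit the structure block for this node (begin-node token,
 * properties, then children, then the end-node token).
 */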
2204static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2205 unsigned long *mem_end)
2206{
2207 phandle child;
2208 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2209 unsigned long soff;
2210 unsigned char *valp;
2211 static char pname[MAX_PROPERTY_NAME];
2212 int l, room, has_phandle = 0;
2213
2214 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2215
2216 /* get the node's full name */
2217 namep = (char *)*mem_start;
2218 room = *mem_end - *mem_start;
2219 if (room > 255)
2220 room = 255;
2221 l = call_prom("package-to-path", 3, 1, node, namep, room);
2222 if (l >= 0) {
2223 /* Didn't fit? Get more room. */
2224 if (l >= room) {
2225 if (l >= *mem_end - *mem_start)
2226 namep = make_room(mem_start, mem_end, l+1, 1);
2227 call_prom("package-to-path", 3, 1, node, namep, l);
2228 }
2229 namep[l] = '\0';
2230
2231 /* Fixup an Apple bug where they have bogus \0 chars in the
2232 * middle of the path in some properties, and extract
2233 * the unit name (everything after the last '/').
2234 */
2235 for (lp = p = namep, ep = namep + l; p < ep; p++) {
2236 if (*p == '/')
2237 lp = namep;
2238 else if (*p != 0)
2239 *lp++ = *p;
2240 }
2241 *lp = 0;
2242 *mem_start = _ALIGN((unsigned long)lp + 1, 4);
2243 }
2244
2245 /* get it again for debugging */
2246 path = prom_scratch;
2247 memset(path, 0, PROM_SCRATCH_SIZE);
2248 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
2249
2250 /* get and store all properties */
2251 prev_name = "";
2252 sstart = (char *)dt_string_start;
2253 for (;;) {
2254 if (call_prom("nextprop", 3, 1, node, prev_name,
2255 pname) != 1)
2256 break;
2257
2258 /* skip "name" */
2259 if (strcmp(pname, "name") == 0) {
2260 prev_name = "name";
2261 continue;
2262 }
2263
2264 /* find string offset */
2265 soff = dt_find_string(pname);
2266 if (soff == 0) {
2267 prom_printf("WARNING: Can't find string index for"
2268 " <%s>, node %s\n", pname, path);
2269 break;
2270 }
2271 prev_name = sstart + soff;
2272
2273 /* get length */
2274 l = call_prom("getproplen", 2, 1, node, pname);
2275
2276 /* sanity checks */
2277 if (l == PROM_ERROR)
2278 continue;
2279
2280 /* push property head */
2281 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2282 dt_push_token(l, mem_start, mem_end);
2283 dt_push_token(soff, mem_start, mem_end);
2284
2285 /* push property content */
2286 valp = make_room(mem_start, mem_end, l, 4);
2287 call_prom("getprop", 4, 1, node, pname, valp, l);
2288 *mem_start = _ALIGN(*mem_start, 4);
2289
2290 if (!strcmp(pname, "phandle"))
2291 has_phandle = 1;
2292 }
2293
2294 /* Add a "linux,phandle" property if no "phandle" property already
2295 * existed (can happen with OPAL)
2296 */
2297 if (!has_phandle) {
2298 soff = dt_find_string("linux,phandle");
2299 if (soff == 0)
2300 prom_printf("WARNING: Can't find string index for"
2301 " <linux,phandle> node %s\n", path);
2302 else {
2303 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2304 dt_push_token(4, mem_start, mem_end);
2305 dt_push_token(soff, mem_start, mem_end);
2306 valp = make_room(mem_start, mem_end, 4, 4);
2307 *(__be32 *)valp = cpu_to_be32(node);
2308 }
2309 }
2310
2311 /* do all our children */
2312 child = call_prom("child", 1, 1, node);
2313 while (child != 0) {
2314 scan_dt_build_struct(child, mem_start, mem_end);
2315 child = call_prom("peer", 1, 1, child);
2316 }
2317
2318 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2319}
2320
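/*
 * Build the flattened device-tree blob: header, reserve map, strings
 * block and structure block, ready to be handed over to the kernel.
 */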
2321static void __init flatten_device_tree(void)
2322{
2323 phandle root;
2324 unsigned long mem_start, mem_end, room;
2325 struct boot_param_header *hdr;
2326 char *namep;
2327 u64 *rsvmap;
2328
2329 /*
2330 * Check how much room we have between alloc top & bottom (+/- a
2331 * few pages), crop to 1MB, as this is our "chunk" size
2332 */
2333 room = alloc_top - alloc_bottom - 0x4000;
2334 if (room > DEVTREE_CHUNK_SIZE)
2335 room = DEVTREE_CHUNK_SIZE;
2336 prom_debug("starting device tree allocs at %x\n", alloc_bottom);
2337
2338 /* Now try to claim that */
2339 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2340 if (mem_start == 0)
2341 prom_panic("Can't allocate initial device-tree chunk\n");
2342 mem_end = mem_start + room;
2343
2344 /* Get root of tree */
2345 root = call_prom("peer", 1, 1, (phandle)0);
2346 if (root == (phandle)0)
2347 prom_panic ("couldn't get device tree root\n");
2348
2349 /* Build header and make room for mem rsv map */
2350 mem_start = _ALIGN(mem_start, 4);
2351 hdr = make_room(&mem_start, &mem_end,
2352 sizeof(struct boot_param_header), 4);
2353 dt_header_start = (unsigned long)hdr;
2354 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2355
2356 /* Start of strings */
2357 mem_start = PAGE_ALIGN(mem_start);
2358 dt_string_start = mem_start;
2359 mem_start += 4; /* hole */
2360
2361 /* Add "linux,phandle" in there, we'll need it */
2362 namep = make_room(&mem_start, &mem_end, 16, 1);
2363 strcpy(namep, "linux,phandle");
2364 mem_start = (unsigned long)namep + strlen(namep) + 1;
2365
2366 /* Build string array */
2367 prom_printf("Building dt strings...\n");
2368 scan_dt_build_strings(root, &mem_start, &mem_end);
2369 dt_string_end = mem_start;
2370
2371 /* Build structure */
2372 mem_start = PAGE_ALIGN(mem_start);
2373 dt_struct_start = mem_start;
2374 prom_printf("Building dt structure...\n");
2375 scan_dt_build_struct(root, &mem_start, &mem_end);
2376 dt_push_token(OF_DT_END, &mem_start, &mem_end);
2377 dt_struct_end = PAGE_ALIGN(mem_start);
2378
	/* Finish header */
	hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
	hdr->magic = cpu_to_be32(OF_DT_HEADER);
	hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
	hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
	hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
	hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
	hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
	hdr->version = cpu_to_be32(OF_DT_VERSION);
	/* Version 16 is not backward compatible */
	hdr->last_comp_version = cpu_to_be32(0x10);

	/* Copy the reserve map in */
	memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));

#ifdef DEBUG_PROM
	{
		int i;
		prom_printf("reserved memory map:\n");
		for (i = 0; i < mem_reserve_cnt; i++)
			prom_printf(" %llx - %llx\n",
				    be64_to_cpu(mem_reserve_map[i].base),
				    be64_to_cpu(mem_reserve_map[i].size));
	}
#endif
	/* Bump mem_reserve_cnt to cause further reservations to fail
	 * since it's too late.
	 */
	mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;

	prom_printf("Device tree strings 0x%lx -> 0x%lx\n",
		    dt_string_start, dt_string_end);
	prom_printf("Device tree struct 0x%lx -> 0x%lx\n",
		    dt_struct_start, dt_struct_end);
}

#ifdef CONFIG_PPC_MAPLE
/* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
 * The values are bad, and it doesn't even have the right number of cells. */
static void __init fixup_device_tree_maple(void)
{
	phandle isa;
	u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
	u32 isa_ranges[6];
	char *name;

	name = "/ht@0/isa@4";
	isa = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(isa)) {
		name = "/ht@0/isa@6";
		isa = call_prom("finddevice", 1, 1, ADDR(name));
		rloc = 0x01003000; /* IO space; PCI device = 6 */
	}
	if (!PHANDLE_VALID(isa))
		return;

	if (prom_getproplen(isa, "ranges") != 12)
		return;
	if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
	    == PROM_ERROR)
		return;

	if (isa_ranges[0] != 0x1 ||
	    isa_ranges[1] != 0xf4000000 ||
	    isa_ranges[2] != 0x00010000)
		return;

	prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");

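	/*
	 * Rewrite "ranges" with the proper number of cells: a 2-cell ISA
	 * child address (I/O space, offset 0), a 3-cell PCI parent address
	 * built from rloc, and a 1-cell size of 64KB.
	 */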
	isa_ranges[0] = 0x1;
	isa_ranges[1] = 0x0;
	isa_ranges[2] = rloc;
	isa_ranges[3] = 0x0;
	isa_ranges[4] = 0x0;
	isa_ranges[5] = 0x00010000;
	prom_setprop(isa, name, "ranges",
		     isa_ranges, sizeof(isa_ranges));
}

#define CPC925_MC_START		0xf8000000
#define CPC925_MC_LENGTH	0x1000000
/* The values for the memory-controller node don't have the right number of cells */
static void __init fixup_device_tree_maple_memory_controller(void)
{
	phandle mc;
	u32 mc_reg[4];
	char *name = "/hostbridge@f8000000";
	u32 ac, sc;

	mc = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(mc))
		return;

	if (prom_getproplen(mc, "reg") != 8)
		return;

	prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
	prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
	if ((ac != 2) || (sc != 2))
		return;

	if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
		return;

	if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
		return;

	prom_printf("Fixing up bogus hostbridge on Maple...\n");

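	/*
	 * The root node uses two address cells and two size cells, so
	 * re-encode "reg" as a 64-bit address/length pair (four u32s).
	 */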
	mc_reg[0] = 0x0;
	mc_reg[1] = CPC925_MC_START;
	mc_reg[2] = 0x0;
	mc_reg[3] = CPC925_MC_LENGTH;
	prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
}
#else
#define fixup_device_tree_maple()
#define fixup_device_tree_maple_memory_controller()
#endif

#ifdef CONFIG_PPC_CHRP
/*
 * Pegasos and BriQ lack the "ranges" property in the isa node
 * Pegasos needs decimal IRQ 14/15, not hexadecimal
 * Pegasos has the IDE configured in legacy mode, but advertised as native
 */
static void __init fixup_device_tree_chrp(void)
{
	phandle ph;
	u32 prop[6];
	u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
	char *name;
	int rc;

	name = "/pci@80000000/isa@c";
	ph = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(ph)) {
		name = "/pci@ff500000/isa@6";
		ph = call_prom("finddevice", 1, 1, ADDR(name));
		rloc = 0x01003000; /* IO space; PCI device = 6 */
	}
	if (PHANDLE_VALID(ph)) {
		rc = prom_getproplen(ph, "ranges");
		if (rc == 0 || rc == PROM_ERROR) {
			prom_printf("Fixing up missing ISA range on Pegasos...\n");

			prop[0] = 0x1;
			prop[1] = 0x0;
			prop[2] = rloc;
			prop[3] = 0x0;
			prop[4] = 0x0;
			prop[5] = 0x00010000;
			prom_setprop(ph, name, "ranges", prop, sizeof(prop));
		}
	}

	name = "/pci@80000000/ide@C,1";
	ph = call_prom("finddevice", 1, 1, ADDR(name));
	if (PHANDLE_VALID(ph)) {
		prom_printf("Fixing up IDE interrupt on Pegasos...\n");
		prop[0] = 14;
		prop[1] = 0x0;
		prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
		prom_printf("Fixing up IDE class-code on Pegasos...\n");
		rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
		if (rc == sizeof(u32)) {
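			/*
			 * Clear the native-mode programming-interface bits
			 * for both IDE channels so the controller is treated
			 * as legacy mode, matching how it is really set up.
			 */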
			prop[0] &= ~0x5;
			prom_setprop(ph, name, "class-code", prop, sizeof(u32));
		}
	}
}
#else
#define fixup_device_tree_chrp()
#endif

#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
static void __init fixup_device_tree_pmac(void)
{
	phandle u3, i2c, mpic;
	u32 u3_rev;
	u32 interrupts[2];
	u32 parent;

	/* Some G5s have a missing interrupt definition, fix it up here */
	u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
	if (!PHANDLE_VALID(u3))
		return;
	i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
	if (!PHANDLE_VALID(i2c))
		return;
	mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
	if (!PHANDLE_VALID(mpic))
		return;

	/* check if proper rev of u3 */
	if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
	    == PROM_ERROR)
		return;
	if (u3_rev < 0x35 || u3_rev > 0x39)
		return;
	/* does it need fixup? */
	if (prom_getproplen(i2c, "interrupts") > 0)
		return;

	prom_printf("fixing up bogus interrupts for u3 i2c...\n");

	/* interrupt on this revision of u3 is number 0 and level */
	interrupts[0] = 0;
	interrupts[1] = 1;
	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
		     &interrupts, sizeof(interrupts));
	parent = (u32)mpic;
	prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
		     &parent, sizeof(parent));
}
#else
#define fixup_device_tree_pmac()
#endif

#ifdef CONFIG_PPC_EFIKA
/*
 * The MPC5200 FEC driver requires a phy-handle property to tell it how
 * to talk to the phy. If the phy-handle property is missing, then this
 * function is called to add the appropriate nodes and link it to the
 * ethernet node.
 */
static void __init fixup_device_tree_efika_add_phy(void)
{
	u32 node;
	char prop[64];
	int rv;

	/* Check if /builtin/ethernet exists - bail if it doesn't */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
	if (!PHANDLE_VALID(node))
		return;

	/* Check if the phy-handle property exists - bail if it does */
	rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
	if (!rv)
		return;

	/*
	 * At this point the ethernet device doesn't have a phy described.
	 * Now we need to add the missing phy node and linkage
	 */

	/* Check for an MDIO bus node - if missing then create one */
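	/*
	 * The Forth snippet below asks OF to create /builtin/mdio with
	 * #address-cells = 1, #size-cells = 0, compatible
	 * "fsl,mpc5200b-mdio", a reg of 0xf0003000/0x400 and an
	 * interrupts specifier of <2 5 3>.
	 */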
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
	if (!PHANDLE_VALID(node)) {
		prom_printf("Adding Ethernet MDIO node\n");
		call_prom("interpret", 1, 1,
			" s\" /builtin\" find-device"
			" new-device"
				" 1 encode-int s\" #address-cells\" property"
				" 0 encode-int s\" #size-cells\" property"
				" s\" mdio\" device-name"
				" s\" fsl,mpc5200b-mdio\" encode-string"
				" s\" compatible\" property"
				" 0xf0003000 0x400 reg"
				" 0x2 encode-int"
				" 0x5 encode-int encode+"
				" 0x3 encode-int encode+"
				" s\" interrupts\" property"
			" finish-device");
	}

	/* Check for a PHY device node - if missing then create one and
	 * give its phandle to the ethernet node */
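	/*
	 * The script below creates the PHY at /builtin/mdio/ethernet-phy
	 * with reg = <0x10>, converts the new device's ihandle into a
	 * phandle (my-self ihandle>phandle) and stores that value as the
	 * phy-handle property of /builtin/ethernet.
	 */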
	node = call_prom("finddevice", 1, 1,
			 ADDR("/builtin/mdio/ethernet-phy"));
	if (!PHANDLE_VALID(node)) {
		prom_printf("Adding Ethernet PHY node\n");
		call_prom("interpret", 1, 1,
			" s\" /builtin/mdio\" find-device"
			" new-device"
				" s\" ethernet-phy\" device-name"
				" 0x10 encode-int s\" reg\" property"
				" my-self"
				" ihandle>phandle"
			" finish-device"
			" s\" /builtin/ethernet\" find-device"
				" encode-int"
				" s\" phy-handle\" property"
			" device-end");
	}
}

static void __init fixup_device_tree_efika(void)
{
	int sound_irq[3] = { 2, 2, 0 };
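	/* One 3-cell interrupt specifier for each of the 16 BestComm tasks */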
	int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
				3,4,0, 3,5,0, 3,6,0, 3,7,0,
				3,8,0, 3,9,0, 3,10,0, 3,11,0,
				3,12,0, 3,13,0, 3,14,0, 3,15,0 };
	u32 node;
	char prop[64];
	int rv, len;

	/* Check if we're really running on an EFIKA */
	node = call_prom("finddevice", 1, 1, ADDR("/"));
	if (!PHANDLE_VALID(node))
		return;

	rv = prom_getprop(node, "model", prop, sizeof(prop));
	if (rv == PROM_ERROR)
		return;
	if (strcmp(prop, "EFIKA5K2"))
		return;

	prom_printf("Applying EFIKA device tree fixups\n");

	/* Claiming to be 'chrp' is death */
	node = call_prom("finddevice", 1, 1, ADDR("/"));
	rv = prom_getprop(node, "device_type", prop, sizeof(prop));
	if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
		prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));

	/* CODEGEN,description is exposed in /proc/cpuinfo so
	   fix that too */
	rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
	if (rv != PROM_ERROR && (strstr(prop, "CHRP")))
		prom_setprop(node, "/", "CODEGEN,description",
			     "Efika 5200B PowerPC System",
			     sizeof("Efika 5200B PowerPC System"));

	/* Fixup bestcomm interrupts property */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
	if (PHANDLE_VALID(node)) {
		len = prom_getproplen(node, "interrupts");
		if (len == 12) {
			prom_printf("Fixing bestcomm interrupts property\n");
			prom_setprop(node, "/builtin/bestcomm", "interrupts",
				     bcomm_irq, sizeof(bcomm_irq));
		}
	}

	/* Fixup sound interrupts property */
	node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
	if (PHANDLE_VALID(node)) {
		rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
		if (rv == PROM_ERROR) {
			prom_printf("Adding sound interrupts property\n");
			prom_setprop(node, "/builtin/sound", "interrupts",
				     sound_irq, sizeof(sound_irq));
		}
	}

	/* Make sure ethernet phy-handle property exists */
	fixup_device_tree_efika_add_phy();
}
#else
#define fixup_device_tree_efika()
#endif

#ifdef CONFIG_PPC_PASEMI_NEMO
/*
 * CFE supplied on Nemo is broken in several ways, the biggest
 * problem being that it reassigns ISA interrupts to unused mpic ints.
 * Add an interrupt-controller property for the io-bridge to use
 * and correct the ints so we can attach them to an irq_domain
 */
static void __init fixup_device_tree_pasemi(void)
{
	u32 interrupts[2], parent, rval, val = 0;
	char *name, *pci_name;
	phandle iob, node;

	/* Find the root pci node */
	name = "/pxp@0,e0000000";
	iob = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(iob))
		return;

	/* check if interrupt-controller node set yet */
	if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR)
		return;

	prom_printf("adding interrupt-controller property for SB600...\n");

	prom_setprop(iob, name, "interrupt-controller", &val, 0);

	pci_name = "/pxp@0,e0000000/pci@11";
	node = call_prom("finddevice", 1, 1, ADDR(pci_name));
	parent = ADDR(iob);

	for ( ; prom_next_node(&node); ) {
		/* scan each node for one with an interrupt */
		if (!PHANDLE_VALID(node))
			continue;

		rval = prom_getproplen(node, "interrupts");
		if (rval == 0 || rval == PROM_ERROR)
			continue;

		prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
		if ((interrupts[0] < 212) || (interrupts[0] > 222))
			continue;

		/* found a node, update both interrupts and interrupt-parent */
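		/*
		 * Map the MPIC vectors CFE picked back onto conventional
		 * ISA IRQ numbers: 212-215 become 9-12, 216-220 become 3-7,
		 * 221 becomes 14 and 222 becomes 8.
		 */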
		if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
			interrupts[0] -= 203;
		if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
			interrupts[0] -= 213;
		if (interrupts[0] == 221)
			interrupts[0] = 14;
		if (interrupts[0] == 222)
			interrupts[0] = 8;

		prom_setprop(node, pci_name, "interrupts", interrupts,
			     sizeof(interrupts));
		prom_setprop(node, pci_name, "interrupt-parent", &parent,
			     sizeof(parent));
	}

	/*
	 * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
	 * so that generic isa-bridge code can add the SB600 and its on-board
	 * peripherals.
	 */
	name = "/pxp@0,e0000000/io-bridge@0";
	iob = call_prom("finddevice", 1, 1, ADDR(name));
	if (!PHANDLE_VALID(iob))
		return;

	/* device_type is already set, just change it. */

	prom_printf("Changing device_type of SB600 node...\n");

	prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
}
#else /* !CONFIG_PPC_PASEMI_NEMO */
static inline void fixup_device_tree_pasemi(void) { }
#endif

static void __init fixup_device_tree(void)
{
	fixup_device_tree_maple();
	fixup_device_tree_maple_memory_controller();
	fixup_device_tree_chrp();
	fixup_device_tree_pmac();
	fixup_device_tree_efika();
	fixup_device_tree_pasemi();
}

static void __init prom_find_boot_cpu(void)
{
	__be32 rval;
	ihandle prom_cpu;
	phandle cpu_pkg;

	rval = 0;
	if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
		return;
	prom_cpu = be32_to_cpu(rval);

	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);

	if (!PHANDLE_VALID(cpu_pkg))
		return;

	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
	prom.cpu = be32_to_cpu(rval);

	prom_debug("Booting CPU hw index = %d\n", prom.cpu);
}

static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (r3 && r4 && r4 != 0xdeadbeef) {
		__be64 val;

		prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
		prom_initrd_end = prom_initrd_start + r4;

		val = cpu_to_be64(prom_initrd_start);
		prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
			     &val, sizeof(val));
		val = cpu_to_be64(prom_initrd_end);
		prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
			     &val, sizeof(val));

		reserve_mem(prom_initrd_start,
			    prom_initrd_end - prom_initrd_start);

		prom_debug("initrd_start=0x%lx\n", prom_initrd_start);
		prom_debug("initrd_end=0x%lx\n", prom_initrd_end);
	}
#endif /* CONFIG_BLK_DEV_INITRD */
}

#ifdef CONFIG_PPC64
#ifdef CONFIG_RELOCATABLE
static void reloc_toc(void)
{
}

static void unreloc_toc(void)
{
}
#else
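/*
 * Non-relocatable 64-bit kernels are linked at a fixed address, so the
 * TOC entries used by this file contain absolute addresses.  While we
 * are still running at the address we were loaded at, bias each entry
 * by the load offset (and undo that before jumping into the kernel).
 */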
static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
{
	unsigned long i;
	unsigned long *toc_entry;

	/* Get the start of the TOC by using r2 directly. */
	asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));

	for (i = 0; i < nr_entries; i++) {
		*toc_entry = *toc_entry + offset;
		toc_entry++;
	}
}

static void reloc_toc(void)
{
	unsigned long offset = reloc_offset();
	unsigned long nr_entries =
		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);

	__reloc_toc(offset, nr_entries);

	mb();
}

static void unreloc_toc(void)
{
	unsigned long offset = reloc_offset();
	unsigned long nr_entries =
		(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);

	mb();

	__reloc_toc(-offset, nr_entries);
}
#endif
#endif

/*
 * We enter here early on, when the Open Firmware prom is still
 * handling exceptions and managing the MMU hash table for us.
 */

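/*
 * r3/r4 carry an optional initrd start and size handed over by the boot
 * loader (see prom_check_initrd()), pp is the entry point of the Open
 * Firmware client interface, and kbase is the address the kernel image
 * was loaded at.
 */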
unsigned long __init prom_init(unsigned long r3, unsigned long r4,
			       unsigned long pp,
			       unsigned long r6, unsigned long r7,
			       unsigned long kbase)
{
	unsigned long hdr;

#ifdef CONFIG_PPC32
	unsigned long offset = reloc_offset();
	reloc_got2(offset);
#else
	reloc_toc();
#endif

	/*
	 * First zero the BSS
	 */
	memset(&__bss_start, 0, __bss_stop - __bss_start);

	/*
	 * Init interface to Open Firmware, get some node references,
	 * like /chosen
	 */
	prom_init_client_services(pp);

	/*
	 * See if this OF is old enough that we need to do explicit maps
	 * and other workarounds
	 */
	prom_find_mmu();

	/*
	 * Init prom stdout device
	 */
	prom_init_stdout();

	prom_printf("Preparing to boot %s", linux_banner);

	/*
	 * Get default machine type. At this point, we do not differentiate
	 * between pSeries SMP and pSeries LPAR
	 */
	of_platform = prom_find_machine_type();
	prom_printf("Detected machine type: %x\n", of_platform);

#ifndef CONFIG_NONSTATIC_KERNEL
	/* Bail if this is a kdump kernel. */
	if (PHYSICAL_START > 0)
		prom_panic("Error: You can't boot a kdump kernel from OF!\n");
#endif

	/*
	 * Check for an initrd
	 */
	prom_check_initrd(r3, r4);

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
	/*
	 * On pSeries, inform the firmware about our capabilities
	 */
	if (of_platform == PLATFORM_PSERIES ||
	    of_platform == PLATFORM_PSERIES_LPAR)
		prom_send_capabilities();
#endif

	/*
	 * Copy the CPU hold code
	 */
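	/*
	 * The hold code sits in the first 0x100 bytes of the kernel image;
	 * copy it from the load address down to physical address 0 so the
	 * CPUs parked by prom_hold_cpus() below can find it at its
	 * link-time location.
	 */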
	if (of_platform != PLATFORM_POWERMAC)
		copy_and_flush(0, kbase, 0x100, 0);

	/*
	 * Do early parsing of command line
	 */
	early_cmdline_parse();

	/*
	 * Initialize memory management within prom_init
	 */
	prom_init_mem();

	/*
	 * Determine which cpu is actually running right _now_
	 */
	prom_find_boot_cpu();

	/*
	 * Initialize display devices
	 */
	prom_check_displays();

#if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
	/*
	 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything
	 * else that uses the allocator, since we need to make sure we get
	 * the top of memory available for us here...
	 */
	if (of_platform == PLATFORM_PSERIES)
		prom_initialize_tce_table();
#endif

	/*
	 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
	 * have a usable RTAS implementation.
	 */
	if (of_platform != PLATFORM_POWERMAC &&
	    of_platform != PLATFORM_OPAL)
		prom_instantiate_rtas();

#ifdef CONFIG_PPC_POWERNV
	if (of_platform == PLATFORM_OPAL)
		prom_instantiate_opal();
#endif /* CONFIG_PPC_POWERNV */

#ifdef CONFIG_PPC64
	/* instantiate sml */
	prom_instantiate_sml();
#endif

	/*
	 * On non-powermacs, put all CPUs in spin-loops.
	 *
	 * PowerMacs use a different mechanism to spin CPUs
	 *
	 * (This must be done after instantiating RTAS)
	 */
	if (of_platform != PLATFORM_POWERMAC &&
	    of_platform != PLATFORM_OPAL)
		prom_hold_cpus();

	/*
	 * Fill in some infos for use by the kernel later on
	 */
	if (prom_memory_limit) {
		__be64 val = cpu_to_be64(prom_memory_limit);
		prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
			     &val, sizeof(val));
	}
#ifdef CONFIG_PPC64
	if (prom_iommu_off)
		prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
			     NULL, 0);

	if (prom_iommu_force_on)
		prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
			     NULL, 0);

	if (prom_tce_alloc_start) {
		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
			     &prom_tce_alloc_start,
			     sizeof(prom_tce_alloc_start));
		prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
			     &prom_tce_alloc_end,
			     sizeof(prom_tce_alloc_end));
	}
#endif

	/*
	 * Fixup any known bugs in the device-tree
	 */
	fixup_device_tree();

	/*
	 * Now finally create the flattened device-tree
	 */
	prom_printf("copying OF device tree...\n");
	flatten_device_tree();

	/*
	 * in case stdin is USB and still active on IBM machines...
	 * Unfortunately quiesce crashes on some powermacs if we have
	 * closed stdin already (in particular the powerbook 101). It
	 * appears that the OPAL version of OFW doesn't like it either.
	 */
	if (of_platform != PLATFORM_POWERMAC &&
	    of_platform != PLATFORM_OPAL)
		prom_close_stdin();

	/*
	 * Call OF "quiesce" method to shut down pending DMA's from
	 * devices etc...
	 */
	prom_printf("Quiescing Open Firmware ...\n");
	call_prom("quiesce", 0, 0);

	/*
	 * And finally, call the kernel passing it the flattened device
	 * tree and NULL as r5, thus triggering the new entry point which
	 * is common to us and kexec
	 */
	hdr = dt_header_start;

	/* Don't print anything after quiesce under OPAL, it crashes OFW */
	if (of_platform != PLATFORM_OPAL) {
		prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
		prom_debug("->dt_header_start=0x%lx\n", hdr);
	}

#ifdef CONFIG_PPC32
	reloc_got2(-offset);
#else
	unreloc_toc();
#endif

#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* OPAL early debug gets the OPAL base & entry in r8 and r9 */
	__start(hdr, kbase, 0, 0, 0,
		prom_opal_base, prom_opal_entry);
#else
	__start(hdr, kbase, 0, 0, 0, 0, 0);
#endif

	return 0;
}