1/*
2 * Procedures for interfacing to Open Firmware.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#undef DEBUG_PROM
17
18/* we cannot use FORTIFY as it brings in new symbols */
19#define __NO_FORTIFY
20
21#include <stdarg.h>
22#include <linux/kernel.h>
23#include <linux/string.h>
24#include <linux/init.h>
25#include <linux/threads.h>
26#include <linux/spinlock.h>
27#include <linux/types.h>
28#include <linux/pci.h>
29#include <linux/proc_fs.h>
30#include <linux/stringify.h>
31#include <linux/delay.h>
32#include <linux/initrd.h>
33#include <linux/bitops.h>
34#include <asm/prom.h>
35#include <asm/rtas.h>
36#include <asm/page.h>
37#include <asm/processor.h>
38#include <asm/irq.h>
39#include <asm/io.h>
40#include <asm/smp.h>
41#include <asm/mmu.h>
42#include <asm/pgtable.h>
43#include <asm/iommu.h>
44#include <asm/btext.h>
45#include <asm/sections.h>
46#include <asm/machdep.h>
47#include <asm/opal.h>
48#include <asm/asm-prototypes.h>
49
50#include <linux/linux_logo.h>
51
52/*
53 * Eventually bump that one up
54 */
55#define DEVTREE_CHUNK_SIZE 0x100000
56
57/*
58 * This is the size of the local memory reserve map that gets copied
59 * into the boot params passed to the kernel. That size is totally
60 * flexible as the kernel just reads the list until it encounters an
61 * entry with size 0, so it can be changed without breaking binary
62 * compatibility
63 */
64#define MEM_RESERVE_MAP_SIZE 8
65
66/*
67 * prom_init() is called very early on, before the kernel text
68 * and data have been mapped to KERNELBASE. At this point the code
69 * is running at whatever address it has been loaded at.
70 * On ppc32 we compile with -mrelocatable, which means that references
71 * to extern and static variables get relocated automatically.
72 * ppc64 objects are always relocatable, we just need to relocate the
73 * TOC.
74 *
75 * Because OF may have mapped I/O devices into the area starting at
76 * KERNELBASE, particularly on CHRP machines, we can't safely call
77 * OF once the kernel has been mapped to KERNELBASE. Therefore all
78 * OF calls must be done within prom_init().
79 *
80 * ADDR is used in calls to call_prom. The 4th and following
81 * arguments to call_prom should be 32-bit values.
82 * On ppc64, 64 bit values are truncated to 32 bits (and
83 * fortunately don't get interpreted as two arguments).
84 */
85#define ADDR(x) (u32)(unsigned long)(x)
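/*
 * Illustrative use (it mirrors calls made later in this file): a pointer
 * argument is narrowed to a 32-bit cell before being handed to OF, e.g.
 *
 *	call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
 */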
86
87#ifdef CONFIG_PPC64
88#define OF_WORKAROUNDS 0
89#else
90#define OF_WORKAROUNDS of_workarounds
91int of_workarounds;
92#endif
93
94#define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
95#define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
96
97#define PROM_BUG() do { \
98 prom_printf("kernel BUG at %s line 0x%x!\n", \
99 __FILE__, __LINE__); \
100 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
101} while (0)
102
103#ifdef DEBUG_PROM
104#define prom_debug(x...) prom_printf(x)
105#else
106#define prom_debug(x...)
107#endif
108
109
110typedef u32 prom_arg_t;
111
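/*
 * Argument buffer handed to the Open Firmware client interface: the address
 * of the service-name string, the number of input arguments, the number of
 * return values, then up to 10 argument/return cells, following the OF
 * client interface calling convention.
 */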
112struct prom_args {
113 __be32 service;
114 __be32 nargs;
115 __be32 nret;
116 __be32 args[10];
117};
118
119struct prom_t {
120 ihandle root;
121 phandle chosen;
122 int cpu;
123 ihandle stdout;
124 ihandle mmumap;
125 ihandle memory;
126};
127
128struct mem_map_entry {
129 __be64 base;
130 __be64 size;
131};
132
133typedef __be32 cell_t;
134
135extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
136 unsigned long r6, unsigned long r7, unsigned long r8,
137 unsigned long r9);
138
139#ifdef CONFIG_PPC64
140extern int enter_prom(struct prom_args *args, unsigned long entry);
141#else
142static inline int enter_prom(struct prom_args *args, unsigned long entry)
143{
144 return ((int (*)(struct prom_args *))entry)(args);
145}
146#endif
147
148extern void copy_and_flush(unsigned long dest, unsigned long src,
149 unsigned long size, unsigned long offset);
150
151/* prom structure */
152static struct prom_t __initdata prom;
153
154static unsigned long prom_entry __initdata;
155
156#define PROM_SCRATCH_SIZE 256
157
158static char __initdata of_stdout_device[256];
159static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
160
161static unsigned long __initdata dt_header_start;
162static unsigned long __initdata dt_struct_start, dt_struct_end;
163static unsigned long __initdata dt_string_start, dt_string_end;
164
165static unsigned long __initdata prom_initrd_start, prom_initrd_end;
166
167#ifdef CONFIG_PPC64
168static int __initdata prom_iommu_force_on;
169static int __initdata prom_iommu_off;
170static unsigned long __initdata prom_tce_alloc_start;
171static unsigned long __initdata prom_tce_alloc_end;
172#endif
173
174static bool prom_radix_disable __initdata = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
175
176struct platform_support {
177 bool hash_mmu;
178 bool radix_mmu;
179 bool radix_gtse;
180 bool xive;
181};
182
183/* Platform codes are now obsolete in the kernel. They are only used within
184 * this file and will ultimately go away too. Feel free to change them if you
185 * need to; they are not shared with anything outside of this file anymore.
186 */
187#define PLATFORM_PSERIES 0x0100
188#define PLATFORM_PSERIES_LPAR 0x0101
189#define PLATFORM_LPAR 0x0001
190#define PLATFORM_POWERMAC 0x0400
191#define PLATFORM_GENERIC 0x0500
192#define PLATFORM_OPAL 0x0600
193
194static int __initdata of_platform;
195
196static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
197
198static unsigned long __initdata prom_memory_limit;
199
200static unsigned long __initdata alloc_top;
201static unsigned long __initdata alloc_top_high;
202static unsigned long __initdata alloc_bottom;
203static unsigned long __initdata rmo_top;
204static unsigned long __initdata ram_top;
205
206static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
207static int __initdata mem_reserve_cnt;
208
209static cell_t __initdata regbuf[1024];
210
211static bool rtas_has_query_cpu_stopped;
212
213
214/*
215 * Error results ... some OF calls will return "-1" on error, some
216 * will return 0, some will return either. To simplify, here are
217 * macros to use with any ihandle or phandle return value to check if
218 * it is valid
219 */
220
221#define PROM_ERROR (-1u)
222#define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
223#define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
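/*
 * Typical use, as seen further down in this file (shown here purely as an
 * illustration):
 *
 *	phandle chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
 *	if (!PHANDLE_VALID(chosen))
 *		prom_panic("cannot find chosen");
 */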
224
225
226/* This is the one and *ONLY* place where we actually call open
227 * firmware.
228 */
229
230static int __init call_prom(const char *service, int nargs, int nret, ...)
231{
232 int i;
233 struct prom_args args;
234 va_list list;
235
236 args.service = cpu_to_be32(ADDR(service));
237 args.nargs = cpu_to_be32(nargs);
238 args.nret = cpu_to_be32(nret);
239
240 va_start(list, nret);
241 for (i = 0; i < nargs; i++)
242 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
243 va_end(list);
244
245 for (i = 0; i < nret; i++)
246 args.args[nargs+i] = 0;
247
248 if (enter_prom(&args, prom_entry) < 0)
249 return PROM_ERROR;
250
251 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
252}
253
254static int __init call_prom_ret(const char *service, int nargs, int nret,
255 prom_arg_t *rets, ...)
256{
257 int i;
258 struct prom_args args;
259 va_list list;
260
261 args.service = cpu_to_be32(ADDR(service));
262 args.nargs = cpu_to_be32(nargs);
263 args.nret = cpu_to_be32(nret);
264
265 va_start(list, rets);
266 for (i = 0; i < nargs; i++)
267 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
268 va_end(list);
269
270 for (i = 0; i < nret; i++)
271 args.args[nargs+i] = 0;
272
273 if (enter_prom(&args, prom_entry) < 0)
274 return PROM_ERROR;
275
276 if (rets != NULL)
277 for (i = 1; i < nret; ++i)
278 rets[i-1] = be32_to_cpu(args.args[nargs+i]);
279
280 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
281}
282
283
284static void __init prom_print(const char *msg)
285{
286 const char *p, *q;
287
288 if (prom.stdout == 0)
289 return;
290
291 for (p = msg; *p != 0; p = q) {
292 for (q = p; *q != 0 && *q != '\n'; ++q)
293 ;
294 if (q > p)
295 call_prom("write", 3, 1, prom.stdout, p, q - p);
296 if (*q == 0)
297 break;
298 ++q;
299 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
300 }
301}
302
303
304static void __init prom_print_hex(unsigned long val)
305{
306 int i, nibbles = sizeof(val)*2;
307 char buf[sizeof(val)*2+1];
308
309 for (i = nibbles-1; i >= 0; i--) {
310 buf[i] = (val & 0xf) + '0';
311 if (buf[i] > '9')
312 buf[i] += ('a'-'0'-10);
313 val >>= 4;
314 }
315 buf[nibbles] = '\0';
316 call_prom("write", 3, 1, prom.stdout, buf, nibbles);
317}
318
319/* max number of decimal digits in an unsigned long */
320#define UL_DIGITS 21
321static void __init prom_print_dec(unsigned long val)
322{
323 int i, size;
324 char buf[UL_DIGITS+1];
325
326 for (i = UL_DIGITS-1; i >= 0; i--) {
327 buf[i] = (val % 10) + '0';
328 val = val/10;
329 if (val == 0)
330 break;
331 }
332 /* shift stuff down */
333 size = UL_DIGITS - i;
334 call_prom("write", 3, 1, prom.stdout, buf+i, size);
335}
336
337static void __init prom_printf(const char *format, ...)
338{
339 const char *p, *q, *s;
340 va_list args;
341 unsigned long v;
342 long vs;
343
344 va_start(args, format);
345 for (p = format; *p != 0; p = q) {
346 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
347 ;
348 if (q > p)
349 call_prom("write", 3, 1, prom.stdout, p, q - p);
350 if (*q == 0)
351 break;
352 if (*q == '\n') {
353 ++q;
354 call_prom("write", 3, 1, prom.stdout,
355 ADDR("\r\n"), 2);
356 continue;
357 }
358 ++q;
359 if (*q == 0)
360 break;
361 switch (*q) {
362 case 's':
363 ++q;
364 s = va_arg(args, const char *);
365 prom_print(s);
366 break;
367 case 'x':
368 ++q;
369 v = va_arg(args, unsigned long);
370 prom_print_hex(v);
371 break;
372 case 'd':
373 ++q;
374 vs = va_arg(args, int);
375 if (vs < 0) {
376 prom_print("-");
377 vs = -vs;
378 }
379 prom_print_dec(vs);
380 break;
381 case 'l':
382 ++q;
383 if (*q == 0)
384 break;
385 else if (*q == 'x') {
386 ++q;
387 v = va_arg(args, unsigned long);
388 prom_print_hex(v);
389 } else if (*q == 'u') { /* '%lu' */
390 ++q;
391 v = va_arg(args, unsigned long);
392 prom_print_dec(v);
393 } else if (*q == 'd') { /* %ld */
394 ++q;
395 vs = va_arg(args, long);
396 if (vs < 0) {
397 prom_print("-");
398 vs = -vs;
399 }
400 prom_print_dec(vs);
401 }
402 break;
403 }
404 }
405 va_end(args);
406}
407
408
409static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
410 unsigned long align)
411{
412
413 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
414 /*
415 * Old OF requires we claim physical and virtual separately
416 * and then map explicitly (assuming virtual mode)
417 */
418 int ret;
419 prom_arg_t result;
420
421 ret = call_prom_ret("call-method", 5, 2, &result,
422 ADDR("claim"), prom.memory,
423 align, size, virt);
424 if (ret != 0 || result == -1)
425 return -1;
426 ret = call_prom_ret("call-method", 5, 2, &result,
427 ADDR("claim"), prom.mmumap,
428 align, size, virt);
429 if (ret != 0) {
430 call_prom("call-method", 4, 1, ADDR("release"),
431 prom.memory, size, virt);
432 return -1;
433 }
434 /* the 0x12 is M (coherence) + PP == read/write */
435 call_prom("call-method", 6, 1,
436 ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
437 return virt;
438 }
439 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
440 (prom_arg_t)align);
441}
442
443static void __init __attribute__((noreturn)) prom_panic(const char *reason)
444{
445 prom_print(reason);
446 /* Do not call exit because it clears the screen on pmac
447 * it also causes some sort of double-fault on early pmacs */
448 if (of_platform == PLATFORM_POWERMAC)
449 asm("trap\n");
450
451 /* ToDo: should put up an SRC here on pSeries */
452 call_prom("exit", 0, 0);
453
454 for (;;) /* should never get here */
455 ;
456}
457
458
459static int __init prom_next_node(phandle *nodep)
460{
461 phandle node;
462
463 if ((node = *nodep) != 0
464 && (*nodep = call_prom("child", 1, 1, node)) != 0)
465 return 1;
466 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
467 return 1;
468 for (;;) {
469 if ((node = call_prom("parent", 1, 1, node)) == 0)
470 return 0;
471 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
472 return 1;
473 }
474}
475
476static inline int prom_getprop(phandle node, const char *pname,
477 void *value, size_t valuelen)
478{
479 return call_prom("getprop", 4, 1, node, ADDR(pname),
480 (u32)(unsigned long) value, (u32) valuelen);
481}
482
483static inline int prom_getproplen(phandle node, const char *pname)
484{
485 return call_prom("getproplen", 2, 1, node, ADDR(pname));
486}
487
488static void add_string(char **str, const char *q)
489{
490 char *p = *str;
491
492 while (*q)
493 *p++ = *q++;
494 *p++ = ' ';
495 *str = p;
496}
497
498static char *tohex(unsigned int x)
499{
500 static char digits[] = "0123456789abcdef";
501 static char result[9];
502 int i;
503
504 result[8] = 0;
505 i = 8;
506 do {
507 --i;
508 result[i] = digits[x & 0xf];
509 x >>= 4;
510 } while (x != 0 && i > 0);
511 return &result[i];
512}
513
514static int __init prom_setprop(phandle node, const char *nodename,
515 const char *pname, void *value, size_t valuelen)
516{
517 char cmd[256], *p;
518
519 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
520 return call_prom("setprop", 4, 1, node, ADDR(pname),
521 (u32)(unsigned long) value, (u32) valuelen);
522
523 /* gah... setprop doesn't work on longtrail, have to use interpret */
524 p = cmd;
525 add_string(&p, "dev");
526 add_string(&p, nodename);
527 add_string(&p, tohex((u32)(unsigned long) value));
528 add_string(&p, tohex(valuelen));
529 add_string(&p, tohex(ADDR(pname)));
530 add_string(&p, tohex(strlen(pname)));
531 add_string(&p, "property");
532 *p = 0;
533 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
534}
535
536/* We can't use the standard versions because of relocation headaches. */
537#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
538 || ('a' <= (c) && (c) <= 'f') \
539 || ('A' <= (c) && (c) <= 'F'))
540
541#define isdigit(c) ('0' <= (c) && (c) <= '9')
542#define islower(c) ('a' <= (c) && (c) <= 'z')
543#define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
544
545static unsigned long prom_strtoul(const char *cp, const char **endp)
546{
547 unsigned long result = 0, base = 10, value;
548
549 if (*cp == '0') {
550 base = 8;
551 cp++;
552 if (toupper(*cp) == 'X') {
553 cp++;
554 base = 16;
555 }
556 }
557
558 while (isxdigit(*cp) &&
559 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
560 result = result * base + value;
561 cp++;
562 }
563
564 if (endp)
565 *endp = cp;
566
567 return result;
568}
569
570static unsigned long prom_memparse(const char *ptr, const char **retptr)
571{
572 unsigned long ret = prom_strtoul(ptr, retptr);
573 int shift = 0;
574
575 /*
576 * We can't use a switch here because GCC *may* generate a
577 * jump table which won't work, because we're not running at
578 * the address we're linked at.
579 */
580 if ('G' == **retptr || 'g' == **retptr)
581 shift = 30;
582
583 if ('M' == **retptr || 'm' == **retptr)
584 shift = 20;
585
586 if ('K' == **retptr || 'k' == **retptr)
587 shift = 10;
588
589 if (shift) {
590 ret <<= shift;
591 (*retptr)++;
592 }
593
594 return ret;
595}
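/*
 * Worked example (illustrative): prom_memparse("512M", &p) parses the
 * number 512, sees the 'M' suffix and returns 512 << 20 == 0x20000000,
 * leaving p pointing just past the 'M'.
 */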
596
597/*
598 * Early parsing of the command line passed to the kernel, used for
599 * "mem=x" and the options that affect the iommu
600 */
601static void __init early_cmdline_parse(void)
602{
603 const char *opt;
604
605 char *p;
606 int l = 0;
607
608 prom_cmd_line[0] = 0;
609 p = prom_cmd_line;
610 if ((long)prom.chosen > 0)
611 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
612#ifdef CONFIG_CMDLINE
613 if (l <= 0 || p[0] == '\0') /* dbl check */
614 strlcpy(prom_cmd_line,
615 CONFIG_CMDLINE, sizeof(prom_cmd_line));
616#endif /* CONFIG_CMDLINE */
617 prom_printf("command line: %s\n", prom_cmd_line);
618
619#ifdef CONFIG_PPC64
620 opt = strstr(prom_cmd_line, "iommu=");
621 if (opt) {
622 prom_printf("iommu opt is: %s\n", opt);
623 opt += 6;
624 while (*opt && *opt == ' ')
625 opt++;
626 if (!strncmp(opt, "off", 3))
627 prom_iommu_off = 1;
628 else if (!strncmp(opt, "force", 5))
629 prom_iommu_force_on = 1;
630 }
631#endif
632 opt = strstr(prom_cmd_line, "mem=");
633 if (opt) {
634 opt += 4;
635 prom_memory_limit = prom_memparse(opt, (const char **)&opt);
636#ifdef CONFIG_PPC64
637 /* Align to 16 MB == size of ppc64 large page */
638 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
639#endif
640 }
641
642 opt = strstr(prom_cmd_line, "disable_radix");
643 if (opt) {
644 opt += 13;
645 if (*opt && *opt == '=') {
646 bool val;
647
648 if (kstrtobool(++opt, &val))
649 prom_radix_disable = false;
650 else
651 prom_radix_disable = val;
652 } else
653 prom_radix_disable = true;
654 }
655 if (prom_radix_disable)
656 prom_debug("Radix disabled from cmdline\n");
657}
658
659#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
660/*
661 * The architecture vector has an array of PVR mask/value pairs,
662 * followed by # option vectors - 1, followed by the option vectors.
663 *
664 * See prom.h for the definition of the bits specified in the
665 * architecture vector.
666 */
667
668/* Firmware expects the value to be n - 1, where n is the # of vectors */
669#define NUM_VECTORS(n) ((n) - 1)
670
671/*
672 * Firmware expects 1 + n - 2, where n is the length of the option vector in
673 * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
674 */
675#define VECTOR_LENGTH(n) (1 + (n) - 2)
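/*
 * For instance, struct option_vector1 below is 3 bytes long, so
 * VECTOR_LENGTH(sizeof(struct option_vector1)) evaluates to 1 + 3 - 2 == 2.
 */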
676
677struct option_vector1 {
678 u8 byte1;
679 u8 arch_versions;
680 u8 arch_versions3;
681} __packed;
682
683struct option_vector2 {
684 u8 byte1;
685 __be16 reserved;
686 __be32 real_base;
687 __be32 real_size;
688 __be32 virt_base;
689 __be32 virt_size;
690 __be32 load_base;
691 __be32 min_rma;
692 __be32 min_load;
693 u8 min_rma_percent;
694 u8 max_pft_size;
695} __packed;
696
697struct option_vector3 {
698 u8 byte1;
699 u8 byte2;
700} __packed;
701
702struct option_vector4 {
703 u8 byte1;
704 u8 min_vp_cap;
705} __packed;
706
707struct option_vector5 {
708 u8 byte1;
709 u8 byte2;
710 u8 byte3;
711 u8 cmo;
712 u8 associativity;
713 u8 bin_opts;
714 u8 micro_checkpoint;
715 u8 reserved0;
716 __be32 max_cpus;
717 __be16 papr_level;
718 __be16 reserved1;
719 u8 platform_facilities;
720 u8 reserved2;
721 __be16 reserved3;
722 u8 subprocessors;
723 u8 byte22;
724 u8 intarch;
725 u8 mmu;
726 u8 hash_ext;
727 u8 radix_ext;
728} __packed;
729
730struct option_vector6 {
731 u8 reserved;
732 u8 secondary_pteg;
733 u8 os_name;
734} __packed;
735
736struct ibm_arch_vec {
737 struct { u32 mask, val; } pvrs[12];
738
739 u8 num_vectors;
740
741 u8 vec1_len;
742 struct option_vector1 vec1;
743
744 u8 vec2_len;
745 struct option_vector2 vec2;
746
747 u8 vec3_len;
748 struct option_vector3 vec3;
749
750 u8 vec4_len;
751 struct option_vector4 vec4;
752
753 u8 vec5_len;
754 struct option_vector5 vec5;
755
756 u8 vec6_len;
757 struct option_vector6 vec6;
758} __packed;
759
760struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
761 .pvrs = {
762 {
763 .mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
764 .val = cpu_to_be32(0x003a0000),
765 },
766 {
767 .mask = cpu_to_be32(0xffff0000), /* POWER6 */
768 .val = cpu_to_be32(0x003e0000),
769 },
770 {
771 .mask = cpu_to_be32(0xffff0000), /* POWER7 */
772 .val = cpu_to_be32(0x003f0000),
773 },
774 {
775 .mask = cpu_to_be32(0xffff0000), /* POWER8E */
776 .val = cpu_to_be32(0x004b0000),
777 },
778 {
779 .mask = cpu_to_be32(0xffff0000), /* POWER8NVL */
780 .val = cpu_to_be32(0x004c0000),
781 },
782 {
783 .mask = cpu_to_be32(0xffff0000), /* POWER8 */
784 .val = cpu_to_be32(0x004d0000),
785 },
786 {
787 .mask = cpu_to_be32(0xffff0000), /* POWER9 */
788 .val = cpu_to_be32(0x004e0000),
789 },
790 {
791 .mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
792 .val = cpu_to_be32(0x0f000005),
793 },
794 {
795 .mask = cpu_to_be32(0xffffffff), /* all 2.07-compliant */
796 .val = cpu_to_be32(0x0f000004),
797 },
798 {
799 .mask = cpu_to_be32(0xffffffff), /* all 2.06-compliant */
800 .val = cpu_to_be32(0x0f000003),
801 },
802 {
803 .mask = cpu_to_be32(0xffffffff), /* all 2.05-compliant */
804 .val = cpu_to_be32(0x0f000002),
805 },
806 {
807 .mask = cpu_to_be32(0xfffffffe), /* all 2.04-compliant and earlier */
808 .val = cpu_to_be32(0x0f000001),
809 },
810 },
811
812 .num_vectors = NUM_VECTORS(6),
813
814 .vec1_len = VECTOR_LENGTH(sizeof(struct option_vector1)),
815 .vec1 = {
816 .byte1 = 0,
817 .arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
818 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
819 .arch_versions3 = OV1_PPC_3_00,
820 },
821
822 .vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
823 /* option vector 2: Open Firmware options supported */
824 .vec2 = {
825 .byte1 = OV2_REAL_MODE,
826 .reserved = 0,
827 .real_base = cpu_to_be32(0xffffffff),
828 .real_size = cpu_to_be32(0xffffffff),
829 .virt_base = cpu_to_be32(0xffffffff),
830 .virt_size = cpu_to_be32(0xffffffff),
831 .load_base = cpu_to_be32(0xffffffff),
832 .min_rma = cpu_to_be32(512), /* 512MB min RMA */
833 .min_load = cpu_to_be32(0xffffffff), /* full client load */
834 .min_rma_percent = 0, /* min RMA percentage of total RAM */
835 .max_pft_size = 48, /* max log_2(hash table size) */
836 },
837
838 .vec3_len = VECTOR_LENGTH(sizeof(struct option_vector3)),
839 /* option vector 3: processor options supported */
840 .vec3 = {
841 .byte1 = 0, /* don't ignore, don't halt */
842 .byte2 = OV3_FP | OV3_VMX | OV3_DFP,
843 },
844
845 .vec4_len = VECTOR_LENGTH(sizeof(struct option_vector4)),
846 /* option vector 4: IBM PAPR implementation */
847 .vec4 = {
848 .byte1 = 0, /* don't halt */
849 .min_vp_cap = OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
850 },
851
852 .vec5_len = VECTOR_LENGTH(sizeof(struct option_vector5)),
853 /* option vector 5: PAPR/OF options */
854 .vec5 = {
855 .byte1 = 0, /* don't ignore, don't halt */
856 .byte2 = OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
857 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
858#ifdef CONFIG_PCI_MSI
859 /* PCIe/MSI support. Without MSI full PCIe is not supported */
860 OV5_FEAT(OV5_MSI),
861#else
862 0,
863#endif
864 .byte3 = 0,
865 .cmo =
866#ifdef CONFIG_PPC_SMLPAR
867 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
868#else
869 0,
870#endif
871 .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
872 .bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
873 .micro_checkpoint = 0,
874 .reserved0 = 0,
875 .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */
876 .papr_level = 0,
877 .reserved1 = 0,
878 .platform_facilities = OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) | OV5_FEAT(OV5_PFO_HW_842),
879 .reserved2 = 0,
880 .reserved3 = 0,
881 .subprocessors = 1,
882 .byte22 = OV5_FEAT(OV5_DRMEM_V2),
883 .intarch = 0,
884 .mmu = 0,
885 .hash_ext = 0,
886 .radix_ext = 0,
887 },
888
889 /* option vector 6: IBM PAPR hints */
890 .vec6_len = VECTOR_LENGTH(sizeof(struct option_vector6)),
891 .vec6 = {
892 .reserved = 0,
893 .secondary_pteg = 0,
894 .os_name = OV6_LINUX,
895 },
896};
897
898/* Old method - ELF header with PT_NOTE sections only works on BE */
899#ifdef __BIG_ENDIAN__
900static struct fake_elf {
901 Elf32_Ehdr elfhdr;
902 Elf32_Phdr phdr[2];
903 struct chrpnote {
904 u32 namesz;
905 u32 descsz;
906 u32 type;
907 char name[8]; /* "PowerPC" */
908 struct chrpdesc {
909 u32 real_mode;
910 u32 real_base;
911 u32 real_size;
912 u32 virt_base;
913 u32 virt_size;
914 u32 load_base;
915 } chrpdesc;
916 } chrpnote;
917 struct rpanote {
918 u32 namesz;
919 u32 descsz;
920 u32 type;
921 char name[24]; /* "IBM,RPA-Client-Config" */
922 struct rpadesc {
923 u32 lpar_affinity;
924 u32 min_rmo_size;
925 u32 min_rmo_percent;
926 u32 max_pft_size;
927 u32 splpar;
928 u32 min_load;
929 u32 new_mem_def;
930 u32 ignore_me;
931 } rpadesc;
932 } rpanote;
933} fake_elf = {
934 .elfhdr = {
935 .e_ident = { 0x7f, 'E', 'L', 'F',
936 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
937 .e_type = ET_EXEC, /* yeah right */
938 .e_machine = EM_PPC,
939 .e_version = EV_CURRENT,
940 .e_phoff = offsetof(struct fake_elf, phdr),
941 .e_phentsize = sizeof(Elf32_Phdr),
942 .e_phnum = 2
943 },
944 .phdr = {
945 [0] = {
946 .p_type = PT_NOTE,
947 .p_offset = offsetof(struct fake_elf, chrpnote),
948 .p_filesz = sizeof(struct chrpnote)
949 }, [1] = {
950 .p_type = PT_NOTE,
951 .p_offset = offsetof(struct fake_elf, rpanote),
952 .p_filesz = sizeof(struct rpanote)
953 }
954 },
955 .chrpnote = {
956 .namesz = sizeof("PowerPC"),
957 .descsz = sizeof(struct chrpdesc),
958 .type = 0x1275,
959 .name = "PowerPC",
960 .chrpdesc = {
961 .real_mode = ~0U, /* ~0 means "don't care" */
962 .real_base = ~0U,
963 .real_size = ~0U,
964 .virt_base = ~0U,
965 .virt_size = ~0U,
966 .load_base = ~0U
967 },
968 },
969 .rpanote = {
970 .namesz = sizeof("IBM,RPA-Client-Config"),
971 .descsz = sizeof(struct rpadesc),
972 .type = 0x12759999,
973 .name = "IBM,RPA-Client-Config",
974 .rpadesc = {
975 .lpar_affinity = 0,
976 .min_rmo_size = 64, /* in megabytes */
977 .min_rmo_percent = 0,
978 .max_pft_size = 48, /* 2^48 bytes max PFT size */
979 .splpar = 1,
980 .min_load = ~0U,
981 .new_mem_def = 0
982 }
983 }
984};
985#endif /* __BIG_ENDIAN__ */
986
987static int __init prom_count_smt_threads(void)
988{
989 phandle node;
990 char type[64];
991 unsigned int plen;
992
993	/* Pick up the first CPU node we can find */
994 for (node = 0; prom_next_node(&node); ) {
995 type[0] = 0;
996 prom_getprop(node, "device_type", type, sizeof(type));
997
998 if (strcmp(type, "cpu"))
999 continue;
1000 /*
1001 * There is an entry for each smt thread, each entry being
1002 * 4 bytes long. All cpus should have the same number of
1003 * smt threads, so return after finding the first.
1004 */
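		/*
		 * For example (illustrative figure): a 32-byte property here
		 * would mean 32 >> 2 == 8 threads per core.
		 */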
1005 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
1006 if (plen == PROM_ERROR)
1007 break;
1008 plen >>= 2;
1009 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
1010
1011 /* Sanity check */
1012 if (plen < 1 || plen > 64) {
1013 prom_printf("Threads per core %lu out of bounds, assuming 1\n",
1014 (unsigned long)plen);
1015 return 1;
1016 }
1017 return plen;
1018 }
1019 prom_debug("No threads found, assuming 1 per core\n");
1020
1021 return 1;
1022
1023}
1024
1025static void __init prom_parse_mmu_model(u8 val,
1026 struct platform_support *support)
1027{
1028 switch (val) {
1029 case OV5_FEAT(OV5_MMU_DYNAMIC):
1030 case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
1031 prom_debug("MMU - either supported\n");
1032 support->radix_mmu = !prom_radix_disable;
1033 support->hash_mmu = true;
1034 break;
1035 case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
1036 prom_debug("MMU - radix only\n");
1037 if (prom_radix_disable) {
1038 /*
1039 * If we __have__ to do radix, we're better off ignoring
1040 * the command line rather than not booting.
1041 */
1042 prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
1043 }
1044 support->radix_mmu = true;
1045 break;
1046 case OV5_FEAT(OV5_MMU_HASH):
1047 prom_debug("MMU - hash only\n");
1048 support->hash_mmu = true;
1049 break;
1050 default:
1051 prom_debug("Unknown mmu support option: 0x%x\n", val);
1052 break;
1053 }
1054}
1055
1056static void __init prom_parse_xive_model(u8 val,
1057 struct platform_support *support)
1058{
1059 switch (val) {
1060 case OV5_FEAT(OV5_XIVE_EITHER): /* Either Available */
1061 prom_debug("XIVE - either mode supported\n");
1062 support->xive = true;
1063 break;
1064 case OV5_FEAT(OV5_XIVE_EXPLOIT): /* Only Exploitation mode */
1065 prom_debug("XIVE - exploitation mode supported\n");
1066 support->xive = true;
1067 break;
1068 case OV5_FEAT(OV5_XIVE_LEGACY): /* Only Legacy mode */
1069 prom_debug("XIVE - legacy mode supported\n");
1070 break;
1071 default:
1072 prom_debug("Unknown xive support option: 0x%x\n", val);
1073 break;
1074 }
1075}
1076
1077static void __init prom_parse_platform_support(u8 index, u8 val,
1078 struct platform_support *support)
1079{
1080 switch (index) {
1081 case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
1082 prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
1083 break;
1084 case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
1085 if (val & OV5_FEAT(OV5_RADIX_GTSE)) {
1086 prom_debug("Radix - GTSE supported\n");
1087 support->radix_gtse = true;
1088 }
1089 break;
1090 case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */
1091 prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT),
1092 support);
1093 break;
1094 }
1095}
1096
1097static void __init prom_check_platform_support(void)
1098{
1099 struct platform_support supported = {
1100 .hash_mmu = false,
1101 .radix_mmu = false,
1102 .radix_gtse = false,
1103 .xive = false
1104 };
1105 int prop_len = prom_getproplen(prom.chosen,
1106 "ibm,arch-vec-5-platform-support");
1107 if (prop_len > 1) {
1108 int i;
1109 u8 vec[prop_len];
1110 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
1111 prop_len);
1112 prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
1113 &vec, sizeof(vec));
1114 for (i = 0; i < prop_len; i += 2) {
1115 prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
1116 , vec[i]
1117 , vec[i + 1]);
1118 prom_parse_platform_support(vec[i], vec[i + 1],
1119 &supported);
1120 }
1121 }
1122
1123 if (supported.radix_mmu && supported.radix_gtse &&
1124 IS_ENABLED(CONFIG_PPC_RADIX_MMU)) {
1125 /* Radix preferred - but we require GTSE for now */
1126 prom_debug("Asking for radix with GTSE\n");
1127 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
1128 ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE);
1129 } else if (supported.hash_mmu) {
1130 /* Default to hash mmu (if we can) */
1131 prom_debug("Asking for hash\n");
1132 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
1133 } else {
1134 /* We're probably on a legacy hypervisor */
1135 prom_debug("Assuming legacy hash support\n");
1136 }
1137
1138 if (supported.xive) {
1139 prom_debug("Asking for XIVE\n");
1140 ibm_architecture_vec.vec5.intarch = OV5_FEAT(OV5_XIVE_EXPLOIT);
1141 }
1142}
1143
1144static void __init prom_send_capabilities(void)
1145{
1146 ihandle root;
1147 prom_arg_t ret;
1148 u32 cores;
1149
1150 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
1151 prom_check_platform_support();
1152
1153 root = call_prom("open", 1, 1, ADDR("/"));
1154 if (root != 0) {
1155 /* We need to tell the FW about the number of cores we support.
1156 *
1157 * To do that, we count the number of threads on the first core
1158 * (we assume this is the same for all cores) and use it to
1159 * divide NR_CPUS.
1160 */
1161
1162 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
1163 prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
1164 cores, NR_CPUS);
1165
1166 ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores);
1167
1168 /* try calling the ibm,client-architecture-support method */
1169 prom_printf("Calling ibm,client-architecture-support...");
1170 if (call_prom_ret("call-method", 3, 2, &ret,
1171 ADDR("ibm,client-architecture-support"),
1172 root,
1173 ADDR(&ibm_architecture_vec)) == 0) {
1174 /* the call exists... */
1175 if (ret)
1176 prom_printf("\nWARNING: ibm,client-architecture"
1177 "-support call FAILED!\n");
1178 call_prom("close", 1, 0, root);
1179 prom_printf(" done\n");
1180 return;
1181 }
1182 call_prom("close", 1, 0, root);
1183 prom_printf(" not implemented\n");
1184 }
1185
1186#ifdef __BIG_ENDIAN__
1187 {
1188 ihandle elfloader;
1189
1190 /* no ibm,client-architecture-support call, try the old way */
1191 elfloader = call_prom("open", 1, 1,
1192 ADDR("/packages/elf-loader"));
1193 if (elfloader == 0) {
1194 prom_printf("couldn't open /packages/elf-loader\n");
1195 return;
1196 }
1197 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
1198 elfloader, ADDR(&fake_elf));
1199 call_prom("close", 1, 0, elfloader);
1200 }
1201#endif /* __BIG_ENDIAN__ */
1202}
1203#endif /* #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1204
1205/*
1206 * Memory allocation strategy... our layout is normally:
1207 *
1208 * at 14Mb or more we have vmlinux, then a gap and initrd. In some
1209 * rare cases, initrd might end up being before the kernel though.
1210 * We assume this won't override the final kernel at 0, we have no
1211 * provision to handle that in this version, but it should hopefully
1212 * never happen.
1213 *
1214 * alloc_top is set to the top of the RMO, and is eventually shrunk down
1215 * if the TCEs overlap
1216 *
1217 * alloc_bottom is set to the top of kernel/initrd
1218 *
1219 * from there, allocations are done this way : rtas is allocated
1220 * topmost, and the device-tree is allocated from the bottom. We try
1221 * to grow the device-tree allocation as we progress. If we can't,
1222 * then we fail, we don't currently have a facility to restart
1223 * elsewhere, but that shouldn't be necessary.
1224 *
1225 * Note that calls to reserve_mem have to be done explicitly, memory
1226 * allocated with either alloc_up or alloc_down isn't automatically
1227 * reserved.
1228 */
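/*
 * A rough sketch of the resulting RMO layout (illustrative only):
 *
 *	0 .............. kernel (and usually initrd) ... up to alloc_bottom
 *	alloc_bottom ... flattened device-tree, grown upward by alloc_up()
 *	alloc_top ...... rtas and friends, carved downward by alloc_down()
 *	rmo_top ........ top of the RMO (or of segment 0 when non-LPAR)
 */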
1229
1230
1231/*
1232 * Allocates memory in the RMO upward from the kernel/initrd
1233 *
1234 * When align is 0, this is a special case, it means to allocate in place
1235 * at the current location of alloc_bottom or fail (that is basically
1236 * extending the previous allocation). Used for the device-tree flattening
1237 */
1238static unsigned long __init alloc_up(unsigned long size, unsigned long align)
1239{
1240 unsigned long base = alloc_bottom;
1241 unsigned long addr = 0;
1242
1243 if (align)
1244 base = _ALIGN_UP(base, align);
1245 prom_debug("alloc_up(%x, %x)\n", size, align);
1246 if (ram_top == 0)
1247 prom_panic("alloc_up() called with mem not initialized\n");
1248
1249 if (align)
1250 base = _ALIGN_UP(alloc_bottom, align);
1251 else
1252 base = alloc_bottom;
1253
1254 for(; (base + size) <= alloc_top;
1255 base = _ALIGN_UP(base + 0x100000, align)) {
1256 prom_debug(" trying: 0x%x\n\r", base);
1257 addr = (unsigned long)prom_claim(base, size, 0);
1258 if (addr != PROM_ERROR && addr != 0)
1259 break;
1260 addr = 0;
1261 if (align == 0)
1262 break;
1263 }
1264 if (addr == 0)
1265 return 0;
1266 alloc_bottom = addr + size;
1267
1268 prom_debug(" -> %x\n", addr);
1269 prom_debug(" alloc_bottom : %x\n", alloc_bottom);
1270 prom_debug(" alloc_top : %x\n", alloc_top);
1271 prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
1272 prom_debug(" rmo_top : %x\n", rmo_top);
1273 prom_debug(" ram_top : %x\n", ram_top);
1274
1275 return addr;
1276}
1277
1278/*
1279 * Allocates memory downward, either from top of RMO, or if highmem
1280 * is set, from the top of RAM. Note that this one doesn't handle
1281 * failures. It does claim memory if highmem is not set.
1282 */
1283static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1284 int highmem)
1285{
1286 unsigned long base, addr = 0;
1287
1288 prom_debug("alloc_down(%x, %x, %s)\n", size, align,
1289 highmem ? "(high)" : "(low)");
1290 if (ram_top == 0)
1291 prom_panic("alloc_down() called with mem not initialized\n");
1292
1293 if (highmem) {
1294 /* Carve out storage for the TCE table. */
1295 addr = _ALIGN_DOWN(alloc_top_high - size, align);
1296 if (addr <= alloc_bottom)
1297 return 0;
1298 /* Will we bump into the RMO ? If yes, check out that we
1299 * didn't overlap existing allocations there, if we did,
1300 * we are dead, we must be the first in town !
1301 */
1302 if (addr < rmo_top) {
1303 /* Good, we are first */
1304 if (alloc_top == rmo_top)
1305 alloc_top = rmo_top = addr;
1306 else
1307 return 0;
1308 }
1309 alloc_top_high = addr;
1310 goto bail;
1311 }
1312
1313 base = _ALIGN_DOWN(alloc_top - size, align);
1314 for (; base > alloc_bottom;
1315 base = _ALIGN_DOWN(base - 0x100000, align)) {
1316 prom_debug(" trying: 0x%x\n\r", base);
1317 addr = (unsigned long)prom_claim(base, size, 0);
1318 if (addr != PROM_ERROR && addr != 0)
1319 break;
1320 addr = 0;
1321 }
1322 if (addr == 0)
1323 return 0;
1324 alloc_top = addr;
1325
1326 bail:
1327 prom_debug(" -> %x\n", addr);
1328 prom_debug(" alloc_bottom : %x\n", alloc_bottom);
1329 prom_debug(" alloc_top : %x\n", alloc_top);
1330 prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
1331 prom_debug(" rmo_top : %x\n", rmo_top);
1332 prom_debug(" ram_top : %x\n", ram_top);
1333
1334 return addr;
1335}
1336
1337/*
1338 * Parse a "reg" cell
1339 */
1340static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1341{
1342 cell_t *p = *cellp;
1343 unsigned long r = 0;
1344
1345 /* Ignore more than 2 cells */
1346 while (s > sizeof(unsigned long) / 4) {
1347 p++;
1348 s--;
1349 }
1350 r = be32_to_cpu(*p++);
1351#ifdef CONFIG_PPC64
1352 if (s > 1) {
1353 r <<= 32;
1354 r |= be32_to_cpu(*(p++));
1355 }
1356#endif
1357 *cellp = p;
1358 return r;
1359}
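/*
 * Sketch of the behaviour on ppc64 (illustrative): with two address cells
 * (s == 2), the cells { 0x00000001, 0x00000000 } combine into the 64-bit
 * value 0x100000000; if more than two cells were given, the leading extra
 * cells would be skipped.
 */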
1360
1361/*
1362 * Very dumb function for adding to the memory reserve list, but
1363 * we don't need anything smarter at this point
1364 *
1365 * XXX Eventually check for collisions. They should NEVER happen.
1366 * If problems seem to show up, it would be a good start to track
1367 * them down.
1368 */
1369static void __init reserve_mem(u64 base, u64 size)
1370{
1371 u64 top = base + size;
1372 unsigned long cnt = mem_reserve_cnt;
1373
1374 if (size == 0)
1375 return;
1376
1377 /* We need to always keep one empty entry so that we
1378 * have our terminator with "size" set to 0 since we are
1379 * dumb and just copy this entire array to the boot params
1380 */
1381 base = _ALIGN_DOWN(base, PAGE_SIZE);
1382 top = _ALIGN_UP(top, PAGE_SIZE);
1383 size = top - base;
1384
1385 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1386 prom_panic("Memory reserve map exhausted !\n");
1387 mem_reserve_map[cnt].base = cpu_to_be64(base);
1388 mem_reserve_map[cnt].size = cpu_to_be64(size);
1389 mem_reserve_cnt = cnt + 1;
1390}
1391
1392/*
1393 * Initialize the memory allocation mechanism, parse "memory" nodes and
1394 * obtain that way the top of memory and RMO to set up our local allocator
1395 */
1396static void __init prom_init_mem(void)
1397{
1398 phandle node;
1399 char *path, type[64];
1400 unsigned int plen;
1401 cell_t *p, *endp;
1402 __be32 val;
1403 u32 rac, rsc;
1404
1405 /*
1406 * We iterate the memory nodes to find
1407 * 1) top of RMO (first node)
1408 * 2) top of memory
1409 */
1410 val = cpu_to_be32(2);
1411 prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1412 rac = be32_to_cpu(val);
1413 val = cpu_to_be32(1);
1414	prom_getprop(prom.root, "#size-cells", &val, sizeof(val));
1415 rsc = be32_to_cpu(val);
1416 prom_debug("root_addr_cells: %x\n", rac);
1417 prom_debug("root_size_cells: %x\n", rsc);
1418
1419 prom_debug("scanning memory:\n");
1420 path = prom_scratch;
1421
1422 for (node = 0; prom_next_node(&node); ) {
1423 type[0] = 0;
1424 prom_getprop(node, "device_type", type, sizeof(type));
1425
1426 if (type[0] == 0) {
1427 /*
1428 * CHRP Longtrail machines have no device_type
1429 * on the memory node, so check the name instead...
1430 */
1431 prom_getprop(node, "name", type, sizeof(type));
1432 }
1433 if (strcmp(type, "memory"))
1434 continue;
1435
1436 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1437 if (plen > sizeof(regbuf)) {
1438 prom_printf("memory node too large for buffer !\n");
1439 plen = sizeof(regbuf);
1440 }
1441 p = regbuf;
1442 endp = p + (plen / sizeof(cell_t));
1443
1444#ifdef DEBUG_PROM
1445 memset(path, 0, PROM_SCRATCH_SIZE);
1446 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1447 prom_debug(" node %s :\n", path);
1448#endif /* DEBUG_PROM */
1449
1450 while ((endp - p) >= (rac + rsc)) {
1451 unsigned long base, size;
1452
1453 base = prom_next_cell(rac, &p);
1454 size = prom_next_cell(rsc, &p);
1455
1456 if (size == 0)
1457 continue;
1458 prom_debug(" %x %x\n", base, size);
1459 if (base == 0 && (of_platform & PLATFORM_LPAR))
1460 rmo_top = size;
1461 if ((base + size) > ram_top)
1462 ram_top = base + size;
1463 }
1464 }
1465
1466 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1467
1468 /*
1469 * If prom_memory_limit is set we reduce the upper limits *except* for
1470 * alloc_top_high. This must be the real top of RAM so we can put
1471 * TCE's up there.
1472 */
1473
1474 alloc_top_high = ram_top;
1475
1476 if (prom_memory_limit) {
1477 if (prom_memory_limit <= alloc_bottom) {
1478 prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
1479 prom_memory_limit);
1480 prom_memory_limit = 0;
1481 } else if (prom_memory_limit >= ram_top) {
1482 prom_printf("Ignoring mem=%x >= ram_top.\n",
1483 prom_memory_limit);
1484 prom_memory_limit = 0;
1485 } else {
1486 ram_top = prom_memory_limit;
1487 rmo_top = min(rmo_top, prom_memory_limit);
1488 }
1489 }
1490
1491 /*
1492 * Setup our top alloc point, that is top of RMO or top of
1493 * segment 0 when running non-LPAR.
1494 * Some RS64 machines have buggy firmware where claims up at
1495 * 1GB fail. Cap at 768MB as a workaround.
1496 * Since 768MB is plenty of room, and we need to cap to something
1497 * reasonable on 32-bit, cap at 768MB on all machines.
1498 */
1499 if (!rmo_top)
1500 rmo_top = ram_top;
1501 rmo_top = min(0x30000000ul, rmo_top);
1502 alloc_top = rmo_top;
1503 alloc_top_high = ram_top;
1504
1505 /*
1506 * Check if we have an initrd after the kernel but still inside
1507 * the RMO. If we do, move our bottom point to after it.
1508 */
1509 if (prom_initrd_start &&
1510 prom_initrd_start < rmo_top &&
1511 prom_initrd_end > alloc_bottom)
1512 alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1513
1514 prom_printf("memory layout at init:\n");
1515 prom_printf(" memory_limit : %x (16 MB aligned)\n", prom_memory_limit);
1516 prom_printf(" alloc_bottom : %x\n", alloc_bottom);
1517 prom_printf(" alloc_top : %x\n", alloc_top);
1518 prom_printf(" alloc_top_hi : %x\n", alloc_top_high);
1519 prom_printf(" rmo_top : %x\n", rmo_top);
1520 prom_printf(" ram_top : %x\n", ram_top);
1521}
1522
1523static void __init prom_close_stdin(void)
1524{
1525 __be32 val;
1526 ihandle stdin;
1527
1528 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1529 stdin = be32_to_cpu(val);
1530 call_prom("close", 1, 0, stdin);
1531 }
1532}
1533
1534#ifdef CONFIG_PPC_POWERNV
1535
1536#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1537static u64 __initdata prom_opal_base;
1538static u64 __initdata prom_opal_entry;
1539#endif
1540
1541/*
1542 * Allocate room for and instantiate OPAL
1543 */
1544static void __init prom_instantiate_opal(void)
1545{
1546 phandle opal_node;
1547 ihandle opal_inst;
1548 u64 base, entry;
1549 u64 size = 0, align = 0x10000;
1550 __be64 val64;
1551 u32 rets[2];
1552
1553 prom_debug("prom_instantiate_opal: start...\n");
1554
1555 opal_node = call_prom("finddevice", 1, 1, ADDR("/ibm,opal"));
1556 prom_debug("opal_node: %x\n", opal_node);
1557 if (!PHANDLE_VALID(opal_node))
1558 return;
1559
1560 val64 = 0;
1561 prom_getprop(opal_node, "opal-runtime-size", &val64, sizeof(val64));
1562 size = be64_to_cpu(val64);
1563 if (size == 0)
1564 return;
1565 val64 = 0;
1566	prom_getprop(opal_node, "opal-runtime-alignment", &val64, sizeof(val64));
1567 align = be64_to_cpu(val64);
1568
1569 base = alloc_down(size, align, 0);
1570 if (base == 0) {
1571 prom_printf("OPAL allocation failed !\n");
1572 return;
1573 }
1574
1575 opal_inst = call_prom("open", 1, 1, ADDR("/ibm,opal"));
1576 if (!IHANDLE_VALID(opal_inst)) {
1577 prom_printf("opening opal package failed (%x)\n", opal_inst);
1578 return;
1579 }
1580
1581 prom_printf("instantiating opal at 0x%x...", base);
1582
1583 if (call_prom_ret("call-method", 4, 3, rets,
1584 ADDR("load-opal-runtime"),
1585 opal_inst,
1586 base >> 32, base & 0xffffffff) != 0
1587 || (rets[0] == 0 && rets[1] == 0)) {
1588 prom_printf(" failed\n");
1589 return;
1590 }
1591 entry = (((u64)rets[0]) << 32) | rets[1];
1592
1593 prom_printf(" done\n");
1594
1595 reserve_mem(base, size);
1596
1597 prom_debug("opal base = 0x%x\n", base);
1598 prom_debug("opal align = 0x%x\n", align);
1599 prom_debug("opal entry = 0x%x\n", entry);
1600 prom_debug("opal size = 0x%x\n", (long)size);
1601
1602 prom_setprop(opal_node, "/ibm,opal", "opal-base-address",
1603 &base, sizeof(base));
1604 prom_setprop(opal_node, "/ibm,opal", "opal-entry-address",
1605 &entry, sizeof(entry));
1606
1607#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1608 prom_opal_base = base;
1609 prom_opal_entry = entry;
1610#endif
1611 prom_debug("prom_instantiate_opal: end...\n");
1612}
1613
1614#endif /* CONFIG_PPC_POWERNV */
1615
1616/*
1617 * Allocate room for and instantiate RTAS
1618 */
1619static void __init prom_instantiate_rtas(void)
1620{
1621 phandle rtas_node;
1622 ihandle rtas_inst;
1623 u32 base, entry = 0;
1624 __be32 val;
1625 u32 size = 0;
1626
1627 prom_debug("prom_instantiate_rtas: start...\n");
1628
1629 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1630 prom_debug("rtas_node: %x\n", rtas_node);
1631 if (!PHANDLE_VALID(rtas_node))
1632 return;
1633
1634 val = 0;
1635 prom_getprop(rtas_node, "rtas-size", &val, sizeof(size));
1636 size = be32_to_cpu(val);
1637 if (size == 0)
1638 return;
1639
1640 base = alloc_down(size, PAGE_SIZE, 0);
1641 if (base == 0)
1642 prom_panic("Could not allocate memory for RTAS\n");
1643
1644 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1645 if (!IHANDLE_VALID(rtas_inst)) {
1646 prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1647 return;
1648 }
1649
1650 prom_printf("instantiating rtas at 0x%x...", base);
1651
1652 if (call_prom_ret("call-method", 3, 2, &entry,
1653 ADDR("instantiate-rtas"),
1654 rtas_inst, base) != 0
1655 || entry == 0) {
1656 prom_printf(" failed\n");
1657 return;
1658 }
1659 prom_printf(" done\n");
1660
1661 reserve_mem(base, size);
1662
1663 val = cpu_to_be32(base);
1664 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1665 &val, sizeof(val));
1666 val = cpu_to_be32(entry);
1667 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1668 &val, sizeof(val));
1669
1670 /* Check if it supports "query-cpu-stopped-state" */
1671 if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1672 &val, sizeof(val)) != PROM_ERROR)
1673 rtas_has_query_cpu_stopped = true;
1674
1675 prom_debug("rtas base = 0x%x\n", base);
1676 prom_debug("rtas entry = 0x%x\n", entry);
1677 prom_debug("rtas size = 0x%x\n", (long)size);
1678
1679 prom_debug("prom_instantiate_rtas: end...\n");
1680}
1681
1682#ifdef CONFIG_PPC64
1683/*
1684 * Allocate room for and instantiate Stored Measurement Log (SML)
1685 */
1686static void __init prom_instantiate_sml(void)
1687{
1688 phandle ibmvtpm_node;
1689 ihandle ibmvtpm_inst;
1690 u32 entry = 0, size = 0, succ = 0;
1691 u64 base;
1692 __be32 val;
1693
1694 prom_debug("prom_instantiate_sml: start...\n");
1695
1696 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
1697 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1698 if (!PHANDLE_VALID(ibmvtpm_node))
1699 return;
1700
1701 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
1702 if (!IHANDLE_VALID(ibmvtpm_inst)) {
1703 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1704 return;
1705 }
1706
1707 if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
1708 &val, sizeof(val)) != PROM_ERROR) {
1709 if (call_prom_ret("call-method", 2, 2, &succ,
1710 ADDR("reformat-sml-to-efi-alignment"),
1711 ibmvtpm_inst) != 0 || succ == 0) {
1712 prom_printf("Reformat SML to EFI alignment failed\n");
1713 return;
1714 }
1715
1716 if (call_prom_ret("call-method", 2, 2, &size,
1717 ADDR("sml-get-allocated-size"),
1718 ibmvtpm_inst) != 0 || size == 0) {
1719 prom_printf("SML get allocated size failed\n");
1720 return;
1721 }
1722 } else {
1723 if (call_prom_ret("call-method", 2, 2, &size,
1724 ADDR("sml-get-handover-size"),
1725 ibmvtpm_inst) != 0 || size == 0) {
1726 prom_printf("SML get handover size failed\n");
1727 return;
1728 }
1729 }
1730
1731 base = alloc_down(size, PAGE_SIZE, 0);
1732 if (base == 0)
1733 prom_panic("Could not allocate memory for sml\n");
1734
1735 prom_printf("instantiating sml at 0x%x...", base);
1736
1737 memset((void *)base, 0, size);
1738
1739 if (call_prom_ret("call-method", 4, 2, &entry,
1740 ADDR("sml-handover"),
1741 ibmvtpm_inst, size, base) != 0 || entry == 0) {
1742 prom_printf("SML handover failed\n");
1743 return;
1744 }
1745 prom_printf(" done\n");
1746
1747 reserve_mem(base, size);
1748
1749 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
1750 &base, sizeof(base));
1751 prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
1752 &size, sizeof(size));
1753
1754 prom_debug("sml base = 0x%x\n", base);
1755 prom_debug("sml size = 0x%x\n", (long)size);
1756
1757 prom_debug("prom_instantiate_sml: end...\n");
1758}
1759
1760/*
1761 * Allocate room for and initialize TCE tables
1762 */
1763#ifdef __BIG_ENDIAN__
1764static void __init prom_initialize_tce_table(void)
1765{
1766 phandle node;
1767 ihandle phb_node;
1768 char compatible[64], type[64], model[64];
1769 char *path = prom_scratch;
1770 u64 base, align;
1771 u32 minalign, minsize;
1772 u64 tce_entry, *tce_entryp;
1773 u64 local_alloc_top, local_alloc_bottom;
1774 u64 i;
1775
1776 if (prom_iommu_off)
1777 return;
1778
1779 prom_debug("starting prom_initialize_tce_table\n");
1780
1781 /* Cache current top of allocs so we reserve a single block */
1782 local_alloc_top = alloc_top_high;
1783 local_alloc_bottom = local_alloc_top;
1784
1785 /* Search all nodes looking for PHBs. */
1786 for (node = 0; prom_next_node(&node); ) {
1787 compatible[0] = 0;
1788 type[0] = 0;
1789 model[0] = 0;
1790 prom_getprop(node, "compatible",
1791 compatible, sizeof(compatible));
1792 prom_getprop(node, "device_type", type, sizeof(type));
1793 prom_getprop(node, "model", model, sizeof(model));
1794
1795 if ((type[0] == 0) || (strstr(type, "pci") == NULL))
1796 continue;
1797
1798 /* Keep the old logic intact to avoid regression. */
1799 if (compatible[0] != 0) {
1800 if ((strstr(compatible, "python") == NULL) &&
1801 (strstr(compatible, "Speedwagon") == NULL) &&
1802 (strstr(compatible, "Winnipeg") == NULL))
1803 continue;
1804 } else if (model[0] != 0) {
1805 if ((strstr(model, "ython") == NULL) &&
1806 (strstr(model, "peedwagon") == NULL) &&
1807 (strstr(model, "innipeg") == NULL))
1808 continue;
1809 }
1810
1811 if (prom_getprop(node, "tce-table-minalign", &minalign,
1812 sizeof(minalign)) == PROM_ERROR)
1813 minalign = 0;
1814 if (prom_getprop(node, "tce-table-minsize", &minsize,
1815 sizeof(minsize)) == PROM_ERROR)
1816 minsize = 4UL << 20;
1817
1818 /*
1819 * Even though we read what OF wants, we just set the table
1820 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
1821 * By doing this, we avoid the pitfalls of trying to DMA to
1822 * MMIO space and the DMA alias hole.
1823 */
1824 minsize = 4UL << 20;
1825
1826 /* Align to the greater of the align or size */
1827 align = max(minalign, minsize);
1828 base = alloc_down(minsize, align, 1);
1829 if (base == 0)
1830 prom_panic("ERROR, cannot find space for TCE table.\n");
1831 if (base < local_alloc_bottom)
1832 local_alloc_bottom = base;
1833
1834 /* It seems OF doesn't null-terminate the path :-( */
1835 memset(path, 0, PROM_SCRATCH_SIZE);
1836 /* Call OF to setup the TCE hardware */
1837 if (call_prom("package-to-path", 3, 1, node,
1838 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1839 prom_printf("package-to-path failed\n");
1840 }
1841
1842 /* Save away the TCE table attributes for later use. */
1843 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
1844 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
1845
1846 prom_debug("TCE table: %s\n", path);
1847 prom_debug("\tnode = 0x%x\n", node);
1848 prom_debug("\tbase = 0x%x\n", base);
1849 prom_debug("\tsize = 0x%x\n", minsize);
1850
1851 /* Initialize the table to have a one-to-one mapping
1852 * over the allocated size.
1853 */
1854 tce_entryp = (u64 *)base;
1855 for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
1856 tce_entry = (i << PAGE_SHIFT);
1857 tce_entry |= 0x3;
1858 *tce_entryp = tce_entry;
1859 }
1860
1861 prom_printf("opening PHB %s", path);
1862 phb_node = call_prom("open", 1, 1, path);
1863 if (phb_node == 0)
1864 prom_printf("... failed\n");
1865 else
1866 prom_printf("... done\n");
1867
1868 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1869 phb_node, -1, minsize,
1870 (u32) base, (u32) (base >> 32));
1871 call_prom("close", 1, 0, phb_node);
1872 }
1873
1874 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1875
1876 /* These are only really needed if there is a memory limit in
1877 * effect, but we don't know so export them always. */
1878 prom_tce_alloc_start = local_alloc_bottom;
1879 prom_tce_alloc_end = local_alloc_top;
1880
1881 /* Flag the first invalid entry */
1882 prom_debug("ending prom_initialize_tce_table\n");
1883}
1884#endif /* __BIG_ENDIAN__ */
1885#endif /* CONFIG_PPC64 */
1886
1887/*
1888 * With CHRP SMP we need to use the OF to start the other processors.
1889 * We can't wait until smp_boot_cpus (the OF is trashed by then)
1890 * so we have to put the processors into a holding pattern controlled
1891 * by the kernel (not OF) before we destroy the OF.
1892 *
1893 * This uses a chunk of low memory, puts some holding pattern
1894 * code there and sends the other processors off to there until
1895 * smp_boot_cpus tells them to do something. The holding pattern
1896 * checks that address until its cpu # is there, when it is that
1897 * cpu jumps to __secondary_start(). smp_boot_cpus() takes care
1898 * of setting those values.
1899 *
1900 * We also use physical address 0x4 here to tell when a cpu
1901 * is in its holding pattern code.
1902 *
1903 * -- Cort
1904 */
1905/*
1906 * We want to reference the copy of __secondary_hold_* in the
1907 * 0 - 0x100 address range
1908 */
1909#define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
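/*
 * Purely illustrative: if __secondary_hold were linked at an address ending
 * in 0x60, LOW_ADDR(__secondary_hold) would yield 0x60, i.e. the copy of it
 * that lives in the first 0x100 bytes of memory.
 */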
1910
1911static void __init prom_hold_cpus(void)
1912{
1913 unsigned long i;
1914 phandle node;
1915 char type[64];
1916 unsigned long *spinloop
1917 = (void *) LOW_ADDR(__secondary_hold_spinloop);
1918 unsigned long *acknowledge
1919 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
1920 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1921
1922 /*
1923 * On pseries, if RTAS supports "query-cpu-stopped-state",
1924 * we skip this stage, the CPUs will be started by the
1925 * kernel using RTAS.
1926 */
1927 if ((of_platform == PLATFORM_PSERIES ||
1928 of_platform == PLATFORM_PSERIES_LPAR) &&
1929 rtas_has_query_cpu_stopped) {
1930 prom_printf("prom_hold_cpus: skipped\n");
1931 return;
1932 }
1933
1934 prom_debug("prom_hold_cpus: start...\n");
1935 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
1936 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
1937 prom_debug(" 1) acknowledge = 0x%x\n",
1938 (unsigned long)acknowledge);
1939 prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
1940 prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);
1941
1942 /* Set the common spinloop variable, so all of the secondary cpus
1943 * will block when they are awakened from their OF spinloop.
1944 * This must occur for both SMP and non SMP kernels, since OF will
1945 * be trashed when we move the kernel.
1946 */
1947 *spinloop = 0;
1948
1949 /* look for cpus */
1950 for (node = 0; prom_next_node(&node); ) {
1951 unsigned int cpu_no;
1952 __be32 reg;
1953
1954 type[0] = 0;
1955 prom_getprop(node, "device_type", type, sizeof(type));
1956 if (strcmp(type, "cpu") != 0)
1957 continue;
1958
1959 /* Skip non-configured cpus. */
1960 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1961 if (strcmp(type, "okay") != 0)
1962 continue;
1963
1964 reg = cpu_to_be32(-1); /* make sparse happy */
1965		prom_getprop(node, "reg", &reg, sizeof(reg));
1966 cpu_no = be32_to_cpu(reg);
1967
1968 prom_debug("cpu hw idx = %lu\n", cpu_no);
1969
1970 /* Init the acknowledge var which will be reset by
1971 * the secondary cpu when it awakens from its OF
1972 * spinloop.
1973 */
1974 *acknowledge = (unsigned long)-1;
1975
1976 if (cpu_no != prom.cpu) {
1977 /* Primary Thread of non-boot cpu or any thread */
1978 prom_printf("starting cpu hw idx %lu... ", cpu_no);
1979 call_prom("start-cpu", 3, 0, node,
1980 secondary_hold, cpu_no);
1981
1982 for (i = 0; (i < 100000000) &&
1983 (*acknowledge == ((unsigned long)-1)); i++ )
1984 mb();
1985
1986 if (*acknowledge == cpu_no)
1987 prom_printf("done\n");
1988 else
1989 prom_printf("failed: %x\n", *acknowledge);
1990 }
1991#ifdef CONFIG_SMP
1992 else
1993 prom_printf("boot cpu hw idx %lu\n", cpu_no);
1994#endif /* CONFIG_SMP */
1995 }
1996
1997 prom_debug("prom_hold_cpus: end...\n");
1998}
1999
2000
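/*
 * Record the client-interface entry point passed in by Open Firmware and
 * look up the handles everything else relies on: /chosen and the root of
 * the device tree.
 */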
2001static void __init prom_init_client_services(unsigned long pp)
2002{
2003 /* Get a handle to the prom entry point before anything else */
2004 prom_entry = pp;
2005
2006 /* get a handle for the stdout device */
2007 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
2008 if (!PHANDLE_VALID(prom.chosen))
2009 prom_panic("cannot find chosen"); /* msg won't be printed :( */
2010
2011 /* get device tree root */
2012 prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
2013 if (!PHANDLE_VALID(prom.root))
2014 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
2015
2016 prom.mmumap = 0;
2017}
2018
2019#ifdef CONFIG_PPC32
2020/*
2021 * For really old powermacs, we need to map things we claim.
2022 * For that, we need the ihandle of the mmu.
2023 * Also, on the longtrail, we need to work around other bugs.
2024 */
2025static void __init prom_find_mmu(void)
2026{
2027 phandle oprom;
2028 char version[64];
2029
2030 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
2031 if (!PHANDLE_VALID(oprom))
2032 return;
2033 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
2034 return;
2035 version[sizeof(version) - 1] = 0;
2036 /* XXX might need to add other versions here */
2037 if (strcmp(version, "Open Firmware, 1.0.5") == 0)
2038 of_workarounds = OF_WA_CLAIM;
2039 else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
2040 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
2041 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
2042 } else
2043 return;
2044 prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
2045 prom_getprop(prom.chosen, "mmu", &prom.mmumap,
2046 sizeof(prom.mmumap));
2047 prom.mmumap = be32_to_cpu(prom.mmumap);
2048 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
2049 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
2050}
2051#else
2052#define prom_find_mmu()
2053#endif
2054
2055static void __init prom_init_stdout(void)
2056{
2057 char *path = of_stdout_device;
2058 char type[16];
2059 phandle stdout_node;
2060 __be32 val;
2061
2062 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
2063 prom_panic("cannot find stdout");
2064
2065 prom.stdout = be32_to_cpu(val);
2066
2067 /* Get the full OF pathname of the stdout device */
2068 memset(path, 0, 256);
2069 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
2070 prom_printf("OF stdout device is: %s\n", of_stdout_device);
2071 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
2072 path, strlen(path) + 1);
2073
2074 /* instance-to-package fails on PA-Semi */
2075 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
2076 if (stdout_node != PROM_ERROR) {
2077 val = cpu_to_be32(stdout_node);
2078 prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
2079 &val, sizeof(val));
2080
2081 /* If it's a display, note it */
2082 memset(type, 0, sizeof(type));
2083 prom_getprop(stdout_node, "device_type", type, sizeof(type));
2084 if (strcmp(type, "display") == 0)
2085 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
2086 }
2087}
2088
2089static int __init prom_find_machine_type(void)
2090{
2091 char compat[256];
2092 int len, i = 0;
2093#ifdef CONFIG_PPC64
2094 phandle rtas;
2095 int x;
2096#endif
2097
2098 /* Look for a PowerMac or a Cell */
2099 len = prom_getprop(prom.root, "compatible",
2100 compat, sizeof(compat)-1);
2101 if (len > 0) {
2102 compat[len] = 0;
2103 while (i < len) {
2104 char *p = &compat[i];
2105 int sl = strlen(p);
2106 if (sl == 0)
2107 break;
2108 if (strstr(p, "Power Macintosh") ||
2109 strstr(p, "MacRISC"))
2110 return PLATFORM_POWERMAC;
2111#ifdef CONFIG_PPC64
2112 /* We must make sure we don't detect the IBM Cell
2113 * blades as pSeries due to some firmware issues,
2114 * so we do it here.
2115 */
2116 if (strstr(p, "IBM,CBEA") ||
2117 strstr(p, "IBM,CPBW-1.0"))
2118 return PLATFORM_GENERIC;
2119#endif /* CONFIG_PPC64 */
2120 i += sl + 1;
2121 }
2122 }
2123#ifdef CONFIG_PPC64
2124 /* Try to detect OPAL */
2125 if (PHANDLE_VALID(call_prom("finddevice", 1, 1, ADDR("/ibm,opal"))))
2126 return PLATFORM_OPAL;
2127
2128 /* Try to figure out if it's an IBM pSeries or any other
2129 * PAPR compliant platform. We assume it is if :
2130 * - /device_type is "chrp" (please, do NOT use that for future
2131	 *   non-IBM designs!)
2132 * - it has /rtas
2133 */
2134 len = prom_getprop(prom.root, "device_type",
2135 compat, sizeof(compat)-1);
2136 if (len <= 0)
2137 return PLATFORM_GENERIC;
2138 if (strcmp(compat, "chrp"))
2139 return PLATFORM_GENERIC;
2140
2141 /* Default to pSeries. We need to know if we are running LPAR */
2142 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2143 if (!PHANDLE_VALID(rtas))
2144 return PLATFORM_GENERIC;
2145 x = prom_getproplen(rtas, "ibm,hypertas-functions");
2146 if (x != PROM_ERROR) {
2147 prom_debug("Hypertas detected, assuming LPAR !\n");
2148 return PLATFORM_PSERIES_LPAR;
2149 }
2150 return PLATFORM_PSERIES;
2151#else
2152 return PLATFORM_GENERIC;
2153#endif
2154}
2155
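/*
 * Program one palette entry on an open display instance via the OF
 * "color!" method.
 */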
2156static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2157{
2158 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2159}
2160
2161/*
2162 * If we have a display that we don't know how to drive,
2163 * we will want to try to execute OF's open method for it
2164 * later. However, OF will probably fall over if we do that
2165 * after we've taken over the MMU.
2166 * So we check whether we will need to open the display,
2167 * and if so, open it now.
2168 */
2169static void __init prom_check_displays(void)
2170{
2171 char type[16], *path;
2172 phandle node;
2173 ihandle ih;
2174 int i;
2175
2176 static unsigned char default_colors[] = {
2177 0x00, 0x00, 0x00,
2178 0x00, 0x00, 0xaa,
2179 0x00, 0xaa, 0x00,
2180 0x00, 0xaa, 0xaa,
2181 0xaa, 0x00, 0x00,
2182 0xaa, 0x00, 0xaa,
2183 0xaa, 0xaa, 0x00,
2184 0xaa, 0xaa, 0xaa,
2185 0x55, 0x55, 0x55,
2186 0x55, 0x55, 0xff,
2187 0x55, 0xff, 0x55,
2188 0x55, 0xff, 0xff,
2189 0xff, 0x55, 0x55,
2190 0xff, 0x55, 0xff,
2191 0xff, 0xff, 0x55,
2192 0xff, 0xff, 0xff
2193 };
2194 const unsigned char *clut;
2195
2196 prom_debug("Looking for displays\n");
2197 for (node = 0; prom_next_node(&node); ) {
2198 memset(type, 0, sizeof(type));
2199 prom_getprop(node, "device_type", type, sizeof(type));
2200 if (strcmp(type, "display") != 0)
2201 continue;
2202
2203 /* It seems OF doesn't null-terminate the path :-( */
2204 path = prom_scratch;
2205 memset(path, 0, PROM_SCRATCH_SIZE);
2206
2207 /*
2208 * leave some room at the end of the path for appending extra
2209 * arguments
2210 */
2211 if (call_prom("package-to-path", 3, 1, node, path,
2212 PROM_SCRATCH_SIZE-10) == PROM_ERROR)
2213 continue;
2214 prom_printf("found display : %s, opening... ", path);
2215
2216 ih = call_prom("open", 1, 1, path);
2217 if (ih == 0) {
2218 prom_printf("failed\n");
2219 continue;
2220 }
2221
2222 /* Success */
2223 prom_printf("done\n");
2224 prom_setprop(node, path, "linux,opened", NULL, 0);
2225
2226 /* Setup a usable color table when the appropriate
2227 * method is available. Should update this to set-colors */
2228 clut = default_colors;
2229 for (i = 0; i < 16; i++, clut += 3)
2230 if (prom_set_color(ih, i, clut[0], clut[1],
2231 clut[2]) != 0)
2232 break;
2233
2234#ifdef CONFIG_LOGO_LINUX_CLUT224
2235 clut = PTRRELOC(logo_linux_clut224.clut);
2236 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2237 if (prom_set_color(ih, i + 32, clut[0], clut[1],
2238 clut[2]) != 0)
2239 break;
2240#endif /* CONFIG_LOGO_LINUX_CLUT224 */
2241
2242#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2243 if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2244 PROM_ERROR) {
2245 u32 width, height, pitch, addr;
2246
2247 prom_printf("Setting btext !\n");
2248 prom_getprop(node, "width", &width, 4);
2249 prom_getprop(node, "height", &height, 4);
2250 prom_getprop(node, "linebytes", &pitch, 4);
2251 prom_getprop(node, "address", &addr, 4);
2252 prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2253 width, height, pitch, addr);
2254 btext_setup_display(width, height, 8, pitch, addr);
2255 }
2256#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2257 }
2258}
2259
2260
2261/* Return a pointer to "needed" bytes in the current device-tree chunk, claiming a fresh chunk from the allocator when the current one is exhausted. */
2262static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2263 unsigned long needed, unsigned long align)
2264{
2265 void *ret;
2266
2267 *mem_start = _ALIGN(*mem_start, align);
2268 while ((*mem_start + needed) > *mem_end) {
2269 unsigned long room, chunk;
2270
2271 prom_debug("Chunk exhausted, claiming more at %x...\n",
2272 alloc_bottom);
2273 room = alloc_top - alloc_bottom;
2274 if (room > DEVTREE_CHUNK_SIZE)
2275 room = DEVTREE_CHUNK_SIZE;
2276 if (room < PAGE_SIZE)
2277 prom_panic("No memory for flatten_device_tree "
2278 "(no room)\n");
2279 chunk = alloc_up(room, 0);
2280 if (chunk == 0)
2281 prom_panic("No memory for flatten_device_tree "
2282 "(claim failed)\n");
2283 *mem_end = chunk + room;
2284 }
2285
2286 ret = (void *)*mem_start;
2287 *mem_start += needed;
2288
2289 return ret;
2290}
2291
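/* Append one 32-bit big-endian token to the flattened tree being built,
 * growing the current chunk via make_room() if necessary. */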
2292#define dt_push_token(token, mem_start, mem_end) do { \
2293 void *room = make_room(mem_start, mem_end, 4, 4); \
2294 *(__be32 *)room = cpu_to_be32(token); \
2295 } while(0)
2296
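/*
 * Look up a property name in the string table built so far and return its
 * offset from dt_string_start, or 0 if it is not there yet (0 is never a
 * valid offset because of the 4-byte hole at the start of the table).
 */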
2297static unsigned long __init dt_find_string(char *str)
2298{
2299 char *s, *os;
2300
2301 s = os = (char *)dt_string_start;
2302 s += 4;
2303 while (s < (char *)dt_string_end) {
2304 if (strcmp(s, str) == 0)
2305 return s - os;
2306 s += strlen(s) + 1;
2307 }
2308 return 0;
2309}
2310
2311/*
2312 * The Open Firmware 1275 specification states property names must be 31 bytes
2313 * or less; however, not all firmwares obey this. Make it 64 bytes to be safe.
2314 */
2315#define MAX_PROPERTY_NAME 64
2316
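/*
 * First pass over the device tree: collect every property name into the
 * string table, de-duplicating through dt_find_string(), then recurse
 * into the children of this node.
 */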
2317static void __init scan_dt_build_strings(phandle node,
2318 unsigned long *mem_start,
2319 unsigned long *mem_end)
2320{
2321 char *prev_name, *namep, *sstart;
2322 unsigned long soff;
2323 phandle child;
2324
2325 sstart = (char *)dt_string_start;
2326
2327 /* get and store all property names */
2328 prev_name = "";
2329 for (;;) {
2330 /* 64 is max len of name including nul. */
2331 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2332 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2333			/* No more properties: unwind alloc */
2334 *mem_start = (unsigned long)namep;
2335 break;
2336 }
2337
2338 /* skip "name" */
2339 if (strcmp(namep, "name") == 0) {
2340 *mem_start = (unsigned long)namep;
2341 prev_name = "name";
2342 continue;
2343 }
2344 /* get/create string entry */
2345 soff = dt_find_string(namep);
2346 if (soff != 0) {
2347 *mem_start = (unsigned long)namep;
2348 namep = sstart + soff;
2349 } else {
2350 /* Trim off some if we can */
2351 *mem_start = (unsigned long)namep + strlen(namep) + 1;
2352 dt_string_end = *mem_start;
2353 }
2354 prev_name = namep;
2355 }
2356
2357 /* do all our children */
2358 child = call_prom("child", 1, 1, node);
2359 while (child != 0) {
2360 scan_dt_build_strings(child, mem_start, mem_end);
2361 child = call_prom("peer", 1, 1, child);
2362 }
2363}
2364
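/*
 * Second pass: emit the structure block for this node, i.e. an
 * OF_DT_BEGIN_NODE token, the node's unit name, one OF_DT_PROP record
 * per property (length, string-table offset, value), the children, and
 * a closing OF_DT_END_NODE token.
 */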
2365static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2366 unsigned long *mem_end)
2367{
2368 phandle child;
2369 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2370 unsigned long soff;
2371 unsigned char *valp;
2372 static char pname[MAX_PROPERTY_NAME];
2373 int l, room, has_phandle = 0;
2374
2375 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2376
2377 /* get the node's full name */
2378 namep = (char *)*mem_start;
2379 room = *mem_end - *mem_start;
2380 if (room > 255)
2381 room = 255;
2382 l = call_prom("package-to-path", 3, 1, node, namep, room);
2383 if (l >= 0) {
2384 /* Didn't fit? Get more room. */
2385 if (l >= room) {
2386 if (l >= *mem_end - *mem_start)
2387 namep = make_room(mem_start, mem_end, l+1, 1);
2388 call_prom("package-to-path", 3, 1, node, namep, l);
2389 }
2390 namep[l] = '\0';
2391
2392 /* Fixup an Apple bug where they have bogus \0 chars in the
2393 * middle of the path in some properties, and extract
2394 * the unit name (everything after the last '/').
2395 */
2396 for (lp = p = namep, ep = namep + l; p < ep; p++) {
2397 if (*p == '/')
2398 lp = namep;
2399 else if (*p != 0)
2400 *lp++ = *p;
2401 }
2402 *lp = 0;
2403 *mem_start = _ALIGN((unsigned long)lp + 1, 4);
2404 }
2405
2406 /* get it again for debugging */
2407 path = prom_scratch;
2408 memset(path, 0, PROM_SCRATCH_SIZE);
2409 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
2410
2411 /* get and store all properties */
2412 prev_name = "";
2413 sstart = (char *)dt_string_start;
2414 for (;;) {
2415 if (call_prom("nextprop", 3, 1, node, prev_name,
2416 pname) != 1)
2417 break;
2418
2419 /* skip "name" */
2420 if (strcmp(pname, "name") == 0) {
2421 prev_name = "name";
2422 continue;
2423 }
2424
2425 /* find string offset */
2426 soff = dt_find_string(pname);
2427 if (soff == 0) {
2428 prom_printf("WARNING: Can't find string index for"
2429 " <%s>, node %s\n", pname, path);
2430 break;
2431 }
2432 prev_name = sstart + soff;
2433
2434 /* get length */
2435 l = call_prom("getproplen", 2, 1, node, pname);
2436
2437 /* sanity checks */
2438 if (l == PROM_ERROR)
2439 continue;
2440
2441 /* push property head */
2442 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2443 dt_push_token(l, mem_start, mem_end);
2444 dt_push_token(soff, mem_start, mem_end);
2445
2446 /* push property content */
2447 valp = make_room(mem_start, mem_end, l, 4);
2448 call_prom("getprop", 4, 1, node, pname, valp, l);
2449 *mem_start = _ALIGN(*mem_start, 4);
2450
2451 if (!strcmp(pname, "phandle"))
2452 has_phandle = 1;
2453 }
2454
2455 /* Add a "linux,phandle" property if no "phandle" property already
2456 * existed (can happen with OPAL)
2457 */
2458 if (!has_phandle) {
2459 soff = dt_find_string("linux,phandle");
2460 if (soff == 0)
2461 prom_printf("WARNING: Can't find string index for"
2462				    " <linux,phandle>, node %s\n", path);
2463 else {
2464 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2465 dt_push_token(4, mem_start, mem_end);
2466 dt_push_token(soff, mem_start, mem_end);
2467 valp = make_room(mem_start, mem_end, 4, 4);
2468 *(__be32 *)valp = cpu_to_be32(node);
2469 }
2470 }
2471
2472 /* do all our children */
2473 child = call_prom("child", 1, 1, node);
2474 while (child != 0) {
2475 scan_dt_build_struct(child, mem_start, mem_end);
2476 child = call_prom("peer", 1, 1, child);
2477 }
2478
2479 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2480}
2481
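/*
 * Build the flattened device-tree blob that is later handed to the kernel.
 * The resulting layout is:
 *
 *	boot_param_header
 *	memory reserve map
 *	strings block   (property names, built by scan_dt_build_strings)
 *	structure block (nodes and properties, built by scan_dt_build_struct)
 */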
2482static void __init flatten_device_tree(void)
2483{
2484 phandle root;
2485 unsigned long mem_start, mem_end, room;
2486 struct boot_param_header *hdr;
2487 char *namep;
2488 u64 *rsvmap;
2489
2490 /*
2491 * Check how much room we have between alloc top & bottom (+/- a
2492 * few pages), crop to 1MB, as this is our "chunk" size
2493 */
2494 room = alloc_top - alloc_bottom - 0x4000;
2495 if (room > DEVTREE_CHUNK_SIZE)
2496 room = DEVTREE_CHUNK_SIZE;
2497 prom_debug("starting device tree allocs at %x\n", alloc_bottom);
2498
2499 /* Now try to claim that */
2500 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2501 if (mem_start == 0)
2502 prom_panic("Can't allocate initial device-tree chunk\n");
2503 mem_end = mem_start + room;
2504
2505 /* Get root of tree */
2506 root = call_prom("peer", 1, 1, (phandle)0);
2507 if (root == (phandle)0)
2508 prom_panic ("couldn't get device tree root\n");
2509
2510 /* Build header and make room for mem rsv map */
2511 mem_start = _ALIGN(mem_start, 4);
2512 hdr = make_room(&mem_start, &mem_end,
2513 sizeof(struct boot_param_header), 4);
2514 dt_header_start = (unsigned long)hdr;
2515 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2516
2517 /* Start of strings */
2518 mem_start = PAGE_ALIGN(mem_start);
2519 dt_string_start = mem_start;
2520 mem_start += 4; /* hole */
2521
2522 /* Add "linux,phandle" in there, we'll need it */
2523 namep = make_room(&mem_start, &mem_end, 16, 1);
2524 strcpy(namep, "linux,phandle");
2525 mem_start = (unsigned long)namep + strlen(namep) + 1;
2526
2527 /* Build string array */
2528 prom_printf("Building dt strings...\n");
2529 scan_dt_build_strings(root, &mem_start, &mem_end);
2530 dt_string_end = mem_start;
2531
2532 /* Build structure */
2533 mem_start = PAGE_ALIGN(mem_start);
2534 dt_struct_start = mem_start;
2535 prom_printf("Building dt structure...\n");
2536 scan_dt_build_struct(root, &mem_start, &mem_end);
2537 dt_push_token(OF_DT_END, &mem_start, &mem_end);
2538 dt_struct_end = PAGE_ALIGN(mem_start);
2539
2540 /* Finish header */
2541 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2542 hdr->magic = cpu_to_be32(OF_DT_HEADER);
2543 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2544 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2545 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2546 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2547 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2548 hdr->version = cpu_to_be32(OF_DT_VERSION);
2549 /* Version 16 is not backward compatible */
2550 hdr->last_comp_version = cpu_to_be32(0x10);
2551
2552 /* Copy the reserve map in */
2553 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2554
2555#ifdef DEBUG_PROM
2556 {
2557 int i;
2558 prom_printf("reserved memory map:\n");
2559 for (i = 0; i < mem_reserve_cnt; i++)
2560 prom_printf(" %x - %x\n",
2561 be64_to_cpu(mem_reserve_map[i].base),
2562 be64_to_cpu(mem_reserve_map[i].size));
2563 }
2564#endif
2565 /* Bump mem_reserve_cnt to cause further reservations to fail
2566 * since it's too late.
2567 */
2568 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2569
2570 prom_printf("Device tree strings 0x%x -> 0x%x\n",
2571 dt_string_start, dt_string_end);
2572 prom_printf("Device tree struct 0x%x -> 0x%x\n",
2573 dt_struct_start, dt_struct_end);
2574}
2575
2576#ifdef CONFIG_PPC_MAPLE
2577/* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2578 * The values are bad, and it doesn't even have the right number of cells. */
2579static void __init fixup_device_tree_maple(void)
2580{
2581 phandle isa;
2582 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2583 u32 isa_ranges[6];
2584 char *name;
2585
2586 name = "/ht@0/isa@4";
2587 isa = call_prom("finddevice", 1, 1, ADDR(name));
2588 if (!PHANDLE_VALID(isa)) {
2589 name = "/ht@0/isa@6";
2590 isa = call_prom("finddevice", 1, 1, ADDR(name));
2591 rloc = 0x01003000; /* IO space; PCI device = 6 */
2592 }
2593 if (!PHANDLE_VALID(isa))
2594 return;
2595
2596 if (prom_getproplen(isa, "ranges") != 12)
2597 return;
2598 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2599 == PROM_ERROR)
2600 return;
2601
2602 if (isa_ranges[0] != 0x1 ||
2603 isa_ranges[1] != 0xf4000000 ||
2604 isa_ranges[2] != 0x00010000)
2605 return;
2606
2607 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2608
2609 isa_ranges[0] = 0x1;
2610 isa_ranges[1] = 0x0;
2611 isa_ranges[2] = rloc;
2612 isa_ranges[3] = 0x0;
2613 isa_ranges[4] = 0x0;
2614 isa_ranges[5] = 0x00010000;
2615 prom_setprop(isa, name, "ranges",
2616 isa_ranges, sizeof(isa_ranges));
2617}
2618
2619#define CPC925_MC_START 0xf8000000
2620#define CPC925_MC_LENGTH 0x1000000
2621/* The values for memory-controller don't have the right number of cells */
2622static void __init fixup_device_tree_maple_memory_controller(void)
2623{
2624 phandle mc;
2625 u32 mc_reg[4];
2626 char *name = "/hostbridge@f8000000";
2627 u32 ac, sc;
2628
2629 mc = call_prom("finddevice", 1, 1, ADDR(name));
2630 if (!PHANDLE_VALID(mc))
2631 return;
2632
2633 if (prom_getproplen(mc, "reg") != 8)
2634 return;
2635
2636 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2637 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2638 if ((ac != 2) || (sc != 2))
2639 return;
2640
2641 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2642 return;
2643
2644 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2645 return;
2646
2647 prom_printf("Fixing up bogus hostbridge on Maple...\n");
2648
2649 mc_reg[0] = 0x0;
2650 mc_reg[1] = CPC925_MC_START;
2651 mc_reg[2] = 0x0;
2652 mc_reg[3] = CPC925_MC_LENGTH;
2653 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2654}
2655#else
2656#define fixup_device_tree_maple()
2657#define fixup_device_tree_maple_memory_controller()
2658#endif
2659
2660#ifdef CONFIG_PPC_CHRP
2661/*
2662 * Pegasos and BriQ lack the "ranges" property in the isa node
2663 * Pegasos needs decimal IRQ 14/15, not hexadecimal
2664 * Pegasos has the IDE configured in legacy mode, but advertised as native
2665 */
2666static void __init fixup_device_tree_chrp(void)
2667{
2668 phandle ph;
2669 u32 prop[6];
2670 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2671 char *name;
2672 int rc;
2673
2674 name = "/pci@80000000/isa@c";
2675 ph = call_prom("finddevice", 1, 1, ADDR(name));
2676 if (!PHANDLE_VALID(ph)) {
2677 name = "/pci@ff500000/isa@6";
2678 ph = call_prom("finddevice", 1, 1, ADDR(name));
2679 rloc = 0x01003000; /* IO space; PCI device = 6 */
2680 }
2681 if (PHANDLE_VALID(ph)) {
2682 rc = prom_getproplen(ph, "ranges");
2683 if (rc == 0 || rc == PROM_ERROR) {
2684 prom_printf("Fixing up missing ISA range on Pegasos...\n");
2685
2686 prop[0] = 0x1;
2687 prop[1] = 0x0;
2688 prop[2] = rloc;
2689 prop[3] = 0x0;
2690 prop[4] = 0x0;
2691 prop[5] = 0x00010000;
2692 prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2693 }
2694 }
2695
2696 name = "/pci@80000000/ide@C,1";
2697 ph = call_prom("finddevice", 1, 1, ADDR(name));
2698 if (PHANDLE_VALID(ph)) {
2699 prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2700 prop[0] = 14;
2701 prop[1] = 0x0;
2702 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2703 prom_printf("Fixing up IDE class-code on Pegasos...\n");
2704 rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2705 if (rc == sizeof(u32)) {
2706 prop[0] &= ~0x5;
2707 prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2708 }
2709 }
2710}
2711#else
2712#define fixup_device_tree_chrp()
2713#endif
2714
2715#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
2716static void __init fixup_device_tree_pmac(void)
2717{
2718 phandle u3, i2c, mpic;
2719 u32 u3_rev;
2720 u32 interrupts[2];
2721 u32 parent;
2722
2723 /* Some G5s have a missing interrupt definition, fix it up here */
2724 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2725 if (!PHANDLE_VALID(u3))
2726 return;
2727 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2728 if (!PHANDLE_VALID(i2c))
2729 return;
2730 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2731 if (!PHANDLE_VALID(mpic))
2732 return;
2733
2734 /* check if proper rev of u3 */
2735 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2736 == PROM_ERROR)
2737 return;
2738 if (u3_rev < 0x35 || u3_rev > 0x39)
2739 return;
2740 /* does it need fixup ? */
2741 if (prom_getproplen(i2c, "interrupts") > 0)
2742 return;
2743
2744 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2745
2746 /* interrupt on this revision of u3 is number 0 and level */
2747 interrupts[0] = 0;
2748 interrupts[1] = 1;
2749 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2750 &interrupts, sizeof(interrupts));
2751 parent = (u32)mpic;
2752 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2753 &parent, sizeof(parent));
2754}
2755#else
2756#define fixup_device_tree_pmac()
2757#endif
2758
2759#ifdef CONFIG_PPC_EFIKA
2760/*
2761 * The MPC5200 FEC driver requires a phy-handle property to tell it how
2762 * to talk to the phy. If the phy-handle property is missing, then this
2763 * function is called to add the appropriate nodes and link it to the
2764 * ethernet node.
2765 */
2766static void __init fixup_device_tree_efika_add_phy(void)
2767{
2768 u32 node;
2769 char prop[64];
2770 int rv;
2771
2772 /* Check if /builtin/ethernet exists - bail if it doesn't */
2773 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2774 if (!PHANDLE_VALID(node))
2775 return;
2776
2777 /* Check if the phy-handle property exists - bail if it does */
2778 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2779 if (!rv)
2780 return;
2781
2782 /*
2783 * At this point the ethernet device doesn't have a phy described.
2784 * Now we need to add the missing phy node and linkage
2785 */
2786
2787 /* Check for an MDIO bus node - if missing then create one */
2788 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
2789 if (!PHANDLE_VALID(node)) {
2790 prom_printf("Adding Ethernet MDIO node\n");
2791 call_prom("interpret", 1, 1,
2792 " s\" /builtin\" find-device"
2793 " new-device"
2794 " 1 encode-int s\" #address-cells\" property"
2795 " 0 encode-int s\" #size-cells\" property"
2796 " s\" mdio\" device-name"
2797 " s\" fsl,mpc5200b-mdio\" encode-string"
2798 " s\" compatible\" property"
2799 " 0xf0003000 0x400 reg"
2800 " 0x2 encode-int"
2801 " 0x5 encode-int encode+"
2802 " 0x3 encode-int encode+"
2803 " s\" interrupts\" property"
2804 " finish-device");
2805	}
2806
2807 /* Check for a PHY device node - if missing then create one and
2808	 * give its phandle to the ethernet node */
2809 node = call_prom("finddevice", 1, 1,
2810 ADDR("/builtin/mdio/ethernet-phy"));
2811 if (!PHANDLE_VALID(node)) {
2812 prom_printf("Adding Ethernet PHY node\n");
2813 call_prom("interpret", 1, 1,
2814 " s\" /builtin/mdio\" find-device"
2815 " new-device"
2816 " s\" ethernet-phy\" device-name"
2817 " 0x10 encode-int s\" reg\" property"
2818 " my-self"
2819 " ihandle>phandle"
2820 " finish-device"
2821 " s\" /builtin/ethernet\" find-device"
2822 " encode-int"
2823 " s\" phy-handle\" property"
2824 " device-end");
2825 }
2826}
2827
2828static void __init fixup_device_tree_efika(void)
2829{
2830 int sound_irq[3] = { 2, 2, 0 };
2831 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
2832 3,4,0, 3,5,0, 3,6,0, 3,7,0,
2833 3,8,0, 3,9,0, 3,10,0, 3,11,0,
2834 3,12,0, 3,13,0, 3,14,0, 3,15,0 };
2835 u32 node;
2836 char prop[64];
2837 int rv, len;
2838
2839	/* Check if we're really running on an EFIKA */
2840 node = call_prom("finddevice", 1, 1, ADDR("/"));
2841 if (!PHANDLE_VALID(node))
2842 return;
2843
2844 rv = prom_getprop(node, "model", prop, sizeof(prop));
2845 if (rv == PROM_ERROR)
2846 return;
2847 if (strcmp(prop, "EFIKA5K2"))
2848 return;
2849
2850 prom_printf("Applying EFIKA device tree fixups\n");
2851
2852 /* Claiming to be 'chrp' is death */
2853 node = call_prom("finddevice", 1, 1, ADDR("/"));
2854 rv = prom_getprop(node, "device_type", prop, sizeof(prop));
2855 if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
2856 prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
2857
2858 /* CODEGEN,description is exposed in /proc/cpuinfo so
2859 fix that too */
2860 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
2861 if (rv != PROM_ERROR && (strstr(prop, "CHRP")))
2862 prom_setprop(node, "/", "CODEGEN,description",
2863 "Efika 5200B PowerPC System",
2864 sizeof("Efika 5200B PowerPC System"));
2865
2866 /* Fixup bestcomm interrupts property */
2867 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
2868 if (PHANDLE_VALID(node)) {
2869 len = prom_getproplen(node, "interrupts");
2870 if (len == 12) {
2871 prom_printf("Fixing bestcomm interrupts property\n");
2872			prom_setprop(node, "/builtin/bestcomm", "interrupts",
2873 bcomm_irq, sizeof(bcomm_irq));
2874 }
2875 }
2876
2877 /* Fixup sound interrupts property */
2878 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
2879 if (PHANDLE_VALID(node)) {
2880 rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
2881 if (rv == PROM_ERROR) {
2882 prom_printf("Adding sound interrupts property\n");
2883 prom_setprop(node, "/builtin/sound", "interrupts",
2884 sound_irq, sizeof(sound_irq));
2885 }
2886 }
2887
2888 /* Make sure ethernet phy-handle property exists */
2889 fixup_device_tree_efika_add_phy();
2890}
2891#else
2892#define fixup_device_tree_efika()
2893#endif
2894
2895#ifdef CONFIG_PPC_PASEMI_NEMO
2896/*
2897 * CFE supplied on Nemo is broken in several ways; the biggest
2898 * problem is that it reassigns ISA interrupts to unused mpic ints.
2899 * Add an interrupt-controller property for the io-bridge to use
2900 * and correct the ints so we can attach them to an irq_domain
2901 */
2902static void __init fixup_device_tree_pasemi(void)
2903{
2904 u32 interrupts[2], parent, rval, val = 0;
2905 char *name, *pci_name;
2906 phandle iob, node;
2907
2908 /* Find the root pci node */
2909 name = "/pxp@0,e0000000";
2910 iob = call_prom("finddevice", 1, 1, ADDR(name));
2911 if (!PHANDLE_VALID(iob))
2912 return;
2913
2914 /* check if interrupt-controller node set yet */
2915	if (prom_getproplen(iob, "interrupt-controller") != PROM_ERROR)
2916 return;
2917
2918 prom_printf("adding interrupt-controller property for SB600...\n");
2919
2920 prom_setprop(iob, name, "interrupt-controller", &val, 0);
2921
2922 pci_name = "/pxp@0,e0000000/pci@11";
2923 node = call_prom("finddevice", 1, 1, ADDR(pci_name));
2924 parent = ADDR(iob);
2925
2926	for (; prom_next_node(&node); ) {
2927 /* scan each node for one with an interrupt */
2928 if (!PHANDLE_VALID(node))
2929 continue;
2930
2931 rval = prom_getproplen(node, "interrupts");
2932 if (rval == 0 || rval == PROM_ERROR)
2933 continue;
2934
2935 prom_getprop(node, "interrupts", &interrupts, sizeof(interrupts));
2936 if ((interrupts[0] < 212) || (interrupts[0] > 222))
2937 continue;
2938
2939 /* found a node, update both interrupts and interrupt-parent */
2940 if ((interrupts[0] >= 212) && (interrupts[0] <= 215))
2941 interrupts[0] -= 203;
2942 if ((interrupts[0] >= 216) && (interrupts[0] <= 220))
2943 interrupts[0] -= 213;
2944 if (interrupts[0] == 221)
2945 interrupts[0] = 14;
2946 if (interrupts[0] == 222)
2947 interrupts[0] = 8;
2948
2949 prom_setprop(node, pci_name, "interrupts", interrupts,
2950 sizeof(interrupts));
2951 prom_setprop(node, pci_name, "interrupt-parent", &parent,
2952 sizeof(parent));
2953 }
2954
2955 /*
2956	 * The io-bridge has device_type set to 'io-bridge'; change it to 'isa'
2957 * so that generic isa-bridge code can add the SB600 and its on-board
2958 * peripherals.
2959 */
2960 name = "/pxp@0,e0000000/io-bridge@0";
2961 iob = call_prom("finddevice", 1, 1, ADDR(name));
2962 if (!PHANDLE_VALID(iob))
2963 return;
2964
2965 /* device_type is already set, just change it. */
2966
2967 prom_printf("Changing device_type of SB600 node...\n");
2968
2969 prom_setprop(iob, name, "device_type", "isa", sizeof("isa"));
2970}
2971#else /* !CONFIG_PPC_PASEMI_NEMO */
2972static inline void fixup_device_tree_pasemi(void) { }
2973#endif
2974
2975static void __init fixup_device_tree(void)
2976{
2977 fixup_device_tree_maple();
2978 fixup_device_tree_maple_memory_controller();
2979 fixup_device_tree_chrp();
2980 fixup_device_tree_pmac();
2981 fixup_device_tree_efika();
2982 fixup_device_tree_pasemi();
2983}
2984
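/*
 * Work out which hardware CPU we are booting on: /chosen "cpu" is an
 * instance handle for the boot processor, which we convert to a package
 * so we can read its "reg" (hardware cpu id) into prom.cpu.
 */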
2985static void __init prom_find_boot_cpu(void)
2986{
2987 __be32 rval;
2988 ihandle prom_cpu;
2989 phandle cpu_pkg;
2990
2991 rval = 0;
2992 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
2993 return;
2994 prom_cpu = be32_to_cpu(rval);
2995
2996 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
2997
2998 if (!PHANDLE_VALID(cpu_pkg))
2999 return;
3000
3001 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
3002 prom.cpu = be32_to_cpu(rval);
3003
3004 prom_debug("Booting CPU hw index = %lu\n", prom.cpu);
3005}
3006
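/*
 * r3/r4 carry the initrd start address and size when the boot loader
 * supplied one; record them in /chosen and add the image to the memory
 * reserve map so the kernel won't overwrite it.
 */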
3007static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
3008{
3009#ifdef CONFIG_BLK_DEV_INITRD
3010 if (r3 && r4 && r4 != 0xdeadbeef) {
3011 __be64 val;
3012
3013 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
3014 prom_initrd_end = prom_initrd_start + r4;
3015
3016 val = cpu_to_be64(prom_initrd_start);
3017 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
3018 &val, sizeof(val));
3019 val = cpu_to_be64(prom_initrd_end);
3020 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
3021 &val, sizeof(val));
3022
3023 reserve_mem(prom_initrd_start,
3024 prom_initrd_end - prom_initrd_start);
3025
3026 prom_debug("initrd_start=0x%x\n", prom_initrd_start);
3027 prom_debug("initrd_end=0x%x\n", prom_initrd_end);
3028 }
3029#endif /* CONFIG_BLK_DEV_INITRD */
3030}
3031
3032#ifdef CONFIG_PPC64
3033#ifdef CONFIG_RELOCATABLE
3034static void reloc_toc(void)
3035{
3036}
3037
3038static void unreloc_toc(void)
3039{
3040}
3041#else
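/*
 * Walk nr_entries 64-bit TOC entries starting at the TOC base (r2 - 0x8000)
 * and add "offset" to each one, so that the values are usable at the address
 * we are actually running at; reloc_toc() applies the current relocation
 * offset and unreloc_toc() removes it again before entering the kernel.
 */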
3042static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
3043{
3044 unsigned long i;
3045 unsigned long *toc_entry;
3046
3047 /* Get the start of the TOC by using r2 directly. */
3048 asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
3049
3050 for (i = 0; i < nr_entries; i++) {
3051 *toc_entry = *toc_entry + offset;
3052 toc_entry++;
3053 }
3054}
3055
3056static void reloc_toc(void)
3057{
3058 unsigned long offset = reloc_offset();
3059 unsigned long nr_entries =
3060 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
3061
3062 __reloc_toc(offset, nr_entries);
3063
3064 mb();
3065}
3066
3067static void unreloc_toc(void)
3068{
3069 unsigned long offset = reloc_offset();
3070 unsigned long nr_entries =
3071 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
3072
3073 mb();
3074
3075 __reloc_toc(-offset, nr_entries);
3076}
3077#endif
3078#endif
3079
3080/*
3081 * We enter here early on, when the Open Firmware prom is still
3082 * handling exceptions and managing the MMU hash table for us.
3083 */
3084
3085unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3086 unsigned long pp,
3087 unsigned long r6, unsigned long r7,
3088 unsigned long kbase)
3089{
3090 unsigned long hdr;
3091
3092#ifdef CONFIG_PPC32
3093 unsigned long offset = reloc_offset();
3094 reloc_got2(offset);
3095#else
3096 reloc_toc();
3097#endif
3098
3099 /*
3100 * First zero the BSS
3101 */
3102 memset(&__bss_start, 0, __bss_stop - __bss_start);
3103
3104 /*
3105 * Init interface to Open Firmware, get some node references,
3106 * like /chosen
3107 */
3108 prom_init_client_services(pp);
3109
3110 /*
3111 * See if this OF is old enough that we need to do explicit maps
3112 * and other workarounds
3113 */
3114 prom_find_mmu();
3115
3116 /*
3117 * Init prom stdout device
3118 */
3119 prom_init_stdout();
3120
3121 prom_printf("Preparing to boot %s", linux_banner);
3122
3123 /*
3124 * Get default machine type. At this point, we do not differentiate
3125 * between pSeries SMP and pSeries LPAR
3126 */
3127 of_platform = prom_find_machine_type();
3128 prom_printf("Detected machine type: %x\n", of_platform);
3129
3130#ifndef CONFIG_NONSTATIC_KERNEL
3131 /* Bail if this is a kdump kernel. */
3132 if (PHYSICAL_START > 0)
3133 prom_panic("Error: You can't boot a kdump kernel from OF!\n");
3134#endif
3135
3136 /*
3137 * Check for an initrd
3138 */
3139 prom_check_initrd(r3, r4);
3140
3141 /*
3142 * Do early parsing of command line
3143 */
3144 early_cmdline_parse();
3145
3146#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
3147 /*
3148 * On pSeries, inform the firmware about our capabilities
3149 */
3150 if (of_platform == PLATFORM_PSERIES ||
3151 of_platform == PLATFORM_PSERIES_LPAR)
3152 prom_send_capabilities();
3153#endif
3154
3155 /*
3156 * Copy the CPU hold code
3157 */
3158 if (of_platform != PLATFORM_POWERMAC)
3159 copy_and_flush(0, kbase, 0x100, 0);
3160
3161 /*
3162 * Initialize memory management within prom_init
3163 */
3164 prom_init_mem();
3165
3166 /*
3167 * Determine which cpu is actually running right _now_
3168 */
3169 prom_find_boot_cpu();
3170
3171 /*
3172 * Initialize display devices
3173 */
3174 prom_check_displays();
3175
3176#if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
3177 /*
3178 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
3179	 * that uses the allocator; we need to make sure we get the top of memory
3180 * available for us here...
3181 */
3182 if (of_platform == PLATFORM_PSERIES)
3183 prom_initialize_tce_table();
3184#endif
3185
3186 /*
3187 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
3188 * have a usable RTAS implementation.
3189 */
3190 if (of_platform != PLATFORM_POWERMAC &&
3191 of_platform != PLATFORM_OPAL)
3192 prom_instantiate_rtas();
3193
3194#ifdef CONFIG_PPC_POWERNV
3195 if (of_platform == PLATFORM_OPAL)
3196 prom_instantiate_opal();
3197#endif /* CONFIG_PPC_POWERNV */
3198
3199#ifdef CONFIG_PPC64
3200 /* instantiate sml */
3201 prom_instantiate_sml();
3202#endif
3203
3204 /*
3205 * On non-powermacs, put all CPUs in spin-loops.
3206 *
3207	 * PowerMacs use a different mechanism to spin CPUs.
3208	 *
3209	 * (This must be done after instantiating RTAS)
3210 */
3211 if (of_platform != PLATFORM_POWERMAC &&
3212 of_platform != PLATFORM_OPAL)
3213 prom_hold_cpus();
3214
3215 /*
3216	 * Fill in some information for use by the kernel later on
3217 */
3218 if (prom_memory_limit) {
3219 __be64 val = cpu_to_be64(prom_memory_limit);
3220 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3221 &val, sizeof(val));
3222 }
3223#ifdef CONFIG_PPC64
3224 if (prom_iommu_off)
3225 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3226 NULL, 0);
3227
3228 if (prom_iommu_force_on)
3229 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
3230 NULL, 0);
3231
3232 if (prom_tce_alloc_start) {
3233 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3234 &prom_tce_alloc_start,
3235 sizeof(prom_tce_alloc_start));
3236 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3237 &prom_tce_alloc_end,
3238 sizeof(prom_tce_alloc_end));
3239 }
3240#endif
3241
3242 /*
3243 * Fixup any known bugs in the device-tree
3244 */
3245 fixup_device_tree();
3246
3247 /*
3248 * Now finally create the flattened device-tree
3249 */
3250 prom_printf("copying OF device tree...\n");
3251 flatten_device_tree();
3252
3253 /*
3254 * in case stdin is USB and still active on IBM machines...
3255 * Unfortunately quiesce crashes on some powermacs if we have
3256 * closed stdin already (in particular the powerbook 101). It
3257 * appears that the OPAL version of OFW doesn't like it either.
3258 */
3259 if (of_platform != PLATFORM_POWERMAC &&
3260 of_platform != PLATFORM_OPAL)
3261 prom_close_stdin();
3262
3263 /*
3264 * Call OF "quiesce" method to shut down pending DMA's from
3265 * devices etc...
3266 */
3267 prom_printf("Quiescing Open Firmware ...\n");
3268 call_prom("quiesce", 0, 0);
3269
3270 /*
3271 * And finally, call the kernel passing it the flattened device
3272 * tree and NULL as r5, thus triggering the new entry point which
3273 * is common to us and kexec
3274 */
3275 hdr = dt_header_start;
3276
3277	/* Don't print anything after quiesce under OPAL; it crashes OFW */
3278 if (of_platform != PLATFORM_OPAL) {
3279 prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
3280 prom_debug("->dt_header_start=0x%x\n", hdr);
3281 }
3282
3283#ifdef CONFIG_PPC32
3284 reloc_got2(-offset);
3285#else
3286 unreloc_toc();
3287#endif
3288
3289#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
3290 /* OPAL early debug gets the OPAL base & entry in r8 and r9 */
3291 __start(hdr, kbase, 0, 0, 0,
3292 prom_opal_base, prom_opal_entry);
3293#else
3294 __start(hdr, kbase, 0, 0, 0, 0, 0);
3295#endif
3296
3297 return 0;
3298}
1/*
2 * Procedures for interfacing to Open Firmware.
3 *
4 * Paul Mackerras August 1996.
5 * Copyright (C) 1996-2005 Paul Mackerras.
6 *
7 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
8 * {engebret|bergner}@us.ibm.com
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#undef DEBUG_PROM
17
18#include <stdarg.h>
19#include <linux/kernel.h>
20#include <linux/string.h>
21#include <linux/init.h>
22#include <linux/threads.h>
23#include <linux/spinlock.h>
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/proc_fs.h>
27#include <linux/stringify.h>
28#include <linux/delay.h>
29#include <linux/initrd.h>
30#include <linux/bitops.h>
31#include <asm/prom.h>
32#include <asm/rtas.h>
33#include <asm/page.h>
34#include <asm/processor.h>
35#include <asm/irq.h>
36#include <asm/io.h>
37#include <asm/smp.h>
38#include <asm/mmu.h>
39#include <asm/pgtable.h>
40#include <asm/pci.h>
41#include <asm/iommu.h>
42#include <asm/btext.h>
43#include <asm/sections.h>
44#include <asm/machdep.h>
45#include <asm/opal.h>
46
47#include <linux/linux_logo.h>
48
49/*
50 * Eventually bump that one up
51 */
52#define DEVTREE_CHUNK_SIZE 0x100000
53
54/*
55 * This is the size of the local memory reserve map that gets copied
56 * into the boot params passed to the kernel. That size is totally
57 * flexible as the kernel just reads the list until it encounters an
58 * entry with size 0, so it can be changed without breaking binary
59 * compatibility
60 */
61#define MEM_RESERVE_MAP_SIZE 8
62
63/*
64 * prom_init() is called very early on, before the kernel text
65 * and data have been mapped to KERNELBASE. At this point the code
66 * is running at whatever address it has been loaded at.
67 * On ppc32 we compile with -mrelocatable, which means that references
68 * to extern and static variables get relocated automatically.
69 * ppc64 objects are always relocatable, we just need to relocate the
70 * TOC.
71 *
72 * Because OF may have mapped I/O devices into the area starting at
73 * KERNELBASE, particularly on CHRP machines, we can't safely call
74 * OF once the kernel has been mapped to KERNELBASE. Therefore all
75 * OF calls must be done within prom_init().
76 *
77 * ADDR is used in calls to call_prom. The 4th and following
78 * arguments to call_prom should be 32-bit values.
79 * On ppc64, 64 bit values are truncated to 32 bits (and
80 * fortunately don't get interpreted as two arguments).
81 */
82#define ADDR(x) (u32)(unsigned long)(x)
83
84#ifdef CONFIG_PPC64
85#define OF_WORKAROUNDS 0
86#else
87#define OF_WORKAROUNDS of_workarounds
88int of_workarounds;
89#endif
90
91#define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
92#define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
93
94#define PROM_BUG() do { \
95 prom_printf("kernel BUG at %s line 0x%x!\n", \
96 __FILE__, __LINE__); \
97 __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
98} while (0)
99
100#ifdef DEBUG_PROM
101#define prom_debug(x...) prom_printf(x)
102#else
103#define prom_debug(x...)
104#endif
105
106
107typedef u32 prom_arg_t;
108
109struct prom_args {
110 __be32 service;
111 __be32 nargs;
112 __be32 nret;
113 __be32 args[10];
114};
115
116struct prom_t {
117 ihandle root;
118 phandle chosen;
119 int cpu;
120 ihandle stdout;
121 ihandle mmumap;
122 ihandle memory;
123};
124
125struct mem_map_entry {
126 __be64 base;
127 __be64 size;
128};
129
130typedef __be32 cell_t;
131
132extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
133 unsigned long r6, unsigned long r7, unsigned long r8,
134 unsigned long r9);
135
136#ifdef CONFIG_PPC64
137extern int enter_prom(struct prom_args *args, unsigned long entry);
138#else
139static inline int enter_prom(struct prom_args *args, unsigned long entry)
140{
141 return ((int (*)(struct prom_args *))entry)(args);
142}
143#endif
144
145extern void copy_and_flush(unsigned long dest, unsigned long src,
146 unsigned long size, unsigned long offset);
147
148/* prom structure */
149static struct prom_t __initdata prom;
150
151static unsigned long prom_entry __initdata;
152
153#define PROM_SCRATCH_SIZE 256
154
155static char __initdata of_stdout_device[256];
156static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
157
158static unsigned long __initdata dt_header_start;
159static unsigned long __initdata dt_struct_start, dt_struct_end;
160static unsigned long __initdata dt_string_start, dt_string_end;
161
162static unsigned long __initdata prom_initrd_start, prom_initrd_end;
163
164#ifdef CONFIG_PPC64
165static int __initdata prom_iommu_force_on;
166static int __initdata prom_iommu_off;
167static unsigned long __initdata prom_tce_alloc_start;
168static unsigned long __initdata prom_tce_alloc_end;
169#endif
170
171/* Platforms codes are now obsolete in the kernel. Now only used within this
172 * file and ultimately gone too. Feel free to change them if you need, they
173 * are not shared with anything outside of this file anymore
174 */
175#define PLATFORM_PSERIES 0x0100
176#define PLATFORM_PSERIES_LPAR 0x0101
177#define PLATFORM_LPAR 0x0001
178#define PLATFORM_POWERMAC 0x0400
179#define PLATFORM_GENERIC 0x0500
180#define PLATFORM_OPAL 0x0600
181
182static int __initdata of_platform;
183
184static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
185
186static unsigned long __initdata prom_memory_limit;
187
188static unsigned long __initdata alloc_top;
189static unsigned long __initdata alloc_top_high;
190static unsigned long __initdata alloc_bottom;
191static unsigned long __initdata rmo_top;
192static unsigned long __initdata ram_top;
193
194static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
195static int __initdata mem_reserve_cnt;
196
197static cell_t __initdata regbuf[1024];
198
199static bool rtas_has_query_cpu_stopped;
200
201
202/*
203 * Error results ... some OF calls will return "-1" on error, some
204 * will return 0, some will return either. To simplify, here are
205 * macros to use with any ihandle or phandle return value to check if
206 * it is valid
207 */
208
209#define PROM_ERROR (-1u)
210#define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
211#define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
212
213
214/* This is the one and *ONLY* place where we actually call open
215 * firmware.
216 */
217
218static int __init call_prom(const char *service, int nargs, int nret, ...)
219{
220 int i;
221 struct prom_args args;
222 va_list list;
223
224 args.service = cpu_to_be32(ADDR(service));
225 args.nargs = cpu_to_be32(nargs);
226 args.nret = cpu_to_be32(nret);
227
228 va_start(list, nret);
229 for (i = 0; i < nargs; i++)
230 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
231 va_end(list);
232
233 for (i = 0; i < nret; i++)
234 args.args[nargs+i] = 0;
235
236 if (enter_prom(&args, prom_entry) < 0)
237 return PROM_ERROR;
238
239 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
240}
241
242static int __init call_prom_ret(const char *service, int nargs, int nret,
243 prom_arg_t *rets, ...)
244{
245 int i;
246 struct prom_args args;
247 va_list list;
248
249 args.service = cpu_to_be32(ADDR(service));
250 args.nargs = cpu_to_be32(nargs);
251 args.nret = cpu_to_be32(nret);
252
253 va_start(list, rets);
254 for (i = 0; i < nargs; i++)
255 args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
256 va_end(list);
257
258 for (i = 0; i < nret; i++)
259 args.args[nargs+i] = 0;
260
261 if (enter_prom(&args, prom_entry) < 0)
262 return PROM_ERROR;
263
264 if (rets != NULL)
265 for (i = 1; i < nret; ++i)
266 rets[i-1] = be32_to_cpu(args.args[nargs+i]);
267
268 return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
269}
270
271
272static void __init prom_print(const char *msg)
273{
274 const char *p, *q;
275
276 if (prom.stdout == 0)
277 return;
278
279 for (p = msg; *p != 0; p = q) {
280 for (q = p; *q != 0 && *q != '\n'; ++q)
281 ;
282 if (q > p)
283 call_prom("write", 3, 1, prom.stdout, p, q - p);
284 if (*q == 0)
285 break;
286 ++q;
287 call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
288 }
289}
290
291
292static void __init prom_print_hex(unsigned long val)
293{
294 int i, nibbles = sizeof(val)*2;
295 char buf[sizeof(val)*2+1];
296
297 for (i = nibbles-1; i >= 0; i--) {
298 buf[i] = (val & 0xf) + '0';
299 if (buf[i] > '9')
300 buf[i] += ('a'-'0'-10);
301 val >>= 4;
302 }
303 buf[nibbles] = '\0';
304 call_prom("write", 3, 1, prom.stdout, buf, nibbles);
305}
306
307/* max number of decimal digits in an unsigned long */
308#define UL_DIGITS 21
309static void __init prom_print_dec(unsigned long val)
310{
311 int i, size;
312 char buf[UL_DIGITS+1];
313
314 for (i = UL_DIGITS-1; i >= 0; i--) {
315 buf[i] = (val % 10) + '0';
316 val = val/10;
317 if (val == 0)
318 break;
319 }
320 /* shift stuff down */
321 size = UL_DIGITS - i;
322 call_prom("write", 3, 1, prom.stdout, buf+i, size);
323}
324
325static void __init prom_printf(const char *format, ...)
326{
327 const char *p, *q, *s;
328 va_list args;
329 unsigned long v;
330 long vs;
331
332 va_start(args, format);
333 for (p = format; *p != 0; p = q) {
334 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
335 ;
336 if (q > p)
337 call_prom("write", 3, 1, prom.stdout, p, q - p);
338 if (*q == 0)
339 break;
340 if (*q == '\n') {
341 ++q;
342 call_prom("write", 3, 1, prom.stdout,
343 ADDR("\r\n"), 2);
344 continue;
345 }
346 ++q;
347 if (*q == 0)
348 break;
349 switch (*q) {
350 case 's':
351 ++q;
352 s = va_arg(args, const char *);
353 prom_print(s);
354 break;
355 case 'x':
356 ++q;
357 v = va_arg(args, unsigned long);
358 prom_print_hex(v);
359 break;
360 case 'd':
361 ++q;
362 vs = va_arg(args, int);
363 if (vs < 0) {
364 prom_print("-");
365 vs = -vs;
366 }
367 prom_print_dec(vs);
368 break;
369 case 'l':
370 ++q;
371 if (*q == 0)
372 break;
373 else if (*q == 'x') {
374 ++q;
375 v = va_arg(args, unsigned long);
376 prom_print_hex(v);
377 } else if (*q == 'u') { /* '%lu' */
378 ++q;
379 v = va_arg(args, unsigned long);
380 prom_print_dec(v);
381 } else if (*q == 'd') { /* %ld */
382 ++q;
383 vs = va_arg(args, long);
384 if (vs < 0) {
385 prom_print("-");
386 vs = -vs;
387 }
388 prom_print_dec(vs);
389 }
390 break;
391 }
392 }
393}
394
395
396static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
397 unsigned long align)
398{
399
400 if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
401 /*
402 * Old OF requires we claim physical and virtual separately
403 * and then map explicitly (assuming virtual mode)
404 */
405 int ret;
406 prom_arg_t result;
407
408 ret = call_prom_ret("call-method", 5, 2, &result,
409 ADDR("claim"), prom.memory,
410 align, size, virt);
411 if (ret != 0 || result == -1)
412 return -1;
413 ret = call_prom_ret("call-method", 5, 2, &result,
414 ADDR("claim"), prom.mmumap,
415 align, size, virt);
416 if (ret != 0) {
417 call_prom("call-method", 4, 1, ADDR("release"),
418 prom.memory, size, virt);
419 return -1;
420 }
421 /* the 0x12 is M (coherence) + PP == read/write */
422 call_prom("call-method", 6, 1,
423 ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
424 return virt;
425 }
426 return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
427 (prom_arg_t)align);
428}
429
430static void __init __attribute__((noreturn)) prom_panic(const char *reason)
431{
432 prom_print(reason);
433 /* Do not call exit because it clears the screen on pmac
434 * it also causes some sort of double-fault on early pmacs */
435 if (of_platform == PLATFORM_POWERMAC)
436 asm("trap\n");
437
438 /* ToDo: should put up an SRC here on pSeries */
439 call_prom("exit", 0, 0);
440
441 for (;;) /* should never get here */
442 ;
443}
444
445
446static int __init prom_next_node(phandle *nodep)
447{
448 phandle node;
449
450 if ((node = *nodep) != 0
451 && (*nodep = call_prom("child", 1, 1, node)) != 0)
452 return 1;
453 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
454 return 1;
455 for (;;) {
456 if ((node = call_prom("parent", 1, 1, node)) == 0)
457 return 0;
458 if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
459 return 1;
460 }
461}
462
463static int inline prom_getprop(phandle node, const char *pname,
464 void *value, size_t valuelen)
465{
466 return call_prom("getprop", 4, 1, node, ADDR(pname),
467 (u32)(unsigned long) value, (u32) valuelen);
468}
469
470static int inline prom_getproplen(phandle node, const char *pname)
471{
472 return call_prom("getproplen", 2, 1, node, ADDR(pname));
473}
474
475static void add_string(char **str, const char *q)
476{
477 char *p = *str;
478
479 while (*q)
480 *p++ = *q++;
481 *p++ = ' ';
482 *str = p;
483}
484
485static char *tohex(unsigned int x)
486{
487 static char digits[] = "0123456789abcdef";
488 static char result[9];
489 int i;
490
491 result[8] = 0;
492 i = 8;
493 do {
494 --i;
495 result[i] = digits[x & 0xf];
496 x >>= 4;
497 } while (x != 0 && i > 0);
498 return &result[i];
499}
500
501static int __init prom_setprop(phandle node, const char *nodename,
502 const char *pname, void *value, size_t valuelen)
503{
504 char cmd[256], *p;
505
506 if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
507 return call_prom("setprop", 4, 1, node, ADDR(pname),
508 (u32)(unsigned long) value, (u32) valuelen);
509
510 /* gah... setprop doesn't work on longtrail, have to use interpret */
511 p = cmd;
512 add_string(&p, "dev");
513 add_string(&p, nodename);
514 add_string(&p, tohex((u32)(unsigned long) value));
515 add_string(&p, tohex(valuelen));
516 add_string(&p, tohex(ADDR(pname)));
517 add_string(&p, tohex(strlen(pname)));
518 add_string(&p, "property");
519 *p = 0;
520 return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
521}
522
523/* We can't use the standard versions because of relocation headaches. */
524#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
525 || ('a' <= (c) && (c) <= 'f') \
526 || ('A' <= (c) && (c) <= 'F'))
527
528#define isdigit(c) ('0' <= (c) && (c) <= '9')
529#define islower(c) ('a' <= (c) && (c) <= 'z')
530#define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
531
532static unsigned long prom_strtoul(const char *cp, const char **endp)
533{
534 unsigned long result = 0, base = 10, value;
535
536 if (*cp == '0') {
537 base = 8;
538 cp++;
539 if (toupper(*cp) == 'X') {
540 cp++;
541 base = 16;
542 }
543 }
544
545 while (isxdigit(*cp) &&
546 (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
547 result = result * base + value;
548 cp++;
549 }
550
551 if (endp)
552 *endp = cp;
553
554 return result;
555}
556
557static unsigned long prom_memparse(const char *ptr, const char **retptr)
558{
559 unsigned long ret = prom_strtoul(ptr, retptr);
560 int shift = 0;
561
562 /*
563 * We can't use a switch here because GCC *may* generate a
564 * jump table which won't work, because we're not running at
565 * the address we're linked at.
566 */
567 if ('G' == **retptr || 'g' == **retptr)
568 shift = 30;
569
570 if ('M' == **retptr || 'm' == **retptr)
571 shift = 20;
572
573 if ('K' == **retptr || 'k' == **retptr)
574 shift = 10;
575
576 if (shift) {
577 ret <<= shift;
578 (*retptr)++;
579 }
580
581 return ret;
582}
583
584/*
585 * Early parsing of the command line passed to the kernel, used for
586 * "mem=x" and the options that affect the iommu
587 */
588static void __init early_cmdline_parse(void)
589{
590 const char *opt;
591
592 char *p;
593 int l = 0;
594
595 prom_cmd_line[0] = 0;
596 p = prom_cmd_line;
597 if ((long)prom.chosen > 0)
598 l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
599#ifdef CONFIG_CMDLINE
600 if (l <= 0 || p[0] == '\0') /* dbl check */
601 strlcpy(prom_cmd_line,
602 CONFIG_CMDLINE, sizeof(prom_cmd_line));
603#endif /* CONFIG_CMDLINE */
604 prom_printf("command line: %s\n", prom_cmd_line);
605
606#ifdef CONFIG_PPC64
607 opt = strstr(prom_cmd_line, "iommu=");
608 if (opt) {
609 prom_printf("iommu opt is: %s\n", opt);
610 opt += 6;
611 while (*opt && *opt == ' ')
612 opt++;
613 if (!strncmp(opt, "off", 3))
614 prom_iommu_off = 1;
615 else if (!strncmp(opt, "force", 5))
616 prom_iommu_force_on = 1;
617 }
618#endif
619 opt = strstr(prom_cmd_line, "mem=");
620 if (opt) {
621 opt += 4;
622 prom_memory_limit = prom_memparse(opt, (const char **)&opt);
623#ifdef CONFIG_PPC64
624 /* Align to 16 MB == size of ppc64 large page */
625 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
626#endif
627 }
628}
629
630#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
631/*
632 * The architecture vector has an array of PVR mask/value pairs,
633 * followed by # option vectors - 1, followed by the option vectors.
634 *
635 * See prom.h for the definition of the bits specified in the
636 * architecture vector.
637 *
638 * Because the description vector contains a mix of byte and word
639 * values, we declare it as an unsigned char array, and use this
640 * macro to put word values in.
641 */
642#define W(x) ((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
643 ((x) >> 8) & 0xff, (x) & 0xff
644
645unsigned char ibm_architecture_vec[] = {
646 W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */
647 W(0xffff0000), W(0x003e0000), /* POWER6 */
648 W(0xffff0000), W(0x003f0000), /* POWER7 */
649 W(0xffff0000), W(0x004b0000), /* POWER8E */
650 W(0xffff0000), W(0x004d0000), /* POWER8 */
651 W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */
652 W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */
653 W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */
654 W(0xfffffffe), W(0x0f000001), /* all 2.04-compliant and earlier */
655 6 - 1, /* 6 option vectors */
656
657 /* option vector 1: processor architectures supported */
658 3 - 2, /* length */
659 0, /* don't ignore, don't halt */
660 OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
661 OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
662
663 /* option vector 2: Open Firmware options supported */
664 34 - 2, /* length */
665 OV2_REAL_MODE,
666 0, 0,
667 W(0xffffffff), /* real_base */
668 W(0xffffffff), /* real_size */
669 W(0xffffffff), /* virt_base */
670 W(0xffffffff), /* virt_size */
671 W(0xffffffff), /* load_base */
672 W(256), /* 256MB min RMA */
673 W(0xffffffff), /* full client load */
674 0, /* min RMA percentage of total RAM */
675 48, /* max log_2(hash table size) */
676
677 /* option vector 3: processor options supported */
678 3 - 2, /* length */
679 0, /* don't ignore, don't halt */
680 OV3_FP | OV3_VMX | OV3_DFP,
681
682 /* option vector 4: IBM PAPR implementation */
683 3 - 2, /* length */
684 0, /* don't halt */
685 OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
686
687 /* option vector 5: PAPR/OF options */
688 19 - 2, /* length */
689 0, /* don't ignore, don't halt */
690 OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
691 OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
692#ifdef CONFIG_PCI_MSI
693 /* PCIe/MSI support. Without MSI full PCIe is not supported */
694 OV5_FEAT(OV5_MSI),
695#else
696 0,
697#endif
698 0,
699#ifdef CONFIG_PPC_SMLPAR
700 OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
701#else
702 0,
703#endif
704 OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
705 0,
706 0,
707 0,
708	/* WARNING: The offset of the "number of cores" field below
709	 * must match the IBM_ARCH_VEC_NRCORES_OFFSET macro below.
710	 * Update that definition if the structure layout changes.
711	 */
712#define IBM_ARCH_VEC_NRCORES_OFFSET 125
713 W(NR_CPUS), /* number of cores supported */
714 0,
715 0,
716 0,
717 0,
718 OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
719 OV5_FEAT(OV5_PFO_HW_842),
720 OV5_FEAT(OV5_SUB_PROCESSORS),
721 /* option vector 6: IBM PAPR hints */
722 4 - 2, /* length */
723 0,
724 0,
725 OV6_LINUX,
726
727};
728
729/* Old method - an ELF header with PT_NOTE sections; only works on BE */
730#ifdef __BIG_ENDIAN__
731static struct fake_elf {
732 Elf32_Ehdr elfhdr;
733 Elf32_Phdr phdr[2];
734 struct chrpnote {
735 u32 namesz;
736 u32 descsz;
737 u32 type;
738 char name[8]; /* "PowerPC" */
739 struct chrpdesc {
740 u32 real_mode;
741 u32 real_base;
742 u32 real_size;
743 u32 virt_base;
744 u32 virt_size;
745 u32 load_base;
746 } chrpdesc;
747 } chrpnote;
748 struct rpanote {
749 u32 namesz;
750 u32 descsz;
751 u32 type;
752 char name[24]; /* "IBM,RPA-Client-Config" */
753 struct rpadesc {
754 u32 lpar_affinity;
755 u32 min_rmo_size;
756 u32 min_rmo_percent;
757 u32 max_pft_size;
758 u32 splpar;
759 u32 min_load;
760 u32 new_mem_def;
761 u32 ignore_me;
762 } rpadesc;
763 } rpanote;
764} fake_elf = {
765 .elfhdr = {
766 .e_ident = { 0x7f, 'E', 'L', 'F',
767 ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
768 .e_type = ET_EXEC, /* yeah right */
769 .e_machine = EM_PPC,
770 .e_version = EV_CURRENT,
771 .e_phoff = offsetof(struct fake_elf, phdr),
772 .e_phentsize = sizeof(Elf32_Phdr),
773 .e_phnum = 2
774 },
775 .phdr = {
776 [0] = {
777 .p_type = PT_NOTE,
778 .p_offset = offsetof(struct fake_elf, chrpnote),
779 .p_filesz = sizeof(struct chrpnote)
780 }, [1] = {
781 .p_type = PT_NOTE,
782 .p_offset = offsetof(struct fake_elf, rpanote),
783 .p_filesz = sizeof(struct rpanote)
784 }
785 },
786 .chrpnote = {
787 .namesz = sizeof("PowerPC"),
788 .descsz = sizeof(struct chrpdesc),
789 .type = 0x1275,
790 .name = "PowerPC",
791 .chrpdesc = {
792 .real_mode = ~0U, /* ~0 means "don't care" */
793 .real_base = ~0U,
794 .real_size = ~0U,
795 .virt_base = ~0U,
796 .virt_size = ~0U,
797 .load_base = ~0U
798 },
799 },
800 .rpanote = {
801 .namesz = sizeof("IBM,RPA-Client-Config"),
802 .descsz = sizeof(struct rpadesc),
803 .type = 0x12759999,
804 .name = "IBM,RPA-Client-Config",
805 .rpadesc = {
806 .lpar_affinity = 0,
807 .min_rmo_size = 64, /* in megabytes */
808 .min_rmo_percent = 0,
809 .max_pft_size = 48, /* 2^48 bytes max PFT size */
810 .splpar = 1,
811 .min_load = ~0U,
812 .new_mem_def = 0
813 }
814 }
815};
816#endif /* __BIG_ENDIAN__ */
817
818static int __init prom_count_smt_threads(void)
819{
820 phandle node;
821 char type[64];
822 unsigned int plen;
823
824	/* Pick up the first CPU node we can find */
825 for (node = 0; prom_next_node(&node); ) {
826 type[0] = 0;
827 prom_getprop(node, "device_type", type, sizeof(type));
828
829 if (strcmp(type, "cpu"))
830 continue;
831 /*
832 * There is an entry for each smt thread, each entry being
833 * 4 bytes long. All cpus should have the same number of
834 * smt threads, so return after finding the first.
835 */
836 plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
837 if (plen == PROM_ERROR)
838 break;
839 plen >>= 2;
840 prom_debug("Found %lu smt threads per core\n", (unsigned long)plen);
841
842 /* Sanity check */
843 if (plen < 1 || plen > 64) {
844 prom_printf("Threads per core %lu out of bounds, assuming 1\n",
845 (unsigned long)plen);
846 return 1;
847 }
848 return plen;
849 }
850 prom_debug("No threads found, assuming 1 per core\n");
851
852 return 1;
853
854}
855
856
857static void __init prom_send_capabilities(void)
858{
859 ihandle root;
860 prom_arg_t ret;
861 u32 cores;
862 unsigned char *ptcores;
863
864 root = call_prom("open", 1, 1, ADDR("/"));
865 if (root != 0) {
866 /* We need to tell the FW about the number of cores we support.
867 *
868 * To do that, we count the number of threads on the first core
869 * (we assume this is the same for all cores) and use it to
870 * divide NR_CPUS.
871 */
872
873 /* The core value may start at an odd address. If such a word
874 * access is made at a cache line boundary, this leads to an
875 * exception which may not be handled at this time.
876		 * Force per-byte accesses to avoid the exception.
877 */
878 ptcores = &ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
879 cores = 0;
880 cores |= ptcores[0] << 24;
881 cores |= ptcores[1] << 16;
882 cores |= ptcores[2] << 8;
883 cores |= ptcores[3];
884 if (cores != NR_CPUS) {
885 prom_printf("WARNING ! "
886 "ibm_architecture_vec structure inconsistent: %lu!\n",
887 cores);
888 } else {
889 cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
890 prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
891 cores, NR_CPUS);
892 ptcores[0] = (cores >> 24) & 0xff;
893 ptcores[1] = (cores >> 16) & 0xff;
894 ptcores[2] = (cores >> 8) & 0xff;
895 ptcores[3] = cores & 0xff;
896 }
897
898 /* try calling the ibm,client-architecture-support method */
899 prom_printf("Calling ibm,client-architecture-support...");
900 if (call_prom_ret("call-method", 3, 2, &ret,
901 ADDR("ibm,client-architecture-support"),
902 root,
903 ADDR(ibm_architecture_vec)) == 0) {
904 /* the call exists... */
905 if (ret)
906 prom_printf("\nWARNING: ibm,client-architecture"
907 "-support call FAILED!\n");
908 call_prom("close", 1, 0, root);
909 prom_printf(" done\n");
910 return;
911 }
912 call_prom("close", 1, 0, root);
913 prom_printf(" not implemented\n");
914 }
915
916#ifdef __BIG_ENDIAN__
917 {
918 ihandle elfloader;
919
920 /* no ibm,client-architecture-support call, try the old way */
921 elfloader = call_prom("open", 1, 1,
922 ADDR("/packages/elf-loader"));
923 if (elfloader == 0) {
924 prom_printf("couldn't open /packages/elf-loader\n");
925 return;
926 }
927 call_prom("call-method", 3, 1, ADDR("process-elf-header"),
928 elfloader, ADDR(&fake_elf));
929 call_prom("close", 1, 0, elfloader);
930 }
931#endif /* __BIG_ENDIAN__ */
932}
933#endif /* #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
934
935/*
936 * Memory allocation strategy... our layout is normally:
937 *
938 * at 14MB or more we have vmlinux, then a gap and the initrd. In some
939 * rare cases the initrd might end up before the kernel instead.
940 * We assume this won't overwrite the final kernel at 0; we have no
941 * provision to handle that in this version, but it should hopefully
942 * never happen.
943 *
944 * alloc_top is set to the top of the RMO, and is eventually shrunk
945 * if the TCEs overlap.
946 *
947 * alloc_bottom is set to the top of the kernel/initrd.
948 *
949 * From there, allocations are done this way: RTAS is allocated
950 * topmost, and the device-tree is allocated from the bottom. We try
951 * to grow the device-tree allocation as we progress. If we can't,
952 * then we fail; we don't currently have a facility to restart
953 * elsewhere, but that shouldn't be necessary.
954 *
955 * Note that calls to reserve_mem have to be done explicitly, memory
956 * allocated with either alloc_up or alloc_down isn't automatically
957 * reserved.
958 */
959
960
961/*
962 * Allocates memory in the RMO upward from the kernel/initrd
963 *
964 * When align is 0 this is a special case: it means allocate in place
965 * at the current location of alloc_bottom, or fail (that is, basically
966 * extend the previous allocation). Used for the device-tree flattening.
967 */
968static unsigned long __init alloc_up(unsigned long size, unsigned long align)
969{
970 unsigned long base = alloc_bottom;
971 unsigned long addr = 0;
972
973 if (align)
974 base = _ALIGN_UP(base, align);
975 prom_debug("alloc_up(%x, %x)\n", size, align);
976 if (ram_top == 0)
977 prom_panic("alloc_up() called with mem not initialized\n");
978
979 if (align)
980 base = _ALIGN_UP(alloc_bottom, align);
981 else
982 base = alloc_bottom;
983
984 for(; (base + size) <= alloc_top;
985 base = _ALIGN_UP(base + 0x100000, align)) {
986 prom_debug(" trying: 0x%x\n\r", base);
987 addr = (unsigned long)prom_claim(base, size, 0);
988 if (addr != PROM_ERROR && addr != 0)
989 break;
990 addr = 0;
991 if (align == 0)
992 break;
993 }
994 if (addr == 0)
995 return 0;
996 alloc_bottom = addr + size;
997
998 prom_debug(" -> %x\n", addr);
999 prom_debug(" alloc_bottom : %x\n", alloc_bottom);
1000 prom_debug(" alloc_top : %x\n", alloc_top);
1001 prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
1002 prom_debug(" rmo_top : %x\n", rmo_top);
1003 prom_debug(" ram_top : %x\n", ram_top);
1004
1005 return addr;
1006}
1007
1008/*
1009 * Allocates memory downward, either from the top of the RMO or, if
1010 * highmem is set, from the top of RAM. Note that this one doesn't handle
1011 * failures. It only claims memory from OF when highmem is not set.
1012 */
1013static unsigned long __init alloc_down(unsigned long size, unsigned long align,
1014 int highmem)
1015{
1016 unsigned long base, addr = 0;
1017
1018 prom_debug("alloc_down(%x, %x, %s)\n", size, align,
1019 highmem ? "(high)" : "(low)");
1020 if (ram_top == 0)
1021 prom_panic("alloc_down() called with mem not initialized\n");
1022
1023 if (highmem) {
1024 /* Carve out storage for the TCE table. */
1025 addr = _ALIGN_DOWN(alloc_top_high - size, align);
1026 if (addr <= alloc_bottom)
1027 return 0;
1028		/* Will we bump into the RMO? If so, check that we don't
1029		 * overlap existing allocations there; if we do, we are
1030		 * dead: we must be the first in town!
1031		 */
1032 if (addr < rmo_top) {
1033 /* Good, we are first */
1034 if (alloc_top == rmo_top)
1035 alloc_top = rmo_top = addr;
1036 else
1037 return 0;
1038 }
1039 alloc_top_high = addr;
1040 goto bail;
1041 }
1042
1043 base = _ALIGN_DOWN(alloc_top - size, align);
1044 for (; base > alloc_bottom;
1045 base = _ALIGN_DOWN(base - 0x100000, align)) {
1046 prom_debug(" trying: 0x%x\n\r", base);
1047 addr = (unsigned long)prom_claim(base, size, 0);
1048 if (addr != PROM_ERROR && addr != 0)
1049 break;
1050 addr = 0;
1051 }
1052 if (addr == 0)
1053 return 0;
1054 alloc_top = addr;
1055
1056 bail:
1057 prom_debug(" -> %x\n", addr);
1058 prom_debug(" alloc_bottom : %x\n", alloc_bottom);
1059 prom_debug(" alloc_top : %x\n", alloc_top);
1060 prom_debug(" alloc_top_hi : %x\n", alloc_top_high);
1061 prom_debug(" rmo_top : %x\n", rmo_top);
1062 prom_debug(" ram_top : %x\n", ram_top);
1063
1064 return addr;
1065}
1066
1067/*
1068 * Parse a "reg" cell
1069 */
1070static unsigned long __init prom_next_cell(int s, cell_t **cellp)
1071{
1072 cell_t *p = *cellp;
1073 unsigned long r = 0;
1074
1075 /* Ignore more than 2 cells */
1076 while (s > sizeof(unsigned long) / 4) {
1077 p++;
1078 s--;
1079 }
1080 r = be32_to_cpu(*p++);
1081#ifdef CONFIG_PPC64
1082 if (s > 1) {
1083 r <<= 32;
1084 r |= be32_to_cpu(*(p++));
1085 }
1086#endif
1087 *cellp = p;
1088 return r;
1089}
1090
1091/*
1092 * Very dumb function for adding to the memory reserve list, but
1093 * we don't need anything smarter at this point
1094 *
1095 * XXX Eventually check for collisions. They should NEVER happen.
1096 * If problems seem to show up, it would be a good start to track
1097 * them down.
1098 */
1099static void __init reserve_mem(u64 base, u64 size)
1100{
1101 u64 top = base + size;
1102 unsigned long cnt = mem_reserve_cnt;
1103
1104 if (size == 0)
1105 return;
1106
1107 /* We need to always keep one empty entry so that we
1108 * have our terminator with "size" set to 0 since we are
1109 * dumb and just copy this entire array to the boot params
1110 */
1111 base = _ALIGN_DOWN(base, PAGE_SIZE);
1112 top = _ALIGN_UP(top, PAGE_SIZE);
1113 size = top - base;
1114
1115 if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
1116 prom_panic("Memory reserve map exhausted !\n");
1117 mem_reserve_map[cnt].base = cpu_to_be64(base);
1118 mem_reserve_map[cnt].size = cpu_to_be64(size);
1119 mem_reserve_cnt = cnt + 1;
1120}
1121
1122/*
1123 * Initialize the memory allocation mechanism: parse the "memory" nodes
1124 * to obtain the top of memory and of the RMO, and set up our local allocator
1125 */
1126static void __init prom_init_mem(void)
1127{
1128 phandle node;
1129 char *path, type[64];
1130 unsigned int plen;
1131 cell_t *p, *endp;
1132 __be32 val;
1133 u32 rac, rsc;
1134
1135 /*
1136 * We iterate the memory nodes to find
1137 * 1) top of RMO (first node)
1138 * 2) top of memory
1139 */
1140 val = cpu_to_be32(2);
1141 prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
1142 rac = be32_to_cpu(val);
1143 val = cpu_to_be32(1);
1144	prom_getprop(prom.root, "#size-cells", &val, sizeof(val));
1145 rsc = be32_to_cpu(val);
1146 prom_debug("root_addr_cells: %x\n", rac);
1147 prom_debug("root_size_cells: %x\n", rsc);
1148
1149 prom_debug("scanning memory:\n");
1150 path = prom_scratch;
1151
1152 for (node = 0; prom_next_node(&node); ) {
1153 type[0] = 0;
1154 prom_getprop(node, "device_type", type, sizeof(type));
1155
1156 if (type[0] == 0) {
1157 /*
1158 * CHRP Longtrail machines have no device_type
1159 * on the memory node, so check the name instead...
1160 */
1161 prom_getprop(node, "name", type, sizeof(type));
1162 }
1163 if (strcmp(type, "memory"))
1164 continue;
1165
1166 plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf));
1167 if (plen > sizeof(regbuf)) {
1168 prom_printf("memory node too large for buffer !\n");
1169 plen = sizeof(regbuf);
1170 }
1171 p = regbuf;
1172 endp = p + (plen / sizeof(cell_t));
1173
1174#ifdef DEBUG_PROM
1175 memset(path, 0, PROM_SCRATCH_SIZE);
1176 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
1177 prom_debug(" node %s :\n", path);
1178#endif /* DEBUG_PROM */
1179
1180 while ((endp - p) >= (rac + rsc)) {
1181 unsigned long base, size;
1182
1183 base = prom_next_cell(rac, &p);
1184 size = prom_next_cell(rsc, &p);
1185
1186 if (size == 0)
1187 continue;
1188 prom_debug(" %x %x\n", base, size);
1189 if (base == 0 && (of_platform & PLATFORM_LPAR))
1190 rmo_top = size;
1191 if ((base + size) > ram_top)
1192 ram_top = base + size;
1193 }
1194 }
1195
1196 alloc_bottom = PAGE_ALIGN((unsigned long)&_end + 0x4000);
1197
1198 /*
1199 * If prom_memory_limit is set we reduce the upper limits *except* for
1200 * alloc_top_high. This must be the real top of RAM so we can put
1201 * TCE's up there.
1202 */
1203
1204 alloc_top_high = ram_top;
1205
1206 if (prom_memory_limit) {
1207 if (prom_memory_limit <= alloc_bottom) {
1208 prom_printf("Ignoring mem=%x <= alloc_bottom.\n",
1209 prom_memory_limit);
1210 prom_memory_limit = 0;
1211 } else if (prom_memory_limit >= ram_top) {
1212 prom_printf("Ignoring mem=%x >= ram_top.\n",
1213 prom_memory_limit);
1214 prom_memory_limit = 0;
1215 } else {
1216 ram_top = prom_memory_limit;
1217 rmo_top = min(rmo_top, prom_memory_limit);
1218 }
1219 }
1220
1221 /*
1222 * Setup our top alloc point, that is top of RMO or top of
1223 * segment 0 when running non-LPAR.
1224 * Some RS64 machines have buggy firmware where claims up at
1225 * 1GB fail. Cap at 768MB as a workaround.
1226 * Since 768MB is plenty of room, and we need to cap to something
1227 * reasonable on 32-bit, cap at 768MB on all machines.
1228 */
1229 if (!rmo_top)
1230 rmo_top = ram_top;
1231 rmo_top = min(0x30000000ul, rmo_top);
1232 alloc_top = rmo_top;
1233 alloc_top_high = ram_top;
1234
1235 /*
1236 * Check if we have an initrd after the kernel but still inside
1237 * the RMO. If we do move our bottom point to after it.
1238 */
1239 if (prom_initrd_start &&
1240 prom_initrd_start < rmo_top &&
1241 prom_initrd_end > alloc_bottom)
1242 alloc_bottom = PAGE_ALIGN(prom_initrd_end);
1243
1244 prom_printf("memory layout at init:\n");
1245 prom_printf(" memory_limit : %x (16 MB aligned)\n", prom_memory_limit);
1246 prom_printf(" alloc_bottom : %x\n", alloc_bottom);
1247 prom_printf(" alloc_top : %x\n", alloc_top);
1248 prom_printf(" alloc_top_hi : %x\n", alloc_top_high);
1249 prom_printf(" rmo_top : %x\n", rmo_top);
1250 prom_printf(" ram_top : %x\n", ram_top);
1251}
1252
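/* Close the OF stdin instance referenced by /chosen, if there is one. */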
1253static void __init prom_close_stdin(void)
1254{
1255 __be32 val;
1256 ihandle stdin;
1257
1258 if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
1259 stdin = be32_to_cpu(val);
1260 call_prom("close", 1, 0, stdin);
1261 }
1262}
1263
1264#ifdef CONFIG_PPC_POWERNV
1265
1266#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1267static u64 __initdata prom_opal_base;
1268static u64 __initdata prom_opal_entry;
1269#endif
1270
1271#ifdef __BIG_ENDIAN__
1272/* XXX Don't change this structure without updating opal-takeover.S */
1273static struct opal_secondary_data {
1274 s64 ack; /* 0 */
1275 u64 go; /* 8 */
1276 struct opal_takeover_args args; /* 16 */
1277} opal_secondary_data;
1278
1279static u64 __initdata prom_opal_align;
1280static u64 __initdata prom_opal_size;
1281static int __initdata prom_rtas_start_cpu;
1282static u64 __initdata prom_rtas_data;
1283static u64 __initdata prom_rtas_entry;
1284
1285extern char opal_secondary_entry;
1286
1287static void __init prom_query_opal(void)
1288{
1289 long rc;
1290
1291 /* We must not query for OPAL presence on a machine that
1292 * supports TNK takeover (970 blades), as this uses the same
1293 * h-call with different arguments and will crash
1294 */
1295 if (PHANDLE_VALID(call_prom("finddevice", 1, 1,
1296 ADDR("/tnk-memory-map")))) {
1297 prom_printf("TNK takeover detected, skipping OPAL check\n");
1298 return;
1299 }
1300
1301 prom_printf("Querying for OPAL presence... ");
1302
1303 rc = opal_query_takeover(&prom_opal_size,
1304 &prom_opal_align);
1305 prom_debug("(rc = %ld) ", rc);
1306 if (rc != 0) {
1307 prom_printf("not there.\n");
1308 return;
1309 }
1310 of_platform = PLATFORM_OPAL;
1311 prom_printf(" there !\n");
1312 prom_debug(" opal_size = 0x%lx\n", prom_opal_size);
1313 prom_debug(" opal_align = 0x%lx\n", prom_opal_align);
1314 if (prom_opal_align < 0x10000)
1315 prom_opal_align = 0x10000;
1316}
1317
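/*
 * Marshal the arguments into a struct rtas_args and enter RTAS through
 * OPAL; rets[0] is returned and any extra return values are copied to
 * "outputs".
 */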
1318static int __init prom_rtas_call(int token, int nargs, int nret,
1319 int *outputs, ...)
1320{
1321 struct rtas_args rtas_args;
1322 va_list list;
1323 int i;
1324
1325 rtas_args.token = token;
1326 rtas_args.nargs = nargs;
1327 rtas_args.nret = nret;
1328 rtas_args.rets = (rtas_arg_t *)&(rtas_args.args[nargs]);
1329 va_start(list, outputs);
1330 for (i = 0; i < nargs; ++i)
1331 rtas_args.args[i] = va_arg(list, rtas_arg_t);
1332 va_end(list);
1333
1334 for (i = 0; i < nret; ++i)
1335 rtas_args.rets[i] = 0;
1336
1337 opal_enter_rtas(&rtas_args, prom_rtas_data,
1338 prom_rtas_entry);
1339
1340 if (nret > 1 && outputs != NULL)
1341 for (i = 0; i < nret-1; ++i)
1342 outputs[i] = rtas_args.rets[i+1];
1343 return (nret > 0)? rtas_args.rets[0]: 0;
1344}
1345
1346static void __init prom_opal_hold_cpus(void)
1347{
1348 int i, cnt, cpu, rc;
1349 long j;
1350 phandle node;
1351 char type[64];
1352 u32 servers[8];
1353 void *entry = (unsigned long *)&opal_secondary_entry;
1354 struct opal_secondary_data *data = &opal_secondary_data;
1355
1356 prom_debug("prom_opal_hold_cpus: start...\n");
1357 prom_debug(" - entry = 0x%x\n", entry);
1358 prom_debug(" - data = 0x%x\n", data);
1359
1360 data->ack = -1;
1361 data->go = 0;
1362
1363 /* look for cpus */
1364 for (node = 0; prom_next_node(&node); ) {
1365 type[0] = 0;
1366 prom_getprop(node, "device_type", type, sizeof(type));
1367 if (strcmp(type, "cpu") != 0)
1368 continue;
1369
1370 /* Skip non-configured cpus. */
1371 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1372 if (strcmp(type, "okay") != 0)
1373 continue;
1374
1375 cnt = prom_getprop(node, "ibm,ppc-interrupt-server#s", servers,
1376 sizeof(servers));
1377 if (cnt == PROM_ERROR)
1378 break;
1379 cnt >>= 2;
1380 for (i = 0; i < cnt; i++) {
1381 cpu = servers[i];
1382 prom_debug("CPU %d ... ", cpu);
1383 if (cpu == prom.cpu) {
1384 prom_debug("booted !\n");
1385 continue;
1386 }
1387 prom_debug("starting ... ");
1388
1389 /* Init the acknowledge var which will be reset by
1390 * the secondary cpu when it awakens from its OF
1391 * spinloop.
1392 */
1393 data->ack = -1;
1394 rc = prom_rtas_call(prom_rtas_start_cpu, 3, 1,
1395 NULL, cpu, entry, data);
1396 prom_debug("rtas rc=%d ...", rc);
1397
1398 for (j = 0; j < 100000000 && data->ack == -1; j++) {
1399 HMT_low();
1400 mb();
1401 }
1402 HMT_medium();
1403 if (data->ack != -1)
1404 prom_debug("done, PIR=0x%x\n", data->ack);
1405 else
1406 prom_debug("timeout !\n");
1407 }
1408 }
1409 prom_debug("prom_opal_hold_cpus: end...\n");
1410}
1411
1412static void __init prom_opal_takeover(void)
1413{
1414 struct opal_secondary_data *data = &opal_secondary_data;
1415 struct opal_takeover_args *args = &data->args;
1416 u64 align = prom_opal_align;
1417 u64 top_addr, opal_addr;
1418
1419 args->k_image = (u64)_stext;
1420 args->k_size = _end - _stext;
1421 args->k_entry = 0;
1422 args->k_entry2 = 0x60;
1423
1424 top_addr = _ALIGN_UP(args->k_size, align);
1425
1426 if (prom_initrd_start != 0) {
1427 args->rd_image = prom_initrd_start;
1428 args->rd_size = prom_initrd_end - args->rd_image;
1429 args->rd_loc = top_addr;
1430 top_addr = _ALIGN_UP(args->rd_loc + args->rd_size, align);
1431 }
1432
1433	/* Pick an address for the HAL. We want to go really high
1434	 * up to avoid problems with future kexecs. On the other hand
1435	 * we don't want to be all over the TCEs on P5IOC2 machines,
1436	 * which are going to be up there too. We assume the machine
1437	 * has plenty of memory, and for now we ask for the HAL to
1438	 * be just below the 1G point, or above the initrd.
1439	 */
1440 opal_addr = _ALIGN_DOWN(0x40000000 - prom_opal_size, align);
1441 if (opal_addr < top_addr)
1442 opal_addr = top_addr;
1443 args->hal_addr = opal_addr;
1444
1445 /* Copy the command line to the kernel image */
1446 strlcpy(boot_command_line, prom_cmd_line,
1447 COMMAND_LINE_SIZE);
1448
1449 prom_debug(" k_image = 0x%lx\n", args->k_image);
1450 prom_debug(" k_size = 0x%lx\n", args->k_size);
1451 prom_debug(" k_entry = 0x%lx\n", args->k_entry);
1452 prom_debug(" k_entry2 = 0x%lx\n", args->k_entry2);
1453 prom_debug(" hal_addr = 0x%lx\n", args->hal_addr);
1454 prom_debug(" rd_image = 0x%lx\n", args->rd_image);
1455 prom_debug(" rd_size = 0x%lx\n", args->rd_size);
1456 prom_debug(" rd_loc = 0x%lx\n", args->rd_loc);
1457	prom_printf("Performing OPAL takeover, this can take a few minutes...\n");
1458 prom_close_stdin();
1459 mb();
1460 data->go = 1;
1461 for (;;)
1462 opal_do_takeover(args);
1463}
1464#endif /* __BIG_ENDIAN__ */
1465
1466/*
1467 * Allocate room for and instantiate OPAL
1468 */
1469static void __init prom_instantiate_opal(void)
1470{
1471 phandle opal_node;
1472 ihandle opal_inst;
1473 u64 base, entry;
1474 u64 size = 0, align = 0x10000;
1475 __be64 val64;
1476 u32 rets[2];
1477
1478 prom_debug("prom_instantiate_opal: start...\n");
1479
1480 opal_node = call_prom("finddevice", 1, 1, ADDR("/ibm,opal"));
1481 prom_debug("opal_node: %x\n", opal_node);
1482 if (!PHANDLE_VALID(opal_node))
1483 return;
1484
1485 val64 = 0;
1486 prom_getprop(opal_node, "opal-runtime-size", &val64, sizeof(val64));
1487 size = be64_to_cpu(val64);
1488 if (size == 0)
1489 return;
1490 val64 = 0;
1491 prom_getprop(opal_node, "opal-runtime-alignment", &val64,sizeof(val64));
1492 align = be64_to_cpu(val64);
1493
1494 base = alloc_down(size, align, 0);
1495 if (base == 0) {
1496 prom_printf("OPAL allocation failed !\n");
1497 return;
1498 }
1499
1500 opal_inst = call_prom("open", 1, 1, ADDR("/ibm,opal"));
1501 if (!IHANDLE_VALID(opal_inst)) {
1502 prom_printf("opening opal package failed (%x)\n", opal_inst);
1503 return;
1504 }
1505
1506 prom_printf("instantiating opal at 0x%x...", base);
1507
1508 if (call_prom_ret("call-method", 4, 3, rets,
1509 ADDR("load-opal-runtime"),
1510 opal_inst,
1511 base >> 32, base & 0xffffffff) != 0
1512 || (rets[0] == 0 && rets[1] == 0)) {
1513 prom_printf(" failed\n");
1514 return;
1515 }
1516 entry = (((u64)rets[0]) << 32) | rets[1];
1517
1518 prom_printf(" done\n");
1519
1520 reserve_mem(base, size);
1521
1522 prom_debug("opal base = 0x%x\n", base);
1523 prom_debug("opal align = 0x%x\n", align);
1524 prom_debug("opal entry = 0x%x\n", entry);
1525 prom_debug("opal size = 0x%x\n", (long)size);
1526
1527 prom_setprop(opal_node, "/ibm,opal", "opal-base-address",
1528 &base, sizeof(base));
1529 prom_setprop(opal_node, "/ibm,opal", "opal-entry-address",
1530 &entry, sizeof(entry));
1531
1532#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
1533 prom_opal_base = base;
1534 prom_opal_entry = entry;
1535#endif
1536 prom_debug("prom_instantiate_opal: end...\n");
1537}
1538
1539#endif /* CONFIG_PPC_POWERNV */
1540
1541/*
1542 * Allocate room for and instantiate RTAS
1543 */
1544static void __init prom_instantiate_rtas(void)
1545{
1546 phandle rtas_node;
1547 ihandle rtas_inst;
1548 u32 base, entry = 0;
1549 __be32 val;
1550 u32 size = 0;
1551
1552 prom_debug("prom_instantiate_rtas: start...\n");
1553
1554 rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
1555 prom_debug("rtas_node: %x\n", rtas_node);
1556 if (!PHANDLE_VALID(rtas_node))
1557 return;
1558
1559 val = 0;
1560	prom_getprop(rtas_node, "rtas-size", &val, sizeof(val));
1561 size = be32_to_cpu(val);
1562 if (size == 0)
1563 return;
1564
1565 base = alloc_down(size, PAGE_SIZE, 0);
1566 if (base == 0)
1567 prom_panic("Could not allocate memory for RTAS\n");
1568
1569 rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
1570 if (!IHANDLE_VALID(rtas_inst)) {
1571 prom_printf("opening rtas package failed (%x)\n", rtas_inst);
1572 return;
1573 }
1574
1575 prom_printf("instantiating rtas at 0x%x...", base);
1576
1577 if (call_prom_ret("call-method", 3, 2, &entry,
1578 ADDR("instantiate-rtas"),
1579 rtas_inst, base) != 0
1580 || entry == 0) {
1581 prom_printf(" failed\n");
1582 return;
1583 }
1584 prom_printf(" done\n");
1585
1586 reserve_mem(base, size);
1587
1588 val = cpu_to_be32(base);
1589 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1590 &val, sizeof(val));
1591 val = cpu_to_be32(entry);
1592 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1593 &val, sizeof(val));
1594
1595 /* Check if it supports "query-cpu-stopped-state" */
1596 if (prom_getprop(rtas_node, "query-cpu-stopped-state",
1597 &val, sizeof(val)) != PROM_ERROR)
1598 rtas_has_query_cpu_stopped = true;
1599
1600#if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
1601	/* PowerNV takeover hack */
1602 prom_rtas_data = base;
1603 prom_rtas_entry = entry;
1604 prom_getprop(rtas_node, "start-cpu", &prom_rtas_start_cpu, 4);
1605#endif
1606 prom_debug("rtas base = 0x%x\n", base);
1607 prom_debug("rtas entry = 0x%x\n", entry);
1608 prom_debug("rtas size = 0x%x\n", (long)size);
1609
1610 prom_debug("prom_instantiate_rtas: end...\n");
1611}
1612
1613#ifdef CONFIG_PPC64
1614/*
1615 * Allocate room for and instantiate Stored Measurement Log (SML)
1616 */
1617static void __init prom_instantiate_sml(void)
1618{
1619 phandle ibmvtpm_node;
1620 ihandle ibmvtpm_inst;
1621 u32 entry = 0, size = 0;
1622 u64 base;
1623
1624 prom_debug("prom_instantiate_sml: start...\n");
1625
1626 ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/ibm,vtpm"));
1627 prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
1628 if (!PHANDLE_VALID(ibmvtpm_node))
1629 return;
1630
1631 ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/ibm,vtpm"));
1632 if (!IHANDLE_VALID(ibmvtpm_inst)) {
1633 prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
1634 return;
1635 }
1636
1637 if (call_prom_ret("call-method", 2, 2, &size,
1638 ADDR("sml-get-handover-size"),
1639 ibmvtpm_inst) != 0 || size == 0) {
1640 prom_printf("SML get handover size failed\n");
1641 return;
1642 }
1643
1644 base = alloc_down(size, PAGE_SIZE, 0);
1645 if (base == 0)
1646 prom_panic("Could not allocate memory for sml\n");
1647
1648 prom_printf("instantiating sml at 0x%x...", base);
1649
1650 if (call_prom_ret("call-method", 4, 2, &entry,
1651 ADDR("sml-handover"),
1652 ibmvtpm_inst, size, base) != 0 || entry == 0) {
1653 prom_printf("SML handover failed\n");
1654 return;
1655 }
1656 prom_printf(" done\n");
1657
1658 reserve_mem(base, size);
1659
1660 prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-base",
1661 &base, sizeof(base));
1662 prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-size",
1663 &size, sizeof(size));
1664
1665 prom_debug("sml base = 0x%x\n", base);
1666 prom_debug("sml size = 0x%x\n", (long)size);
1667
1668 prom_debug("prom_instantiate_sml: end...\n");
1669}
1670
1671/*
1672 * Allocate room for and initialize TCE tables
1673 */
1674#ifdef __BIG_ENDIAN__
1675static void __init prom_initialize_tce_table(void)
1676{
1677 phandle node;
1678 ihandle phb_node;
1679 char compatible[64], type[64], model[64];
1680 char *path = prom_scratch;
1681 u64 base, align;
1682 u32 minalign, minsize;
1683 u64 tce_entry, *tce_entryp;
1684 u64 local_alloc_top, local_alloc_bottom;
1685 u64 i;
1686
1687 if (prom_iommu_off)
1688 return;
1689
1690 prom_debug("starting prom_initialize_tce_table\n");
1691
1692 /* Cache current top of allocs so we reserve a single block */
1693 local_alloc_top = alloc_top_high;
1694 local_alloc_bottom = local_alloc_top;
1695
1696 /* Search all nodes looking for PHBs. */
1697 for (node = 0; prom_next_node(&node); ) {
1698 compatible[0] = 0;
1699 type[0] = 0;
1700 model[0] = 0;
1701 prom_getprop(node, "compatible",
1702 compatible, sizeof(compatible));
1703 prom_getprop(node, "device_type", type, sizeof(type));
1704 prom_getprop(node, "model", model, sizeof(model));
1705
1706 if ((type[0] == 0) || (strstr(type, "pci") == NULL))
1707 continue;
1708
1709 /* Keep the old logic intact to avoid regression. */
1710 if (compatible[0] != 0) {
1711 if ((strstr(compatible, "python") == NULL) &&
1712 (strstr(compatible, "Speedwagon") == NULL) &&
1713 (strstr(compatible, "Winnipeg") == NULL))
1714 continue;
1715 } else if (model[0] != 0) {
1716 if ((strstr(model, "ython") == NULL) &&
1717 (strstr(model, "peedwagon") == NULL) &&
1718 (strstr(model, "innipeg") == NULL))
1719 continue;
1720 }
1721
1722 if (prom_getprop(node, "tce-table-minalign", &minalign,
1723 sizeof(minalign)) == PROM_ERROR)
1724 minalign = 0;
1725 if (prom_getprop(node, "tce-table-minsize", &minsize,
1726 sizeof(minsize)) == PROM_ERROR)
1727 minsize = 4UL << 20;
1728
1729 /*
1730 * Even though we read what OF wants, we just set the table
1731 * size to 4 MB. This is enough to map 2GB of PCI DMA space.
1732 * By doing this, we avoid the pitfalls of trying to DMA to
1733 * MMIO space and the DMA alias hole.
1734 *
1735 * On POWER4, firmware sets the TCE region by assuming
1736 * each TCE table is 8MB. Using this memory for anything
1737 * else will impact performance, so we always allocate 8MB.
1738 * Anton
1739 */
1740 if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p))
1741 minsize = 8UL << 20;
1742 else
1743 minsize = 4UL << 20;
1744
1745 /* Align to the greater of the align or size */
1746 align = max(minalign, minsize);
1747 base = alloc_down(minsize, align, 1);
1748 if (base == 0)
1749 prom_panic("ERROR, cannot find space for TCE table.\n");
1750 if (base < local_alloc_bottom)
1751 local_alloc_bottom = base;
1752
1753 /* It seems OF doesn't null-terminate the path :-( */
1754 memset(path, 0, PROM_SCRATCH_SIZE);
1755 /* Call OF to setup the TCE hardware */
1756 if (call_prom("package-to-path", 3, 1, node,
1757 path, PROM_SCRATCH_SIZE-1) == PROM_ERROR) {
1758 prom_printf("package-to-path failed\n");
1759 }
1760
1761 /* Save away the TCE table attributes for later use. */
1762 prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
1763 prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
1764
1765 prom_debug("TCE table: %s\n", path);
1766 prom_debug("\tnode = 0x%x\n", node);
1767 prom_debug("\tbase = 0x%x\n", base);
1768 prom_debug("\tsize = 0x%x\n", minsize);
1769
1770 /* Initialize the table to have a one-to-one mapping
1771 * over the allocated size.
1772 */
1773 tce_entryp = (u64 *)base;
1774		for (i = 0; i < (minsize >> 3); tce_entryp++, i++) {
1775 tce_entry = (i << PAGE_SHIFT);
1776 tce_entry |= 0x3;
1777 *tce_entryp = tce_entry;
1778 }
1779
1780 prom_printf("opening PHB %s", path);
1781 phb_node = call_prom("open", 1, 1, path);
1782 if (phb_node == 0)
1783 prom_printf("... failed\n");
1784 else
1785 prom_printf("... done\n");
1786
1787 call_prom("call-method", 6, 0, ADDR("set-64-bit-addressing"),
1788 phb_node, -1, minsize,
1789 (u32) base, (u32) (base >> 32));
1790 call_prom("close", 1, 0, phb_node);
1791 }
1792
1793 reserve_mem(local_alloc_bottom, local_alloc_top - local_alloc_bottom);
1794
1795 /* These are only really needed if there is a memory limit in
1796 * effect, but we don't know so export them always. */
1797 prom_tce_alloc_start = local_alloc_bottom;
1798 prom_tce_alloc_end = local_alloc_top;
1799
1800 /* Flag the first invalid entry */
1801 prom_debug("ending prom_initialize_tce_table\n");
1802}
1803#endif /* __BIG_ENDIAN__ */
1804#endif /* CONFIG_PPC64 */
1805
1806/*
1807 * With CHRP SMP we need to use the OF to start the other processors.
1808 * We can't wait until smp_boot_cpus (the OF is trashed by then)
1809 * so we have to put the processors into a holding pattern controlled
1810 * by the kernel (not OF) before we destroy the OF.
1811 *
1812 * This uses a chunk of low memory, puts some holding pattern
1813 * code there and sends the other processors off to there until
1814 * smp_boot_cpus tells them to do something. The holding pattern
1815 * checks that address until its cpu # appears there; when it does,
1816 * that cpu jumps to __secondary_start(). smp_boot_cpus() takes care
1817 * of setting those values.
1818 *
1819 * We also use physical address 0x4 here to tell when a cpu
1820 * is in its holding pattern code.
1821 *
1822 * -- Cort
1823 */
1824/*
1825 * We want to reference the copy of __secondary_hold_* in the
1826 * 0 - 0x100 address range
1827 */
1828#define LOW_ADDR(x) (((unsigned long) &(x)) & 0xff)
1829
1830static void __init prom_hold_cpus(void)
1831{
1832 unsigned long i;
1833 phandle node;
1834 char type[64];
1835 unsigned long *spinloop
1836 = (void *) LOW_ADDR(__secondary_hold_spinloop);
1837 unsigned long *acknowledge
1838 = (void *) LOW_ADDR(__secondary_hold_acknowledge);
1839 unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
1840
1841 /*
1842 * On pseries, if RTAS supports "query-cpu-stopped-state",
1843 * we skip this stage, the CPUs will be started by the
1844 * kernel using RTAS.
1845 */
1846 if ((of_platform == PLATFORM_PSERIES ||
1847 of_platform == PLATFORM_PSERIES_LPAR) &&
1848 rtas_has_query_cpu_stopped) {
1849 prom_printf("prom_hold_cpus: skipped\n");
1850 return;
1851 }
1852
1853 prom_debug("prom_hold_cpus: start...\n");
1854 prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
1855 prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
1856 prom_debug(" 1) acknowledge = 0x%x\n",
1857 (unsigned long)acknowledge);
1858 prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge);
1859 prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold);
1860
1861 /* Set the common spinloop variable, so all of the secondary cpus
1862 * will block when they are awakened from their OF spinloop.
1863 * This must occur for both SMP and non SMP kernels, since OF will
1864 * be trashed when we move the kernel.
1865 */
1866 *spinloop = 0;
1867
1868 /* look for cpus */
1869 for (node = 0; prom_next_node(&node); ) {
1870 unsigned int cpu_no;
1871 __be32 reg;
1872
1873 type[0] = 0;
1874 prom_getprop(node, "device_type", type, sizeof(type));
1875 if (strcmp(type, "cpu") != 0)
1876 continue;
1877
1878 /* Skip non-configured cpus. */
1879 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1880 if (strcmp(type, "okay") != 0)
1881 continue;
1882
1883 reg = cpu_to_be32(-1); /* make sparse happy */
1884		prom_getprop(node, "reg", &reg, sizeof(reg));
1885 cpu_no = be32_to_cpu(reg);
1886
1887 prom_debug("cpu hw idx = %lu\n", cpu_no);
1888
1889 /* Init the acknowledge var which will be reset by
1890 * the secondary cpu when it awakens from its OF
1891 * spinloop.
1892 */
1893 *acknowledge = (unsigned long)-1;
1894
1895 if (cpu_no != prom.cpu) {
1896 /* Primary Thread of non-boot cpu or any thread */
1897 prom_printf("starting cpu hw idx %lu... ", cpu_no);
1898 call_prom("start-cpu", 3, 0, node,
1899 secondary_hold, cpu_no);
1900
1901 for (i = 0; (i < 100000000) &&
1902 (*acknowledge == ((unsigned long)-1)); i++ )
1903 mb();
1904
1905 if (*acknowledge == cpu_no)
1906 prom_printf("done\n");
1907 else
1908 prom_printf("failed: %x\n", *acknowledge);
1909 }
1910#ifdef CONFIG_SMP
1911 else
1912 prom_printf("boot cpu hw idx %lu\n", cpu_no);
1913#endif /* CONFIG_SMP */
1914 }
1915
1916 prom_debug("prom_hold_cpus: end...\n");
1917}
1918
1919
1920static void __init prom_init_client_services(unsigned long pp)
1921{
1922 /* Get a handle to the prom entry point before anything else */
1923 prom_entry = pp;
1924
1925 /* get a handle for the stdout device */
1926 prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
1927 if (!PHANDLE_VALID(prom.chosen))
1928 prom_panic("cannot find chosen"); /* msg won't be printed :( */
1929
1930 /* get device tree root */
1931 prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
1932 if (!PHANDLE_VALID(prom.root))
1933 prom_panic("cannot find device tree root"); /* msg won't be printed :( */
1934
1935 prom.mmumap = 0;
1936}
1937
1938#ifdef CONFIG_PPC32
1939/*
1940 * For really old powermacs, we need to map things we claim.
1941 * For that, we need the ihandle of the mmu.
1942 * Also, on the longtrail, we need to work around other bugs.
1943 */
1944static void __init prom_find_mmu(void)
1945{
1946 phandle oprom;
1947 char version[64];
1948
1949 oprom = call_prom("finddevice", 1, 1, ADDR("/openprom"));
1950 if (!PHANDLE_VALID(oprom))
1951 return;
1952 if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
1953 return;
1954 version[sizeof(version) - 1] = 0;
1955 /* XXX might need to add other versions here */
1956 if (strcmp(version, "Open Firmware, 1.0.5") == 0)
1957 of_workarounds = OF_WA_CLAIM;
1958 else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
1959 of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
1960 call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
1961 } else
1962 return;
1963 prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
1964 prom_getprop(prom.chosen, "mmu", &prom.mmumap,
1965 sizeof(prom.mmumap));
1966 prom.mmumap = be32_to_cpu(prom.mmumap);
1967 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
1968 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
1969}
1970#else
1971#define prom_find_mmu()
1972#endif
1973
1974static void __init prom_init_stdout(void)
1975{
1976 char *path = of_stdout_device;
1977 char type[16];
1978 phandle stdout_node;
1979 __be32 val;
1980
1981 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
1982 prom_panic("cannot find stdout");
1983
1984 prom.stdout = be32_to_cpu(val);
1985
1986 /* Get the full OF pathname of the stdout device */
1987 memset(path, 0, 256);
1988 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
1989 prom_printf("OF stdout device is: %s\n", of_stdout_device);
1990 prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
1991 path, strlen(path) + 1);
1992
1993 /* instance-to-package fails on PA-Semi */
1994 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
1995 if (stdout_node != PROM_ERROR) {
1996 val = cpu_to_be32(stdout_node);
1997 prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
1998 &val, sizeof(val));
1999
2000 /* If it's a display, note it */
2001 memset(type, 0, sizeof(type));
2002 prom_getprop(stdout_node, "device_type", type, sizeof(type));
2003 if (strcmp(type, "display") == 0)
2004 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
2005 }
2006}
2007
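/* Work out which platform we are booting on from the device tree. */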
2008static int __init prom_find_machine_type(void)
2009{
2010 char compat[256];
2011 int len, i = 0;
2012#ifdef CONFIG_PPC64
2013 phandle rtas;
2014 int x;
2015#endif
2016
2017 /* Look for a PowerMac or a Cell */
2018 len = prom_getprop(prom.root, "compatible",
2019 compat, sizeof(compat)-1);
2020 if (len > 0) {
2021 compat[len] = 0;
2022 while (i < len) {
2023 char *p = &compat[i];
2024 int sl = strlen(p);
2025 if (sl == 0)
2026 break;
2027 if (strstr(p, "Power Macintosh") ||
2028 strstr(p, "MacRISC"))
2029 return PLATFORM_POWERMAC;
2030#ifdef CONFIG_PPC64
2031 /* We must make sure we don't detect the IBM Cell
2032 * blades as pSeries due to some firmware issues,
2033 * so we do it here.
2034 */
2035 if (strstr(p, "IBM,CBEA") ||
2036 strstr(p, "IBM,CPBW-1.0"))
2037 return PLATFORM_GENERIC;
2038#endif /* CONFIG_PPC64 */
2039 i += sl + 1;
2040 }
2041 }
2042#ifdef CONFIG_PPC64
2043 /* Try to detect OPAL */
2044 if (PHANDLE_VALID(call_prom("finddevice", 1, 1, ADDR("/ibm,opal"))))
2045 return PLATFORM_OPAL;
2046
2047 /* Try to figure out if it's an IBM pSeries or any other
2048	 * PAPR-compliant platform. We assume it is if:
2049	 *  - /device_type is "chrp" (please, do NOT use that for future
2050	 *    non-IBM designs!)
2051 * - it has /rtas
2052 */
2053 len = prom_getprop(prom.root, "device_type",
2054 compat, sizeof(compat)-1);
2055 if (len <= 0)
2056 return PLATFORM_GENERIC;
2057 if (strcmp(compat, "chrp"))
2058 return PLATFORM_GENERIC;
2059
2060 /* Default to pSeries. We need to know if we are running LPAR */
2061 rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
2062 if (!PHANDLE_VALID(rtas))
2063 return PLATFORM_GENERIC;
2064 x = prom_getproplen(rtas, "ibm,hypertas-functions");
2065 if (x != PROM_ERROR) {
2066 prom_debug("Hypertas detected, assuming LPAR !\n");
2067 return PLATFORM_PSERIES_LPAR;
2068 }
2069 return PLATFORM_PSERIES;
2070#else
2071 return PLATFORM_GENERIC;
2072#endif
2073}
2074
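/* Set color map entry i of the given display instance via "color!". */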
2075static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
2076{
2077 return call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
2078}
2079
2080/*
2081 * If we have a display that we don't know how to drive,
2082 * we will want to try to execute OF's open method for it
2083 * later. However, OF will probably fall over if we do that after
2084 * we've taken over the MMU.
2085 * So we check whether we will need to open the display,
2086 * and if so, open it now.
2087 */
2088static void __init prom_check_displays(void)
2089{
2090 char type[16], *path;
2091 phandle node;
2092 ihandle ih;
2093 int i;
2094
2095 static unsigned char default_colors[] = {
2096 0x00, 0x00, 0x00,
2097 0x00, 0x00, 0xaa,
2098 0x00, 0xaa, 0x00,
2099 0x00, 0xaa, 0xaa,
2100 0xaa, 0x00, 0x00,
2101 0xaa, 0x00, 0xaa,
2102 0xaa, 0xaa, 0x00,
2103 0xaa, 0xaa, 0xaa,
2104 0x55, 0x55, 0x55,
2105 0x55, 0x55, 0xff,
2106 0x55, 0xff, 0x55,
2107 0x55, 0xff, 0xff,
2108 0xff, 0x55, 0x55,
2109 0xff, 0x55, 0xff,
2110 0xff, 0xff, 0x55,
2111 0xff, 0xff, 0xff
2112 };
2113 const unsigned char *clut;
2114
2115 prom_debug("Looking for displays\n");
2116 for (node = 0; prom_next_node(&node); ) {
2117 memset(type, 0, sizeof(type));
2118 prom_getprop(node, "device_type", type, sizeof(type));
2119 if (strcmp(type, "display") != 0)
2120 continue;
2121
2122 /* It seems OF doesn't null-terminate the path :-( */
2123 path = prom_scratch;
2124 memset(path, 0, PROM_SCRATCH_SIZE);
2125
2126 /*
2127 * leave some room at the end of the path for appending extra
2128 * arguments
2129 */
2130 if (call_prom("package-to-path", 3, 1, node, path,
2131 PROM_SCRATCH_SIZE-10) == PROM_ERROR)
2132 continue;
2133 prom_printf("found display : %s, opening... ", path);
2134
2135 ih = call_prom("open", 1, 1, path);
2136 if (ih == 0) {
2137 prom_printf("failed\n");
2138 continue;
2139 }
2140
2141 /* Success */
2142 prom_printf("done\n");
2143 prom_setprop(node, path, "linux,opened", NULL, 0);
2144
2145		/* Set up a usable color table when the appropriate
2146		 * method is available. Should update this to use set-colors. */
2147 clut = default_colors;
2148 for (i = 0; i < 16; i++, clut += 3)
2149 if (prom_set_color(ih, i, clut[0], clut[1],
2150 clut[2]) != 0)
2151 break;
2152
2153#ifdef CONFIG_LOGO_LINUX_CLUT224
2154 clut = PTRRELOC(logo_linux_clut224.clut);
2155 for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3)
2156 if (prom_set_color(ih, i + 32, clut[0], clut[1],
2157 clut[2]) != 0)
2158 break;
2159#endif /* CONFIG_LOGO_LINUX_CLUT224 */
2160
2161#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2162 if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2163 PROM_ERROR) {
2164 u32 width, height, pitch, addr;
2165
2166 prom_printf("Setting btext !\n");
2167 prom_getprop(node, "width", &width, 4);
2168 prom_getprop(node, "height", &height, 4);
2169 prom_getprop(node, "linebytes", &pitch, 4);
2170 prom_getprop(node, "address", &addr, 4);
2171 prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2172 width, height, pitch, addr);
2173 btext_setup_display(width, height, 8, pitch, addr);
2174 }
2175#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2176 }
2177}
2178
2179
2180/* Return (relocated) pointer to this much memory: moves initrd if reqd. */
2181static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2182 unsigned long needed, unsigned long align)
2183{
2184 void *ret;
2185
2186 *mem_start = _ALIGN(*mem_start, align);
2187 while ((*mem_start + needed) > *mem_end) {
2188 unsigned long room, chunk;
2189
2190 prom_debug("Chunk exhausted, claiming more at %x...\n",
2191 alloc_bottom);
2192 room = alloc_top - alloc_bottom;
2193 if (room > DEVTREE_CHUNK_SIZE)
2194 room = DEVTREE_CHUNK_SIZE;
2195 if (room < PAGE_SIZE)
2196 prom_panic("No memory for flatten_device_tree "
2197 "(no room)\n");
2198 chunk = alloc_up(room, 0);
2199 if (chunk == 0)
2200 prom_panic("No memory for flatten_device_tree "
2201 "(claim failed)\n");
2202 *mem_end = chunk + room;
2203 }
2204
2205 ret = (void *)*mem_start;
2206 *mem_start += needed;
2207
2208 return ret;
2209}
2210
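/* Append one 32-bit big-endian token to the flattened device tree. */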
2211#define dt_push_token(token, mem_start, mem_end) do { \
2212 void *room = make_room(mem_start, mem_end, 4, 4); \
2213 *(__be32 *)room = cpu_to_be32(token); \
2214 } while(0)
2215
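/*
 * Look up "str" in the string block built so far; returns its offset
 * from dt_string_start, or 0 if it isn't there yet.
 */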
2216static unsigned long __init dt_find_string(char *str)
2217{
2218 char *s, *os;
2219
2220 s = os = (char *)dt_string_start;
2221 s += 4;
2222 while (s < (char *)dt_string_end) {
2223 if (strcmp(s, str) == 0)
2224 return s - os;
2225 s += strlen(s) + 1;
2226 }
2227 return 0;
2228}
2229
2230/*
2231 * The Open Firmware 1275 specification states properties must be 31 bytes or
2232 * less, however not all firmwares obey this. Make it 64 bytes to be safe.
2233 */
2234#define MAX_PROPERTY_NAME 64
2235
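/*
 * First flattening pass: collect every distinct property name into the
 * string block, recursing over all children of "node".
 */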
2236static void __init scan_dt_build_strings(phandle node,
2237 unsigned long *mem_start,
2238 unsigned long *mem_end)
2239{
2240 char *prev_name, *namep, *sstart;
2241 unsigned long soff;
2242 phandle child;
2243
2244 sstart = (char *)dt_string_start;
2245
2246 /* get and store all property names */
2247 prev_name = "";
2248 for (;;) {
2249 /* 64 is max len of name including nul. */
2250 namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
2251 if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
2252 /* No more nodes: unwind alloc */
2253 *mem_start = (unsigned long)namep;
2254 break;
2255 }
2256
2257 /* skip "name" */
2258 if (strcmp(namep, "name") == 0) {
2259 *mem_start = (unsigned long)namep;
2260 prev_name = "name";
2261 continue;
2262 }
2263 /* get/create string entry */
2264 soff = dt_find_string(namep);
2265 if (soff != 0) {
2266 *mem_start = (unsigned long)namep;
2267 namep = sstart + soff;
2268 } else {
2269 /* Trim off some if we can */
2270 *mem_start = (unsigned long)namep + strlen(namep) + 1;
2271 dt_string_end = *mem_start;
2272 }
2273 prev_name = namep;
2274 }
2275
2276 /* do all our children */
2277 child = call_prom("child", 1, 1, node);
2278 while (child != 0) {
2279 scan_dt_build_strings(child, mem_start, mem_end);
2280 child = call_prom("peer", 1, 1, child);
2281 }
2282}
2283
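/*
 * Second flattening pass: emit the structure block (node markers and
 * property values) for "node" and all of its children.
 */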
2284static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2285 unsigned long *mem_end)
2286{
2287 phandle child;
2288 char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
2289 unsigned long soff;
2290 unsigned char *valp;
2291 static char pname[MAX_PROPERTY_NAME];
2292 int l, room, has_phandle = 0;
2293
2294 dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
2295
2296 /* get the node's full name */
2297 namep = (char *)*mem_start;
2298 room = *mem_end - *mem_start;
2299 if (room > 255)
2300 room = 255;
2301 l = call_prom("package-to-path", 3, 1, node, namep, room);
2302 if (l >= 0) {
2303 /* Didn't fit? Get more room. */
2304 if (l >= room) {
2305 if (l >= *mem_end - *mem_start)
2306 namep = make_room(mem_start, mem_end, l+1, 1);
2307 call_prom("package-to-path", 3, 1, node, namep, l);
2308 }
2309 namep[l] = '\0';
2310
2311 /* Fixup an Apple bug where they have bogus \0 chars in the
2312 * middle of the path in some properties, and extract
2313 * the unit name (everything after the last '/').
2314 */
2315 for (lp = p = namep, ep = namep + l; p < ep; p++) {
2316 if (*p == '/')
2317 lp = namep;
2318 else if (*p != 0)
2319 *lp++ = *p;
2320 }
2321 *lp = 0;
2322 *mem_start = _ALIGN((unsigned long)lp + 1, 4);
2323 }
2324
2325 /* get it again for debugging */
2326 path = prom_scratch;
2327 memset(path, 0, PROM_SCRATCH_SIZE);
2328 call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-1);
2329
2330 /* get and store all properties */
2331 prev_name = "";
2332 sstart = (char *)dt_string_start;
2333 for (;;) {
2334 if (call_prom("nextprop", 3, 1, node, prev_name,
2335 pname) != 1)
2336 break;
2337
2338 /* skip "name" */
2339 if (strcmp(pname, "name") == 0) {
2340 prev_name = "name";
2341 continue;
2342 }
2343
2344 /* find string offset */
2345 soff = dt_find_string(pname);
2346 if (soff == 0) {
2347 prom_printf("WARNING: Can't find string index for"
2348 " <%s>, node %s\n", pname, path);
2349 break;
2350 }
2351 prev_name = sstart + soff;
2352
2353 /* get length */
2354 l = call_prom("getproplen", 2, 1, node, pname);
2355
2356 /* sanity checks */
2357 if (l == PROM_ERROR)
2358 continue;
2359
2360 /* push property head */
2361 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2362 dt_push_token(l, mem_start, mem_end);
2363 dt_push_token(soff, mem_start, mem_end);
2364
2365 /* push property content */
2366 valp = make_room(mem_start, mem_end, l, 4);
2367 call_prom("getprop", 4, 1, node, pname, valp, l);
2368 *mem_start = _ALIGN(*mem_start, 4);
2369
2370 if (!strcmp(pname, "phandle"))
2371 has_phandle = 1;
2372 }
2373
2374 /* Add a "linux,phandle" property if no "phandle" property already
2375 * existed (can happen with OPAL)
2376 */
2377 if (!has_phandle) {
2378 soff = dt_find_string("linux,phandle");
2379 if (soff == 0)
2380			prom_printf("WARNING: Can't find string index for"
2381				    " <linux,phandle>, node %s\n", path);
2382 else {
2383 dt_push_token(OF_DT_PROP, mem_start, mem_end);
2384 dt_push_token(4, mem_start, mem_end);
2385 dt_push_token(soff, mem_start, mem_end);
2386 valp = make_room(mem_start, mem_end, 4, 4);
2387 *(__be32 *)valp = cpu_to_be32(node);
2388 }
2389 }
2390
2391 /* do all our children */
2392 child = call_prom("child", 1, 1, node);
2393 while (child != 0) {
2394 scan_dt_build_struct(child, mem_start, mem_end);
2395 child = call_prom("peer", 1, 1, child);
2396 }
2397
2398 dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
2399}
2400
2401static void __init flatten_device_tree(void)
2402{
2403 phandle root;
2404 unsigned long mem_start, mem_end, room;
2405 struct boot_param_header *hdr;
2406 char *namep;
2407 u64 *rsvmap;
2408
2409 /*
2410 * Check how much room we have between alloc top & bottom (+/- a
2411 * few pages), crop to 1MB, as this is our "chunk" size
2412 */
2413 room = alloc_top - alloc_bottom - 0x4000;
2414 if (room > DEVTREE_CHUNK_SIZE)
2415 room = DEVTREE_CHUNK_SIZE;
2416 prom_debug("starting device tree allocs at %x\n", alloc_bottom);
2417
2418 /* Now try to claim that */
2419 mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
2420 if (mem_start == 0)
2421 prom_panic("Can't allocate initial device-tree chunk\n");
2422 mem_end = mem_start + room;
2423
2424 /* Get root of tree */
2425 root = call_prom("peer", 1, 1, (phandle)0);
2426 if (root == (phandle)0)
2427 prom_panic ("couldn't get device tree root\n");
2428
2429 /* Build header and make room for mem rsv map */
2430 mem_start = _ALIGN(mem_start, 4);
2431 hdr = make_room(&mem_start, &mem_end,
2432 sizeof(struct boot_param_header), 4);
2433 dt_header_start = (unsigned long)hdr;
2434 rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);
2435
2436 /* Start of strings */
2437 mem_start = PAGE_ALIGN(mem_start);
2438 dt_string_start = mem_start;
2439 mem_start += 4; /* hole */
2440
2441 /* Add "linux,phandle" in there, we'll need it */
2442 namep = make_room(&mem_start, &mem_end, 16, 1);
2443 strcpy(namep, "linux,phandle");
2444 mem_start = (unsigned long)namep + strlen(namep) + 1;
2445
2446 /* Build string array */
2447 prom_printf("Building dt strings...\n");
2448 scan_dt_build_strings(root, &mem_start, &mem_end);
2449 dt_string_end = mem_start;
2450
2451 /* Build structure */
2452 mem_start = PAGE_ALIGN(mem_start);
2453 dt_struct_start = mem_start;
2454 prom_printf("Building dt structure...\n");
2455 scan_dt_build_struct(root, &mem_start, &mem_end);
2456 dt_push_token(OF_DT_END, &mem_start, &mem_end);
2457 dt_struct_end = PAGE_ALIGN(mem_start);
2458
2459 /* Finish header */
2460 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2461 hdr->magic = cpu_to_be32(OF_DT_HEADER);
2462 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2463 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2464 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2465 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2466 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2467 hdr->version = cpu_to_be32(OF_DT_VERSION);
2468 /* Version 16 is not backward compatible */
2469 hdr->last_comp_version = cpu_to_be32(0x10);
2470
2471 /* Copy the reserve map in */
2472 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
2473
2474#ifdef DEBUG_PROM
2475 {
2476 int i;
2477 prom_printf("reserved memory map:\n");
2478 for (i = 0; i < mem_reserve_cnt; i++)
2479 prom_printf(" %x - %x\n",
2480 be64_to_cpu(mem_reserve_map[i].base),
2481 be64_to_cpu(mem_reserve_map[i].size));
2482 }
2483#endif
2484 /* Bump mem_reserve_cnt to cause further reservations to fail
2485 * since it's too late.
2486 */
2487 mem_reserve_cnt = MEM_RESERVE_MAP_SIZE;
2488
2489 prom_printf("Device tree strings 0x%x -> 0x%x\n",
2490 dt_string_start, dt_string_end);
2491 prom_printf("Device tree struct 0x%x -> 0x%x\n",
2492 dt_struct_start, dt_struct_end);
2493}
2494
2495#ifdef CONFIG_PPC_MAPLE
2496/* PIBS Version 1.05.0000 04/26/2005 has an incorrect /ht/isa/ranges property.
2497 * The values are bad, and it doesn't even have the right number of cells. */
2498static void __init fixup_device_tree_maple(void)
2499{
2500 phandle isa;
2501 u32 rloc = 0x01002000; /* IO space; PCI device = 4 */
2502 u32 isa_ranges[6];
2503 char *name;
2504
2505 name = "/ht@0/isa@4";
2506 isa = call_prom("finddevice", 1, 1, ADDR(name));
2507 if (!PHANDLE_VALID(isa)) {
2508 name = "/ht@0/isa@6";
2509 isa = call_prom("finddevice", 1, 1, ADDR(name));
2510 rloc = 0x01003000; /* IO space; PCI device = 6 */
2511 }
2512 if (!PHANDLE_VALID(isa))
2513 return;
2514
2515 if (prom_getproplen(isa, "ranges") != 12)
2516 return;
2517 if (prom_getprop(isa, "ranges", isa_ranges, sizeof(isa_ranges))
2518 == PROM_ERROR)
2519 return;
2520
2521 if (isa_ranges[0] != 0x1 ||
2522 isa_ranges[1] != 0xf4000000 ||
2523 isa_ranges[2] != 0x00010000)
2524 return;
2525
2526 prom_printf("Fixing up bogus ISA range on Maple/Apache...\n");
2527
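	/*
	 * The replacement "ranges" is a single 6-cell entry: a 2-cell ISA
	 * child address (1 = I/O space, offset 0), a 3-cell parent address
	 * (phys.hi = rloc, 0, 0) and a 1-cell size covering the 64kB of
	 * ISA I/O space.
	 */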
2528 isa_ranges[0] = 0x1;
2529 isa_ranges[1] = 0x0;
2530 isa_ranges[2] = rloc;
2531 isa_ranges[3] = 0x0;
2532 isa_ranges[4] = 0x0;
2533 isa_ranges[5] = 0x00010000;
2534 prom_setprop(isa, name, "ranges",
2535 isa_ranges, sizeof(isa_ranges));
2536}
2537
2538#define CPC925_MC_START 0xf8000000
2539#define CPC925_MC_LENGTH 0x1000000
2540/* The values for memory-controller don't have the right number of cells */
2541static void __init fixup_device_tree_maple_memory_controller(void)
2542{
2543 phandle mc;
2544 u32 mc_reg[4];
2545 char *name = "/hostbridge@f8000000";
2546 u32 ac, sc;
2547
2548 mc = call_prom("finddevice", 1, 1, ADDR(name));
2549 if (!PHANDLE_VALID(mc))
2550 return;
2551
2552 if (prom_getproplen(mc, "reg") != 8)
2553 return;
2554
2555 prom_getprop(prom.root, "#address-cells", &ac, sizeof(ac));
2556 prom_getprop(prom.root, "#size-cells", &sc, sizeof(sc));
2557 if ((ac != 2) || (sc != 2))
2558 return;
2559
2560 if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
2561 return;
2562
2563 if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
2564 return;
2565
2566 prom_printf("Fixing up bogus hostbridge on Maple...\n");
2567
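	/*
	 * Rewrite "reg" from two 32-bit cells (start, length) into the
	 * 2-cell address / 2-cell size form the root node advertises:
	 * <0x0 CPC925_MC_START 0x0 CPC925_MC_LENGTH>.
	 */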
2568 mc_reg[0] = 0x0;
2569 mc_reg[1] = CPC925_MC_START;
2570 mc_reg[2] = 0x0;
2571 mc_reg[3] = CPC925_MC_LENGTH;
2572 prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
2573}
2574#else
2575#define fixup_device_tree_maple()
2576#define fixup_device_tree_maple_memory_controller()
2577#endif
2578
2579#ifdef CONFIG_PPC_CHRP
2580/*
2581 * Pegasos and BriQ lack the "ranges" property in the isa node
2582 * Pegasos needs decimal IRQ 14/15, not hexadecimal
2583 * Pegasos has the IDE configured in legacy mode, but advertised as native
2584 */
2585static void __init fixup_device_tree_chrp(void)
2586{
2587 phandle ph;
2588 u32 prop[6];
2589 u32 rloc = 0x01006000; /* IO space; PCI device = 12 */
2590 char *name;
2591 int rc;
2592
2593 name = "/pci@80000000/isa@c";
2594 ph = call_prom("finddevice", 1, 1, ADDR(name));
2595 if (!PHANDLE_VALID(ph)) {
2596 name = "/pci@ff500000/isa@6";
2597 ph = call_prom("finddevice", 1, 1, ADDR(name));
2598 rloc = 0x01003000; /* IO space; PCI device = 6 */
2599 }
2600 if (PHANDLE_VALID(ph)) {
2601 rc = prom_getproplen(ph, "ranges");
2602 if (rc == 0 || rc == PROM_ERROR) {
2603 prom_printf("Fixing up missing ISA range on Pegasos...\n");
2604
2605 prop[0] = 0x1;
2606 prop[1] = 0x0;
2607 prop[2] = rloc;
2608 prop[3] = 0x0;
2609 prop[4] = 0x0;
2610 prop[5] = 0x00010000;
2611 prom_setprop(ph, name, "ranges", prop, sizeof(prop));
2612 }
2613 }
2614
2615 name = "/pci@80000000/ide@C,1";
2616 ph = call_prom("finddevice", 1, 1, ADDR(name));
2617 if (PHANDLE_VALID(ph)) {
2618 prom_printf("Fixing up IDE interrupt on Pegasos...\n");
2619 prop[0] = 14;
2620 prop[1] = 0x0;
2621 prom_setprop(ph, name, "interrupts", prop, 2*sizeof(u32));
2622 prom_printf("Fixing up IDE class-code on Pegasos...\n");
2623 rc = prom_getprop(ph, "class-code", prop, sizeof(u32));
2624 if (rc == sizeof(u32)) {
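			/*
			 * Clearing bits 0 and 2 of the programming
			 * interface switches both IDE channels from
			 * native back to legacy (compatibility) mode.
			 */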
2625 prop[0] &= ~0x5;
2626 prom_setprop(ph, name, "class-code", prop, sizeof(u32));
2627 }
2628 }
2629}
2630#else
2631#define fixup_device_tree_chrp()
2632#endif
2633
2634#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
2635static void __init fixup_device_tree_pmac(void)
2636{
2637 phandle u3, i2c, mpic;
2638 u32 u3_rev;
2639 u32 interrupts[2];
2640 u32 parent;
2641
2642 /* Some G5s have a missing interrupt definition, fix it up here */
2643 u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
2644 if (!PHANDLE_VALID(u3))
2645 return;
2646 i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
2647 if (!PHANDLE_VALID(i2c))
2648 return;
2649 mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
2650 if (!PHANDLE_VALID(mpic))
2651 return;
2652
2653 /* check if proper rev of u3 */
2654 if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
2655 == PROM_ERROR)
2656 return;
2657 if (u3_rev < 0x35 || u3_rev > 0x39)
2658 return;
2659	/* does it need a fixup? */
2660 if (prom_getproplen(i2c, "interrupts") > 0)
2661 return;
2662
2663 prom_printf("fixing up bogus interrupts for u3 i2c...\n");
2664
2665	/* interrupt on this revision of u3 is number 0 and level-triggered */
2666 interrupts[0] = 0;
2667 interrupts[1] = 1;
2668 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
2669 &interrupts, sizeof(interrupts));
2670 parent = (u32)mpic;
2671 prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
2672 &parent, sizeof(parent));
2673}
2674#else
2675#define fixup_device_tree_pmac()
2676#endif
2677
2678#ifdef CONFIG_PPC_EFIKA
2679/*
2680 * The MPC5200 FEC driver requires a phy-handle property to tell it how
2681 * to talk to the PHY. If the phy-handle property is missing, this
2682 * function is called to add the appropriate nodes and link them to the
2683 * ethernet node.
2684 */
2685static void __init fixup_device_tree_efika_add_phy(void)
2686{
2687 u32 node;
2688 char prop[64];
2689 int rv;
2690
2691 /* Check if /builtin/ethernet exists - bail if it doesn't */
2692 node = call_prom("finddevice", 1, 1, ADDR("/builtin/ethernet"));
2693 if (!PHANDLE_VALID(node))
2694 return;
2695
2696 /* Check if the phy-handle property exists - bail if it does */
2697 rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
2698 if (!rv)
2699 return;
2700
2701 /*
2702	 * At this point the ethernet device doesn't have a PHY described.
2703	 * Now we need to add the missing PHY node and linkage.
2704 */
2705
2706 /* Check for an MDIO bus node - if missing then create one */
2707 node = call_prom("finddevice", 1, 1, ADDR("/builtin/mdio"));
2708 if (!PHANDLE_VALID(node)) {
2709 prom_printf("Adding Ethernet MDIO node\n");
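		/*
		 * The Forth fragment below creates /builtin/mdio with
		 * #address-cells = 1, #size-cells = 0,
		 * compatible = "fsl,mpc5200b-mdio",
		 * reg = <0xf0003000 0x400> and interrupts = <2 5 3>.
		 */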
2710 call_prom("interpret", 1, 1,
2711 " s\" /builtin\" find-device"
2712 " new-device"
2713 " 1 encode-int s\" #address-cells\" property"
2714 " 0 encode-int s\" #size-cells\" property"
2715 " s\" mdio\" device-name"
2716 " s\" fsl,mpc5200b-mdio\" encode-string"
2717 " s\" compatible\" property"
2718 " 0xf0003000 0x400 reg"
2719 " 0x2 encode-int"
2720 " 0x5 encode-int encode+"
2721 " 0x3 encode-int encode+"
2722 " s\" interrupts\" property"
2723 " finish-device");
2724	}
2725
2726 /* Check for a PHY device node - if missing then create one and
2727	 * give its phandle to the ethernet node */
2728 node = call_prom("finddevice", 1, 1,
2729 ADDR("/builtin/mdio/ethernet-phy"));
2730 if (!PHANDLE_VALID(node)) {
2731 prom_printf("Adding Ethernet PHY node\n");
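		/*
		 * The Forth fragment below creates /builtin/mdio/ethernet-phy
		 * with reg = <0x10>, converts its own ihandle to a phandle
		 * (my-self ihandle>phandle) and stores that phandle as the
		 * phy-handle property of /builtin/ethernet.
		 */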
2732 call_prom("interpret", 1, 1,
2733 " s\" /builtin/mdio\" find-device"
2734 " new-device"
2735 " s\" ethernet-phy\" device-name"
2736 " 0x10 encode-int s\" reg\" property"
2737 " my-self"
2738 " ihandle>phandle"
2739 " finish-device"
2740 " s\" /builtin/ethernet\" find-device"
2741 " encode-int"
2742 " s\" phy-handle\" property"
2743 " device-end");
2744 }
2745}
2746
2747static void __init fixup_device_tree_efika(void)
2748{
2749 int sound_irq[3] = { 2, 2, 0 };
2750 int bcomm_irq[3*16] = { 3,0,0, 3,1,0, 3,2,0, 3,3,0,
2751 3,4,0, 3,5,0, 3,6,0, 3,7,0,
2752 3,8,0, 3,9,0, 3,10,0, 3,11,0,
2753 3,12,0, 3,13,0, 3,14,0, 3,15,0 };
2754 u32 node;
2755 char prop[64];
2756 int rv, len;
2757
2758	/* Check if we're really running on an EFIKA */
2759 node = call_prom("finddevice", 1, 1, ADDR("/"));
2760 if (!PHANDLE_VALID(node))
2761 return;
2762
2763 rv = prom_getprop(node, "model", prop, sizeof(prop));
2764 if (rv == PROM_ERROR)
2765 return;
2766 if (strcmp(prop, "EFIKA5K2"))
2767 return;
2768
2769 prom_printf("Applying EFIKA device tree fixups\n");
2770
2771 /* Claiming to be 'chrp' is death */
2772 node = call_prom("finddevice", 1, 1, ADDR("/"));
2773 rv = prom_getprop(node, "device_type", prop, sizeof(prop));
2774 if (rv != PROM_ERROR && (strcmp(prop, "chrp") == 0))
2775 prom_setprop(node, "/", "device_type", "efika", sizeof("efika"));
2776
2777	/* CODEGEN,description is exposed in /proc/cpuinfo so
2778	 * fix that too */
2779 rv = prom_getprop(node, "CODEGEN,description", prop, sizeof(prop));
2780 if (rv != PROM_ERROR && (strstr(prop, "CHRP")))
2781 prom_setprop(node, "/", "CODEGEN,description",
2782 "Efika 5200B PowerPC System",
2783 sizeof("Efika 5200B PowerPC System"));
2784
2785 /* Fixup bestcomm interrupts property */
2786 node = call_prom("finddevice", 1, 1, ADDR("/builtin/bestcomm"));
2787 if (PHANDLE_VALID(node)) {
2788 len = prom_getproplen(node, "interrupts");
2789 if (len == 12) {
2790 prom_printf("Fixing bestcomm interrupts property\n");
2791			prom_setprop(node, "/builtin/bestcomm", "interrupts",
2792 bcomm_irq, sizeof(bcomm_irq));
2793 }
2794 }
2795
2796 /* Fixup sound interrupts property */
2797 node = call_prom("finddevice", 1, 1, ADDR("/builtin/sound"));
2798 if (PHANDLE_VALID(node)) {
2799 rv = prom_getprop(node, "interrupts", prop, sizeof(prop));
2800 if (rv == PROM_ERROR) {
2801 prom_printf("Adding sound interrupts property\n");
2802 prom_setprop(node, "/builtin/sound", "interrupts",
2803 sound_irq, sizeof(sound_irq));
2804 }
2805 }
2806
2807 /* Make sure ethernet phy-handle property exists */
2808 fixup_device_tree_efika_add_phy();
2809}
2810#else
2811#define fixup_device_tree_efika()
2812#endif
2813
2814static void __init fixup_device_tree(void)
2815{
2816 fixup_device_tree_maple();
2817 fixup_device_tree_maple_memory_controller();
2818 fixup_device_tree_chrp();
2819 fixup_device_tree_pmac();
2820 fixup_device_tree_efika();
2821}
2822
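/*
 * The boot CPU is located by reading the "cpu" property of /chosen (the
 * ihandle of the CPU OF is running on), converting it to a package with
 * "instance-to-package" and reading that node's "reg" property, which
 * holds the hardware CPU id.
 */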
2823static void __init prom_find_boot_cpu(void)
2824{
2825 __be32 rval;
2826 ihandle prom_cpu;
2827 phandle cpu_pkg;
2828
2829 rval = 0;
2830 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
2831 return;
2832 prom_cpu = be32_to_cpu(rval);
2833
2834 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
2835
2836 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
2837 prom.cpu = be32_to_cpu(rval);
2838
2839 prom_debug("Booting CPU hw index = %lu\n", prom.cpu);
2840}
2841
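/*
 * r3 and r4 carry the initrd start address and size from the boot loader.
 * They are recorded in /chosen as linux,initrd-start / linux,initrd-end
 * and the region is reserved so it survives until the kernel can use it.
 */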
2842static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
2843{
2844#ifdef CONFIG_BLK_DEV_INITRD
2845 if (r3 && r4 && r4 != 0xdeadbeef) {
2846 __be64 val;
2847
2848 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
2849 prom_initrd_end = prom_initrd_start + r4;
2850
2851 val = cpu_to_be64(prom_initrd_start);
2852 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
2853 &val, sizeof(val));
2854 val = cpu_to_be64(prom_initrd_end);
2855 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
2856 &val, sizeof(val));
2857
2858 reserve_mem(prom_initrd_start,
2859 prom_initrd_end - prom_initrd_start);
2860
2861 prom_debug("initrd_start=0x%x\n", prom_initrd_start);
2862 prom_debug("initrd_end=0x%x\n", prom_initrd_end);
2863 }
2864#endif /* CONFIG_BLK_DEV_INITRD */
2865}
2866
2867#ifdef CONFIG_PPC64
2868#ifdef CONFIG_RELOCATABLE
2869static void reloc_toc(void)
2870{
2871}
2872
2873static void unreloc_toc(void)
2874{
2875}
2876#else
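/*
 * Without CONFIG_RELOCATABLE, the TOC entries used by prom_init hold
 * link-time addresses while this code runs wherever it was loaded.
 * reloc_toc() adds the load offset to each 64-bit entry of prom_init's
 * TOC section (the linker script places those entries at the start of
 * the TOC, which r2 points 0x8000 past), and unreloc_toc() undoes the
 * adjustment before we return.
 */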
2877static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
2878{
2879 unsigned long i;
2880 unsigned long *toc_entry;
2881
2882 /* Get the start of the TOC by using r2 directly. */
2883 asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
2884
2885 for (i = 0; i < nr_entries; i++) {
2886 *toc_entry = *toc_entry + offset;
2887 toc_entry++;
2888 }
2889}
2890
2891static void reloc_toc(void)
2892{
2893 unsigned long offset = reloc_offset();
2894 unsigned long nr_entries =
2895 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
2896
2897 __reloc_toc(offset, nr_entries);
2898
2899 mb();
2900}
2901
2902static void unreloc_toc(void)
2903{
2904 unsigned long offset = reloc_offset();
2905 unsigned long nr_entries =
2906 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
2907
2908 mb();
2909
2910 __reloc_toc(-offset, nr_entries);
2911}
2912#endif
2913#endif
2914
2915/*
2916 * We enter here very early on, while the Open Firmware prom is still
2917 * handling exceptions and managing the MMU hash table for us.
2918 */
2919
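/*
 * r3 and r4 carry the initrd start/size from the boot loader, pp is the
 * entry point of the Open Firmware client interface and kbase is the
 * base address the kernel image was loaded at; r6 and r7 are passed
 * through unused here.
 */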
2920unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2921 unsigned long pp,
2922 unsigned long r6, unsigned long r7,
2923 unsigned long kbase)
2924{
2925 unsigned long hdr;
2926
2927#ifdef CONFIG_PPC32
2928 unsigned long offset = reloc_offset();
2929 reloc_got2(offset);
2930#else
2931 reloc_toc();
2932#endif
2933
2934 /*
2935 * First zero the BSS
2936 */
2937 memset(&__bss_start, 0, __bss_stop - __bss_start);
2938
2939 /*
2940 * Init interface to Open Firmware, get some node references,
2941 * like /chosen
2942 */
2943 prom_init_client_services(pp);
2944
2945 /*
2946 * See if this OF is old enough that we need to do explicit maps
2947 * and other workarounds
2948 */
2949 prom_find_mmu();
2950
2951 /*
2952 * Init prom stdout device
2953 */
2954 prom_init_stdout();
2955
2956 prom_printf("Preparing to boot %s", linux_banner);
2957
2958 /*
2959 * Get default machine type. At this point, we do not differentiate
2960 * between pSeries SMP and pSeries LPAR
2961 */
2962 of_platform = prom_find_machine_type();
2963 prom_printf("Detected machine type: %x\n", of_platform);
2964
2965#ifndef CONFIG_NONSTATIC_KERNEL
2966 /* Bail if this is a kdump kernel. */
2967 if (PHYSICAL_START > 0)
2968 prom_panic("Error: You can't boot a kdump kernel from OF!\n");
2969#endif
2970
2971 /*
2972 * Check for an initrd
2973 */
2974 prom_check_initrd(r3, r4);
2975
2976#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
2977 /*
2978 * On pSeries, inform the firmware about our capabilities
2979 */
2980 if (of_platform == PLATFORM_PSERIES ||
2981 of_platform == PLATFORM_PSERIES_LPAR)
2982 prom_send_capabilities();
2983#endif
2984
2985 /*
2986 * Copy the CPU hold code
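	 * (copy_and_flush() copies the first 0x100 bytes of the kernel image,
	 * which contain the hold code, down to physical address 0)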
2987 */
2988 if (of_platform != PLATFORM_POWERMAC)
2989 copy_and_flush(0, kbase, 0x100, 0);
2990
2991 /*
2992 * Do early parsing of command line
2993 */
2994 early_cmdline_parse();
2995
2996 /*
2997 * Initialize memory management within prom_init
2998 */
2999 prom_init_mem();
3000
3001 /*
3002 * Determine which cpu is actually running right _now_
3003 */
3004 prom_find_boot_cpu();
3005
3006 /*
3007 * Initialize display devices
3008 */
3009 prom_check_displays();
3010
3011#if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
3012 /*
3013	 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
3014	 * that uses the allocator; we need to make sure we get the top of memory
3015	 * available for us here.
3016 */
3017 if (of_platform == PLATFORM_PSERIES)
3018 prom_initialize_tce_table();
3019#endif
3020
3021 /*
3022 * On non-powermacs, try to instantiate RTAS. PowerMacs don't
3023 * have a usable RTAS implementation.
3024 */
3025 if (of_platform != PLATFORM_POWERMAC &&
3026 of_platform != PLATFORM_OPAL)
3027 prom_instantiate_rtas();
3028
3029#ifdef CONFIG_PPC_POWERNV
3030#ifdef __BIG_ENDIAN__
3031	/* Detect HAL and try instantiating it & doing takeover */
3032 if (of_platform == PLATFORM_PSERIES_LPAR) {
3033 prom_query_opal();
3034 if (of_platform == PLATFORM_OPAL) {
3035 prom_opal_hold_cpus();
3036 prom_opal_takeover();
3037 }
3038 } else
3039#endif /* __BIG_ENDIAN__ */
3040 if (of_platform == PLATFORM_OPAL)
3041 prom_instantiate_opal();
3042#endif /* CONFIG_PPC_POWERNV */
3043
3044#ifdef CONFIG_PPC64
3045	/* Instantiate the TPM stored measurement log (SML) */
3046 prom_instantiate_sml();
3047#endif
3048
3049 /*
3050 * On non-powermacs, put all CPUs in spin-loops.
3051 *
3052	 * PowerMacs use a different mechanism to spin CPUs.
3053	 *
3054	 * (This must be done after instantiating RTAS.)
3055 */
3056 if (of_platform != PLATFORM_POWERMAC &&
3057 of_platform != PLATFORM_OPAL)
3058 prom_hold_cpus();
3059
3060 /*
3061	 * Fill in some information for use by the kernel later on
3062 */
3063 if (prom_memory_limit) {
3064 __be64 val = cpu_to_be64(prom_memory_limit);
3065 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
3066 &val, sizeof(val));
3067 }
3068#ifdef CONFIG_PPC64
3069 if (prom_iommu_off)
3070 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
3071 NULL, 0);
3072
3073 if (prom_iommu_force_on)
3074 prom_setprop(prom.chosen, "/chosen", "linux,iommu-force-on",
3075 NULL, 0);
3076
3077 if (prom_tce_alloc_start) {
3078 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-start",
3079 &prom_tce_alloc_start,
3080 sizeof(prom_tce_alloc_start));
3081 prom_setprop(prom.chosen, "/chosen", "linux,tce-alloc-end",
3082 &prom_tce_alloc_end,
3083 sizeof(prom_tce_alloc_end));
3084 }
3085#endif
3086
3087 /*
3088	 * Fix up any known bugs in the device tree
3089 */
3090 fixup_device_tree();
3091
3092 /*
3093 * Now finally create the flattened device-tree
3094 */
3095 prom_printf("copying OF device tree...\n");
3096 flatten_device_tree();
3097
3098 /*
3099	 * Close stdin in case it is USB and still active on IBM machines.
3100	 * Unfortunately, quiesce crashes on some PowerMacs if we have
3101	 * already closed stdin (in particular the PowerBook 101). It
3102	 * appears that the OPAL version of OFW doesn't like it either.
3103 */
3104 if (of_platform != PLATFORM_POWERMAC &&
3105 of_platform != PLATFORM_OPAL)
3106 prom_close_stdin();
3107
3108 /*
3109	 * Call the OF "quiesce" method to shut down pending DMAs from
3110	 * devices, etc.
3111 */
3112 prom_printf("Calling quiesce...\n");
3113 call_prom("quiesce", 0, 0);
3114
3115 /*
3116	 * And finally, call the kernel, passing it the flattened device
3117	 * tree and NULL as r5, thus triggering the new entry point, which
3118	 * is common to us and kexec.
3119 */
3120 hdr = dt_header_start;
3121
3122 /* Don't print anything after quiesce under OPAL, it crashes OFW */
3123 if (of_platform != PLATFORM_OPAL) {
3124 prom_printf("returning from prom_init\n");
3125 prom_debug("->dt_header_start=0x%x\n", hdr);
3126 }
3127
3128#ifdef CONFIG_PPC32
3129 reloc_got2(-offset);
3130#else
3131 unreloc_toc();
3132#endif
3133
3134#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
3135 /* OPAL early debug gets the OPAL base & entry in r8 and r9 */
3136 __start(hdr, kbase, 0, 0, 0,
3137 prom_opal_base, prom_opal_entry);
3138#else
3139 __start(hdr, kbase, 0, 0, 0, 0, 0);
3140#endif
3141
3142 return 0;
3143}