/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000, 05 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2000 by Silicon Graphics, Inc.
 * Copyright (C) 2004 by Christoph Hellwig
 *
 * On SGI IP27 the ARC memory configuration data is completely bogus, but
 * alternate, easier to use mechanisms are available.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/swap.h>
#include <linux/pfn.h>
#include <linux/highmem.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>

#include <asm/sn/arch.h>
#include <asm/sn/agent.h>
#include <asm/sn/klconfig.h>

#include "ip27-common.h"

#define SLOT_PFNSHIFT           (SLOT_SHIFT - PAGE_SHIFT)
#define PFN_NASIDSHFT           (NASID_SHFT - PAGE_SHIFT)

struct node_data *__node_data[MAX_NUMNODES];

EXPORT_SYMBOL(__node_data);

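/*
 * Build a mask with one bit set for every region that contains at least
 * one online node; a node's region is its NASID shifted right by the
 * region shift reported by get_region_shift().
 */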
static u64 gen_region_mask(void)
{
        int region_shift;
        u64 region_mask;
        nasid_t nasid;

        region_shift = get_region_shift();
        region_mask = 0;
        for_each_online_node(nasid)
                region_mask |= BIT_ULL(nasid >> region_shift);

        return region_mask;
}

#define rou_rflag       rou_flags

static int router_distance;

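/*
 * Depth-first search from router_a towards router_b across the router
 * ports recorded in klconfig.  router_distance is lowered to the
 * smallest hop count found; rou_rflag marks routers already on the
 * current path so the recursion does not loop.
 */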
static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth)
{
        klrou_t *router;
        lboard_t *brd;
        int port;

        if (router_a->rou_rflag == 1)
                return;

        if (depth >= router_distance)
                return;

        router_a->rou_rflag = 1;

        for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
                if (router_a->rou_port[port].port_nasid == INVALID_NASID)
                        continue;

                brd = (lboard_t *)NODE_OFFSET_TO_K0(
                        router_a->rou_port[port].port_nasid,
                        router_a->rou_port[port].port_offset);

                if (brd->brd_type == KLTYPE_ROUTER) {
                        router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
                        if (router == router_b) {
                                if (depth < router_distance)
                                        router_distance = depth;
                        }
                        else
                                router_recurse(router, router_b, depth + 1);
                }
        }

        router_a->rou_rflag = 0;
}

unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
EXPORT_SYMBOL(__node_distances);

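/*
 * Distance between two nodes: LOCAL_DISTANCE for a node to itself,
 * LOCAL_DISTANCE + 1 when both nodes hang off the same router, else
 * LOCAL_DISTANCE plus the router hop count.  255 is returned when a
 * node cannot be matched to a router at all.
 */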
static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
{
        klrou_t *router, *router_a = NULL, *router_b = NULL;
        lboard_t *brd, *dest_brd;
        nasid_t nasid;
        int port;

        /* Figure out which routers the nodes in question are connected to */
        for_each_online_node(nasid) {
                brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
                                        KLTYPE_ROUTER);

                if (!brd)
                        continue;

                do {
                        if (brd->brd_flags & DUPLICATE_BOARD)
                                continue;

                        router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
                        router->rou_rflag = 0;

                        for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
                                if (router->rou_port[port].port_nasid == INVALID_NASID)
                                        continue;

                                dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
                                        router->rou_port[port].port_nasid,
                                        router->rou_port[port].port_offset);

                                if (dest_brd->brd_type == KLTYPE_IP27) {
                                        if (dest_brd->brd_nasid == nasid_a)
                                                router_a = router;
                                        if (dest_brd->brd_nasid == nasid_b)
                                                router_b = router;
                                }
                        }

                } while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
        }

        if (nasid_a == nasid_b)
                return LOCAL_DISTANCE;

        if (router_a == router_b)
                return LOCAL_DISTANCE + 1;

        if (router_a == NULL) {
                pr_info("node_distance: router_a NULL\n");
                return 255;
        }
        if (router_b == NULL) {
                pr_info("node_distance: router_b NULL\n");
                return 255;
        }

        router_distance = 100;
        router_recurse(router_a, router_b, 2);

        return LOCAL_DISTANCE + router_distance;
}

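/*
 * Fill __node_distances for every pair of online nodes; entries
 * involving offline nodes are left at -1 (255 in the unsigned char
 * matrix).
 */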
static void __init init_topology_matrix(void)
{
        nasid_t row, col;

        for (row = 0; row < MAX_NUMNODES; row++)
                for (col = 0; col < MAX_NUMNODES; col++)
                        __node_distances[row][col] = -1;

        for_each_online_node(row) {
                for_each_online_node(col) {
                        __node_distances[row][col] =
                                compute_node_distance(row, col);
                }
        }
}

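/*
 * Print the node distance matrix and, for every router found in
 * klconfig, which node NASIDs and which other routers ("r") sit on its
 * ports.
 */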
static void __init dump_topology(void)
{
        nasid_t nasid;
        lboard_t *brd, *dest_brd;
        int port;
        int router_num = 0;
        klrou_t *router;
        nasid_t row, col;

        pr_info("************** Topology ********************\n");

        pr_info(" ");
        for_each_online_node(col)
                pr_cont("%02d ", col);
        pr_cont("\n");
        for_each_online_node(row) {
                pr_info("%02d ", row);
                for_each_online_node(col)
                        pr_cont("%2d ", node_distance(row, col));
                pr_cont("\n");
        }

        for_each_online_node(nasid) {
                brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
                                        KLTYPE_ROUTER);

                if (!brd)
                        continue;

                do {
                        if (brd->brd_flags & DUPLICATE_BOARD)
                                continue;
                        pr_cont("Router %d:", router_num);
                        router_num++;

                        router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);

                        for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
                                if (router->rou_port[port].port_nasid == INVALID_NASID)
                                        continue;

                                dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
                                        router->rou_port[port].port_nasid,
                                        router->rou_port[port].port_offset);

                                if (dest_brd->brd_type == KLTYPE_IP27)
                                        pr_cont(" %d", dest_brd->brd_nasid);
                                if (dest_brd->brd_type == KLTYPE_ROUTER)
                                        pr_cont(" r");
                        }
                        pr_cont("\n");

                } while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
        }
}

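/*
 * First page frame of a memory slot: the NASID supplies the upper
 * physical address bits, the slot number the bits below it.
 */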
static unsigned long __init slot_getbasepfn(nasid_t nasid, int slot)
{
        return ((unsigned long)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
}

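/*
 * Size of a memory slot in pages, taken from the klconfig memory bank
 * info of the node board.  Bank sizes are recorded in megabytes and a
 * bank spans four slots; banks of 128 MB or less report all of their
 * memory in the first slot of the bank, larger banks are spread evenly
 * across their four slots.
 */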
static unsigned long __init slot_psize_compute(nasid_t nasid, int slot)
{
        lboard_t *brd;
        klmembnk_t *banks;
        unsigned long size;

        /* Find the node board */
        brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
        if (!brd)
                return 0;

        /* Get the memory bank structure */
        banks = (klmembnk_t *) find_first_component(brd, KLSTRUCT_MEMBNK);
        if (!banks)
                return 0;

        /* Size in _Megabytes_ */
        size = (unsigned long)banks->membnk_bnksz[slot/4];

        /* hack for 128 dimm banks */
        if (size <= 128) {
                if (slot % 4 == 0) {
                        size <<= 20;            /* size in bytes */
                        return size >> PAGE_SHIFT;
                } else
                        return 0;
        } else {
                size /= 4;
                size <<= 20;
                return size >> PAGE_SHIFT;
        }
}

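/*
 * Early hub setup: probe the CPUs, build the topology matrix and the
 * region mask, and program PI_REGION_PRESENT and the CALIAS size on
 * every online hub.
 */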
static void __init mlreset(void)
{
        u64 region_mask;
        nasid_t nasid;

        master_nasid = get_nasid();

        /*
         * Probe for all CPUs - this creates the cpumask and sets up the
         * mapping tables.  We need to do this as early as possible.
         */
#ifdef CONFIG_SMP
        cpu_node_probe();
#endif

        init_topology_matrix();
        dump_topology();

        region_mask = gen_region_mask();

        setup_replication_mask();

        /*
         * Set all nodes' calias sizes to 8k
         */
        for_each_online_node(nasid) {
                /*
                 * Always have node 0 in the region mask, otherwise
                 * CALIAS accesses get exceptions since the hub
                 * thinks it is a node 0 address.
                 */
                REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
                REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);

#ifdef LATER
                /*
                 * Set up all hubs to have a big window pointing at
                 * widget 0. Memory mode, widget 0, offset 0
                 */
                REMOTE_HUB_S(nasid, IIO_ITTE(SWIN0_BIGWIN),
                        ((HUB_PIO_MAP_TO_MEM << IIO_ITTE_IOSP_SHIFT) |
                        (0 << IIO_ITTE_WIDGET_SHIFT)));
#endif
        }
}

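/*
 * Register the memory of every slot of every online node with
 * memblock.  Slots are ignored from the point where the struct page
 * array for the node's span so far would no longer fit into slot 0.
 */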
static void __init szmem(void)
{
        unsigned long slot_psize, slot0sz = 0, nodebytes;       /* Hack to detect problem configs */
        int slot;
        nasid_t node;

        for_each_online_node(node) {
                nodebytes = 0;
                for (slot = 0; slot < MAX_MEM_SLOTS; slot++) {
                        slot_psize = slot_psize_compute(node, slot);
                        if (slot == 0)
                                slot0sz = slot_psize;
                        /*
                         * We need to refine the hack when we have replicated
                         * kernel text.
                         */
                        nodebytes += (1LL << SLOT_SHIFT);

                        if (!slot_psize)
                                continue;

                        if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) >
                                        (slot0sz << PAGE_SHIFT)) {
                                pr_info("Ignoring slot %d onwards on node %d\n",
                                        slot, node);
                                slot = MAX_MEM_SLOTS;
                                continue;
                        }
                        memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)),
                                          PFN_PHYS(slot_psize), node,
                                          MEMBLOCK_NONE);
                }
        }
}

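/*
 * Put the node_data/hub_data structures of a node into the first free
 * page of its slot 0 and reserve everything from the start of the slot
 * up to and including those structures.
 */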
static void __init node_mem_init(nasid_t node)
{
        unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
        unsigned long slot_freepfn = node_getfirstfree(node);
        unsigned long start_pfn, end_pfn;

        get_pfn_range_for_nid(node, &start_pfn, &end_pfn);

        /*
         * Allocate the node data structures on the node first.
         */
        __node_data[node] = __va(slot_freepfn << PAGE_SHIFT);
        memset(__node_data[node], 0, PAGE_SIZE);

        NODE_DATA(node)->node_start_pfn = start_pfn;
        NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;

        cpumask_clear(&hub_data(node)->h_cpus);

        slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
                               sizeof(struct hub_data));

        memblock_reserve(slot_firstpfn << PAGE_SHIFT,
                         ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT));
}

/*
 * A node with nothing.  We use it to avoid any special casing in
 * cpumask_of_node.
 */
static struct node_data null_node = {
        .hub = {
                .h_cpus = CPU_MASK_NONE
        }
};

/*
 * Currently, the intranode memory hole support assumes that each slot
 * contains at least 32 MBytes of memory.  We assume all bootmem data
 * fits on the first slot.
 */
void __init prom_meminit(void)
{
        nasid_t node;

        mlreset();
        szmem();
        max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());

        for (node = 0; node < MAX_NUMNODES; node++) {
                if (node_online(node)) {
                        node_mem_init(node);
                        continue;
                }
                __node_data[node] = &null_node;
        }
}

extern void setup_zero_pages(void);

void __init paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES] = {0, };

        pagetable_init();
        zones_size[ZONE_NORMAL] = max_low_pfn;
        free_area_init(zones_size);
}

void __init mem_init(void)
{
        high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
        memblock_free_all();
        setup_zero_pages();     /* This comes from node 0 */
}

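/*
 * Allocate a zeroed pg_data_t for a node from memblock and install it
 * as that node's __node_data entry.
 */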
pg_data_t * __init arch_alloc_nodedata(int nid)
{
        return memblock_alloc(sizeof(pg_data_t), SMP_CACHE_BYTES);
}

void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
        __node_data[nid] = (struct node_data *)pgdat;
}