1/*
2 * inventory.c
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries)
10 * Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard
11 *
12 * These are the routines to discover what hardware exists in this box.
13 * This task is complicated by there being 3 different ways of
14 * performing an inventory, depending largely on the age of the box.
15 * The recommended way to do this is to check to see whether the machine
16 * is a `Snake' first, then try System Map, then try PAT. We try System
17 * Map before checking for a Snake -- this probably doesn't cause any
18 * problems, but...
19 */
20
21#include <linux/types.h>
22#include <linux/kernel.h>
23#include <linux/init.h>
24#include <linux/slab.h>
25#include <linux/mm.h>
26#include <asm/hardware.h>
27#include <asm/io.h>
28#include <asm/mmzone.h>
29#include <asm/pdc.h>
30#include <asm/pdcpat.h>
31#include <asm/processor.h>
32#include <asm/page.h>
33#include <asm/parisc-device.h>
34
35/*
36** Debug options
37** DEBUG_PAT Dump details which PDC PAT provides about ranges/devices.
38*/
39#undef DEBUG_PAT
40
41int pdc_type __read_mostly = PDC_TYPE_ILLEGAL;
42
43void __init setup_pdc(void)
44{
45 long status;
46 unsigned int bus_id;
47 struct pdc_system_map_mod_info module_result;
48 struct pdc_module_path module_path;
49 struct pdc_model model;
50#ifdef CONFIG_64BIT
51 struct pdc_pat_cell_num cell_info;
52#endif
53
54 /* Determine the pdc "type" used on this machine */
55
56 printk(KERN_INFO "Determining PDC firmware type: ");
57
58 status = pdc_system_map_find_mods(&module_result, &module_path, 0);
59 if (status == PDC_OK) {
60 pdc_type = PDC_TYPE_SYSTEM_MAP;
61 printk("System Map.\n");
62 return;
63 }
64
65 /*
66 * If the machine doesn't support PDC_SYSTEM_MAP then either it
67 * is a pdc pat box, or it is an older box. All 64 bit capable
68 * machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
69 */
70
71 /*
72 * TODO: We should test for 64 bit capability and give a
73 * clearer message.
74 */
75
76#ifdef CONFIG_64BIT
77 status = pdc_pat_cell_get_number(&cell_info);
78 if (status == PDC_OK) {
79 pdc_type = PDC_TYPE_PAT;
80 printk("64 bit PAT.\n");
81 return;
82 }
83#endif
84
85 /* Check the CPU's bus ID. There's probably a better test. */
86
87 status = pdc_model_info(&model);
88
89 bus_id = (model.hversion >> (4 + 7)) & 0x1f;
90
91 switch (bus_id) {
92 case 0x4: /* 720, 730, 750, 735, 755 */
93 case 0x6: /* 705, 710 */
94 case 0x7: /* 715, 725 */
95 case 0x8: /* 745, 747, 742 */
96 case 0xA: /* 712 and similar */
97 case 0xC: /* 715/64, at least */
98
99 pdc_type = PDC_TYPE_SNAKE;
100 printk("Snake.\n");
101 return;
102
103 default: /* Everything else */
104
105 printk("Unsupported.\n");
106 panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
107 }
108}
109
110#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */
111
112static void __init
113set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
114 unsigned long pages4k)
115{
116 /* Rather than aligning and potentially throwing away
117 * memory, we'll assume that any ranges are already
118 * nicely aligned with any reasonable page size, and
119 * panic if they are not (it's more likely that the
120 * pdc info is bad in this case).
121 */
122
123 if (unlikely( ((start & (PAGE_SIZE - 1)) != 0)
124 || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) {
125
126 panic("Memory range doesn't align with page size!\n");
127 }
128
129 pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
130 pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
131}
132
133static void __init pagezero_memconfig(void)
134{
135 unsigned long npages;
136
137 /* Use the 32 bit information from page zero to create a single
138 * entry in the pmem_ranges[] table.
139 *
140 * We currently don't support machines with contiguous memory
141 * >= 4 Gb, who report that memory using 64 bit only fields
142 * on page zero. It's not worth doing until it can be tested,
143 * and it is not clear we can support those machines for other
144 * reasons.
145 *
146 * If that support is done in the future, this is where it
147 * should be done.
148 */
149
150 npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
151 set_pmem_entry(pmem_ranges,0UL,npages);
152 npmem_ranges = 1;
153}
154
155#ifdef CONFIG_64BIT
156
157/* All of the PDC PAT specific code is 64-bit only */
158
159/*
160** The module object is filled via PDC_PAT_CELL[Return Cell Module].
161** If a module is found, register module will get the IODC bytes via
162** pdc_iodc_read() using the PA view of conf_base_addr for the hpa parameter.
163**
164** The IO view can be used by PDC_PAT_CELL[Return Cell Module]
165** only for SBAs and LBAs. This view will cause an invalid
166** argument error for all other cell module types.
167**
168*/
169
170static int __init
171pat_query_module(ulong pcell_loc, ulong mod_index)
172{
173 pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
174 unsigned long bytecnt;
175 unsigned long temp; /* 64-bit scratch value */
176 long status; /* PDC return value status */
177 struct parisc_device *dev;
178
179 pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
180 if (!pa_pdc_cell)
181 panic("couldn't allocate memory for PDC_PAT_CELL!");
182
183 /* return cell module (PA or Processor view) */
184 status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
185 PA_VIEW, pa_pdc_cell);
186
187 if (status != PDC_OK) {
188 /* no more cell modules or error */
189 kfree(pa_pdc_cell);
190 return status;
191 }
192
193 temp = pa_pdc_cell->cba;
194 dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
195 if (!dev) {
196 kfree(pa_pdc_cell);
197 return PDC_OK;
198 }
199
200 /* alloc_pa_dev sets dev->hpa */
201
202 /*
203 ** save parameters in the parisc_device
204 ** (The idea being the device driver will call pdc_pat_cell_module()
205 ** and store the results in its own data structure.)
206 */
207 dev->pcell_loc = pcell_loc;
208 dev->mod_index = mod_index;
209
210 /* save generic info returned from the call */
211 /* REVISIT: who is the consumer of this? not sure yet... */
212 dev->mod_info = pa_pdc_cell->mod_info; /* pass to PAT_GET_ENTITY() */
213 dev->pmod_loc = pa_pdc_cell->mod_location;
214 dev->mod0 = pa_pdc_cell->mod[0];
215
216 register_parisc_device(dev); /* advertise device */
217
218#ifdef DEBUG_PAT
219 pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
220 /* dump what we see so far... */
221 switch (PAT_GET_ENTITY(dev->mod_info)) {
222 unsigned long i;
223
224 case PAT_ENTITY_PROC:
225 printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
226 pa_pdc_cell->mod[0]);
227 break;
228
229 case PAT_ENTITY_MEM:
230 printk(KERN_DEBUG
231 "PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
232 pa_pdc_cell->mod[0], pa_pdc_cell->mod[1],
233 pa_pdc_cell->mod[2]);
234 break;
235 case PAT_ENTITY_CA:
236 printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
237 break;
238
239 case PAT_ENTITY_PBC:
240 printk(KERN_DEBUG "PAT_ENTITY_PBC: ");
241 goto print_ranges;
242
243 case PAT_ENTITY_SBA:
244 printk(KERN_DEBUG "PAT_ENTITY_SBA: ");
245 goto print_ranges;
246
247 case PAT_ENTITY_LBA:
248 printk(KERN_DEBUG "PAT_ENTITY_LBA: ");
249
250 print_ranges:
251 pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
252 IO_VIEW, &io_pdc_cell);
253 printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]);
254 for (i = 0; i < pa_pdc_cell->mod[1]; i++) {
255 printk(KERN_DEBUG
256 " PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
257 i, pa_pdc_cell->mod[2 + i * 3], /* type */
258 pa_pdc_cell->mod[3 + i * 3], /* start */
259 pa_pdc_cell->mod[4 + i * 3]); /* finish (ie end) */
260 printk(KERN_DEBUG
261 " IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
262 i, io_pdc_cell->mod[2 + i * 3], /* type */
263 io_pdc_cell->mod[3 + i * 3], /* start */
264 io_pdc_cell->mod[4 + i * 3]); /* finish (ie end) */
265 }
266 printk(KERN_DEBUG "\n");
267 break;
268 }
269#endif /* DEBUG_PAT */
270
271 kfree(pa_pdc_cell);
272
273 return PDC_OK;
274}
275
276
277/* pat pdc can return information about a variety of different
278 * types of memory (e.g. firmware,i/o, etc) but we only care about
279 * the usable physical ram right now. Since the firmware specific
280 * information is allocated on the stack, we'll be generous, in
281 * case there is a lot of other information we don't care about.
282 */
283
284#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)
285
/*
 * pat_memconfig - Build pmem_ranges[] from the PDC PAT address map.
 *
 * Queries PDC_PAT_PD for the physical address map and copies the usable
 * RAM descriptors into the firmware-independent pmem_ranges[] table,
 * setting npmem_ranges.  Falls back to the PAGE0 description on failure.
 */
static void __init pat_memconfig(void)
{
	unsigned long actual_len;
	/* One extra slot so we can detect firmware returning more entries
	 * than PAT_MAX_RANGES. */
	struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
	struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	unsigned long length;
	int i;

	length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);

	status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);

	/* A reply that isn't a whole number of entries is unusable. */
	if ((status != PDC_OK)
	    || ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {

		/* The above pdc call shouldn't fail, but, just in
		 * case, just use the PAGE0 info.
		 */

		printk("\n\n\n");
		printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
			"All memory may not be used!\n\n\n");
		pagezero_memconfig();
		return;
	}

	entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);

	if (entries > PAT_MAX_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
	}

	/* Copy information into the firmware independent pmem_ranges
	 * array, skipping types we don't care about. Notice we said
	 * "may" above. We'll use all the entries that were returned.
	 */

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges;	/* Global firmware independent table */
	for (i = 0; i < entries; i++,mtbl_ptr++) {
		/* Keep only general-use RAM descriptors with a nonzero size. */
		if ( (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
		     || (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
		     || (mtbl_ptr->pages == 0)
		     || ( (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
			  && (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
			  && (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) {

			continue;
		}

		if (npmem_ranges == MAX_PHYSMEM_RANGES) {
			printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
			printk(KERN_WARNING "Some memory will not be used!\n");
			break;
		}

		set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
		npmem_ranges++;
	}
}
351
352static int __init pat_inventory(void)
353{
354 int status;
355 ulong mod_index = 0;
356 struct pdc_pat_cell_num cell_info;
357
358 /*
359 ** Note: Prelude (and it's successors: Lclass, A400/500) only
360 ** implement PDC_PAT_CELL sub-options 0 and 2.
361 */
362 status = pdc_pat_cell_get_number(&cell_info);
363 if (status != PDC_OK) {
364 return 0;
365 }
366
367#ifdef DEBUG_PAT
368 printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num,
369 cell_info.cell_loc);
370#endif
371
372 while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) {
373 mod_index++;
374 }
375
376 return mod_index;
377}
378
/* We only look for extended memory ranges on a 64 bit capable box */
static void __init sprockets_memconfig(void)
{
	struct pdc_memory_table_raddr r_addr;
	struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
	struct pdc_memory_table *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	int i;

	status = pdc_mem_mem_table(&r_addr,mem_table,
				(unsigned long)MAX_PHYSMEM_RANGES);

	if (status != PDC_OK) {

		/* The above pdc call only works on boxes with sprockets
		 * firmware (newer B,C,J class). Other non PAT PDC machines
		 * do support more than 3.75 Gb of memory, but we don't
		 * support them yet.
		 */

		pagezero_memconfig();
		return;
	}

	if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory will not be used!\n");
	}

	/* entries_returned = number of entries actually written into
	 * mem_table[] (presumably <= MAX_PHYSMEM_RANGES, since that is
	 * the table size we passed -- firmware contract). */
	entries = (int)r_addr.entries_returned;

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges;	/* Global firmware independent table */
	for (i = 0; i < entries; i++,mtbl_ptr++) {
		set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
		npmem_ranges++;
	}
}
420
421#else /* !CONFIG_64BIT */
422
423#define pat_inventory() do { } while (0)
424#define pat_memconfig() do { } while (0)
425#define sprockets_memconfig() pagezero_memconfig()
426
427#endif /* !CONFIG_64BIT */
428
429
430#ifndef CONFIG_PA20
431
432/* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */
433
434static struct parisc_device * __init
435legacy_create_device(struct pdc_memory_map *r_addr,
436 struct pdc_module_path *module_path)
437{
438 struct parisc_device *dev;
439 int status = pdc_mem_map_hpa(r_addr, module_path);
440 if (status != PDC_OK)
441 return NULL;
442
443 dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
444 if (dev == NULL)
445 return NULL;
446
447 register_parisc_device(dev);
448 return dev;
449}
450
/**
 * snake_inventory
 *
 * Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used.
 * To use it, we initialise the mod_path.bc to 0xff and try all values of
 * mod to get the HPA for the top-level devices. Bus adapters may have
 * sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the
 * module, then trying all possible functions.
 */
static void __init snake_inventory(void)
{
	int mod;
	for (mod = 0; mod < 16; mod++) {
		struct parisc_device *dev;
		struct pdc_module_path module_path;
		struct pdc_memory_map r_addr;
		unsigned int func;

		/* Probe top-level module <mod>: all bus-converter
		 * bytes set to 0xff. */
		memset(module_path.path.bc, 0xff, 6);
		module_path.path.mod = mod;
		dev = legacy_create_device(&r_addr, &module_path);
		if ((!dev) || (dev->id.hw_type != HPHW_BA))
			continue;

		/* Found a bus adapter: scan all of its possible
		 * sub-device functions. */
		memset(module_path.path.bc, 0xff, 4);
		module_path.path.bc[4] = mod;

		for (func = 0; func < 16; func++) {
			module_path.path.bc[5] = 0;
			module_path.path.mod = func;
			legacy_create_device(&r_addr, &module_path);
		}
	}
}
485
486#else /* CONFIG_PA20 */
487#define snake_inventory() do { } while (0)
488#endif /* CONFIG_PA20 */
489
490/* Common 32/64 bit based code goes here */
491
492/**
493 * add_system_map_addresses - Add additional addresses to the parisc device.
494 * @dev: The parisc device.
495 * @num_addrs: Then number of addresses to add;
496 * @module_instance: The system_map module instance.
497 *
498 * This function adds any additional addresses reported by the system_map
499 * firmware to the parisc device.
500 */
501static void __init
502add_system_map_addresses(struct parisc_device *dev, int num_addrs,
503 int module_instance)
504{
505 int i;
506 long status;
507 struct pdc_system_map_addr_info addr_result;
508
509 dev->addr = kmalloc(num_addrs * sizeof(unsigned long), GFP_KERNEL);
510 if(!dev->addr) {
511 printk(KERN_ERR "%s %s(): memory allocation failure\n",
512 __FILE__, __func__);
513 return;
514 }
515
516 for(i = 1; i <= num_addrs; ++i) {
517 status = pdc_system_map_find_addrs(&addr_result,
518 module_instance, i);
519 if(PDC_OK == status) {
520 dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
521 dev->num_addrs++;
522 } else {
523 printk(KERN_WARNING
524 "Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
525 status, i);
526 }
527 }
528}
529
/**
 * system_map_inventory - Retrieve firmware devices via SYSTEM_MAP.
 *
 * This function attempts to retrieve and register all the devices firmware
 * knows about via the SYSTEM_MAP PDC call.
 */
static void __init system_map_inventory(void)
{
	int i;
	long status = PDC_OK;

	/* Scan up to 256 module indices. */
	for (i = 0; i < 256; i++) {
		struct parisc_device *dev;
		struct pdc_system_map_mod_info module_result;
		struct pdc_module_path module_path;

		status = pdc_system_map_find_mods(&module_result,
				&module_path, i);
		/* End of the module list (or call unsupported): stop. */
		if ((status == PDC_BAD_PROC) || (status == PDC_NE_MOD))
			break;
		if (status != PDC_OK)
			continue;

		dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
		if (!dev)
			continue;

		register_parisc_device(dev);

		/* if available, get the additional addresses for a module */
		if (!module_result.add_addrs)
			continue;

		add_system_map_addresses(dev, module_result.add_addrs, i);
	}

	walk_central_bus();
	return;
}
569
/*
 * do_memory_inventory - Fill in pmem_ranges[] using whichever firmware
 * interface matches the detected pdc_type, then sanity-check the result
 * and fall back to the PAGE0 description if it looks bogus.
 */
void __init do_memory_inventory(void)
{
	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_memconfig();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		sprockets_memconfig();
		break;

	case PDC_TYPE_SNAKE:
		/* PAGE0 is the only source here, so the fallback check
		 * below would be a no-op: return directly. */
		pagezero_memconfig();
		return;

	default:
		panic("Unknown PDC type!\n");
	}

	/* Memory is expected to start at physical address 0. */
	if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
		printk(KERN_WARNING "Bad memory configuration returned!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
		pagezero_memconfig();
	}
}
596
597void __init do_device_inventory(void)
598{
599 printk(KERN_INFO "Searching for devices...\n");
600
601 init_parisc_bus();
602
603 switch (pdc_type) {
604
605 case PDC_TYPE_PAT:
606 pat_inventory();
607 break;
608
609 case PDC_TYPE_SYSTEM_MAP:
610 system_map_inventory();
611 break;
612
613 case PDC_TYPE_SNAKE:
614 snake_inventory();
615 break;
616
617 default:
618 panic("Unknown PDC type!\n");
619 }
620 printk(KERN_INFO "Found devices:\n");
621 print_parisc_devices();
622}
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * inventory.c
4 *
5 * Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries)
6 * Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard
7 *
8 * These are the routines to discover what hardware exists in this box.
9 * This task is complicated by there being 3 different ways of
10 * performing an inventory, depending largely on the age of the box.
11 * The recommended way to do this is to check to see whether the machine
12 * is a `Snake' first, then try System Map, then try PAT. We try System
13 * Map before checking for a Snake -- this probably doesn't cause any
14 * problems, but...
15 */
16
17#include <linux/types.h>
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/slab.h>
21#include <linux/mm.h>
22#include <linux/platform_device.h>
23#include <asm/hardware.h>
24#include <asm/io.h>
25#include <asm/mmzone.h>
26#include <asm/pdc.h>
27#include <asm/pdcpat.h>
28#include <asm/processor.h>
29#include <asm/page.h>
30#include <asm/parisc-device.h>
31#include <asm/tlbflush.h>
32
33/*
34** Debug options
35** DEBUG_PAT Dump details which PDC PAT provides about ranges/devices.
36*/
37#undef DEBUG_PAT
38
39int pdc_type __ro_after_init = PDC_TYPE_ILLEGAL;
40
41/* cell number and location (PAT firmware only) */
42unsigned long parisc_cell_num __ro_after_init;
43unsigned long parisc_cell_loc __ro_after_init;
44unsigned long parisc_pat_pdc_cap __ro_after_init;
45
46
/*
 * setup_pdc - Determine which flavour of PDC firmware this machine has.
 *
 * Sets pdc_type (System Map, 64 bit PAT, or Snake) and, on PAT boxes,
 * records the cell number/location and the PAT PDC capability word in
 * the parisc_* globals above.  Panics on unrecognised firmware.
 */
void __init setup_pdc(void)
{
	long status;
	unsigned int bus_id;
	struct pdc_system_map_mod_info module_result;
	struct pdc_module_path module_path;
	struct pdc_model model;
#ifdef CONFIG_64BIT
	struct pdc_pat_cell_num cell_info;
#endif

	/* Determine the pdc "type" used on this machine */

	printk(KERN_INFO "Determining PDC firmware type: ");

	status = pdc_system_map_find_mods(&module_result, &module_path, 0);
	if (status == PDC_OK) {
		pdc_type = PDC_TYPE_SYSTEM_MAP;
		pr_cont("System Map.\n");
		return;
	}

	/*
	 * If the machine doesn't support PDC_SYSTEM_MAP then either it
	 * is a pdc pat box, or it is an older box. All 64 bit capable
	 * machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
	 */

	/*
	 * TODO: We should test for 64 bit capability and give a
	 * clearer message.
	 */

#ifdef CONFIG_64BIT
	status = pdc_pat_cell_get_number(&cell_info);
	if (status == PDC_OK) {
		unsigned long legacy_rev, pat_rev;
		pdc_type = PDC_TYPE_PAT;
		pr_cont("64 bit PAT.\n");
		parisc_cell_num = cell_info.cell_num;
		parisc_cell_loc = cell_info.cell_loc;
		pr_info("PAT: Running on cell %lu and location %lu.\n",
			parisc_cell_num, parisc_cell_loc);
		status = pdc_pat_pd_get_pdc_revisions(&legacy_rev,
			&pat_rev, &parisc_pat_pdc_cap);
		pr_info("PAT: legacy revision 0x%lx, pat_rev 0x%lx, pdc_cap 0x%lx, S-PTLB %d, HPMC_RENDEZ %d.\n",
			legacy_rev, pat_rev, parisc_pat_pdc_cap,
			parisc_pat_pdc_cap
			 & PDC_PAT_CAPABILITY_BIT_SIMULTANEOUS_PTLB ? 1:0,
			parisc_pat_pdc_cap
			 & PDC_PAT_CAPABILITY_BIT_PDC_HPMC_RENDEZ ? 1:0);
		return;
	}
#endif

	/* Check the CPU's bus ID. There's probably a better test. */

	status = pdc_model_info(&model);

	bus_id = (model.hversion >> (4 + 7)) & 0x1f;

	switch (bus_id) {
	case 0x4:	/* 720, 730, 750, 735, 755 */
	case 0x6:	/* 705, 710 */
	case 0x7:	/* 715, 725 */
	case 0x8:	/* 745, 747, 742 */
	case 0xA:	/* 712 and similar */
	case 0xC:	/* 715/64, at least */

		pdc_type = PDC_TYPE_SNAKE;
		pr_cont("Snake.\n");
		return;

	default:	/* Everything else */

		pr_cont("Unsupported.\n");
		panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
	}
}
126
127#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */
128
129static void __init
130set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
131 unsigned long pages4k)
132{
133 /* Rather than aligning and potentially throwing away
134 * memory, we'll assume that any ranges are already
135 * nicely aligned with any reasonable page size, and
136 * panic if they are not (it's more likely that the
137 * pdc info is bad in this case).
138 */
139
140 if (unlikely( ((start & (PAGE_SIZE - 1)) != 0)
141 || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) {
142
143 panic("Memory range doesn't align with page size!\n");
144 }
145
146 pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
147 pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
148}
149
150static void __init pagezero_memconfig(void)
151{
152 unsigned long npages;
153
154 /* Use the 32 bit information from page zero to create a single
155 * entry in the pmem_ranges[] table.
156 *
157 * We currently don't support machines with contiguous memory
158 * >= 4 Gb, who report that memory using 64 bit only fields
159 * on page zero. It's not worth doing until it can be tested,
160 * and it is not clear we can support those machines for other
161 * reasons.
162 *
163 * If that support is done in the future, this is where it
164 * should be done.
165 */
166
167 npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
168 set_pmem_entry(pmem_ranges,0UL,npages);
169 npmem_ranges = 1;
170}
171
172#ifdef CONFIG_64BIT
173
174/* All of the PDC PAT specific code is 64-bit only */
175
176/*
177** The module object is filled via PDC_PAT_CELL[Return Cell Module].
178** If a module is found, register module will get the IODC bytes via
179** pdc_iodc_read() using the PA view of conf_base_addr for the hpa parameter.
180**
181** The IO view can be used by PDC_PAT_CELL[Return Cell Module]
182** only for SBAs and LBAs. This view will cause an invalid
183** argument error for all other cell module types.
184**
185*/
186
/*
 * pat_query_module - Query one cell module via PDC_PAT_CELL and register it.
 * @pcell_loc: cell location, as returned by PDC_PAT_CELL[Return Cell Number].
 * @mod_index: index of the module to query within that cell.
 *
 * Returns PDC_OK when a module was found (whether or not a parisc_device
 * could be allocated for it), or the PDC error status when there are no
 * more modules.
 */
static int __init
pat_query_module(ulong pcell_loc, ulong mod_index)
{
	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
	unsigned long bytecnt;
	unsigned long temp;	/* 64-bit scratch value */
	long status;		/* PDC return value status */
	struct parisc_device *dev;

	pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
	if (!pa_pdc_cell)
		panic("couldn't allocate memory for PDC_PAT_CELL!");

	/* return cell module (PA or Processor view) */
	status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				     PA_VIEW, pa_pdc_cell);

	if (status != PDC_OK) {
		/* no more cell modules or error */
		kfree(pa_pdc_cell);
		return status;
	}

	temp = pa_pdc_cell->cba;
	dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
	if (!dev) {
		kfree(pa_pdc_cell);
		return PDC_OK;
	}

	/* alloc_pa_dev sets dev->hpa */

	/*
	** save parameters in the parisc_device
	** (The idea being the device driver will call pdc_pat_cell_module()
	** and store the results in its own data structure.)
	*/
	dev->pcell_loc = pcell_loc;
	dev->mod_index = mod_index;

	/* save generic info returned from the call */
	/* REVISIT: who is the consumer of this? not sure yet... */
	dev->mod_info = pa_pdc_cell->mod_info;	/* pass to PAT_GET_ENTITY() */
	dev->pmod_loc = pa_pdc_cell->mod_location;
	dev->mod0 = pa_pdc_cell->mod[0];

	register_parisc_device(dev);	/* advertise device */

#ifdef DEBUG_PAT
	/* dump what we see so far... */
	switch (PAT_GET_ENTITY(dev->mod_info)) {
		pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
		unsigned long i;

	case PAT_ENTITY_PROC:
		printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
			pa_pdc_cell->mod[0]);
		break;

	case PAT_ENTITY_MEM:
		printk(KERN_DEBUG
			"PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
			pa_pdc_cell->mod[0], pa_pdc_cell->mod[1],
			pa_pdc_cell->mod[2]);
		break;
	case PAT_ENTITY_CA:
		printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
		break;

	case PAT_ENTITY_PBC:
		printk(KERN_DEBUG "PAT_ENTITY_PBC: ");
		goto print_ranges;

	case PAT_ENTITY_SBA:
		printk(KERN_DEBUG "PAT_ENTITY_SBA: ");
		goto print_ranges;

	case PAT_ENTITY_LBA:
		printk(KERN_DEBUG "PAT_ENTITY_LBA: ");

 print_ranges:
		/* The IO view is only valid for SBA/LBA (see file header). */
		pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				    IO_VIEW, &io_pdc_cell);
		printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]);
		for (i = 0; i < pa_pdc_cell->mod[1]; i++) {
			printk(KERN_DEBUG
				"  PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
				i, pa_pdc_cell->mod[2 + i * 3],	/* type */
				pa_pdc_cell->mod[3 + i * 3],	/* start */
				pa_pdc_cell->mod[4 + i * 3]);	/* finish (ie end) */
			printk(KERN_DEBUG
				"  IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
				i, io_pdc_cell.mod[2 + i * 3],	/* type */
				io_pdc_cell.mod[3 + i * 3],	/* start */
				io_pdc_cell.mod[4 + i * 3]);	/* finish (ie end) */
		}
		printk(KERN_DEBUG "\n");
		break;
	}
#endif /* DEBUG_PAT */

	kfree(pa_pdc_cell);

	return PDC_OK;
}
292
293
294/* pat pdc can return information about a variety of different
295 * types of memory (e.g. firmware,i/o, etc) but we only care about
296 * the usable physical ram right now. Since the firmware specific
297 * information is allocated on the stack, we'll be generous, in
298 * case there is a lot of other information we don't care about.
299 */
300
301#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)
302
/*
 * pat_memconfig - Build pmem_ranges[] from the PDC PAT address map.
 *
 * Queries PDC_PAT_PD for the physical address map and copies the usable
 * RAM descriptors into the firmware-independent pmem_ranges[] table,
 * setting npmem_ranges.  Falls back to the PAGE0 description on failure.
 */
static void __init pat_memconfig(void)
{
	unsigned long actual_len;
	/* One extra slot so we can detect firmware returning more entries
	 * than PAT_MAX_RANGES. */
	struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
	struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	unsigned long length;
	int i;

	length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);

	status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);

	/* A reply that isn't a whole number of entries is unusable. */
	if ((status != PDC_OK)
	    || ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {

		/* The above pdc call shouldn't fail, but, just in
		 * case, just use the PAGE0 info.
		 */

		printk("\n\n\n");
		printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
			"All memory may not be used!\n\n\n");
		pagezero_memconfig();
		return;
	}

	entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);

	if (entries > PAT_MAX_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
	}

	/* Copy information into the firmware independent pmem_ranges
	 * array, skipping types we don't care about. Notice we said
	 * "may" above. We'll use all the entries that were returned.
	 */

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges;	/* Global firmware independent table */
	for (i = 0; i < entries; i++,mtbl_ptr++) {
		/* Keep only general-use RAM descriptors with a nonzero size. */
		if ( (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
		     || (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
		     || (mtbl_ptr->pages == 0)
		     || ( (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
			  && (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
			  && (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) {

			continue;
		}

		if (npmem_ranges == MAX_PHYSMEM_RANGES) {
			printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
			printk(KERN_WARNING "Some memory will not be used!\n");
			break;
		}

		set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
		npmem_ranges++;
	}
}
368
369static int __init pat_inventory(void)
370{
371 int status;
372 ulong mod_index = 0;
373 struct pdc_pat_cell_num cell_info;
374
375 /*
376 ** Note: Prelude (and it's successors: Lclass, A400/500) only
377 ** implement PDC_PAT_CELL sub-options 0 and 2.
378 */
379 status = pdc_pat_cell_get_number(&cell_info);
380 if (status != PDC_OK) {
381 return 0;
382 }
383
384#ifdef DEBUG_PAT
385 printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num,
386 cell_info.cell_loc);
387#endif
388
389 while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) {
390 mod_index++;
391 }
392
393 return mod_index;
394}
395
/* We only look for extended memory ranges on a 64 bit capable box */
static void __init sprockets_memconfig(void)
{
	struct pdc_memory_table_raddr r_addr;
	struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
	struct pdc_memory_table *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	int i;

	status = pdc_mem_mem_table(&r_addr,mem_table,
				(unsigned long)MAX_PHYSMEM_RANGES);

	if (status != PDC_OK) {

		/* The above pdc call only works on boxes with sprockets
		 * firmware (newer B,C,J class). Other non PAT PDC machines
		 * do support more than 3.75 Gb of memory, but we don't
		 * support them yet.
		 */

		pagezero_memconfig();
		return;
	}

	if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory will not be used!\n");
	}

	/* entries_returned = number of entries actually written into
	 * mem_table[] (presumably <= MAX_PHYSMEM_RANGES, since that is
	 * the table size we passed -- firmware contract). */
	entries = (int)r_addr.entries_returned;

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges;	/* Global firmware independent table */
	for (i = 0; i < entries; i++,mtbl_ptr++) {
		set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
		npmem_ranges++;
	}
}
437
438#else /* !CONFIG_64BIT */
439
440#define pat_inventory() do { } while (0)
441#define pat_memconfig() do { } while (0)
442#define sprockets_memconfig() pagezero_memconfig()
443
444#endif /* !CONFIG_64BIT */
445
446
447#ifndef CONFIG_PA20
448
449/* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */
450
451static struct parisc_device * __init
452legacy_create_device(struct pdc_memory_map *r_addr,
453 struct pdc_module_path *module_path)
454{
455 struct parisc_device *dev;
456 int status = pdc_mem_map_hpa(r_addr, module_path);
457 if (status != PDC_OK)
458 return NULL;
459
460 dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
461 if (dev == NULL)
462 return NULL;
463
464 register_parisc_device(dev);
465 return dev;
466}
467
468/**
469 * snake_inventory
470 *
471 * Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used.
472 * To use it, we initialise the mod_path.bc to 0xff and try all values of
473 * mod to get the HPA for the top-level devices. Bus adapters may have
474 * sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the
475 * module, then trying all possible functions.
476 */
static void __init snake_inventory(void)
{
	int mod;
	for (mod = 0; mod < 16; mod++) {
		struct parisc_device *dev;
		struct pdc_module_path module_path;
		struct pdc_memory_map r_addr;
		unsigned int func;

		/* Top level: all bus-converter bytes wildcarded (0xff),
		 * probe module number `mod'. */
		memset(module_path.path.bc, 0xff, 6);
		module_path.path.mod = mod;
		dev = legacy_create_device(&r_addr, &module_path);
		/* Only bus adapters have sub-devices worth scanning. */
		if ((!dev) || (dev->id.hw_type != HPHW_BA))
			continue;

		/* Found a bus adapter: per the scheme described above,
		 * pin bc[4] to the adapter's module number and walk its
		 * functions. */
		memset(module_path.path.bc, 0xff, 4);
		module_path.path.bc[4] = mod;

		for (func = 0; func < 16; func++) {
			/* bc[5] = 0 selects the adapter's own bus. */
			module_path.path.bc[5] = 0;
			module_path.path.mod = func;
			legacy_create_device(&r_addr, &module_path);
		}
	}
}
502
503#else /* CONFIG_PA20 */
504#define snake_inventory() do { } while (0)
505#endif /* CONFIG_PA20 */
506
507/* Common 32/64 bit based code goes here */
508
509/**
510 * add_system_map_addresses - Add additional addresses to the parisc device.
511 * @dev: The parisc device.
512 * @num_addrs: Then number of addresses to add;
513 * @module_instance: The system_map module instance.
514 *
515 * This function adds any additional addresses reported by the system_map
516 * firmware to the parisc device.
517 */
518static void __init
519add_system_map_addresses(struct parisc_device *dev, int num_addrs,
520 int module_instance)
521{
522 int i;
523 long status;
524 struct pdc_system_map_addr_info addr_result;
525
526 dev->addr = kmalloc_array(num_addrs, sizeof(*dev->addr), GFP_KERNEL);
527 if(!dev->addr) {
528 printk(KERN_ERR "%s %s(): memory allocation failure\n",
529 __FILE__, __func__);
530 return;
531 }
532
533 for(i = 1; i <= num_addrs; ++i) {
534 status = pdc_system_map_find_addrs(&addr_result,
535 module_instance, i);
536 if(PDC_OK == status) {
537 dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
538 dev->num_addrs++;
539 } else {
540 printk(KERN_WARNING
541 "Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
542 status, i);
543 }
544 }
545}
546
547/**
548 * system_map_inventory - Retrieve firmware devices via SYSTEM_MAP.
549 *
550 * This function attempts to retrieve and register all the devices firmware
551 * knows about via the SYSTEM_MAP PDC call.
552 */
553static void __init system_map_inventory(void)
554{
555 int i;
556 long status = PDC_OK;
557
558 for (i = 0; i < 256; i++) {
559 struct parisc_device *dev;
560 struct pdc_system_map_mod_info module_result;
561 struct pdc_module_path module_path;
562
563 status = pdc_system_map_find_mods(&module_result,
564 &module_path, i);
565 if ((status == PDC_BAD_PROC) || (status == PDC_NE_MOD))
566 break;
567 if (status != PDC_OK)
568 continue;
569
570 dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
571 if (!dev)
572 continue;
573
574 register_parisc_device(dev);
575
576 /* if available, get the additional addresses for a module */
577 if (!module_result.add_addrs)
578 continue;
579
580 add_system_map_addresses(dev, module_result.add_addrs, i);
581 }
582
583 walk_central_bus();
584 return;
585}
586
587void __init do_memory_inventory(void)
588{
589 switch (pdc_type) {
590
591 case PDC_TYPE_PAT:
592 pat_memconfig();
593 break;
594
595 case PDC_TYPE_SYSTEM_MAP:
596 sprockets_memconfig();
597 break;
598
599 case PDC_TYPE_SNAKE:
600 pagezero_memconfig();
601 return;
602
603 default:
604 panic("Unknown PDC type!\n");
605 }
606
607 if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
608 printk(KERN_WARNING "Bad memory configuration returned!\n");
609 printk(KERN_WARNING "Some memory may not be used!\n");
610 pagezero_memconfig();
611 }
612}
613
614void __init do_device_inventory(void)
615{
616 printk(KERN_INFO "Searching for devices...\n");
617
618 init_parisc_bus();
619
620 switch (pdc_type) {
621
622 case PDC_TYPE_PAT:
623 pat_inventory();
624 break;
625
626 case PDC_TYPE_SYSTEM_MAP:
627 system_map_inventory();
628 break;
629
630 case PDC_TYPE_SNAKE:
631 snake_inventory();
632 break;
633
634 default:
635 panic("Unknown PDC type!\n");
636 }
637 printk(KERN_INFO "Found devices:\n");
638 print_parisc_devices();
639
640#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
641 pa_serialize_tlb_flushes = machine_has_merced_bus();
642 if (pa_serialize_tlb_flushes)
643 pr_info("Merced bus found: Enable PxTLB serialization.\n");
644#endif
645
646#if defined(CONFIG_FW_CFG_SYSFS)
647 if (running_on_qemu) {
648 struct resource res[3] = {0,};
649 unsigned int base;
650
651 base = ((unsigned long long) PAGE0->pad0[2] << 32)
652 | PAGE0->pad0[3]; /* SeaBIOS stored it here */
653
654 res[0].name = "fw_cfg";
655 res[0].start = base;
656 res[0].end = base + 8 - 1;
657 res[0].flags = IORESOURCE_MEM;
658
659 res[1].name = "ctrl";
660 res[1].start = 0;
661 res[1].flags = IORESOURCE_REG;
662
663 res[2].name = "data";
664 res[2].start = 4;
665 res[2].flags = IORESOURCE_REG;
666
667 if (base) {
668 pr_info("Found qemu fw_cfg interface at %#08x\n", base);
669 platform_device_register_simple("fw_cfg",
670 PLATFORM_DEVID_NONE, res, 3);
671 }
672 }
673#endif
674}