v3.5.6
  1/*
  2 * inventory.c
  3 *
  4 * This program is free software; you can redistribute it and/or
  5 * modify it under the terms of the GNU General Public License
  6 * as published by the Free Software Foundation; either version
  7 * 2 of the License, or (at your option) any later version.
  8 *
  9 * Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries)
 10 * Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard
 11 *
 12 * These are the routines to discover what hardware exists in this box.
 13 * This task is complicated by there being 3 different ways of
 14 * performing an inventory, depending largely on the age of the box.
 15 * The recommended way to do this is to check to see whether the machine
 16 * is a `Snake' first, then try System Map, then try PAT.  We try System
 17 * Map before checking for a Snake -- this probably doesn't cause any
 18 * problems, but...
 19 */
 20
 21#include <linux/types.h>
 22#include <linux/kernel.h>
 23#include <linux/init.h>
 24#include <linux/slab.h>
 25#include <linux/mm.h>
 26#include <asm/hardware.h>
 27#include <asm/io.h>
 28#include <asm/mmzone.h>
 29#include <asm/pdc.h>
 30#include <asm/pdcpat.h>
 31#include <asm/processor.h>
 32#include <asm/page.h>
 33#include <asm/parisc-device.h>
 34
 35/*
 36** Debug options
 37** DEBUG_PAT Dump details which PDC PAT provides about ranges/devices.
 38*/
 39#undef DEBUG_PAT
 40
 41int pdc_type __read_mostly = PDC_TYPE_ILLEGAL;
 42
 43void __init setup_pdc(void)
 44{
 45	long status;
 46	unsigned int bus_id;
 47	struct pdc_system_map_mod_info module_result;
 48	struct pdc_module_path module_path;
 49	struct pdc_model model;
 50#ifdef CONFIG_64BIT
 51	struct pdc_pat_cell_num cell_info;
 52#endif
 53
 54	/* Determine the pdc "type" used on this machine */
 55
 56	printk(KERN_INFO "Determining PDC firmware type: ");
 57
 58	status = pdc_system_map_find_mods(&module_result, &module_path, 0);
 59	if (status == PDC_OK) {
 60		pdc_type = PDC_TYPE_SYSTEM_MAP;
 61		printk("System Map.\n");
 62		return;
 63	}
 64
 65	/*
 66	 * If the machine doesn't support PDC_SYSTEM_MAP then either it
 67	 * is a pdc pat box, or it is an older box. All 64 bit capable
 68	 * machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
 69	 */
 70
 71	/*
 72	 * TODO: We should test for 64 bit capability and give a
 73	 * clearer message.
 74	 */
 75
 76#ifdef CONFIG_64BIT
 77	status = pdc_pat_cell_get_number(&cell_info);
 78	if (status == PDC_OK) {
 79		pdc_type = PDC_TYPE_PAT;
 80		printk("64 bit PAT.\n");
 81		return;
 82	}
 83#endif
 84
 85	/* Check the CPU's bus ID.  There's probably a better test.  */
 86
 87	status = pdc_model_info(&model);
 88
 89	bus_id = (model.hversion >> (4 + 7)) & 0x1f;
 90
 91	switch (bus_id) {
 92	case 0x4:		/* 720, 730, 750, 735, 755 */
 93	case 0x6:		/* 705, 710 */
 94	case 0x7:		/* 715, 725 */
 95	case 0x8:		/* 745, 747, 742 */
 96	case 0xA:		/* 712 and similar */
 97	case 0xC:		/* 715/64, at least */
 98
 99		pdc_type = PDC_TYPE_SNAKE;
100		printk("Snake.\n");
101		return;
102
103	default:		/* Everything else */
104
105		printk("Unsupported.\n");
106		panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
107	}
108}
109
110#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */
111
112static void __init
113set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
114	       unsigned long pages4k)
115{
116	/* Rather than aligning and potentially throwing away
117	 * memory, we'll assume that any ranges are already
118	 * nicely aligned with any reasonable page size, and
119	 * panic if they are not (it's more likely that the
120	 * pdc info is bad in this case).
121	 */
122
123	if (unlikely( ((start & (PAGE_SIZE - 1)) != 0)
124	    || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) {
125
126		panic("Memory range doesn't align with page size!\n");
127	}
128
129	pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
130	pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
131}
132
133static void __init pagezero_memconfig(void)
134{
135	unsigned long npages;
136
137	/* Use the 32 bit information from page zero to create a single
138	 * entry in the pmem_ranges[] table.
139	 *
140	 * We currently don't support machines with contiguous memory
 141 * >= 4 Gb, which report that memory using 64 bit only fields
142	 * on page zero. It's not worth doing until it can be tested,
143	 * and it is not clear we can support those machines for other
144	 * reasons.
145	 *
146	 * If that support is done in the future, this is where it
147	 * should be done.
148	 */
149
150	npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
151	set_pmem_entry(pmem_ranges,0UL,npages);
152	npmem_ranges = 1;
153}
154
155#ifdef CONFIG_64BIT
156
157/* All of the PDC PAT specific code is 64-bit only */
158
159/*
160**  The module object is filled via PDC_PAT_CELL[Return Cell Module].
161**  If a module is found, register module will get the IODC bytes via
162**  pdc_iodc_read() using the PA view of conf_base_addr for the hpa parameter.
163**
164**  The IO view can be used by PDC_PAT_CELL[Return Cell Module]
165**  only for SBAs and LBAs.  This view will cause an invalid
166**  argument error for all other cell module types.
167**
168*/
169
170static int __init 
171pat_query_module(ulong pcell_loc, ulong mod_index)
172{
173	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
174	unsigned long bytecnt;
175	unsigned long temp;	/* 64-bit scratch value */
176	long status;		/* PDC return value status */
177	struct parisc_device *dev;
178
179	pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
180	if (!pa_pdc_cell)
181		panic("couldn't allocate memory for PDC_PAT_CELL!");
182
183	/* return cell module (PA or Processor view) */
184	status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
185				     PA_VIEW, pa_pdc_cell);
186
187	if (status != PDC_OK) {
188		/* no more cell modules or error */
189		return status;
190	}
191
192	temp = pa_pdc_cell->cba;
193	dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
194	if (!dev) {
195		return PDC_OK;
196	}
197
198	/* alloc_pa_dev sets dev->hpa */
199
200	/*
201	** save parameters in the parisc_device
202	** (The idea being the device driver will call pdc_pat_cell_module()
203	** and store the results in its own data structure.)
204	*/
205	dev->pcell_loc = pcell_loc;
206	dev->mod_index = mod_index;
207
208	/* save generic info returned from the call */
209	/* REVISIT: who is the consumer of this? not sure yet... */
210	dev->mod_info = pa_pdc_cell->mod_info;	/* pass to PAT_GET_ENTITY() */
211	dev->pmod_loc = pa_pdc_cell->mod_location;
212
213	register_parisc_device(dev);	/* advertise device */
214
215#ifdef DEBUG_PAT
216	pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
217	/* dump what we see so far... */
218	switch (PAT_GET_ENTITY(dev->mod_info)) {
219		unsigned long i;
220
221	case PAT_ENTITY_PROC:
222		printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
223			pa_pdc_cell->mod[0]);
224		break;
225
226	case PAT_ENTITY_MEM:
227		printk(KERN_DEBUG 
228			"PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
229			pa_pdc_cell->mod[0], pa_pdc_cell->mod[1],
230			pa_pdc_cell->mod[2]);
231		break;
232	case PAT_ENTITY_CA:
233		printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
234		break;
235
236	case PAT_ENTITY_PBC:
237		printk(KERN_DEBUG "PAT_ENTITY_PBC: ");
238		goto print_ranges;
239
240	case PAT_ENTITY_SBA:
241		printk(KERN_DEBUG "PAT_ENTITY_SBA: ");
242		goto print_ranges;
243
244	case PAT_ENTITY_LBA:
245		printk(KERN_DEBUG "PAT_ENTITY_LBA: ");
246
247 print_ranges:
248		pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
249				    IO_VIEW, &io_pdc_cell);
250		printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]);
251		for (i = 0; i < pa_pdc_cell->mod[1]; i++) {
252			printk(KERN_DEBUG 
253				"  PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n", 
254				i, pa_pdc_cell->mod[2 + i * 3],	/* type */
255				pa_pdc_cell->mod[3 + i * 3],	/* start */
256				pa_pdc_cell->mod[4 + i * 3]);	/* finish (ie end) */
257			printk(KERN_DEBUG 
258				"  IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n", 
259				i, io_pdc_cell.mod[2 + i * 3],	/* type */
260				io_pdc_cell.mod[3 + i * 3],	/* start */
261				io_pdc_cell.mod[4 + i * 3]);	/* finish (ie end) */
262		}
263		printk(KERN_DEBUG "\n");
264		break;
265	}
266#endif /* DEBUG_PAT */
267
268	kfree(pa_pdc_cell);
269
270	return PDC_OK;
271}
272
273
274/* pat pdc can return information about a variety of different
275 * types of memory (e.g. firmware,i/o, etc) but we only care about
276 * the usable physical ram right now. Since the firmware specific
277 * information is allocated on the stack, we'll be generous, in
278 * case there is a lot of other information we don't care about.
279 */
280
281#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)
282
283static void __init pat_memconfig(void)
284{
285	unsigned long actual_len;
286	struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
287	struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
288	physmem_range_t *pmem_ptr;
289	long status;
290	int entries;
291	unsigned long length;
292	int i;
293
294	length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);
295
296	status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);
297
298	if ((status != PDC_OK)
299	    || ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {
300
301		/* The above pdc call shouldn't fail, but, just in
302		 * case, just use the PAGE0 info.
303		 */
304
305		printk("\n\n\n");
306		printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
307			"All memory may not be used!\n\n\n");
308		pagezero_memconfig();
309		return;
310	}
311
312	entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);
313
314	if (entries > PAT_MAX_RANGES) {
315		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
316		printk(KERN_WARNING "Some memory may not be used!\n");
317	}
318
319	/* Copy information into the firmware independent pmem_ranges
320	 * array, skipping types we don't care about. Notice we said
321	 * "may" above. We'll use all the entries that were returned.
322	 */
323
324	npmem_ranges = 0;
325	mtbl_ptr = mem_table;
326	pmem_ptr = pmem_ranges; /* Global firmware independent table */
327	for (i = 0; i < entries; i++,mtbl_ptr++) {
328		if (   (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
329		    || (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
330		    || (mtbl_ptr->pages == 0)
331		    || (   (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
332			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
333			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) {
334
335			continue;
336		}
337
338		if (npmem_ranges == MAX_PHYSMEM_RANGES) {
339			printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
340			printk(KERN_WARNING "Some memory will not be used!\n");
341			break;
342		}
343
344		set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
345		npmem_ranges++;
346	}
347}
348
349static int __init pat_inventory(void)
350{
351	int status;
352	ulong mod_index = 0;
353	struct pdc_pat_cell_num cell_info;
354
355	/*
 356	** Note:  Prelude (and its successors: Lclass, A400/500) only
357	**        implement PDC_PAT_CELL sub-options 0 and 2.
358	*/
359	status = pdc_pat_cell_get_number(&cell_info);
360	if (status != PDC_OK) {
361		return 0;
362	}
363
364#ifdef DEBUG_PAT
365	printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num, 
366	       cell_info.cell_loc);
367#endif
368
369	while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) {
370		mod_index++;
371	}
372
373	return mod_index;
374}
375
376/* We only look for extended memory ranges on a 64 bit capable box */
377static void __init sprockets_memconfig(void)
378{
379	struct pdc_memory_table_raddr r_addr;
380	struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
381	struct pdc_memory_table *mtbl_ptr;
382	physmem_range_t *pmem_ptr;
383	long status;
384	int entries;
385	int i;
386
387	status = pdc_mem_mem_table(&r_addr,mem_table,
388				(unsigned long)MAX_PHYSMEM_RANGES);
389
390	if (status != PDC_OK) {
391
392		/* The above pdc call only works on boxes with sprockets
393		 * firmware (newer B,C,J class). Other non PAT PDC machines
394		 * do support more than 3.75 Gb of memory, but we don't
395		 * support them yet.
396		 */
397
398		pagezero_memconfig();
399		return;
400	}
401
402	if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
403		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
404		printk(KERN_WARNING "Some memory will not be used!\n");
405	}
406
407	entries = (int)r_addr.entries_returned;
408
409	npmem_ranges = 0;
410	mtbl_ptr = mem_table;
411	pmem_ptr = pmem_ranges; /* Global firmware independent table */
412	for (i = 0; i < entries; i++,mtbl_ptr++) {
413		set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
414		npmem_ranges++;
415	}
416}
417
418#else   /* !CONFIG_64BIT */
419
420#define pat_inventory() do { } while (0)
421#define pat_memconfig() do { } while (0)
422#define sprockets_memconfig() pagezero_memconfig()
423
424#endif	/* !CONFIG_64BIT */
425
426
427#ifndef CONFIG_PA20
428
429/* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */
430
431static struct parisc_device * __init
432legacy_create_device(struct pdc_memory_map *r_addr,
433		struct pdc_module_path *module_path)
434{
435	struct parisc_device *dev;
436	int status = pdc_mem_map_hpa(r_addr, module_path);
437	if (status != PDC_OK)
438		return NULL;
439
440	dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
441	if (dev == NULL)
442		return NULL;
443
444	register_parisc_device(dev);
445	return dev;
446}
447
448/**
449 * snake_inventory
450 *
451 * Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used.
452 * To use it, we initialise the mod_path.bc to 0xff and try all values of
453 * mod to get the HPA for the top-level devices.  Bus adapters may have
454 * sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the
455 * module, then trying all possible functions.
456 */
457static void __init snake_inventory(void)
458{
459	int mod;
460	for (mod = 0; mod < 16; mod++) {
461		struct parisc_device *dev;
462		struct pdc_module_path module_path;
463		struct pdc_memory_map r_addr;
464		unsigned int func;
465
466		memset(module_path.path.bc, 0xff, 6);
467		module_path.path.mod = mod;
468		dev = legacy_create_device(&r_addr, &module_path);
469		if ((!dev) || (dev->id.hw_type != HPHW_BA))
470			continue;
471
472		memset(module_path.path.bc, 0xff, 4);
473		module_path.path.bc[4] = mod;
474
475		for (func = 0; func < 16; func++) {
476			module_path.path.bc[5] = 0;
477			module_path.path.mod = func;
478			legacy_create_device(&r_addr, &module_path);
479		}
480	}
481}
482
483#else /* CONFIG_PA20 */
484#define snake_inventory() do { } while (0)
485#endif  /* CONFIG_PA20 */
486
487/* Common 32/64 bit based code goes here */
488
489/**
490 * add_system_map_addresses - Add additional addresses to the parisc device.
491 * @dev: The parisc device.
 492 * @num_addrs: The number of addresses to add.
493 * @module_instance: The system_map module instance.
494 *
495 * This function adds any additional addresses reported by the system_map
496 * firmware to the parisc device.
497 */
498static void __init
499add_system_map_addresses(struct parisc_device *dev, int num_addrs, 
500			 int module_instance)
501{
502	int i;
503	long status;
504	struct pdc_system_map_addr_info addr_result;
505
506	dev->addr = kmalloc(num_addrs * sizeof(unsigned long), GFP_KERNEL);
507	if(!dev->addr) {
508		printk(KERN_ERR "%s %s(): memory allocation failure\n",
509		       __FILE__, __func__);
510		return;
511	}
512
513	for(i = 1; i <= num_addrs; ++i) {
514		status = pdc_system_map_find_addrs(&addr_result, 
515						   module_instance, i);
516		if(PDC_OK == status) {
517			dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
518			dev->num_addrs++;
519		} else {
520			printk(KERN_WARNING 
521			       "Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
522			       status, i);
523		}
524	}
525}
526
527/**
528 * system_map_inventory - Retrieve firmware devices via SYSTEM_MAP.
529 *
530 * This function attempts to retrieve and register all the devices firmware
531 * knows about via the SYSTEM_MAP PDC call.
532 */
533static void __init system_map_inventory(void)
534{
535	int i;
536	long status = PDC_OK;
537    
538	for (i = 0; i < 256; i++) {
539		struct parisc_device *dev;
540		struct pdc_system_map_mod_info module_result;
541		struct pdc_module_path module_path;
542
543		status = pdc_system_map_find_mods(&module_result,
544				&module_path, i);
545		if ((status == PDC_BAD_PROC) || (status == PDC_NE_MOD))
546			break;
547		if (status != PDC_OK)
548			continue;
549
550		dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
551		if (!dev)
552			continue;
553		
554		register_parisc_device(dev);
555
556		/* if available, get the additional addresses for a module */
557		if (!module_result.add_addrs)
558			continue;
559
560		add_system_map_addresses(dev, module_result.add_addrs, i);
561	}
562
563	walk_central_bus();
564	return;
565}
566
567void __init do_memory_inventory(void)
568{
569	switch (pdc_type) {
570
571	case PDC_TYPE_PAT:
572		pat_memconfig();
573		break;
574
575	case PDC_TYPE_SYSTEM_MAP:
576		sprockets_memconfig();
577		break;
578
579	case PDC_TYPE_SNAKE:
580		pagezero_memconfig();
581		return;
582
583	default:
584		panic("Unknown PDC type!\n");
585	}
586
587	if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
588		printk(KERN_WARNING "Bad memory configuration returned!\n");
589		printk(KERN_WARNING "Some memory may not be used!\n");
590		pagezero_memconfig();
591	}
592}
593
594void __init do_device_inventory(void)
595{
596	printk(KERN_INFO "Searching for devices...\n");
597
598	init_parisc_bus();
599
600	switch (pdc_type) {
601
602	case PDC_TYPE_PAT:
603		pat_inventory();
604		break;
605
606	case PDC_TYPE_SYSTEM_MAP:
607		system_map_inventory();
608		break;
609
610	case PDC_TYPE_SNAKE:
611		snake_inventory();
612		break;
613
614	default:
615		panic("Unknown PDC type!\n");
616	}
617	printk(KERN_INFO "Found devices:\n");
618	print_parisc_devices();
619}
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * inventory.c
  4 *
  5 * Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries)
  6 * Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard
  7 *
  8 * These are the routines to discover what hardware exists in this box.
  9 * This task is complicated by there being 3 different ways of
 10 * performing an inventory, depending largely on the age of the box.
 11 * The recommended way to do this is to check to see whether the machine
 12 * is a `Snake' first, then try System Map, then try PAT.  We try System
 13 * Map before checking for a Snake -- this probably doesn't cause any
 14 * problems, but...
 15 */
 16
 17#include <linux/types.h>
 18#include <linux/kernel.h>
 19#include <linux/init.h>
 20#include <linux/slab.h>
 21#include <linux/mm.h>
 22#include <linux/platform_device.h>
 23#include <asm/hardware.h>
 24#include <asm/io.h>
 25#include <asm/mmzone.h>
 26#include <asm/pdc.h>
 27#include <asm/pdcpat.h>
 28#include <asm/processor.h>
 29#include <asm/page.h>
 30#include <asm/parisc-device.h>
 31#include <asm/tlbflush.h>
 32
 33/*
 34** Debug options
 35** DEBUG_PAT Dump details which PDC PAT provides about ranges/devices.
 36*/
 37#undef DEBUG_PAT
 38
 39int pdc_type __ro_after_init = PDC_TYPE_ILLEGAL;
 40
 41/* cell number and location (PAT firmware only) */
 42unsigned long parisc_cell_num __ro_after_init;
 43unsigned long parisc_cell_loc __ro_after_init;
 44unsigned long parisc_pat_pdc_cap __ro_after_init;
 45
 46
 47void __init setup_pdc(void)
 48{
 49	long status;
 50	unsigned int bus_id;
 51	struct pdc_system_map_mod_info module_result;
 52	struct pdc_module_path module_path;
 53	struct pdc_model model;
 54#ifdef CONFIG_64BIT
 55	struct pdc_pat_cell_num cell_info;
 56#endif
 57
 58	/* Determine the pdc "type" used on this machine */
 59
 60	printk(KERN_INFO "Determining PDC firmware type: ");
 61
 62	status = pdc_system_map_find_mods(&module_result, &module_path, 0);
 63	if (status == PDC_OK) {
 64		pdc_type = PDC_TYPE_SYSTEM_MAP;
 65		pr_cont("System Map.\n");
 66		return;
 67	}
 68
 69	/*
 70	 * If the machine doesn't support PDC_SYSTEM_MAP then either it
 71	 * is a pdc pat box, or it is an older box. All 64 bit capable
 72	 * machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
 73	 */
 74
 75	/*
 76	 * TODO: We should test for 64 bit capability and give a
 77	 * clearer message.
 78	 */
 79
 80#ifdef CONFIG_64BIT
 81	status = pdc_pat_cell_get_number(&cell_info);
 82	if (status == PDC_OK) {
 83		unsigned long legacy_rev, pat_rev;
 84		pdc_type = PDC_TYPE_PAT;
 85		pr_cont("64 bit PAT.\n");
 86		parisc_cell_num = cell_info.cell_num;
 87		parisc_cell_loc = cell_info.cell_loc;
 88		pr_info("PAT: Running on cell %lu and location %lu.\n",
 89			parisc_cell_num, parisc_cell_loc);
 90		status = pdc_pat_pd_get_pdc_revisions(&legacy_rev,
 91			&pat_rev, &parisc_pat_pdc_cap);
 92		pr_info("PAT: legacy revision 0x%lx, pat_rev 0x%lx, pdc_cap 0x%lx, S-PTLB %d, HPMC_RENDEZ %d.\n",
 93			legacy_rev, pat_rev, parisc_pat_pdc_cap,
 94			parisc_pat_pdc_cap
 95			 & PDC_PAT_CAPABILITY_BIT_SIMULTANEOUS_PTLB ? 1:0,
 96			parisc_pat_pdc_cap
 97			 & PDC_PAT_CAPABILITY_BIT_PDC_HPMC_RENDEZ   ? 1:0);
 98		return;
 99	}
100#endif
101
102	/* Check the CPU's bus ID.  There's probably a better test.  */
103
104	status = pdc_model_info(&model);
105
106	bus_id = (model.hversion >> (4 + 7)) & 0x1f;
107
108	switch (bus_id) {
109	case 0x4:		/* 720, 730, 750, 735, 755 */
110	case 0x6:		/* 705, 710 */
111	case 0x7:		/* 715, 725 */
112	case 0x8:		/* 745, 747, 742 */
113	case 0xA:		/* 712 and similar */
114	case 0xC:		/* 715/64, at least */
115
116		pdc_type = PDC_TYPE_SNAKE;
117		pr_cont("Snake.\n");
118		return;
119
120	default:		/* Everything else */
121
122		pr_cont("Unsupported.\n");
123		panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
124	}
125}
126
127#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */
128
129static void __init
130set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
131	       unsigned long pages4k)
132{
133	/* Rather than aligning and potentially throwing away
134	 * memory, we'll assume that any ranges are already
135	 * nicely aligned with any reasonable page size, and
136	 * panic if they are not (it's more likely that the
137	 * pdc info is bad in this case).
138	 */
139
140	if (unlikely( ((start & (PAGE_SIZE - 1)) != 0)
141	    || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) {
142
143		panic("Memory range doesn't align with page size!\n");
144	}
145
146	pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
147	pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
148}
149
150static void __init pagezero_memconfig(void)
151{
152	unsigned long npages;
153
154	/* Use the 32 bit information from page zero to create a single
155	 * entry in the pmem_ranges[] table.
156	 *
157	 * We currently don't support machines with contiguous memory
 158 * >= 4 Gb, which report that memory using 64 bit only fields
159	 * on page zero. It's not worth doing until it can be tested,
160	 * and it is not clear we can support those machines for other
161	 * reasons.
162	 *
163	 * If that support is done in the future, this is where it
164	 * should be done.
165	 */
166
167	npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
168	set_pmem_entry(pmem_ranges,0UL,npages);
169	npmem_ranges = 1;
170}
171
172#ifdef CONFIG_64BIT
173
174/* All of the PDC PAT specific code is 64-bit only */
175
176/*
177**  The module object is filled via PDC_PAT_CELL[Return Cell Module].
178**  If a module is found, register module will get the IODC bytes via
179**  pdc_iodc_read() using the PA view of conf_base_addr for the hpa parameter.
180**
181**  The IO view can be used by PDC_PAT_CELL[Return Cell Module]
182**  only for SBAs and LBAs.  This view will cause an invalid
183**  argument error for all other cell module types.
184**
185*/
186
187static int __init 
188pat_query_module(ulong pcell_loc, ulong mod_index)
189{
190	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
191	unsigned long bytecnt;
192	unsigned long temp;	/* 64-bit scratch value */
193	long status;		/* PDC return value status */
194	struct parisc_device *dev;
195
196	pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
197	if (!pa_pdc_cell)
198		panic("couldn't allocate memory for PDC_PAT_CELL!");
199
200	/* return cell module (PA or Processor view) */
201	status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
202				     PA_VIEW, pa_pdc_cell);
203
204	if (status != PDC_OK) {
205		/* no more cell modules or error */
206		kfree(pa_pdc_cell);
207		return status;
208	}
209
210	temp = pa_pdc_cell->cba;
211	dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
212	if (!dev) {
213		kfree(pa_pdc_cell);
214		return PDC_OK;
215	}
216
217	/* alloc_pa_dev sets dev->hpa */
218
219	/*
220	** save parameters in the parisc_device
221	** (The idea being the device driver will call pdc_pat_cell_module()
222	** and store the results in its own data structure.)
223	*/
224	dev->pcell_loc = pcell_loc;
225	dev->mod_index = mod_index;
226
227	/* save generic info returned from the call */
228	/* REVISIT: who is the consumer of this? not sure yet... */
229	dev->mod_info = pa_pdc_cell->mod_info;	/* pass to PAT_GET_ENTITY() */
230	dev->pmod_loc = pa_pdc_cell->mod_location;
231	dev->mod0 = pa_pdc_cell->mod[0];
232
233	register_parisc_device(dev);	/* advertise device */
234
235#ifdef DEBUG_PAT
236	/* dump what we see so far... */
237	switch (PAT_GET_ENTITY(dev->mod_info)) {
238		pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
239		unsigned long i;
240
241	case PAT_ENTITY_PROC:
242		printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
243			pa_pdc_cell->mod[0]);
244		break;
245
246	case PAT_ENTITY_MEM:
247		printk(KERN_DEBUG 
248			"PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
249			pa_pdc_cell->mod[0], pa_pdc_cell->mod[1],
250			pa_pdc_cell->mod[2]);
251		break;
252	case PAT_ENTITY_CA:
253		printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
254		break;
255
256	case PAT_ENTITY_PBC:
257		printk(KERN_DEBUG "PAT_ENTITY_PBC: ");
258		goto print_ranges;
259
260	case PAT_ENTITY_SBA:
261		printk(KERN_DEBUG "PAT_ENTITY_SBA: ");
262		goto print_ranges;
263
264	case PAT_ENTITY_LBA:
265		printk(KERN_DEBUG "PAT_ENTITY_LBA: ");
266
267 print_ranges:
268		pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
269				    IO_VIEW, &io_pdc_cell);
270		printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]);
271		for (i = 0; i < pa_pdc_cell->mod[1]; i++) {
272			printk(KERN_DEBUG 
273				"  PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n", 
274				i, pa_pdc_cell->mod[2 + i * 3],	/* type */
275				pa_pdc_cell->mod[3 + i * 3],	/* start */
276				pa_pdc_cell->mod[4 + i * 3]);	/* finish (ie end) */
277			printk(KERN_DEBUG 
278				"  IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n", 
279				i, io_pdc_cell.mod[2 + i * 3],	/* type */
280				io_pdc_cell.mod[3 + i * 3],	/* start */
281				io_pdc_cell.mod[4 + i * 3]);	/* finish (ie end) */
282		}
283		printk(KERN_DEBUG "\n");
284		break;
285	}
286#endif /* DEBUG_PAT */
287
288	kfree(pa_pdc_cell);
289
290	return PDC_OK;
291}
292
293
294/* pat pdc can return information about a variety of different
295 * types of memory (e.g. firmware,i/o, etc) but we only care about
296 * the usable physical ram right now. Since the firmware specific
297 * information is allocated on the stack, we'll be generous, in
298 * case there is a lot of other information we don't care about.
299 */
300
301#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)
302
303static void __init pat_memconfig(void)
304{
305	unsigned long actual_len;
306	struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
307	struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
308	physmem_range_t *pmem_ptr;
309	long status;
310	int entries;
311	unsigned long length;
312	int i;
313
314	length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);
315
316	status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);
317
318	if ((status != PDC_OK)
319	    || ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {
320
321		/* The above pdc call shouldn't fail, but, just in
322		 * case, just use the PAGE0 info.
323		 */
324
325		printk("\n\n\n");
326		printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
327			"All memory may not be used!\n\n\n");
328		pagezero_memconfig();
329		return;
330	}
331
332	entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);
333
334	if (entries > PAT_MAX_RANGES) {
335		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
336		printk(KERN_WARNING "Some memory may not be used!\n");
337	}
338
339	/* Copy information into the firmware independent pmem_ranges
340	 * array, skipping types we don't care about. Notice we said
341	 * "may" above. We'll use all the entries that were returned.
342	 */
343
344	npmem_ranges = 0;
345	mtbl_ptr = mem_table;
346	pmem_ptr = pmem_ranges; /* Global firmware independent table */
347	for (i = 0; i < entries; i++,mtbl_ptr++) {
348		if (   (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
349		    || (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
350		    || (mtbl_ptr->pages == 0)
351		    || (   (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
352			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
353			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) {
354
355			continue;
356		}
357
358		if (npmem_ranges == MAX_PHYSMEM_RANGES) {
359			printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
360			printk(KERN_WARNING "Some memory will not be used!\n");
361			break;
362		}
363
364		set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
365		npmem_ranges++;
366	}
367}
368
369static int __init pat_inventory(void)
370{
371	int status;
372	ulong mod_index = 0;
373	struct pdc_pat_cell_num cell_info;
374
375	/*
376	** Note:  Prelude (and its successors: Lclass, A400/500) only
377	**        implement PDC_PAT_CELL sub-options 0 and 2.
378	*/
379	status = pdc_pat_cell_get_number(&cell_info);
380	if (status != PDC_OK) {
381		return 0;
382	}
383
384#ifdef DEBUG_PAT
385	printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num, 
386	       cell_info.cell_loc);
387#endif
388
389	while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) {
390		mod_index++;
391	}
392
393	return mod_index;
394}
395
396/* We only look for extended memory ranges on a 64 bit capable box */
397static void __init sprockets_memconfig(void)
398{
399	struct pdc_memory_table_raddr r_addr;
400	struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
401	struct pdc_memory_table *mtbl_ptr;
402	physmem_range_t *pmem_ptr;
403	long status;
404	int entries;
405	int i;
406
407	status = pdc_mem_mem_table(&r_addr,mem_table,
408				(unsigned long)MAX_PHYSMEM_RANGES);
409
410	if (status != PDC_OK) {
411
412		/* The above pdc call only works on boxes with sprockets
413		 * firmware (newer B,C,J class). Other non PAT PDC machines
414		 * do support more than 3.75 Gb of memory, but we don't
415		 * support them yet.
416		 */
417
418		pagezero_memconfig();
419		return;
420	}
421
422	if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
423		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
424		printk(KERN_WARNING "Some memory will not be used!\n");
425	}
426
427	entries = (int)r_addr.entries_returned;
428
429	npmem_ranges = 0;
430	mtbl_ptr = mem_table;
431	pmem_ptr = pmem_ranges; /* Global firmware independent table */
432	for (i = 0; i < entries; i++,mtbl_ptr++) {
433		set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
434		npmem_ranges++;
435	}
436}
437
438#else   /* !CONFIG_64BIT */
439
440#define pat_inventory() do { } while (0)
441#define pat_memconfig() do { } while (0)
442#define sprockets_memconfig() pagezero_memconfig()
443
444#endif	/* !CONFIG_64BIT */
445
446
447#ifndef CONFIG_PA20
448
449/* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */
450
451static struct parisc_device * __init
452legacy_create_device(struct pdc_memory_map *r_addr,
453		struct pdc_module_path *module_path)
454{
455	struct parisc_device *dev;
456	int status = pdc_mem_map_hpa(r_addr, module_path);
457	if (status != PDC_OK)
458		return NULL;
459
460	dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
461	if (dev == NULL)
462		return NULL;
463
464	register_parisc_device(dev);
465	return dev;
466}
467
468/**
469 * snake_inventory
470 *
471 * Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used.
472 * To use it, we initialise the mod_path.bc to 0xff and try all values of
473 * mod to get the HPA for the top-level devices.  Bus adapters may have
474 * sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the
475 * module, then trying all possible functions.
476 */
477static void __init snake_inventory(void)
478{
479	int mod;
480	for (mod = 0; mod < 16; mod++) {
481		struct parisc_device *dev;
482		struct pdc_module_path module_path;
483		struct pdc_memory_map r_addr;
484		unsigned int func;
485
486		memset(module_path.path.bc, 0xff, 6);
487		module_path.path.mod = mod;
488		dev = legacy_create_device(&r_addr, &module_path);
489		if ((!dev) || (dev->id.hw_type != HPHW_BA))
490			continue;
491
492		memset(module_path.path.bc, 0xff, 4);
493		module_path.path.bc[4] = mod;
494
495		for (func = 0; func < 16; func++) {
496			module_path.path.bc[5] = 0;
497			module_path.path.mod = func;
498			legacy_create_device(&r_addr, &module_path);
499		}
500	}
501}
502
503#else /* CONFIG_PA20 */
504#define snake_inventory() do { } while (0)
505#endif  /* CONFIG_PA20 */
506
507/* Common 32/64 bit based code goes here */
508
509/**
510 * add_system_map_addresses - Add additional addresses to the parisc device.
511 * @dev: The parisc device.
512 * @num_addrs: The number of addresses to add.
513 * @module_instance: The system_map module instance.
514 *
515 * This function adds any additional addresses reported by the system_map
516 * firmware to the parisc device.
517 */
518static void __init
519add_system_map_addresses(struct parisc_device *dev, int num_addrs, 
520			 int module_instance)
521{
522	int i;
523	long status;
524	struct pdc_system_map_addr_info addr_result;
525
526	dev->addr = kmalloc_array(num_addrs, sizeof(*dev->addr), GFP_KERNEL);
527	if(!dev->addr) {
528		printk(KERN_ERR "%s %s(): memory allocation failure\n",
529		       __FILE__, __func__);
530		return;
531	}
532
533	for(i = 1; i <= num_addrs; ++i) {
534		status = pdc_system_map_find_addrs(&addr_result, 
535						   module_instance, i);
536		if(PDC_OK == status) {
537			dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
538			dev->num_addrs++;
539		} else {
540			printk(KERN_WARNING 
541			       "Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
542			       status, i);
543		}
544	}
545}
546
547/**
548 * system_map_inventory - Retrieve firmware devices via SYSTEM_MAP.
549 *
550 * This function attempts to retrieve and register all the devices firmware
551 * knows about via the SYSTEM_MAP PDC call.
552 */
553static void __init system_map_inventory(void)
554{
555	int i;
556	long status = PDC_OK;
557    
558	for (i = 0; i < 256; i++) {
559		struct parisc_device *dev;
560		struct pdc_system_map_mod_info module_result;
561		struct pdc_module_path module_path;
562
563		status = pdc_system_map_find_mods(&module_result,
564				&module_path, i);
565		if ((status == PDC_BAD_PROC) || (status == PDC_NE_MOD))
566			break;
567		if (status != PDC_OK)
568			continue;
569
570		dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
571		if (!dev)
572			continue;
573		
574		register_parisc_device(dev);
575
576		/* if available, get the additional addresses for a module */
577		if (!module_result.add_addrs)
578			continue;
579
580		add_system_map_addresses(dev, module_result.add_addrs, i);
581	}
582
583	walk_central_bus();
584	return;
585}
586
587void __init do_memory_inventory(void)
588{
589	switch (pdc_type) {
590
591	case PDC_TYPE_PAT:
592		pat_memconfig();
593		break;
594
595	case PDC_TYPE_SYSTEM_MAP:
596		sprockets_memconfig();
597		break;
598
599	case PDC_TYPE_SNAKE:
600		pagezero_memconfig();
601		return;
602
603	default:
604		panic("Unknown PDC type!\n");
605	}
606
607	if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
608		printk(KERN_WARNING "Bad memory configuration returned!\n");
609		printk(KERN_WARNING "Some memory may not be used!\n");
610		pagezero_memconfig();
611	}
612}
613
614void __init do_device_inventory(void)
615{
616	printk(KERN_INFO "Searching for devices...\n");
617
618	init_parisc_bus();
619
620	switch (pdc_type) {
621
622	case PDC_TYPE_PAT:
623		pat_inventory();
624		break;
625
626	case PDC_TYPE_SYSTEM_MAP:
627		system_map_inventory();
628		break;
629
630	case PDC_TYPE_SNAKE:
631		snake_inventory();
632		break;
633
634	default:
635		panic("Unknown PDC type!\n");
636	}
637	printk(KERN_INFO "Found devices:\n");
638	print_parisc_devices();
639
640#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
641	pa_serialize_tlb_flushes = machine_has_merced_bus();
642	if (pa_serialize_tlb_flushes)
643		pr_info("Merced bus found: Enable PxTLB serialization.\n");
644#endif
645
646#if defined(CONFIG_FW_CFG_SYSFS)
647	if (running_on_qemu) {
648		struct resource res[3] = {0,};
649		unsigned int base;
650
651		base = ((unsigned long long) PAGE0->pad0[2] << 32)
652			| PAGE0->pad0[3]; /* SeaBIOS stored it here */
653
654		res[0].name = "fw_cfg";
655		res[0].start = base;
656		res[0].end = base + 8 - 1;
657		res[0].flags = IORESOURCE_MEM;
658
659		res[1].name = "ctrl";
660		res[1].start = 0;
661		res[1].flags = IORESOURCE_REG;
662
663		res[2].name = "data";
664		res[2].start = 4;
665		res[2].flags = IORESOURCE_REG;
666
667		if (base) {
668			pr_info("Found qemu fw_cfg interface at %#08x\n", base);
669			platform_device_register_simple("fw_cfg",
670				PLATFORM_DEVID_NONE, res, 3);
671		}
672	}
673#endif
674}