Loading...
1/*
2 * palinfo.c
3 *
4 * Prints processor specific information reported by PAL.
5 * This code is based on specification of PAL as of the
6 * Intel IA-64 Architecture Software Developer's Manual v1.0.
7 *
8 *
9 * Copyright (C) 2000-2001, 2003 Hewlett-Packard Co
10 * Stephane Eranian <eranian@hpl.hp.com>
11 * Copyright (C) 2004 Intel Corporation
12 * Ashok Raj <ashok.raj@intel.com>
13 *
14 * 05/26/2000 S.Eranian initial release
15 * 08/21/2000 S.Eranian updated to July 2000 PAL specs
16 * 02/05/2001 S.Eranian fixed module support
17 * 10/23/2001 S.Eranian updated pal_perf_mon_info bug fixes
18 * 03/24/2004 Ashok Raj updated to work with CPU Hotplug
19 * 10/26/2006 Russ Anderson updated processor features to rev 2.2 spec
20 */
21#include <linux/types.h>
22#include <linux/errno.h>
23#include <linux/init.h>
24#include <linux/proc_fs.h>
25#include <linux/mm.h>
26#include <linux/module.h>
27#include <linux/efi.h>
28#include <linux/notifier.h>
29#include <linux/cpu.h>
30#include <linux/cpumask.h>
31
32#include <asm/pal.h>
33#include <asm/sal.h>
34#include <asm/page.h>
35#include <asm/processor.h>
36#include <linux/smp.h>
37
38MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
39MODULE_DESCRIPTION("/proc interface to IA-64 PAL");
40MODULE_LICENSE("GPL");
41
42#define PALINFO_VERSION "0.5"
43
44typedef int (*palinfo_func_t)(char*);
45
46typedef struct {
47 const char *name; /* name of the proc entry */
48 palinfo_func_t proc_read; /* function to call for reading */
49 struct proc_dir_entry *entry; /* registered entry (removal) */
50} palinfo_entry_t;
51
52
53/*
54 * A bunch of string array to get pretty printing
55 */
56
/*
 * Human-readable names for PAL cache/TLB types. Indexed by the 1-based
 * type code plus the "unified" flag, hence the unused slot 0.
 * Made const to match the read-only use in cache_info()/vm_info().
 */
static const char *cache_types[] = {
	"",			/* not used */
	"Instruction",
	"Data",
	"Data/Instruction"	/* unified */
};

/* Cache write policies, indexed by pcci_cache_attr. */
static const char *cache_mattrib[]={
	"WriteThrough",
	"WriteBack",
	"",		/* reserved */
	""		/* reserved */
};

/* Store hint names, indexed by bit position of pcci_st_hints. */
static const char *cache_st_hints[]={
	"Temporal, level 1",
	"Reserved",
	"Reserved",
	"Non-temporal, all levels",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved"
};

/* Load hint names, indexed by bit position of pcci_ld_hints. */
static const char *cache_ld_hints[]={
	"Temporal, level 1",
	"Non-temporal, level 1",
	"Reserved",
	"Non-temporal, all levels",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved"
};

/* RSE load/store hint settings, indexed by hints.ph_data. */
static const char *rse_hints[]={
	"enforced lazy",
	"eager stores",
	"eager loads",
	"eager loads and stores"
};

#define RSE_HINTS_COUNT ARRAY_SIZE(rse_hints)

/* Memory attribute mnemonics, indexed by the 3-bit attribute encoding. */
static const char *mem_attrib[]={
	"WB",		/* 000 */
	"SW",		/* 001 */
	"010",		/* 010 */
	"011",		/* 011 */
	"UC",		/* 100 */
	"UCE",		/* 101 */
	"WC",		/* 110 */
	"NaTPage"	/* 111 */
};
112
113/*
114 * Take a 64bit vector and produces a string such that
115 * if bit n is set then 2^n in clear text is generated. The adjustment
116 * to the right unit is also done.
117 *
118 * Input:
119 * - a pointer to a buffer to hold the string
120 * - a 64-bit vector
121 * Ouput:
122 * - a pointer to the end of the buffer
123 *
124 */
125static char *
126bitvector_process(char *p, u64 vector)
127{
128 int i,j;
129 const char *units[]={ "", "K", "M", "G", "T" };
130
131 for (i=0, j=0; i < 64; i++ , j=i/10) {
132 if (vector & 0x1) {
133 p += sprintf(p, "%d%s ", 1 << (i-j*10), units[j]);
134 }
135 vector >>= 1;
136 }
137 return p;
138}
139
/*
 * Take a register mask (an array of 64-bit words) and produces a string
 * such that if bit n is set then register n is present. The function
 * takes into account consecutive registers and prints out ranges.
 *
 * Input:
 *	- a pointer to a buffer to hold the string
 *	- a pointer to the (possibly multi-word) register mask
 *	- number of bits in the mask to scan
 * Ouput:
 *	- a pointer to the end of the buffer
 *
 */
static char *
bitregister_process(char *p, u64 *reg_info, int max)
{
	int i, begin, skip = 0;
	u64 value = reg_info[0];

	/* skip the leading run of clear bits; begin is the first set bit */
	value >>= i = begin = ffs(value) - 1;

	for(; i < max; i++ ) {

		/* crossed into the next 64-bit word of the mask */
		if (i != 0 && (i%64) == 0) value = *++reg_info;

		if ((value & 0x1) == 0 && skip == 0) {
			/* end of a run of set bits: print a range or a single one */
			if (begin  <= i - 2)
				p += sprintf(p, "%d-%d ", begin, i-1);
			else
				p += sprintf(p, "%d ", i-1);
			skip  = 1;
			begin = -1;
		} else if ((value & 0x1) && skip == 1) {
			/* start of a new run of set bits */
			skip = 0;
			begin = i;
		}
		value >>=1;
	}
	if (begin > -1) {
		/*
		 * final run extends to the end of the mask.
		 * NOTE(review): the upper bound is hard-coded to 127 even when
		 * max != 128 (perfmon_info passes 256) — confirm intent.
		 */
		if (begin < 127)
			p += sprintf(p, "%d-127", begin);
		else
			p += sprintf(p, "127");
	}

	return p;
}
186
187static int
188power_info(char *page)
189{
190 s64 status;
191 char *p = page;
192 u64 halt_info_buffer[8];
193 pal_power_mgmt_info_u_t *halt_info =(pal_power_mgmt_info_u_t *)halt_info_buffer;
194 int i;
195
196 status = ia64_pal_halt_info(halt_info);
197 if (status != 0) return 0;
198
199 for (i=0; i < 8 ; i++ ) {
200 if (halt_info[i].pal_power_mgmt_info_s.im == 1) {
201 p += sprintf(p, "Power level %d:\n"
202 "\tentry_latency : %d cycles\n"
203 "\texit_latency : %d cycles\n"
204 "\tpower consumption : %d mW\n"
205 "\tCache+TLB coherency : %s\n", i,
206 halt_info[i].pal_power_mgmt_info_s.entry_latency,
207 halt_info[i].pal_power_mgmt_info_s.exit_latency,
208 halt_info[i].pal_power_mgmt_info_s.power_consumption,
209 halt_info[i].pal_power_mgmt_info_s.co ? "Yes" : "No");
210 } else {
211 p += sprintf(p,"Power level %d: not implemented\n",i);
212 }
213 }
214 return p - page;
215}
216
/*
 * /proc/pal/cpuN/cache_info: geometry, latency and hint information for
 * every cache level/type reported by PAL_CACHE_SUMMARY / PAL_CACHE_INFO.
 * Returns the number of bytes written to page (0 on PAL error).
 */
static int
cache_info(char *page)
{
	char *p = page;
	unsigned long i, levels, unique_caches;
	pal_cache_config_info_t cci;
	int j, k;
	long status;

	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
		return 0;
	}

	p += sprintf(p, "Cache levels : %ld\nUnique caches : %ld\n\n", levels, unique_caches);

	for (i=0; i < levels; i++) {

		/* j=2 queries the data side, j=1 the instruction side */
		for (j=2; j >0 ; j--) {

			/* even without unification some level may not be present */
			if ((status=ia64_pal_cache_config_info(i,j, &cci)) != 0) {
				continue;
			}
			p += sprintf(p,
				     "%s Cache level %lu:\n"
				     "\tSize : %u bytes\n"
				     "\tAttributes : ",
				     cache_types[j+cci.pcci_unified], i+1,
				     cci.pcci_cache_size);

			if (cci.pcci_unified) p += sprintf(p, "Unified ");

			p += sprintf(p, "%s\n", cache_mattrib[cci.pcci_cache_attr]);

			/* line size and stride come back log2-encoded from PAL */
			p += sprintf(p,
				     "\tAssociativity : %d\n"
				     "\tLine size : %d bytes\n"
				     "\tStride : %d bytes\n",
				     cci.pcci_assoc, 1<<cci.pcci_line_size, 1<<cci.pcci_stride);
			if (j == 1)
				/* an instruction cache is never stored to */
				p += sprintf(p, "\tStore latency : N/A\n");
			else
				p += sprintf(p, "\tStore latency : %d cycle(s)\n",
					     cci.pcci_st_latency);

			p += sprintf(p,
				     "\tLoad latency : %d cycle(s)\n"
				     "\tStore hints : ", cci.pcci_ld_latency);

			/* decode the 8-bit implemented-store-hints mask */
			for(k=0; k < 8; k++ ) {
				if ( cci.pcci_st_hints & 0x1)
					p += sprintf(p, "[%s]", cache_st_hints[k]);
				cci.pcci_st_hints >>=1;
			}
			p += sprintf(p, "\n\tLoad hints : ");

			/* decode the 8-bit implemented-load-hints mask */
			for(k=0; k < 8; k++ ) {
				if (cci.pcci_ld_hints & 0x1)
					p += sprintf(p, "[%s]", cache_ld_hints[k]);
				cci.pcci_ld_hints >>=1;
			}
			p += sprintf(p,
				     "\n\tAlias boundary : %d byte(s)\n"
				     "\tTag LSB : %d\n"
				     "\tTag MSB : %d\n",
				     1<<cci.pcci_alias_boundary, cci.pcci_tag_lsb,
				     cci.pcci_tag_msb);

			/* when unified, data(j=2) is enough */
			if (cci.pcci_unified) break;
		}
	}
	return p - page;
}
292
293
/*
 * /proc/pal/cpuN/vm_info: virtual-memory characteristics — address-space
 * sizes, supported memory attributes, TLB geometry and purge parameters.
 * Each PAL query failure is logged and only skips its own section.
 * Returns the number of bytes written to page.
 */
static int
vm_info(char *page)
{
	char *p = page;
	u64 tr_pages =0, vw_pages=0, tc_pages;
	u64 attrib;
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	pal_tc_info_u_t	tc_info;
	ia64_ptce_info_t ptce;
	const char *sep;
	int i, j;
	long status;

	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
	} else {

		p += sprintf(p,
		     "Physical Address Space : %d bits\n"
		     "Virtual Address Space : %d bits\n"
		     "Protection Key Registers(PKR) : %d\n"
		     "Implemented bits in PKR.key : %d\n"
		     "Hash Tag ID : 0x%x\n"
		     "Size of RR.rid : %d\n"
		     "Max Purges : ",
		     vm_info_1.pal_vm_info_1_s.phys_add_size,
		     vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
		     vm_info_1.pal_vm_info_1_s.max_pkr+1,
		     vm_info_1.pal_vm_info_1_s.key_size,
		     vm_info_1.pal_vm_info_1_s.hash_tag_id,
		     vm_info_2.pal_vm_info_2_s.rid_size);
		if (vm_info_2.pal_vm_info_2_s.max_purges == PAL_MAX_PURGES)
			p += sprintf(p, "unlimited\n");
		else
			/* a zero count is reported as 1 outstanding purge */
			p += sprintf(p, "%d\n",
				vm_info_2.pal_vm_info_2_s.max_purges ?
				vm_info_2.pal_vm_info_2_s.max_purges : 1);
	}

	/* one bit per supported memory attribute (see mem_attrib[]) */
	if (ia64_pal_mem_attrib(&attrib) == 0) {
		p += sprintf(p, "Supported memory attributes : ");
		sep = "";
		for (i = 0; i < 8; i++) {
			if (attrib & (1 << i)) {
				p += sprintf(p, "%s%s", sep, mem_attrib[i]);
				sep = ", ";
			}
		}
		p += sprintf(p, "\n");
	}

	if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status);
	} else {

		p += sprintf(p,
			     "\nTLB walker : %simplemented\n"
			     "Number of DTR : %d\n"
			     "Number of ITR : %d\n"
			     "TLB insertable page sizes : ",
			     vm_info_1.pal_vm_info_1_s.vw ? "" : "not ",
			     vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
			     vm_info_1.pal_vm_info_1_s.max_itr_entry+1);

		/* tr_pages / vw_pages are bit masks of supported page sizes */
		p = bitvector_process(p, tr_pages);

		p += sprintf(p, "\nTLB purgeable page sizes : ");

		p = bitvector_process(p, vw_pages);
	}
	if ((status=ia64_get_ptce(&ptce)) != 0) {
		printk(KERN_ERR "ia64_get_ptce=%ld\n", status);
	} else {
		p += sprintf(p,
		     "\nPurge base address : 0x%016lx\n"
		     "Purge outer loop count : %d\n"
		     "Purge inner loop count : %d\n"
		     "Purge outer loop stride : %d\n"
		     "Purge inner loop stride : %d\n",
		     ptce.base, ptce.count[0], ptce.count[1],
		     ptce.stride[0], ptce.stride[1]);

		p += sprintf(p,
		     "TC Levels : %d\n"
		     "Unique TC(s) : %d\n",
		     vm_info_1.pal_vm_info_1_s.num_tc_levels,
		     vm_info_1.pal_vm_info_1_s.max_unique_tcs);

		/* walk each translation-cache level and type (2=data, 1=inst) */
		for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) {
			for (j=2; j>0 ; j--) {
				tc_pages = 0; /* just in case */


				/* even without unification, some levels may not be present */
				if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) {
					continue;
				}

				p += sprintf(p,
				     "\n%s Translation Cache Level %d:\n"
				     "\tHash sets : %d\n"
				     "\tAssociativity : %d\n"
				     "\tNumber of entries : %d\n"
				     "\tFlags : ",
				     cache_types[j+tc_info.tc_unified], i+1,
				     tc_info.tc_num_sets,
				     tc_info.tc_associativity,
				     tc_info.tc_num_entries);

				if (tc_info.tc_pf)
					p += sprintf(p, "PreferredPageSizeOptimized ");
				if (tc_info.tc_unified)
					p += sprintf(p, "Unified ");
				if (tc_info.tc_reduce_tr)
					p += sprintf(p, "TCReduction");

				p += sprintf(p, "\n\tSupported page sizes: ");

				p = bitvector_process(p, tc_pages);

				/* when unified, data (j=2) is enough */
				if (tc_info.tc_unified)
					break;
			}
		}
	}
	p += sprintf(p, "\n");

	return p - page;
}
426
427
428static int
429register_info(char *page)
430{
431 char *p = page;
432 u64 reg_info[2];
433 u64 info;
434 unsigned long phys_stacked;
435 pal_hints_u_t hints;
436 unsigned long iregs, dregs;
437 static const char * const info_type[] = {
438 "Implemented AR(s)",
439 "AR(s) with read side-effects",
440 "Implemented CR(s)",
441 "CR(s) with read side-effects",
442 };
443
444 for(info=0; info < 4; info++) {
445
446 if (ia64_pal_register_info(info, ®_info[0], ®_info[1]) != 0) return 0;
447
448 p += sprintf(p, "%-32s : ", info_type[info]);
449
450 p = bitregister_process(p, reg_info, 128);
451
452 p += sprintf(p, "\n");
453 }
454
455 if (ia64_pal_rse_info(&phys_stacked, &hints) == 0) {
456
457 p += sprintf(p,
458 "RSE stacked physical registers : %ld\n"
459 "RSE load/store hints : %ld (%s)\n",
460 phys_stacked, hints.ph_data,
461 hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)");
462 }
463 if (ia64_pal_debug_info(&iregs, &dregs))
464 return 0;
465
466 p += sprintf(p,
467 "Instruction debug register pairs : %ld\n"
468 "Data debug register pairs : %ld\n", iregs, dregs);
469
470 return p - page;
471}
472
/*
 * Bit names for PAL_PROC_GET_FEATURES feature set 0.
 * Indexed by bit number (0-63); NULL entries are reserved/unnamed.
 */
static char *proc_features_0[]={	/* Feature set 0 */
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,
	"Unimplemented instruction address fault",
	"INIT, PMI, and LINT pins",
	"Simple unimplemented instr addresses",
	"Variable P-state performance",
	"Virtual machine features implemented",
	"XIP,XPSR,XFS implemented",
	"XR1-XR3 implemented",
	"Disable dynamic predicate prediction",
	"Disable processor physical number",
	"Disable dynamic data cache prefetch",
	"Disable dynamic inst cache prefetch",
	"Disable dynamic branch prediction",
	NULL, NULL, NULL, NULL,
	"Disable P-states",
	"Enable MCA on Data Poisoning",
	"Enable vmsw instruction",
	"Enable extern environmental notification",
	"Disable BINIT on processor time-out",
	"Disable dynamic power management (DPM)",
	"Disable coherency",
	"Disable cache",
	"Enable CMCI promotion",
	"Enable MCA to BINIT promotion",
	"Enable MCA promotion",
	"Enable BERR promotion"
};

/* Bit names for feature set 16 (added in rev 2.2 of the PAL spec). */
static char *proc_features_16[]={	/* Feature set 16 */
	"Disable ETM",
	"Enable ETM",
	"Enable MCA on half-way timer",
	"Enable snoop WC",
	NULL,
	"Enable Fast Deferral",
	"Disable MCA on memory aliasing",
	"Enable RSB",
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	"DP system processor",
	"Low Voltage",
	"HT supported",
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL
};

/*
 * Name table per feature set; a NULL set (or NULL bit) falls back to the
 * generic "Feature set N bit M" form in feature_set_info().
 */
static char **proc_features[]={
	proc_features_0,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL,
	proc_features_16,
	NULL, NULL, NULL, NULL,
};
532
/*
 * Print one 64-bit PAL feature set: one line per available feature with
 * its current status (On/Off) and whether software can control it.
 *
 * page:    current output position; the advanced position is returned.
 * avail/status/control: bit masks returned by PAL_PROC_GET_FEATURES.
 * set:     feature set number, used to pick the name table.
 *
 * NOTE(review): the loop stops once no control bits remain, so available
 * but uncontrollable higher bits are not printed — confirm this is intended.
 */
static char * feature_set_info(char *page, u64 avail, u64 status, u64 control,
			       unsigned long set)
{
	char *p = page;
	char **vf, **v;
	int i;

	vf = v = proc_features[set];
	for(i=0; i < 64; i++, avail >>=1, status >>=1, control >>=1) {

		if (!(control))		/* No remaining bits set */
			break;
		if (!(avail & 0x1))	/* Print only bits that are available */
			continue;
		if (vf)
			v = vf + i;
		if ( v && *v ) {
			/* named feature */
			p += sprintf(p, "%-40s : %s %s\n", *v,
				avail & 0x1 ? (status & 0x1 ?
					      "On " : "Off"): "",
				avail & 0x1 ? (control & 0x1 ?
					      "Ctrl" : "NoCtrl"): "");
		} else {
			/* unnamed feature: identify it by set and bit number */
			p += sprintf(p, "Feature set %2ld bit %2d\t\t\t"
				     " : %s %s\n",
				     set, i,
				     avail & 0x1 ? (status & 0x1 ?
						   "On " : "Off"): "",
				     avail & 0x1 ? (control & 0x1 ?
						   "Ctrl" : "NoCtrl"): "");
		}
	}
	return p;
}
567
568static int
569processor_info(char *page)
570{
571 char *p = page;
572 u64 avail=1, status=1, control=1, feature_set=0;
573 s64 ret;
574
575 do {
576 ret = ia64_pal_proc_get_features(&avail, &status, &control,
577 feature_set);
578 if (ret < 0) {
579 return p - page;
580 }
581 if (ret == 1) {
582 feature_set++;
583 continue;
584 }
585
586 p = feature_set_info(p, avail, status, control, feature_set);
587
588 feature_set++;
589 } while(1);
590
591 return p - page;
592}
593
/*
 * Bit names for PAL_BUS_GET_FEATURES, indexed by bit number.
 * NULL entries are reserved bits; used by bus_info() below.
 */
static const char *bus_features[]={
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,
	"Request Bus Parking",
	"Bus Lock Mask",
	"Enable Half Transfer",
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL,
	"Enable Cache Line Repl. Shared",
	"Enable Cache Line Repl. Exclusive",
	"Disable Transaction Queuing",
	"Disable Response Error Checking",
	"Disable Bus Error Checking",
	"Disable Bus Requester Internal Error Signalling",
	"Disable Bus Requester Error Signalling",
	"Disable Bus Initialization Event Checking",
	"Disable Bus Initialization Event Signalling",
	"Disable Bus Address Error Checking",
	"Disable Bus Address Error Signalling",
	"Disable Bus Data Error Checking"
};
618
619
620static int
621bus_info(char *page)
622{
623 char *p = page;
624 const char **v = bus_features;
625 pal_bus_features_u_t av, st, ct;
626 u64 avail, status, control;
627 int i;
628 s64 ret;
629
630 if ((ret=ia64_pal_bus_get_features(&av, &st, &ct)) != 0) return 0;
631
632 avail = av.pal_bus_features_val;
633 status = st.pal_bus_features_val;
634 control = ct.pal_bus_features_val;
635
636 for(i=0; i < 64; i++, v++, avail >>=1, status >>=1, control >>=1) {
637 if ( ! *v ) continue;
638 p += sprintf(p, "%-48s : %s%s %s\n", *v,
639 avail & 0x1 ? "" : "NotImpl",
640 avail & 0x1 ? (status & 0x1 ? "On" : "Off"): "",
641 avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): "");
642 }
643 return p - page;
644}
645
646static int
647version_info(char *page)
648{
649 pal_version_u_t min_ver, cur_ver;
650 char *p = page;
651
652 if (ia64_pal_version(&min_ver, &cur_ver) != 0)
653 return 0;
654
655 p += sprintf(p,
656 "PAL_vendor : 0x%02x (min=0x%02x)\n"
657 "PAL_A : %02x.%02x (min=%02x.%02x)\n"
658 "PAL_B : %02x.%02x (min=%02x.%02x)\n",
659 cur_ver.pal_version_s.pv_pal_vendor,
660 min_ver.pal_version_s.pv_pal_vendor,
661 cur_ver.pal_version_s.pv_pal_a_model,
662 cur_ver.pal_version_s.pv_pal_a_rev,
663 min_ver.pal_version_s.pv_pal_a_model,
664 min_ver.pal_version_s.pv_pal_a_rev,
665 cur_ver.pal_version_s.pv_pal_b_model,
666 cur_ver.pal_version_s.pv_pal_b_rev,
667 min_ver.pal_version_s.pv_pal_b_model,
668 min_ver.pal_version_s.pv_pal_b_rev);
669 return p - page;
670}
671
/*
 * /proc/pal/cpuN/perfmon_info: PMU capabilities from PAL_PERF_MON_INFO.
 * pm_buffer holds four 256-bit masks (4 x u64 each): implemented PMCs,
 * implemented PMDs, cycle-count-capable and retired-count-capable regs.
 */
static int
perfmon_info(char *page)
{
	char *p = page;
	u64 pm_buffer[16];
	pal_perf_mon_info_u_t pm_info;

	if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0) return 0;

	p += sprintf(p,
		     "PMC/PMD pairs : %d\n"
		     "Counter width : %d bits\n"
		     "Cycle event number : %d\n"
		     "Retired event number : %d\n"
		     "Implemented PMC : ",
		     pm_info.pal_perf_mon_info_s.generic, pm_info.pal_perf_mon_info_s.width,
		     pm_info.pal_perf_mon_info_s.cycles, pm_info.pal_perf_mon_info_s.retired);

	p = bitregister_process(p, pm_buffer, 256);
	p += sprintf(p, "\nImplemented PMD : ");
	p = bitregister_process(p, pm_buffer+4, 256);
	p += sprintf(p, "\nCycles count capable : ");
	p = bitregister_process(p, pm_buffer+8, 256);
	p += sprintf(p, "\nRetired bundles count capable : ");

#ifdef CONFIG_ITANIUM
	/*
	 * PAL_PERF_MON_INFO reports that only PMC4 can be used to count CPU_CYCLES
	 * which is wrong, both PMC4 and PMD5 support it.
	 */
	if (pm_buffer[12] == 0x10) pm_buffer[12]=0x30;
#endif

	p = bitregister_process(p, pm_buffer+12, 256);

	p += sprintf(p, "\n");

	return p - page;
}
711
712static int
713frequency_info(char *page)
714{
715 char *p = page;
716 struct pal_freq_ratio proc, itc, bus;
717 unsigned long base;
718
719 if (ia64_pal_freq_base(&base) == -1)
720 p += sprintf(p, "Output clock : not implemented\n");
721 else
722 p += sprintf(p, "Output clock : %ld ticks/s\n", base);
723
724 if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;
725
726 p += sprintf(p,
727 "Processor/Clock ratio : %d/%d\n"
728 "Bus/Clock ratio : %d/%d\n"
729 "ITC/Clock ratio : %d/%d\n",
730 proc.num, proc.den, bus.num, bus.den, itc.num, itc.den);
731
732 return p - page;
733}
734
/*
 * /proc/pal/cpuN/tr_info: dump every valid instruction (ITR) and data
 * (DTR) translation register via PAL_VM_TR_READ.
 */
static int
tr_info(char *page)
{
	char *p = page;
	long status;
	pal_tr_valid_u_t tr_valid;
	u64 tr_buffer[4];
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	unsigned long i, j;
	unsigned long max[3], pgm;
	/* bitfield overlays for the four 64-bit words PAL_VM_TR_READ returns:
	 * tr_buffer[0]=GR-format entry, [1]=itir, [2]=ifa, [3]=region id */
	struct ifa_reg {
		unsigned long valid:1;
		unsigned long ig:11;
		unsigned long vpn:52;
	} *ifa_reg;
	struct itir_reg {
		unsigned long rv1:2;
		unsigned long ps:6;
		unsigned long key:24;
		unsigned long rv2:32;
	} *itir_reg;
	struct gr_reg {
		unsigned long p:1;
		unsigned long rv1:1;
		unsigned long ma:3;
		unsigned long a:1;
		unsigned long d:1;
		unsigned long pl:2;
		unsigned long ar:3;
		unsigned long ppn:38;
		unsigned long rv2:2;
		unsigned long ed:1;
		unsigned long ig:11;
	} *gr_reg;
	struct rid_reg {
		unsigned long ig1:1;
		unsigned long rv1:1;
		unsigned long ig2:6;
		unsigned long rid:24;
		unsigned long rv2:32;
	} *rid_reg;

	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
		return 0;
	}
	max[0] = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
	max[1] = vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;

	/* i selects the register file (0=ITR, 1=DTR); j walks its slots */
	for (i=0; i < 2; i++ ) {
		for (j=0; j < max[i]; j++) {

			status = ia64_pal_tr_read(j, i, tr_buffer, &tr_valid);
			if (status != 0) {
				printk(KERN_ERR "palinfo: pal call failed on tr[%lu:%lu]=%ld\n",
				       i, j, status);
				continue;
			}

			ifa_reg = (struct ifa_reg *)&tr_buffer[2];

			if (ifa_reg->valid == 0) continue;

			gr_reg = (struct gr_reg *)tr_buffer;
			itir_reg = (struct itir_reg *)&tr_buffer[1];
			rid_reg = (struct rid_reg *)&tr_buffer[3];

			/* mask of the ppn/vpn bits significant for this page
			 * size; ppn/vpn are in 4K units, hence the -12 */
			pgm = -1 << (itir_reg->ps - 12);
			p += sprintf(p,
				     "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n"
				     "\tppn : 0x%lx\n"
				     "\tvpn : 0x%lx\n"
				     "\tps : ",
				     "ID"[i], j,
				     tr_valid.pal_tr_valid_s.access_rights_valid,
				     tr_valid.pal_tr_valid_s.priv_level_valid,
				     tr_valid.pal_tr_valid_s.dirty_bit_valid,
				     tr_valid.pal_tr_valid_s.mem_attr_valid,
				     (gr_reg->ppn & pgm)<< 12, (ifa_reg->vpn & pgm)<< 12);

			/* print the page size as a human-readable power of two */
			p = bitvector_process(p, 1<< itir_reg->ps);

			p += sprintf(p,
				     "\n\tpl : %d\n"
				     "\tar : %d\n"
				     "\trid : %x\n"
				     "\tp : %d\n"
				     "\tma : %d\n"
				     "\td : %d\n",
				     gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma,
				     gr_reg->d);
		}
	}
	return p - page;
}
831
832
833
/*
 * List {name,function} pairs for every entry in /proc/palinfo/cpu*
 * The array index doubles as the func_id encoded in the proc data
 * pointer (see create_palinfo_proc_entries / palinfo_read_entry).
 */
static palinfo_entry_t palinfo_entries[]={
	{ "version_info",	version_info, },
	{ "vm_info",		vm_info, },
	{ "cache_info",		cache_info, },
	{ "power_info",		power_info, },
	{ "register_info",	register_info, },
	{ "processor_info",	processor_info, },
	{ "perfmon_info",	perfmon_info, },
	{ "frequency_info",	frequency_info, },
	{ "bus_info",		bus_info },
	{ "tr_info",		tr_info, }
};

/* number of per-cpu proc files created from the table above */
#define NR_PALINFO_ENTRIES	(int) ARRAY_SIZE(palinfo_entries)
851
852/*
853 * this array is used to keep track of the proc entries we create. This is
854 * required in the module mode when we need to remove all entries. The procfs code
855 * does not do recursion of deletion
856 *
857 * Notes:
858 * - +1 accounts for the cpuN directory entry in /proc/pal
859 */
860#define NR_PALINFO_PROC_ENTRIES (NR_CPUS*(NR_PALINFO_ENTRIES+1))
861
862static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES];
863static struct proc_dir_entry *palinfo_dir;
864
865/*
866 * This data structure is used to pass which cpu,function is being requested
867 * It must fit in a 64bit quantity to be passed to the proc callback routine
868 *
869 * In SMP mode, when we get a request for another CPU, we must call that
870 * other CPU using IPI and wait for the result before returning.
871 */
872typedef union {
873 u64 value;
874 struct {
875 unsigned req_cpu: 32; /* for which CPU this info is */
876 unsigned func_id: 32; /* which function is requested */
877 } pal_func_cpu;
878} pal_func_cpu_u_t;
879
880#define req_cpu pal_func_cpu.req_cpu
881#define func_id pal_func_cpu.func_id
882
883#ifdef CONFIG_SMP
884
885/*
886 * used to hold information about final function to call
887 */
888typedef struct {
889 palinfo_func_t func; /* pointer to function to call */
890 char *page; /* buffer to store results */
891 int ret; /* return value from call */
892} palinfo_smp_data_t;
893
894
895/*
896 * this function does the actual final call and he called
897 * from the smp code, i.e., this is the palinfo callback routine
898 */
899static void
900palinfo_smp_call(void *info)
901{
902 palinfo_smp_data_t *data = (palinfo_smp_data_t *)info;
903 data->ret = (*data->func)(data->page);
904}
905
906/*
907 * function called to trigger the IPI, we need to access a remote CPU
908 * Return:
909 * 0 : error or nothing to output
910 * otherwise how many bytes in the "page" buffer were written
911 */
912static
913int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
914{
915 palinfo_smp_data_t ptr;
916 int ret;
917
918 ptr.func = palinfo_entries[f->func_id].proc_read;
919 ptr.page = page;
920 ptr.ret = 0; /* just in case */
921
922
923 /* will send IPI to other CPU and wait for completion of remote call */
924 if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) {
925 printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
926 "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
927 return 0;
928 }
929 return ptr.ret;
930}
931#else /* ! CONFIG_SMP */
static
int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
{
	/* on a UP kernel palinfo_read_entry always runs locally, so this
	 * stub should never be reached */
	printk(KERN_ERR "palinfo: should not be called with non SMP kernel\n");
	return 0;
}
938#endif /* CONFIG_SMP */
939
/*
 * Entry point routine: all calls go through this function.
 * Implements the legacy procfs read_proc protocol: returns the number
 * of bytes available starting at *start, honouring off/count and
 * setting *eof when the whole output has been delivered.
 */
static int
palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	int len=0;
	/* data is the packed (cpu, function index) pair stored at create time */
	pal_func_cpu_u_t *f = (pal_func_cpu_u_t *)&data;

	/*
	 * in SMP mode, we may need to call another CPU to get correct
	 * information. PAL, by definition, is processor specific
	 */
	if (f->req_cpu == get_cpu())
		len = (*palinfo_entries[f->func_id].proc_read)(page);
	else
		len = palinfo_handle_smp(f, page);

	put_cpu();

	/* standard legacy-procfs bookkeeping: clamp to [off, off+count) */
	if (len <= off+count) *eof = 1;

	*start = page + off;
	len   -= off;

	if (len>count) len = count;
	if (len<0) len = 0;

	return len;
}
970
/*
 * Create /proc/pal/cpuN plus one read entry per palinfo_entries[] item
 * for the given cpu. Every created proc_dir_entry is recorded in
 * palinfo_proc_entries so remove_palinfo_proc_entries() can undo this.
 */
static void __cpuinit
create_palinfo_proc_entries(unsigned int cpu)
{
#	define CPUSTR	"cpu%d"

	pal_func_cpu_u_t f;
	struct proc_dir_entry **pdir;
	struct proc_dir_entry *cpu_dir;
	int j;
	char cpustr[sizeof(CPUSTR)];


	/*
	 * we keep track of created entries in a depth-first order for
	 * cleanup purposes. Each entry is stored into palinfo_proc_entries
	 */
	sprintf(cpustr,CPUSTR, cpu);

	cpu_dir = proc_mkdir(cpustr, palinfo_dir);

	f.req_cpu = cpu;

	/*
	 * Compute the location to store per cpu entries
	 * We dont store the top level entry in this list, but
	 * remove it finally after removing all cpu entries.
	 */
	pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)];
	*pdir++ = cpu_dir;
	for (j=0; j < NR_PALINFO_ENTRIES; j++) {
		f.func_id = j;
		/* the packed (cpu,function) pair travels in the data pointer */
		*pdir = create_proc_read_entry(
				palinfo_entries[j].name, 0, cpu_dir,
				palinfo_read_entry, (void *)f.value);
		pdir++;
	}
}
1008
/*
 * Tear down /proc/pal/cpuN and all files below it for the given cpu,
 * walking the slots recorded by create_palinfo_proc_entries() and
 * NULLing each one so the slot can be reused on a later CPU_ONLINE.
 */
static void
remove_palinfo_proc_entries(unsigned int hcpu)
{
	int j;
	struct proc_dir_entry *cpu_dir, **pdir;

	/* slot 0 of the per-cpu region is the cpuN directory itself */
	pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)];
	cpu_dir = *pdir;
	*pdir++=NULL;
	for (j=0; j < (NR_PALINFO_ENTRIES); j++) {
		if ((*pdir)) {
			remove_proc_entry ((*pdir)->name, cpu_dir);
			*pdir ++= NULL;
		}
	}

	/* finally remove the (now empty) cpuN directory */
	if (cpu_dir) {
		remove_proc_entry(cpu_dir->name, palinfo_dir);
	}
}
1029
1030static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
1031 unsigned long action, void *hcpu)
1032{
1033 unsigned int hotcpu = (unsigned long)hcpu;
1034
1035 switch (action) {
1036 case CPU_ONLINE:
1037 case CPU_ONLINE_FROZEN:
1038 create_palinfo_proc_entries(hotcpu);
1039 break;
1040 case CPU_DEAD:
1041 case CPU_DEAD_FROZEN:
1042 remove_palinfo_proc_entries(hotcpu);
1043 break;
1044 }
1045 return NOTIFY_OK;
1046}
1047
1048static struct notifier_block __refdata palinfo_cpu_notifier =
1049{
1050 .notifier_call = palinfo_cpu_callback,
1051 .priority = 0,
1052};
1053
1054static int __init
1055palinfo_init(void)
1056{
1057 int i = 0;
1058
1059 printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
1060 palinfo_dir = proc_mkdir("pal", NULL);
1061
1062 /* Create palinfo dirs in /proc for all online cpus */
1063 for_each_online_cpu(i) {
1064 create_palinfo_proc_entries(i);
1065 }
1066
1067 /* Register for future delivery via notify registration */
1068 register_hotcpu_notifier(&palinfo_cpu_notifier);
1069
1070 return 0;
1071}
1072
1073static void __exit
1074palinfo_exit(void)
1075{
1076 int i = 0;
1077
1078 /* remove all nodes: depth first pass. Could optimize this */
1079 for_each_online_cpu(i) {
1080 remove_palinfo_proc_entries(i);
1081 }
1082
1083 /*
1084 * Remove the top level entry finally
1085 */
1086 remove_proc_entry(palinfo_dir->name, NULL);
1087
1088 /*
1089 * Unregister from cpu notifier callbacks
1090 */
1091 unregister_hotcpu_notifier(&palinfo_cpu_notifier);
1092}
1093
1094module_init(palinfo_init);
1095module_exit(palinfo_exit);
1/*
2 * palinfo.c
3 *
4 * Prints processor specific information reported by PAL.
5 * This code is based on specification of PAL as of the
6 * Intel IA-64 Architecture Software Developer's Manual v1.0.
7 *
8 *
9 * Copyright (C) 2000-2001, 2003 Hewlett-Packard Co
10 * Stephane Eranian <eranian@hpl.hp.com>
11 * Copyright (C) 2004 Intel Corporation
12 * Ashok Raj <ashok.raj@intel.com>
13 *
14 * 05/26/2000 S.Eranian initial release
15 * 08/21/2000 S.Eranian updated to July 2000 PAL specs
16 * 02/05/2001 S.Eranian fixed module support
17 * 10/23/2001 S.Eranian updated pal_perf_mon_info bug fixes
18 * 03/24/2004 Ashok Raj updated to work with CPU Hotplug
19 * 10/26/2006 Russ Anderson updated processor features to rev 2.2 spec
20 */
21#include <linux/types.h>
22#include <linux/errno.h>
23#include <linux/init.h>
24#include <linux/proc_fs.h>
25#include <linux/seq_file.h>
26#include <linux/mm.h>
27#include <linux/module.h>
28#include <linux/efi.h>
29#include <linux/notifier.h>
30#include <linux/cpu.h>
31#include <linux/cpumask.h>
32
33#include <asm/pal.h>
34#include <asm/sal.h>
35#include <asm/page.h>
36#include <asm/processor.h>
37#include <linux/smp.h>
38
39MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
40MODULE_DESCRIPTION("/proc interface to IA-64 PAL");
41MODULE_LICENSE("GPL");
42
43#define PALINFO_VERSION "0.5"
44
45typedef int (*palinfo_func_t)(struct seq_file *);
46
47typedef struct {
48 const char *name; /* name of the proc entry */
49 palinfo_func_t proc_read; /* function to call for reading */
50 struct proc_dir_entry *entry; /* registered entry (removal) */
51} palinfo_entry_t;
52
53
54/*
55 * A bunch of string array to get pretty printing
56 */
57
58static const char *cache_types[] = {
59 "", /* not used */
60 "Instruction",
61 "Data",
62 "Data/Instruction" /* unified */
63};
64
65static const char *cache_mattrib[]={
66 "WriteThrough",
67 "WriteBack",
68 "", /* reserved */
69 "" /* reserved */
70};
71
72static const char *cache_st_hints[]={
73 "Temporal, level 1",
74 "Reserved",
75 "Reserved",
76 "Non-temporal, all levels",
77 "Reserved",
78 "Reserved",
79 "Reserved",
80 "Reserved"
81};
82
83static const char *cache_ld_hints[]={
84 "Temporal, level 1",
85 "Non-temporal, level 1",
86 "Reserved",
87 "Non-temporal, all levels",
88 "Reserved",
89 "Reserved",
90 "Reserved",
91 "Reserved"
92};
93
94static const char *rse_hints[]={
95 "enforced lazy",
96 "eager stores",
97 "eager loads",
98 "eager loads and stores"
99};
100
101#define RSE_HINTS_COUNT ARRAY_SIZE(rse_hints)
102
103static const char *mem_attrib[]={
104 "WB", /* 000 */
105 "SW", /* 001 */
106 "010", /* 010 */
107 "011", /* 011 */
108 "UC", /* 100 */
109 "UCE", /* 101 */
110 "WC", /* 110 */
111 "NaTPage" /* 111 */
112};
113
114/*
115 * Take a 64bit vector and produces a string such that
116 * if bit n is set then 2^n in clear text is generated. The adjustment
117 * to the right unit is also done.
118 *
119 * Input:
120 * - a pointer to a buffer to hold the string
121 * - a 64-bit vector
122 * Ouput:
123 * - a pointer to the end of the buffer
124 *
125 */
126static void bitvector_process(struct seq_file *m, u64 vector)
127{
128 int i,j;
129 static const char *units[]={ "", "K", "M", "G", "T" };
130
131 for (i=0, j=0; i < 64; i++ , j=i/10) {
132 if (vector & 0x1)
133 seq_printf(m, "%d%s ", 1 << (i-j*10), units[j]);
134 vector >>= 1;
135 }
136}
137
138/*
139 * Take a 64bit vector and produces a string such that
140 * if bit n is set then register n is present. The function
141 * takes into account consecutive registers and prints out ranges.
142 *
143 * Input:
144 * - a pointer to a buffer to hold the string
145 * - a 64-bit vector
146 * Ouput:
147 * - a pointer to the end of the buffer
148 *
149 */
150static void bitregister_process(struct seq_file *m, u64 *reg_info, int max)
151{
152 int i, begin, skip = 0;
153 u64 value = reg_info[0];
154
155 value >>= i = begin = ffs(value) - 1;
156
157 for(; i < max; i++ ) {
158
159 if (i != 0 && (i%64) == 0) value = *++reg_info;
160
161 if ((value & 0x1) == 0 && skip == 0) {
162 if (begin <= i - 2)
163 seq_printf(m, "%d-%d ", begin, i-1);
164 else
165 seq_printf(m, "%d ", i-1);
166 skip = 1;
167 begin = -1;
168 } else if ((value & 0x1) && skip == 1) {
169 skip = 0;
170 begin = i;
171 }
172 value >>=1;
173 }
174 if (begin > -1) {
175 if (begin < 127)
176 seq_printf(m, "%d-127", begin);
177 else
178 seq_puts(m, "127");
179 }
180}
181
182static int power_info(struct seq_file *m)
183{
184 s64 status;
185 u64 halt_info_buffer[8];
186 pal_power_mgmt_info_u_t *halt_info =(pal_power_mgmt_info_u_t *)halt_info_buffer;
187 int i;
188
189 status = ia64_pal_halt_info(halt_info);
190 if (status != 0) return 0;
191
192 for (i=0; i < 8 ; i++ ) {
193 if (halt_info[i].pal_power_mgmt_info_s.im == 1) {
194 seq_printf(m,
195 "Power level %d:\n"
196 "\tentry_latency : %d cycles\n"
197 "\texit_latency : %d cycles\n"
198 "\tpower consumption : %d mW\n"
199 "\tCache+TLB coherency : %s\n", i,
200 halt_info[i].pal_power_mgmt_info_s.entry_latency,
201 halt_info[i].pal_power_mgmt_info_s.exit_latency,
202 halt_info[i].pal_power_mgmt_info_s.power_consumption,
203 halt_info[i].pal_power_mgmt_info_s.co ? "Yes" : "No");
204 } else {
205 seq_printf(m,"Power level %d: not implemented\n", i);
206 }
207 }
208 return 0;
209}
210
/*
 * Print size, associativity, latency and hint information for every
 * cache level reported by PAL_CACHE_SUMMARY / PAL_CACHE_CONFIG_INFO.
 */
static int cache_info(struct seq_file *m)
{
	unsigned long i, levels, unique_caches;
	pal_cache_config_info_t cci;
	int j, k;
	long status;

	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
		return 0;
	}

	seq_printf(m, "Cache levels : %ld\nUnique caches : %ld\n\n",
		   levels, unique_caches);

	for (i=0; i < levels; i++) {
		/* j=2 queries the data side, j=1 the instruction side */
		for (j=2; j >0 ; j--) {
			/* even without unification some level may not be present */
			if ((status=ia64_pal_cache_config_info(i,j, &cci)) != 0)
				continue;

			seq_printf(m,
				   "%s Cache level %lu:\n"
				   "\tSize : %u bytes\n"
				   "\tAttributes : ",
				   cache_types[j+cci.pcci_unified], i+1,
				   cci.pcci_cache_size);

			if (cci.pcci_unified)
				seq_puts(m, "Unified ");

			seq_printf(m, "%s\n", cache_mattrib[cci.pcci_cache_attr]);

			/* line size and stride are encoded as powers of two */
			seq_printf(m,
				   "\tAssociativity : %d\n"
				   "\tLine size : %d bytes\n"
				   "\tStride : %d bytes\n",
				   cci.pcci_assoc,
				   1<<cci.pcci_line_size,
				   1<<cci.pcci_stride);
			/* j==1 is the instruction cache: stores do not apply */
			if (j == 1)
				seq_puts(m, "\tStore latency : N/A\n");
			else
				seq_printf(m, "\tStore latency : %d cycle(s)\n",
					   cci.pcci_st_latency);

			seq_printf(m,
				   "\tLoad latency : %d cycle(s)\n"
				   "\tStore hints : ", cci.pcci_ld_latency);

			/* decode the per-bit store hint mask */
			for(k=0; k < 8; k++ ) {
				if ( cci.pcci_st_hints & 0x1)
					seq_printf(m, "[%s]", cache_st_hints[k]);
				cci.pcci_st_hints >>=1;
			}
			seq_puts(m, "\n\tLoad hints : ");

			/* decode the per-bit load hint mask */
			for(k=0; k < 8; k++ ) {
				if (cci.pcci_ld_hints & 0x1)
					seq_printf(m, "[%s]", cache_ld_hints[k]);
				cci.pcci_ld_hints >>=1;
			}
			seq_printf(m,
				   "\n\tAlias boundary : %d byte(s)\n"
				   "\tTag LSB : %d\n"
				   "\tTag MSB : %d\n",
				   1<<cci.pcci_alias_boundary, cci.pcci_tag_lsb,
				   cci.pcci_tag_msb);

			/* when unified, data(j=2) is enough */
			if (cci.pcci_unified)
				break;
		}
	}
	return 0;
}
287
288
/*
 * Print the virtual-memory configuration: address-space sizes, PKRs,
 * supported memory attributes, insertable/purgeable page sizes, PTCE
 * purge parameters and the translation-cache hierarchy.
 */
static int vm_info(struct seq_file *m)
{
	u64 tr_pages =0, vw_pages=0, tc_pages;
	u64 attrib;
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	pal_tc_info_u_t	tc_info;
	ia64_ptce_info_t ptce;
	const char *sep;
	int i, j;
	long status;

	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
	} else {

		seq_printf(m,
		     "Physical Address Space : %d bits\n"
		     "Virtual Address Space : %d bits\n"
		     "Protection Key Registers(PKR) : %d\n"
		     "Implemented bits in PKR.key : %d\n"
		     "Hash Tag ID : 0x%x\n"
		     "Size of RR.rid : %d\n"
		     "Max Purges : ",
		     vm_info_1.pal_vm_info_1_s.phys_add_size,
		     vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
		     vm_info_1.pal_vm_info_1_s.max_pkr+1,
		     vm_info_1.pal_vm_info_1_s.key_size,
		     vm_info_1.pal_vm_info_1_s.hash_tag_id,
		     vm_info_2.pal_vm_info_2_s.rid_size);
		/* PAL_MAX_PURGES is the architected "no limit" sentinel */
		if (vm_info_2.pal_vm_info_2_s.max_purges == PAL_MAX_PURGES)
			seq_puts(m, "unlimited\n");
		else
			seq_printf(m, "%d\n",
				   vm_info_2.pal_vm_info_2_s.max_purges ?
				   vm_info_2.pal_vm_info_2_s.max_purges : 1);
	}

	/* list the memory attributes whose bits are set in the mask */
	if (ia64_pal_mem_attrib(&attrib) == 0) {
		seq_puts(m, "Supported memory attributes : ");
		sep = "";
		for (i = 0; i < 8; i++) {
			if (attrib & (1 << i)) {
				seq_printf(m, "%s%s", sep, mem_attrib[i]);
				sep = ", ";
			}
		}
		seq_putc(m, '\n');
	}

	if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status);
	} else {

		seq_printf(m,
			   "\nTLB walker : %simplemented\n"
			   "Number of DTR : %d\n"
			   "Number of ITR : %d\n"
			   "TLB insertable page sizes : ",
			   vm_info_1.pal_vm_info_1_s.vw ? "" : "not ",
			   vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
			   vm_info_1.pal_vm_info_1_s.max_itr_entry+1);

		/* tr_pages / vw_pages are bit vectors of supported page sizes */
		bitvector_process(m, tr_pages);

		seq_puts(m, "\nTLB purgeable page sizes : ");

		bitvector_process(m, vw_pages);
	}

	if ((status = ia64_get_ptce(&ptce)) != 0) {
		printk(KERN_ERR "ia64_get_ptce=%ld\n", status);
	} else {
		seq_printf(m,
		     "\nPurge base address : 0x%016lx\n"
		     "Purge outer loop count : %d\n"
		     "Purge inner loop count : %d\n"
		     "Purge outer loop stride : %d\n"
		     "Purge inner loop stride : %d\n",
		     ptce.base, ptce.count[0], ptce.count[1],
		     ptce.stride[0], ptce.stride[1]);

		seq_printf(m,
		     "TC Levels : %d\n"
		     "Unique TC(s) : %d\n",
		     vm_info_1.pal_vm_info_1_s.num_tc_levels,
		     vm_info_1.pal_vm_info_1_s.max_unique_tcs);

		/* j=2 queries the data side, j=1 the instruction side */
		for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) {
			for (j=2; j>0 ; j--) {
				tc_pages = 0; /* just in case */

				/* even without unification, some levels may not be present */
				if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0)
					continue;

				seq_printf(m,
				     "\n%s Translation Cache Level %d:\n"
				     "\tHash sets : %d\n"
				     "\tAssociativity : %d\n"
				     "\tNumber of entries : %d\n"
				     "\tFlags : ",
				     cache_types[j+tc_info.tc_unified], i+1,
				     tc_info.tc_num_sets,
				     tc_info.tc_associativity,
				     tc_info.tc_num_entries);

				if (tc_info.tc_pf)
					seq_puts(m, "PreferredPageSizeOptimized ");
				if (tc_info.tc_unified)
					seq_puts(m, "Unified ");
				if (tc_info.tc_reduce_tr)
					seq_puts(m, "TCReduction");

				seq_puts(m, "\n\tSupported page sizes: ");

				bitvector_process(m, tc_pages);

				/* when unified date (j=2) is enough */
				if (tc_info.tc_unified)
					break;
			}
		}
	}

	seq_putc(m, '\n');
	return 0;
}
417
418
419static int register_info(struct seq_file *m)
420{
421 u64 reg_info[2];
422 u64 info;
423 unsigned long phys_stacked;
424 pal_hints_u_t hints;
425 unsigned long iregs, dregs;
426 static const char * const info_type[] = {
427 "Implemented AR(s)",
428 "AR(s) with read side-effects",
429 "Implemented CR(s)",
430 "CR(s) with read side-effects",
431 };
432
433 for(info=0; info < 4; info++) {
434 if (ia64_pal_register_info(info, ®_info[0], ®_info[1]) != 0)
435 return 0;
436 seq_printf(m, "%-32s : ", info_type[info]);
437 bitregister_process(m, reg_info, 128);
438 seq_putc(m, '\n');
439 }
440
441 if (ia64_pal_rse_info(&phys_stacked, &hints) == 0)
442 seq_printf(m,
443 "RSE stacked physical registers : %ld\n"
444 "RSE load/store hints : %ld (%s)\n",
445 phys_stacked, hints.ph_data,
446 hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)");
447
448 if (ia64_pal_debug_info(&iregs, &dregs))
449 return 0;
450
451 seq_printf(m,
452 "Instruction debug register pairs : %ld\n"
453 "Data debug register pairs : %ld\n", iregs, dregs);
454
455 return 0;
456}
457
/*
 * Feature names indexed by bit number within a feature set; NULL slots
 * are printed with a generic "Feature set N bit M" label by
 * feature_set_info().
 */
static const char *const proc_features_0[]={		/* Feature set 0 */
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,
	"Unimplemented instruction address fault",
	"INIT, PMI, and LINT pins",
	"Simple unimplemented instr addresses",
	"Variable P-state performance",
	"Virtual machine features implemented",
	"XIP,XPSR,XFS implemented",
	"XR1-XR3 implemented",
	"Disable dynamic predicate prediction",
	"Disable processor physical number",
	"Disable dynamic data cache prefetch",
	"Disable dynamic inst cache prefetch",
	"Disable dynamic branch prediction",
	NULL, NULL, NULL, NULL,
	"Disable P-states",
	"Enable MCA on Data Poisoning",
	"Enable vmsw instruction",
	"Enable extern environmental notification",
	"Disable BINIT on processor time-out",
	"Disable dynamic power management (DPM)",
	"Disable coherency",
	"Disable cache",
	"Enable CMCI promotion",
	"Enable MCA to BINIT promotion",
	"Enable MCA promotion",
	"Enable BERR promotion"
};

static const char *const proc_features_16[]={		/* Feature set 16 */
	"Disable ETM",
	"Enable ETM",
	"Enable MCA on half-way timer",
	"Enable snoop WC",
	NULL,
	"Enable Fast Deferral",
	"Disable MCA on memory aliasing",
	"Enable RSB",
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	"DP system processor",
	"Low Voltage",
	"HT supported",
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL
};

/* per-feature-set name tables; a NULL table means no names are known */
static const char *const *const proc_features[]={
	proc_features_0,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL,
	proc_features_16,
	NULL, NULL, NULL, NULL,
};
517
518static void feature_set_info(struct seq_file *m, u64 avail, u64 status, u64 control,
519 unsigned long set)
520{
521 const char *const *vf, *const *v;
522 int i;
523
524 vf = v = proc_features[set];
525 for(i=0; i < 64; i++, avail >>=1, status >>=1, control >>=1) {
526
527 if (!(control)) /* No remaining bits set */
528 break;
529 if (!(avail & 0x1)) /* Print only bits that are available */
530 continue;
531 if (vf)
532 v = vf + i;
533 if ( v && *v ) {
534 seq_printf(m, "%-40s : %s %s\n", *v,
535 avail & 0x1 ? (status & 0x1 ?
536 "On " : "Off"): "",
537 avail & 0x1 ? (control & 0x1 ?
538 "Ctrl" : "NoCtrl"): "");
539 } else {
540 seq_printf(m, "Feature set %2ld bit %2d\t\t\t"
541 " : %s %s\n",
542 set, i,
543 avail & 0x1 ? (status & 0x1 ?
544 "On " : "Off"): "",
545 avail & 0x1 ? (control & 0x1 ?
546 "Ctrl" : "NoCtrl"): "");
547 }
548 }
549}
550
551static int processor_info(struct seq_file *m)
552{
553 u64 avail=1, status=1, control=1, feature_set=0;
554 s64 ret;
555
556 do {
557 ret = ia64_pal_proc_get_features(&avail, &status, &control,
558 feature_set);
559 if (ret < 0)
560 return 0;
561
562 if (ret == 1) {
563 feature_set++;
564 continue;
565 }
566
567 feature_set_info(m, avail, status, control, feature_set);
568 feature_set++;
569 } while(1);
570
571 return 0;
572}
573
/* bus feature names indexed by bit number; NULL bits are not printed */
static const char *const bus_features[]={
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,
	"Request Bus Parking",
	"Bus Lock Mask",
	"Enable Half Transfer",
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL,
	"Enable Cache Line Repl. Shared",
	"Enable Cache Line Repl. Exclusive",
	"Disable Transaction Queuing",
	"Disable Response Error Checking",
	"Disable Bus Error Checking",
	"Disable Bus Requester Internal Error Signalling",
	"Disable Bus Requester Error Signalling",
	"Disable Bus Initialization Event Checking",
	"Disable Bus Initialization Event Signalling",
	"Disable Bus Address Error Checking",
	"Disable Bus Address Error Signalling",
	"Disable Bus Data Error Checking"
};
598
599
600static int bus_info(struct seq_file *m)
601{
602 const char *const *v = bus_features;
603 pal_bus_features_u_t av, st, ct;
604 u64 avail, status, control;
605 int i;
606 s64 ret;
607
608 if ((ret=ia64_pal_bus_get_features(&av, &st, &ct)) != 0)
609 return 0;
610
611 avail = av.pal_bus_features_val;
612 status = st.pal_bus_features_val;
613 control = ct.pal_bus_features_val;
614
615 for(i=0; i < 64; i++, v++, avail >>=1, status >>=1, control >>=1) {
616 if ( ! *v )
617 continue;
618 seq_printf(m, "%-48s : %s%s %s\n", *v,
619 avail & 0x1 ? "" : "NotImpl",
620 avail & 0x1 ? (status & 0x1 ? "On" : "Off"): "",
621 avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): "");
622 }
623 return 0;
624}
625
626static int version_info(struct seq_file *m)
627{
628 pal_version_u_t min_ver, cur_ver;
629
630 if (ia64_pal_version(&min_ver, &cur_ver) != 0)
631 return 0;
632
633 seq_printf(m,
634 "PAL_vendor : 0x%02x (min=0x%02x)\n"
635 "PAL_A : %02x.%02x (min=%02x.%02x)\n"
636 "PAL_B : %02x.%02x (min=%02x.%02x)\n",
637 cur_ver.pal_version_s.pv_pal_vendor,
638 min_ver.pal_version_s.pv_pal_vendor,
639 cur_ver.pal_version_s.pv_pal_a_model,
640 cur_ver.pal_version_s.pv_pal_a_rev,
641 min_ver.pal_version_s.pv_pal_a_model,
642 min_ver.pal_version_s.pv_pal_a_rev,
643 cur_ver.pal_version_s.pv_pal_b_model,
644 cur_ver.pal_version_s.pv_pal_b_rev,
645 min_ver.pal_version_s.pv_pal_b_model,
646 min_ver.pal_version_s.pv_pal_b_rev);
647 return 0;
648}
649
/*
 * Report performance-monitor configuration: counter geometry and the
 * implemented/capable PMC and PMD register vectors.
 */
static int perfmon_info(struct seq_file *m)
{
	/* PAL_PERF_MON_INFO fills four 256-bit register masks (4 words each) */
	u64 pm_buffer[16];
	pal_perf_mon_info_u_t pm_info;

	if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0)
		return 0;

	seq_printf(m,
		   "PMC/PMD pairs : %d\n"
		   "Counter width : %d bits\n"
		   "Cycle event number : %d\n"
		   "Retired event number : %d\n"
		   "Implemented PMC : ",
		   pm_info.pal_perf_mon_info_s.generic,
		   pm_info.pal_perf_mon_info_s.width,
		   pm_info.pal_perf_mon_info_s.cycles,
		   pm_info.pal_perf_mon_info_s.retired);

	bitregister_process(m, pm_buffer, 256);
	seq_puts(m, "\nImplemented PMD : ");
	bitregister_process(m, pm_buffer+4, 256);
	seq_puts(m, "\nCycles count capable : ");
	bitregister_process(m, pm_buffer+8, 256);
	seq_puts(m, "\nRetired bundles count capable : ");

#ifdef CONFIG_ITANIUM
	/*
	 * PAL_PERF_MON_INFO reports that only PMC4 can be used to count CPU_CYCLES
	 * which is wrong, both PMC4 and PMD5 support it.
	 */
	if (pm_buffer[12] == 0x10)
		pm_buffer[12]=0x30;
#endif

	bitregister_process(m, pm_buffer+12, 256);
	seq_putc(m, '\n');
	return 0;
}
689
690static int frequency_info(struct seq_file *m)
691{
692 struct pal_freq_ratio proc, itc, bus;
693 unsigned long base;
694
695 if (ia64_pal_freq_base(&base) == -1)
696 seq_puts(m, "Output clock : not implemented\n");
697 else
698 seq_printf(m, "Output clock : %ld ticks/s\n", base);
699
700 if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;
701
702 seq_printf(m,
703 "Processor/Clock ratio : %d/%d\n"
704 "Bus/Clock ratio : %d/%d\n"
705 "ITC/Clock ratio : %d/%d\n",
706 proc.num, proc.den, bus.num, bus.den, itc.num, itc.den);
707 return 0;
708}
709
710static int tr_info(struct seq_file *m)
711{
712 long status;
713 pal_tr_valid_u_t tr_valid;
714 u64 tr_buffer[4];
715 pal_vm_info_1_u_t vm_info_1;
716 pal_vm_info_2_u_t vm_info_2;
717 unsigned long i, j;
718 unsigned long max[3], pgm;
719 struct ifa_reg {
720 unsigned long valid:1;
721 unsigned long ig:11;
722 unsigned long vpn:52;
723 } *ifa_reg;
724 struct itir_reg {
725 unsigned long rv1:2;
726 unsigned long ps:6;
727 unsigned long key:24;
728 unsigned long rv2:32;
729 } *itir_reg;
730 struct gr_reg {
731 unsigned long p:1;
732 unsigned long rv1:1;
733 unsigned long ma:3;
734 unsigned long a:1;
735 unsigned long d:1;
736 unsigned long pl:2;
737 unsigned long ar:3;
738 unsigned long ppn:38;
739 unsigned long rv2:2;
740 unsigned long ed:1;
741 unsigned long ig:11;
742 } *gr_reg;
743 struct rid_reg {
744 unsigned long ig1:1;
745 unsigned long rv1:1;
746 unsigned long ig2:6;
747 unsigned long rid:24;
748 unsigned long rv2:32;
749 } *rid_reg;
750
751 if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
752 printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
753 return 0;
754 }
755 max[0] = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
756 max[1] = vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;
757
758 for (i=0; i < 2; i++ ) {
759 for (j=0; j < max[i]; j++) {
760
761 status = ia64_pal_tr_read(j, i, tr_buffer, &tr_valid);
762 if (status != 0) {
763 printk(KERN_ERR "palinfo: pal call failed on tr[%lu:%lu]=%ld\n",
764 i, j, status);
765 continue;
766 }
767
768 ifa_reg = (struct ifa_reg *)&tr_buffer[2];
769
770 if (ifa_reg->valid == 0)
771 continue;
772
773 gr_reg = (struct gr_reg *)tr_buffer;
774 itir_reg = (struct itir_reg *)&tr_buffer[1];
775 rid_reg = (struct rid_reg *)&tr_buffer[3];
776
777 pgm = -1 << (itir_reg->ps - 12);
778 seq_printf(m,
779 "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n"
780 "\tppn : 0x%lx\n"
781 "\tvpn : 0x%lx\n"
782 "\tps : ",
783 "ID"[i], j,
784 tr_valid.pal_tr_valid_s.access_rights_valid,
785 tr_valid.pal_tr_valid_s.priv_level_valid,
786 tr_valid.pal_tr_valid_s.dirty_bit_valid,
787 tr_valid.pal_tr_valid_s.mem_attr_valid,
788 (gr_reg->ppn & pgm)<< 12, (ifa_reg->vpn & pgm)<< 12);
789
790 bitvector_process(m, 1<< itir_reg->ps);
791
792 seq_printf(m,
793 "\n\tpl : %d\n"
794 "\tar : %d\n"
795 "\trid : %x\n"
796 "\tp : %d\n"
797 "\tma : %d\n"
798 "\td : %d\n",
799 gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma,
800 gr_reg->d);
801 }
802 }
803 return 0;
804}
805
806
807
808/*
809 * List {name,function} pairs for every entry in /proc/palinfo/cpu*
810 */
811static const palinfo_entry_t palinfo_entries[]={
812 { "version_info", version_info, },
813 { "vm_info", vm_info, },
814 { "cache_info", cache_info, },
815 { "power_info", power_info, },
816 { "register_info", register_info, },
817 { "processor_info", processor_info, },
818 { "perfmon_info", perfmon_info, },
819 { "frequency_info", frequency_info, },
820 { "bus_info", bus_info },
821 { "tr_info", tr_info, }
822};
823
824#define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries)
825
826static struct proc_dir_entry *palinfo_dir;
827
828/*
829 * This data structure is used to pass which cpu,function is being requested
830 * It must fit in a 64bit quantity to be passed to the proc callback routine
831 *
832 * In SMP mode, when we get a request for another CPU, we must call that
833 * other CPU using IPI and wait for the result before returning.
834 */
835typedef union {
836 u64 value;
837 struct {
838 unsigned req_cpu: 32; /* for which CPU this info is */
839 unsigned func_id: 32; /* which function is requested */
840 } pal_func_cpu;
841} pal_func_cpu_u_t;
842
843#define req_cpu pal_func_cpu.req_cpu
844#define func_id pal_func_cpu.func_id
845
846#ifdef CONFIG_SMP
847
848/*
849 * used to hold information about final function to call
850 */
851typedef struct {
852 palinfo_func_t func; /* pointer to function to call */
853 struct seq_file *m; /* buffer to store results */
854 int ret; /* return value from call */
855} palinfo_smp_data_t;
856
857
858/*
859 * this function does the actual final call and he called
860 * from the smp code, i.e., this is the palinfo callback routine
861 */
862static void
863palinfo_smp_call(void *info)
864{
865 palinfo_smp_data_t *data = (palinfo_smp_data_t *)info;
866 data->ret = (*data->func)(data->m);
867}
868
869/*
870 * function called to trigger the IPI, we need to access a remote CPU
871 * Return:
872 * 0 : error or nothing to output
873 * otherwise how many bytes in the "page" buffer were written
874 */
875static
876int palinfo_handle_smp(struct seq_file *m, pal_func_cpu_u_t *f)
877{
878 palinfo_smp_data_t ptr;
879 int ret;
880
881 ptr.func = palinfo_entries[f->func_id].proc_read;
882 ptr.m = m;
883 ptr.ret = 0; /* just in case */
884
885
886 /* will send IPI to other CPU and wait for completion of remote call */
887 if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) {
888 printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
889 "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
890 return 0;
891 }
892 return ptr.ret;
893}
894#else /* ! CONFIG_SMP */
/* UP build: every request targets the only CPU, so reaching here is a bug */
static
int palinfo_handle_smp(struct seq_file *m, pal_func_cpu_u_t *f)
{
	printk(KERN_ERR "palinfo: should not be called with non SMP kernel\n");
	return 0;
}
901#endif /* CONFIG_SMP */
902
903/*
904 * Entry point routine: all calls go through this function
905 */
906static int proc_palinfo_show(struct seq_file *m, void *v)
907{
908 pal_func_cpu_u_t *f = (pal_func_cpu_u_t *)&m->private;
909
910 /*
911 * in SMP mode, we may need to call another CPU to get correct
912 * information. PAL, by definition, is processor specific
913 */
914 if (f->req_cpu == get_cpu())
915 (*palinfo_entries[f->func_id].proc_read)(m);
916 else
917 palinfo_handle_smp(m, f);
918
919 put_cpu();
920 return 0;
921}
922
/* open: the PDE data is a pal_func_cpu_u_t encoding cpu and function id */
static int proc_palinfo_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_palinfo_show, PDE_DATA(inode));
}
927
/* file operations for every /proc/pal/cpu*/<entry> file (single-shot seq_file) */
static const struct file_operations proc_palinfo_fops = {
	.open		= proc_palinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
934
935static void
936create_palinfo_proc_entries(unsigned int cpu)
937{
938 pal_func_cpu_u_t f;
939 struct proc_dir_entry *cpu_dir;
940 int j;
941 char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
942 sprintf(cpustr, "cpu%d", cpu);
943
944 cpu_dir = proc_mkdir(cpustr, palinfo_dir);
945 if (!cpu_dir)
946 return;
947
948 f.req_cpu = cpu;
949
950 for (j=0; j < NR_PALINFO_ENTRIES; j++) {
951 f.func_id = j;
952 proc_create_data(palinfo_entries[j].name, 0, cpu_dir,
953 &proc_palinfo_fops, (void *)f.value);
954 }
955}
956
/* Remove the whole /proc/pal/cpuN subtree for a departing CPU. */
static void
remove_palinfo_proc_entries(unsigned int hcpu)
{
	char cpustr[3+4+1];	/* cpu numbers are up to 4095 on itanic */
	sprintf(cpustr, "cpu%d", hcpu);
	remove_proc_subtree(cpustr, palinfo_dir);
}
964
965static int palinfo_cpu_callback(struct notifier_block *nfb,
966 unsigned long action, void *hcpu)
967{
968 unsigned int hotcpu = (unsigned long)hcpu;
969
970 switch (action) {
971 case CPU_ONLINE:
972 case CPU_ONLINE_FROZEN:
973 create_palinfo_proc_entries(hotcpu);
974 break;
975 case CPU_DEAD:
976 case CPU_DEAD_FROZEN:
977 remove_palinfo_proc_entries(hotcpu);
978 break;
979 }
980 return NOTIFY_OK;
981}
982
/* hotplug notifier; __refdata: callback may reference __cpuinit text */
static struct notifier_block __refdata palinfo_cpu_notifier =
{
	.notifier_call = palinfo_cpu_callback,
	.priority = 0,
};
988
989static int __init
990palinfo_init(void)
991{
992 int i = 0;
993
994 printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
995 palinfo_dir = proc_mkdir("pal", NULL);
996 if (!palinfo_dir)
997 return -ENOMEM;
998
999 cpu_notifier_register_begin();
1000
1001 /* Create palinfo dirs in /proc for all online cpus */
1002 for_each_online_cpu(i) {
1003 create_palinfo_proc_entries(i);
1004 }
1005
1006 /* Register for future delivery via notify registration */
1007 __register_hotcpu_notifier(&palinfo_cpu_notifier);
1008
1009 cpu_notifier_register_done();
1010
1011 return 0;
1012}
1013
/* Module exit: stop hotplug callbacks, then remove the whole /proc/pal tree. */
static void __exit
palinfo_exit(void)
{
	unregister_hotcpu_notifier(&palinfo_cpu_notifier);
	remove_proc_subtree("pal", NULL);
}
1020
1021module_init(palinfo_init);
1022module_exit(palinfo_exit);