// SPDX-License-Identifier: GPL-2.0-only
/*
 * palinfo.c
 *
 * Prints processor specific information reported by PAL.
 * This code is based on specification of PAL as of the
 * Intel IA-64 Architecture Software Developer's Manual v1.0.
 *
 *
 * Copyright (C) 2000-2001, 2003 Hewlett-Packard Co
 * Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2004 Intel Corporation
 * Ashok Raj <ashok.raj@intel.com>
 *
 * 05/26/2000 S.Eranian initial release
 * 08/21/2000 S.Eranian updated to July 2000 PAL specs
 * 02/05/2001 S.Eranian fixed module support
 * 10/23/2001 S.Eranian updated pal_perf_mon_info bug fixes
 * 03/24/2004 Ashok Raj updated to work with CPU Hotplug
 * 10/26/2006 Russ Anderson updated processor features to rev 2.2 spec
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/smp.h>

MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
MODULE_DESCRIPTION("/proc interface to IA-64 PAL");
MODULE_LICENSE("GPL");

#define PALINFO_VERSION "0.5"

typedef int (*palinfo_func_t)(struct seq_file *);

typedef struct {
	const char *name;		/* name of the proc entry */
	palinfo_func_t proc_read;	/* function to call for reading */
	struct proc_dir_entry *entry;	/* registered entry (removal) */
} palinfo_entry_t;


/*
 * A bunch of string arrays for pretty printing
 */

static const char *cache_types[] = {
	"",			/* not used */
	"Instruction",
	"Data",
	"Data/Instruction"	/* unified */
};

static const char *cache_mattrib[]={
	"WriteThrough",
	"WriteBack",
	"",		/* reserved */
	""		/* reserved */
};

static const char *cache_st_hints[]={
	"Temporal, level 1",
	"Reserved",
	"Reserved",
	"Non-temporal, all levels",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved"
};

static const char *cache_ld_hints[]={
	"Temporal, level 1",
	"Non-temporal, level 1",
	"Reserved",
	"Non-temporal, all levels",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved"
};

static const char *rse_hints[]={
	"enforced lazy",
	"eager stores",
	"eager loads",
	"eager loads and stores"
};

#define RSE_HINTS_COUNT ARRAY_SIZE(rse_hints)

static const char *mem_attrib[]={
	"WB",		/* 000 */
	"SW",		/* 001 */
	"010",		/* 010 */
	"011",		/* 011 */
	"UC",		/* 100 */
	"UCE",		/* 101 */
	"WC",		/* 110 */
	"NaTPage"	/* 111 */
};

/*
 * Take a 64-bit vector and, for every bit n that is set, print the
 * value 2^n in human-readable form, scaled to the right unit (K/M/G/T).
 *
 * Input:
 *	- the seq_file to print to
 *	- a 64-bit vector
 */
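/*
 * Example (illustrative): a vector with bits 12 and 16 set prints
 * "4K 64K ", i.e. the 4KB and 64KB page sizes.
 */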
static void bitvector_process(struct seq_file *m, u64 vector)
{
	int i,j;
	static const char *units[]={ "", "K", "M", "G", "T" };

	for (i=0, j=0; i < 64; i++ , j=i/10) {
		if (vector & 0x1)
			seq_printf(m, "%d%s ", 1 << (i-j*10), units[j]);
		vector >>= 1;
	}
}

/*
 * Take a register-presence bit vector (possibly spread over several
 * 64-bit words) and, for every bit n that is set, report register n as
 * present. Consecutive registers are coalesced into ranges.
 *
 * Input:
 *	- the seq_file to print to
 *	- a pointer to the bit vector
 *	- the number of bits to examine
 */
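/*
 * Example (illustrative): if only bits 0-7 of the first 64-bit word are
 * set and the rest of the vector is clear, this prints "0-7 ".
 */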
static void bitregister_process(struct seq_file *m, u64 *reg_info, int max)
{
	int i, begin, skip = 0;
	u64 value = reg_info[0];

	value >>= i = begin = ffs(value) - 1;

	for(; i < max; i++ ) {

		if (i != 0 && (i%64) == 0) value = *++reg_info;

		if ((value & 0x1) == 0 && skip == 0) {
			if (begin <= i - 2)
				seq_printf(m, "%d-%d ", begin, i-1);
			else
				seq_printf(m, "%d ", i-1);
			skip = 1;
			begin = -1;
		} else if ((value & 0x1) && skip == 1) {
			skip = 0;
			begin = i;
		}
		value >>=1;
	}
	if (begin > -1) {
		if (begin < 127)
			seq_printf(m, "%d-127", begin);
		else
			seq_puts(m, "127");
	}
}

static int power_info(struct seq_file *m)
{
	s64 status;
	u64 halt_info_buffer[8];
	pal_power_mgmt_info_u_t *halt_info =(pal_power_mgmt_info_u_t *)halt_info_buffer;
	int i;

	status = ia64_pal_halt_info(halt_info);
	if (status != 0) return 0;

	for (i=0; i < 8 ; i++ ) {
		if (halt_info[i].pal_power_mgmt_info_s.im == 1) {
			seq_printf(m,
				   "Power level %d:\n"
				   "\tentry_latency : %d cycles\n"
				   "\texit_latency : %d cycles\n"
				   "\tpower consumption : %d mW\n"
				   "\tCache+TLB coherency : %s\n", i,
				   halt_info[i].pal_power_mgmt_info_s.entry_latency,
				   halt_info[i].pal_power_mgmt_info_s.exit_latency,
				   halt_info[i].pal_power_mgmt_info_s.power_consumption,
				   halt_info[i].pal_power_mgmt_info_s.co ? "Yes" : "No");
		} else {
			seq_printf(m,"Power level %d: not implemented\n", i);
		}
	}
	return 0;
}

static int cache_info(struct seq_file *m)
{
	unsigned long i, levels, unique_caches;
	pal_cache_config_info_t cci;
	int j, k;
	long status;

	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
		return 0;
	}

	seq_printf(m, "Cache levels : %ld\nUnique caches : %ld\n\n",
		   levels, unique_caches);

	for (i=0; i < levels; i++) {
		for (j=2; j >0 ; j--) {
			/* even without unification some level may not be present */
			if ((status=ia64_pal_cache_config_info(i,j, &cci)) != 0)
				continue;

			seq_printf(m,
				   "%s Cache level %lu:\n"
				   "\tSize : %u bytes\n"
				   "\tAttributes : ",
				   cache_types[j+cci.pcci_unified], i+1,
				   cci.pcci_cache_size);

			if (cci.pcci_unified)
				seq_puts(m, "Unified ");

			seq_printf(m, "%s\n", cache_mattrib[cci.pcci_cache_attr]);

			seq_printf(m,
				   "\tAssociativity : %d\n"
				   "\tLine size : %d bytes\n"
				   "\tStride : %d bytes\n",
				   cci.pcci_assoc,
				   1<<cci.pcci_line_size,
				   1<<cci.pcci_stride);
			if (j == 1)
				seq_puts(m, "\tStore latency : N/A\n");
			else
				seq_printf(m, "\tStore latency : %d cycle(s)\n",
					   cci.pcci_st_latency);

			seq_printf(m,
				   "\tLoad latency : %d cycle(s)\n"
				   "\tStore hints : ", cci.pcci_ld_latency);

			for(k=0; k < 8; k++ ) {
				if ( cci.pcci_st_hints & 0x1)
					seq_printf(m, "[%s]", cache_st_hints[k]);
				cci.pcci_st_hints >>=1;
			}
			seq_puts(m, "\n\tLoad hints : ");

			for(k=0; k < 8; k++ ) {
				if (cci.pcci_ld_hints & 0x1)
					seq_printf(m, "[%s]", cache_ld_hints[k]);
				cci.pcci_ld_hints >>=1;
			}
			seq_printf(m,
				   "\n\tAlias boundary : %d byte(s)\n"
				   "\tTag LSB : %d\n"
				   "\tTag MSB : %d\n",
				   1<<cci.pcci_alias_boundary, cci.pcci_tag_lsb,
				   cci.pcci_tag_msb);

			/* when unified, data(j=2) is enough */
			if (cci.pcci_unified)
				break;
		}
	}
	return 0;
}


static int vm_info(struct seq_file *m)
{
	u64 tr_pages =0, vw_pages=0, tc_pages;
	u64 attrib;
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	pal_tc_info_u_t tc_info;
	ia64_ptce_info_t ptce;
	const char *sep;
	int i, j;
	long status;

	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
	} else {

		seq_printf(m,
			   "Physical Address Space : %d bits\n"
			   "Virtual Address Space : %d bits\n"
			   "Protection Key Registers(PKR) : %d\n"
			   "Implemented bits in PKR.key : %d\n"
			   "Hash Tag ID : 0x%x\n"
			   "Size of RR.rid : %d\n"
			   "Max Purges : ",
			   vm_info_1.pal_vm_info_1_s.phys_add_size,
			   vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
			   vm_info_1.pal_vm_info_1_s.max_pkr+1,
			   vm_info_1.pal_vm_info_1_s.key_size,
			   vm_info_1.pal_vm_info_1_s.hash_tag_id,
			   vm_info_2.pal_vm_info_2_s.rid_size);
		if (vm_info_2.pal_vm_info_2_s.max_purges == PAL_MAX_PURGES)
			seq_puts(m, "unlimited\n");
		else
			seq_printf(m, "%d\n",
				   vm_info_2.pal_vm_info_2_s.max_purges ?
				   vm_info_2.pal_vm_info_2_s.max_purges : 1);
	}

	if (ia64_pal_mem_attrib(&attrib) == 0) {
		seq_puts(m, "Supported memory attributes : ");
		sep = "";
		for (i = 0; i < 8; i++) {
			if (attrib & (1 << i)) {
				seq_printf(m, "%s%s", sep, mem_attrib[i]);
				sep = ", ";
			}
		}
		seq_putc(m, '\n');
	}

	if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status);
	} else {

		seq_printf(m,
			   "\nTLB walker : %simplemented\n"
			   "Number of DTR : %d\n"
			   "Number of ITR : %d\n"
			   "TLB insertable page sizes : ",
			   vm_info_1.pal_vm_info_1_s.vw ? "" : "not ",
			   vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
			   vm_info_1.pal_vm_info_1_s.max_itr_entry+1);

		bitvector_process(m, tr_pages);

		seq_puts(m, "\nTLB purgeable page sizes : ");

		bitvector_process(m, vw_pages);
	}

	if ((status = ia64_get_ptce(&ptce)) != 0) {
		printk(KERN_ERR "ia64_get_ptce=%ld\n", status);
	} else {
		seq_printf(m,
			   "\nPurge base address : 0x%016lx\n"
			   "Purge outer loop count : %d\n"
			   "Purge inner loop count : %d\n"
			   "Purge outer loop stride : %d\n"
			   "Purge inner loop stride : %d\n",
			   ptce.base, ptce.count[0], ptce.count[1],
			   ptce.stride[0], ptce.stride[1]);

		seq_printf(m,
			   "TC Levels : %d\n"
			   "Unique TC(s) : %d\n",
			   vm_info_1.pal_vm_info_1_s.num_tc_levels,
			   vm_info_1.pal_vm_info_1_s.max_unique_tcs);

		for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) {
			for (j=2; j>0 ; j--) {
				tc_pages = 0; /* just in case */

				/* even without unification, some levels may not be present */
				if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0)
					continue;

				seq_printf(m,
					   "\n%s Translation Cache Level %d:\n"
					   "\tHash sets : %d\n"
					   "\tAssociativity : %d\n"
					   "\tNumber of entries : %d\n"
					   "\tFlags : ",
					   cache_types[j+tc_info.tc_unified], i+1,
					   tc_info.tc_num_sets,
					   tc_info.tc_associativity,
					   tc_info.tc_num_entries);

				if (tc_info.tc_pf)
					seq_puts(m, "PreferredPageSizeOptimized ");
				if (tc_info.tc_unified)
					seq_puts(m, "Unified ");
				if (tc_info.tc_reduce_tr)
					seq_puts(m, "TCReduction");

				seq_puts(m, "\n\tSupported page sizes: ");

				bitvector_process(m, tc_pages);

				/* when unified, data (j=2) is enough */
				if (tc_info.tc_unified)
					break;
			}
		}
	}

	seq_putc(m, '\n');
	return 0;
}


static int register_info(struct seq_file *m)
{
	u64 reg_info[2];
	u64 info;
	unsigned long phys_stacked;
	pal_hints_u_t hints;
	unsigned long iregs, dregs;
	static const char * const info_type[] = {
		"Implemented AR(s)",
		"AR(s) with read side-effects",
		"Implemented CR(s)",
		"CR(s) with read side-effects",
	};

	for(info=0; info < 4; info++) {
		if (ia64_pal_register_info(info, &reg_info[0], &reg_info[1]) != 0)
			return 0;
		seq_printf(m, "%-32s : ", info_type[info]);
		bitregister_process(m, reg_info, 128);
		seq_putc(m, '\n');
	}

	if (ia64_pal_rse_info(&phys_stacked, &hints) == 0)
		seq_printf(m,
			   "RSE stacked physical registers : %ld\n"
			   "RSE load/store hints : %ld (%s)\n",
			   phys_stacked, hints.ph_data,
			   hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)");

	if (ia64_pal_debug_info(&iregs, &dregs))
		return 0;

	seq_printf(m,
		   "Instruction debug register pairs : %ld\n"
		   "Data debug register pairs : %ld\n", iregs, dregs);

	return 0;
}

static const char *const proc_features_0[]={	/* Feature set 0 */
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,
	"Unimplemented instruction address fault",
	"INIT, PMI, and LINT pins",
	"Simple unimplemented instr addresses",
	"Variable P-state performance",
	"Virtual machine features implemented",
	"XIP,XPSR,XFS implemented",
	"XR1-XR3 implemented",
	"Disable dynamic predicate prediction",
	"Disable processor physical number",
	"Disable dynamic data cache prefetch",
	"Disable dynamic inst cache prefetch",
	"Disable dynamic branch prediction",
	NULL, NULL, NULL, NULL,
	"Disable P-states",
	"Enable MCA on Data Poisoning",
	"Enable vmsw instruction",
	"Enable extern environmental notification",
	"Disable BINIT on processor time-out",
	"Disable dynamic power management (DPM)",
	"Disable coherency",
	"Disable cache",
	"Enable CMCI promotion",
	"Enable MCA to BINIT promotion",
	"Enable MCA promotion",
	"Enable BERR promotion"
};

static const char *const proc_features_16[]={	/* Feature set 16 */
	"Disable ETM",
	"Enable ETM",
	"Enable MCA on half-way timer",
	"Enable snoop WC",
	NULL,
	"Enable Fast Deferral",
	"Disable MCA on memory aliasing",
	"Enable RSB",
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	"DP system processor",
	"Low Voltage",
	"HT supported",
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL
};

static const char *const *const proc_features[]={
	proc_features_0,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL,
	proc_features_16,
	NULL, NULL, NULL, NULL,
};

static void feature_set_info(struct seq_file *m, u64 avail, u64 status, u64 control,
			     unsigned long set)
{
	const char *const *vf, *const *v;
	int i;

	vf = v = proc_features[set];
	for(i=0; i < 64; i++, avail >>=1, status >>=1, control >>=1) {

		if (!(control))		/* No remaining bits set */
			break;
		if (!(avail & 0x1))	/* Print only bits that are available */
			continue;
		if (vf)
			v = vf + i;
		if ( v && *v ) {
			seq_printf(m, "%-40s : %s %s\n", *v,
				   avail & 0x1 ? (status & 0x1 ?
						  "On " : "Off"): "",
				   avail & 0x1 ? (control & 0x1 ?
						  "Ctrl" : "NoCtrl"): "");
		} else {
			seq_printf(m, "Feature set %2ld bit %2d\t\t\t"
				   " : %s %s\n",
				   set, i,
				   avail & 0x1 ? (status & 0x1 ?
						  "On " : "Off"): "",
				   avail & 0x1 ? (control & 0x1 ?
						  "Ctrl" : "NoCtrl"): "");
		}
	}
}

static int processor_info(struct seq_file *m)
{
	u64 avail=1, status=1, control=1, feature_set=0;
	s64 ret;

	do {
		ret = ia64_pal_proc_get_features(&avail, &status, &control,
						 feature_set);
		if (ret < 0)
			return 0;

		if (ret == 1) {
			feature_set++;
			continue;
		}

		feature_set_info(m, avail, status, control, feature_set);
		feature_set++;
	} while(1);

	return 0;
}

static const char *const bus_features[]={
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,
	"Request Bus Parking",
	"Bus Lock Mask",
	"Enable Half Transfer",
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL,
	"Enable Cache Line Repl. Shared",
	"Enable Cache Line Repl. Exclusive",
	"Disable Transaction Queuing",
	"Disable Response Error Checking",
	"Disable Bus Error Checking",
	"Disable Bus Requester Internal Error Signalling",
	"Disable Bus Requester Error Signalling",
	"Disable Bus Initialization Event Checking",
	"Disable Bus Initialization Event Signalling",
	"Disable Bus Address Error Checking",
	"Disable Bus Address Error Signalling",
	"Disable Bus Data Error Checking"
};


static int bus_info(struct seq_file *m)
{
	const char *const *v = bus_features;
	pal_bus_features_u_t av, st, ct;
	u64 avail, status, control;
	int i;
	s64 ret;

	if ((ret=ia64_pal_bus_get_features(&av, &st, &ct)) != 0)
		return 0;

	avail = av.pal_bus_features_val;
	status = st.pal_bus_features_val;
	control = ct.pal_bus_features_val;

	for(i=0; i < 64; i++, v++, avail >>=1, status >>=1, control >>=1) {
		if ( ! *v )
			continue;
		seq_printf(m, "%-48s : %s%s %s\n", *v,
			   avail & 0x1 ? "" : "NotImpl",
			   avail & 0x1 ? (status & 0x1 ? "On" : "Off"): "",
			   avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): "");
	}
	return 0;
}

static int version_info(struct seq_file *m)
{
	pal_version_u_t min_ver, cur_ver;

	if (ia64_pal_version(&min_ver, &cur_ver) != 0)
		return 0;

	seq_printf(m,
		   "PAL_vendor : 0x%02x (min=0x%02x)\n"
		   "PAL_A : %02x.%02x (min=%02x.%02x)\n"
		   "PAL_B : %02x.%02x (min=%02x.%02x)\n",
		   cur_ver.pal_version_s.pv_pal_vendor,
		   min_ver.pal_version_s.pv_pal_vendor,
		   cur_ver.pal_version_s.pv_pal_a_model,
		   cur_ver.pal_version_s.pv_pal_a_rev,
		   min_ver.pal_version_s.pv_pal_a_model,
		   min_ver.pal_version_s.pv_pal_a_rev,
		   cur_ver.pal_version_s.pv_pal_b_model,
		   cur_ver.pal_version_s.pv_pal_b_rev,
		   min_ver.pal_version_s.pv_pal_b_model,
		   min_ver.pal_version_s.pv_pal_b_rev);
	return 0;
}

static int perfmon_info(struct seq_file *m)
{
	u64 pm_buffer[16];
	pal_perf_mon_info_u_t pm_info;

	if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0)
		return 0;

	seq_printf(m,
		   "PMC/PMD pairs : %d\n"
		   "Counter width : %d bits\n"
		   "Cycle event number : %d\n"
		   "Retired event number : %d\n"
		   "Implemented PMC : ",
		   pm_info.pal_perf_mon_info_s.generic,
		   pm_info.pal_perf_mon_info_s.width,
		   pm_info.pal_perf_mon_info_s.cycles,
		   pm_info.pal_perf_mon_info_s.retired);

	bitregister_process(m, pm_buffer, 256);
	seq_puts(m, "\nImplemented PMD : ");
	bitregister_process(m, pm_buffer+4, 256);
	seq_puts(m, "\nCycles count capable : ");
	bitregister_process(m, pm_buffer+8, 256);
	seq_puts(m, "\nRetired bundles count capable : ");

#ifdef CONFIG_ITANIUM
	/*
	 * PAL_PERF_MON_INFO reports that only PMC4 can be used to count CPU_CYCLES
	 * which is wrong, both PMC4 and PMD5 support it.
	 */
	if (pm_buffer[12] == 0x10)
		pm_buffer[12]=0x30;
#endif

	bitregister_process(m, pm_buffer+12, 256);
	seq_putc(m, '\n');
	return 0;
}

static int frequency_info(struct seq_file *m)
{
	struct pal_freq_ratio proc, itc, bus;
	unsigned long base;

	if (ia64_pal_freq_base(&base) == -1)
		seq_puts(m, "Output clock : not implemented\n");
	else
		seq_printf(m, "Output clock : %ld ticks/s\n", base);

	if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;

	seq_printf(m,
		   "Processor/Clock ratio : %d/%d\n"
		   "Bus/Clock ratio : %d/%d\n"
		   "ITC/Clock ratio : %d/%d\n",
		   proc.num, proc.den, bus.num, bus.den, itc.num, itc.den);
	return 0;
}

static int tr_info(struct seq_file *m)
{
	long status;
	pal_tr_valid_u_t tr_valid;
	u64 tr_buffer[4];
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	unsigned long i, j;
	unsigned long max[3], pgm;
	struct ifa_reg {
		unsigned long valid:1;
		unsigned long ig:11;
		unsigned long vpn:52;
	} *ifa_reg;
	struct itir_reg {
		unsigned long rv1:2;
		unsigned long ps:6;
		unsigned long key:24;
		unsigned long rv2:32;
	} *itir_reg;
	struct gr_reg {
		unsigned long p:1;
		unsigned long rv1:1;
		unsigned long ma:3;
		unsigned long a:1;
		unsigned long d:1;
		unsigned long pl:2;
		unsigned long ar:3;
		unsigned long ppn:38;
		unsigned long rv2:2;
		unsigned long ed:1;
		unsigned long ig:11;
	} *gr_reg;
	struct rid_reg {
		unsigned long ig1:1;
		unsigned long rv1:1;
		unsigned long ig2:6;
		unsigned long rid:24;
		unsigned long rv2:32;
	} *rid_reg;

	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
		return 0;
	}
	max[0] = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
	max[1] = vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;

	for (i=0; i < 2; i++ ) {
		for (j=0; j < max[i]; j++) {

			status = ia64_pal_tr_read(j, i, tr_buffer, &tr_valid);
			if (status != 0) {
				printk(KERN_ERR "palinfo: pal call failed on tr[%lu:%lu]=%ld\n",
				       i, j, status);
				continue;
			}

			ifa_reg = (struct ifa_reg *)&tr_buffer[2];

			if (ifa_reg->valid == 0)
				continue;

			gr_reg = (struct gr_reg *)tr_buffer;
			itir_reg = (struct itir_reg *)&tr_buffer[1];
			rid_reg = (struct rid_reg *)&tr_buffer[3];

			pgm = -1 << (itir_reg->ps - 12);
			seq_printf(m,
				   "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n"
				   "\tppn : 0x%lx\n"
				   "\tvpn : 0x%lx\n"
				   "\tps : ",
				   "ID"[i], j,
				   tr_valid.pal_tr_valid_s.access_rights_valid,
				   tr_valid.pal_tr_valid_s.priv_level_valid,
				   tr_valid.pal_tr_valid_s.dirty_bit_valid,
				   tr_valid.pal_tr_valid_s.mem_attr_valid,
				   (gr_reg->ppn & pgm)<< 12, (ifa_reg->vpn & pgm)<< 12);

			bitvector_process(m, 1<< itir_reg->ps);

			seq_printf(m,
				   "\n\tpl : %d\n"
				   "\tar : %d\n"
				   "\trid : %x\n"
				   "\tp : %d\n"
				   "\tma : %d\n"
				   "\td : %d\n",
				   gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma,
				   gr_reg->d);
		}
	}
	return 0;
}



/*
 * List {name,function} pairs for every entry in /proc/palinfo/cpu*
 */
static const palinfo_entry_t palinfo_entries[]={
	{ "version_info",	version_info, },
	{ "vm_info",		vm_info, },
	{ "cache_info",		cache_info, },
	{ "power_info",		power_info, },
	{ "register_info",	register_info, },
	{ "processor_info",	processor_info, },
	{ "perfmon_info",	perfmon_info, },
	{ "frequency_info",	frequency_info, },
	{ "bus_info",		bus_info },
	{ "tr_info",		tr_info, }
};

#define NR_PALINFO_ENTRIES	(int) ARRAY_SIZE(palinfo_entries)

static struct proc_dir_entry *palinfo_dir;

/*
 * This data structure is used to pass which cpu,function is being requested
 * It must fit in a 64bit quantity to be passed to the proc callback routine
 *
 * In SMP mode, when we get a request for another CPU, we must call that
 * other CPU using IPI and wait for the result before returning.
 */
typedef union {
	u64 value;
	struct {
		unsigned req_cpu: 32;	/* for which CPU this info is */
		unsigned func_id: 32;	/* which function is requested */
	} pal_func_cpu;
} pal_func_cpu_u_t;

#define req_cpu pal_func_cpu.req_cpu
#define func_id pal_func_cpu.func_id

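/*
 * Each /proc/pal/cpuN/<entry> file stores one pal_func_cpu_u_t value as
 * its private data: req_cpu names the CPU the information belongs to and
 * func_id indexes palinfo_entries[]. proc_palinfo_show() unpacks it and,
 * if the reader runs on a different CPU, forwards the request through
 * palinfo_handle_smp() so the PAL calls always execute on the CPU being
 * queried.
 */
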
#ifdef CONFIG_SMP

/*
 * used to hold information about final function to call
 */
typedef struct {
	palinfo_func_t func;	/* pointer to function to call */
	struct seq_file *m;	/* buffer to store results */
	int ret;		/* return value from call */
} palinfo_smp_data_t;


/*
 * this function does the actual final call and is called
 * from the smp code, i.e., this is the palinfo callback routine
 */
static void
palinfo_smp_call(void *info)
{
	palinfo_smp_data_t *data = (palinfo_smp_data_t *)info;
	data->ret = (*data->func)(data->m);
}

/*
 * function called to trigger the IPI: we need to run the handler on a
 * remote CPU
 * Return:
 *	0         : error or nothing to output
 *	otherwise : the return value of the remote handler
 */
static
int palinfo_handle_smp(struct seq_file *m, pal_func_cpu_u_t *f)
{
	palinfo_smp_data_t ptr;
	int ret;

	ptr.func = palinfo_entries[f->func_id].proc_read;
	ptr.m = m;
	ptr.ret = 0; /* just in case */


	/* will send IPI to other CPU and wait for completion of remote call */
	if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) {
		printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
		       "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
		return 0;
	}
	return ptr.ret;
}
#else /* ! CONFIG_SMP */
static
int palinfo_handle_smp(struct seq_file *m, pal_func_cpu_u_t *f)
{
	printk(KERN_ERR "palinfo: should not be called with non SMP kernel\n");
	return 0;
}
#endif /* CONFIG_SMP */

/*
 * Entry point routine: all calls go through this function
 */
static int proc_palinfo_show(struct seq_file *m, void *v)
{
	pal_func_cpu_u_t *f = (pal_func_cpu_u_t *)&m->private;

	/*
	 * in SMP mode, we may need to call another CPU to get correct
	 * information. PAL, by definition, is processor specific
	 */
	if (f->req_cpu == get_cpu())
		(*palinfo_entries[f->func_id].proc_read)(m);
	else
		palinfo_handle_smp(m, f);

	put_cpu();
	return 0;
}

static int palinfo_add_proc(unsigned int cpu)
{
	pal_func_cpu_u_t f;
	struct proc_dir_entry *cpu_dir;
	int j;
	char cpustr[3+4+1];	/* cpu numbers are up to 4095 on itanic */
	sprintf(cpustr, "cpu%d", cpu);

	cpu_dir = proc_mkdir(cpustr, palinfo_dir);
	if (!cpu_dir)
		return -EINVAL;

	f.req_cpu = cpu;

	for (j=0; j < NR_PALINFO_ENTRIES; j++) {
		f.func_id = j;
		proc_create_single_data(palinfo_entries[j].name, 0, cpu_dir,
					proc_palinfo_show, (void *)f.value);
	}
	return 0;
}

static int palinfo_del_proc(unsigned int hcpu)
{
	char cpustr[3+4+1];	/* cpu numbers are up to 4095 on itanic */

	sprintf(cpustr, "cpu%d", hcpu);
	remove_proc_subtree(cpustr, palinfo_dir);
	return 0;
}

static enum cpuhp_state hp_online;

static int __init palinfo_init(void)
{
	int i = 0;

	printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
	palinfo_dir = proc_mkdir("pal", NULL);
	if (!palinfo_dir)
		return -ENOMEM;

	i = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/palinfo:online",
			      palinfo_add_proc, palinfo_del_proc);
	if (i < 0) {
		remove_proc_subtree("pal", NULL);
		return i;
	}
	hp_online = i;
	return 0;
}

static void __exit palinfo_exit(void)
{
	cpuhp_remove_state(hp_online);
	remove_proc_subtree("pal", NULL);
}

module_init(palinfo_init);
module_exit(palinfo_exit);