/*
 * palinfo.c
 *
 * Prints processor specific information reported by PAL.
 * This code is based on specification of PAL as of the
 * Intel IA-64 Architecture Software Developer's Manual v1.0.
 *
 *
 * Copyright (C) 2000-2001, 2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2004 Intel Corporation
 *  Ashok Raj <ashok.raj@intel.com>
 *
 * 05/26/2000	S.Eranian	initial release
 * 08/21/2000	S.Eranian	updated to July 2000 PAL specs
 * 02/05/2001   S.Eranian	fixed module support
 * 10/23/2001	S.Eranian	updated pal_perf_mon_info bug fixes
 * 03/24/2004	Ashok Raj	updated to work with CPU Hotplug
 * 10/26/2006   Russ Anderson	updated processor features to rev 2.2 spec
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/smp.h>

MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
MODULE_DESCRIPTION("/proc interface to IA-64 PAL");
MODULE_LICENSE("GPL");

#define PALINFO_VERSION "0.5"

typedef int (*palinfo_func_t)(char*);

typedef struct {
	const char		*name;		/* name of the proc entry */
	palinfo_func_t		proc_read;	/* function to call for reading */
	struct proc_dir_entry	*entry;		/* registered entry (removal) */
} palinfo_entry_t;


/*
 *  A bunch of string arrays for pretty printing
 */

static char *cache_types[] = {
	"",			/* not used */
	"Instruction",
	"Data",
	"Data/Instruction"	/* unified */
};

static const char *cache_mattrib[]={
	"WriteThrough",
	"WriteBack",
	"",		/* reserved */
	""		/* reserved */
};

static const char *cache_st_hints[]={
	"Temporal, level 1",
	"Reserved",
	"Reserved",
	"Non-temporal, all levels",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved"
};

static const char *cache_ld_hints[]={
	"Temporal, level 1",
	"Non-temporal, level 1",
	"Reserved",
	"Non-temporal, all levels",
	"Reserved",
	"Reserved",
	"Reserved",
	"Reserved"
};

static const char *rse_hints[]={
	"enforced lazy",
	"eager stores",
	"eager loads",
	"eager loads and stores"
};

#define RSE_HINTS_COUNT ARRAY_SIZE(rse_hints)

static const char *mem_attrib[]={
	"WB",		/* 000 */
	"SW",		/* 001 */
	"010",		/* 010 */
	"011",		/* 011 */
	"UC",		/* 100 */
	"UCE",		/* 101 */
	"WC",		/* 110 */
	"NaTPage"	/* 111 */
};

/*
 * Takes a 64-bit vector and produces a string such that
 * for every set bit n, 2^n is printed in clear text,
 * scaled to the appropriate unit (K/M/G/T).
 *
 * Input:
 *	- a pointer to a buffer to hold the string
 *	- a 64-bit vector
 * Output:
 *	- a pointer to the end of the buffer
 *
 */
static char *
bitvector_process(char *p, u64 vector)
{
	int i,j;
	const char *units[]={ "", "K", "M", "G", "T" };

	for (i=0, j=0; i < 64; i++ , j=i/10) {
		if (vector & 0x1) {
			p += sprintf(p, "%d%s ", 1 << (i-j*10), units[j]);
		}
		vector >>= 1;
	}
	return p;
}
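
/*
 * For illustration: given a page-size mask with bits 12, 14, 16 and 28
 * set, the loop above emits "4K 16K 64K 256M " -- each set bit n is
 * printed as 2^n, scaled by units[n/10].
 */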

/*
 * Takes a 64-bit vector and produces a string such that
 * if bit n is set then register n is present. The function
 * takes into account consecutive registers and prints out ranges.
 *
 * Input:
 *	- a pointer to a buffer to hold the string
 *	- a 64-bit vector
 * Output:
 *	- a pointer to the end of the buffer
 *
 */
static char *
bitregister_process(char *p, u64 *reg_info, int max)
{
	int i, begin, skip = 0;
	u64 value = reg_info[0];

	value >>= i = begin = ffs(value) - 1;

	for(; i < max; i++ ) {

		if (i != 0 && (i%64) == 0) value = *++reg_info;

		if ((value & 0x1) == 0 && skip == 0) {
			if (begin  <= i - 2)
				p += sprintf(p, "%d-%d ", begin, i-1);
			else
				p += sprintf(p, "%d ", i-1);
			skip  = 1;
			begin = -1;
		} else if ((value & 0x1) && skip == 1) {
			skip = 0;
			begin = i;
		}
		value >>=1;
	}
	if (begin > -1) {
		if (begin < max - 1)
			p += sprintf(p, "%d-%d", begin, max - 1);
		else
			p += sprintf(p, "%d", max - 1);
	}

	return p;
}
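
/*
 * For illustration: with reg_info[0] == 0xffff, reg_info[1] == 0x1 and
 * max == 128, the routine above prints "0-15 64 ", collapsing the run
 * of consecutive registers 0..15 into a single range.
 */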

static int
power_info(char *page)
{
	s64 status;
	char *p = page;
	u64 halt_info_buffer[8];
	pal_power_mgmt_info_u_t *halt_info =(pal_power_mgmt_info_u_t *)halt_info_buffer;
	int i;

	status = ia64_pal_halt_info(halt_info);
	if (status != 0) return 0;

	for (i=0; i < 8 ; i++ ) {
		if (halt_info[i].pal_power_mgmt_info_s.im == 1) {
			p += sprintf(p,	"Power level %d:\n"
				     "\tentry_latency       : %d cycles\n"
				     "\texit_latency        : %d cycles\n"
				     "\tpower consumption   : %d mW\n"
				     "\tCache+TLB coherency : %s\n", i,
				     halt_info[i].pal_power_mgmt_info_s.entry_latency,
				     halt_info[i].pal_power_mgmt_info_s.exit_latency,
				     halt_info[i].pal_power_mgmt_info_s.power_consumption,
				     halt_info[i].pal_power_mgmt_info_s.co ? "Yes" : "No");
		} else {
			p += sprintf(p,"Power level %d: not implemented\n",i);
		}
	}
	return p - page;
}

static int
cache_info(char *page)
{
	char *p = page;
	u64 i, levels, unique_caches;
	pal_cache_config_info_t cci;
	int j, k;
	s64 status;

	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
		return 0;
	}

	p += sprintf(p, "Cache levels  : %ld\nUnique caches : %ld\n\n", levels, unique_caches);

	for (i=0; i < levels; i++) {

		for (j=2; j >0 ; j--) {

			/* even without unification some level may not be present */
			if ((status=ia64_pal_cache_config_info(i,j, &cci)) != 0) {
				continue;
			}
			p += sprintf(p,
				     "%s Cache level %lu:\n"
				     "\tSize           : %u bytes\n"
				     "\tAttributes     : ",
				     cache_types[j+cci.pcci_unified], i+1,
				     cci.pcci_cache_size);

			if (cci.pcci_unified) p += sprintf(p, "Unified ");

			p += sprintf(p, "%s\n", cache_mattrib[cci.pcci_cache_attr]);

			p += sprintf(p,
				     "\tAssociativity  : %d\n"
				     "\tLine size      : %d bytes\n"
				     "\tStride         : %d bytes\n",
				     cci.pcci_assoc, 1<<cci.pcci_line_size, 1<<cci.pcci_stride);
			if (j == 1)
				p += sprintf(p, "\tStore latency  : N/A\n");
			else
				p += sprintf(p, "\tStore latency  : %d cycle(s)\n",
						cci.pcci_st_latency);

			p += sprintf(p,
				     "\tLoad latency   : %d cycle(s)\n"
				     "\tStore hints    : ", cci.pcci_ld_latency);

			for(k=0; k < 8; k++ ) {
				if ( cci.pcci_st_hints & 0x1)
					p += sprintf(p, "[%s]", cache_st_hints[k]);
				cci.pcci_st_hints >>=1;
			}
			p += sprintf(p, "\n\tLoad hints     : ");

			for(k=0; k < 8; k++ ) {
				if (cci.pcci_ld_hints & 0x1)
					p += sprintf(p, "[%s]", cache_ld_hints[k]);
				cci.pcci_ld_hints >>=1;
			}
			p += sprintf(p,
				     "\n\tAlias boundary : %d byte(s)\n"
				     "\tTag LSB        : %d\n"
				     "\tTag MSB        : %d\n",
				     1<<cci.pcci_alias_boundary, cci.pcci_tag_lsb,
				     cci.pcci_tag_msb);

			/* when unified, data(j=2) is enough */
			if (cci.pcci_unified) break;
		}
	}
	return p - page;
}


static int
vm_info(char *page)
{
	char *p = page;
	u64 tr_pages =0, vw_pages=0, tc_pages;
	u64 attrib;
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	pal_tc_info_u_t	tc_info;
	ia64_ptce_info_t ptce;
	const char *sep;
	int i, j;
	s64 status;

	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
	} else {

		p += sprintf(p,
		     "Physical Address Space         : %d bits\n"
		     "Virtual Address Space          : %d bits\n"
		     "Protection Key Registers(PKR)  : %d\n"
		     "Implemented bits in PKR.key    : %d\n"
		     "Hash Tag ID                    : 0x%x\n"
		     "Size of RR.rid                 : %d\n"
		     "Max Purges                     : ",
		     vm_info_1.pal_vm_info_1_s.phys_add_size,
		     vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
		     vm_info_1.pal_vm_info_1_s.max_pkr+1,
		     vm_info_1.pal_vm_info_1_s.key_size,
		     vm_info_1.pal_vm_info_1_s.hash_tag_id,
		     vm_info_2.pal_vm_info_2_s.rid_size);
		if (vm_info_2.pal_vm_info_2_s.max_purges == PAL_MAX_PURGES)
			p += sprintf(p, "unlimited\n");
		else
			p += sprintf(p, "%d\n",
		     		vm_info_2.pal_vm_info_2_s.max_purges ?
				vm_info_2.pal_vm_info_2_s.max_purges : 1);
	}

	if (ia64_pal_mem_attrib(&attrib) == 0) {
		p += sprintf(p, "Supported memory attributes    : ");
		sep = "";
		for (i = 0; i < 8; i++) {
			if (attrib & (1 << i)) {
				p += sprintf(p, "%s%s", sep, mem_attrib[i]);
				sep = ", ";
			}
		}
		p += sprintf(p, "\n");
	}

	if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status);
	} else {

		p += sprintf(p,
			     "\nTLB walker                     : %simplemented\n"
			     "Number of DTR                  : %d\n"
			     "Number of ITR                  : %d\n"
			     "TLB insertable page sizes      : ",
			     vm_info_1.pal_vm_info_1_s.vw ? "" : "not ",
			     vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
			     vm_info_1.pal_vm_info_1_s.max_itr_entry+1);


		p = bitvector_process(p, tr_pages);

		p += sprintf(p, "\nTLB purgeable page sizes       : ");

		p = bitvector_process(p, vw_pages);
	}
	if ((status=ia64_get_ptce(&ptce)) != 0) {
		printk(KERN_ERR "ia64_get_ptce=%ld\n", status);
	} else {
		p += sprintf(p,
		     "\nPurge base address             : 0x%016lx\n"
		     "Purge outer loop count         : %d\n"
		     "Purge inner loop count         : %d\n"
		     "Purge outer loop stride        : %d\n"
		     "Purge inner loop stride        : %d\n",
		     ptce.base, ptce.count[0], ptce.count[1],
		     ptce.stride[0], ptce.stride[1]);

		p += sprintf(p,
		     "TC Levels                      : %d\n"
		     "Unique TC(s)                   : %d\n",
		     vm_info_1.pal_vm_info_1_s.num_tc_levels,
		     vm_info_1.pal_vm_info_1_s.max_unique_tcs);

		for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) {
			for (j=2; j>0 ; j--) {
				tc_pages = 0; /* just in case */


				/* even without unification, some levels may not be present */
				if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) {
					continue;
				}

				p += sprintf(p,
				     "\n%s Translation Cache Level %d:\n"
				     "\tHash sets           : %d\n"
				     "\tAssociativity       : %d\n"
				     "\tNumber of entries   : %d\n"
				     "\tFlags               : ",
				     cache_types[j+tc_info.tc_unified], i+1,
				     tc_info.tc_num_sets,
				     tc_info.tc_associativity,
				     tc_info.tc_num_entries);

				if (tc_info.tc_pf)
					p += sprintf(p, "PreferredPageSizeOptimized ");
				if (tc_info.tc_unified)
					p += sprintf(p, "Unified ");
				if (tc_info.tc_reduce_tr)
					p += sprintf(p, "TCReduction");

				p += sprintf(p, "\n\tSupported page sizes: ");

				p = bitvector_process(p, tc_pages);

				/* when unified, data (j=2) is enough */
				if (tc_info.tc_unified)
					break;
			}
		}
	}
	p += sprintf(p, "\n");

	return p - page;
}


static int
register_info(char *page)
{
	char *p = page;
	u64 reg_info[2];
	u64 info;
	u64 phys_stacked;
	pal_hints_u_t hints;
	u64 iregs, dregs;
	char *info_type[]={
		"Implemented AR(s)",
		"AR(s) with read side-effects",
		"Implemented CR(s)",
		"CR(s) with read side-effects",
	};

	for(info=0; info < 4; info++) {

		if (ia64_pal_register_info(info, &reg_info[0], &reg_info[1]) != 0) return 0;

		p += sprintf(p, "%-32s : ", info_type[info]);

		p = bitregister_process(p, reg_info, 128);

		p += sprintf(p, "\n");
	}

	if (ia64_pal_rse_info(&phys_stacked, &hints) == 0) {
		p += sprintf(p,
			     "RSE stacked physical registers   : %ld\n"
			     "RSE load/store hints             : %ld (%s)\n",
			     phys_stacked, hints.ph_data,
			     hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)");
	}
	if (ia64_pal_debug_info(&iregs, &dregs))
		return 0;

	p += sprintf(p,
		     "Instruction debug register pairs : %ld\n"
		     "Data debug register pairs        : %ld\n", iregs, dregs);

	return p - page;
}

static const char *proc_features[]={
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,
	"Unimplemented instruction address fault",
	"INIT, PMI, and LINT pins",
	"Simple unimplemented instr addresses",
	"Variable P-state performance",
	"Virtual machine features implemented",
	"XIP,XPSR,XFS implemented",
	"XR1-XR3 implemented",
	"Disable dynamic predicate prediction",
	"Disable processor physical number",
	"Disable dynamic data cache prefetch",
	"Disable dynamic inst cache prefetch",
	"Disable dynamic branch prediction",
	NULL, NULL, NULL, NULL,
	"Disable P-states",
	"Enable MCA on Data Poisoning",
	"Enable vmsw instruction",
	"Enable extern environmental notification",
	"Disable BINIT on processor time-out",
	"Disable dynamic power management (DPM)",
	"Disable coherency",
	"Disable cache",
	"Enable CMCI promotion",
	"Enable MCA to BINIT promotion",
	"Enable MCA promotion",
	"Enable BERR promotion"
};


static int
processor_info(char *page)
{
	char *p = page;
	const char **v = proc_features;
	u64 avail=1, status=1, control=1;
	int i;
	s64 ret;

	if ((ret=ia64_pal_proc_get_features(&avail, &status, &control)) != 0) return 0;

	for(i=0; i < 64; i++, v++,avail >>=1, status >>=1, control >>=1) {
		if ( ! *v ) continue;
		p += sprintf(p, "%-40s : %s%s %s\n", *v,
				avail & 0x1 ? "" : "NotImpl",
				avail & 0x1 ? (status & 0x1 ? "On" : "Off"): "",
				avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): "");
	}
	return p - page;
}
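
/*
 * For illustration (feature state is hypothetical), one line of the
 * resulting /proc output looks like:
 *
 *	Disable dynamic branch prediction        : Off Ctrl
 *
 * i.e. the feature name, its current On/Off state (or NotImpl), and
 * whether PAL lets the OS control it.
 */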

static const char *bus_features[]={
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
	NULL,NULL,
	"Request  Bus Parking",
	"Bus Lock Mask",
	"Enable Half Transfer",
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL,
	"Enable Cache Line Repl. Shared",
	"Enable Cache Line Repl. Exclusive",
	"Disable Transaction Queuing",
	"Disable Response Error Checking",
	"Disable Bus Error Checking",
	"Disable Bus Requester Internal Error Signalling",
	"Disable Bus Requester Error Signalling",
	"Disable Bus Initialization Event Checking",
	"Disable Bus Initialization Event Signalling",
	"Disable Bus Address Error Checking",
	"Disable Bus Address Error Signalling",
	"Disable Bus Data Error Checking"
};


static int
bus_info(char *page)
{
	char *p = page;
	const char **v = bus_features;
	pal_bus_features_u_t av, st, ct;
	u64 avail, status, control;
	int i;
	s64 ret;

	if ((ret=ia64_pal_bus_get_features(&av, &st, &ct)) != 0) return 0;

	avail   = av.pal_bus_features_val;
	status  = st.pal_bus_features_val;
	control = ct.pal_bus_features_val;

	for(i=0; i < 64; i++, v++, avail >>=1, status >>=1, control >>=1) {
		if ( ! *v ) continue;
		p += sprintf(p, "%-48s : %s%s %s\n", *v,
				avail & 0x1 ? "" : "NotImpl",
				avail & 0x1 ? (status  & 0x1 ? "On" : "Off"): "",
				avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): "");
	}
	return p - page;
}

static int
version_info(char *page)
{
	pal_version_u_t min_ver, cur_ver;
	char *p = page;

	if (ia64_pal_version(&min_ver, &cur_ver) != 0)
		return 0;

	p += sprintf(p,
		     "PAL_vendor : 0x%02x (min=0x%02x)\n"
		     "PAL_A      : %02x.%02x (min=%02x.%02x)\n"
		     "PAL_B      : %02x.%02x (min=%02x.%02x)\n",
		     cur_ver.pal_version_s.pv_pal_vendor,
		     min_ver.pal_version_s.pv_pal_vendor,
		     cur_ver.pal_version_s.pv_pal_a_model,
		     cur_ver.pal_version_s.pv_pal_a_rev,
		     min_ver.pal_version_s.pv_pal_a_model,
		     min_ver.pal_version_s.pv_pal_a_rev,
		     cur_ver.pal_version_s.pv_pal_b_model,
		     cur_ver.pal_version_s.pv_pal_b_rev,
		     min_ver.pal_version_s.pv_pal_b_model,
		     min_ver.pal_version_s.pv_pal_b_rev);
	return p - page;
}

static int
perfmon_info(char *page)
{
	char *p = page;
	u64 pm_buffer[16];
	pal_perf_mon_info_u_t pm_info;

	if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0) return 0;

	p += sprintf(p,
		     "PMC/PMD pairs                 : %d\n"
		     "Counter width                 : %d bits\n"
		     "Cycle event number            : %d\n"
		     "Retired event number          : %d\n"
		     "Implemented PMC               : ",
		     pm_info.pal_perf_mon_info_s.generic, pm_info.pal_perf_mon_info_s.width,
		     pm_info.pal_perf_mon_info_s.cycles, pm_info.pal_perf_mon_info_s.retired);

	p = bitregister_process(p, pm_buffer, 256);
	p += sprintf(p, "\nImplemented PMD               : ");
	p = bitregister_process(p, pm_buffer+4, 256);
	p += sprintf(p, "\nCycles count capable          : ");
	p = bitregister_process(p, pm_buffer+8, 256);
	p += sprintf(p, "\nRetired bundles count capable : ");

#ifdef CONFIG_ITANIUM
	/*
	 * PAL_PERF_MON_INFO reports that only PMC4 can be used to count CPU_CYCLES
	 * which is wrong, both PMC4 and PMD5 support it.
	 */
	if (pm_buffer[12] == 0x10) pm_buffer[12]=0x30;
#endif

	p = bitregister_process(p, pm_buffer+12, 256);

	p += sprintf(p, "\n");

	return p - page;
}

static int
frequency_info(char *page)
{
	char *p = page;
	struct pal_freq_ratio proc, itc, bus;
	u64 base;

	if (ia64_pal_freq_base(&base) == -1)
		p += sprintf(p, "Output clock            : not implemented\n");
	else
		p += sprintf(p, "Output clock            : %ld ticks/s\n", base);

	if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;

	p += sprintf(p,
		     "Processor/Clock ratio   : %d/%d\n"
		     "Bus/Clock ratio         : %d/%d\n"
		     "ITC/Clock ratio         : %d/%d\n",
		     proc.num, proc.den, bus.num, bus.den, itc.num, itc.den);

	return p - page;
}

static int
tr_info(char *page)
{
	char *p = page;
	s64 status;
	pal_tr_valid_u_t tr_valid;
	u64 tr_buffer[4];
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	u64 i, j;
	u64 max[3], pgm;
	struct ifa_reg {
		u64 valid:1;
		u64 ig:11;
		u64 vpn:52;
	} *ifa_reg;
	struct itir_reg {
		u64 rv1:2;
		u64 ps:6;
		u64 key:24;
		u64 rv2:32;
	} *itir_reg;
	struct gr_reg {
		u64 p:1;
		u64 rv1:1;
		u64 ma:3;
		u64 a:1;
		u64 d:1;
		u64 pl:2;
		u64 ar:3;
		u64 ppn:38;
		u64 rv2:2;
		u64 ed:1;
		u64 ig:11;
	} *gr_reg;
	struct rid_reg {
		u64 ig1:1;
		u64 rv1:1;
		u64 ig2:6;
		u64 rid:24;
		u64 rv2:32;
	} *rid_reg;

	if ((status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
		return 0;
	}
	max[0] = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
	max[1] = vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;

	for (i=0; i < 2; i++ ) {
		for (j=0; j < max[i]; j++) {

		status = ia64_pal_tr_read(j, i, tr_buffer, &tr_valid);
		if (status != 0) {
			printk(KERN_ERR "palinfo: pal call failed on tr[%lu:%lu]=%ld\n",
			       i, j, status);
			continue;
		}

		ifa_reg  = (struct ifa_reg *)&tr_buffer[2];

		if (ifa_reg->valid == 0) continue;

		gr_reg   = (struct gr_reg *)tr_buffer;
		itir_reg = (struct itir_reg *)&tr_buffer[1];
		rid_reg  = (struct rid_reg *)&tr_buffer[3];

		pgm	 = -1 << (itir_reg->ps - 12);
		p += sprintf(p,
			     "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n"
			     "\tppn  : 0x%lx\n"
			     "\tvpn  : 0x%lx\n"
			     "\tps   : ",
			     "ID"[i], j,
			     tr_valid.pal_tr_valid_s.access_rights_valid,
			     tr_valid.pal_tr_valid_s.priv_level_valid,
			     tr_valid.pal_tr_valid_s.dirty_bit_valid,
			     tr_valid.pal_tr_valid_s.mem_attr_valid,
			     (gr_reg->ppn & pgm)<< 12, (ifa_reg->vpn & pgm)<< 12);

		p = bitvector_process(p, 1<< itir_reg->ps);

		p += sprintf(p,
			     "\n\tpl   : %d\n"
			     "\tar   : %d\n"
			     "\trid  : %x\n"
			     "\tp    : %d\n"
			     "\tma   : %d\n"
			     "\td    : %d\n",
			     gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma,
			     gr_reg->d);
		}
	}
	return p - page;
}



/*
 * List {name,function} pairs for every entry in /proc/pal/cpu*
 */
static palinfo_entry_t palinfo_entries[]={
	{ "version_info",	version_info, },
	{ "vm_info",		vm_info, },
	{ "cache_info",		cache_info, },
	{ "power_info",		power_info, },
	{ "register_info",	register_info, },
	{ "processor_info",	processor_info, },
	{ "perfmon_info",	perfmon_info, },
	{ "frequency_info",	frequency_info, },
	{ "bus_info",		bus_info },
	{ "tr_info",		tr_info, }
};

#define NR_PALINFO_ENTRIES	(int) ARRAY_SIZE(palinfo_entries)

/*
 * This array is used to keep track of the proc entries we create. This is
 * required in module mode when we need to remove all entries: the procfs code
 * does not do recursive deletion.
 *
 * Notes:
 *	- +1 accounts for the cpuN directory entry in /proc/pal
 */
#define NR_PALINFO_PROC_ENTRIES	(NR_CPUS*(NR_PALINFO_ENTRIES+1))
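
/*
 * For example, with the 10 entries in palinfo_entries[] above and a
 * (hypothetical) NR_CPUS of 4, this reserves 4 * (10 + 1) = 44 slots:
 * one per proc file plus one per cpuN directory.
 */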

static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES];
static struct proc_dir_entry *palinfo_dir;

/*
 * This data structure is used to pass which cpu/function is being requested.
 * It must fit in a 64-bit quantity to be passed to the proc callback routine.
 *
 * In SMP mode, when we get a request for another CPU, we must call that
 * other CPU using an IPI and wait for the result before returning.
 */
typedef union {
	u64 value;
	struct {
		unsigned	req_cpu: 32;	/* for which CPU this info is */
		unsigned	func_id: 32;	/* which function is requested */
	} pal_func_cpu;
} pal_func_cpu_u_t;

#define req_cpu	pal_func_cpu.req_cpu
#define func_id pal_func_cpu.func_id
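
/*
 * Sketch of how this cookie travels (see create_palinfo_proc_entries()
 * and palinfo_read_entry() below): the creator packs both halves and
 * hands the union over as the proc entry's opaque data pointer,
 *
 *	pal_func_cpu_u_t f;
 *	f.req_cpu = cpu;	// CPU whose PAL should be queried
 *	f.func_id = j;		// index into palinfo_entries[]
 *	... create_proc_read_entry(..., (void *)f.value);
 *
 * and the read callback recovers it by casting the address of its
 * 'data' argument back to a pal_func_cpu_u_t pointer.
 */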

#ifdef CONFIG_SMP

/*
 * used to hold information about final function to call
 */
typedef struct {
	palinfo_func_t	func;	/* pointer to function to call */
	char		*page;	/* buffer to store results */
	int		ret;	/* return value from call */
} palinfo_smp_data_t;


/*
 * This function makes the actual final call and is called
 * from the SMP code, i.e., this is the palinfo callback routine.
 */
static void
palinfo_smp_call(void *info)
{
	palinfo_smp_data_t *data = (palinfo_smp_data_t *)info;
	if (data == NULL) {
		printk(KERN_ERR "palinfo: data pointer is NULL\n");
		return;	/* no output */
	}
	/* does the actual call */
	data->ret = (*data->func)(data->page);
}

/*
 * Function called to trigger the IPI: we need to access a remote CPU.
 * Return:
 *	0 : error or nothing to output
 *	otherwise how many bytes in the "page" buffer were written
 */
static
int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
{
	palinfo_smp_data_t ptr;
	int ret;

	ptr.func = palinfo_entries[f->func_id].proc_read;
	ptr.page = page;
	ptr.ret  = 0; /* just in case */


	/* will send IPI to other CPU and wait for completion of remote call */
	if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 0, 1))) {
		printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
		       "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
		return 0;
	}
	return ptr.ret;
}
#else /* ! CONFIG_SMP */
static
int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
{
	printk(KERN_ERR "palinfo: should not be called on a non-SMP kernel\n");
	return 0;
}
#endif /* CONFIG_SMP */

/*
 * Entry point routine: all calls go through this function
 */
static int
palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	int len=0;
	pal_func_cpu_u_t *f = (pal_func_cpu_u_t *)&data;

	/*
	 * in SMP mode, we may need to call another CPU to get correct
	 * information. PAL, by definition, is processor specific
	 */
	if (f->req_cpu == get_cpu())
		len = (*palinfo_entries[f->func_id].proc_read)(page);
	else
		len = palinfo_handle_smp(f, page);

	put_cpu();

	if (len <= off+count) *eof = 1;

	*start = page + off;
	len   -= off;

	if (len>count) len = count;
	if (len<0) len = 0;

	return len;
}
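
/*
 * For illustration: a read of /proc/pal/cpu1/vm_info issued while running
 * on cpu 0 takes the palinfo_handle_smp() path above, since req_cpu (1)
 * differs from the executing CPU; the same read issued on cpu 1 calls
 * vm_info() directly.
 */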

static void
create_palinfo_proc_entries(unsigned int cpu)
{
#	define CPUSTR	"cpu%d"

	pal_func_cpu_u_t f;
	struct proc_dir_entry **pdir;
	struct proc_dir_entry *cpu_dir;
	int j;
	char cpustr[sizeof(CPUSTR)];


	/*
	 * we keep track of created entries in a depth-first order for
	 * cleanup purposes. Each entry is stored into palinfo_proc_entries
	 */
	sprintf(cpustr,CPUSTR, cpu);

	cpu_dir = proc_mkdir(cpustr, palinfo_dir);

	f.req_cpu = cpu;

	/*
	 * Compute the location to store the per-cpu entries.
	 * We don't store the top-level /proc/pal entry in this list;
	 * it is removed last, after all cpu entries have been removed.
	 */
	pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)];
	*pdir++ = cpu_dir;
	for (j=0; j < NR_PALINFO_ENTRIES; j++) {
		f.func_id = j;
		*pdir = create_proc_read_entry(
				palinfo_entries[j].name, 0, cpu_dir,
				palinfo_read_entry, (void *)f.value);
		if (*pdir)
			(*pdir)->owner = THIS_MODULE;
		pdir++;
	}
}
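
/*
 * For illustration, after this has run for cpu 0 the tree rooted at
 * /proc/pal looks like (one file per palinfo_entries[] element):
 *
 *	/proc/pal/cpu0/version_info
 *	/proc/pal/cpu0/vm_info
 *	...
 *	/proc/pal/cpu0/tr_info
 */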

static void
remove_palinfo_proc_entries(unsigned int hcpu)
{
	int j;
	struct proc_dir_entry *cpu_dir, **pdir;

	pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)];
	cpu_dir = *pdir;
	*pdir++ = NULL;
	for (j=0; j < NR_PALINFO_ENTRIES; j++) {
		/* advance even past NULL slots so no entry is skipped */
		if (*pdir)
			remove_proc_entry((*pdir)->name, cpu_dir);
		*pdir++ = NULL;
	}

	if (cpu_dir) {
		remove_proc_entry(cpu_dir->name, palinfo_dir);
	}
}

static int palinfo_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int hotcpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		create_palinfo_proc_entries(hotcpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		remove_palinfo_proc_entries(hotcpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block palinfo_cpu_notifier =
{
	.notifier_call = palinfo_cpu_callback,
	.priority = 0,
};

static int __init
palinfo_init(void)
{
	int i = 0;

	printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
	palinfo_dir = proc_mkdir("pal", NULL);

	/* Create palinfo dirs in /proc for all online cpus */
	for_each_online_cpu(i) {
		create_palinfo_proc_entries(i);
	}

	/* Register for future delivery via notify registration */
	register_hotcpu_notifier(&palinfo_cpu_notifier);

	return 0;
}

static void __exit
palinfo_exit(void)
{
	int i = 0;

	/* remove all nodes: depth first pass. Could optimize this  */
	for_each_online_cpu(i) {
		remove_palinfo_proc_entries(i);
	}

	/*
	 * Remove the top level entry finally
	 */
	remove_proc_entry(palinfo_dir->name, NULL);

	/*
	 * Unregister from cpu notifier callbacks
	 */
	unregister_hotcpu_notifier(&palinfo_cpu_notifier);
}

module_init(palinfo_init);
module_exit(palinfo_exit);
1037