xref: /openbmc/linux/arch/powerpc/kernel/sysfs.c (revision 022dacdd)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/device.h>
3 #include <linux/cpu.h>
4 #include <linux/smp.h>
5 #include <linux/percpu.h>
6 #include <linux/init.h>
7 #include <linux/sched.h>
8 #include <linux/export.h>
9 #include <linux/nodemask.h>
10 #include <linux/cpumask.h>
11 #include <linux/notifier.h>
12 
13 #include <asm/current.h>
14 #include <asm/processor.h>
15 #include <asm/cputable.h>
16 #include <asm/hvcall.h>
17 #include <asm/prom.h>
18 #include <asm/machdep.h>
19 #include <asm/smp.h>
20 #include <asm/pmc.h>
21 #include <asm/firmware.h>
22 #include <asm/svm.h>
23 
24 #include "cacheinfo.h"
25 #include "setup.h"
26 
27 #ifdef CONFIG_PPC64
28 #include <asm/paca.h>
29 #include <asm/lppaca.h>
30 #endif
31 
32 static DEFINE_PER_CPU(struct cpu, cpu_devices);
33 
34 /*
35  * SMT snooze delay stuff, 64-bit only for now
36  */
37 
38 #ifdef CONFIG_PPC64
39 
40 /* Time in microseconds we delay before sleeping in the idle loop */
41 static DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
42 
43 static ssize_t store_smt_snooze_delay(struct device *dev,
44 				      struct device_attribute *attr,
45 				      const char *buf,
46 				      size_t count)
47 {
48 	struct cpu *cpu = container_of(dev, struct cpu, dev);
49 	ssize_t ret;
50 	long snooze;
51 
52 	ret = sscanf(buf, "%ld", &snooze);
53 	if (ret != 1)
54 		return -EINVAL;
55 
56 	per_cpu(smt_snooze_delay, cpu->dev.id) = snooze;
57 	return count;
58 }
59 
60 static ssize_t show_smt_snooze_delay(struct device *dev,
61 				     struct device_attribute *attr,
62 				     char *buf)
63 {
64 	struct cpu *cpu = container_of(dev, struct cpu, dev);
65 
66 	return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id));
67 }
68 
69 static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
70 		   store_smt_snooze_delay);
71 
/*
 * Parse the "smt-snooze-delay=" boot parameter and seed every possible
 * CPU's snooze delay with the given value.  Does nothing on CPUs
 * without the SMT feature.
 */
static int __init setup_smt_snooze_delay(char *str)
{
	unsigned int cpu;
	long snooze;

	if (!cpu_has_feature(CPU_FTR_SMT))
		return 1;

	snooze = simple_strtol(str, NULL, 10);
	for_each_possible_cpu(cpu)
		per_cpu(smt_snooze_delay, cpu) = snooze;

	/* __setup() handlers return 1 to mark the option as consumed. */
	return 1;
}
__setup("smt-snooze-delay=", setup_smt_snooze_delay);
87 
88 #endif /* CONFIG_PPC64 */
89 
/*
 * Generate read_<NAME>()/write_<NAME>() helpers that access the SPR
 * given by ADDRESS on the CPU they run on.  They are invoked through
 * smp_call_function_single() (see __SYSFS_SPRSETUP_SHOW_STORE) so the
 * mfspr/mtspr executes on the targeted core.  EXTRA is an optional
 * statement run before the write — used to enable the PMCs for
 * performance-counter SPRs (see SYSFS_PMCSETUP).
 */
#define __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, EXTRA) \
static void read_##NAME(void *val) \
{ \
	*(unsigned long *)val = mfspr(ADDRESS);	\
} \
static void write_##NAME(void *val) \
{ \
	EXTRA; \
	mtspr(ADDRESS, *(unsigned long *)val);	\
}
100 
/*
 * Generate show_<NAME>()/store_<NAME>() sysfs handlers on top of the
 * read_<NAME>()/write_<NAME>() helpers.  The value is read/written in
 * hex ("%lx") and the SPR access is bounced to the attribute's CPU via
 * smp_call_function_single().
 */
#define __SYSFS_SPRSETUP_SHOW_STORE(NAME) \
static ssize_t show_##NAME(struct device *dev, \
			struct device_attribute *attr, \
			char *buf) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, dev); \
	unsigned long val; \
	smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1);	\
	return sprintf(buf, "%lx\n", val); \
} \
static ssize_t __used \
	store_##NAME(struct device *dev, struct device_attribute *attr, \
			const char *buf, size_t count) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, dev); \
	unsigned long val; \
	int ret = sscanf(buf, "%lx", &val); \
	if (ret != 1) \
		return -EINVAL; \
	smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
	return count; \
}
123 
/* PMC SPRs: enable the PMCs (ppc_enable_pmcs()) before every write. */
#define SYSFS_PMCSETUP(NAME, ADDRESS) \
	__SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ppc_enable_pmcs()) \
	__SYSFS_SPRSETUP_SHOW_STORE(NAME)
/* Plain SPRs: no extra action on write. */
#define SYSFS_SPRSETUP(NAME, ADDRESS) \
	__SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ) \
	__SYSFS_SPRSETUP_SHOW_STORE(NAME)

/* Only the show/store pair; read_/write_ helpers are hand-written. */
#define SYSFS_SPRSETUP_SHOW_STORE(NAME) \
	__SYSFS_SPRSETUP_SHOW_STORE(NAME)
133 
134 #ifdef CONFIG_PPC64
135 
136 /*
137  * This is the system wide DSCR register default value. Any
138  * change to this default value through the sysfs interface
139  * will update all per cpu DSCR default values across the
140  * system stored in their respective PACA structures.
141  */
142 static unsigned long dscr_default;
143 
144 /**
145  * read_dscr() - Fetch the cpu specific DSCR default
146  * @val:	Returned cpu specific DSCR default value
147  *
148  * This function returns the per cpu DSCR default value
149  * for any cpu which is contained in it's PACA structure.
150  */
151 static void read_dscr(void *val)
152 {
153 	*(unsigned long *)val = get_paca()->dscr_default;
154 }
155 
156 
157 /**
158  * write_dscr() - Update the cpu specific DSCR default
159  * @val:	New cpu specific DSCR default value to update
160  *
161  * This function updates the per cpu DSCR default value
162  * for any cpu which is contained in it's PACA structure.
163  */
164 static void write_dscr(void *val)
165 {
166 	get_paca()->dscr_default = *(unsigned long *)val;
167 	if (!current->thread.dscr_inherit) {
168 		current->thread.dscr = *(unsigned long *)val;
169 		mtspr(SPRN_DSCR, *(unsigned long *)val);
170 	}
171 }
172 
173 SYSFS_SPRSETUP_SHOW_STORE(dscr);
174 static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
175 
/* Make a device attribute owner-writable (adds the 0200 mode bit). */
static void add_write_permission_dev_attr(struct device_attribute *attr)
{
	attr->attr.mode |= 0200;
}
180 
181 /**
182  * show_dscr_default() - Fetch the system wide DSCR default
183  * @dev:	Device structure
184  * @attr:	Device attribute structure
185  * @buf:	Interface buffer
186  *
187  * This function returns the system wide DSCR default value.
188  */
189 static ssize_t show_dscr_default(struct device *dev,
190 		struct device_attribute *attr, char *buf)
191 {
192 	return sprintf(buf, "%lx\n", dscr_default);
193 }
194 
195 /**
196  * store_dscr_default() - Update the system wide DSCR default
197  * @dev:	Device structure
198  * @attr:	Device attribute structure
199  * @buf:	Interface buffer
200  * @count:	Size of the update
201  *
202  * This function updates the system wide DSCR default value.
203  */
204 static ssize_t __used store_dscr_default(struct device *dev,
205 		struct device_attribute *attr, const char *buf,
206 		size_t count)
207 {
208 	unsigned long val;
209 	int ret = 0;
210 
211 	ret = sscanf(buf, "%lx", &val);
212 	if (ret != 1)
213 		return -EINVAL;
214 	dscr_default = val;
215 
216 	on_each_cpu(write_dscr, &val, 1);
217 
218 	return count;
219 }
220 
221 static DEVICE_ATTR(dscr_default, 0600,
222 		show_dscr_default, store_dscr_default);
223 
224 static void sysfs_create_dscr_default(void)
225 {
226 	if (cpu_has_feature(CPU_FTR_DSCR)) {
227 		int err = 0;
228 		int cpu;
229 
230 		dscr_default = spr_default_dscr;
231 		for_each_possible_cpu(cpu)
232 			paca_ptrs[cpu]->dscr_default = dscr_default;
233 
234 		err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default);
235 	}
236 }
237 #endif /* CONFIG_PPC64 */
238 
239 #ifdef CONFIG_PPC_FSL_BOOK3E
240 #define MAX_BIT				63
241 
242 static u64 pw20_wt;
243 static u64 altivec_idle_wt;
244 
/*
 * Convert a wait time in nanoseconds into the index of the timebase
 * bit whose toggle period roughly matches it (ilog2 of the cycle
 * count).  Returns 0 when the time is below one timebase cycle.
 *
 * NOTE(review): for ns >= 10us the value is rounded to microseconds
 * before multiplying — presumably to avoid 64-bit overflow of
 * ns * tb_ticks_per_usec; the small-value path multiplies first to
 * keep precision.  Confirm against the e6500 PWRMGTCR0 documentation.
 */
static unsigned int get_idle_ticks_bit(u64 ns)
{
	u64 cycle;

	if (ns >= 10000)
		cycle = div_u64(ns + 500, 1000) * tb_ticks_per_usec;
	else
		cycle = div_u64(ns * tb_ticks_per_usec, 1000);

	if (!cycle)
		return 0;

	return ilog2(cycle);
}
259 
260 static void do_show_pwrmgtcr0(void *val)
261 {
262 	u32 *value = val;
263 
264 	*value = mfspr(SPRN_PWRMGTCR0);
265 }
266 
267 static ssize_t show_pw20_state(struct device *dev,
268 				struct device_attribute *attr, char *buf)
269 {
270 	u32 value;
271 	unsigned int cpu = dev->id;
272 
273 	smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
274 
275 	value &= PWRMGTCR0_PW20_WAIT;
276 
277 	return sprintf(buf, "%u\n", value ? 1 : 0);
278 }
279 
280 static void do_store_pw20_state(void *val)
281 {
282 	u32 *value = val;
283 	u32 pw20_state;
284 
285 	pw20_state = mfspr(SPRN_PWRMGTCR0);
286 
287 	if (*value)
288 		pw20_state |= PWRMGTCR0_PW20_WAIT;
289 	else
290 		pw20_state &= ~PWRMGTCR0_PW20_WAIT;
291 
292 	mtspr(SPRN_PWRMGTCR0, pw20_state);
293 }
294 
295 static ssize_t store_pw20_state(struct device *dev,
296 				struct device_attribute *attr,
297 				const char *buf, size_t count)
298 {
299 	u32 value;
300 	unsigned int cpu = dev->id;
301 
302 	if (kstrtou32(buf, 0, &value))
303 		return -EINVAL;
304 
305 	if (value > 1)
306 		return -EINVAL;
307 
308 	smp_call_function_single(cpu, do_store_pw20_state, &value, 1);
309 
310 	return count;
311 }
312 
/*
 * Report the PW20 wait time in nanoseconds.  If userspace has stored a
 * value (pw20_wt) we echo that back; otherwise reconstruct the time
 * from the PW20_ENT bit index currently programmed in PWRMGTCR0.
 */
static ssize_t show_pw20_wait_time(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	u32 value;
	u64 tb_cycle = 1;
	u64 time;

	unsigned int cpu = dev->id;

	if (!pw20_wt) {
		smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
		value = (value & PWRMGTCR0_PW20_ENT) >>
					PWRMGTCR0_PW20_ENT_SHIFT;

		/* Number of timebase cycles the entry bit corresponds to. */
		tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
		/* convert timebase cycles to ns */
		if (tb_ticks_per_usec > 1000) {
			time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
		} else {
			u32 rem_us;

			/* Low tb frequency: split into us + remainder to
			 * avoid losing sub-microsecond precision. */
			time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
						&rem_us);
			time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
		}
	} else {
		time = pw20_wt;
	}

	return sprintf(buf, "%llu\n", time > 0 ? time : 0);
}
344 
345 static void set_pw20_wait_entry_bit(void *val)
346 {
347 	u32 *value = val;
348 	u32 pw20_idle;
349 
350 	pw20_idle = mfspr(SPRN_PWRMGTCR0);
351 
352 	/* Set Automatic PW20 Core Idle Count */
353 	/* clear count */
354 	pw20_idle &= ~PWRMGTCR0_PW20_ENT;
355 
356 	/* set count */
357 	pw20_idle |= ((MAX_BIT - *value) << PWRMGTCR0_PW20_ENT_SHIFT);
358 
359 	mtspr(SPRN_PWRMGTCR0, pw20_idle);
360 }
361 
/*
 * Sysfs store: set the PW20 wait time (nanoseconds).  The time is
 * translated into a timebase bit index and programmed into PWRMGTCR0
 * on the attribute's CPU; the raw value is cached in pw20_wt so the
 * show path can echo it back exactly.
 */
static ssize_t store_pw20_wait_time(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u32 entry_bit;
	u64 value;

	unsigned int cpu = dev->id;

	if (kstrtou64(buf, 0, &value))
		return -EINVAL;

	/* Zero is not a meaningful wait time. */
	if (!value)
		return -EINVAL;

	entry_bit = get_idle_ticks_bit(value);
	if (entry_bit > MAX_BIT)
		return -EINVAL;

	pw20_wt = value;

	smp_call_function_single(cpu, set_pw20_wait_entry_bit,
				&entry_bit, 1);

	return count;
}
388 
389 static ssize_t show_altivec_idle(struct device *dev,
390 				struct device_attribute *attr, char *buf)
391 {
392 	u32 value;
393 	unsigned int cpu = dev->id;
394 
395 	smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
396 
397 	value &= PWRMGTCR0_AV_IDLE_PD_EN;
398 
399 	return sprintf(buf, "%u\n", value ? 1 : 0);
400 }
401 
402 static void do_store_altivec_idle(void *val)
403 {
404 	u32 *value = val;
405 	u32 altivec_idle;
406 
407 	altivec_idle = mfspr(SPRN_PWRMGTCR0);
408 
409 	if (*value)
410 		altivec_idle |= PWRMGTCR0_AV_IDLE_PD_EN;
411 	else
412 		altivec_idle &= ~PWRMGTCR0_AV_IDLE_PD_EN;
413 
414 	mtspr(SPRN_PWRMGTCR0, altivec_idle);
415 }
416 
417 static ssize_t store_altivec_idle(struct device *dev,
418 				struct device_attribute *attr,
419 				const char *buf, size_t count)
420 {
421 	u32 value;
422 	unsigned int cpu = dev->id;
423 
424 	if (kstrtou32(buf, 0, &value))
425 		return -EINVAL;
426 
427 	if (value > 1)
428 		return -EINVAL;
429 
430 	smp_call_function_single(cpu, do_store_altivec_idle, &value, 1);
431 
432 	return count;
433 }
434 
/*
 * Report the AltiVec idle wait time in nanoseconds.  Mirrors
 * show_pw20_wait_time(): echo the cached user value if set, otherwise
 * reconstruct the time from the AV_IDLE_CNT field of PWRMGTCR0.
 */
static ssize_t show_altivec_idle_wait_time(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	u32 value;
	u64 tb_cycle = 1;
	u64 time;

	unsigned int cpu = dev->id;

	if (!altivec_idle_wt) {
		smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
		value = (value & PWRMGTCR0_AV_IDLE_CNT) >>
					PWRMGTCR0_AV_IDLE_CNT_SHIFT;

		/* Number of timebase cycles the count field corresponds to. */
		tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
		/* convert timebase cycles to ns */
		if (tb_ticks_per_usec > 1000) {
			time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
		} else {
			u32 rem_us;

			/* Low tb frequency: keep sub-microsecond precision. */
			time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
						&rem_us);
			time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
		}
	} else {
		time = altivec_idle_wt;
	}

	return sprintf(buf, "%llu\n", time > 0 ? time : 0);
}
466 
467 static void set_altivec_idle_wait_entry_bit(void *val)
468 {
469 	u32 *value = val;
470 	u32 altivec_idle;
471 
472 	altivec_idle = mfspr(SPRN_PWRMGTCR0);
473 
474 	/* Set Automatic AltiVec Idle Count */
475 	/* clear count */
476 	altivec_idle &= ~PWRMGTCR0_AV_IDLE_CNT;
477 
478 	/* set count */
479 	altivec_idle |= ((MAX_BIT - *value) << PWRMGTCR0_AV_IDLE_CNT_SHIFT);
480 
481 	mtspr(SPRN_PWRMGTCR0, altivec_idle);
482 }
483 
/*
 * Sysfs store: set the AltiVec idle wait time (nanoseconds).  The time
 * is translated into a timebase bit index and programmed into
 * PWRMGTCR0 on the attribute's CPU; the raw value is cached in
 * altivec_idle_wt so the show path can echo it back exactly.
 */
static ssize_t store_altivec_idle_wait_time(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u32 entry_bit;
	u64 value;

	unsigned int cpu = dev->id;

	if (kstrtou64(buf, 0, &value))
		return -EINVAL;

	/* Zero is not a meaningful wait time. */
	if (!value)
		return -EINVAL;

	entry_bit = get_idle_ticks_bit(value);
	if (entry_bit > MAX_BIT)
		return -EINVAL;

	altivec_idle_wt = value;

	smp_call_function_single(cpu, set_altivec_idle_wait_entry_bit,
				&entry_bit, 1);

	return count;
}
510 
511 /*
512  * Enable/Disable interface:
513  * 0, disable. 1, enable.
514  */
515 static DEVICE_ATTR(pw20_state, 0600, show_pw20_state, store_pw20_state);
516 static DEVICE_ATTR(altivec_idle, 0600, show_altivec_idle, store_altivec_idle);
517 
518 /*
519  * Set wait time interface:(Nanosecond)
520  * Example: Base on TBfreq is 41MHZ.
521  * 1~48(ns): TB[63]
522  * 49~97(ns): TB[62]
523  * 98~195(ns): TB[61]
524  * 196~390(ns): TB[60]
525  * 391~780(ns): TB[59]
526  * 781~1560(ns): TB[58]
527  * ...
528  */
529 static DEVICE_ATTR(pw20_wait_time, 0600,
530 			show_pw20_wait_time,
531 			store_pw20_wait_time);
532 static DEVICE_ATTR(altivec_idle_wait_time, 0600,
533 			show_altivec_idle_wait_time,
534 			store_altivec_idle_wait_time);
535 #endif
536 
537 /*
538  * Enabling PMCs will slow partition context switch times so we only do
539  * it the first time we write to the PMCs.
540  */
541 
542 static DEFINE_PER_CPU(char, pmcs_enabled);
543 
/*
 * Lazily enable the performance monitor counters on the current CPU.
 * ppc_set_pmu_inuse(1) runs on every call, but the platform enable
 * hook is latched per-cpu so it only runs once (see the comment above
 * pmcs_enabled: enabling slows partition context switches).
 */
void ppc_enable_pmcs(void)
{
	ppc_set_pmu_inuse(1);

	/* Only need to enable them once */
	if (__this_cpu_read(pmcs_enabled))
		return;

	__this_cpu_write(pmcs_enabled, 1);

	if (ppc_md.enable_pmcs)
		ppc_md.enable_pmcs();
}
557 EXPORT_SYMBOL(ppc_enable_pmcs);
558 
559 
560 
561 /* Let's define all possible registers, we'll only hook up the ones
562  * that are implemented on the current processor
563  */
564 
565 #ifdef CONFIG_PMU_SYSFS
566 #if defined(CONFIG_PPC64) || defined(CONFIG_PPC_BOOK3S_32)
567 #define HAS_PPC_PMC_CLASSIC	1
568 #define HAS_PPC_PMC_IBM		1
569 #endif
570 
571 #ifdef CONFIG_PPC64
572 #define HAS_PPC_PMC_PA6T	1
573 #define HAS_PPC_PMC56          1
574 #endif
575 
576 #ifdef CONFIG_PPC_BOOK3S_32
577 #define HAS_PPC_PMC_G4		1
578 #endif
579 #endif /* CONFIG_PMU_SYSFS */
580 
581 #if defined(CONFIG_PPC64) && defined(CONFIG_DEBUG_MISC)
582 #define HAS_PPC_PA6T
583 #endif
584 /*
585  * SPRs which are not related to PMU.
586  */
587 #ifdef CONFIG_PPC64
588 SYSFS_SPRSETUP(purr, SPRN_PURR);
589 SYSFS_SPRSETUP(spurr, SPRN_SPURR);
590 SYSFS_SPRSETUP(pir, SPRN_PIR);
591 SYSFS_SPRSETUP(tscr, SPRN_TSCR);
592 
593 /*
594   Lets only enable read for phyp resources and
595   enable write when needed with a separate function.
596   Lets be conservative and default to pseries.
597 */
598 static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
599 static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
600 static DEVICE_ATTR(pir, 0400, show_pir, NULL);
601 static DEVICE_ATTR(tscr, 0600, show_tscr, store_tscr);
602 #endif /* CONFIG_PPC64 */
603 
604 #ifdef HAS_PPC_PMC_CLASSIC
605 SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
606 SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
607 SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
608 SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
609 SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
610 SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
611 SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
612 SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
613 #endif
614 
615 #ifdef HAS_PPC_PMC_G4
616 SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2);
617 #endif
618 
619 #ifdef HAS_PPC_PMC56
620 SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
621 SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
622 
623 SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
624 
625 static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
626 #endif /* HAS_PPC_PMC56 */
627 
628 
629 
630 
631 #ifdef HAS_PPC_PMC_PA6T
632 SYSFS_PMCSETUP(pa6t_pmc0, SPRN_PA6T_PMC0);
633 SYSFS_PMCSETUP(pa6t_pmc1, SPRN_PA6T_PMC1);
634 SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2);
635 SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
636 SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
637 SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);
638 #endif
639 
640 #ifdef HAS_PPC_PA6T
641 SYSFS_SPRSETUP(hid0, SPRN_HID0);
642 SYSFS_SPRSETUP(hid1, SPRN_HID1);
643 SYSFS_SPRSETUP(hid4, SPRN_HID4);
644 SYSFS_SPRSETUP(hid5, SPRN_HID5);
645 SYSFS_SPRSETUP(ima0, SPRN_PA6T_IMA0);
646 SYSFS_SPRSETUP(ima1, SPRN_PA6T_IMA1);
647 SYSFS_SPRSETUP(ima2, SPRN_PA6T_IMA2);
648 SYSFS_SPRSETUP(ima3, SPRN_PA6T_IMA3);
649 SYSFS_SPRSETUP(ima4, SPRN_PA6T_IMA4);
650 SYSFS_SPRSETUP(ima5, SPRN_PA6T_IMA5);
651 SYSFS_SPRSETUP(ima6, SPRN_PA6T_IMA6);
652 SYSFS_SPRSETUP(ima7, SPRN_PA6T_IMA7);
653 SYSFS_SPRSETUP(ima8, SPRN_PA6T_IMA8);
654 SYSFS_SPRSETUP(ima9, SPRN_PA6T_IMA9);
655 SYSFS_SPRSETUP(imaat, SPRN_PA6T_IMAAT);
656 SYSFS_SPRSETUP(btcr, SPRN_PA6T_BTCR);
657 SYSFS_SPRSETUP(pccr, SPRN_PA6T_PCCR);
658 SYSFS_SPRSETUP(rpccr, SPRN_PA6T_RPCCR);
659 SYSFS_SPRSETUP(der, SPRN_PA6T_DER);
660 SYSFS_SPRSETUP(mer, SPRN_PA6T_MER);
661 SYSFS_SPRSETUP(ber, SPRN_PA6T_BER);
662 SYSFS_SPRSETUP(ier, SPRN_PA6T_IER);
663 SYSFS_SPRSETUP(sier, SPRN_PA6T_SIER);
664 SYSFS_SPRSETUP(siar, SPRN_PA6T_SIAR);
665 SYSFS_SPRSETUP(tsr0, SPRN_PA6T_TSR0);
666 SYSFS_SPRSETUP(tsr1, SPRN_PA6T_TSR1);
667 SYSFS_SPRSETUP(tsr2, SPRN_PA6T_TSR2);
668 SYSFS_SPRSETUP(tsr3, SPRN_PA6T_TSR3);
669 #endif /* HAS_PPC_PA6T */
670 
671 #ifdef HAS_PPC_PMC_IBM
672 static struct device_attribute ibm_common_attrs[] = {
673 	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
674 	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
675 };
676 #endif /* HAS_PPC_PMC_IBM */
677 
678 #ifdef HAS_PPC_PMC_G4
679 static struct device_attribute g4_common_attrs[] = {
680 	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
681 	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
682 	__ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2),
683 };
684 #endif /* HAS_PPC_PMC_G4 */
685 
686 #ifdef HAS_PPC_PMC_CLASSIC
687 static struct device_attribute classic_pmc_attrs[] = {
688 	__ATTR(pmc1, 0600, show_pmc1, store_pmc1),
689 	__ATTR(pmc2, 0600, show_pmc2, store_pmc2),
690 	__ATTR(pmc3, 0600, show_pmc3, store_pmc3),
691 	__ATTR(pmc4, 0600, show_pmc4, store_pmc4),
692 	__ATTR(pmc5, 0600, show_pmc5, store_pmc5),
693 	__ATTR(pmc6, 0600, show_pmc6, store_pmc6),
694 #ifdef HAS_PPC_PMC56
695 	__ATTR(pmc7, 0600, show_pmc7, store_pmc7),
696 	__ATTR(pmc8, 0600, show_pmc8, store_pmc8),
697 #endif
698 };
699 #endif
700 
701 #if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
702 static struct device_attribute pa6t_attrs[] = {
703 #ifdef HAS_PPC_PMC_PA6T
704 	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
705 	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
706 	__ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0),
707 	__ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1),
708 	__ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2),
709 	__ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
710 	__ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
711 	__ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
712 #endif
713 #ifdef HAS_PPC_PA6T
714 	__ATTR(hid0, 0600, show_hid0, store_hid0),
715 	__ATTR(hid1, 0600, show_hid1, store_hid1),
716 	__ATTR(hid4, 0600, show_hid4, store_hid4),
717 	__ATTR(hid5, 0600, show_hid5, store_hid5),
718 	__ATTR(ima0, 0600, show_ima0, store_ima0),
719 	__ATTR(ima1, 0600, show_ima1, store_ima1),
720 	__ATTR(ima2, 0600, show_ima2, store_ima2),
721 	__ATTR(ima3, 0600, show_ima3, store_ima3),
722 	__ATTR(ima4, 0600, show_ima4, store_ima4),
723 	__ATTR(ima5, 0600, show_ima5, store_ima5),
724 	__ATTR(ima6, 0600, show_ima6, store_ima6),
725 	__ATTR(ima7, 0600, show_ima7, store_ima7),
726 	__ATTR(ima8, 0600, show_ima8, store_ima8),
727 	__ATTR(ima9, 0600, show_ima9, store_ima9),
728 	__ATTR(imaat, 0600, show_imaat, store_imaat),
729 	__ATTR(btcr, 0600, show_btcr, store_btcr),
730 	__ATTR(pccr, 0600, show_pccr, store_pccr),
731 	__ATTR(rpccr, 0600, show_rpccr, store_rpccr),
732 	__ATTR(der, 0600, show_der, store_der),
733 	__ATTR(mer, 0600, show_mer, store_mer),
734 	__ATTR(ber, 0600, show_ber, store_ber),
735 	__ATTR(ier, 0600, show_ier, store_ier),
736 	__ATTR(sier, 0600, show_sier, store_sier),
737 	__ATTR(siar, 0600, show_siar, store_siar),
738 	__ATTR(tsr0, 0600, show_tsr0, store_tsr0),
739 	__ATTR(tsr1, 0600, show_tsr1, store_tsr1),
740 	__ATTR(tsr2, 0600, show_tsr2, store_tsr2),
741 	__ATTR(tsr3, 0600, show_tsr3, store_tsr3),
742 #endif /* HAS_PPC_PA6T */
743 };
744 #endif
745 
#ifdef CONFIG_PPC_SVM
/* Report whether we are running as a secure (SVM) guest: 0 or 1. */
static ssize_t show_svm(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", is_secure_guest());
}
static DEVICE_ATTR(svm, 0444, show_svm, NULL);

/* Publish the read-only "svm" attribute under the cpu subsystem root. */
static void create_svm_file(void)
{
	device_create_file(cpu_subsys.dev_root, &dev_attr_svm);
}
#else
/* No SVM support configured: nothing to publish. */
static void create_svm_file(void)
{
}
#endif /* CONFIG_PPC_SVM */
762 
/*
 * CPU hotplug "online" callback: create the sysfs attributes for @cpu.
 * The attribute set depends on the PMC type reported by cur_cpu_spec
 * and on CPU/firmware features; unregister_cpu_online() must remove
 * exactly the same set.
 */
static int register_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	struct device_attribute *attrs, *pmc_attrs;
	int i, nattrs;

	/* For cpus present at boot a reference was already grabbed in register_cpu() */
	if (!s->of_node)
		s->of_node = of_get_cpu_node(cpu, NULL);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SMT))
		device_create_file(s, &dev_attr_smt_snooze_delay);
#endif

	/* PMC stuff: pick common attrs + per-counter attrs by PMC type. */
	switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
	case PPC_PMC_IBM:
		attrs = ibm_common_attrs;
		nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
	case PPC_PMC_G4:
		attrs = g4_common_attrs;
		nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_G4 */
#if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
	case PPC_PMC_PA6T:
		/* PA Semi starts counting at PMC0 */
		attrs = pa6t_attrs;
		nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
		pmc_attrs = NULL;
		break;
#endif
	default:
		attrs = NULL;
		nattrs = 0;
		pmc_attrs = NULL;
	}

	for (i = 0; i < nattrs; i++)
		device_create_file(s, &attrs[i]);

	/* Only expose as many counters as this CPU actually has. */
	if (pmc_attrs)
		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
			device_create_file(s, &pmc_attrs[i]);

#ifdef CONFIG_PPC64
#ifdef	CONFIG_PMU_SYSFS
	if (cpu_has_feature(CPU_FTR_MMCRA))
		device_create_file(s, &dev_attr_mmcra);
#endif /* CONFIG_PMU_SYSFS */

	if (cpu_has_feature(CPU_FTR_PURR)) {
		/* PURR is writable only when not running under a hypervisor. */
		if (!firmware_has_feature(FW_FEATURE_LPAR))
			add_write_permission_dev_attr(&dev_attr_purr);
		device_create_file(s, &dev_attr_purr);
	}

	if (cpu_has_feature(CPU_FTR_SPURR))
		device_create_file(s, &dev_attr_spurr);

	if (cpu_has_feature(CPU_FTR_DSCR))
		device_create_file(s, &dev_attr_dscr);

	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
		device_create_file(s, &dev_attr_pir);

	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
		!firmware_has_feature(FW_FEATURE_LPAR))
		device_create_file(s, &dev_attr_tscr);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
	/* PW20/AltiVec idle controls exist only on e6500 cores. */
	if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
		device_create_file(s, &dev_attr_pw20_state);
		device_create_file(s, &dev_attr_pw20_wait_time);

		device_create_file(s, &dev_attr_altivec_idle);
		device_create_file(s, &dev_attr_altivec_idle_wait_time);
	}
#endif
	cacheinfo_cpu_online(cpu);
	return 0;
}
854 
855 #ifdef CONFIG_HOTPLUG_CPU
/*
 * CPU hotplug "offline" callback: remove every sysfs attribute that
 * register_cpu_online() created for @cpu, and drop the of_node
 * reference taken there.  The attribute selection logic must stay in
 * lockstep with register_cpu_online().
 */
static int unregister_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	struct device_attribute *attrs, *pmc_attrs;
	int i, nattrs;

	/* Only hotpluggable cpus can ever be taken offline. */
	BUG_ON(!c->hotpluggable);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SMT))
		device_remove_file(s, &dev_attr_smt_snooze_delay);
#endif

	/* PMC stuff: mirror the selection in register_cpu_online(). */
	switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
	case PPC_PMC_IBM:
		attrs = ibm_common_attrs;
		nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
	case PPC_PMC_G4:
		attrs = g4_common_attrs;
		nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_G4 */
#if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
	case PPC_PMC_PA6T:
		/* PA Semi starts counting at PMC0 */
		attrs = pa6t_attrs;
		nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
		pmc_attrs = NULL;
		break;
#endif
	default:
		attrs = NULL;
		nattrs = 0;
		pmc_attrs = NULL;
	}

	for (i = 0; i < nattrs; i++)
		device_remove_file(s, &attrs[i]);

	if (pmc_attrs)
		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
			device_remove_file(s, &pmc_attrs[i]);

#ifdef CONFIG_PPC64
#ifdef CONFIG_PMU_SYSFS
	if (cpu_has_feature(CPU_FTR_MMCRA))
		device_remove_file(s, &dev_attr_mmcra);
#endif /* CONFIG_PMU_SYSFS */

	if (cpu_has_feature(CPU_FTR_PURR))
		device_remove_file(s, &dev_attr_purr);

	if (cpu_has_feature(CPU_FTR_SPURR))
		device_remove_file(s, &dev_attr_spurr);

	if (cpu_has_feature(CPU_FTR_DSCR))
		device_remove_file(s, &dev_attr_dscr);

	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
		device_remove_file(s, &dev_attr_pir);

	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
		!firmware_has_feature(FW_FEATURE_LPAR))
		device_remove_file(s, &dev_attr_tscr);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
		device_remove_file(s, &dev_attr_pw20_state);
		device_remove_file(s, &dev_attr_pw20_wait_time);

		device_remove_file(s, &dev_attr_altivec_idle);
		device_remove_file(s, &dev_attr_altivec_idle_wait_time);
	}
#endif
	cacheinfo_cpu_offline(cpu);
	/* Drop the reference taken in register_cpu_online(). */
	of_node_put(s->of_node);
	s->of_node = NULL;
	return 0;
}
944 #else /* !CONFIG_HOTPLUG_CPU */
945 #define unregister_cpu_online NULL
946 #endif
947 
948 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
949 ssize_t arch_cpu_probe(const char *buf, size_t count)
950 {
951 	if (ppc_md.cpu_probe)
952 		return ppc_md.cpu_probe(buf, count);
953 
954 	return -EINVAL;
955 }
956 
957 ssize_t arch_cpu_release(const char *buf, size_t count)
958 {
959 	if (ppc_md.cpu_release)
960 		return ppc_md.cpu_release(buf, count);
961 
962 	return -EINVAL;
963 }
964 #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
965 
966 static DEFINE_MUTEX(cpu_mutex);
967 
968 int cpu_add_dev_attr(struct device_attribute *attr)
969 {
970 	int cpu;
971 
972 	mutex_lock(&cpu_mutex);
973 
974 	for_each_possible_cpu(cpu) {
975 		device_create_file(get_cpu_device(cpu), attr);
976 	}
977 
978 	mutex_unlock(&cpu_mutex);
979 	return 0;
980 }
981 EXPORT_SYMBOL_GPL(cpu_add_dev_attr);
982 
983 int cpu_add_dev_attr_group(struct attribute_group *attrs)
984 {
985 	int cpu;
986 	struct device *dev;
987 	int ret;
988 
989 	mutex_lock(&cpu_mutex);
990 
991 	for_each_possible_cpu(cpu) {
992 		dev = get_cpu_device(cpu);
993 		ret = sysfs_create_group(&dev->kobj, attrs);
994 		WARN_ON(ret != 0);
995 	}
996 
997 	mutex_unlock(&cpu_mutex);
998 	return 0;
999 }
1000 EXPORT_SYMBOL_GPL(cpu_add_dev_attr_group);
1001 
1002 
1003 void cpu_remove_dev_attr(struct device_attribute *attr)
1004 {
1005 	int cpu;
1006 
1007 	mutex_lock(&cpu_mutex);
1008 
1009 	for_each_possible_cpu(cpu) {
1010 		device_remove_file(get_cpu_device(cpu), attr);
1011 	}
1012 
1013 	mutex_unlock(&cpu_mutex);
1014 }
1015 EXPORT_SYMBOL_GPL(cpu_remove_dev_attr);
1016 
1017 void cpu_remove_dev_attr_group(struct attribute_group *attrs)
1018 {
1019 	int cpu;
1020 	struct device *dev;
1021 
1022 	mutex_lock(&cpu_mutex);
1023 
1024 	for_each_possible_cpu(cpu) {
1025 		dev = get_cpu_device(cpu);
1026 		sysfs_remove_group(&dev->kobj, attrs);
1027 	}
1028 
1029 	mutex_unlock(&cpu_mutex);
1030 }
1031 EXPORT_SYMBOL_GPL(cpu_remove_dev_attr_group);
1032 
1033 
1034 /* NUMA stuff */
1035 
1036 #ifdef CONFIG_NUMA
1037 static void register_nodes(void)
1038 {
1039 	int i;
1040 
1041 	for (i = 0; i < MAX_NUMNODES; i++)
1042 		register_one_node(i);
1043 }
1044 
1045 int sysfs_add_device_to_node(struct device *dev, int nid)
1046 {
1047 	struct node *node = node_devices[nid];
1048 	return sysfs_create_link(&node->dev.kobj, &dev->kobj,
1049 			kobject_name(&dev->kobj));
1050 }
1051 EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);
1052 
1053 void sysfs_remove_device_from_node(struct device *dev, int nid)
1054 {
1055 	struct node *node = node_devices[nid];
1056 	sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj));
1057 }
1058 EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);
1059 
1060 #else
/* No NUMA support configured: nothing to register. */
static void register_nodes(void)
{
	return;
}
1066 #endif
1067 
1068 /* Only valid if CPU is present. */
1069 static ssize_t show_physical_id(struct device *dev,
1070 				struct device_attribute *attr, char *buf)
1071 {
1072 	struct cpu *cpu = container_of(dev, struct cpu, dev);
1073 
1074 	return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->dev.id));
1075 }
1076 static DEVICE_ATTR(physical_id, 0444, show_physical_id, NULL);
1077 
/*
 * Boot-time init: register NUMA nodes and CPU devices, install the
 * hotplug online/offline callbacks, and create the PPC64 dscr_default
 * and SVM attributes.
 */
static int __init topology_init(void)
{
	int cpu, r;

	register_nodes();

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		/*
		 * For now, we just see if the system supports making
		 * the RTAS calls for CPU hotplug.  But, there may be a
		 * more comprehensive way to do this for an individual
		 * CPU.  For instance, the boot cpu might never be valid
		 * for hotplugging.
		 */
		if (ppc_md.cpu_die)
			c->hotpluggable = 1;

		if (cpu_online(cpu) || c->hotpluggable) {
			register_cpu(c, cpu);

			device_create_file(&c->dev, &dev_attr_physical_id);
		}
	}
	/* Runs register_cpu_online() on every already-online CPU too. */
	r = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/topology:online",
			      register_cpu_online, unregister_cpu_online);
	WARN_ON(r < 0);
#ifdef CONFIG_PPC64
	sysfs_create_dscr_default();
#endif /* CONFIG_PPC64 */

	create_svm_file();

	return 0;
}
1114 subsys_initcall(topology_init);
1115