xref: /openbmc/linux/arch/powerpc/kernel/sysfs.c (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
1  // SPDX-License-Identifier: GPL-2.0-only
2  #include <linux/device.h>
3  #include <linux/cpu.h>
4  #include <linux/smp.h>
5  #include <linux/percpu.h>
6  #include <linux/init.h>
7  #include <linux/sched.h>
8  #include <linux/export.h>
9  #include <linux/nodemask.h>
10  #include <linux/cpumask.h>
11  #include <linux/notifier.h>
12  #include <linux/of.h>
13  
14  #include <asm/current.h>
15  #include <asm/processor.h>
16  #include <asm/cputable.h>
17  #include <asm/hvcall.h>
18  #include <asm/machdep.h>
19  #include <asm/smp.h>
20  #include <asm/pmc.h>
21  #include <asm/firmware.h>
22  #include <asm/idle.h>
23  #include <asm/svm.h>
24  
25  #include "cacheinfo.h"
26  #include "setup.h"
27  
28  #ifdef CONFIG_PPC64
29  #include <asm/paca.h>
30  #include <asm/lppaca.h>
31  #endif
32  
33  static DEFINE_PER_CPU(struct cpu, cpu_devices);
34  
35  #ifdef CONFIG_PPC64
36  
37  /*
38   * Snooze delay has not been hooked up since 3fa8cad82b94 ("powerpc/pseries/cpuidle:
39   * smt-snooze-delay cleanup.") and has been broken even longer. As was foretold in
40   * 2014:
41   *
42   *  "ppc64_util currently utilises it. Once we fix ppc64_util, propose to clean
43   *  up the kernel code."
44   *
45   * powerpc-utils stopped using it as of 1.3.8. At some point in the future this
46   * code should be removed.
47   */
48  
49  static ssize_t store_smt_snooze_delay(struct device *dev,
50  				      struct device_attribute *attr,
51  				      const char *buf,
52  				      size_t count)
53  {
54  	pr_warn_once("%s (%d) stored to unsupported smt_snooze_delay, which has no effect.\n",
55  		     current->comm, current->pid);
56  	return count;
57  }
58  
59  static ssize_t show_smt_snooze_delay(struct device *dev,
60  				     struct device_attribute *attr,
61  				     char *buf)
62  {
63  	pr_warn_once("%s (%d) read from unsupported smt_snooze_delay\n",
64  		     current->comm, current->pid);
65  	return sprintf(buf, "100\n");
66  }
67  
68  static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
69  		   store_smt_snooze_delay);
70  
71  static int __init setup_smt_snooze_delay(char *str)
72  {
73  	if (!cpu_has_feature(CPU_FTR_SMT))
74  		return 1;
75  
76  	pr_warn("smt-snooze-delay command line option has no effect\n");
77  	return 1;
78  }
79  __setup("smt-snooze-delay=", setup_smt_snooze_delay);
80  
81  #endif /* CONFIG_PPC64 */
82  
83  #define __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, EXTRA) \
84  static void read_##NAME(void *val) \
85  { \
86  	*(unsigned long *)val = mfspr(ADDRESS);	\
87  } \
88  static void write_##NAME(void *val) \
89  { \
90  	EXTRA; \
91  	mtspr(ADDRESS, *(unsigned long *)val);	\
92  }
93  
94  #define __SYSFS_SPRSETUP_SHOW_STORE(NAME) \
95  static ssize_t show_##NAME(struct device *dev, \
96  			struct device_attribute *attr, \
97  			char *buf) \
98  { \
99  	struct cpu *cpu = container_of(dev, struct cpu, dev); \
100  	unsigned long val; \
101  	smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1);	\
102  	return sprintf(buf, "%lx\n", val); \
103  } \
104  static ssize_t __used \
105  	store_##NAME(struct device *dev, struct device_attribute *attr, \
106  			const char *buf, size_t count) \
107  { \
108  	struct cpu *cpu = container_of(dev, struct cpu, dev); \
109  	unsigned long val; \
110  	int ret = sscanf(buf, "%lx", &val); \
111  	if (ret != 1) \
112  		return -EINVAL; \
113  	smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
114  	return count; \
115  }
116  
117  #define SYSFS_PMCSETUP(NAME, ADDRESS) \
118  	__SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ppc_enable_pmcs()) \
119  	__SYSFS_SPRSETUP_SHOW_STORE(NAME)
120  #define SYSFS_SPRSETUP(NAME, ADDRESS) \
121  	__SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ) \
122  	__SYSFS_SPRSETUP_SHOW_STORE(NAME)
123  
124  #define SYSFS_SPRSETUP_SHOW_STORE(NAME) \
125  	__SYSFS_SPRSETUP_SHOW_STORE(NAME)
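/*
 * As a concrete example of the expansion: SYSFS_SPRSETUP(purr, SPRN_PURR)
 * generates read_purr()/write_purr() plus show_purr()/store_purr(); the
 * show/store handlers parse and print the value as hex ("%lx") and use
 * smp_call_function_single() so the mfspr/mtspr always runs on the CPU
 * whose sysfs file is being accessed.
 */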
126  
127  #ifdef CONFIG_PPC64
128  
129  /*
130   * This is the system wide DSCR register default value. Any
131   * change to this default value through the sysfs interface
132   * will update all per cpu DSCR default values across the
133   * system stored in their respective PACA structures.
134   */
135  static unsigned long dscr_default;
136  
137  /**
138   * read_dscr() - Fetch the cpu specific DSCR default
139   * @val:	Returned cpu specific DSCR default value
140   *
141   * This function returns the per cpu DSCR default value
142   * for any cpu, as stored in that cpu's PACA structure.
143   */
144  static void read_dscr(void *val)
145  {
146  	*(unsigned long *)val = get_paca()->dscr_default;
147  }
148  
149  
150  /**
151   * write_dscr() - Update the cpu specific DSCR default
152   * @val:	New cpu specific DSCR default value to update
153   *
154   * This function updates the per cpu DSCR default value
155   * for any cpu, as stored in that cpu's PACA structure.
156   */
157  static void write_dscr(void *val)
158  {
159  	get_paca()->dscr_default = *(unsigned long *)val;
160  	if (!current->thread.dscr_inherit) {
161  		current->thread.dscr = *(unsigned long *)val;
162  		mtspr(SPRN_DSCR, *(unsigned long *)val);
163  	}
164  }
165  
166  SYSFS_SPRSETUP_SHOW_STORE(dscr);
167  static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
168  
169  static void add_write_permission_dev_attr(struct device_attribute *attr)
170  {
171  	attr->attr.mode |= 0200;
172  }
173  
174  /**
175   * show_dscr_default() - Fetch the system wide DSCR default
176   * @dev:	Device structure
177   * @attr:	Device attribute structure
178   * @buf:	Interface buffer
179   *
180   * This function returns the system wide DSCR default value.
181   */
182  static ssize_t show_dscr_default(struct device *dev,
183  		struct device_attribute *attr, char *buf)
184  {
185  	return sprintf(buf, "%lx\n", dscr_default);
186  }
187  
188  /**
189   * store_dscr_default() - Update the system wide DSCR default
190   * @dev:	Device structure
191   * @attr:	Device attribute structure
192   * @buf:	Interface buffer
193   * @count:	Size of the update
194   *
195   * This function updates the system wide DSCR default value.
196   */
197  static ssize_t __used store_dscr_default(struct device *dev,
198  		struct device_attribute *attr, const char *buf,
199  		size_t count)
200  {
201  	unsigned long val;
202  	int ret = 0;
203  
204  	ret = sscanf(buf, "%lx", &val);
205  	if (ret != 1)
206  		return -EINVAL;
207  	dscr_default = val;
208  
209  	on_each_cpu(write_dscr, &val, 1);
210  
211  	return count;
212  }
213  
214  static DEVICE_ATTR(dscr_default, 0600,
215  		show_dscr_default, store_dscr_default);
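/*
 * A sketch of how the two DSCR files interact, assuming the usual
 * /sys/devices/system/cpu layout and hypothetical hex values:
 *
 *   # echo 10 > /sys/devices/system/cpu/dscr_default	(i.e. 0x10)
 *   # cat /sys/devices/system/cpu/cpu2/dscr
 *   10
 *
 * Writing dscr_default runs write_dscr() on every online CPU, so each
 * CPU's PACA default (and therefore its per-cpu dscr file) picks up the
 * new value; tasks that have set their own DSCR (dscr_inherit) keep
 * their per-thread value.
 */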
216  
217  static void __init sysfs_create_dscr_default(void)
218  {
219  	if (cpu_has_feature(CPU_FTR_DSCR)) {
220  		struct device *dev_root;
221  		int cpu;
222  
223  		dscr_default = spr_default_dscr;
224  		for_each_possible_cpu(cpu)
225  			paca_ptrs[cpu]->dscr_default = dscr_default;
226  
227  		dev_root = bus_get_dev_root(&cpu_subsys);
228  		if (dev_root) {
229  			device_create_file(dev_root, &dev_attr_dscr_default);
230  			put_device(dev_root);
231  		}
232  	}
233  }
234  #endif /* CONFIG_PPC64 */
235  
236  #ifdef CONFIG_PPC_E500
237  #define MAX_BIT				63
238  
239  static u64 pw20_wt;
240  static u64 altivec_idle_wt;
241  
242  static unsigned int get_idle_ticks_bit(u64 ns)
243  {
244  	u64 cycle;
245  
246  	if (ns >= 10000)
247  		cycle = div_u64(ns + 500, 1000) * tb_ticks_per_usec;
248  	else
249  		cycle = div_u64(ns * tb_ticks_per_usec, 1000);
250  
251  	if (!cycle)
252  		return 0;
253  
254  	return ilog2(cycle);
255  }
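/*
 * Worked example, assuming a hypothetical 41 ticks/usec timebase (the
 * frequency used in the TB[] table further below): a request of 100 ns
 * gives cycle = 100 * 41 / 1000 = 4, so this returns ilog2(4) = 2 and
 * set_pw20_wait_entry_bit()/set_altivec_idle_wait_entry_bit() program a
 * count of MAX_BIT - 2 = 61, i.e. the 98~195 ns row of that table.
 */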
256  
257  static void do_show_pwrmgtcr0(void *val)
258  {
259  	u32 *value = val;
260  
261  	*value = mfspr(SPRN_PWRMGTCR0);
262  }
263  
264  static ssize_t show_pw20_state(struct device *dev,
265  				struct device_attribute *attr, char *buf)
266  {
267  	u32 value;
268  	unsigned int cpu = dev->id;
269  
270  	smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
271  
272  	value &= PWRMGTCR0_PW20_WAIT;
273  
274  	return sprintf(buf, "%u\n", value ? 1 : 0);
275  }
276  
277  static void do_store_pw20_state(void *val)
278  {
279  	u32 *value = val;
280  	u32 pw20_state;
281  
282  	pw20_state = mfspr(SPRN_PWRMGTCR0);
283  
284  	if (*value)
285  		pw20_state |= PWRMGTCR0_PW20_WAIT;
286  	else
287  		pw20_state &= ~PWRMGTCR0_PW20_WAIT;
288  
289  	mtspr(SPRN_PWRMGTCR0, pw20_state);
290  }
291  
292  static ssize_t store_pw20_state(struct device *dev,
293  				struct device_attribute *attr,
294  				const char *buf, size_t count)
295  {
296  	u32 value;
297  	unsigned int cpu = dev->id;
298  
299  	if (kstrtou32(buf, 0, &value))
300  		return -EINVAL;
301  
302  	if (value > 1)
303  		return -EINVAL;
304  
305  	smp_call_function_single(cpu, do_store_pw20_state, &value, 1);
306  
307  	return count;
308  }
309  
310  static ssize_t show_pw20_wait_time(struct device *dev,
311  				struct device_attribute *attr, char *buf)
312  {
313  	u32 value;
314  	u64 tb_cycle = 1;
315  	u64 time;
316  
317  	unsigned int cpu = dev->id;
318  
319  	if (!pw20_wt) {
320  		smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
321  		value = (value & PWRMGTCR0_PW20_ENT) >>
322  					PWRMGTCR0_PW20_ENT_SHIFT;
323  
324  		tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
325  		/* convert timebase cycles to ns */
326  		if (tb_ticks_per_usec > 1000) {
327  			time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
328  		} else {
329  			u32 rem_us;
330  
331  			time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
332  						&rem_us);
333  			time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
334  		}
335  	} else {
336  		time = pw20_wt;
337  	}
338  
339  	return sprintf(buf, "%llu\n", time > 0 ? time : 0);
340  }
341  
342  static void set_pw20_wait_entry_bit(void *val)
343  {
344  	u32 *value = val;
345  	u32 pw20_idle;
346  
347  	pw20_idle = mfspr(SPRN_PWRMGTCR0);
348  
349  	/* Set Automatic PW20 Core Idle Count */
350  	/* clear count */
351  	pw20_idle &= ~PWRMGTCR0_PW20_ENT;
352  
353  	/* set count */
354  	pw20_idle |= ((MAX_BIT - *value) << PWRMGTCR0_PW20_ENT_SHIFT);
355  
356  	mtspr(SPRN_PWRMGTCR0, pw20_idle);
357  }
358  
359  static ssize_t store_pw20_wait_time(struct device *dev,
360  				struct device_attribute *attr,
361  				const char *buf, size_t count)
362  {
363  	u32 entry_bit;
364  	u64 value;
365  
366  	unsigned int cpu = dev->id;
367  
368  	if (kstrtou64(buf, 0, &value))
369  		return -EINVAL;
370  
371  	if (!value)
372  		return -EINVAL;
373  
374  	entry_bit = get_idle_ticks_bit(value);
375  	if (entry_bit > MAX_BIT)
376  		return -EINVAL;
377  
378  	pw20_wt = value;
379  
380  	smp_call_function_single(cpu, set_pw20_wait_entry_bit,
381  				&entry_bit, 1);
382  
383  	return count;
384  }
385  
386  static ssize_t show_altivec_idle(struct device *dev,
387  				struct device_attribute *attr, char *buf)
388  {
389  	u32 value;
390  	unsigned int cpu = dev->id;
391  
392  	smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
393  
394  	value &= PWRMGTCR0_AV_IDLE_PD_EN;
395  
396  	return sprintf(buf, "%u\n", value ? 1 : 0);
397  }
398  
399  static void do_store_altivec_idle(void *val)
400  {
401  	u32 *value = val;
402  	u32 altivec_idle;
403  
404  	altivec_idle = mfspr(SPRN_PWRMGTCR0);
405  
406  	if (*value)
407  		altivec_idle |= PWRMGTCR0_AV_IDLE_PD_EN;
408  	else
409  		altivec_idle &= ~PWRMGTCR0_AV_IDLE_PD_EN;
410  
411  	mtspr(SPRN_PWRMGTCR0, altivec_idle);
412  }
413  
414  static ssize_t store_altivec_idle(struct device *dev,
415  				struct device_attribute *attr,
416  				const char *buf, size_t count)
417  {
418  	u32 value;
419  	unsigned int cpu = dev->id;
420  
421  	if (kstrtou32(buf, 0, &value))
422  		return -EINVAL;
423  
424  	if (value > 1)
425  		return -EINVAL;
426  
427  	smp_call_function_single(cpu, do_store_altivec_idle, &value, 1);
428  
429  	return count;
430  }
431  
432  static ssize_t show_altivec_idle_wait_time(struct device *dev,
433  				struct device_attribute *attr, char *buf)
434  {
435  	u32 value;
436  	u64 tb_cycle = 1;
437  	u64 time;
438  
439  	unsigned int cpu = dev->id;
440  
441  	if (!altivec_idle_wt) {
442  		smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
443  		value = (value & PWRMGTCR0_AV_IDLE_CNT) >>
444  					PWRMGTCR0_AV_IDLE_CNT_SHIFT;
445  
446  		tb_cycle = (tb_cycle << (MAX_BIT - value + 1));
447  		/* convert timebase cycles to ns */
448  		if (tb_ticks_per_usec > 1000) {
449  			time = div_u64(tb_cycle, tb_ticks_per_usec / 1000);
450  		} else {
451  			u32 rem_us;
452  
453  			time = div_u64_rem(tb_cycle, tb_ticks_per_usec,
454  						&rem_us);
455  			time = time * 1000 + rem_us * 1000 / tb_ticks_per_usec;
456  		}
457  	} else {
458  		time = altivec_idle_wt;
459  	}
460  
461  	return sprintf(buf, "%llu\n", time > 0 ? time : 0);
462  }
463  
464  static void set_altivec_idle_wait_entry_bit(void *val)
465  {
466  	u32 *value = val;
467  	u32 altivec_idle;
468  
469  	altivec_idle = mfspr(SPRN_PWRMGTCR0);
470  
471  	/* Set Automatic AltiVec Idle Count */
472  	/* clear count */
473  	altivec_idle &= ~PWRMGTCR0_AV_IDLE_CNT;
474  
475  	/* set count */
476  	altivec_idle |= ((MAX_BIT - *value) << PWRMGTCR0_AV_IDLE_CNT_SHIFT);
477  
478  	mtspr(SPRN_PWRMGTCR0, altivec_idle);
479  }
480  
481  static ssize_t store_altivec_idle_wait_time(struct device *dev,
482  				struct device_attribute *attr,
483  				const char *buf, size_t count)
484  {
485  	u32 entry_bit;
486  	u64 value;
487  
488  	unsigned int cpu = dev->id;
489  
490  	if (kstrtou64(buf, 0, &value))
491  		return -EINVAL;
492  
493  	if (!value)
494  		return -EINVAL;
495  
496  	entry_bit = get_idle_ticks_bit(value);
497  	if (entry_bit > MAX_BIT)
498  		return -EINVAL;
499  
500  	altivec_idle_wt = value;
501  
502  	smp_call_function_single(cpu, set_altivec_idle_wait_entry_bit,
503  				&entry_bit, 1);
504  
505  	return count;
506  }
507  
508  /*
509   * Enable/Disable interface:
510   * 0, disable. 1, enable.
511   */
512  static DEVICE_ATTR(pw20_state, 0600, show_pw20_state, store_pw20_state);
513  static DEVICE_ATTR(altivec_idle, 0600, show_altivec_idle, store_altivec_idle);
514  
515  /*
516   * Set wait time interface (nanoseconds).
517   * Example: based on a TB frequency of 41MHz.
518   * 1~48(ns): TB[63]
519   * 49~97(ns): TB[62]
520   * 98~195(ns): TB[61]
521   * 196~390(ns): TB[60]
522   * 391~780(ns): TB[59]
523   * 781~1560(ns): TB[58]
524   * ...
525   */
526  static DEVICE_ATTR(pw20_wait_time, 0600,
527  			show_pw20_wait_time,
528  			store_pw20_wait_time);
529  static DEVICE_ATTR(altivec_idle_wait_time, 0600,
530  			show_altivec_idle_wait_time,
531  			store_altivec_idle_wait_time);
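/*
 * Example usage on an e6500 core, with hypothetical values and the usual
 * /sys/devices/system/cpu/cpuN/ layout assumed:
 *
 *   # echo 1 > /sys/devices/system/cpu/cpu0/pw20_state
 *   # echo 10000 > /sys/devices/system/cpu/cpu0/pw20_wait_time
 *   # cat /sys/devices/system/cpu/cpu0/altivec_idle_wait_time
 *
 * The wait time is quantized: the written value is mapped to a timebase
 * bit by get_idle_ticks_bit(), so the effective hardware wait is a
 * power-of-two number of timebase ticks, while reads report the value
 * last written (or the current hardware setting if none was written).
 */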
532  #endif
533  
534  /*
535   * Enabling PMCs will slow partition context switch times so we only do
536   * it the first time we write to the PMCs.
537   */
538  
539  static DEFINE_PER_CPU(char, pmcs_enabled);
540  
541  void ppc_enable_pmcs(void)
542  {
543  	ppc_set_pmu_inuse(1);
544  
545  	/* Only need to enable them once */
546  	if (__this_cpu_read(pmcs_enabled))
547  		return;
548  
549  	__this_cpu_write(pmcs_enabled, 1);
550  
551  	if (ppc_md.enable_pmcs)
552  		ppc_md.enable_pmcs();
553  }
554  EXPORT_SYMBOL(ppc_enable_pmcs);
555  
556  
557  
558  /* Let's define all possible registers; we'll only hook up the ones
559   * that are implemented on the current processor.
560   */
561  
562  #ifdef CONFIG_PMU_SYSFS
563  #if defined(CONFIG_PPC64) || defined(CONFIG_PPC_BOOK3S_32)
564  #define HAS_PPC_PMC_CLASSIC	1
565  #define HAS_PPC_PMC_IBM		1
566  #endif
567  
568  #ifdef CONFIG_PPC64
569  #define HAS_PPC_PMC_PA6T	1
570  #define HAS_PPC_PMC56          1
571  #endif
572  
573  #ifdef CONFIG_PPC_BOOK3S_32
574  #define HAS_PPC_PMC_G4		1
575  #endif
576  #endif /* CONFIG_PMU_SYSFS */
577  
578  #if defined(CONFIG_PPC64) && defined(CONFIG_DEBUG_MISC)
579  #define HAS_PPC_PA6T
580  #endif
581  /*
582   * SPRs which are not related to PMU.
583   */
584  #ifdef CONFIG_PPC64
585  SYSFS_SPRSETUP(purr, SPRN_PURR);
586  SYSFS_SPRSETUP(spurr, SPRN_SPURR);
587  SYSFS_SPRSETUP(pir, SPRN_PIR);
588  SYSFS_SPRSETUP(tscr, SPRN_TSCR);
589  
590  /*
591    Let's only enable read for phyp resources and
592    enable write when needed with a separate function.
593    Let's be conservative and default to pseries.
594  */
595  static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
596  static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
597  static DEVICE_ATTR(pir, 0400, show_pir, NULL);
598  static DEVICE_ATTR(tscr, 0600, show_tscr, store_tscr);
599  #endif /* CONFIG_PPC64 */
600  
601  #ifdef HAS_PPC_PMC_CLASSIC
602  SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
603  SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
604  SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
605  SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
606  SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
607  SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
608  SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
609  SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
610  #endif
611  
612  #ifdef HAS_PPC_PMC_G4
613  SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2);
614  #endif
615  
616  #ifdef HAS_PPC_PMC56
617  SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
618  SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
619  
620  SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
621  SYSFS_PMCSETUP(mmcr3, SPRN_MMCR3);
622  
623  static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
624  static DEVICE_ATTR(mmcr3, 0600, show_mmcr3, store_mmcr3);
625  #endif /* HAS_PPC_PMC56 */
626  
627  
628  
629  
630  #ifdef HAS_PPC_PMC_PA6T
631  SYSFS_PMCSETUP(pa6t_pmc0, SPRN_PA6T_PMC0);
632  SYSFS_PMCSETUP(pa6t_pmc1, SPRN_PA6T_PMC1);
633  SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2);
634  SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
635  SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
636  SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);
637  #endif
638  
639  #ifdef HAS_PPC_PA6T
640  SYSFS_SPRSETUP(hid0, SPRN_HID0);
641  SYSFS_SPRSETUP(hid1, SPRN_HID1);
642  SYSFS_SPRSETUP(hid4, SPRN_HID4);
643  SYSFS_SPRSETUP(hid5, SPRN_HID5);
644  SYSFS_SPRSETUP(ima0, SPRN_PA6T_IMA0);
645  SYSFS_SPRSETUP(ima1, SPRN_PA6T_IMA1);
646  SYSFS_SPRSETUP(ima2, SPRN_PA6T_IMA2);
647  SYSFS_SPRSETUP(ima3, SPRN_PA6T_IMA3);
648  SYSFS_SPRSETUP(ima4, SPRN_PA6T_IMA4);
649  SYSFS_SPRSETUP(ima5, SPRN_PA6T_IMA5);
650  SYSFS_SPRSETUP(ima6, SPRN_PA6T_IMA6);
651  SYSFS_SPRSETUP(ima7, SPRN_PA6T_IMA7);
652  SYSFS_SPRSETUP(ima8, SPRN_PA6T_IMA8);
653  SYSFS_SPRSETUP(ima9, SPRN_PA6T_IMA9);
654  SYSFS_SPRSETUP(imaat, SPRN_PA6T_IMAAT);
655  SYSFS_SPRSETUP(btcr, SPRN_PA6T_BTCR);
656  SYSFS_SPRSETUP(pccr, SPRN_PA6T_PCCR);
657  SYSFS_SPRSETUP(rpccr, SPRN_PA6T_RPCCR);
658  SYSFS_SPRSETUP(der, SPRN_PA6T_DER);
659  SYSFS_SPRSETUP(mer, SPRN_PA6T_MER);
660  SYSFS_SPRSETUP(ber, SPRN_PA6T_BER);
661  SYSFS_SPRSETUP(ier, SPRN_PA6T_IER);
662  SYSFS_SPRSETUP(sier, SPRN_PA6T_SIER);
663  SYSFS_SPRSETUP(siar, SPRN_PA6T_SIAR);
664  SYSFS_SPRSETUP(tsr0, SPRN_PA6T_TSR0);
665  SYSFS_SPRSETUP(tsr1, SPRN_PA6T_TSR1);
666  SYSFS_SPRSETUP(tsr2, SPRN_PA6T_TSR2);
667  SYSFS_SPRSETUP(tsr3, SPRN_PA6T_TSR3);
668  #endif /* HAS_PPC_PA6T */
669  
670  #ifdef HAS_PPC_PMC_IBM
671  static struct device_attribute ibm_common_attrs[] = {
672  	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
673  	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
674  };
675  #endif /* HAS_PPC_PMC_IBM */
676  
677  #ifdef HAS_PPC_PMC_G4
678  static struct device_attribute g4_common_attrs[] = {
679  	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
680  	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
681  	__ATTR(mmcr2, 0600, show_mmcr2, store_mmcr2),
682  };
683  #endif /* HAS_PPC_PMC_G4 */
684  
685  #ifdef HAS_PPC_PMC_CLASSIC
686  static struct device_attribute classic_pmc_attrs[] = {
687  	__ATTR(pmc1, 0600, show_pmc1, store_pmc1),
688  	__ATTR(pmc2, 0600, show_pmc2, store_pmc2),
689  	__ATTR(pmc3, 0600, show_pmc3, store_pmc3),
690  	__ATTR(pmc4, 0600, show_pmc4, store_pmc4),
691  	__ATTR(pmc5, 0600, show_pmc5, store_pmc5),
692  	__ATTR(pmc6, 0600, show_pmc6, store_pmc6),
693  #ifdef HAS_PPC_PMC56
694  	__ATTR(pmc7, 0600, show_pmc7, store_pmc7),
695  	__ATTR(pmc8, 0600, show_pmc8, store_pmc8),
696  #endif
697  };
698  #endif
699  
700  #if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
701  static struct device_attribute pa6t_attrs[] = {
702  #ifdef HAS_PPC_PMC_PA6T
703  	__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
704  	__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
705  	__ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0),
706  	__ATTR(pmc1, 0600, show_pa6t_pmc1, store_pa6t_pmc1),
707  	__ATTR(pmc2, 0600, show_pa6t_pmc2, store_pa6t_pmc2),
708  	__ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
709  	__ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
710  	__ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
711  #endif
712  #ifdef HAS_PPC_PA6T
713  	__ATTR(hid0, 0600, show_hid0, store_hid0),
714  	__ATTR(hid1, 0600, show_hid1, store_hid1),
715  	__ATTR(hid4, 0600, show_hid4, store_hid4),
716  	__ATTR(hid5, 0600, show_hid5, store_hid5),
717  	__ATTR(ima0, 0600, show_ima0, store_ima0),
718  	__ATTR(ima1, 0600, show_ima1, store_ima1),
719  	__ATTR(ima2, 0600, show_ima2, store_ima2),
720  	__ATTR(ima3, 0600, show_ima3, store_ima3),
721  	__ATTR(ima4, 0600, show_ima4, store_ima4),
722  	__ATTR(ima5, 0600, show_ima5, store_ima5),
723  	__ATTR(ima6, 0600, show_ima6, store_ima6),
724  	__ATTR(ima7, 0600, show_ima7, store_ima7),
725  	__ATTR(ima8, 0600, show_ima8, store_ima8),
726  	__ATTR(ima9, 0600, show_ima9, store_ima9),
727  	__ATTR(imaat, 0600, show_imaat, store_imaat),
728  	__ATTR(btcr, 0600, show_btcr, store_btcr),
729  	__ATTR(pccr, 0600, show_pccr, store_pccr),
730  	__ATTR(rpccr, 0600, show_rpccr, store_rpccr),
731  	__ATTR(der, 0600, show_der, store_der),
732  	__ATTR(mer, 0600, show_mer, store_mer),
733  	__ATTR(ber, 0600, show_ber, store_ber),
734  	__ATTR(ier, 0600, show_ier, store_ier),
735  	__ATTR(sier, 0600, show_sier, store_sier),
736  	__ATTR(siar, 0600, show_siar, store_siar),
737  	__ATTR(tsr0, 0600, show_tsr0, store_tsr0),
738  	__ATTR(tsr1, 0600, show_tsr1, store_tsr1),
739  	__ATTR(tsr2, 0600, show_tsr2, store_tsr2),
740  	__ATTR(tsr3, 0600, show_tsr3, store_tsr3),
741  #endif /* HAS_PPC_PA6T */
742  };
743  #endif
744  
745  #ifdef CONFIG_PPC_SVM
746  static ssize_t show_svm(struct device *dev, struct device_attribute *attr, char *buf)
747  {
748  	return sprintf(buf, "%u\n", is_secure_guest());
749  }
750  static DEVICE_ATTR(svm, 0444, show_svm, NULL);
751  
752  static void __init create_svm_file(void)
753  {
754  	struct device *dev_root = bus_get_dev_root(&cpu_subsys);
755  
756  	if (dev_root) {
757  		device_create_file(dev_root, &dev_attr_svm);
758  		put_device(dev_root);
759  	}
760  }
761  #else
762  static void __init create_svm_file(void)
763  {
764  }
765  #endif /* CONFIG_PPC_SVM */
766  
767  #ifdef CONFIG_PPC_PSERIES
768  static void read_idle_purr(void *val)
769  {
770  	u64 *ret = val;
771  
772  	*ret = read_this_idle_purr();
773  }
774  
775  static ssize_t idle_purr_show(struct device *dev,
776  			      struct device_attribute *attr, char *buf)
777  {
778  	struct cpu *cpu = container_of(dev, struct cpu, dev);
779  	u64 val;
780  
781  	smp_call_function_single(cpu->dev.id, read_idle_purr, &val, 1);
782  	return sprintf(buf, "%llx\n", val);
783  }
784  static DEVICE_ATTR(idle_purr, 0400, idle_purr_show, NULL);
785  
786  static void create_idle_purr_file(struct device *s)
787  {
788  	if (firmware_has_feature(FW_FEATURE_LPAR))
789  		device_create_file(s, &dev_attr_idle_purr);
790  }
791  
792  static void remove_idle_purr_file(struct device *s)
793  {
794  	if (firmware_has_feature(FW_FEATURE_LPAR))
795  		device_remove_file(s, &dev_attr_idle_purr);
796  }
797  
798  static void read_idle_spurr(void *val)
799  {
800  	u64 *ret = val;
801  
802  	*ret = read_this_idle_spurr();
803  }
804  
805  static ssize_t idle_spurr_show(struct device *dev,
806  			       struct device_attribute *attr, char *buf)
807  {
808  	struct cpu *cpu = container_of(dev, struct cpu, dev);
809  	u64 val;
810  
811  	smp_call_function_single(cpu->dev.id, read_idle_spurr, &val, 1);
812  	return sprintf(buf, "%llx\n", val);
813  }
814  static DEVICE_ATTR(idle_spurr, 0400, idle_spurr_show, NULL);
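/*
 * idle_purr/idle_spurr report the PURR/SPURR ticks accumulated while the
 * vcpu was idle; together with the plain purr/spurr files above they let
 * userspace (lparstat-style tooling, presumably) subtract idle ticks from
 * the raw counts to estimate real utilization of a shared-processor LPAR.
 * Both are hex, read-only, and only created when running under an LPAR.
 */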
815  
816  static void create_idle_spurr_file(struct device *s)
817  {
818  	if (firmware_has_feature(FW_FEATURE_LPAR))
819  		device_create_file(s, &dev_attr_idle_spurr);
820  }
821  
822  static void remove_idle_spurr_file(struct device *s)
823  {
824  	if (firmware_has_feature(FW_FEATURE_LPAR))
825  		device_remove_file(s, &dev_attr_idle_spurr);
826  }
827  
828  #else /* CONFIG_PPC_PSERIES */
829  #define create_idle_purr_file(s)
830  #define remove_idle_purr_file(s)
831  #define create_idle_spurr_file(s)
832  #define remove_idle_spurr_file(s)
833  #endif /* CONFIG_PPC_PSERIES */
834  
835  static int register_cpu_online(unsigned int cpu)
836  {
837  	struct cpu *c = &per_cpu(cpu_devices, cpu);
838  	struct device *s = &c->dev;
839  	struct device_attribute *attrs, *pmc_attrs;
840  	int i, nattrs;
841  
842  	/* For cpus present at boot a reference was already grabbed in register_cpu() */
843  	if (!s->of_node)
844  		s->of_node = of_get_cpu_node(cpu, NULL);
845  
846  #ifdef CONFIG_PPC64
847  	if (cpu_has_feature(CPU_FTR_SMT))
848  		device_create_file(s, &dev_attr_smt_snooze_delay);
849  #endif
850  
851  	/* PMC stuff */
852  	switch (cur_cpu_spec->pmc_type) {
853  #ifdef HAS_PPC_PMC_IBM
854  	case PPC_PMC_IBM:
855  		attrs = ibm_common_attrs;
856  		nattrs = ARRAY_SIZE(ibm_common_attrs);
857  		pmc_attrs = classic_pmc_attrs;
858  		break;
859  #endif /* HAS_PPC_PMC_IBM */
860  #ifdef HAS_PPC_PMC_G4
861  	case PPC_PMC_G4:
862  		attrs = g4_common_attrs;
863  		nattrs = ARRAY_SIZE(g4_common_attrs);
864  		pmc_attrs = classic_pmc_attrs;
865  		break;
866  #endif /* HAS_PPC_PMC_G4 */
867  #if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
868  	case PPC_PMC_PA6T:
869  		/* PA Semi starts counting at PMC0 */
870  		attrs = pa6t_attrs;
871  		nattrs = ARRAY_SIZE(pa6t_attrs);
872  		pmc_attrs = NULL;
873  		break;
874  #endif
875  	default:
876  		attrs = NULL;
877  		nattrs = 0;
878  		pmc_attrs = NULL;
879  	}
880  
881  	for (i = 0; i < nattrs; i++)
882  		device_create_file(s, &attrs[i]);
883  
884  	if (pmc_attrs)
885  		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
886  			device_create_file(s, &pmc_attrs[i]);
887  
888  #ifdef CONFIG_PPC64
889  #ifdef	CONFIG_PMU_SYSFS
890  	if (cpu_has_feature(CPU_FTR_MMCRA))
891  		device_create_file(s, &dev_attr_mmcra);
892  
893  	if (cpu_has_feature(CPU_FTR_ARCH_31))
894  		device_create_file(s, &dev_attr_mmcr3);
895  #endif /* CONFIG_PMU_SYSFS */
896  
897  	if (cpu_has_feature(CPU_FTR_PURR)) {
898  		if (!firmware_has_feature(FW_FEATURE_LPAR))
899  			add_write_permission_dev_attr(&dev_attr_purr);
900  		device_create_file(s, &dev_attr_purr);
901  		create_idle_purr_file(s);
902  	}
903  
904  	if (cpu_has_feature(CPU_FTR_SPURR)) {
905  		device_create_file(s, &dev_attr_spurr);
906  		create_idle_spurr_file(s);
907  	}
908  
909  	if (cpu_has_feature(CPU_FTR_DSCR))
910  		device_create_file(s, &dev_attr_dscr);
911  
912  	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
913  		device_create_file(s, &dev_attr_pir);
914  
915  	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
916  		!firmware_has_feature(FW_FEATURE_LPAR))
917  		device_create_file(s, &dev_attr_tscr);
918  #endif /* CONFIG_PPC64 */
919  
920  #ifdef CONFIG_PPC_E500
921  	if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
922  		device_create_file(s, &dev_attr_pw20_state);
923  		device_create_file(s, &dev_attr_pw20_wait_time);
924  
925  		device_create_file(s, &dev_attr_altivec_idle);
926  		device_create_file(s, &dev_attr_altivec_idle_wait_time);
927  	}
928  #endif
929  	cacheinfo_cpu_online(cpu);
930  	return 0;
931  }
932  
933  #ifdef CONFIG_HOTPLUG_CPU
934  static int unregister_cpu_online(unsigned int cpu)
935  {
936  	struct cpu *c = &per_cpu(cpu_devices, cpu);
937  	struct device *s = &c->dev;
938  	struct device_attribute *attrs, *pmc_attrs;
939  	int i, nattrs;
940  
941  	if (WARN_RATELIMIT(!c->hotpluggable, "cpu %d can't be offlined\n", cpu))
942  		return -EBUSY;
943  
944  #ifdef CONFIG_PPC64
945  	if (cpu_has_feature(CPU_FTR_SMT))
946  		device_remove_file(s, &dev_attr_smt_snooze_delay);
947  #endif
948  
949  	/* PMC stuff */
950  	switch (cur_cpu_spec->pmc_type) {
951  #ifdef HAS_PPC_PMC_IBM
952  	case PPC_PMC_IBM:
953  		attrs = ibm_common_attrs;
954  		nattrs = ARRAY_SIZE(ibm_common_attrs);
955  		pmc_attrs = classic_pmc_attrs;
956  		break;
957  #endif /* HAS_PPC_PMC_IBM */
958  #ifdef HAS_PPC_PMC_G4
959  	case PPC_PMC_G4:
960  		attrs = g4_common_attrs;
961  		nattrs = ARRAY_SIZE(g4_common_attrs);
962  		pmc_attrs = classic_pmc_attrs;
963  		break;
964  #endif /* HAS_PPC_PMC_G4 */
965  #if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
966  	case PPC_PMC_PA6T:
967  		/* PA Semi starts counting at PMC0 */
968  		attrs = pa6t_attrs;
969  		nattrs = ARRAY_SIZE(pa6t_attrs);
970  		pmc_attrs = NULL;
971  		break;
972  #endif
973  	default:
974  		attrs = NULL;
975  		nattrs = 0;
976  		pmc_attrs = NULL;
977  	}
978  
979  	for (i = 0; i < nattrs; i++)
980  		device_remove_file(s, &attrs[i]);
981  
982  	if (pmc_attrs)
983  		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
984  			device_remove_file(s, &pmc_attrs[i]);
985  
986  #ifdef CONFIG_PPC64
987  #ifdef CONFIG_PMU_SYSFS
988  	if (cpu_has_feature(CPU_FTR_MMCRA))
989  		device_remove_file(s, &dev_attr_mmcra);
990  
991  	if (cpu_has_feature(CPU_FTR_ARCH_31))
992  		device_remove_file(s, &dev_attr_mmcr3);
993  #endif /* CONFIG_PMU_SYSFS */
994  
995  	if (cpu_has_feature(CPU_FTR_PURR)) {
996  		device_remove_file(s, &dev_attr_purr);
997  		remove_idle_purr_file(s);
998  	}
999  
1000  	if (cpu_has_feature(CPU_FTR_SPURR)) {
1001  		device_remove_file(s, &dev_attr_spurr);
1002  		remove_idle_spurr_file(s);
1003  	}
1004  
1005  	if (cpu_has_feature(CPU_FTR_DSCR))
1006  		device_remove_file(s, &dev_attr_dscr);
1007  
1008  	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
1009  		device_remove_file(s, &dev_attr_pir);
1010  
1011  	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
1012  		!firmware_has_feature(FW_FEATURE_LPAR))
1013  		device_remove_file(s, &dev_attr_tscr);
1014  #endif /* CONFIG_PPC64 */
1015  
1016  #ifdef CONFIG_PPC_E500
1017  	if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
1018  		device_remove_file(s, &dev_attr_pw20_state);
1019  		device_remove_file(s, &dev_attr_pw20_wait_time);
1020  
1021  		device_remove_file(s, &dev_attr_altivec_idle);
1022  		device_remove_file(s, &dev_attr_altivec_idle_wait_time);
1023  	}
1024  #endif
1025  	cacheinfo_cpu_offline(cpu);
1026  	of_node_put(s->of_node);
1027  	s->of_node = NULL;
1028  	return 0;
1029  }
1030  #else /* !CONFIG_HOTPLUG_CPU */
1031  #define unregister_cpu_online NULL
1032  #endif
1033  
1034  #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
1035  ssize_t arch_cpu_probe(const char *buf, size_t count)
1036  {
1037  	if (ppc_md.cpu_probe)
1038  		return ppc_md.cpu_probe(buf, count);
1039  
1040  	return -EINVAL;
1041  }
1042  
1043  ssize_t arch_cpu_release(const char *buf, size_t count)
1044  {
1045  	if (ppc_md.cpu_release)
1046  		return ppc_md.cpu_release(buf, count);
1047  
1048  	return -EINVAL;
1049  }
1050  #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
1051  
1052  static DEFINE_MUTEX(cpu_mutex);
1053  
1054  int cpu_add_dev_attr(struct device_attribute *attr)
1055  {
1056  	int cpu;
1057  
1058  	mutex_lock(&cpu_mutex);
1059  
1060  	for_each_possible_cpu(cpu) {
1061  		device_create_file(get_cpu_device(cpu), attr);
1062  	}
1063  
1064  	mutex_unlock(&cpu_mutex);
1065  	return 0;
1066  }
1067  EXPORT_SYMBOL_GPL(cpu_add_dev_attr);
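/*
 * A minimal sketch of a caller (the attribute and its show routine here
 * are hypothetical, not part of this file):
 *
 *	static ssize_t foo_show(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		return sprintf(buf, "%d\n", dev->id);
 *	}
 *	static DEVICE_ATTR_RO(foo);
 *	...
 *	cpu_add_dev_attr(&dev_attr_foo);
 *
 * The helper simply walks every possible CPU under cpu_mutex and calls
 * device_create_file() on that CPU's device, so the file appears under
 * each registered /sys/devices/system/cpu/cpuN/.
 */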
1068  
1069  int cpu_add_dev_attr_group(struct attribute_group *attrs)
1070  {
1071  	int cpu;
1072  	struct device *dev;
1073  	int ret;
1074  
1075  	mutex_lock(&cpu_mutex);
1076  
1077  	for_each_possible_cpu(cpu) {
1078  		dev = get_cpu_device(cpu);
1079  		ret = sysfs_create_group(&dev->kobj, attrs);
1080  		WARN_ON(ret != 0);
1081  	}
1082  
1083  	mutex_unlock(&cpu_mutex);
1084  	return 0;
1085  }
1086  EXPORT_SYMBOL_GPL(cpu_add_dev_attr_group);
1087  
1088  
1089  void cpu_remove_dev_attr(struct device_attribute *attr)
1090  {
1091  	int cpu;
1092  
1093  	mutex_lock(&cpu_mutex);
1094  
1095  	for_each_possible_cpu(cpu) {
1096  		device_remove_file(get_cpu_device(cpu), attr);
1097  	}
1098  
1099  	mutex_unlock(&cpu_mutex);
1100  }
1101  EXPORT_SYMBOL_GPL(cpu_remove_dev_attr);
1102  
1103  void cpu_remove_dev_attr_group(struct attribute_group *attrs)
1104  {
1105  	int cpu;
1106  	struct device *dev;
1107  
1108  	mutex_lock(&cpu_mutex);
1109  
1110  	for_each_possible_cpu(cpu) {
1111  		dev = get_cpu_device(cpu);
1112  		sysfs_remove_group(&dev->kobj, attrs);
1113  	}
1114  
1115  	mutex_unlock(&cpu_mutex);
1116  }
1117  EXPORT_SYMBOL_GPL(cpu_remove_dev_attr_group);
1118  
1119  
1120  /* NUMA stuff */
1121  
1122  #ifdef CONFIG_NUMA
1123  int sysfs_add_device_to_node(struct device *dev, int nid)
1124  {
1125  	struct node *node = node_devices[nid];
1126  	return sysfs_create_link(&node->dev.kobj, &dev->kobj,
1127  			kobject_name(&dev->kobj));
1128  }
1129  EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);
1130  
1131  void sysfs_remove_device_from_node(struct device *dev, int nid)
1132  {
1133  	struct node *node = node_devices[nid];
1134  	sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj));
1135  }
1136  EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);
1137  #endif
1138  
1139  /* Only valid if CPU is present. */
1140  static ssize_t show_physical_id(struct device *dev,
1141  				struct device_attribute *attr, char *buf)
1142  {
1143  	struct cpu *cpu = container_of(dev, struct cpu, dev);
1144  
1145  	return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->dev.id));
1146  }
1147  static DEVICE_ATTR(physical_id, 0444, show_physical_id, NULL);
1148  
1149  static int __init topology_init(void)
1150  {
1151  	int cpu, r;
1152  
1153  	for_each_possible_cpu(cpu) {
1154  		struct cpu *c = &per_cpu(cpu_devices, cpu);
1155  
1156  #ifdef CONFIG_HOTPLUG_CPU
1157  		/*
1158  		 * For now, we just see if the system supports making
1159  		 * the RTAS calls for CPU hotplug.  But, there may be a
1160  		 * more comprehensive way to do this for an individual
1161  		 * CPU.  For instance, the boot cpu might never be valid
1162  		 * for hotplugging.
1163  		 */
1164  		if (smp_ops && smp_ops->cpu_offline_self)
1165  			c->hotpluggable = 1;
1166  #endif
1167  
1168  		if (cpu_online(cpu) || c->hotpluggable) {
1169  			register_cpu(c, cpu);
1170  
1171  			device_create_file(&c->dev, &dev_attr_physical_id);
1172  		}
1173  	}
1174  	r = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/topology:online",
1175  			      register_cpu_online, unregister_cpu_online);
1176  	WARN_ON(r < 0);
1177  #ifdef CONFIG_PPC64
1178  	sysfs_create_dscr_default();
1179  #endif /* CONFIG_PPC64 */
1180  
1181  	create_svm_file();
1182  
1183  	return 0;
1184  }
1185  subsys_initcall(topology_init);
1186