/*
 * Copyright IBM Corp. 2007
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/cpuset.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
#include <asm/sysinfo.h>

#define CPU_BITS 64
#define NR_MAG 6

/* Function codes for the perform-topology-function (PTF) instruction */
#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

/* CPU entry of the topology list returned by STSI 15.1.2 */
struct tl_cpu {
	unsigned char reserved0[4];
	unsigned char :6;
	unsigned char pp:2;	/* polarization */
	unsigned char reserved1;
	unsigned short origin;	/* CPU number of the first bit in mask */
	unsigned long mask[CPU_BITS / BITS_PER_LONG];
};

/* Container entry of the topology list */
struct tl_container {
	unsigned char reserved[8];
};

union tl_entry {
	unsigned char nl;	/* nesting level of this entry */
	struct tl_cpu cpu;
	struct tl_container container;
};

/* Header of the topology information block (SYSIB 15.1.2) */
struct tl_info {
	unsigned char reserved0[2];
	unsigned short length;
	unsigned char mag[NR_MAG];	/* max. entries per nesting level */
	unsigned char reserved1;
	unsigned char mnest;		/* maximum nesting level */
	unsigned char reserved2[4];
	union tl_entry tle[0];
};

struct core_info {
	struct core_info *next;
	cpumask_t mask;
};

static int topology_enabled;
static void topology_work_fn(struct work_struct *work);
static struct tl_info *tl_info;
static struct core_info core_info;
static int machine_has_topology;
static struct timer_list topology_timer;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);

cpumask_t cpu_core_map[NR_CPUS];

/*
 * Return the set of CPUs that share a core with @cpu, or cpu_possible_map
 * if topology support is disabled or not provided by the machine.
 */
static cpumask_t cpu_coregroup_map(unsigned int cpu)
{
	struct core_info *core = &core_info;
	unsigned long flags;
	cpumask_t mask;

	cpus_clear(mask);
	if (!topology_enabled || !machine_has_topology)
		return cpu_possible_map;
	spin_lock_irqsave(&topology_lock, flags);
	while (core) {
		if (cpu_isset(cpu, core->mask)) {
			mask = core->mask;
			break;
		}
		core = core->next;
	}
	spin_unlock_irqrestore(&topology_lock, flags);
	if (cpus_empty(mask))
		mask = cpumask_of_cpu(cpu);
	return mask;
}

const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
{
	return &cpu_core_map[cpu];
}

static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
{
	unsigned int cpu;

	for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS);
	     cpu < CPU_BITS;
	     cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1))
	{
		unsigned int rcpu, lcpu;

		/* The hardware mask is stored most-significant-bit first. */
		rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
		for_each_present_cpu(lcpu) {
			if (__cpu_logical_map[lcpu] == rcpu) {
				cpu_set(lcpu, core->mask);
				smp_cpu_polarization[lcpu] = tl_cpu->pp;
			}
		}
	}
}

static void clear_cores(void)
{
	struct core_info *core = &core_info;

	while (core) {
		cpus_clear(core->mask);
		core = core->next;
	}
}

static union tl_entry *next_tle(union tl_entry *tle)
{
	if (tle->nl)
		return (union tl_entry *)((struct tl_container *)tle + 1);
	else
		return (union tl_entry *)((struct tl_cpu *)tle + 1);
}

static void tl_to_cores(struct tl_info *info)
{
	union tl_entry *tle, *end;
	struct core_info *core = &core_info;

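	/*
	 * Walk the topology entry list under topology_lock so that readers
	 * in cpu_coregroup_map() never see a half-rebuilt core list.
	 */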
	spin_lock_irq(&topology_lock);
	clear_cores();
	tle = info->tle;
	end = (union tl_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 5:
		case 4:
		case 3:
		case 2:
			break;
		case 1:
			/* New core container: advance to the next core_info. */
			core = core->next;
			break;
		case 0:
			add_cpus_to_core(&tle->cpu, core);
			break;
		default:
			/* Unknown nesting level: disable topology support. */
			clear_cores();
			machine_has_topology = 0;
			goto out;
		}
		tle = next_tle(tle);
	}
out:
	spin_unlock_irq(&topology_lock);
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_possible_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
	mutex_unlock(&smp_cpu_state_mutex);
}

/* Perform topology function; returns the condition code. */
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}

int topology_set_cpu_management(int fc)
{
	int cpu;
	int rc;

	if (!machine_has_topology)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
	return rc;
}

static void update_cpu_core_map(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpu_core_map[cpu] = cpu_coregroup_map(cpu);
}

int arch_update_cpu_topology(void)
{
	struct tl_info *info = tl_info;
	struct sys_device *sysdev;
	int cpu;

	if (!machine_has_topology) {
		update_cpu_core_map();
		topology_update_polarization_simple();
		return 0;
	}
	stsi(info, 15, 1, 2);
	tl_to_cores(info);
	update_cpu_core_map();
	for_each_online_cpu(cpu) {
		sysdev = get_cpu_sysdev(cpu);
		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
	}
	return 1;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static void set_topology_timer(void)
{
	topology_timer.function = topology_timer_fn;
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	add_timer(&topology_timer);
}

static int __init early_parse_topology(char *p)
{
	if (strncmp(p, "on", 2))
		return 0;
	topology_enabled = 1;
	return 0;
}
early_param("topology", early_parse_topology);

static int __init init_topology_update(void)
{
	int rc;

	rc = 0;
	if (!machine_has_topology) {
		topology_update_polarization_simple();
		goto out;
	}
	init_timer_deferrable(&topology_timer);
	set_topology_timer();
out:
	update_cpu_core_map();
	return rc;
}
__initcall(init_topology_update);

void __init s390_init_cpu_topology(void)
{
	unsigned long long facility_bits;
	struct tl_info *info;
	struct core_info *core;
	int nr_cores;
	int i;

	/* Bail out if the required facility bits are not set. */
	if (stfle(&facility_bits, 1) <= 0)
		return;
	if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61)))
		return;
	machine_has_topology = 1;

	tl_info = alloc_bootmem_pages(PAGE_SIZE);
	info = tl_info;
	stsi(info, 15, 1, 2);

	nr_cores = info->mag[NR_MAG - 2];
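	/*
	 * Each nesting level above the core level multiplies the
	 * worst-case core count by the number of containers it holds.
	 */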
	for (i = 0; i < info->mnest - 2; i++)
		nr_cores *= info->mag[NR_MAG - 3 - i];

	pr_info("The CPU configuration topology of the machine is:");
	for (i = 0; i < NR_MAG; i++)
		printk(" %d", info->mag[i]);
	printk(" / %d\n", info->mnest);

	core = &core_info;
	for (i = 0; i < nr_cores; i++) {
		core->next = alloc_bootmem(sizeof(struct core_info));
		core = core->next;
		if (!core)
			goto error;
	}
	return;
error:
	machine_has_topology = 0;
}