/*
 * Copyright IBM Corp. 2007
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
#include <asm/sysinfo.h>

#define CPU_BITS 64
#define NR_MAG 6

/* Function codes for the perform topology function (PTF) instruction. */
#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

/* CPU-type topology-list entry (TLE) as returned by stsi 15.1.2. */
struct tl_cpu {
        unsigned char reserved0[4];
        unsigned char :6;
        unsigned char pp:2;             /* polarization */
        unsigned char reserved1;
        unsigned short origin;          /* cpu address of the mask's MSB */
        unsigned long mask[CPU_BITS / BITS_PER_LONG];
};

/* Container-type TLE; only its size matters for walking the list. */
struct tl_container {
        unsigned char reserved[8];
};

union tl_entry {
        unsigned char nl;               /* nesting level: 0 = cpu entry */
        struct tl_cpu cpu;
        struct tl_container container;
};

/* Header of the SYSIB 15.1.2 topology information block. */
struct tl_info {
        unsigned char reserved0[2];
        unsigned short length;          /* total length of the block */
        unsigned char mag[NR_MAG];      /* max entries per nesting level */
        unsigned char reserved1;
        unsigned char mnest;            /* maximum nesting level */
        unsigned char reserved2[4];
        union tl_entry tle[0];
};

/* Singly linked list with one cpu mask per core. */
struct core_info {
        struct core_info *next;
        cpumask_t mask;
};

static void topology_work_fn(struct work_struct *work);
static struct tl_info *tl_info;
static struct core_info core_info;
static int machine_has_topology;
static int machine_has_topology_irq;
static struct timer_list topology_timer;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);

cpumask_t cpu_core_map[NR_CPUS];

/* Return the set of cpus that belong to the same core group as cpu. */
cpumask_t cpu_coregroup_map(unsigned int cpu)
{
        struct core_info *core = &core_info;
        cpumask_t mask;

        cpus_clear(mask);
        if (!machine_has_topology)
                return cpu_present_map;
        mutex_lock(&smp_cpu_state_mutex);
        while (core) {
                if (cpu_isset(cpu, core->mask)) {
                        mask = core->mask;
                        break;
                }
                core = core->next;
        }
        mutex_unlock(&smp_cpu_state_mutex);
        if (cpus_empty(mask))
                mask = cpumask_of_cpu(cpu);
        return mask;
}

static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
{
        unsigned int cpu;

        for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS);
             cpu < CPU_BITS;
             cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1)) {
                unsigned int rcpu, lcpu;

                /*
                 * The mask is stored big endian: its most significant
                 * bit corresponds to cpu address tl_cpu->origin.
                 */
                rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
                for_each_present_cpu(lcpu) {
                        if (__cpu_logical_map[lcpu] == rcpu) {
                                cpu_set(lcpu, core->mask);
                                smp_cpu_polarization[lcpu] = tl_cpu->pp;
                        }
                }
        }
}

static void clear_cores(void)
{
        struct core_info *core = &core_info;

        while (core) {
                cpus_clear(core->mask);
                core = core->next;
        }
}

static union tl_entry *next_tle(union tl_entry *tle)
{
        if (tle->nl)
                return (union tl_entry *)((struct tl_container *)tle + 1);
        else
                return (union tl_entry *)((struct tl_cpu *)tle + 1);
}

static void tl_to_cores(struct tl_info *info)
{
        union tl_entry *tle, *end;
        struct core_info *core = &core_info;

        mutex_lock(&smp_cpu_state_mutex);
        clear_cores();
        tle = info->tle;
        end = (union tl_entry *)((unsigned long)info + info->length);
        while (tle < end) {
                switch (tle->nl) {
                case 5:
                case 4:
                case 3:
                case 2:
                        break;
                case 1:
                        /* New core container: advance to the next entry. */
                        core = core->next;
                        break;
                case 0:
                        add_cpus_to_core(&tle->cpu, core);
                        break;
                default:
                        clear_cores();
                        machine_has_topology = 0;
                        /* Drop the mutex before bailing out; returning
                         * with it held would deadlock the next caller. */
                        mutex_unlock(&smp_cpu_state_mutex);
                        return;
                }
                tle = next_tle(tle);
        }
        mutex_unlock(&smp_cpu_state_mutex);
}
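
/*
 * Fallback when no topology information is available: report every
 * cpu as horizontally polarized, i.e. each cpu gets an equal share
 * of physical cpu time. (Vertical polarization instead concentrates
 * physical cpu time on a subset of the logical cpus.)
 */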
static void topology_update_polarization_simple(void)
{
        int cpu;

        mutex_lock(&smp_cpu_state_mutex);
        for_each_present_cpu(cpu)
                smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
        mutex_unlock(&smp_cpu_state_mutex);
}

/* Execute the PTF instruction and return its condition code. */
static int ptf(unsigned long fc)
{
        int rc;

        asm volatile(
                "       .insn   rre,0xb9a20000,%1,%1\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (rc)
                : "d" (fc) : "cc");
        return rc;
}

int topology_set_cpu_management(int fc)
{
        int cpu;
        int rc;

        if (!machine_has_topology)
                return -EOPNOTSUPP;
        if (fc)
                rc = ptf(PTF_VERTICAL);
        else
                rc = ptf(PTF_HORIZONTAL);
        if (rc)
                return -EBUSY;
        for_each_present_cpu(cpu)
                smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
        return rc;
}

static void update_cpu_core_map(void)
{
        int cpu;

        for_each_present_cpu(cpu)
                cpu_core_map[cpu] = cpu_coregroup_map(cpu);
}

void arch_update_cpu_topology(void)
{
        struct tl_info *info = tl_info;
        struct sys_device *sysdev;
        int cpu;

        if (!machine_has_topology) {
                update_cpu_core_map();
                topology_update_polarization_simple();
                return;
        }
        stsi(info, 15, 1, 2);
        tl_to_cores(info);
        update_cpu_core_map();
        for_each_online_cpu(cpu) {
                sysdev = get_cpu_sysdev(cpu);
                kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
        }
}

static void topology_work_fn(struct work_struct *work)
{
        arch_reinit_sched_domains();
}

void topology_schedule_update(void)
{
        schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
        /* PTF_CHECK returns nonzero if a change report is pending. */
        if (ptf(PTF_CHECK))
                topology_schedule_update();
        set_topology_timer();
}

/* Poll for topology changes once a minute if there is no change irq. */
static void set_topology_timer(void)
{
        topology_timer.function = topology_timer_fn;
        topology_timer.data = 0;
        topology_timer.expires = jiffies + 60 * HZ;
        add_timer(&topology_timer);
}

static void topology_interrupt(__u16 code)
{
        schedule_work(&topology_work);
}

static int __init init_topology_update(void)
{
        int rc;

        rc = 0;
        if (!machine_has_topology) {
                topology_update_polarization_simple();
                goto out;
        }
        init_timer_deferrable(&topology_timer);
        if (machine_has_topology_irq) {
                rc = register_external_interrupt(0x2005, topology_interrupt);
                if (rc)
                        goto out;
                /* Enable the topology-change external interrupt subclass. */
                ctl_set_bit(0, 8);
        } else
                set_topology_timer();
out:
        update_cpu_core_map();
        return rc;
}
__initcall(init_topology_update);

void __init s390_init_cpu_topology(void)
{
        unsigned long long facility_bits;
        struct tl_info *info;
        struct core_info *core;
        int nr_cores;
        int i;

        /* Topology support requires stfle facility bits 52 and 61. */
        if (stfle(&facility_bits, 1) <= 0)
                return;
        if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61)))
                return;
        machine_has_topology = 1;

        if (facility_bits & (1ULL << 51))
                machine_has_topology_irq = 1;

        tl_info = alloc_bootmem_pages(PAGE_SIZE);
        info = tl_info;
        stsi(info, 15, 1, 2);

        /* Number of cores: product of the mag[] counts above cpu level. */
        nr_cores = info->mag[NR_MAG - 2];
        for (i = 0; i < info->mnest - 2; i++)
                nr_cores *= info->mag[NR_MAG - 3 - i];

        printk(KERN_INFO "CPU topology:");
        for (i = 0; i < NR_MAG; i++)
                printk(" %d", info->mag[i]);
        printk(" / %d\n", info->mnest);
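
        /*
         * Preallocate one core_info list entry per core; the per-core
         * cpu masks are filled in later by tl_to_cores().
         */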
        core = &core_info;
        for (i = 0; i < nr_cores; i++) {
                core->next = alloc_bootmem(sizeof(struct core_info));
                core = core->next;
                if (!core)
                        goto error;
        }
        return;
error:
        machine_has_topology = 0;
        machine_has_topology_irq = 0;
}
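
/*
 * Usage sketch (illustrative example, not code from this file): the
 * dispatching mode is switched by requesting a new polarization and
 * then rebuilding the scheduler domains, roughly:
 *
 *	rc = topology_set_cpu_management(1);	(1 = vertical, 0 = horizontal)
 *	if (!rc)
 *		topology_schedule_update();
 *
 * In the kernel this is driven by the cpu "dispatching" sysfs
 * attribute; treat the exact call site as an assumption.
 */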