xref: /openbmc/linux/arch/s390/kernel/topology.c (revision 65cf840f)
/*
 *    Copyright IBM Corp. 2007
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/cpuset.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
#include <asm/sysinfo.h>

#define CPU_BITS 64
#define NR_MAG 6

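/* Function codes for the PTF (perform topology function) instruction. */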
#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

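/*
 * CPU topology-list entry (TLE) as stored by STSI 15.1.2: "pp" is the
 * polarization of the CPUs, "origin" the number of the first CPU
 * represented by the 64-bit "mask".
 */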
struct tl_cpu {
	unsigned char reserved0[4];
	unsigned char :6;
	unsigned char pp:2;
	unsigned char reserved1;
	unsigned short origin;
	unsigned long mask[CPU_BITS / BITS_PER_LONG];
};

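/* Container topology-list entry: only the container id is of interest. */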
struct tl_container {
	unsigned char reserved[7];
	unsigned char id;
};

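/*
 * A topology-list entry is either a CPU entry (nesting level 0) or a
 * container entry (nesting level > 0); "nl" selects the variant.
 */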
union tl_entry {
	unsigned char nl;
	struct tl_cpu cpu;
	struct tl_container container;
};

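/*
 * Header of the SYSIB 15.1.2 topology information block: "mag" holds the
 * maximum number of entities per nesting level, "mnest" the number of
 * nesting levels, and "tle" the variable-length topology list itself.
 */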
struct tl_info {
	unsigned char reserved0[2];
	unsigned short length;
	unsigned char mag[NR_MAG];
	unsigned char reserved1;
	unsigned char mnest;
	unsigned char reserved2[4];
	union tl_entry tle[0];
};

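/* Singly linked list of cores; each core knows the mask of its CPUs. */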
struct core_info {
	struct core_info *next;
	unsigned char id;
	cpumask_t mask;
};

static int topology_enabled;
static void topology_work_fn(struct work_struct *work);
static struct tl_info *tl_info;
static struct core_info core_info;
static int machine_has_topology;
static struct timer_list topology_timer;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);

cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];

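/*
 * Return the mask of CPUs that share a core with @cpu. Without topology
 * support all possible CPUs are treated as one big group.
 */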
static cpumask_t cpu_coregroup_map(unsigned int cpu)
{
	struct core_info *core = &core_info;
	unsigned long flags;
	cpumask_t mask;

	cpus_clear(mask);
	if (!topology_enabled || !machine_has_topology)
		return cpu_possible_map;
	spin_lock_irqsave(&topology_lock, flags);
	while (core) {
		if (cpu_isset(cpu, core->mask)) {
			mask = core->mask;
			break;
		}
		core = core->next;
	}
	spin_unlock_irqrestore(&topology_lock, flags);
	if (cpus_empty(mask))
		mask = cpumask_of_cpu(cpu);
	return mask;
}

const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
{
	return &cpu_core_map[cpu];
}

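/*
 * Attach all CPUs described by a CPU topology-list entry to @core. The
 * TLE stores the bit for physical CPU (origin + n) at bit position
 * (63 - n), hence the mirrored rcpu calculation; each physical CPU
 * number is then matched against the present logical CPUs via
 * cpu_logical_map().
 */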
static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
{
	unsigned int cpu;

	for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS);
	     cpu < CPU_BITS;
	     cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1))
	{
		unsigned int rcpu, lcpu;

		rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
		for_each_present_cpu(lcpu) {
			if (cpu_logical_map(lcpu) == rcpu) {
				cpu_set(lcpu, core->mask);
				cpu_core_id[lcpu] = core->id;
				smp_cpu_polarization[lcpu] = tl_cpu->pp;
			}
		}
	}
}

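/* Forget all CPU-to-core assignments, e.g. before rereading the topology. */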
static void clear_cores(void)
{
	struct core_info *core = &core_info;

	while (core) {
		cpus_clear(core->mask);
		core = core->next;
	}
}

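/* Advance to the next topology-list entry; the entry size depends on its type. */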
static union tl_entry *next_tle(union tl_entry *tle)
{
	if (tle->nl)
		return (union tl_entry *)((struct tl_container *)tle + 1);
	else
		return (union tl_entry *)((struct tl_cpu *)tle + 1);
}

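/*
 * Walk the topology list and rebuild the core list. Only container
 * entries at nesting level 1 (cores) and CPU entries (level 0) are
 * evaluated; an unknown nesting level disables topology support.
 */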
static void tl_to_cores(struct tl_info *info)
{
	union tl_entry *tle, *end;
	struct core_info *core = &core_info;

	spin_lock_irq(&topology_lock);
	clear_cores();
	tle = info->tle;
	end = (union tl_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 5:
		case 4:
		case 3:
		case 2:
			break;
		case 1:
			core = core->next;
			core->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_core(&tle->cpu, core);
			break;
		default:
			clear_cores();
			machine_has_topology = 0;
			goto out;
		}
		tle = next_tle(tle);
	}
out:
	spin_unlock_irq(&topology_lock);
}

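/* Without topology support every CPU is reported as horizontally polarized. */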
static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_possible_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
	mutex_unlock(&smp_cpu_state_mutex);
}

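/*
 * Issue the PTF instruction (opcode 0xb9a2) with function code @fc and
 * return the resulting condition code.
 */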
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}

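/*
 * Switch the machine between horizontal (fc == 0) and vertical (fc != 0)
 * polarization. Each CPU's polarization is unknown until the next
 * topology change report is processed.
 */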
int topology_set_cpu_management(int fc)
{
	int cpu;
	int rc;

	if (!machine_has_topology)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
	return rc;
}

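/* Refresh the cached core mask of every possible CPU. */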
static void update_cpu_core_map(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpu_core_map[cpu] = cpu_coregroup_map(cpu);
}

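/*
 * Reread the topology, update the core map and notify user space about
 * the change via uevents. Returns 1 if the scheduling domains need to
 * be rebuilt, 0 otherwise.
 */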
int arch_update_cpu_topology(void)
{
	struct tl_info *info = tl_info;
	struct sys_device *sysdev;
	int cpu;

	if (!machine_has_topology) {
		update_cpu_core_map();
		topology_update_polarization_simple();
		return 0;
	}
	stsi(info, 15, 1, 2);
	tl_to_cores(info);
	update_cpu_core_map();
	for_each_online_cpu(cpu) {
		sysdev = get_cpu_sysdev(cpu);
		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
	}
	return 1;
}

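/* Rebuilding the scheduling domains may sleep, so defer it to a work item. */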
static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

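/*
 * Poll with PTF_CHECK once a minute; a nonzero condition code signals a
 * pending topology change report and triggers an update.
 */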
static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static void set_topology_timer(void)
{
	topology_timer.function = topology_timer_fn;
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	add_timer(&topology_timer);
}

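/* Topology updates are off by default; enable them with "topology=on". */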
static int __init early_parse_topology(char *p)
{
	if (strncmp(p, "on", 2))
		return 0;
	topology_enabled = 1;
	return 0;
}
early_param("topology", early_parse_topology);

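/*
 * Start the topology polling timer if the machine provides topology
 * information; otherwise fall back to static horizontal polarization.
 */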
static int __init init_topology_update(void)
{
	int rc;

	rc = 0;
	if (!machine_has_topology) {
		topology_update_polarization_simple();
		goto out;
	}
	init_timer_deferrable(&topology_timer);
	set_topology_timer();
out:
	update_cpu_core_map();
	return rc;
}
__initcall(init_topology_update);

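/*
 * Early boot: detect the configuration-topology facility (STFLE facility
 * bits 2 and 11), read SYSIB 15.1.2 and preallocate one core_info entry
 * per possible core, because tl_to_cores() later runs with the topology
 * lock held and must not allocate memory.
 */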
void __init s390_init_cpu_topology(void)
{
	unsigned long long facility_bits;
	struct tl_info *info;
	struct core_info *core;
	int nr_cores;
	int i;

	if (stfle(&facility_bits, 1) <= 0)
		return;
	if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61)))
		return;
	machine_has_topology = 1;

	tl_info = alloc_bootmem_pages(PAGE_SIZE);
	info = tl_info;
	stsi(info, 15, 1, 2);

	/*
	 * The maximum number of cores is the product of the magnitudes of
	 * all nesting levels above the CPU level: Mag2 * ... * Mag(mnest).
	 */
	nr_cores = info->mag[NR_MAG - 2];
	for (i = 0; i < info->mnest - 2; i++)
		nr_cores *= info->mag[NR_MAG - 3 - i];

	pr_info("The CPU configuration topology of the machine is:");
	for (i = 0; i < NR_MAG; i++)
		printk(" %d", info->mag[i]);
	printk(" / %d\n", info->mnest);

	core = &core_info;
	for (i = 0; i < nr_cores; i++) {
		core->next = alloc_bootmem(sizeof(struct core_info));
		core = core->next;
		if (!core)
			goto error;
	}
	return;
error:
	machine_has_topology = 0;
}