xref: /openbmc/linux/arch/s390/kernel/topology.c (revision f42b3800)
/*
 *    Copyright IBM Corp. 2007
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
#include <asm/sysinfo.h>

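/*
 * A CPU topology-list entry covers at most 64 CPUs (CPU_BITS); NR_MAG
 * is the number of magnitude fields in the SYSIB 15.1.2 header, one
 * per possible nesting level.
 */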
#define CPU_BITS 64
#define NR_MAG 6

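/* Function codes for the perform-topology-function (PTF) instruction. */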
#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

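/*
 * Topology-list entries (TLEs) as returned by stsi 15.1.2. A CPU entry
 * (nl == 0) describes up to 64 CPUs: "origin" is the address of the
 * first CPU covered by "mask" and "pp" is their polarization. A
 * container entry (nl > 0) groups the entries of the next lower
 * nesting level.
 */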
struct tl_cpu {
	unsigned char reserved0[4];
	unsigned char :6;
	unsigned char pp:2;
	unsigned char reserved1;
	unsigned short origin;
	unsigned long mask[CPU_BITS / BITS_PER_LONG];
};

struct tl_container {
	unsigned char reserved[8];
};

union tl_entry {
	unsigned char nl;
	struct tl_cpu cpu;
	struct tl_container container;
};

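/*
 * Header of the SYSIB 15.1.2 response block: "length" is the total
 * size of the block, mag[] holds the maximum number of entities at
 * each nesting level, "mnest" is the number of nesting levels in use,
 * and tle[] is the variable-length topology list itself.
 */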
struct tl_info {
	unsigned char reserved0[2];
	unsigned short length;
	unsigned char mag[NR_MAG];
	unsigned char reserved1;
	unsigned char mnest;
	unsigned char reserved2[4];
	union tl_entry tle[0];
};

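/*
 * Singly linked list of per-core CPU masks; one node per core is
 * allocated at boot by s390_init_cpu_topology().
 */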
struct core_info {
	struct core_info *next;
	cpumask_t mask;
};

static void topology_work_fn(struct work_struct *work);
static struct tl_info *tl_info;
static struct core_info core_info;
static int machine_has_topology;
static int machine_has_topology_irq;
static struct timer_list topology_timer;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);

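/*
 * Return the mask of CPUs sharing a core with "cpu". Without topology
 * information all present CPUs are reported; a CPU found in no core
 * mask gets a mask containing just itself.
 */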
cpumask_t cpu_coregroup_map(unsigned int cpu)
{
	struct core_info *core = &core_info;
	cpumask_t mask;

	cpus_clear(mask);
	if (!machine_has_topology)
		return cpu_present_map;
	mutex_lock(&smp_cpu_state_mutex);
	while (core) {
		if (cpu_isset(cpu, core->mask)) {
			mask = core->mask;
			break;
		}
		core = core->next;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	if (cpus_empty(mask))
		mask = cpumask_of_cpu(cpu);
	return mask;
}

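/*
 * Add the CPUs of one CPU topology-list entry to the given core.
 * The entry's most significant mask bit stands for CPU address
 * "origin", so a set bit at position "cpu" denotes the CPU with
 * address origin + 63 - cpu; each such address is matched against
 * the logical CPU map and its polarization is recorded.
 */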
static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
{
	unsigned int cpu;

	for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS);
	     cpu < CPU_BITS;
	     cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1))
	{
		unsigned int rcpu, lcpu;

		rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
		for_each_present_cpu(lcpu) {
			if (__cpu_logical_map[lcpu] == rcpu) {
				cpu_set(lcpu, core->mask);
				smp_cpu_polarization[lcpu] = tl_cpu->pp;
			}
		}
	}
}

static void clear_cores(void)
{
	struct core_info *core = &core_info;

	while (core) {
		cpus_clear(core->mask);
		core = core->next;
	}
}

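/*
 * Advance to the next topology-list entry: container entries (nl > 0)
 * are 8 bytes long, CPU entries (nl == 0) are 16 bytes.
 */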
static union tl_entry *next_tle(union tl_entry *tle)
{
	if (tle->nl)
		return (union tl_entry *)((struct tl_container *)tle + 1);
	else
		return (union tl_entry *)((struct tl_cpu *)tle + 1);
}

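/*
 * Rebuild the core list from a topology list: each level-1 container
 * entry starts the next core, each CPU entry fills in the current
 * core's mask, and higher nesting levels are skipped. An unknown
 * nesting level invalidates the topology information altogether.
 */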
static void tl_to_cores(struct tl_info *info)
{
	union tl_entry *tle, *end;
	struct core_info *core = &core_info;

	mutex_lock(&smp_cpu_state_mutex);
	clear_cores();
	tle = info->tle;
	end = (union tl_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 5:
		case 4:
		case 3:
		case 2:
			break;
		case 1:
			core = core->next;
			break;
		case 0:
			add_cpus_to_core(&tle->cpu, core);
			break;
		default:
			/* Unknown nesting level: discard the data. */
			clear_cores();
			machine_has_topology = 0;
			goto out;
		}
		tle = next_tle(tle);
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_present_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
	mutex_unlock(&smp_cpu_state_mutex);
}

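/*
 * Execute the PTF instruction (opcode 0xb9a2) with the given function
 * code and return the resulting condition code.
 */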
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}

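/*
 * Switch the machine to vertical (fc != 0) or horizontal (fc == 0)
 * polarization. The polarization of the individual CPUs is unknown
 * until the next topology update is processed.
 */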
int topology_set_cpu_management(int fc)
{
	int cpu;
	int rc;

	if (!machine_has_topology)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_present_cpu(cpu)
		smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
	return rc;
}

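/*
 * Re-read the topology information, rebuild the core masks and send
 * a change uevent for each online CPU so that user space can react.
 */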
void arch_update_cpu_topology(void)
{
	struct tl_info *info = tl_info;
	struct sys_device *sysdev;
	int cpu;

	if (!machine_has_topology) {
		topology_update_polarization_simple();
		return;
	}
	stsi(info, 15, 1, 2);
	tl_to_cores(info);
	for_each_online_cpu(cpu) {
		sysdev = get_cpu_sysdev(cpu);
		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
	}
}

static void topology_work_fn(struct work_struct *work)
{
	arch_reinit_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

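/*
 * Polling fallback for machines without the topology-change interrupt:
 * once a minute, ask PTF_CHECK whether a topology change report is
 * pending and schedule an update if so.
 */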
static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static void set_topology_timer(void)
{
	topology_timer.function = topology_timer_fn;
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	add_timer(&topology_timer);
}

static void topology_interrupt(__u16 code)
{
	topology_schedule_update();
}

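/*
 * Set up topology change notification: preferably via the external
 * interrupt (code 0x2005), enabled through control register 0;
 * otherwise fall back to the polling timer.
 */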
static int __init init_topology_update(void)
{
	int rc;

	if (!machine_has_topology) {
		topology_update_polarization_simple();
		return 0;
	}
	init_timer_deferrable(&topology_timer);
	if (machine_has_topology_irq) {
		rc = register_external_interrupt(0x2005, topology_interrupt);
		if (rc)
			return rc;
		ctl_set_bit(0, 8);
	} else
		set_topology_timer();
	return 0;
}
__initcall(init_topology_update);

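/*
 * Early boot setup: probe the topology-related facility bits via
 * stfle, allocate a page for the stsi 15.1.2 response block and
 * allocate one core_info node per core derived from the magnitude
 * fields. Any failure disables topology support again.
 */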
void __init s390_init_cpu_topology(void)
{
	unsigned long long facility_bits;
	struct tl_info *info;
	struct core_info *core;
	int nr_cores;
	int i;

	if (stfle(&facility_bits, 1) <= 0)
		return;
	if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61)))
		return;
	machine_has_topology = 1;

	if (facility_bits & (1ULL << 51))
		machine_has_topology_irq = 1;

	tl_info = alloc_bootmem_pages(PAGE_SIZE);
	if (!tl_info)
		goto error;
	info = tl_info;
	stsi(info, 15, 1, 2);

	nr_cores = info->mag[NR_MAG - 2];
	for (i = 0; i < info->mnest - 2; i++)
		nr_cores *= info->mag[NR_MAG - 3 - i];

	printk(KERN_INFO "CPU topology:");
	for (i = 0; i < NR_MAG; i++)
		printk(" %d", info->mag[i]);
	printk(" / %d\n", info->mnest);

	core = &core_info;
	for (i = 0; i < nr_cores; i++) {
		core->next = alloc_bootmem(sizeof(struct core_info));
		core = core->next;
		if (!core)
			goto error;
	}
	return;
error:
	machine_has_topology = 0;
	machine_has_topology_irq = 0;
}
315