xref: /openbmc/linux/arch/parisc/kernel/topology.c (revision bf7b4c1b)
11da177e4SLinus Torvalds /*
2bf7b4c1bSHelge Deller  * arch/parisc/kernel/topology.c
31da177e4SLinus Torvalds  *
4bf7b4c1bSHelge Deller  * Copyright (C) 2017 Helge Deller <deller@gmx.de>
51da177e4SLinus Torvalds  *
6bf7b4c1bSHelge Deller  * based on arch/arm/kernel/topology.c
71da177e4SLinus Torvalds  *
8bf7b4c1bSHelge Deller  * This file is subject to the terms and conditions of the GNU General Public
9bf7b4c1bSHelge Deller  * License.  See the file "COPYING" in the main directory of this archive
10bf7b4c1bSHelge Deller  * for more details.
111da177e4SLinus Torvalds  */
121da177e4SLinus Torvalds 
13bf7b4c1bSHelge Deller #include <linux/percpu.h>
14bf7b4c1bSHelge Deller #include <linux/sched.h>
15bf7b4c1bSHelge Deller #include <linux/sched/topology.h>
161da177e4SLinus Torvalds 
17bf7b4c1bSHelge Deller #include <asm/topology.h>
181da177e4SLinus Torvalds 
/*
 * cpu topology table
 *
 * One entry per possible CPU, indexed by logical cpu number.  Entries are
 * reset to "unknown" (-1 ids, empty masks) by init_cpu_topology() and
 * filled in per-cpu by store_cpu_topology() during boot.  Exported
 * (GPL-only) so topology-aware modules can read it.
 */
struct cputopo_parisc cpu_topology[NR_CPUS] __read_mostly;
EXPORT_SYMBOL_GPL(cpu_topology);
24bf7b4c1bSHelge Deller 
25bf7b4c1bSHelge Deller const struct cpumask *cpu_coregroup_mask(int cpu)
261da177e4SLinus Torvalds {
27bf7b4c1bSHelge Deller 	return &cpu_topology[cpu].core_sibling;
281da177e4SLinus Torvalds }
291da177e4SLinus Torvalds 
30bf7b4c1bSHelge Deller static void update_siblings_masks(unsigned int cpuid)
31bf7b4c1bSHelge Deller {
32bf7b4c1bSHelge Deller 	struct cputopo_parisc *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
33bf7b4c1bSHelge Deller 	int cpu;
34bf7b4c1bSHelge Deller 
35bf7b4c1bSHelge Deller 	/* update core and thread sibling masks */
36bf7b4c1bSHelge Deller 	for_each_possible_cpu(cpu) {
37bf7b4c1bSHelge Deller 		cpu_topo = &cpu_topology[cpu];
38bf7b4c1bSHelge Deller 
39bf7b4c1bSHelge Deller 		if (cpuid_topo->socket_id != cpu_topo->socket_id)
40bf7b4c1bSHelge Deller 			continue;
41bf7b4c1bSHelge Deller 
42bf7b4c1bSHelge Deller 		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
43bf7b4c1bSHelge Deller 		if (cpu != cpuid)
44bf7b4c1bSHelge Deller 			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
45bf7b4c1bSHelge Deller 
46bf7b4c1bSHelge Deller 		if (cpuid_topo->core_id != cpu_topo->core_id)
47bf7b4c1bSHelge Deller 			continue;
48bf7b4c1bSHelge Deller 
49bf7b4c1bSHelge Deller 		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
50bf7b4c1bSHelge Deller 		if (cpu != cpuid)
51bf7b4c1bSHelge Deller 			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
52bf7b4c1bSHelge Deller 	}
53bf7b4c1bSHelge Deller 	smp_wmb();
54bf7b4c1bSHelge Deller }
55bf7b4c1bSHelge Deller 
56bf7b4c1bSHelge Deller static int dualcores_found __initdata;
57bf7b4c1bSHelge Deller 
/*
 * store_cpu_topology is called at boot when only one cpu is running
 * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
 * which prevents simultaneous write access to cpu_topology array
 */
void __init store_cpu_topology(unsigned int cpuid)
{
	struct cputopo_parisc *cpuid_topo = &cpu_topology[cpuid];
	struct cpuinfo_parisc *p;
	int max_socket = -1;	/* highest socket id seen among online cpus */
	unsigned long cpu;

	/* If the cpu topology has been already set, just return */
	if (cpuid_topo->core_id != -1)
		return;

	/* create cpu topology mapping */
	cpuid_topo->thread_id = -1;	/* SMT threads are never reported */
	cpuid_topo->core_id = 0;

	p = &per_cpu(cpu_data, cpuid);
	for_each_online_cpu(cpu) {
		const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);

		if (cpu == cpuid) /* ignore current cpu */
			continue;

		/*
		 * Matching cpu_loc is treated as "same physical package".
		 * NOTE(review): cpu_loc presumably encodes the firmware-
		 * reported physical location; confirm against the parisc
		 * processor probe code that fills cpu_data.
		 */
		if (cpuinfo->cpu_loc == p->cpu_loc) {
			cpuid_topo->core_id = cpu_topology[cpu].core_id;
			if (p->cpu_loc) {
				/*
				 * Non-zero location shared with another cpu:
				 * this is the second core of a dual-core
				 * package — give it the next core id, adopt
				 * the sibling's socket, and remember that an
				 * MC sched domain is worthwhile.
				 */
				cpuid_topo->core_id++;
				cpuid_topo->socket_id = cpu_topology[cpu].socket_id;
				dualcores_found = 1;
				continue;
			}
		}

		/* still socketless: track the largest socket id assigned so far */
		if (cpuid_topo->socket_id == -1)
			max_socket = max(max_socket, cpu_topology[cpu].socket_id);
	}

	/* no package sibling found: allocate a fresh socket id */
	if (cpuid_topo->socket_id == -1)
		cpuid_topo->socket_id = max_socket + 1;

	update_siblings_masks(cpuid);

	pr_info("CPU%u: thread %d, cpu %d, socket %d\n",
		cpuid, cpu_topology[cpuid].thread_id,
		cpu_topology[cpuid].core_id,
		cpu_topology[cpuid].socket_id);
}
109bf7b4c1bSHelge Deller 
/*
 * Scheduler topology levels used when dual-core packages were found:
 * an optional MC (multi-core) level grouping cores by socket via
 * cpu_coregroup_mask(), then the flat DIE level.  NULL-terminated, as
 * required by set_sched_topology().
 */
static struct sched_domain_topology_level parisc_mc_topology[] = {
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif

	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
118bf7b4c1bSHelge Deller 
119bf7b4c1bSHelge Deller /*
120bf7b4c1bSHelge Deller  * init_cpu_topology is called at boot when only one cpu is running
121bf7b4c1bSHelge Deller  * which prevent simultaneous write access to cpu_topology array
122bf7b4c1bSHelge Deller  */
123bf7b4c1bSHelge Deller void __init init_cpu_topology(void)
124bf7b4c1bSHelge Deller {
125bf7b4c1bSHelge Deller 	unsigned int cpu;
126bf7b4c1bSHelge Deller 
127bf7b4c1bSHelge Deller 	/* init core mask and capacity */
128bf7b4c1bSHelge Deller 	for_each_possible_cpu(cpu) {
129bf7b4c1bSHelge Deller 		struct cputopo_parisc *cpu_topo = &(cpu_topology[cpu]);
130bf7b4c1bSHelge Deller 
131bf7b4c1bSHelge Deller 		cpu_topo->thread_id = -1;
132bf7b4c1bSHelge Deller 		cpu_topo->core_id =  -1;
133bf7b4c1bSHelge Deller 		cpu_topo->socket_id = -1;
134bf7b4c1bSHelge Deller 		cpumask_clear(&cpu_topo->core_sibling);
135bf7b4c1bSHelge Deller 		cpumask_clear(&cpu_topo->thread_sibling);
136bf7b4c1bSHelge Deller 	}
137bf7b4c1bSHelge Deller 	smp_wmb();
138bf7b4c1bSHelge Deller 
139bf7b4c1bSHelge Deller 	/* Set scheduler topology descriptor */
140bf7b4c1bSHelge Deller 	if (dualcores_found)
141bf7b4c1bSHelge Deller 		set_sched_topology(parisc_mc_topology);
142bf7b4c1bSHelge Deller }
143