xref: /openbmc/linux/arch/parisc/kernel/topology.c (revision 95370b40)
11da177e4SLinus Torvalds /*
2bf7b4c1bSHelge Deller  * arch/parisc/kernel/topology.c
31da177e4SLinus Torvalds  *
4bf7b4c1bSHelge Deller  * Copyright (C) 2017 Helge Deller <deller@gmx.de>
51da177e4SLinus Torvalds  *
6bf7b4c1bSHelge Deller  * based on arch/arm/kernel/topology.c
71da177e4SLinus Torvalds  *
8bf7b4c1bSHelge Deller  * This file is subject to the terms and conditions of the GNU General Public
9bf7b4c1bSHelge Deller  * License.  See the file "COPYING" in the main directory of this archive
10bf7b4c1bSHelge Deller  * for more details.
111da177e4SLinus Torvalds  */
121da177e4SLinus Torvalds 
13bf7b4c1bSHelge Deller #include <linux/percpu.h>
14bf7b4c1bSHelge Deller #include <linux/sched.h>
15bf7b4c1bSHelge Deller #include <linux/sched/topology.h>
1662773112SHelge Deller #include <linux/cpu.h>
171da177e4SLinus Torvalds 
18bf7b4c1bSHelge Deller #include <asm/topology.h>
19*95370b40SHelge Deller #include <asm/sections.h>
201da177e4SLinus Torvalds 
/* Per-CPU device structs registered with the driver core in store_cpu_topology(). */
static DEFINE_PER_CPU(struct cpu, cpu_devices);

/* Set to 1 when two online CPUs share a cpu_loc, i.e. a dual-core package exists. */
static int dualcores_found;
24bf7b4c1bSHelge Deller 
25bf7b4c1bSHelge Deller /*
26bf7b4c1bSHelge Deller  * store_cpu_topology is called at boot when only one cpu is running
27bf7b4c1bSHelge Deller  * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
28bf7b4c1bSHelge Deller  * which prevents simultaneous write access to cpu_topology array
29bf7b4c1bSHelge Deller  */
30*95370b40SHelge Deller void store_cpu_topology(unsigned int cpuid)
31bf7b4c1bSHelge Deller {
3262773112SHelge Deller 	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
33bf7b4c1bSHelge Deller 	struct cpuinfo_parisc *p;
34bf7b4c1bSHelge Deller 	int max_socket = -1;
35bf7b4c1bSHelge Deller 	unsigned long cpu;
36bf7b4c1bSHelge Deller 
37bf7b4c1bSHelge Deller 	/* If the cpu topology has been already set, just return */
38bf7b4c1bSHelge Deller 	if (cpuid_topo->core_id != -1)
39bf7b4c1bSHelge Deller 		return;
40bf7b4c1bSHelge Deller 
4162773112SHelge Deller #ifdef CONFIG_HOTPLUG_CPU
4262773112SHelge Deller 	per_cpu(cpu_devices, cpuid).hotpluggable = 1;
4362773112SHelge Deller #endif
4462773112SHelge Deller 	if (register_cpu(&per_cpu(cpu_devices, cpuid), cpuid))
4562773112SHelge Deller 		pr_warn("Failed to register CPU%d device", cpuid);
4662773112SHelge Deller 
47bf7b4c1bSHelge Deller 	/* create cpu topology mapping */
48bf7b4c1bSHelge Deller 	cpuid_topo->thread_id = -1;
49bf7b4c1bSHelge Deller 	cpuid_topo->core_id = 0;
50bf7b4c1bSHelge Deller 
51bf7b4c1bSHelge Deller 	p = &per_cpu(cpu_data, cpuid);
52bf7b4c1bSHelge Deller 	for_each_online_cpu(cpu) {
53bf7b4c1bSHelge Deller 		const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
54bf7b4c1bSHelge Deller 
55bf7b4c1bSHelge Deller 		if (cpu == cpuid) /* ignore current cpu */
56bf7b4c1bSHelge Deller 			continue;
57bf7b4c1bSHelge Deller 
58bf7b4c1bSHelge Deller 		if (cpuinfo->cpu_loc == p->cpu_loc) {
59bf7b4c1bSHelge Deller 			cpuid_topo->core_id = cpu_topology[cpu].core_id;
60bf7b4c1bSHelge Deller 			if (p->cpu_loc) {
61bf7b4c1bSHelge Deller 				cpuid_topo->core_id++;
6262773112SHelge Deller 				cpuid_topo->package_id = cpu_topology[cpu].package_id;
63bf7b4c1bSHelge Deller 				dualcores_found = 1;
64bf7b4c1bSHelge Deller 				continue;
65bf7b4c1bSHelge Deller 			}
66bf7b4c1bSHelge Deller 		}
67bf7b4c1bSHelge Deller 
6862773112SHelge Deller 		if (cpuid_topo->package_id == -1)
6962773112SHelge Deller 			max_socket = max(max_socket, cpu_topology[cpu].package_id);
70bf7b4c1bSHelge Deller 	}
71bf7b4c1bSHelge Deller 
7262773112SHelge Deller 	if (cpuid_topo->package_id == -1)
7362773112SHelge Deller 		cpuid_topo->package_id = max_socket + 1;
74bf7b4c1bSHelge Deller 
75bf7b4c1bSHelge Deller 	update_siblings_masks(cpuid);
76bf7b4c1bSHelge Deller 
7746162ac2SHelge Deller 	pr_info("CPU%u: cpu core %d of socket %d\n",
7846162ac2SHelge Deller 		cpuid,
79bf7b4c1bSHelge Deller 		cpu_topology[cpuid].core_id,
8062773112SHelge Deller 		cpu_topology[cpuid].package_id);
81bf7b4c1bSHelge Deller }
82bf7b4c1bSHelge Deller 
/*
 * Scheduler topology table installed when dual-core CPUs were found:
 * an optional MC (multi-core) level grouping cores of a socket
 * (only if CONFIG_SCHED_MC), followed by the DIE level spanning all
 * CPUs, and a NULL terminator.
 */
static struct sched_domain_topology_level parisc_mc_topology[] = {
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif

	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
91bf7b4c1bSHelge Deller 
92bf7b4c1bSHelge Deller /*
93bf7b4c1bSHelge Deller  * init_cpu_topology is called at boot when only one cpu is running
94bf7b4c1bSHelge Deller  * which prevent simultaneous write access to cpu_topology array
95bf7b4c1bSHelge Deller  */
96bf7b4c1bSHelge Deller void __init init_cpu_topology(void)
97bf7b4c1bSHelge Deller {
98bf7b4c1bSHelge Deller 	/* Set scheduler topology descriptor */
99bf7b4c1bSHelge Deller 	if (dualcores_found)
100bf7b4c1bSHelge Deller 		set_sched_topology(parisc_mc_topology);
101bf7b4c1bSHelge Deller }
102