xref: /openbmc/linux/include/linux/sched/topology.h (revision e496132e)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TOPOLOGY_H
#define _LINUX_SCHED_TOPOLOGY_H

#include <linux/topology.h>

#include <linux/sched/idle.h>

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP

/* Generate SD flag indexes */
#define SD_FLAG(name, mflags) __##name,
enum {
	#include <linux/sched/sd_flags.h>
	__SD_FLAG_CNT,
};
#undef SD_FLAG
/* Generate SD flag bits */
#define SD_FLAG(name, mflags) name = 1 << __##name,
enum {
	#include <linux/sched/sd_flags.h>
};
#undef SD_FLAG

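/*
 * Illustration (not part of the original header): sd_flags.h declares each
 * flag as SD_FLAG(name, meta_flags), so the two passes above turn an entry
 * such as
 *
 *	SD_FLAG(SD_BALANCE_NEWIDLE, SDF_SHARED_CHILD)
 *
 * first into an index (__SD_BALANCE_NEWIDLE) and then into the bit value
 * (SD_BALANCE_NEWIDLE = 1 << __SD_BALANCE_NEWIDLE).
 */
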
#ifdef CONFIG_SCHED_DEBUG

struct sd_flag_debug {
	unsigned int meta_flags;	/* SDF_* metadata flags of this SD flag */
	char *name;			/* flag name, e.g. "SD_BALANCE_NEWIDLE" */
};
extern const struct sd_flag_debug sd_flag_debug[];

#endif

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_CLUSTER
static inline int cpu_cluster_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif

extern int arch_asym_cpu_priority(int cpu);

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain_shared {
	atomic_t	ref;		/* number of sched domains sharing this data */
	atomic_t	nr_busy_cpus;	/* non-idle CPUs in the domain, for nohz balancing */
	int		has_idle_cores;	/* hint for select_idle_core() */
};

struct sched_domain {
	/* These fields must be set up */
	struct sched_domain __rcu *parent;	/* NULL for the top-level domain */
	struct sched_domain __rcu *child;	/* NULL for the bottom-level domain */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int imb_numa_nr;	/* Nr running tasks that allows a NUMA imbalance */

	int nohz_idle;			/* NOHZ IDLE status */
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long last_decay_max_lb_cost;

	u64 avg_scan_cost;		/* select_idle_sibling */

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};
	struct sched_domain_shared *shared;

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

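/*
 * Illustrative use (not part of the original header): iterating the CPUs
 * covered by a domain, as load-balancing code does. for_each_cpu() and
 * cpumask_weight() are real cpumask helpers; the loop body is hypothetical.
 *
 *	int cpu, weight = cpumask_weight(sched_domain_span(sd));
 *
 *	for_each_cpu(cpu, sched_domain_span(sd))
 *		pr_debug("CPU%d is one of %d CPUs in this domain\n", cpu, weight);
 */
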
extern void partition_sched_domains_locked(int ndoms_new,
					   cpumask_var_t doms_new[],
					   struct sched_domain_attr *dattr_new);

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

bool cpus_share_cache(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);

#define SDTL_OVERLAP	0x01

struct sd_data {
	struct sched_domain *__percpu *sd;
	struct sched_domain_shared *__percpu *sds;
	struct sched_group *__percpu *sg;
	struct sched_group_capacity *__percpu *sgc;
};

struct sched_domain_topology_level {
	sched_domain_mask_f mask;
	sched_domain_flags_f sd_flags;
	int		    flags;
	int		    numa_level;
	struct sd_data      data;
#ifdef CONFIG_SCHED_DEBUG
	char                *name;
#endif
};

extern void set_sched_topology(struct sched_domain_topology_level *tl);

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)		.name = #type
#else
# define SD_INIT_NAME(type)
#endif

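/*
 * Illustration (a sketch, not part of the original header): an architecture
 * can replace the scheduler's default topology by building a table of
 * sched_domain_topology_level entries, terminated by an entry with a NULL
 * mask, and handing it to set_sched_topology() early during boot. The table
 * below is modeled on the kernel's default_topology; "my_topology" and its
 * exact contents are hypothetical.
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *	#ifdef CONFIG_SCHED_SMT
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *	#endif
 *	#ifdef CONFIG_SCHED_MC
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_topology);
 */
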
#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
			       struct sched_domain_attr *dattr_new)
{
}

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

#endif	/* !CONFIG_SMP */

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
extern void rebuild_sched_domains_energy(void);
#else
static inline void rebuild_sched_domains_energy(void)
{
}
#endif

#ifndef arch_scale_cpu_capacity
/**
 * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the CPU scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
 *
 *             max_perf(cpu)
 *      ----------------------------- * SCHED_CAPACITY_SCALE
 *      max(max_perf(c) : c \in CPUs)
 */
static __always_inline
unsigned long arch_scale_cpu_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif

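/*
 * Illustration (not part of the original header): an architecture supplies
 * its own scaling by defining the macro before this fallback is seen. arm64,
 * for instance, is believed to map it to its topology driver in
 * arch/arm64/include/asm/topology.h:
 *
 *	#define arch_scale_cpu_capacity topology_get_cpu_scale
 */
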
#ifndef arch_scale_thermal_pressure
static __always_inline
unsigned long arch_scale_thermal_pressure(int cpu)
{
	return 0;
}
#endif

#ifndef arch_update_thermal_pressure
static __always_inline
void arch_update_thermal_pressure(const struct cpumask *cpus,
				  unsigned long capped_frequency)
{ }
#endif

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

#endif /* _LINUX_SCHED_TOPOLOGY_H */