#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

extern struct static_key cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_key_false(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key) + 1;
}

static inline void cpuset_inc(void)
{
	static_key_slow_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_key_slow_dec(&cpusets_enabled_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask);
}

static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}
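
/*
 * A sketch of how these checks typically combine on the page allocator's
 * fast path, loosely following get_page_from_freelist() (the zonelist
 * iteration and ALLOC_CPUSET flag below are illustrative, not a verbatim
 * copy of mm/page_alloc.c): cpusets_enabled() keeps the cost at a
 * patched-out branch while no cpusets exist, so cpuset_zone_allowed() is
 * only evaluated when they do.
 *
 *	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
 *		if (cpusets_enabled() &&
 *		    (alloc_flags & ALLOC_CPUSET) &&
 *		    !cpuset_zone_allowed(zone, gfp_mask))
 *			continue;
 *		(try to allocate from this zone)
 *	}
 */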

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);
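
/*
 * One user of cpuset_mems_allowed_intersects() is the OOM killer: if a
 * candidate victim's mems_allowed does not intersect the allocating
 * task's, killing it is unlikely to free memory the allocator can
 * actually use.
 */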

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
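
/*
 * The macro wrapper keeps the disabled case down to one global flag test
 * with no function call.  Collection is switched on from userspace via
 * the root cpuset's "memory_pressure_enabled" control file (see the
 * cpusets documentation).
 */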

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}
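
/*
 * A sketch of how the spread hooks cooperate, loosely following
 * __page_cache_alloc() (illustrative, not the verbatim caller): when the
 * task's cpuset requests page cache spreading, the allocation is aimed
 * at a rotor-chosen node rather than the local one.
 *
 *	if (cpuset_do_page_mem_spread()) {
 *		int n = cpuset_mem_spread_node();
 *		return alloc_pages_exact_node(n, gfp, 0);
 *	}
 *	return alloc_pages(gfp, 0);
 */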

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be
 * updated in parallel, and an operation racing with such an update can
 * fail even though no real constraint was violated, potentially causing
 * a process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
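
/*
 * The canonical retry-loop shape (a sketch; try_to_allocate() stands in
 * for whatever mems_allowed-dependent operation the caller performs):
 *
 *	unsigned int cookie;
 *	struct page *page;
 *
 *	do {
 *		cookie = read_mems_allowed_begin();
 *		page = try_to_allocate(gfp_mask);
 *	} while (!page && read_mems_allowed_retry(cookie));
 */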

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
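	/*
	 * Disabling interrupts across the write side ensures a reader on
	 * this CPU (for instance an allocation from interrupt context)
	 * cannot spin on an odd sequence count that we would never get
	 * to complete.
	 */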
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

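/*
 * With cpusets compiled out, these stubs keep callers free of #ifdefs:
 * every check succeeds as if all CPUs and memory nodes were allowed.
 */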
static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */