1 /*
2  * Copyright IBM Corporation, 2012
3  * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2.1 of the GNU Lesser General Public License
7  * as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
12  *
13  */
14 
15 #ifndef _LINUX_HUGETLB_CGROUP_H
16 #define _LINUX_HUGETLB_CGROUP_H
17 
18 #include <linux/mmdebug.h>
19 
20 struct hugetlb_cgroup;
21 struct resv_map;
22 struct file_region;
23 
24 #ifdef CONFIG_CGROUP_HUGETLB
25 /*
26  * Minimum page order trackable by hugetlb cgroup.
27  * At least 3 pages are necessary for all the tracking information.
28  * The second tail page contains all of the hugetlb-specific fields.
29  */
30 #define HUGETLB_CGROUP_MIN_ORDER order_base_2(__NR_USED_SUBPAGE)
31 
/* Event types tracked per hstate (see events[]/events_local[] below). */
enum hugetlb_memory_event {
	HUGETLB_MAX,		/* presumably counts hits of the "max" limit — confirm against mm/hugetlb_cgroup.c */
	HUGETLB_NR_MEMORY_EVENTS,	/* number of event types; must stay last */
};
36 
/* Per-node slice of a hugetlb cgroup's statistics (see nodeinfo[] below). */
struct hugetlb_cgroup_per_node {
	/* hugetlb usage in pages over all hstates. */
	unsigned long usage[HUGE_MAX_HSTATE];
};
41 
/*
 * Per-cgroup state for the hugetlb controller.  All counters, event
 * arrays and control-file handles are indexed by hstate.
 */
struct hugetlb_cgroup {
	struct cgroup_subsys_state css;

	/*
	 * the counter to account for hugepages from hugetlb.
	 */
	struct page_counter hugepage[HUGE_MAX_HSTATE];

	/*
	 * the counter to account for hugepage reservations from hugetlb.
	 */
	struct page_counter rsvd_hugepage[HUGE_MAX_HSTATE];

	/* hierarchical event counts, per hstate */
	atomic_long_t events[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
	/* local (non-hierarchical) event counts, per hstate */
	atomic_long_t events_local[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];

	/* Handle for "hugetlb.events" */
	struct cgroup_file events_file[HUGE_MAX_HSTATE];

	/* Handle for "hugetlb.events.local" */
	struct cgroup_file events_local_file[HUGE_MAX_HSTATE];

	/* per-node stats; flexible array, sized at allocation time */
	struct hugetlb_cgroup_per_node *nodeinfo[];
};
66 
67 static inline struct hugetlb_cgroup *
68 __hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd)
69 {
70 	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
71 	if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER)
72 		return NULL;
73 	if (rsvd)
74 		return folio->_hugetlb_cgroup_rsvd;
75 	else
76 		return folio->_hugetlb_cgroup;
77 }
78 
/* Look up the (non-reservation) hugetlb cgroup charged for @folio. */
static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
{
	return __hugetlb_cgroup_from_folio(folio, false);
}
83 
/* Look up the hugetlb cgroup charged for @folio's reservation. */
static inline struct hugetlb_cgroup *
hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
{
	return __hugetlb_cgroup_from_folio(folio, true);
}
89 
90 static inline void __set_hugetlb_cgroup(struct folio *folio,
91 				       struct hugetlb_cgroup *h_cg, bool rsvd)
92 {
93 	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
94 	if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER)
95 		return;
96 	if (rsvd)
97 		folio->_hugetlb_cgroup_rsvd = h_cg;
98 	else
99 		folio->_hugetlb_cgroup = h_cg;
100 }
101 
/* Record @h_cg as the (non-reservation) cgroup charged for @folio. */
static inline void set_hugetlb_cgroup(struct folio *folio,
				     struct hugetlb_cgroup *h_cg)
{
	__set_hugetlb_cgroup(folio, h_cg, false);
}
107 
/* Record @h_cg as the cgroup charged for @folio's reservation. */
static inline void set_hugetlb_cgroup_rsvd(struct folio *folio,
					  struct hugetlb_cgroup *h_cg)
{
	__set_hugetlb_cgroup(folio, h_cg, true);
}
113 
/* True when the hugetlb cgroup subsystem is compiled in but disabled. */
static inline bool hugetlb_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
}
118 
/* Drop the css reference held on a reservation cgroup. */
static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
{
	css_put(&h_cg->css);
}
123 
/*
 * Take an extra css reference on the cgroup tracked by @resv_map, so a
 * duplicated map keeps its uncharge info alive.  No-op if none is set.
 */
static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
	if (resv_map->css)
		css_get(resv_map->css);
}
130 
/*
 * Release the css reference held by @resv_map's uncharge info, if any.
 * Pairs with resv_map_dup_hugetlb_cgroup_uncharge_info().
 */
static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
	if (resv_map->css)
		css_put(resv_map->css);
}
137 
/*
 * Out-of-line charge/uncharge API.  @idx presumably indexes the hstate
 * (matches the HUGE_MAX_HSTATE-sized counters) — confirm against the
 * implementation; @nr_pages is a page count.  The _rsvd variants act on
 * the reservation counters instead of the usage counters.
 */
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					struct hugetlb_cgroup **ptr);
extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
					     struct hugetlb_cgroup **ptr);
extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg,
					 struct folio *folio);
extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
					      struct hugetlb_cgroup *h_cg,
					      struct folio *folio);
extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
					 struct folio *folio);
extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
					      struct folio *folio);

extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					   struct hugetlb_cgroup *h_cg);
extern void hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
						struct hugetlb_cgroup *h_cg);
/* Uncharge the reservation counter for the range [start, end) of @resv. */
extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
					    unsigned long start,
					    unsigned long end);

extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
						struct file_region *rg,
						unsigned long nr_pages,
						bool region_del);

/* Register the hugetlb cgroup control files at boot. */
extern void hugetlb_cgroup_file_init(void) __init;
/* Transfer cgroup charge state from @old_folio to @new_folio. */
extern void hugetlb_cgroup_migrate(struct folio *old_folio,
				   struct folio *new_folio);
169 
170 #else
/* No-op stub: hugetlb cgroup accounting is compiled out. */
static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
						       struct file_region *rg,
						       unsigned long nr_pages,
						       bool region_del)
{
}
177 
/* Stub: no cgroup is ever associated when the controller is compiled out. */
static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio)
{
	return NULL;
}
182 
/* Stub: no reservation cgroup when the controller is compiled out. */
static inline struct hugetlb_cgroup *
hugetlb_cgroup_from_folio_rsvd(struct folio *folio)
{
	return NULL;
}
188 
/* No-op stub: hugetlb cgroup accounting is compiled out. */
static inline void set_hugetlb_cgroup(struct folio *folio,
				     struct hugetlb_cgroup *h_cg)
{
}
193 
/* No-op stub: hugetlb cgroup accounting is compiled out. */
static inline void set_hugetlb_cgroup_rsvd(struct folio *folio,
					  struct hugetlb_cgroup *h_cg)
{
}
198 
/* Stub: the controller is compiled out, hence always disabled. */
static inline bool hugetlb_cgroup_disabled(void)
{
	return true;
}
203 
/* No-op stub: no css reference is ever taken when compiled out. */
static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
{
}
207 
/* No-op stub: resv_map carries no cgroup uncharge info when compiled out. */
static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
}
212 
/* No-op stub: resv_map carries no cgroup uncharge info when compiled out. */
static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
						struct resv_map *resv_map)
{
}
217 
/* Stub: charging always succeeds (returns 0) when compiled out. */
static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
					       struct hugetlb_cgroup **ptr)
{
	return 0;
}
223 
/* Stub: reservation charging always succeeds (returns 0) when compiled out. */
static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,
						    unsigned long nr_pages,
						    struct hugetlb_cgroup **ptr)
{
	return 0;
}
230 
/* No-op stub: hugetlb cgroup accounting is compiled out. */
static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
						struct hugetlb_cgroup *h_cg,
						struct folio *folio)
{
}
236 
/* No-op stub: hugetlb cgroup accounting is compiled out. */
static inline void
hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct folio *folio)
{
}
243 
/* No-op stub: hugetlb cgroup accounting is compiled out. */
static inline void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
						struct folio *folio)
{
}
248 
/* No-op stub: hugetlb cgroup accounting is compiled out. */
static inline void hugetlb_cgroup_uncharge_folio_rsvd(int idx,
						     unsigned long nr_pages,
						     struct folio *folio)
{
}
/* No-op stub: hugetlb cgroup accounting is compiled out. */
static inline void hugetlb_cgroup_uncharge_cgroup(int idx,
						  unsigned long nr_pages,
						  struct hugetlb_cgroup *h_cg)
{
}
259 
/* No-op stub: hugetlb cgroup accounting is compiled out. */
static inline void
hugetlb_cgroup_uncharge_cgroup_rsvd(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
}
265 
/* No-op stub: hugetlb cgroup accounting is compiled out. */
static inline void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
						   unsigned long start,
						   unsigned long end)
{
}
271 
/* No-op stub: no cgroup control files to register when compiled out. */
static inline void hugetlb_cgroup_file_init(void)
{
}
275 
/* No-op stub: no cgroup state to migrate when compiled out. */
static inline void hugetlb_cgroup_migrate(struct folio *old_folio,
					  struct folio *new_folio)
{
}
280 
#endif  /* CONFIG_CGROUP_HUGETLB */
#endif  /* _LINUX_HUGETLB_CGROUP_H */
283