/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * Cgroup v2
 * Copyright (C) 2019 Red Hat, Inc.
 * Author: Giuseppe Scrivano <gscrivan@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>

enum hugetlb_memory_event {
	HUGETLB_MAX,
	HUGETLB_NR_MEMORY_EVENTS,
};

struct hugetlb_cgroup {
	struct cgroup_subsys_state css;

	/*
	 * the counter to account for hugepages from hugetlb.
	 */
	struct page_counter hugepage[HUGE_MAX_HSTATE];

	atomic_long_t events[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
	atomic_long_t events_local[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];

	/* Handle for "hugetlb.events" */
	struct cgroup_file events_file[HUGE_MAX_HSTATE];

	/* Handle for "hugetlb.events.local" */
	struct cgroup_file events_local_file[HUGE_MAX_HSTATE];
};

#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
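
/*
 * Illustrative example (added for clarity, not in the original source):
 * for a hypothetical hstate at index 1, the v1 limit file would store
 * MEMFILE_PRIVATE(1, RES_LIMIT) == (1 << 16) | RES_LIMIT in cft->private;
 * MEMFILE_IDX() and MEMFILE_ATTR() then recover the hstate index from the
 * high 16 bits and the attribute from the low 16 bits of that packed value.
 */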

#define hugetlb_cgroup_from_counter(counter, idx) \
	container_of(counter, struct hugetlb_cgroup, hugepage[idx])

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(h_cg->css.parent);
}

static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	int idx;

	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
		if (page_counter_read(&h_cg->hugepage[idx]))
			return true;
	}
	return false;
}

static void hugetlb_cgroup_init(struct hugetlb_cgroup *h_cgroup,
				struct hugetlb_cgroup *parent_h_cgroup)
{
	int idx;

	for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
		struct page_counter *counter = &h_cgroup->hugepage[idx];
		struct page_counter *parent = NULL;
		unsigned long limit;
		int ret;

		if (parent_h_cgroup)
			parent = &parent_h_cgroup->hugepage[idx];
		page_counter_init(counter, parent);

		limit = round_down(PAGE_COUNTER_MAX,
				   1 << huge_page_order(&hstates[idx]));
		ret = page_counter_set_max(counter, limit);
		VM_BUG_ON(ret);
	}
}
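
/*
 * Note added for clarity (not in the original source): page_counter_init()
 * links each per-hstate counter to the matching counter of the parent
 * cgroup, so a charge against a child propagates to every ancestor up to
 * the root. The default limit is PAGE_COUNTER_MAX rounded down to a
 * multiple of the huge page size, which is the same value that
 * hugetlb_cgroup_read_u64_max() below compares against when deciding to
 * print "max" for an unlimited cgroup v2 file.
 */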

static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (!parent_h_cgroup)
		root_h_cgroup = h_cgroup;

	hugetlb_cgroup_init(h_cgroup, parent_h_cgroup);
	return &h_cgroup->css;
}

static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = hugetlb_cgroup_from_css(css);
	kfree(h_cgroup);
}

/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot get moved off the
 * active list or uncharged from the cgroup, so there is no need to take
 * a page reference or to test for page activity here. This function
 * cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	unsigned int nr_pages;
	struct page_counter *counter;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

	page_hcg = hugetlb_cgroup_from_page(page);
	/*
	 * We can have pages on the active list without any cgroup,
	 * i.e., hugepages with fewer than 3 base pages. We can safely
	 * ignore those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	nr_pages = compound_nr(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		page_counter_charge(&parent->hugepage[idx], nr_pages);
	}
	counter = &h_cg->hugepage[idx];
	/* Take the pages off the local counter */
	page_counter_cancel(counter, nr_pages);

	set_hugetlb_cgroup(page, parent);
out:
	return;
}

/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;
	int idx = 0;

	do {
		for_each_hstate(h) {
			spin_lock(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(idx, h_cg, page);

			spin_unlock(&hugetlb_lock);
			idx++;
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}

static inline void hugetlb_event(struct hugetlb_cgroup *hugetlb, int idx,
				 enum hugetlb_memory_event event)
{
	atomic_long_inc(&hugetlb->events_local[idx][event]);
	cgroup_file_notify(&hugetlb->events_local_file[idx]);

	do {
		atomic_long_inc(&hugetlb->events[idx][event]);
		cgroup_file_notify(&hugetlb->events_file[idx]);
	} while ((hugetlb = parent_hugetlb_cgroup(hugetlb)) &&
		 !hugetlb_cgroup_is_root(hugetlb));
}

int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = NULL;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has fewer
	 * than 3 base pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	if (!page_counter_try_charge(&h_cg->hugepage[idx], nr_pages,
				     &counter)) {
		ret = -ENOMEM;
		hugetlb_event(hugetlb_cgroup_from_counter(counter, idx), idx,
			      HUGETLB_MAX);
	}
	css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}

/* Should be called with hugetlb_lock held */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	set_hugetlb_cgroup(page, h_cg);
	return;
}

/*
 * Should be called with hugetlb_lock held
 */
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
				  struct page *page)
{
	struct hugetlb_cgroup *h_cg;

	if (hugetlb_cgroup_disabled())
		return;
	lockdep_assert_held(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(page);
	if (unlikely(!h_cg))
		return;
	set_hugetlb_cgroup(page, NULL);
	page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
	return;
}

void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
	return;
}
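
/*
 * Sketch of the expected caller pattern (illustrative only, added for
 * clarity; see alloc_huge_page() and free_huge_page() in mm/hugetlb.c):
 *
 *	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
 *	if (ret)
 *		return ERR_PTR(-ENOSPC);
 *	... dequeue or allocate the huge page ...
 *	spin_lock(&hugetlb_lock);
 *	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
 *	spin_unlock(&hugetlb_lock);
 *
 * On the free path, hugetlb_cgroup_uncharge_page() is called with
 * hugetlb_lock held before the page goes back to the pool;
 * hugetlb_cgroup_uncharge_cgroup() undoes a charge that was never
 * committed to a page.
 */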

enum {
	RES_USAGE,
	RES_LIMIT,
	RES_MAX_USAGE,
	RES_FAILCNT,
};

static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->max * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	default:
		BUG();
	}
}

static int hugetlb_cgroup_read_u64_max(struct seq_file *seq, void *v)
{
	int idx;
	u64 val;
	struct cftype *cft = seq_cft(seq);
	unsigned long limit;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);
	counter = &h_cg->hugepage[idx];

	limit = round_down(PAGE_COUNTER_MAX,
			   1 << huge_page_order(&hstates[idx]));

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		val = (u64)page_counter_read(counter);
		seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	case RES_LIMIT:
		val = (u64)counter->max;
		if (val == limit)
			seq_puts(seq, "max\n");
		else
			seq_printf(seq, "%llu\n", val * PAGE_SIZE);
		break;
	default:
		BUG();
	}

	return 0;
}

static DEFINE_MUTEX(hugetlb_limit_mutex);

static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off,
				    const char *max)
{
	int ret, idx;
	unsigned long nr_pages;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
		return -EINVAL;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, max, &nr_pages);
	if (ret)
		return ret;

	idx = MEMFILE_IDX(of_cft(of)->private);
	nr_pages = round_down(nr_pages, 1 << huge_page_order(&hstates[idx]));

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		mutex_lock(&hugetlb_limit_mutex);
		ret = page_counter_set_max(&h_cg->hugepage[idx], nr_pages);
		mutex_unlock(&hugetlb_limit_mutex);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}

static ssize_t hugetlb_cgroup_write_legacy(struct kernfs_open_file *of,
					   char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "-1");
}

static ssize_t hugetlb_cgroup_write_dfl(struct kernfs_open_file *of,
					char *buf, size_t nbytes, loff_t off)
{
	return hugetlb_cgroup_write(of, buf, nbytes, off, "max");
}

static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}
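
/*
 * Example of the resulting write semantics (illustration only, assuming a
 * 2MB hstate; not part of the original file): writing "max" (v2) or "-1"
 * (v1) to the limit file lifts the limit, and any byte value is rounded
 * down to a multiple of the huge page size, so
 * "echo 1G > hugetlb.2MB.limit_in_bytes" sets a limit of 512 huge pages.
 * In v1, writing anything to max_usage_in_bytes or failcnt resets the
 * watermark or the failure counter; the written value itself is ignored.
 */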

static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= (1UL << 30))
		snprintf(buf, size, "%luGB", hsize >> 30);
	else if (hsize >= (1UL << 20))
		snprintf(buf, size, "%luMB", hsize >> 20);
	else
		snprintf(buf, size, "%luKB", hsize >> 10);
	return buf;
}

static int __hugetlb_events_show(struct seq_file *seq, bool local)
{
	int idx;
	long max;
	struct cftype *cft = seq_cft(seq);
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(seq_css(seq));

	idx = MEMFILE_IDX(cft->private);

	if (local)
		max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]);
	else
		max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]);

	seq_printf(seq, "max %lu\n", max);

	return 0;
}

static int hugetlb_events_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, false);
}

static int hugetlb_events_local_show(struct seq_file *seq, void *v)
{
	return __hugetlb_events_show(seq, true);
}

static void __init __hugetlb_cgroup_file_dfl_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, 32, huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_dfl[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->write = hugetlb_cgroup_write_dfl;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the current usage file */
	cft = &h->cgroup_files_dfl[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.current", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->seq_show = hugetlb_cgroup_read_u64_max;
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events file */
	cft = &h->cgroup_files_dfl[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup, events_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* Add the events.local file */
	cft = &h->cgroup_files_dfl[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.events.local", buf);
	cft->private = MEMFILE_PRIVATE(idx, 0);
	cft->seq_show = hugetlb_events_local_show;
	cft->file_offset = offsetof(struct hugetlb_cgroup,
				    events_local_file[idx]);
	cft->flags = CFTYPE_NOT_ON_ROOT;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_dfl[4];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_dfl_cftypes(&hugetlb_cgrp_subsys,
				       h->cgroup_files_dfl));
}
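
/*
 * Illustrative example (added for clarity): with a 2MB and a 1GB hstate,
 * the function above registers hugetlb.2MB.max, hugetlb.2MB.current,
 * hugetlb.2MB.events and hugetlb.2MB.events.local, plus the corresponding
 * hugetlb.1GB.* files, on the cgroup v2 hierarchy. The legacy init below
 * instead creates the v1 *.limit_in_bytes, *.usage_in_bytes,
 * *.max_usage_in_bytes and *.failcnt files.
 */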

static void __init __hugetlb_cgroup_file_legacy_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, 32, huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files_legacy[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write_legacy;

	/* Add the usage file */
	cft = &h->cgroup_files_legacy[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the max usage file */
	cft = &h->cgroup_files_legacy[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files_legacy[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files_legacy[4];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
					  h->cgroup_files_legacy));
}

static void __init __hugetlb_cgroup_file_init(int idx)
{
	__hugetlb_cgroup_file_dfl_init(idx);
	__hugetlb_cgroup_file_legacy_init(idx);
}

void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].private for storing cgroup details.
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}

/*
 * hugetlb_lock makes sure a parallel cgroup rmdir won't happen
 * while we migrate hugepages.
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
	struct hugetlb_cgroup *h_cg;
	struct hstate *h = page_hstate(oldhpage);

	if (hugetlb_cgroup_disabled())
		return;

	VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
	spin_lock(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(oldhpage);
	set_hugetlb_cgroup(oldhpage, NULL);

	/* move the h_cg details to new cgroup */
	set_hugetlb_cgroup(newhpage, h_cg);
	list_move(&newhpage->lru, &h->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
	return;
}

static struct cftype hugetlb_files[] = {
	{} /* terminate */
};

struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
	.dfl_cftypes	= hugetlb_files,
	.legacy_cftypes	= hugetlb_files,
};