1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * User interface for Resource Alloction in Resource Director Technology(RDT) 4 * 5 * Copyright (C) 2016 Intel Corporation 6 * 7 * Author: Fenghua Yu <fenghua.yu@intel.com> 8 * 9 * More information about RDT be found in the Intel (R) x86 Architecture 10 * Software Developer Manual. 11 */ 12 13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 15 #include <linux/cacheinfo.h> 16 #include <linux/cpu.h> 17 #include <linux/debugfs.h> 18 #include <linux/fs.h> 19 #include <linux/fs_parser.h> 20 #include <linux/sysfs.h> 21 #include <linux/kernfs.h> 22 #include <linux/seq_buf.h> 23 #include <linux/seq_file.h> 24 #include <linux/sched/signal.h> 25 #include <linux/sched/task.h> 26 #include <linux/slab.h> 27 #include <linux/task_work.h> 28 #include <linux/user_namespace.h> 29 30 #include <uapi/linux/magic.h> 31 32 #include <asm/resctrl.h> 33 #include "internal.h" 34 35 DEFINE_STATIC_KEY_FALSE(rdt_enable_key); 36 DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key); 37 DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key); 38 static struct kernfs_root *rdt_root; 39 struct rdtgroup rdtgroup_default; 40 LIST_HEAD(rdt_all_groups); 41 42 /* Kernel fs node for "info" directory under root */ 43 static struct kernfs_node *kn_info; 44 45 /* Kernel fs node for "mon_groups" directory under root */ 46 static struct kernfs_node *kn_mongrp; 47 48 /* Kernel fs node for "mon_data" directory under root */ 49 static struct kernfs_node *kn_mondata; 50 51 static struct seq_buf last_cmd_status; 52 static char last_cmd_status_buf[512]; 53 54 struct dentry *debugfs_resctrl; 55 56 void rdt_last_cmd_clear(void) 57 { 58 lockdep_assert_held(&rdtgroup_mutex); 59 seq_buf_clear(&last_cmd_status); 60 } 61 62 void rdt_last_cmd_puts(const char *s) 63 { 64 lockdep_assert_held(&rdtgroup_mutex); 65 seq_buf_puts(&last_cmd_status, s); 66 } 67 68 void rdt_last_cmd_printf(const char *fmt, ...) 69 { 70 va_list ap; 71 72 va_start(ap, fmt); 73 lockdep_assert_held(&rdtgroup_mutex); 74 seq_buf_vprintf(&last_cmd_status, fmt, ap); 75 va_end(ap); 76 } 77 78 /* 79 * Trivial allocator for CLOSIDs. Since h/w only supports a small number, 80 * we can keep a bitmap of free CLOSIDs in a single integer. 81 * 82 * Using a global CLOSID across all resources has some advantages and 83 * some drawbacks: 84 * + We can simply set "current->closid" to assign a task to a resource 85 * group. 86 * + Context switch code can avoid extra memory references deciding which 87 * CLOSID to load into the PQR_ASSOC MSR 88 * - We give up some options in configuring resource groups across multi-socket 89 * systems. 90 * - Our choices on how to configure each resource become progressively more 91 * limited as the number of resources grows. 92 */ 93 static int closid_free_map; 94 static int closid_free_map_len; 95 96 int closids_supported(void) 97 { 98 return closid_free_map_len; 99 } 100 101 static void closid_init(void) 102 { 103 struct rdt_resource *r; 104 int rdt_min_closid = 32; 105 106 /* Compute rdt_min_closid across all resources */ 107 for_each_alloc_enabled_rdt_resource(r) 108 rdt_min_closid = min(rdt_min_closid, r->num_closid); 109 110 closid_free_map = BIT_MASK(rdt_min_closid) - 1; 111 112 /* CLOSID 0 is always reserved for the default group */ 113 closid_free_map &= ~1; 114 closid_free_map_len = rdt_min_closid; 115 } 116 117 static int closid_alloc(void) 118 { 119 u32 closid = ffs(closid_free_map); 120 121 if (closid == 0) 122 return -ENOSPC; 123 closid--; 124 closid_free_map &= ~(1 << closid); 125 126 return closid; 127 } 128 129 void closid_free(int closid) 130 { 131 closid_free_map |= 1 << closid; 132 } 133 134 /** 135 * closid_allocated - test if provided closid is in use 136 * @closid: closid to be tested 137 * 138 * Return: true if @closid is currently associated with a resource group, 139 * false if @closid is free 140 */ 141 static bool closid_allocated(unsigned int closid) 142 { 143 return (closid_free_map & (1 << closid)) == 0; 144 } 145 146 /** 147 * rdtgroup_mode_by_closid - Return mode of resource group with closid 148 * @closid: closid if the resource group 149 * 150 * Each resource group is associated with a @closid. Here the mode 151 * of a resource group can be queried by searching for it using its closid. 152 * 153 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid 154 */ 155 enum rdtgrp_mode rdtgroup_mode_by_closid(int closid) 156 { 157 struct rdtgroup *rdtgrp; 158 159 list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { 160 if (rdtgrp->closid == closid) 161 return rdtgrp->mode; 162 } 163 164 return RDT_NUM_MODES; 165 } 166 167 static const char * const rdt_mode_str[] = { 168 [RDT_MODE_SHAREABLE] = "shareable", 169 [RDT_MODE_EXCLUSIVE] = "exclusive", 170 [RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup", 171 [RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked", 172 }; 173 174 /** 175 * rdtgroup_mode_str - Return the string representation of mode 176 * @mode: the resource group mode as &enum rdtgroup_mode 177 * 178 * Return: string representation of valid mode, "unknown" otherwise 179 */ 180 static const char *rdtgroup_mode_str(enum rdtgrp_mode mode) 181 { 182 if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES) 183 return "unknown"; 184 185 return rdt_mode_str[mode]; 186 } 187 188 /* set uid and gid of rdtgroup dirs and files to that of the creator */ 189 static int rdtgroup_kn_set_ugid(struct kernfs_node *kn) 190 { 191 struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID, 192 .ia_uid = current_fsuid(), 193 .ia_gid = current_fsgid(), }; 194 195 if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) && 196 gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID)) 197 return 0; 198 199 return kernfs_setattr(kn, &iattr); 200 } 201 202 static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft) 203 { 204 struct kernfs_node *kn; 205 int ret; 206 207 kn = __kernfs_create_file(parent_kn, rft->name, rft->mode, 208 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 209 0, rft->kf_ops, rft, NULL, NULL); 210 if (IS_ERR(kn)) 211 return PTR_ERR(kn); 212 213 ret = rdtgroup_kn_set_ugid(kn); 214 if (ret) { 215 kernfs_remove(kn); 216 return ret; 217 } 218 219 return 0; 220 } 221 222 static int rdtgroup_seqfile_show(struct seq_file *m, void *arg) 223 { 224 struct kernfs_open_file *of = m->private; 225 struct rftype *rft = of->kn->priv; 226 227 if (rft->seq_show) 228 return rft->seq_show(of, m, arg); 229 return 0; 230 } 231 232 static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf, 233 size_t nbytes, loff_t off) 234 { 235 struct rftype *rft = of->kn->priv; 236 237 if (rft->write) 238 return rft->write(of, buf, nbytes, off); 239 240 return -EINVAL; 241 } 242 243 static struct kernfs_ops rdtgroup_kf_single_ops = { 244 .atomic_write_len = PAGE_SIZE, 245 .write = rdtgroup_file_write, 246 .seq_show = rdtgroup_seqfile_show, 247 }; 248 249 static struct kernfs_ops kf_mondata_ops = { 250 .atomic_write_len = PAGE_SIZE, 251 .seq_show = rdtgroup_mondata_show, 252 }; 253 254 static bool is_cpu_list(struct kernfs_open_file *of) 255 { 256 struct rftype *rft = of->kn->priv; 257 258 return rft->flags & RFTYPE_FLAGS_CPUS_LIST; 259 } 260 261 static int rdtgroup_cpus_show(struct kernfs_open_file *of, 262 struct seq_file *s, void *v) 263 { 264 struct rdtgroup *rdtgrp; 265 struct cpumask *mask; 266 int ret = 0; 267 268 rdtgrp = rdtgroup_kn_lock_live(of->kn); 269 270 if (rdtgrp) { 271 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { 272 if (!rdtgrp->plr->d) { 273 rdt_last_cmd_clear(); 274 rdt_last_cmd_puts("Cache domain offline\n"); 275 ret = -ENODEV; 276 } else { 277 mask = &rdtgrp->plr->d->cpu_mask; 278 seq_printf(s, is_cpu_list(of) ? 279 "%*pbl\n" : "%*pb\n", 280 cpumask_pr_args(mask)); 281 } 282 } else { 283 seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n", 284 cpumask_pr_args(&rdtgrp->cpu_mask)); 285 } 286 } else { 287 ret = -ENOENT; 288 } 289 rdtgroup_kn_unlock(of->kn); 290 291 return ret; 292 } 293 294 /* 295 * This is safe against resctrl_sched_in() called from __switch_to() 296 * because __switch_to() is executed with interrupts disabled. A local call 297 * from update_closid_rmid() is proteced against __switch_to() because 298 * preemption is disabled. 299 */ 300 static void update_cpu_closid_rmid(void *info) 301 { 302 struct rdtgroup *r = info; 303 304 if (r) { 305 this_cpu_write(pqr_state.default_closid, r->closid); 306 this_cpu_write(pqr_state.default_rmid, r->mon.rmid); 307 } 308 309 /* 310 * We cannot unconditionally write the MSR because the current 311 * executing task might have its own closid selected. Just reuse 312 * the context switch code. 313 */ 314 resctrl_sched_in(); 315 } 316 317 /* 318 * Update the PGR_ASSOC MSR on all cpus in @cpu_mask, 319 * 320 * Per task closids/rmids must have been set up before calling this function. 321 */ 322 static void 323 update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r) 324 { 325 int cpu = get_cpu(); 326 327 if (cpumask_test_cpu(cpu, cpu_mask)) 328 update_cpu_closid_rmid(r); 329 smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1); 330 put_cpu(); 331 } 332 333 static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, 334 cpumask_var_t tmpmask) 335 { 336 struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp; 337 struct list_head *head; 338 339 /* Check whether cpus belong to parent ctrl group */ 340 cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask); 341 if (cpumask_weight(tmpmask)) { 342 rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n"); 343 return -EINVAL; 344 } 345 346 /* Check whether cpus are dropped from this group */ 347 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); 348 if (cpumask_weight(tmpmask)) { 349 /* Give any dropped cpus to parent rdtgroup */ 350 cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask); 351 update_closid_rmid(tmpmask, prgrp); 352 } 353 354 /* 355 * If we added cpus, remove them from previous group that owned them 356 * and update per-cpu rmid 357 */ 358 cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); 359 if (cpumask_weight(tmpmask)) { 360 head = &prgrp->mon.crdtgrp_list; 361 list_for_each_entry(crgrp, head, mon.crdtgrp_list) { 362 if (crgrp == rdtgrp) 363 continue; 364 cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask, 365 tmpmask); 366 } 367 update_closid_rmid(tmpmask, rdtgrp); 368 } 369 370 /* Done pushing/pulling - update this group with new mask */ 371 cpumask_copy(&rdtgrp->cpu_mask, newmask); 372 373 return 0; 374 } 375 376 static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m) 377 { 378 struct rdtgroup *crgrp; 379 380 cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m); 381 /* update the child mon group masks as well*/ 382 list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list) 383 cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask); 384 } 385 386 static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, 387 cpumask_var_t tmpmask, cpumask_var_t tmpmask1) 388 { 389 struct rdtgroup *r, *crgrp; 390 struct list_head *head; 391 392 /* Check whether cpus are dropped from this group */ 393 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); 394 if (cpumask_weight(tmpmask)) { 395 /* Can't drop from default group */ 396 if (rdtgrp == &rdtgroup_default) { 397 rdt_last_cmd_puts("Can't drop CPUs from default group\n"); 398 return -EINVAL; 399 } 400 401 /* Give any dropped cpus to rdtgroup_default */ 402 cpumask_or(&rdtgroup_default.cpu_mask, 403 &rdtgroup_default.cpu_mask, tmpmask); 404 update_closid_rmid(tmpmask, &rdtgroup_default); 405 } 406 407 /* 408 * If we added cpus, remove them from previous group and 409 * the prev group's child groups that owned them 410 * and update per-cpu closid/rmid. 411 */ 412 cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); 413 if (cpumask_weight(tmpmask)) { 414 list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) { 415 if (r == rdtgrp) 416 continue; 417 cpumask_and(tmpmask1, &r->cpu_mask, tmpmask); 418 if (cpumask_weight(tmpmask1)) 419 cpumask_rdtgrp_clear(r, tmpmask1); 420 } 421 update_closid_rmid(tmpmask, rdtgrp); 422 } 423 424 /* Done pushing/pulling - update this group with new mask */ 425 cpumask_copy(&rdtgrp->cpu_mask, newmask); 426 427 /* 428 * Clear child mon group masks since there is a new parent mask 429 * now and update the rmid for the cpus the child lost. 430 */ 431 head = &rdtgrp->mon.crdtgrp_list; 432 list_for_each_entry(crgrp, head, mon.crdtgrp_list) { 433 cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask); 434 update_closid_rmid(tmpmask, rdtgrp); 435 cpumask_clear(&crgrp->cpu_mask); 436 } 437 438 return 0; 439 } 440 441 static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, 442 char *buf, size_t nbytes, loff_t off) 443 { 444 cpumask_var_t tmpmask, newmask, tmpmask1; 445 struct rdtgroup *rdtgrp; 446 int ret; 447 448 if (!buf) 449 return -EINVAL; 450 451 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) 452 return -ENOMEM; 453 if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) { 454 free_cpumask_var(tmpmask); 455 return -ENOMEM; 456 } 457 if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) { 458 free_cpumask_var(tmpmask); 459 free_cpumask_var(newmask); 460 return -ENOMEM; 461 } 462 463 rdtgrp = rdtgroup_kn_lock_live(of->kn); 464 if (!rdtgrp) { 465 ret = -ENOENT; 466 goto unlock; 467 } 468 469 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || 470 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { 471 ret = -EINVAL; 472 rdt_last_cmd_puts("Pseudo-locking in progress\n"); 473 goto unlock; 474 } 475 476 if (is_cpu_list(of)) 477 ret = cpulist_parse(buf, newmask); 478 else 479 ret = cpumask_parse(buf, newmask); 480 481 if (ret) { 482 rdt_last_cmd_puts("Bad CPU list/mask\n"); 483 goto unlock; 484 } 485 486 /* check that user didn't specify any offline cpus */ 487 cpumask_andnot(tmpmask, newmask, cpu_online_mask); 488 if (cpumask_weight(tmpmask)) { 489 ret = -EINVAL; 490 rdt_last_cmd_puts("Can only assign online CPUs\n"); 491 goto unlock; 492 } 493 494 if (rdtgrp->type == RDTCTRL_GROUP) 495 ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1); 496 else if (rdtgrp->type == RDTMON_GROUP) 497 ret = cpus_mon_write(rdtgrp, newmask, tmpmask); 498 else 499 ret = -EINVAL; 500 501 unlock: 502 rdtgroup_kn_unlock(of->kn); 503 free_cpumask_var(tmpmask); 504 free_cpumask_var(newmask); 505 free_cpumask_var(tmpmask1); 506 507 return ret ?: nbytes; 508 } 509 510 /** 511 * rdtgroup_remove - the helper to remove resource group safely 512 * @rdtgrp: resource group to remove 513 * 514 * On resource group creation via a mkdir, an extra kernfs_node reference is 515 * taken to ensure that the rdtgroup structure remains accessible for the 516 * rdtgroup_kn_unlock() calls where it is removed. 517 * 518 * Drop the extra reference here, then free the rdtgroup structure. 519 * 520 * Return: void 521 */ 522 static void rdtgroup_remove(struct rdtgroup *rdtgrp) 523 { 524 kernfs_put(rdtgrp->kn); 525 kfree(rdtgrp); 526 } 527 528 static void _update_task_closid_rmid(void *task) 529 { 530 /* 531 * If the task is still current on this CPU, update PQR_ASSOC MSR. 532 * Otherwise, the MSR is updated when the task is scheduled in. 533 */ 534 if (task == current) 535 resctrl_sched_in(); 536 } 537 538 static void update_task_closid_rmid(struct task_struct *t) 539 { 540 if (IS_ENABLED(CONFIG_SMP) && task_curr(t)) 541 smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1); 542 else 543 _update_task_closid_rmid(t); 544 } 545 546 static int __rdtgroup_move_task(struct task_struct *tsk, 547 struct rdtgroup *rdtgrp) 548 { 549 /* 550 * Set the task's closid/rmid before the PQR_ASSOC MSR can be 551 * updated by them. 552 * 553 * For ctrl_mon groups, move both closid and rmid. 554 * For monitor groups, can move the tasks only from 555 * their parent CTRL group. 556 */ 557 558 if (rdtgrp->type == RDTCTRL_GROUP) { 559 tsk->closid = rdtgrp->closid; 560 tsk->rmid = rdtgrp->mon.rmid; 561 } else if (rdtgrp->type == RDTMON_GROUP) { 562 if (rdtgrp->mon.parent->closid == tsk->closid) { 563 tsk->rmid = rdtgrp->mon.rmid; 564 } else { 565 rdt_last_cmd_puts("Can't move task to different control group\n"); 566 return -EINVAL; 567 } 568 } 569 570 /* 571 * Ensure the task's closid and rmid are written before determining if 572 * the task is current that will decide if it will be interrupted. 573 */ 574 barrier(); 575 576 /* 577 * By now, the task's closid and rmid are set. If the task is current 578 * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource 579 * group go into effect. If the task is not current, the MSR will be 580 * updated when the task is scheduled in. 581 */ 582 update_task_closid_rmid(tsk); 583 584 return 0; 585 } 586 587 static bool is_closid_match(struct task_struct *t, struct rdtgroup *r) 588 { 589 return (rdt_alloc_capable && 590 (r->type == RDTCTRL_GROUP) && (t->closid == r->closid)); 591 } 592 593 static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r) 594 { 595 return (rdt_mon_capable && 596 (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid)); 597 } 598 599 /** 600 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group 601 * @r: Resource group 602 * 603 * Return: 1 if tasks have been assigned to @r, 0 otherwise 604 */ 605 int rdtgroup_tasks_assigned(struct rdtgroup *r) 606 { 607 struct task_struct *p, *t; 608 int ret = 0; 609 610 lockdep_assert_held(&rdtgroup_mutex); 611 612 rcu_read_lock(); 613 for_each_process_thread(p, t) { 614 if (is_closid_match(t, r) || is_rmid_match(t, r)) { 615 ret = 1; 616 break; 617 } 618 } 619 rcu_read_unlock(); 620 621 return ret; 622 } 623 624 static int rdtgroup_task_write_permission(struct task_struct *task, 625 struct kernfs_open_file *of) 626 { 627 const struct cred *tcred = get_task_cred(task); 628 const struct cred *cred = current_cred(); 629 int ret = 0; 630 631 /* 632 * Even if we're attaching all tasks in the thread group, we only 633 * need to check permissions on one of them. 634 */ 635 if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && 636 !uid_eq(cred->euid, tcred->uid) && 637 !uid_eq(cred->euid, tcred->suid)) { 638 rdt_last_cmd_printf("No permission to move task %d\n", task->pid); 639 ret = -EPERM; 640 } 641 642 put_cred(tcred); 643 return ret; 644 } 645 646 static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp, 647 struct kernfs_open_file *of) 648 { 649 struct task_struct *tsk; 650 int ret; 651 652 rcu_read_lock(); 653 if (pid) { 654 tsk = find_task_by_vpid(pid); 655 if (!tsk) { 656 rcu_read_unlock(); 657 rdt_last_cmd_printf("No task %d\n", pid); 658 return -ESRCH; 659 } 660 } else { 661 tsk = current; 662 } 663 664 get_task_struct(tsk); 665 rcu_read_unlock(); 666 667 ret = rdtgroup_task_write_permission(tsk, of); 668 if (!ret) 669 ret = __rdtgroup_move_task(tsk, rdtgrp); 670 671 put_task_struct(tsk); 672 return ret; 673 } 674 675 static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of, 676 char *buf, size_t nbytes, loff_t off) 677 { 678 struct rdtgroup *rdtgrp; 679 int ret = 0; 680 pid_t pid; 681 682 if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) 683 return -EINVAL; 684 rdtgrp = rdtgroup_kn_lock_live(of->kn); 685 if (!rdtgrp) { 686 rdtgroup_kn_unlock(of->kn); 687 return -ENOENT; 688 } 689 rdt_last_cmd_clear(); 690 691 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || 692 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { 693 ret = -EINVAL; 694 rdt_last_cmd_puts("Pseudo-locking in progress\n"); 695 goto unlock; 696 } 697 698 ret = rdtgroup_move_task(pid, rdtgrp, of); 699 700 unlock: 701 rdtgroup_kn_unlock(of->kn); 702 703 return ret ?: nbytes; 704 } 705 706 static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s) 707 { 708 struct task_struct *p, *t; 709 710 rcu_read_lock(); 711 for_each_process_thread(p, t) { 712 if (is_closid_match(t, r) || is_rmid_match(t, r)) 713 seq_printf(s, "%d\n", t->pid); 714 } 715 rcu_read_unlock(); 716 } 717 718 static int rdtgroup_tasks_show(struct kernfs_open_file *of, 719 struct seq_file *s, void *v) 720 { 721 struct rdtgroup *rdtgrp; 722 int ret = 0; 723 724 rdtgrp = rdtgroup_kn_lock_live(of->kn); 725 if (rdtgrp) 726 show_rdt_tasks(rdtgrp, s); 727 else 728 ret = -ENOENT; 729 rdtgroup_kn_unlock(of->kn); 730 731 return ret; 732 } 733 734 #ifdef CONFIG_PROC_CPU_RESCTRL 735 736 /* 737 * A task can only be part of one resctrl control group and of one monitor 738 * group which is associated to that control group. 739 * 740 * 1) res: 741 * mon: 742 * 743 * resctrl is not available. 744 * 745 * 2) res:/ 746 * mon: 747 * 748 * Task is part of the root resctrl control group, and it is not associated 749 * to any monitor group. 750 * 751 * 3) res:/ 752 * mon:mon0 753 * 754 * Task is part of the root resctrl control group and monitor group mon0. 755 * 756 * 4) res:group0 757 * mon: 758 * 759 * Task is part of resctrl control group group0, and it is not associated 760 * to any monitor group. 761 * 762 * 5) res:group0 763 * mon:mon1 764 * 765 * Task is part of resctrl control group group0 and monitor group mon1. 766 */ 767 int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns, 768 struct pid *pid, struct task_struct *tsk) 769 { 770 struct rdtgroup *rdtg; 771 int ret = 0; 772 773 mutex_lock(&rdtgroup_mutex); 774 775 /* Return empty if resctrl has not been mounted. */ 776 if (!static_branch_unlikely(&rdt_enable_key)) { 777 seq_puts(s, "res:\nmon:\n"); 778 goto unlock; 779 } 780 781 list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) { 782 struct rdtgroup *crg; 783 784 /* 785 * Task information is only relevant for shareable 786 * and exclusive groups. 787 */ 788 if (rdtg->mode != RDT_MODE_SHAREABLE && 789 rdtg->mode != RDT_MODE_EXCLUSIVE) 790 continue; 791 792 if (rdtg->closid != tsk->closid) 793 continue; 794 795 seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "", 796 rdtg->kn->name); 797 seq_puts(s, "mon:"); 798 list_for_each_entry(crg, &rdtg->mon.crdtgrp_list, 799 mon.crdtgrp_list) { 800 if (tsk->rmid != crg->mon.rmid) 801 continue; 802 seq_printf(s, "%s", crg->kn->name); 803 break; 804 } 805 seq_putc(s, '\n'); 806 goto unlock; 807 } 808 /* 809 * The above search should succeed. Otherwise return 810 * with an error. 811 */ 812 ret = -ENOENT; 813 unlock: 814 mutex_unlock(&rdtgroup_mutex); 815 816 return ret; 817 } 818 #endif 819 820 static int rdt_last_cmd_status_show(struct kernfs_open_file *of, 821 struct seq_file *seq, void *v) 822 { 823 int len; 824 825 mutex_lock(&rdtgroup_mutex); 826 len = seq_buf_used(&last_cmd_status); 827 if (len) 828 seq_printf(seq, "%.*s", len, last_cmd_status_buf); 829 else 830 seq_puts(seq, "ok\n"); 831 mutex_unlock(&rdtgroup_mutex); 832 return 0; 833 } 834 835 static int rdt_num_closids_show(struct kernfs_open_file *of, 836 struct seq_file *seq, void *v) 837 { 838 struct rdt_resource *r = of->kn->parent->priv; 839 840 seq_printf(seq, "%d\n", r->num_closid); 841 return 0; 842 } 843 844 static int rdt_default_ctrl_show(struct kernfs_open_file *of, 845 struct seq_file *seq, void *v) 846 { 847 struct rdt_resource *r = of->kn->parent->priv; 848 849 seq_printf(seq, "%x\n", r->default_ctrl); 850 return 0; 851 } 852 853 static int rdt_min_cbm_bits_show(struct kernfs_open_file *of, 854 struct seq_file *seq, void *v) 855 { 856 struct rdt_resource *r = of->kn->parent->priv; 857 858 seq_printf(seq, "%u\n", r->cache.min_cbm_bits); 859 return 0; 860 } 861 862 static int rdt_shareable_bits_show(struct kernfs_open_file *of, 863 struct seq_file *seq, void *v) 864 { 865 struct rdt_resource *r = of->kn->parent->priv; 866 867 seq_printf(seq, "%x\n", r->cache.shareable_bits); 868 return 0; 869 } 870 871 /** 872 * rdt_bit_usage_show - Display current usage of resources 873 * 874 * A domain is a shared resource that can now be allocated differently. Here 875 * we display the current regions of the domain as an annotated bitmask. 876 * For each domain of this resource its allocation bitmask 877 * is annotated as below to indicate the current usage of the corresponding bit: 878 * 0 - currently unused 879 * X - currently available for sharing and used by software and hardware 880 * H - currently used by hardware only but available for software use 881 * S - currently used and shareable by software only 882 * E - currently used exclusively by one resource group 883 * P - currently pseudo-locked by one resource group 884 */ 885 static int rdt_bit_usage_show(struct kernfs_open_file *of, 886 struct seq_file *seq, void *v) 887 { 888 struct rdt_resource *r = of->kn->parent->priv; 889 /* 890 * Use unsigned long even though only 32 bits are used to ensure 891 * test_bit() is used safely. 892 */ 893 unsigned long sw_shareable = 0, hw_shareable = 0; 894 unsigned long exclusive = 0, pseudo_locked = 0; 895 struct rdt_domain *dom; 896 int i, hwb, swb, excl, psl; 897 enum rdtgrp_mode mode; 898 bool sep = false; 899 u32 *ctrl; 900 901 mutex_lock(&rdtgroup_mutex); 902 hw_shareable = r->cache.shareable_bits; 903 list_for_each_entry(dom, &r->domains, list) { 904 if (sep) 905 seq_putc(seq, ';'); 906 ctrl = dom->ctrl_val; 907 sw_shareable = 0; 908 exclusive = 0; 909 seq_printf(seq, "%d=", dom->id); 910 for (i = 0; i < closids_supported(); i++, ctrl++) { 911 if (!closid_allocated(i)) 912 continue; 913 mode = rdtgroup_mode_by_closid(i); 914 switch (mode) { 915 case RDT_MODE_SHAREABLE: 916 sw_shareable |= *ctrl; 917 break; 918 case RDT_MODE_EXCLUSIVE: 919 exclusive |= *ctrl; 920 break; 921 case RDT_MODE_PSEUDO_LOCKSETUP: 922 /* 923 * RDT_MODE_PSEUDO_LOCKSETUP is possible 924 * here but not included since the CBM 925 * associated with this CLOSID in this mode 926 * is not initialized and no task or cpu can be 927 * assigned this CLOSID. 928 */ 929 break; 930 case RDT_MODE_PSEUDO_LOCKED: 931 case RDT_NUM_MODES: 932 WARN(1, 933 "invalid mode for closid %d\n", i); 934 break; 935 } 936 } 937 for (i = r->cache.cbm_len - 1; i >= 0; i--) { 938 pseudo_locked = dom->plr ? dom->plr->cbm : 0; 939 hwb = test_bit(i, &hw_shareable); 940 swb = test_bit(i, &sw_shareable); 941 excl = test_bit(i, &exclusive); 942 psl = test_bit(i, &pseudo_locked); 943 if (hwb && swb) 944 seq_putc(seq, 'X'); 945 else if (hwb && !swb) 946 seq_putc(seq, 'H'); 947 else if (!hwb && swb) 948 seq_putc(seq, 'S'); 949 else if (excl) 950 seq_putc(seq, 'E'); 951 else if (psl) 952 seq_putc(seq, 'P'); 953 else /* Unused bits remain */ 954 seq_putc(seq, '0'); 955 } 956 sep = true; 957 } 958 seq_putc(seq, '\n'); 959 mutex_unlock(&rdtgroup_mutex); 960 return 0; 961 } 962 963 static int rdt_min_bw_show(struct kernfs_open_file *of, 964 struct seq_file *seq, void *v) 965 { 966 struct rdt_resource *r = of->kn->parent->priv; 967 968 seq_printf(seq, "%u\n", r->membw.min_bw); 969 return 0; 970 } 971 972 static int rdt_num_rmids_show(struct kernfs_open_file *of, 973 struct seq_file *seq, void *v) 974 { 975 struct rdt_resource *r = of->kn->parent->priv; 976 977 seq_printf(seq, "%d\n", r->num_rmid); 978 979 return 0; 980 } 981 982 static int rdt_mon_features_show(struct kernfs_open_file *of, 983 struct seq_file *seq, void *v) 984 { 985 struct rdt_resource *r = of->kn->parent->priv; 986 struct mon_evt *mevt; 987 988 list_for_each_entry(mevt, &r->evt_list, list) 989 seq_printf(seq, "%s\n", mevt->name); 990 991 return 0; 992 } 993 994 static int rdt_bw_gran_show(struct kernfs_open_file *of, 995 struct seq_file *seq, void *v) 996 { 997 struct rdt_resource *r = of->kn->parent->priv; 998 999 seq_printf(seq, "%u\n", r->membw.bw_gran); 1000 return 0; 1001 } 1002 1003 static int rdt_delay_linear_show(struct kernfs_open_file *of, 1004 struct seq_file *seq, void *v) 1005 { 1006 struct rdt_resource *r = of->kn->parent->priv; 1007 1008 seq_printf(seq, "%u\n", r->membw.delay_linear); 1009 return 0; 1010 } 1011 1012 static int max_threshold_occ_show(struct kernfs_open_file *of, 1013 struct seq_file *seq, void *v) 1014 { 1015 struct rdt_resource *r = of->kn->parent->priv; 1016 1017 seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale); 1018 1019 return 0; 1020 } 1021 1022 static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of, 1023 struct seq_file *seq, void *v) 1024 { 1025 struct rdt_resource *r = of->kn->parent->priv; 1026 1027 if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD) 1028 seq_puts(seq, "per-thread\n"); 1029 else 1030 seq_puts(seq, "max\n"); 1031 1032 return 0; 1033 } 1034 1035 static ssize_t max_threshold_occ_write(struct kernfs_open_file *of, 1036 char *buf, size_t nbytes, loff_t off) 1037 { 1038 struct rdt_resource *r = of->kn->parent->priv; 1039 unsigned int bytes; 1040 int ret; 1041 1042 ret = kstrtouint(buf, 0, &bytes); 1043 if (ret) 1044 return ret; 1045 1046 if (bytes > (boot_cpu_data.x86_cache_size * 1024)) 1047 return -EINVAL; 1048 1049 resctrl_cqm_threshold = bytes / r->mon_scale; 1050 1051 return nbytes; 1052 } 1053 1054 /* 1055 * rdtgroup_mode_show - Display mode of this resource group 1056 */ 1057 static int rdtgroup_mode_show(struct kernfs_open_file *of, 1058 struct seq_file *s, void *v) 1059 { 1060 struct rdtgroup *rdtgrp; 1061 1062 rdtgrp = rdtgroup_kn_lock_live(of->kn); 1063 if (!rdtgrp) { 1064 rdtgroup_kn_unlock(of->kn); 1065 return -ENOENT; 1066 } 1067 1068 seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode)); 1069 1070 rdtgroup_kn_unlock(of->kn); 1071 return 0; 1072 } 1073 1074 /** 1075 * rdt_cdp_peer_get - Retrieve CDP peer if it exists 1076 * @r: RDT resource to which RDT domain @d belongs 1077 * @d: Cache instance for which a CDP peer is requested 1078 * @r_cdp: RDT resource that shares hardware with @r (RDT resource peer) 1079 * Used to return the result. 1080 * @d_cdp: RDT domain that shares hardware with @d (RDT domain peer) 1081 * Used to return the result. 1082 * 1083 * RDT resources are managed independently and by extension the RDT domains 1084 * (RDT resource instances) are managed independently also. The Code and 1085 * Data Prioritization (CDP) RDT resources, while managed independently, 1086 * could refer to the same underlying hardware. For example, 1087 * RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache. 1088 * 1089 * When provided with an RDT resource @r and an instance of that RDT 1090 * resource @d rdt_cdp_peer_get() will return if there is a peer RDT 1091 * resource and the exact instance that shares the same hardware. 1092 * 1093 * Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists. 1094 * If a CDP peer was found, @r_cdp will point to the peer RDT resource 1095 * and @d_cdp will point to the peer RDT domain. 1096 */ 1097 static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d, 1098 struct rdt_resource **r_cdp, 1099 struct rdt_domain **d_cdp) 1100 { 1101 struct rdt_resource *_r_cdp = NULL; 1102 struct rdt_domain *_d_cdp = NULL; 1103 int ret = 0; 1104 1105 switch (r->rid) { 1106 case RDT_RESOURCE_L3DATA: 1107 _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE]; 1108 break; 1109 case RDT_RESOURCE_L3CODE: 1110 _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3DATA]; 1111 break; 1112 case RDT_RESOURCE_L2DATA: 1113 _r_cdp = &rdt_resources_all[RDT_RESOURCE_L2CODE]; 1114 break; 1115 case RDT_RESOURCE_L2CODE: 1116 _r_cdp = &rdt_resources_all[RDT_RESOURCE_L2DATA]; 1117 break; 1118 default: 1119 ret = -ENOENT; 1120 goto out; 1121 } 1122 1123 /* 1124 * When a new CPU comes online and CDP is enabled then the new 1125 * RDT domains (if any) associated with both CDP RDT resources 1126 * are added in the same CPU online routine while the 1127 * rdtgroup_mutex is held. It should thus not happen for one 1128 * RDT domain to exist and be associated with its RDT CDP 1129 * resource but there is no RDT domain associated with the 1130 * peer RDT CDP resource. Hence the WARN. 1131 */ 1132 _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL); 1133 if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) { 1134 _r_cdp = NULL; 1135 _d_cdp = NULL; 1136 ret = -EINVAL; 1137 } 1138 1139 out: 1140 *r_cdp = _r_cdp; 1141 *d_cdp = _d_cdp; 1142 1143 return ret; 1144 } 1145 1146 /** 1147 * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other 1148 * @r: Resource to which domain instance @d belongs. 1149 * @d: The domain instance for which @closid is being tested. 1150 * @cbm: Capacity bitmask being tested. 1151 * @closid: Intended closid for @cbm. 1152 * @exclusive: Only check if overlaps with exclusive resource groups 1153 * 1154 * Checks if provided @cbm intended to be used for @closid on domain 1155 * @d overlaps with any other closids or other hardware usage associated 1156 * with this domain. If @exclusive is true then only overlaps with 1157 * resource groups in exclusive mode will be considered. If @exclusive 1158 * is false then overlaps with any resource group or hardware entities 1159 * will be considered. 1160 * 1161 * @cbm is unsigned long, even if only 32 bits are used, to make the 1162 * bitmap functions work correctly. 1163 * 1164 * Return: false if CBM does not overlap, true if it does. 1165 */ 1166 static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, 1167 unsigned long cbm, int closid, bool exclusive) 1168 { 1169 enum rdtgrp_mode mode; 1170 unsigned long ctrl_b; 1171 u32 *ctrl; 1172 int i; 1173 1174 /* Check for any overlap with regions used by hardware directly */ 1175 if (!exclusive) { 1176 ctrl_b = r->cache.shareable_bits; 1177 if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) 1178 return true; 1179 } 1180 1181 /* Check for overlap with other resource groups */ 1182 ctrl = d->ctrl_val; 1183 for (i = 0; i < closids_supported(); i++, ctrl++) { 1184 ctrl_b = *ctrl; 1185 mode = rdtgroup_mode_by_closid(i); 1186 if (closid_allocated(i) && i != closid && 1187 mode != RDT_MODE_PSEUDO_LOCKSETUP) { 1188 if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) { 1189 if (exclusive) { 1190 if (mode == RDT_MODE_EXCLUSIVE) 1191 return true; 1192 continue; 1193 } 1194 return true; 1195 } 1196 } 1197 } 1198 1199 return false; 1200 } 1201 1202 /** 1203 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware 1204 * @r: Resource to which domain instance @d belongs. 1205 * @d: The domain instance for which @closid is being tested. 1206 * @cbm: Capacity bitmask being tested. 1207 * @closid: Intended closid for @cbm. 1208 * @exclusive: Only check if overlaps with exclusive resource groups 1209 * 1210 * Resources that can be allocated using a CBM can use the CBM to control 1211 * the overlap of these allocations. rdtgroup_cmb_overlaps() is the test 1212 * for overlap. Overlap test is not limited to the specific resource for 1213 * which the CBM is intended though - when dealing with CDP resources that 1214 * share the underlying hardware the overlap check should be performed on 1215 * the CDP resource sharing the hardware also. 1216 * 1217 * Refer to description of __rdtgroup_cbm_overlaps() for the details of the 1218 * overlap test. 1219 * 1220 * Return: true if CBM overlap detected, false if there is no overlap 1221 */ 1222 bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, 1223 unsigned long cbm, int closid, bool exclusive) 1224 { 1225 struct rdt_resource *r_cdp; 1226 struct rdt_domain *d_cdp; 1227 1228 if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, exclusive)) 1229 return true; 1230 1231 if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp) < 0) 1232 return false; 1233 1234 return __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, exclusive); 1235 } 1236 1237 /** 1238 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive 1239 * 1240 * An exclusive resource group implies that there should be no sharing of 1241 * its allocated resources. At the time this group is considered to be 1242 * exclusive this test can determine if its current schemata supports this 1243 * setting by testing for overlap with all other resource groups. 1244 * 1245 * Return: true if resource group can be exclusive, false if there is overlap 1246 * with allocations of other resource groups and thus this resource group 1247 * cannot be exclusive. 1248 */ 1249 static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) 1250 { 1251 int closid = rdtgrp->closid; 1252 struct rdt_resource *r; 1253 bool has_cache = false; 1254 struct rdt_domain *d; 1255 1256 for_each_alloc_enabled_rdt_resource(r) { 1257 if (r->rid == RDT_RESOURCE_MBA) 1258 continue; 1259 has_cache = true; 1260 list_for_each_entry(d, &r->domains, list) { 1261 if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid], 1262 rdtgrp->closid, false)) { 1263 rdt_last_cmd_puts("Schemata overlaps\n"); 1264 return false; 1265 } 1266 } 1267 } 1268 1269 if (!has_cache) { 1270 rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n"); 1271 return false; 1272 } 1273 1274 return true; 1275 } 1276 1277 /** 1278 * rdtgroup_mode_write - Modify the resource group's mode 1279 * 1280 */ 1281 static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, 1282 char *buf, size_t nbytes, loff_t off) 1283 { 1284 struct rdtgroup *rdtgrp; 1285 enum rdtgrp_mode mode; 1286 int ret = 0; 1287 1288 /* Valid input requires a trailing newline */ 1289 if (nbytes == 0 || buf[nbytes - 1] != '\n') 1290 return -EINVAL; 1291 buf[nbytes - 1] = '\0'; 1292 1293 rdtgrp = rdtgroup_kn_lock_live(of->kn); 1294 if (!rdtgrp) { 1295 rdtgroup_kn_unlock(of->kn); 1296 return -ENOENT; 1297 } 1298 1299 rdt_last_cmd_clear(); 1300 1301 mode = rdtgrp->mode; 1302 1303 if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) || 1304 (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) || 1305 (!strcmp(buf, "pseudo-locksetup") && 1306 mode == RDT_MODE_PSEUDO_LOCKSETUP) || 1307 (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED)) 1308 goto out; 1309 1310 if (mode == RDT_MODE_PSEUDO_LOCKED) { 1311 rdt_last_cmd_puts("Cannot change pseudo-locked group\n"); 1312 ret = -EINVAL; 1313 goto out; 1314 } 1315 1316 if (!strcmp(buf, "shareable")) { 1317 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { 1318 ret = rdtgroup_locksetup_exit(rdtgrp); 1319 if (ret) 1320 goto out; 1321 } 1322 rdtgrp->mode = RDT_MODE_SHAREABLE; 1323 } else if (!strcmp(buf, "exclusive")) { 1324 if (!rdtgroup_mode_test_exclusive(rdtgrp)) { 1325 ret = -EINVAL; 1326 goto out; 1327 } 1328 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { 1329 ret = rdtgroup_locksetup_exit(rdtgrp); 1330 if (ret) 1331 goto out; 1332 } 1333 rdtgrp->mode = RDT_MODE_EXCLUSIVE; 1334 } else if (!strcmp(buf, "pseudo-locksetup")) { 1335 ret = rdtgroup_locksetup_enter(rdtgrp); 1336 if (ret) 1337 goto out; 1338 rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP; 1339 } else { 1340 rdt_last_cmd_puts("Unknown or unsupported mode\n"); 1341 ret = -EINVAL; 1342 } 1343 1344 out: 1345 rdtgroup_kn_unlock(of->kn); 1346 return ret ?: nbytes; 1347 } 1348 1349 /** 1350 * rdtgroup_cbm_to_size - Translate CBM to size in bytes 1351 * @r: RDT resource to which @d belongs. 1352 * @d: RDT domain instance. 1353 * @cbm: bitmask for which the size should be computed. 1354 * 1355 * The bitmask provided associated with the RDT domain instance @d will be 1356 * translated into how many bytes it represents. The size in bytes is 1357 * computed by first dividing the total cache size by the CBM length to 1358 * determine how many bytes each bit in the bitmask represents. The result 1359 * is multiplied with the number of bits set in the bitmask. 1360 * 1361 * @cbm is unsigned long, even if only 32 bits are used to make the 1362 * bitmap functions work correctly. 1363 */ 1364 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, 1365 struct rdt_domain *d, unsigned long cbm) 1366 { 1367 struct cpu_cacheinfo *ci; 1368 unsigned int size = 0; 1369 int num_b, i; 1370 1371 num_b = bitmap_weight(&cbm, r->cache.cbm_len); 1372 ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask)); 1373 for (i = 0; i < ci->num_leaves; i++) { 1374 if (ci->info_list[i].level == r->cache_level) { 1375 size = ci->info_list[i].size / r->cache.cbm_len * num_b; 1376 break; 1377 } 1378 } 1379 1380 return size; 1381 } 1382 1383 /** 1384 * rdtgroup_size_show - Display size in bytes of allocated regions 1385 * 1386 * The "size" file mirrors the layout of the "schemata" file, printing the 1387 * size in bytes of each region instead of the capacity bitmask. 1388 * 1389 */ 1390 static int rdtgroup_size_show(struct kernfs_open_file *of, 1391 struct seq_file *s, void *v) 1392 { 1393 struct rdtgroup *rdtgrp; 1394 struct rdt_resource *r; 1395 struct rdt_domain *d; 1396 unsigned int size; 1397 int ret = 0; 1398 bool sep; 1399 u32 ctrl; 1400 1401 rdtgrp = rdtgroup_kn_lock_live(of->kn); 1402 if (!rdtgrp) { 1403 rdtgroup_kn_unlock(of->kn); 1404 return -ENOENT; 1405 } 1406 1407 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { 1408 if (!rdtgrp->plr->d) { 1409 rdt_last_cmd_clear(); 1410 rdt_last_cmd_puts("Cache domain offline\n"); 1411 ret = -ENODEV; 1412 } else { 1413 seq_printf(s, "%*s:", max_name_width, 1414 rdtgrp->plr->r->name); 1415 size = rdtgroup_cbm_to_size(rdtgrp->plr->r, 1416 rdtgrp->plr->d, 1417 rdtgrp->plr->cbm); 1418 seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size); 1419 } 1420 goto out; 1421 } 1422 1423 for_each_alloc_enabled_rdt_resource(r) { 1424 sep = false; 1425 seq_printf(s, "%*s:", max_name_width, r->name); 1426 list_for_each_entry(d, &r->domains, list) { 1427 if (sep) 1428 seq_putc(s, ';'); 1429 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { 1430 size = 0; 1431 } else { 1432 ctrl = (!is_mba_sc(r) ? 1433 d->ctrl_val[rdtgrp->closid] : 1434 d->mbps_val[rdtgrp->closid]); 1435 if (r->rid == RDT_RESOURCE_MBA) 1436 size = ctrl; 1437 else 1438 size = rdtgroup_cbm_to_size(r, d, ctrl); 1439 } 1440 seq_printf(s, "%d=%u", d->id, size); 1441 sep = true; 1442 } 1443 seq_putc(s, '\n'); 1444 } 1445 1446 out: 1447 rdtgroup_kn_unlock(of->kn); 1448 1449 return ret; 1450 } 1451 1452 /* rdtgroup information files for one cache resource. */ 1453 static struct rftype res_common_files[] = { 1454 { 1455 .name = "last_cmd_status", 1456 .mode = 0444, 1457 .kf_ops = &rdtgroup_kf_single_ops, 1458 .seq_show = rdt_last_cmd_status_show, 1459 .fflags = RF_TOP_INFO, 1460 }, 1461 { 1462 .name = "num_closids", 1463 .mode = 0444, 1464 .kf_ops = &rdtgroup_kf_single_ops, 1465 .seq_show = rdt_num_closids_show, 1466 .fflags = RF_CTRL_INFO, 1467 }, 1468 { 1469 .name = "mon_features", 1470 .mode = 0444, 1471 .kf_ops = &rdtgroup_kf_single_ops, 1472 .seq_show = rdt_mon_features_show, 1473 .fflags = RF_MON_INFO, 1474 }, 1475 { 1476 .name = "num_rmids", 1477 .mode = 0444, 1478 .kf_ops = &rdtgroup_kf_single_ops, 1479 .seq_show = rdt_num_rmids_show, 1480 .fflags = RF_MON_INFO, 1481 }, 1482 { 1483 .name = "cbm_mask", 1484 .mode = 0444, 1485 .kf_ops = &rdtgroup_kf_single_ops, 1486 .seq_show = rdt_default_ctrl_show, 1487 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, 1488 }, 1489 { 1490 .name = "min_cbm_bits", 1491 .mode = 0444, 1492 .kf_ops = &rdtgroup_kf_single_ops, 1493 .seq_show = rdt_min_cbm_bits_show, 1494 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, 1495 }, 1496 { 1497 .name = "shareable_bits", 1498 .mode = 0444, 1499 .kf_ops = &rdtgroup_kf_single_ops, 1500 .seq_show = rdt_shareable_bits_show, 1501 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, 1502 }, 1503 { 1504 .name = "bit_usage", 1505 .mode = 0444, 1506 .kf_ops = &rdtgroup_kf_single_ops, 1507 .seq_show = rdt_bit_usage_show, 1508 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, 1509 }, 1510 { 1511 .name = "min_bandwidth", 1512 .mode = 0444, 1513 .kf_ops = &rdtgroup_kf_single_ops, 1514 .seq_show = rdt_min_bw_show, 1515 .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, 1516 }, 1517 { 1518 .name = "bandwidth_gran", 1519 .mode = 0444, 1520 .kf_ops = &rdtgroup_kf_single_ops, 1521 .seq_show = rdt_bw_gran_show, 1522 .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, 1523 }, 1524 { 1525 .name = "delay_linear", 1526 .mode = 0444, 1527 .kf_ops = &rdtgroup_kf_single_ops, 1528 .seq_show = rdt_delay_linear_show, 1529 .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, 1530 }, 1531 /* 1532 * Platform specific which (if any) capabilities are provided by 1533 * thread_throttle_mode. Defer "fflags" initialization to platform 1534 * discovery. 1535 */ 1536 { 1537 .name = "thread_throttle_mode", 1538 .mode = 0444, 1539 .kf_ops = &rdtgroup_kf_single_ops, 1540 .seq_show = rdt_thread_throttle_mode_show, 1541 }, 1542 { 1543 .name = "max_threshold_occupancy", 1544 .mode = 0644, 1545 .kf_ops = &rdtgroup_kf_single_ops, 1546 .write = max_threshold_occ_write, 1547 .seq_show = max_threshold_occ_show, 1548 .fflags = RF_MON_INFO | RFTYPE_RES_CACHE, 1549 }, 1550 { 1551 .name = "cpus", 1552 .mode = 0644, 1553 .kf_ops = &rdtgroup_kf_single_ops, 1554 .write = rdtgroup_cpus_write, 1555 .seq_show = rdtgroup_cpus_show, 1556 .fflags = RFTYPE_BASE, 1557 }, 1558 { 1559 .name = "cpus_list", 1560 .mode = 0644, 1561 .kf_ops = &rdtgroup_kf_single_ops, 1562 .write = rdtgroup_cpus_write, 1563 .seq_show = rdtgroup_cpus_show, 1564 .flags = RFTYPE_FLAGS_CPUS_LIST, 1565 .fflags = RFTYPE_BASE, 1566 }, 1567 { 1568 .name = "tasks", 1569 .mode = 0644, 1570 .kf_ops = &rdtgroup_kf_single_ops, 1571 .write = rdtgroup_tasks_write, 1572 .seq_show = rdtgroup_tasks_show, 1573 .fflags = RFTYPE_BASE, 1574 }, 1575 { 1576 .name = "schemata", 1577 .mode = 0644, 1578 .kf_ops = &rdtgroup_kf_single_ops, 1579 .write = rdtgroup_schemata_write, 1580 .seq_show = rdtgroup_schemata_show, 1581 .fflags = RF_CTRL_BASE, 1582 }, 1583 { 1584 .name = "mode", 1585 .mode = 0644, 1586 .kf_ops = &rdtgroup_kf_single_ops, 1587 .write = rdtgroup_mode_write, 1588 .seq_show = rdtgroup_mode_show, 1589 .fflags = RF_CTRL_BASE, 1590 }, 1591 { 1592 .name = "size", 1593 .mode = 0444, 1594 .kf_ops = &rdtgroup_kf_single_ops, 1595 .seq_show = rdtgroup_size_show, 1596 .fflags = RF_CTRL_BASE, 1597 }, 1598 1599 }; 1600 1601 static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags) 1602 { 1603 struct rftype *rfts, *rft; 1604 int ret, len; 1605 1606 rfts = res_common_files; 1607 len = ARRAY_SIZE(res_common_files); 1608 1609 lockdep_assert_held(&rdtgroup_mutex); 1610 1611 for (rft = rfts; rft < rfts + len; rft++) { 1612 if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) { 1613 ret = rdtgroup_add_file(kn, rft); 1614 if (ret) 1615 goto error; 1616 } 1617 } 1618 1619 return 0; 1620 error: 1621 pr_warn("Failed to add %s, err=%d\n", rft->name, ret); 1622 while (--rft >= rfts) { 1623 if ((fflags & rft->fflags) == rft->fflags) 1624 kernfs_remove_by_name(kn, rft->name); 1625 } 1626 return ret; 1627 } 1628 1629 static struct rftype *rdtgroup_get_rftype_by_name(const char *name) 1630 { 1631 struct rftype *rfts, *rft; 1632 int len; 1633 1634 rfts = res_common_files; 1635 len = ARRAY_SIZE(res_common_files); 1636 1637 for (rft = rfts; rft < rfts + len; rft++) { 1638 if (!strcmp(rft->name, name)) 1639 return rft; 1640 } 1641 1642 return NULL; 1643 } 1644 1645 void __init thread_throttle_mode_init(void) 1646 { 1647 struct rftype *rft; 1648 1649 rft = rdtgroup_get_rftype_by_name("thread_throttle_mode"); 1650 if (!rft) 1651 return; 1652 1653 rft->fflags = RF_CTRL_INFO | RFTYPE_RES_MB; 1654 } 1655 1656 /** 1657 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file 1658 * @r: The resource group with which the file is associated. 1659 * @name: Name of the file 1660 * 1661 * The permissions of named resctrl file, directory, or link are modified 1662 * to not allow read, write, or execute by any user. 1663 * 1664 * WARNING: This function is intended to communicate to the user that the 1665 * resctrl file has been locked down - that it is not relevant to the 1666 * particular state the system finds itself in. It should not be relied 1667 * on to protect from user access because after the file's permissions 1668 * are restricted the user can still change the permissions using chmod 1669 * from the command line. 1670 * 1671 * Return: 0 on success, <0 on failure. 1672 */ 1673 int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name) 1674 { 1675 struct iattr iattr = {.ia_valid = ATTR_MODE,}; 1676 struct kernfs_node *kn; 1677 int ret = 0; 1678 1679 kn = kernfs_find_and_get_ns(r->kn, name, NULL); 1680 if (!kn) 1681 return -ENOENT; 1682 1683 switch (kernfs_type(kn)) { 1684 case KERNFS_DIR: 1685 iattr.ia_mode = S_IFDIR; 1686 break; 1687 case KERNFS_FILE: 1688 iattr.ia_mode = S_IFREG; 1689 break; 1690 case KERNFS_LINK: 1691 iattr.ia_mode = S_IFLNK; 1692 break; 1693 } 1694 1695 ret = kernfs_setattr(kn, &iattr); 1696 kernfs_put(kn); 1697 return ret; 1698 } 1699 1700 /** 1701 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file 1702 * @r: The resource group with which the file is associated. 1703 * @name: Name of the file 1704 * @mask: Mask of permissions that should be restored 1705 * 1706 * Restore the permissions of the named file. If @name is a directory the 1707 * permissions of its parent will be used. 1708 * 1709 * Return: 0 on success, <0 on failure. 1710 */ 1711 int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, 1712 umode_t mask) 1713 { 1714 struct iattr iattr = {.ia_valid = ATTR_MODE,}; 1715 struct kernfs_node *kn, *parent; 1716 struct rftype *rfts, *rft; 1717 int ret, len; 1718 1719 rfts = res_common_files; 1720 len = ARRAY_SIZE(res_common_files); 1721 1722 for (rft = rfts; rft < rfts + len; rft++) { 1723 if (!strcmp(rft->name, name)) 1724 iattr.ia_mode = rft->mode & mask; 1725 } 1726 1727 kn = kernfs_find_and_get_ns(r->kn, name, NULL); 1728 if (!kn) 1729 return -ENOENT; 1730 1731 switch (kernfs_type(kn)) { 1732 case KERNFS_DIR: 1733 parent = kernfs_get_parent(kn); 1734 if (parent) { 1735 iattr.ia_mode |= parent->mode; 1736 kernfs_put(parent); 1737 } 1738 iattr.ia_mode |= S_IFDIR; 1739 break; 1740 case KERNFS_FILE: 1741 iattr.ia_mode |= S_IFREG; 1742 break; 1743 case KERNFS_LINK: 1744 iattr.ia_mode |= S_IFLNK; 1745 break; 1746 } 1747 1748 ret = kernfs_setattr(kn, &iattr); 1749 kernfs_put(kn); 1750 return ret; 1751 } 1752 1753 static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name, 1754 unsigned long fflags) 1755 { 1756 struct kernfs_node *kn_subdir; 1757 int ret; 1758 1759 kn_subdir = kernfs_create_dir(kn_info, name, 1760 kn_info->mode, r); 1761 if (IS_ERR(kn_subdir)) 1762 return PTR_ERR(kn_subdir); 1763 1764 ret = rdtgroup_kn_set_ugid(kn_subdir); 1765 if (ret) 1766 return ret; 1767 1768 ret = rdtgroup_add_files(kn_subdir, fflags); 1769 if (!ret) 1770 kernfs_activate(kn_subdir); 1771 1772 return ret; 1773 } 1774 1775 static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) 1776 { 1777 struct rdt_resource *r; 1778 unsigned long fflags; 1779 char name[32]; 1780 int ret; 1781 1782 /* create the directory */ 1783 kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL); 1784 if (IS_ERR(kn_info)) 1785 return PTR_ERR(kn_info); 1786 1787 ret = rdtgroup_add_files(kn_info, RF_TOP_INFO); 1788 if (ret) 1789 goto out_destroy; 1790 1791 for_each_alloc_enabled_rdt_resource(r) { 1792 fflags = r->fflags | RF_CTRL_INFO; 1793 ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags); 1794 if (ret) 1795 goto out_destroy; 1796 } 1797 1798 for_each_mon_enabled_rdt_resource(r) { 1799 fflags = r->fflags | RF_MON_INFO; 1800 sprintf(name, "%s_MON", r->name); 1801 ret = rdtgroup_mkdir_info_resdir(r, name, fflags); 1802 if (ret) 1803 goto out_destroy; 1804 } 1805 1806 ret = rdtgroup_kn_set_ugid(kn_info); 1807 if (ret) 1808 goto out_destroy; 1809 1810 kernfs_activate(kn_info); 1811 1812 return 0; 1813 1814 out_destroy: 1815 kernfs_remove(kn_info); 1816 return ret; 1817 } 1818 1819 static int 1820 mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, 1821 char *name, struct kernfs_node **dest_kn) 1822 { 1823 struct kernfs_node *kn; 1824 int ret; 1825 1826 /* create the directory */ 1827 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); 1828 if (IS_ERR(kn)) 1829 return PTR_ERR(kn); 1830 1831 if (dest_kn) 1832 *dest_kn = kn; 1833 1834 ret = rdtgroup_kn_set_ugid(kn); 1835 if (ret) 1836 goto out_destroy; 1837 1838 kernfs_activate(kn); 1839 1840 return 0; 1841 1842 out_destroy: 1843 kernfs_remove(kn); 1844 return ret; 1845 } 1846 1847 static void l3_qos_cfg_update(void *arg) 1848 { 1849 bool *enable = arg; 1850 1851 wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); 1852 } 1853 1854 static void l2_qos_cfg_update(void *arg) 1855 { 1856 bool *enable = arg; 1857 1858 wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); 1859 } 1860 1861 static inline bool is_mba_linear(void) 1862 { 1863 return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear; 1864 } 1865 1866 static int set_cache_qos_cfg(int level, bool enable) 1867 { 1868 void (*update)(void *arg); 1869 struct rdt_resource *r_l; 1870 cpumask_var_t cpu_mask; 1871 struct rdt_domain *d; 1872 int cpu; 1873 1874 if (level == RDT_RESOURCE_L3) 1875 update = l3_qos_cfg_update; 1876 else if (level == RDT_RESOURCE_L2) 1877 update = l2_qos_cfg_update; 1878 else 1879 return -EINVAL; 1880 1881 if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) 1882 return -ENOMEM; 1883 1884 r_l = &rdt_resources_all[level]; 1885 list_for_each_entry(d, &r_l->domains, list) { 1886 if (r_l->cache.arch_has_per_cpu_cfg) 1887 /* Pick all the CPUs in the domain instance */ 1888 for_each_cpu(cpu, &d->cpu_mask) 1889 cpumask_set_cpu(cpu, cpu_mask); 1890 else 1891 /* Pick one CPU from each domain instance to update MSR */ 1892 cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); 1893 } 1894 cpu = get_cpu(); 1895 /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */ 1896 if (cpumask_test_cpu(cpu, cpu_mask)) 1897 update(&enable); 1898 /* Update QOS_CFG MSR on all other cpus in cpu_mask. */ 1899 smp_call_function_many(cpu_mask, update, &enable, 1); 1900 put_cpu(); 1901 1902 free_cpumask_var(cpu_mask); 1903 1904 return 0; 1905 } 1906 1907 /* Restore the qos cfg state when a domain comes online */ 1908 void rdt_domain_reconfigure_cdp(struct rdt_resource *r) 1909 { 1910 if (!r->alloc_capable) 1911 return; 1912 1913 if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA]) 1914 l2_qos_cfg_update(&r->alloc_enabled); 1915 1916 if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA]) 1917 l3_qos_cfg_update(&r->alloc_enabled); 1918 } 1919 1920 /* 1921 * Enable or disable the MBA software controller 1922 * which helps user specify bandwidth in MBps. 1923 * MBA software controller is supported only if 1924 * MBM is supported and MBA is in linear scale. 1925 */ 1926 static int set_mba_sc(bool mba_sc) 1927 { 1928 struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA]; 1929 struct rdt_domain *d; 1930 1931 if (!is_mbm_enabled() || !is_mba_linear() || 1932 mba_sc == is_mba_sc(r)) 1933 return -EINVAL; 1934 1935 r->membw.mba_sc = mba_sc; 1936 list_for_each_entry(d, &r->domains, list) 1937 setup_default_ctrlval(r, d->ctrl_val, d->mbps_val); 1938 1939 return 0; 1940 } 1941 1942 static int cdp_enable(int level, int data_type, int code_type) 1943 { 1944 struct rdt_resource *r_ldata = &rdt_resources_all[data_type]; 1945 struct rdt_resource *r_lcode = &rdt_resources_all[code_type]; 1946 struct rdt_resource *r_l = &rdt_resources_all[level]; 1947 int ret; 1948 1949 if (!r_l->alloc_capable || !r_ldata->alloc_capable || 1950 !r_lcode->alloc_capable) 1951 return -EINVAL; 1952 1953 ret = set_cache_qos_cfg(level, true); 1954 if (!ret) { 1955 r_l->alloc_enabled = false; 1956 r_ldata->alloc_enabled = true; 1957 r_lcode->alloc_enabled = true; 1958 } 1959 return ret; 1960 } 1961 1962 static int cdpl3_enable(void) 1963 { 1964 return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, 1965 RDT_RESOURCE_L3CODE); 1966 } 1967 1968 static int cdpl2_enable(void) 1969 { 1970 return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, 1971 RDT_RESOURCE_L2CODE); 1972 } 1973 1974 static void cdp_disable(int level, int data_type, int code_type) 1975 { 1976 struct rdt_resource *r = &rdt_resources_all[level]; 1977 1978 r->alloc_enabled = r->alloc_capable; 1979 1980 if (rdt_resources_all[data_type].alloc_enabled) { 1981 rdt_resources_all[data_type].alloc_enabled = false; 1982 rdt_resources_all[code_type].alloc_enabled = false; 1983 set_cache_qos_cfg(level, false); 1984 } 1985 } 1986 1987 static void cdpl3_disable(void) 1988 { 1989 cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE); 1990 } 1991 1992 static void cdpl2_disable(void) 1993 { 1994 cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE); 1995 } 1996 1997 static void cdp_disable_all(void) 1998 { 1999 if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled) 2000 cdpl3_disable(); 2001 if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) 2002 cdpl2_disable(); 2003 } 2004 2005 /* 2006 * We don't allow rdtgroup directories to be created anywhere 2007 * except the root directory. Thus when looking for the rdtgroup 2008 * structure for a kernfs node we are either looking at a directory, 2009 * in which case the rdtgroup structure is pointed at by the "priv" 2010 * field, otherwise we have a file, and need only look to the parent 2011 * to find the rdtgroup. 2012 */ 2013 static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn) 2014 { 2015 if (kernfs_type(kn) == KERNFS_DIR) { 2016 /* 2017 * All the resource directories use "kn->priv" 2018 * to point to the "struct rdtgroup" for the 2019 * resource. "info" and its subdirectories don't 2020 * have rdtgroup structures, so return NULL here. 2021 */ 2022 if (kn == kn_info || kn->parent == kn_info) 2023 return NULL; 2024 else 2025 return kn->priv; 2026 } else { 2027 return kn->parent->priv; 2028 } 2029 } 2030 2031 struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn) 2032 { 2033 struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); 2034 2035 if (!rdtgrp) 2036 return NULL; 2037 2038 atomic_inc(&rdtgrp->waitcount); 2039 kernfs_break_active_protection(kn); 2040 2041 mutex_lock(&rdtgroup_mutex); 2042 2043 /* Was this group deleted while we waited? */ 2044 if (rdtgrp->flags & RDT_DELETED) 2045 return NULL; 2046 2047 return rdtgrp; 2048 } 2049 2050 void rdtgroup_kn_unlock(struct kernfs_node *kn) 2051 { 2052 struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); 2053 2054 if (!rdtgrp) 2055 return; 2056 2057 mutex_unlock(&rdtgroup_mutex); 2058 2059 if (atomic_dec_and_test(&rdtgrp->waitcount) && 2060 (rdtgrp->flags & RDT_DELETED)) { 2061 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 2062 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) 2063 rdtgroup_pseudo_lock_remove(rdtgrp); 2064 kernfs_unbreak_active_protection(kn); 2065 rdtgroup_remove(rdtgrp); 2066 } else { 2067 kernfs_unbreak_active_protection(kn); 2068 } 2069 } 2070 2071 static int mkdir_mondata_all(struct kernfs_node *parent_kn, 2072 struct rdtgroup *prgrp, 2073 struct kernfs_node **mon_data_kn); 2074 2075 static int rdt_enable_ctx(struct rdt_fs_context *ctx) 2076 { 2077 int ret = 0; 2078 2079 if (ctx->enable_cdpl2) 2080 ret = cdpl2_enable(); 2081 2082 if (!ret && ctx->enable_cdpl3) 2083 ret = cdpl3_enable(); 2084 2085 if (!ret && ctx->enable_mba_mbps) 2086 ret = set_mba_sc(true); 2087 2088 return ret; 2089 } 2090 2091 static int rdt_get_tree(struct fs_context *fc) 2092 { 2093 struct rdt_fs_context *ctx = rdt_fc2context(fc); 2094 struct rdt_domain *dom; 2095 struct rdt_resource *r; 2096 int ret; 2097 2098 cpus_read_lock(); 2099 mutex_lock(&rdtgroup_mutex); 2100 /* 2101 * resctrl file system can only be mounted once. 2102 */ 2103 if (static_branch_unlikely(&rdt_enable_key)) { 2104 ret = -EBUSY; 2105 goto out; 2106 } 2107 2108 ret = rdt_enable_ctx(ctx); 2109 if (ret < 0) 2110 goto out_cdp; 2111 2112 closid_init(); 2113 2114 ret = rdtgroup_create_info_dir(rdtgroup_default.kn); 2115 if (ret < 0) 2116 goto out_mba; 2117 2118 if (rdt_mon_capable) { 2119 ret = mongroup_create_dir(rdtgroup_default.kn, 2120 &rdtgroup_default, "mon_groups", 2121 &kn_mongrp); 2122 if (ret < 0) 2123 goto out_info; 2124 2125 ret = mkdir_mondata_all(rdtgroup_default.kn, 2126 &rdtgroup_default, &kn_mondata); 2127 if (ret < 0) 2128 goto out_mongrp; 2129 rdtgroup_default.mon.mon_data_kn = kn_mondata; 2130 } 2131 2132 ret = rdt_pseudo_lock_init(); 2133 if (ret) 2134 goto out_mondata; 2135 2136 ret = kernfs_get_tree(fc); 2137 if (ret < 0) 2138 goto out_psl; 2139 2140 if (rdt_alloc_capable) 2141 static_branch_enable_cpuslocked(&rdt_alloc_enable_key); 2142 if (rdt_mon_capable) 2143 static_branch_enable_cpuslocked(&rdt_mon_enable_key); 2144 2145 if (rdt_alloc_capable || rdt_mon_capable) 2146 static_branch_enable_cpuslocked(&rdt_enable_key); 2147 2148 if (is_mbm_enabled()) { 2149 r = &rdt_resources_all[RDT_RESOURCE_L3]; 2150 list_for_each_entry(dom, &r->domains, list) 2151 mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL); 2152 } 2153 2154 goto out; 2155 2156 out_psl: 2157 rdt_pseudo_lock_release(); 2158 out_mondata: 2159 if (rdt_mon_capable) 2160 kernfs_remove(kn_mondata); 2161 out_mongrp: 2162 if (rdt_mon_capable) 2163 kernfs_remove(kn_mongrp); 2164 out_info: 2165 kernfs_remove(kn_info); 2166 out_mba: 2167 if (ctx->enable_mba_mbps) 2168 set_mba_sc(false); 2169 out_cdp: 2170 cdp_disable_all(); 2171 out: 2172 rdt_last_cmd_clear(); 2173 mutex_unlock(&rdtgroup_mutex); 2174 cpus_read_unlock(); 2175 return ret; 2176 } 2177 2178 enum rdt_param { 2179 Opt_cdp, 2180 Opt_cdpl2, 2181 Opt_mba_mbps, 2182 nr__rdt_params 2183 }; 2184 2185 static const struct fs_parameter_spec rdt_fs_parameters[] = { 2186 fsparam_flag("cdp", Opt_cdp), 2187 fsparam_flag("cdpl2", Opt_cdpl2), 2188 fsparam_flag("mba_MBps", Opt_mba_mbps), 2189 {} 2190 }; 2191 2192 static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param) 2193 { 2194 struct rdt_fs_context *ctx = rdt_fc2context(fc); 2195 struct fs_parse_result result; 2196 int opt; 2197 2198 opt = fs_parse(fc, rdt_fs_parameters, param, &result); 2199 if (opt < 0) 2200 return opt; 2201 2202 switch (opt) { 2203 case Opt_cdp: 2204 ctx->enable_cdpl3 = true; 2205 return 0; 2206 case Opt_cdpl2: 2207 ctx->enable_cdpl2 = true; 2208 return 0; 2209 case Opt_mba_mbps: 2210 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) 2211 return -EINVAL; 2212 ctx->enable_mba_mbps = true; 2213 return 0; 2214 } 2215 2216 return -EINVAL; 2217 } 2218 2219 static void rdt_fs_context_free(struct fs_context *fc) 2220 { 2221 struct rdt_fs_context *ctx = rdt_fc2context(fc); 2222 2223 kernfs_free_fs_context(fc); 2224 kfree(ctx); 2225 } 2226 2227 static const struct fs_context_operations rdt_fs_context_ops = { 2228 .free = rdt_fs_context_free, 2229 .parse_param = rdt_parse_param, 2230 .get_tree = rdt_get_tree, 2231 }; 2232 2233 static int rdt_init_fs_context(struct fs_context *fc) 2234 { 2235 struct rdt_fs_context *ctx; 2236 2237 ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL); 2238 if (!ctx) 2239 return -ENOMEM; 2240 2241 ctx->kfc.root = rdt_root; 2242 ctx->kfc.magic = RDTGROUP_SUPER_MAGIC; 2243 fc->fs_private = &ctx->kfc; 2244 fc->ops = &rdt_fs_context_ops; 2245 put_user_ns(fc->user_ns); 2246 fc->user_ns = get_user_ns(&init_user_ns); 2247 fc->global = true; 2248 return 0; 2249 } 2250 2251 static int reset_all_ctrls(struct rdt_resource *r) 2252 { 2253 struct msr_param msr_param; 2254 cpumask_var_t cpu_mask; 2255 struct rdt_domain *d; 2256 int i, cpu; 2257 2258 if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) 2259 return -ENOMEM; 2260 2261 msr_param.res = r; 2262 msr_param.low = 0; 2263 msr_param.high = r->num_closid; 2264 2265 /* 2266 * Disable resource control for this resource by setting all 2267 * CBMs in all domains to the maximum mask value. Pick one CPU 2268 * from each domain to update the MSRs below. 2269 */ 2270 list_for_each_entry(d, &r->domains, list) { 2271 cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); 2272 2273 for (i = 0; i < r->num_closid; i++) 2274 d->ctrl_val[i] = r->default_ctrl; 2275 } 2276 cpu = get_cpu(); 2277 /* Update CBM on this cpu if it's in cpu_mask. */ 2278 if (cpumask_test_cpu(cpu, cpu_mask)) 2279 rdt_ctrl_update(&msr_param); 2280 /* Update CBM on all other cpus in cpu_mask. */ 2281 smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1); 2282 put_cpu(); 2283 2284 free_cpumask_var(cpu_mask); 2285 2286 return 0; 2287 } 2288 2289 /* 2290 * Move tasks from one to the other group. If @from is NULL, then all tasks 2291 * in the systems are moved unconditionally (used for teardown). 2292 * 2293 * If @mask is not NULL the cpus on which moved tasks are running are set 2294 * in that mask so the update smp function call is restricted to affected 2295 * cpus. 2296 */ 2297 static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, 2298 struct cpumask *mask) 2299 { 2300 struct task_struct *p, *t; 2301 2302 read_lock(&tasklist_lock); 2303 for_each_process_thread(p, t) { 2304 if (!from || is_closid_match(t, from) || 2305 is_rmid_match(t, from)) { 2306 t->closid = to->closid; 2307 t->rmid = to->mon.rmid; 2308 2309 #ifdef CONFIG_SMP 2310 /* 2311 * This is safe on x86 w/o barriers as the ordering 2312 * of writing to task_cpu() and t->on_cpu is 2313 * reverse to the reading here. The detection is 2314 * inaccurate as tasks might move or schedule 2315 * before the smp function call takes place. In 2316 * such a case the function call is pointless, but 2317 * there is no other side effect. 2318 */ 2319 if (mask && t->on_cpu) 2320 cpumask_set_cpu(task_cpu(t), mask); 2321 #endif 2322 } 2323 } 2324 read_unlock(&tasklist_lock); 2325 } 2326 2327 static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp) 2328 { 2329 struct rdtgroup *sentry, *stmp; 2330 struct list_head *head; 2331 2332 head = &rdtgrp->mon.crdtgrp_list; 2333 list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) { 2334 free_rmid(sentry->mon.rmid); 2335 list_del(&sentry->mon.crdtgrp_list); 2336 2337 if (atomic_read(&sentry->waitcount) != 0) 2338 sentry->flags = RDT_DELETED; 2339 else 2340 rdtgroup_remove(sentry); 2341 } 2342 } 2343 2344 /* 2345 * Forcibly remove all of subdirectories under root. 2346 */ 2347 static void rmdir_all_sub(void) 2348 { 2349 struct rdtgroup *rdtgrp, *tmp; 2350 2351 /* Move all tasks to the default resource group */ 2352 rdt_move_group_tasks(NULL, &rdtgroup_default, NULL); 2353 2354 list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) { 2355 /* Free any child rmids */ 2356 free_all_child_rdtgrp(rdtgrp); 2357 2358 /* Remove each rdtgroup other than root */ 2359 if (rdtgrp == &rdtgroup_default) 2360 continue; 2361 2362 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 2363 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) 2364 rdtgroup_pseudo_lock_remove(rdtgrp); 2365 2366 /* 2367 * Give any CPUs back to the default group. We cannot copy 2368 * cpu_online_mask because a CPU might have executed the 2369 * offline callback already, but is still marked online. 2370 */ 2371 cpumask_or(&rdtgroup_default.cpu_mask, 2372 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); 2373 2374 free_rmid(rdtgrp->mon.rmid); 2375 2376 kernfs_remove(rdtgrp->kn); 2377 list_del(&rdtgrp->rdtgroup_list); 2378 2379 if (atomic_read(&rdtgrp->waitcount) != 0) 2380 rdtgrp->flags = RDT_DELETED; 2381 else 2382 rdtgroup_remove(rdtgrp); 2383 } 2384 /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ 2385 update_closid_rmid(cpu_online_mask, &rdtgroup_default); 2386 2387 kernfs_remove(kn_info); 2388 kernfs_remove(kn_mongrp); 2389 kernfs_remove(kn_mondata); 2390 } 2391 2392 static void rdt_kill_sb(struct super_block *sb) 2393 { 2394 struct rdt_resource *r; 2395 2396 cpus_read_lock(); 2397 mutex_lock(&rdtgroup_mutex); 2398 2399 set_mba_sc(false); 2400 2401 /*Put everything back to default values. */ 2402 for_each_alloc_enabled_rdt_resource(r) 2403 reset_all_ctrls(r); 2404 cdp_disable_all(); 2405 rmdir_all_sub(); 2406 rdt_pseudo_lock_release(); 2407 rdtgroup_default.mode = RDT_MODE_SHAREABLE; 2408 static_branch_disable_cpuslocked(&rdt_alloc_enable_key); 2409 static_branch_disable_cpuslocked(&rdt_mon_enable_key); 2410 static_branch_disable_cpuslocked(&rdt_enable_key); 2411 kernfs_kill_sb(sb); 2412 mutex_unlock(&rdtgroup_mutex); 2413 cpus_read_unlock(); 2414 } 2415 2416 static struct file_system_type rdt_fs_type = { 2417 .name = "resctrl", 2418 .init_fs_context = rdt_init_fs_context, 2419 .parameters = rdt_fs_parameters, 2420 .kill_sb = rdt_kill_sb, 2421 }; 2422 2423 static int mon_addfile(struct kernfs_node *parent_kn, const char *name, 2424 void *priv) 2425 { 2426 struct kernfs_node *kn; 2427 int ret = 0; 2428 2429 kn = __kernfs_create_file(parent_kn, name, 0444, 2430 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0, 2431 &kf_mondata_ops, priv, NULL, NULL); 2432 if (IS_ERR(kn)) 2433 return PTR_ERR(kn); 2434 2435 ret = rdtgroup_kn_set_ugid(kn); 2436 if (ret) { 2437 kernfs_remove(kn); 2438 return ret; 2439 } 2440 2441 return ret; 2442 } 2443 2444 /* 2445 * Remove all subdirectories of mon_data of ctrl_mon groups 2446 * and monitor groups with given domain id. 2447 */ 2448 void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int dom_id) 2449 { 2450 struct rdtgroup *prgrp, *crgrp; 2451 char name[32]; 2452 2453 if (!r->mon_enabled) 2454 return; 2455 2456 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { 2457 sprintf(name, "mon_%s_%02d", r->name, dom_id); 2458 kernfs_remove_by_name(prgrp->mon.mon_data_kn, name); 2459 2460 list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list) 2461 kernfs_remove_by_name(crgrp->mon.mon_data_kn, name); 2462 } 2463 } 2464 2465 static int mkdir_mondata_subdir(struct kernfs_node *parent_kn, 2466 struct rdt_domain *d, 2467 struct rdt_resource *r, struct rdtgroup *prgrp) 2468 { 2469 union mon_data_bits priv; 2470 struct kernfs_node *kn; 2471 struct mon_evt *mevt; 2472 struct rmid_read rr; 2473 char name[32]; 2474 int ret; 2475 2476 sprintf(name, "mon_%s_%02d", r->name, d->id); 2477 /* create the directory */ 2478 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); 2479 if (IS_ERR(kn)) 2480 return PTR_ERR(kn); 2481 2482 ret = rdtgroup_kn_set_ugid(kn); 2483 if (ret) 2484 goto out_destroy; 2485 2486 if (WARN_ON(list_empty(&r->evt_list))) { 2487 ret = -EPERM; 2488 goto out_destroy; 2489 } 2490 2491 priv.u.rid = r->rid; 2492 priv.u.domid = d->id; 2493 list_for_each_entry(mevt, &r->evt_list, list) { 2494 priv.u.evtid = mevt->evtid; 2495 ret = mon_addfile(kn, mevt->name, priv.priv); 2496 if (ret) 2497 goto out_destroy; 2498 2499 if (is_mbm_event(mevt->evtid)) 2500 mon_event_read(&rr, r, d, prgrp, mevt->evtid, true); 2501 } 2502 kernfs_activate(kn); 2503 return 0; 2504 2505 out_destroy: 2506 kernfs_remove(kn); 2507 return ret; 2508 } 2509 2510 /* 2511 * Add all subdirectories of mon_data for "ctrl_mon" groups 2512 * and "monitor" groups with given domain id. 2513 */ 2514 void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, 2515 struct rdt_domain *d) 2516 { 2517 struct kernfs_node *parent_kn; 2518 struct rdtgroup *prgrp, *crgrp; 2519 struct list_head *head; 2520 2521 if (!r->mon_enabled) 2522 return; 2523 2524 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { 2525 parent_kn = prgrp->mon.mon_data_kn; 2526 mkdir_mondata_subdir(parent_kn, d, r, prgrp); 2527 2528 head = &prgrp->mon.crdtgrp_list; 2529 list_for_each_entry(crgrp, head, mon.crdtgrp_list) { 2530 parent_kn = crgrp->mon.mon_data_kn; 2531 mkdir_mondata_subdir(parent_kn, d, r, crgrp); 2532 } 2533 } 2534 } 2535 2536 static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn, 2537 struct rdt_resource *r, 2538 struct rdtgroup *prgrp) 2539 { 2540 struct rdt_domain *dom; 2541 int ret; 2542 2543 list_for_each_entry(dom, &r->domains, list) { 2544 ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp); 2545 if (ret) 2546 return ret; 2547 } 2548 2549 return 0; 2550 } 2551 2552 /* 2553 * This creates a directory mon_data which contains the monitored data. 2554 * 2555 * mon_data has one directory for each domain whic are named 2556 * in the format mon_<domain_name>_<domain_id>. For ex: A mon_data 2557 * with L3 domain looks as below: 2558 * ./mon_data: 2559 * mon_L3_00 2560 * mon_L3_01 2561 * mon_L3_02 2562 * ... 2563 * 2564 * Each domain directory has one file per event: 2565 * ./mon_L3_00/: 2566 * llc_occupancy 2567 * 2568 */ 2569 static int mkdir_mondata_all(struct kernfs_node *parent_kn, 2570 struct rdtgroup *prgrp, 2571 struct kernfs_node **dest_kn) 2572 { 2573 struct rdt_resource *r; 2574 struct kernfs_node *kn; 2575 int ret; 2576 2577 /* 2578 * Create the mon_data directory first. 2579 */ 2580 ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn); 2581 if (ret) 2582 return ret; 2583 2584 if (dest_kn) 2585 *dest_kn = kn; 2586 2587 /* 2588 * Create the subdirectories for each domain. Note that all events 2589 * in a domain like L3 are grouped into a resource whose domain is L3 2590 */ 2591 for_each_mon_enabled_rdt_resource(r) { 2592 ret = mkdir_mondata_subdir_alldom(kn, r, prgrp); 2593 if (ret) 2594 goto out_destroy; 2595 } 2596 2597 return 0; 2598 2599 out_destroy: 2600 kernfs_remove(kn); 2601 return ret; 2602 } 2603 2604 /** 2605 * cbm_ensure_valid - Enforce validity on provided CBM 2606 * @_val: Candidate CBM 2607 * @r: RDT resource to which the CBM belongs 2608 * 2609 * The provided CBM represents all cache portions available for use. This 2610 * may be represented by a bitmap that does not consist of contiguous ones 2611 * and thus be an invalid CBM. 2612 * Here the provided CBM is forced to be a valid CBM by only considering 2613 * the first set of contiguous bits as valid and clearing all bits. 2614 * The intention here is to provide a valid default CBM with which a new 2615 * resource group is initialized. The user can follow this with a 2616 * modification to the CBM if the default does not satisfy the 2617 * requirements. 2618 */ 2619 static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r) 2620 { 2621 unsigned int cbm_len = r->cache.cbm_len; 2622 unsigned long first_bit, zero_bit; 2623 unsigned long val = _val; 2624 2625 if (!val) 2626 return 0; 2627 2628 first_bit = find_first_bit(&val, cbm_len); 2629 zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); 2630 2631 /* Clear any remaining bits to ensure contiguous region */ 2632 bitmap_clear(&val, zero_bit, cbm_len - zero_bit); 2633 return (u32)val; 2634 } 2635 2636 /* 2637 * Initialize cache resources per RDT domain 2638 * 2639 * Set the RDT domain up to start off with all usable allocations. That is, 2640 * all shareable and unused bits. All-zero CBM is invalid. 2641 */ 2642 static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r, 2643 u32 closid) 2644 { 2645 struct rdt_resource *r_cdp = NULL; 2646 struct rdt_domain *d_cdp = NULL; 2647 u32 used_b = 0, unused_b = 0; 2648 unsigned long tmp_cbm; 2649 enum rdtgrp_mode mode; 2650 u32 peer_ctl, *ctrl; 2651 int i; 2652 2653 rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp); 2654 d->have_new_ctrl = false; 2655 d->new_ctrl = r->cache.shareable_bits; 2656 used_b = r->cache.shareable_bits; 2657 ctrl = d->ctrl_val; 2658 for (i = 0; i < closids_supported(); i++, ctrl++) { 2659 if (closid_allocated(i) && i != closid) { 2660 mode = rdtgroup_mode_by_closid(i); 2661 if (mode == RDT_MODE_PSEUDO_LOCKSETUP) 2662 /* 2663 * ctrl values for locksetup aren't relevant 2664 * until the schemata is written, and the mode 2665 * becomes RDT_MODE_PSEUDO_LOCKED. 2666 */ 2667 continue; 2668 /* 2669 * If CDP is active include peer domain's 2670 * usage to ensure there is no overlap 2671 * with an exclusive group. 2672 */ 2673 if (d_cdp) 2674 peer_ctl = d_cdp->ctrl_val[i]; 2675 else 2676 peer_ctl = 0; 2677 used_b |= *ctrl | peer_ctl; 2678 if (mode == RDT_MODE_SHAREABLE) 2679 d->new_ctrl |= *ctrl | peer_ctl; 2680 } 2681 } 2682 if (d->plr && d->plr->cbm > 0) 2683 used_b |= d->plr->cbm; 2684 unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1); 2685 unused_b &= BIT_MASK(r->cache.cbm_len) - 1; 2686 d->new_ctrl |= unused_b; 2687 /* 2688 * Force the initial CBM to be valid, user can 2689 * modify the CBM based on system availability. 2690 */ 2691 d->new_ctrl = cbm_ensure_valid(d->new_ctrl, r); 2692 /* 2693 * Assign the u32 CBM to an unsigned long to ensure that 2694 * bitmap_weight() does not access out-of-bound memory. 2695 */ 2696 tmp_cbm = d->new_ctrl; 2697 if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) { 2698 rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id); 2699 return -ENOSPC; 2700 } 2701 d->have_new_ctrl = true; 2702 2703 return 0; 2704 } 2705 2706 /* 2707 * Initialize cache resources with default values. 2708 * 2709 * A new RDT group is being created on an allocation capable (CAT) 2710 * supporting system. Set this group up to start off with all usable 2711 * allocations. 2712 * 2713 * If there are no more shareable bits available on any domain then 2714 * the entire allocation will fail. 2715 */ 2716 static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid) 2717 { 2718 struct rdt_domain *d; 2719 int ret; 2720 2721 list_for_each_entry(d, &r->domains, list) { 2722 ret = __init_one_rdt_domain(d, r, closid); 2723 if (ret < 0) 2724 return ret; 2725 } 2726 2727 return 0; 2728 } 2729 2730 /* Initialize MBA resource with default values. */ 2731 static void rdtgroup_init_mba(struct rdt_resource *r) 2732 { 2733 struct rdt_domain *d; 2734 2735 list_for_each_entry(d, &r->domains, list) { 2736 d->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl; 2737 d->have_new_ctrl = true; 2738 } 2739 } 2740 2741 /* Initialize the RDT group's allocations. */ 2742 static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) 2743 { 2744 struct rdt_resource *r; 2745 int ret; 2746 2747 for_each_alloc_enabled_rdt_resource(r) { 2748 if (r->rid == RDT_RESOURCE_MBA) { 2749 rdtgroup_init_mba(r); 2750 } else { 2751 ret = rdtgroup_init_cat(r, rdtgrp->closid); 2752 if (ret < 0) 2753 return ret; 2754 } 2755 2756 ret = update_domains(r, rdtgrp->closid); 2757 if (ret < 0) { 2758 rdt_last_cmd_puts("Failed to initialize allocations\n"); 2759 return ret; 2760 } 2761 2762 } 2763 2764 rdtgrp->mode = RDT_MODE_SHAREABLE; 2765 2766 return 0; 2767 } 2768 2769 static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, 2770 const char *name, umode_t mode, 2771 enum rdt_group_type rtype, struct rdtgroup **r) 2772 { 2773 struct rdtgroup *prdtgrp, *rdtgrp; 2774 struct kernfs_node *kn; 2775 uint files = 0; 2776 int ret; 2777 2778 prdtgrp = rdtgroup_kn_lock_live(parent_kn); 2779 if (!prdtgrp) { 2780 ret = -ENODEV; 2781 goto out_unlock; 2782 } 2783 2784 if (rtype == RDTMON_GROUP && 2785 (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 2786 prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) { 2787 ret = -EINVAL; 2788 rdt_last_cmd_puts("Pseudo-locking in progress\n"); 2789 goto out_unlock; 2790 } 2791 2792 /* allocate the rdtgroup. */ 2793 rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); 2794 if (!rdtgrp) { 2795 ret = -ENOSPC; 2796 rdt_last_cmd_puts("Kernel out of memory\n"); 2797 goto out_unlock; 2798 } 2799 *r = rdtgrp; 2800 rdtgrp->mon.parent = prdtgrp; 2801 rdtgrp->type = rtype; 2802 INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list); 2803 2804 /* kernfs creates the directory for rdtgrp */ 2805 kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp); 2806 if (IS_ERR(kn)) { 2807 ret = PTR_ERR(kn); 2808 rdt_last_cmd_puts("kernfs create error\n"); 2809 goto out_free_rgrp; 2810 } 2811 rdtgrp->kn = kn; 2812 2813 /* 2814 * kernfs_remove() will drop the reference count on "kn" which 2815 * will free it. But we still need it to stick around for the 2816 * rdtgroup_kn_unlock(kn) call. Take one extra reference here, 2817 * which will be dropped by kernfs_put() in rdtgroup_remove(). 2818 */ 2819 kernfs_get(kn); 2820 2821 ret = rdtgroup_kn_set_ugid(kn); 2822 if (ret) { 2823 rdt_last_cmd_puts("kernfs perm error\n"); 2824 goto out_destroy; 2825 } 2826 2827 files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype); 2828 ret = rdtgroup_add_files(kn, files); 2829 if (ret) { 2830 rdt_last_cmd_puts("kernfs fill error\n"); 2831 goto out_destroy; 2832 } 2833 2834 if (rdt_mon_capable) { 2835 ret = alloc_rmid(); 2836 if (ret < 0) { 2837 rdt_last_cmd_puts("Out of RMIDs\n"); 2838 goto out_destroy; 2839 } 2840 rdtgrp->mon.rmid = ret; 2841 2842 ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn); 2843 if (ret) { 2844 rdt_last_cmd_puts("kernfs subdir error\n"); 2845 goto out_idfree; 2846 } 2847 } 2848 kernfs_activate(kn); 2849 2850 /* 2851 * The caller unlocks the parent_kn upon success. 2852 */ 2853 return 0; 2854 2855 out_idfree: 2856 free_rmid(rdtgrp->mon.rmid); 2857 out_destroy: 2858 kernfs_put(rdtgrp->kn); 2859 kernfs_remove(rdtgrp->kn); 2860 out_free_rgrp: 2861 kfree(rdtgrp); 2862 out_unlock: 2863 rdtgroup_kn_unlock(parent_kn); 2864 return ret; 2865 } 2866 2867 static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp) 2868 { 2869 kernfs_remove(rgrp->kn); 2870 free_rmid(rgrp->mon.rmid); 2871 rdtgroup_remove(rgrp); 2872 } 2873 2874 /* 2875 * Create a monitor group under "mon_groups" directory of a control 2876 * and monitor group(ctrl_mon). This is a resource group 2877 * to monitor a subset of tasks and cpus in its parent ctrl_mon group. 2878 */ 2879 static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn, 2880 const char *name, umode_t mode) 2881 { 2882 struct rdtgroup *rdtgrp, *prgrp; 2883 int ret; 2884 2885 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp); 2886 if (ret) 2887 return ret; 2888 2889 prgrp = rdtgrp->mon.parent; 2890 rdtgrp->closid = prgrp->closid; 2891 2892 /* 2893 * Add the rdtgrp to the list of rdtgrps the parent 2894 * ctrl_mon group has to track. 2895 */ 2896 list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list); 2897 2898 rdtgroup_kn_unlock(parent_kn); 2899 return ret; 2900 } 2901 2902 /* 2903 * These are rdtgroups created under the root directory. Can be used 2904 * to allocate and monitor resources. 2905 */ 2906 static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, 2907 const char *name, umode_t mode) 2908 { 2909 struct rdtgroup *rdtgrp; 2910 struct kernfs_node *kn; 2911 u32 closid; 2912 int ret; 2913 2914 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp); 2915 if (ret) 2916 return ret; 2917 2918 kn = rdtgrp->kn; 2919 ret = closid_alloc(); 2920 if (ret < 0) { 2921 rdt_last_cmd_puts("Out of CLOSIDs\n"); 2922 goto out_common_fail; 2923 } 2924 closid = ret; 2925 ret = 0; 2926 2927 rdtgrp->closid = closid; 2928 ret = rdtgroup_init_alloc(rdtgrp); 2929 if (ret < 0) 2930 goto out_id_free; 2931 2932 list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); 2933 2934 if (rdt_mon_capable) { 2935 /* 2936 * Create an empty mon_groups directory to hold the subset 2937 * of tasks and cpus to monitor. 2938 */ 2939 ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL); 2940 if (ret) { 2941 rdt_last_cmd_puts("kernfs subdir error\n"); 2942 goto out_del_list; 2943 } 2944 } 2945 2946 goto out_unlock; 2947 2948 out_del_list: 2949 list_del(&rdtgrp->rdtgroup_list); 2950 out_id_free: 2951 closid_free(closid); 2952 out_common_fail: 2953 mkdir_rdt_prepare_clean(rdtgrp); 2954 out_unlock: 2955 rdtgroup_kn_unlock(parent_kn); 2956 return ret; 2957 } 2958 2959 /* 2960 * We allow creating mon groups only with in a directory called "mon_groups" 2961 * which is present in every ctrl_mon group. Check if this is a valid 2962 * "mon_groups" directory. 2963 * 2964 * 1. The directory should be named "mon_groups". 2965 * 2. The mon group itself should "not" be named "mon_groups". 2966 * This makes sure "mon_groups" directory always has a ctrl_mon group 2967 * as parent. 2968 */ 2969 static bool is_mon_groups(struct kernfs_node *kn, const char *name) 2970 { 2971 return (!strcmp(kn->name, "mon_groups") && 2972 strcmp(name, "mon_groups")); 2973 } 2974 2975 static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name, 2976 umode_t mode) 2977 { 2978 /* Do not accept '\n' to avoid unparsable situation. */ 2979 if (strchr(name, '\n')) 2980 return -EINVAL; 2981 2982 /* 2983 * If the parent directory is the root directory and RDT 2984 * allocation is supported, add a control and monitoring 2985 * subdirectory 2986 */ 2987 if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn) 2988 return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode); 2989 2990 /* 2991 * If RDT monitoring is supported and the parent directory is a valid 2992 * "mon_groups" directory, add a monitoring subdirectory. 2993 */ 2994 if (rdt_mon_capable && is_mon_groups(parent_kn, name)) 2995 return rdtgroup_mkdir_mon(parent_kn, name, mode); 2996 2997 return -EPERM; 2998 } 2999 3000 static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp, 3001 cpumask_var_t tmpmask) 3002 { 3003 struct rdtgroup *prdtgrp = rdtgrp->mon.parent; 3004 int cpu; 3005 3006 /* Give any tasks back to the parent group */ 3007 rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask); 3008 3009 /* Update per cpu rmid of the moved CPUs first */ 3010 for_each_cpu(cpu, &rdtgrp->cpu_mask) 3011 per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid; 3012 /* 3013 * Update the MSR on moved CPUs and CPUs which have moved 3014 * task running on them. 3015 */ 3016 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); 3017 update_closid_rmid(tmpmask, NULL); 3018 3019 rdtgrp->flags = RDT_DELETED; 3020 free_rmid(rdtgrp->mon.rmid); 3021 3022 /* 3023 * Remove the rdtgrp from the parent ctrl_mon group's list 3024 */ 3025 WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); 3026 list_del(&rdtgrp->mon.crdtgrp_list); 3027 3028 kernfs_remove(rdtgrp->kn); 3029 3030 return 0; 3031 } 3032 3033 static int rdtgroup_ctrl_remove(struct kernfs_node *kn, 3034 struct rdtgroup *rdtgrp) 3035 { 3036 rdtgrp->flags = RDT_DELETED; 3037 list_del(&rdtgrp->rdtgroup_list); 3038 3039 kernfs_remove(rdtgrp->kn); 3040 return 0; 3041 } 3042 3043 static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp, 3044 cpumask_var_t tmpmask) 3045 { 3046 int cpu; 3047 3048 /* Give any tasks back to the default group */ 3049 rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask); 3050 3051 /* Give any CPUs back to the default group */ 3052 cpumask_or(&rdtgroup_default.cpu_mask, 3053 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); 3054 3055 /* Update per cpu closid and rmid of the moved CPUs first */ 3056 for_each_cpu(cpu, &rdtgrp->cpu_mask) { 3057 per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid; 3058 per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid; 3059 } 3060 3061 /* 3062 * Update the MSR on moved CPUs and CPUs which have moved 3063 * task running on them. 3064 */ 3065 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); 3066 update_closid_rmid(tmpmask, NULL); 3067 3068 closid_free(rdtgrp->closid); 3069 free_rmid(rdtgrp->mon.rmid); 3070 3071 rdtgroup_ctrl_remove(kn, rdtgrp); 3072 3073 /* 3074 * Free all the child monitor group rmids. 3075 */ 3076 free_all_child_rdtgrp(rdtgrp); 3077 3078 return 0; 3079 } 3080 3081 static int rdtgroup_rmdir(struct kernfs_node *kn) 3082 { 3083 struct kernfs_node *parent_kn = kn->parent; 3084 struct rdtgroup *rdtgrp; 3085 cpumask_var_t tmpmask; 3086 int ret = 0; 3087 3088 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) 3089 return -ENOMEM; 3090 3091 rdtgrp = rdtgroup_kn_lock_live(kn); 3092 if (!rdtgrp) { 3093 ret = -EPERM; 3094 goto out; 3095 } 3096 3097 /* 3098 * If the rdtgroup is a ctrl_mon group and parent directory 3099 * is the root directory, remove the ctrl_mon group. 3100 * 3101 * If the rdtgroup is a mon group and parent directory 3102 * is a valid "mon_groups" directory, remove the mon group. 3103 */ 3104 if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn && 3105 rdtgrp != &rdtgroup_default) { 3106 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 3107 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { 3108 ret = rdtgroup_ctrl_remove(kn, rdtgrp); 3109 } else { 3110 ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask); 3111 } 3112 } else if (rdtgrp->type == RDTMON_GROUP && 3113 is_mon_groups(parent_kn, kn->name)) { 3114 ret = rdtgroup_rmdir_mon(kn, rdtgrp, tmpmask); 3115 } else { 3116 ret = -EPERM; 3117 } 3118 3119 out: 3120 rdtgroup_kn_unlock(kn); 3121 free_cpumask_var(tmpmask); 3122 return ret; 3123 } 3124 3125 static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf) 3126 { 3127 if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled) 3128 seq_puts(seq, ",cdp"); 3129 3130 if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) 3131 seq_puts(seq, ",cdpl2"); 3132 3133 if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA])) 3134 seq_puts(seq, ",mba_MBps"); 3135 3136 return 0; 3137 } 3138 3139 static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = { 3140 .mkdir = rdtgroup_mkdir, 3141 .rmdir = rdtgroup_rmdir, 3142 .show_options = rdtgroup_show_options, 3143 }; 3144 3145 static int __init rdtgroup_setup_root(void) 3146 { 3147 int ret; 3148 3149 rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops, 3150 KERNFS_ROOT_CREATE_DEACTIVATED | 3151 KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK, 3152 &rdtgroup_default); 3153 if (IS_ERR(rdt_root)) 3154 return PTR_ERR(rdt_root); 3155 3156 mutex_lock(&rdtgroup_mutex); 3157 3158 rdtgroup_default.closid = 0; 3159 rdtgroup_default.mon.rmid = 0; 3160 rdtgroup_default.type = RDTCTRL_GROUP; 3161 INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list); 3162 3163 list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups); 3164 3165 ret = rdtgroup_add_files(rdt_root->kn, RF_CTRL_BASE); 3166 if (ret) { 3167 kernfs_destroy_root(rdt_root); 3168 goto out; 3169 } 3170 3171 rdtgroup_default.kn = rdt_root->kn; 3172 kernfs_activate(rdtgroup_default.kn); 3173 3174 out: 3175 mutex_unlock(&rdtgroup_mutex); 3176 3177 return ret; 3178 } 3179 3180 /* 3181 * rdtgroup_init - rdtgroup initialization 3182 * 3183 * Setup resctrl file system including set up root, create mount point, 3184 * register rdtgroup filesystem, and initialize files under root directory. 3185 * 3186 * Return: 0 on success or -errno 3187 */ 3188 int __init rdtgroup_init(void) 3189 { 3190 int ret = 0; 3191 3192 seq_buf_init(&last_cmd_status, last_cmd_status_buf, 3193 sizeof(last_cmd_status_buf)); 3194 3195 ret = rdtgroup_setup_root(); 3196 if (ret) 3197 return ret; 3198 3199 ret = sysfs_create_mount_point(fs_kobj, "resctrl"); 3200 if (ret) 3201 goto cleanup_root; 3202 3203 ret = register_filesystem(&rdt_fs_type); 3204 if (ret) 3205 goto cleanup_mountpoint; 3206 3207 /* 3208 * Adding the resctrl debugfs directory here may not be ideal since 3209 * it would let the resctrl debugfs directory appear on the debugfs 3210 * filesystem before the resctrl filesystem is mounted. 3211 * It may also be ok since that would enable debugging of RDT before 3212 * resctrl is mounted. 3213 * The reason why the debugfs directory is created here and not in 3214 * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and 3215 * during the debugfs directory creation also &sb->s_type->i_mutex_key 3216 * (the lockdep class of inode->i_rwsem). Other filesystem 3217 * interactions (eg. SyS_getdents) have the lock ordering: 3218 * &sb->s_type->i_mutex_key --> &mm->mmap_lock 3219 * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex 3220 * is taken, thus creating dependency: 3221 * &mm->mmap_lock --> rdtgroup_mutex for the latter that can cause 3222 * issues considering the other two lock dependencies. 3223 * By creating the debugfs directory here we avoid a dependency 3224 * that may cause deadlock (even though file operations cannot 3225 * occur until the filesystem is mounted, but I do not know how to 3226 * tell lockdep that). 3227 */ 3228 debugfs_resctrl = debugfs_create_dir("resctrl", NULL); 3229 3230 return 0; 3231 3232 cleanup_mountpoint: 3233 sysfs_remove_mount_point(fs_kobj, "resctrl"); 3234 cleanup_root: 3235 kernfs_destroy_root(rdt_root); 3236 3237 return ret; 3238 } 3239 3240 void __exit rdtgroup_exit(void) 3241 { 3242 debugfs_remove_recursive(debugfs_resctrl); 3243 unregister_filesystem(&rdt_fs_type); 3244 sysfs_remove_mount_point(fs_kobj, "resctrl"); 3245 kernfs_destroy_root(rdt_root); 3246 } 3247