// SPDX-License-Identifier: GPL-2.0-only
/*
 * User interface for Resource Allocation in Resource Director Technology(RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/fs_parser.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/task_work.h>
#include <linux/user_namespace.h>

#include <uapi/linux/magic.h>

#include <asm/resctrl.h>
#include "internal.h"

DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
static struct kernfs_root *rdt_root;
struct rdtgroup rdtgroup_default;
LIST_HEAD(rdt_all_groups);

/* list of entries for the schemata file */
LIST_HEAD(resctrl_schema_all);

/* Kernel fs node for "info" directory under root */
static struct kernfs_node *kn_info;

/* Kernel fs node for "mon_groups" directory under root */
static struct kernfs_node *kn_mongrp;

/* Kernel fs node for "mon_data" directory under root */
static struct kernfs_node *kn_mondata;

static struct seq_buf last_cmd_status;
static char last_cmd_status_buf[512];

struct dentry *debugfs_resctrl;

void rdt_last_cmd_clear(void)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_clear(&last_cmd_status);
}

void rdt_last_cmd_puts(const char *s)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_puts(&last_cmd_status, s);
}

void rdt_last_cmd_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_vprintf(&last_cmd_status, fmt, ap);
	va_end(ap);
}
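/*
 * Usage example (illustrative; paths assume resctrl is mounted at
 * /sys/fs/resctrl and "grp0" is a hypothetical group): a rejected write
 * leaves a diagnostic behind that can be read back:
 *
 *   # echo bad > /sys/fs/resctrl/grp0/mode
 *   # cat /sys/fs/resctrl/info/last_cmd_status
 *   Unknown or unsupported mode
 */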
/*
 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
 * we can keep a bitmap of free CLOSIDs in a single integer.
 *
 * Using a global CLOSID across all resources has some advantages and
 * some drawbacks:
 * + We can simply set "current->closid" to assign a task to a resource
 *   group.
 * + Context switch code can avoid extra memory references deciding which
 *   CLOSID to load into the PQR_ASSOC MSR
 * - We give up some options in configuring resource groups across multi-socket
 *   systems.
 * - Our choices on how to configure each resource become progressively more
 *   limited as the number of resources grows.
 */
static int closid_free_map;
static int closid_free_map_len;

int closids_supported(void)
{
	return closid_free_map_len;
}

static void closid_init(void)
{
	struct resctrl_schema *s;
	u32 rdt_min_closid = 32;

	/* Compute rdt_min_closid across all resources */
	list_for_each_entry(s, &resctrl_schema_all, list)
		rdt_min_closid = min(rdt_min_closid, s->num_closid);

	closid_free_map = BIT_MASK(rdt_min_closid) - 1;

	/* CLOSID 0 is always reserved for the default group */
	closid_free_map &= ~1;
	closid_free_map_len = rdt_min_closid;
}

static int closid_alloc(void)
{
	u32 closid = ffs(closid_free_map);

	if (closid == 0)
		return -ENOSPC;
	closid--;
	closid_free_map &= ~(1 << closid);

	return closid;
}

void closid_free(int closid)
{
	closid_free_map |= 1 << closid;
}

/**
 * closid_allocated - test if provided closid is in use
 * @closid: closid to be tested
 *
 * Return: true if @closid is currently associated with a resource group,
 * false if @closid is free
 */
static bool closid_allocated(unsigned int closid)
{
	return (closid_free_map & (1 << closid)) == 0;
}
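/*
 * Worked example of the free-map arithmetic above (a sketch, assuming
 * rdt_min_closid == 4):
 *
 *   closid_init():  BIT_MASK(4) - 1 == 0b1111, then &= ~1 -> 0b1110
 *                   (CLOSID 0 stays reserved for the default group).
 *   closid_alloc(): ffs(0b1110) == 2, so CLOSID 1 is handed out and the
 *                   map becomes 0b1100.
 *   closid_free(1): sets bit 1 again, restoring 0b1110.
 */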
/**
 * rdtgroup_mode_by_closid - Return mode of resource group with closid
 * @closid: closid of the resource group
 *
 * Each resource group is associated with a @closid. Here the mode
 * of a resource group can be queried by searching for it using its closid.
 *
 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
 */
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
{
	struct rdtgroup *rdtgrp;

	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (rdtgrp->closid == closid)
			return rdtgrp->mode;
	}

	return RDT_NUM_MODES;
}

static const char * const rdt_mode_str[] = {
	[RDT_MODE_SHAREABLE]		= "shareable",
	[RDT_MODE_EXCLUSIVE]		= "exclusive",
	[RDT_MODE_PSEUDO_LOCKSETUP]	= "pseudo-locksetup",
	[RDT_MODE_PSEUDO_LOCKED]	= "pseudo-locked",
};

/**
 * rdtgroup_mode_str - Return the string representation of mode
 * @mode: the resource group mode as &enum rdtgroup_mode
 *
 * Return: string representation of valid mode, "unknown" otherwise
 */
static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
{
	if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
		return "unknown";

	return rdt_mode_str[mode];
}

/* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
				.ia_uid = current_fsuid(),
				.ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}

static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
{
	struct kernfs_node *kn;
	int ret;

	kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				  0, rft->kf_ops, rft, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return 0;
}

static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	struct rftype *rft = of->kn->priv;

	if (rft->seq_show)
		return rft->seq_show(of, m, arg);
	return 0;
}

static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct rftype *rft = of->kn->priv;

	if (rft->write)
		return rft->write(of, buf, nbytes, off);

	return -EINVAL;
}

static const struct kernfs_ops rdtgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= rdtgroup_file_write,
	.seq_show		= rdtgroup_seqfile_show,
};

static const struct kernfs_ops kf_mondata_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.seq_show		= rdtgroup_mondata_show,
};

static bool is_cpu_list(struct kernfs_open_file *of)
{
	struct rftype *rft = of->kn->priv;

	return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
}

static int rdtgroup_cpus_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct cpumask *mask;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);

	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				mask = &rdtgrp->plr->d->cpu_mask;
				seq_printf(s, is_cpu_list(of) ?
					   "%*pbl\n" : "%*pb\n",
					   cpumask_pr_args(mask));
			}
		} else {
			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
				   cpumask_pr_args(&rdtgrp->cpu_mask));
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

/*
 * This is safe against resctrl_sched_in() called from __switch_to()
 * because __switch_to() is executed with interrupts disabled. A local call
 * from update_closid_rmid() is protected against __switch_to() because
 * preemption is disabled.
 */
static void update_cpu_closid_rmid(void *info)
{
	struct rdtgroup *r = info;

	if (r) {
		this_cpu_write(pqr_state.default_closid, r->closid);
		this_cpu_write(pqr_state.default_rmid, r->mon.rmid);
	}

	/*
	 * We cannot unconditionally write the MSR because the current
	 * executing task might have its own closid selected. Just reuse
	 * the context switch code.
	 */
	resctrl_sched_in();
}
/*
 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
 *
 * Per task closids/rmids must have been set up before calling this function.
 */
static void
update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
{
	on_each_cpu_mask(cpu_mask, update_cpu_closid_rmid, r, 1);
}

static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			  cpumask_var_t tmpmask)
{
	struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
	struct list_head *head;

	/* Check whether cpus belong to parent ctrl group */
	cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
	if (!cpumask_empty(tmpmask)) {
		rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
		return -EINVAL;
	}

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (!cpumask_empty(tmpmask)) {
		/* Give any dropped cpus to parent rdtgroup */
		cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, prgrp);
	}

	/*
	 * If we added cpus, remove them from previous group that owned them
	 * and update per-cpu rmid
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (!cpumask_empty(tmpmask)) {
		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			if (crgrp == rdtgrp)
				continue;
			cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
				       tmpmask);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	return 0;
}
static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
{
	struct rdtgroup *crgrp;

	cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
	/* update the child mon group masks as well */
	list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
		cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
}

static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			   cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
{
	struct rdtgroup *r, *crgrp;
	struct list_head *head;

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (!cpumask_empty(tmpmask)) {
		/* Can't drop from default group */
		if (rdtgrp == &rdtgroup_default) {
			rdt_last_cmd_puts("Can't drop CPUs from default group\n");
			return -EINVAL;
		}

		/* Give any dropped cpus to rdtgroup_default */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, &rdtgroup_default);
	}

	/*
	 * If we added cpus, remove them from previous group and
	 * the prev group's child groups that owned them
	 * and update per-cpu closid/rmid.
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (!cpumask_empty(tmpmask)) {
		list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
			if (r == rdtgrp)
				continue;
			cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
			if (!cpumask_empty(tmpmask1))
				cpumask_rdtgrp_clear(r, tmpmask1);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	/*
	 * Clear child mon group masks since there is a new parent mask
	 * now and update the rmid for the cpus the child lost.
	 */
	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
		cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
		update_closid_rmid(tmpmask, rdtgrp);
		cpumask_clear(&crgrp->cpu_mask);
	}

	return 0;
}

static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	cpumask_var_t tmpmask, newmask, tmpmask1;
	struct rdtgroup *rdtgrp;
	int ret;

	if (!buf)
		return -EINVAL;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		return -ENOMEM;
	}
	if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		free_cpumask_var(newmask);
		return -ENOMEM;
	}

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto unlock;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	if (is_cpu_list(of))
		ret = cpulist_parse(buf, newmask);
	else
		ret = cpumask_parse(buf, newmask);

	if (ret) {
		rdt_last_cmd_puts("Bad CPU list/mask\n");
		goto unlock;
	}

	/* check that user didn't specify any offline cpus */
	cpumask_andnot(tmpmask, newmask, cpu_online_mask);
	if (!cpumask_empty(tmpmask)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Can only assign online CPUs\n");
		goto unlock;
	}

	if (rdtgrp->type == RDTCTRL_GROUP)
		ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
	else if (rdtgrp->type == RDTMON_GROUP)
		ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
	else
		ret = -EINVAL;

unlock:
	rdtgroup_kn_unlock(of->kn);
	free_cpumask_var(tmpmask);
	free_cpumask_var(newmask);
	free_cpumask_var(tmpmask1);

	return ret ?: nbytes;
}
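/*
 * Usage example (illustrative; "grp0" is a hypothetical group under a
 * resctrl mount at /sys/fs/resctrl):
 *
 *   # echo 0-3 > /sys/fs/resctrl/grp0/cpus_list
 *   # echo 00f > /sys/fs/resctrl/grp0/cpus
 *
 * Only online CPUs are accepted; CPUs dropped from a control group are
 * handed back to the default group, and CPUs added to a monitor group
 * must already belong to its parent control group.
 */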
/**
 * rdtgroup_remove - the helper to remove resource group safely
 * @rdtgrp: resource group to remove
 *
 * On resource group creation via a mkdir, an extra kernfs_node reference is
 * taken to ensure that the rdtgroup structure remains accessible for the
 * rdtgroup_kn_unlock() calls where it is removed.
 *
 * Drop the extra reference here, then free the rdtgroup structure.
 *
 * Return: void
 */
static void rdtgroup_remove(struct rdtgroup *rdtgrp)
{
	kernfs_put(rdtgrp->kn);
	kfree(rdtgrp);
}

static void _update_task_closid_rmid(void *task)
{
	/*
	 * If the task is still current on this CPU, update PQR_ASSOC MSR.
	 * Otherwise, the MSR is updated when the task is scheduled in.
	 */
	if (task == current)
		resctrl_sched_in();
}

static void update_task_closid_rmid(struct task_struct *t)
{
	if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
		smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
	else
		_update_task_closid_rmid(t);
}

static int __rdtgroup_move_task(struct task_struct *tsk,
				struct rdtgroup *rdtgrp)
{
	/* If the task is already in rdtgrp, no need to move the task. */
	if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid &&
	     tsk->rmid == rdtgrp->mon.rmid) ||
	    (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid &&
	     tsk->closid == rdtgrp->mon.parent->closid))
		return 0;

	/*
	 * Set the task's closid/rmid before the PQR_ASSOC MSR can be
	 * updated by them.
	 *
	 * For ctrl_mon groups, move both closid and rmid.
	 * For monitor groups, can move the tasks only from
	 * their parent CTRL group.
	 */
	if (rdtgrp->type == RDTCTRL_GROUP) {
		WRITE_ONCE(tsk->closid, rdtgrp->closid);
		WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
	} else if (rdtgrp->type == RDTMON_GROUP) {
		if (rdtgrp->mon.parent->closid == tsk->closid) {
			WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
		} else {
			rdt_last_cmd_puts("Can't move task to different control group\n");
			return -EINVAL;
		}
	}

	/*
	 * Ensure the task's closid and rmid are written before determining if
	 * the task is current that will decide if it will be interrupted.
	 * This pairs with the full barrier between the rq->curr update and
	 * resctrl_sched_in() during context switch.
	 */
	smp_mb();

	/*
	 * By now, the task's closid and rmid are set. If the task is current
	 * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
	 * group go into effect. If the task is not current, the MSR will be
	 * updated when the task is scheduled in.
	 */
	update_task_closid_rmid(tsk);

	return 0;
}

static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_alloc_capable &&
	       (r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
}

static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_mon_capable &&
	       (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
}
/**
 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
 * @r: Resource group
 *
 * Return: 1 if tasks have been assigned to @r, 0 otherwise
 */
int rdtgroup_tasks_assigned(struct rdtgroup *r)
{
	struct task_struct *p, *t;
	int ret = 0;

	lockdep_assert_held(&rdtgroup_mutex);

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (is_closid_match(t, r) || is_rmid_match(t, r)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

static int rdtgroup_task_write_permission(struct task_struct *task,
					  struct kernfs_open_file *of)
{
	const struct cred *tcred = get_task_cred(task);
	const struct cred *cred = current_cred();
	int ret = 0;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid)) {
		rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
		ret = -EPERM;
	}

	put_cred(tcred);
	return ret;
}

static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
			      struct kernfs_open_file *of)
{
	struct task_struct *tsk;
	int ret;

	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			rdt_last_cmd_printf("No task %d\n", pid);
			return -ESRCH;
		}
	} else {
		tsk = current;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	ret = rdtgroup_task_write_permission(tsk, of);
	if (!ret)
		ret = __rdtgroup_move_task(tsk, rdtgrp);

	put_task_struct(tsk);
	return ret;
}

static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;
	pid_t pid;

	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return -EINVAL;
	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	ret = rdtgroup_move_task(pid, rdtgrp, of);

unlock:
	rdtgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}
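/*
 * Usage example (illustrative): move the current shell into a resource
 * group by writing its PID to the "tasks" file:
 *
 *   # echo $$ > /sys/fs/resctrl/grp0/tasks
 *
 * Writing "0" moves the writing task itself, matching the pid == 0 case
 * in rdtgroup_move_task() above.
 */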
static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
{
	struct task_struct *p, *t;

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (is_closid_match(t, r) || is_rmid_match(t, r))
			seq_printf(s, "%d\n", t->pid);
	}
	rcu_read_unlock();
}

static int rdtgroup_tasks_show(struct kernfs_open_file *of,
			       struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		show_rdt_tasks(rdtgrp, s);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

#ifdef CONFIG_PROC_CPU_RESCTRL

/*
 * A task can only be part of one resctrl control group and of one monitor
 * group which is associated to that control group.
 *
 * 1)   res:
 *      mon:
 *
 *    resctrl is not available.
 *
 * 2)   res:/
 *      mon:
 *
 *    Task is part of the root resctrl control group, and it is not associated
 *    to any monitor group.
 *
 * 3)   res:/
 *      mon:mon0
 *
 *    Task is part of the root resctrl control group and monitor group mon0.
 *
 * 4)   res:group0
 *      mon:
 *
 *    Task is part of resctrl control group group0, and it is not associated
 *    to any monitor group.
 *
 * 5)   res:group0
 *      mon:mon1
 *
 *    Task is part of resctrl control group group0 and monitor group mon1.
 */
int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns,
		      struct pid *pid, struct task_struct *tsk)
{
	struct rdtgroup *rdtg;
	int ret = 0;

	mutex_lock(&rdtgroup_mutex);

	/* Return empty if resctrl has not been mounted. */
	if (!static_branch_unlikely(&rdt_enable_key)) {
		seq_puts(s, "res:\nmon:\n");
		goto unlock;
	}

	list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) {
		struct rdtgroup *crg;

		/*
		 * Task information is only relevant for shareable
		 * and exclusive groups.
		 */
		if (rdtg->mode != RDT_MODE_SHAREABLE &&
		    rdtg->mode != RDT_MODE_EXCLUSIVE)
			continue;

		if (rdtg->closid != tsk->closid)
			continue;

		seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "",
			   rdtg->kn->name);
		seq_puts(s, "mon:");
		list_for_each_entry(crg, &rdtg->mon.crdtgrp_list,
				    mon.crdtgrp_list) {
			if (tsk->rmid != crg->mon.rmid)
				continue;
			seq_printf(s, "%s", crg->kn->name);
			break;
		}
		seq_putc(s, '\n');
		goto unlock;
	}
	/*
	 * The above search should succeed. Otherwise return
	 * with an error.
	 */
	ret = -ENOENT;
unlock:
	mutex_unlock(&rdtgroup_mutex);

	return ret;
}
#endif

static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
				    struct seq_file *seq, void *v)
{
	int len;

	mutex_lock(&rdtgroup_mutex);
	len = seq_buf_used(&last_cmd_status);
	if (len)
		seq_printf(seq, "%.*s", len, last_cmd_status_buf);
	else
		seq_puts(seq, "ok\n");
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}

static int rdt_num_closids_show(struct kernfs_open_file *of,
				struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;

	seq_printf(seq, "%u\n", s->num_closid);
	return 0;
}

static int rdt_default_ctrl_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%x\n", r->default_ctrl);
	return 0;
}

static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
	return 0;
}

static int rdt_shareable_bits_show(struct kernfs_open_file *of,
				   struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%x\n", r->cache.shareable_bits);
	return 0;
}
/**
 * rdt_bit_usage_show - Display current usage of resources
 *
 * A domain is a shared resource that can now be allocated differently. Here
 * we display the current regions of the domain as an annotated bitmask.
 * For each domain of this resource its allocation bitmask
 * is annotated as below to indicate the current usage of the corresponding bit:
 *   0 - currently unused
 *   X - currently available for sharing and used by software and hardware
 *   H - currently used by hardware only but available for software use
 *   S - currently used and shareable by software only
 *   E - currently used exclusively by one resource group
 *   P - currently pseudo-locked by one resource group
 */
static int rdt_bit_usage_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	/*
	 * Use unsigned long even though only 32 bits are used to ensure
	 * test_bit() is used safely.
	 */
	unsigned long sw_shareable = 0, hw_shareable = 0;
	unsigned long exclusive = 0, pseudo_locked = 0;
	struct rdt_resource *r = s->res;
	struct rdt_domain *dom;
	int i, hwb, swb, excl, psl;
	enum rdtgrp_mode mode;
	bool sep = false;
	u32 ctrl_val;

	mutex_lock(&rdtgroup_mutex);
	hw_shareable = r->cache.shareable_bits;
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_putc(seq, ';');
		sw_shareable = 0;
		exclusive = 0;
		seq_printf(seq, "%d=", dom->id);
		for (i = 0; i < closids_supported(); i++) {
			if (!closid_allocated(i))
				continue;
			ctrl_val = resctrl_arch_get_config(r, dom, i,
							   s->conf_type);
			mode = rdtgroup_mode_by_closid(i);
			switch (mode) {
			case RDT_MODE_SHAREABLE:
				sw_shareable |= ctrl_val;
				break;
			case RDT_MODE_EXCLUSIVE:
				exclusive |= ctrl_val;
				break;
			case RDT_MODE_PSEUDO_LOCKSETUP:
				/*
				 * RDT_MODE_PSEUDO_LOCKSETUP is possible
				 * here but not included since the CBM
				 * associated with this CLOSID in this mode
				 * is not initialized and no task or cpu can be
				 * assigned this CLOSID.
				 */
				break;
			case RDT_MODE_PSEUDO_LOCKED:
			case RDT_NUM_MODES:
				WARN(1,
				     "invalid mode for closid %d\n", i);
				break;
			}
		}
		for (i = r->cache.cbm_len - 1; i >= 0; i--) {
			pseudo_locked = dom->plr ? dom->plr->cbm : 0;
			hwb = test_bit(i, &hw_shareable);
			swb = test_bit(i, &sw_shareable);
			excl = test_bit(i, &exclusive);
			psl = test_bit(i, &pseudo_locked);
			if (hwb && swb)
				seq_putc(seq, 'X');
			else if (hwb && !swb)
				seq_putc(seq, 'H');
			else if (!hwb && swb)
				seq_putc(seq, 'S');
			else if (excl)
				seq_putc(seq, 'E');
			else if (psl)
				seq_putc(seq, 'P');
			else /* Unused bits remain */
				seq_putc(seq, '0');
		}
		sep = true;
	}
	seq_putc(seq, '\n');
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}
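/*
 * Example "bit_usage" output (a sketch for a cache resource with an
 * 8-bit CBM and two domains):
 *
 *   0=XXHHSS00;1=XXHH00EE
 *
 * One annotation character is printed per CBM bit, starting from the
 * most significant bit, with domains separated by ';'.
 */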
static int rdt_min_bw_show(struct kernfs_open_file *of,
			   struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.min_bw);
	return 0;
}

static int rdt_num_rmids_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_rmid);

	return 0;
}

static int rdt_mon_features_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct mon_evt *mevt;

	list_for_each_entry(mevt, &r->evt_list, list) {
		seq_printf(seq, "%s\n", mevt->name);
		if (mevt->configurable)
			seq_printf(seq, "%s_config\n", mevt->name);
	}

	return 0;
}

static int rdt_bw_gran_show(struct kernfs_open_file *of,
			    struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.bw_gran);
	return 0;
}

static int rdt_delay_linear_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.delay_linear);
	return 0;
}

static int max_threshold_occ_show(struct kernfs_open_file *of,
				  struct seq_file *seq, void *v)
{
	seq_printf(seq, "%u\n", resctrl_rmid_realloc_threshold);

	return 0;
}

static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
					 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD)
		seq_puts(seq, "per-thread\n");
	else
		seq_puts(seq, "max\n");

	return 0;
}

static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
				       char *buf, size_t nbytes, loff_t off)
{
	unsigned int bytes;
	int ret;

	ret = kstrtouint(buf, 0, &bytes);
	if (ret)
		return ret;

	if (bytes > resctrl_rmid_realloc_limit)
		return -EINVAL;

	resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(bytes);

	return nbytes;
}
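/*
 * Usage example (illustrative): lower the occupancy threshold below
 * which an RMID is considered free for re-allocation:
 *
 *   # echo 16384 > /sys/fs/resctrl/info/L3_MON/max_threshold_occupancy
 *
 * Values above resctrl_rmid_realloc_limit are rejected; accepted values
 * are rounded by resctrl_arch_round_mon_val() before taking effect.
 */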
/*
 * rdtgroup_mode_show - Display mode of this resource group
 */
static int rdtgroup_mode_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));

	rdtgroup_kn_unlock(of->kn);
	return 0;
}

static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
{
	switch (my_type) {
	case CDP_CODE:
		return CDP_DATA;
	case CDP_DATA:
		return CDP_CODE;
	default:
	case CDP_NONE:
		return CDP_NONE;
	}
}

/**
 * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
 * @r: Resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @type: CDP type of @r.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Checks if provided @cbm intended to be used for @closid on domain
 * @d overlaps with any other closids or other hardware usage associated
 * with this domain. If @exclusive is true then only overlaps with
 * resource groups in exclusive mode will be considered. If @exclusive
 * is false then overlaps with any resource group or hardware entities
 * will be considered.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 *
 * Return: false if CBM does not overlap, true if it does.
 */
static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
				    unsigned long cbm, int closid,
				    enum resctrl_conf_type type, bool exclusive)
{
	enum rdtgrp_mode mode;
	unsigned long ctrl_b;
	int i;

	/* Check for any overlap with regions used by hardware directly */
	if (!exclusive) {
		ctrl_b = r->cache.shareable_bits;
		if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
			return true;
	}

	/* Check for overlap with other resource groups */
	for (i = 0; i < closids_supported(); i++) {
		ctrl_b = resctrl_arch_get_config(r, d, i, type);
		mode = rdtgroup_mode_by_closid(i);
		if (closid_allocated(i) && i != closid &&
		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
			if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
				if (exclusive) {
					if (mode == RDT_MODE_EXCLUSIVE)
						return true;
					continue;
				}
				return true;
			}
		}
	}

	return false;
}

/**
 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
 * @s: Schema for the resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Resources that can be allocated using a CBM can use the CBM to control
 * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
 * for overlap. Overlap test is not limited to the specific resource for
 * which the CBM is intended though - when dealing with CDP resources that
 * share the underlying hardware the overlap check should be performed on
 * the CDP resource sharing the hardware also.
 *
 * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
 * overlap test.
 *
 * Return: true if CBM overlap detected, false if there is no overlap
 */
bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
			   unsigned long cbm, int closid, bool exclusive)
{
	enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
	struct rdt_resource *r = s->res;

	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type,
				    exclusive))
		return true;

	if (!resctrl_arch_get_cdp_enabled(r->rid))
		return false;
	return __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive);
}
/**
 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
 *
 * An exclusive resource group implies that there should be no sharing of
 * its allocated resources. At the time this group is considered to be
 * exclusive this test can determine if its current schemata supports this
 * setting by testing for overlap with all other resource groups.
 *
 * Return: true if resource group can be exclusive, false if there is overlap
 * with allocations of other resource groups and thus this resource group
 * cannot be exclusive.
 */
static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
{
	int closid = rdtgrp->closid;
	struct resctrl_schema *s;
	struct rdt_resource *r;
	bool has_cache = false;
	struct rdt_domain *d;
	u32 ctrl;

	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)
			continue;
		has_cache = true;
		list_for_each_entry(d, &r->domains, list) {
			ctrl = resctrl_arch_get_config(r, d, closid,
						       s->conf_type);
			if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) {
				rdt_last_cmd_puts("Schemata overlaps\n");
				return false;
			}
		}
	}

	if (!has_cache) {
		rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
		return false;
	}

	return true;
}

/**
 * rdtgroup_mode_write - Modify the resource group's mode
 */
static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	enum rdtgrp_mode mode;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	rdt_last_cmd_clear();

	mode = rdtgrp->mode;

	if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
	    (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
	    (!strcmp(buf, "pseudo-locksetup") &&
	     mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
	    (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
		goto out;

	if (mode == RDT_MODE_PSEUDO_LOCKED) {
		rdt_last_cmd_puts("Cannot change pseudo-locked group\n");
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(buf, "shareable")) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_SHAREABLE;
	} else if (!strcmp(buf, "exclusive")) {
		if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
			ret = -EINVAL;
			goto out;
		}
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_EXCLUSIVE;
	} else if (!strcmp(buf, "pseudo-locksetup")) {
		ret = rdtgroup_locksetup_enter(rdtgrp);
		if (ret)
			goto out;
		rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
	} else {
		rdt_last_cmd_puts("Unknown or unsupported mode\n");
		ret = -EINVAL;
	}

out:
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}

/**
 * rdtgroup_cbm_to_size - Translate CBM to size in bytes
 * @r: RDT resource to which @d belongs.
 * @d: RDT domain instance.
 * @cbm: bitmask for which the size should be computed.
 *
 * The bitmask provided associated with the RDT domain instance @d will be
 * translated into how many bytes it represents. The size in bytes is
 * computed by first dividing the total cache size by the CBM length to
 * determine how many bytes each bit in the bitmask represents. The result
 * is multiplied with the number of bits set in the bitmask.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 */
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
				  struct rdt_domain *d, unsigned long cbm)
{
	struct cpu_cacheinfo *ci;
	unsigned int size = 0;
	int num_b, i;

	num_b = bitmap_weight(&cbm, r->cache.cbm_len);
	ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == r->cache_level) {
			size = ci->info_list[i].size / r->cache.cbm_len * num_b;
			break;
		}
	}

	return size;
}
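/*
 * Worked example (a sketch): on a 24 MiB L3 cache with cbm_len == 12,
 * each CBM bit represents 24 MiB / 12 = 2 MiB, so cbm == 0xf (four bits
 * set) translates to 8 MiB.
 */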
/**
 * rdtgroup_size_show - Display size in bytes of allocated regions
 *
 * The "size" file mirrors the layout of the "schemata" file, printing the
 * size in bytes of each region instead of the capacity bitmask.
 */
static int rdtgroup_size_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct resctrl_schema *schema;
	enum resctrl_conf_type type;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	struct rdt_domain *d;
	unsigned int size;
	int ret = 0;
	u32 closid;
	bool sep;
	u32 ctrl;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		if (!rdtgrp->plr->d) {
			rdt_last_cmd_clear();
			rdt_last_cmd_puts("Cache domain offline\n");
			ret = -ENODEV;
		} else {
			seq_printf(s, "%*s:", max_name_width,
				   rdtgrp->plr->s->name);
			size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res,
						    rdtgrp->plr->d,
						    rdtgrp->plr->cbm);
			seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
		}
		goto out;
	}

	closid = rdtgrp->closid;

	list_for_each_entry(schema, &resctrl_schema_all, list) {
		r = schema->res;
		type = schema->conf_type;
		sep = false;
		seq_printf(s, "%*s:", max_name_width, schema->name);
		list_for_each_entry(d, &r->domains, list) {
			if (sep)
				seq_putc(s, ';');
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				size = 0;
			} else {
				if (is_mba_sc(r))
					ctrl = d->mbps_val[closid];
				else
					ctrl = resctrl_arch_get_config(r, d,
								       closid,
								       type);
				if (r->rid == RDT_RESOURCE_MBA ||
				    r->rid == RDT_RESOURCE_SMBA)
					size = ctrl;
				else
					size = rdtgroup_cbm_to_size(r, d, ctrl);
			}
			seq_printf(s, "%d=%u", d->id, size);
			sep = true;
		}
		seq_putc(s, '\n');
	}

out:
	rdtgroup_kn_unlock(of->kn);

	return ret;
}
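/*
 * Example "size" output (a sketch for a group with an 8 MiB L3
 * allocation in each of two cache domains, plus a memory bandwidth
 * resource whose control value is printed as-is):
 *
 *     L3:0=8388608;1=8388608
 *     MB:0=100;1=100
 */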
1434 * 1435 * Return: 0 for evtid == QOS_L3_MBM_TOTAL_EVENT_ID 1436 * 1 for evtid == QOS_L3_MBM_LOCAL_EVENT_ID 1437 * INVALID_CONFIG_INDEX for invalid evtid 1438 */ 1439 static inline unsigned int mon_event_config_index_get(u32 evtid) 1440 { 1441 switch (evtid) { 1442 case QOS_L3_MBM_TOTAL_EVENT_ID: 1443 return 0; 1444 case QOS_L3_MBM_LOCAL_EVENT_ID: 1445 return 1; 1446 default: 1447 /* Should never reach here */ 1448 return INVALID_CONFIG_INDEX; 1449 } 1450 } 1451 1452 static void mon_event_config_read(void *info) 1453 { 1454 struct mon_config_info *mon_info = info; 1455 unsigned int index; 1456 u32 h; 1457 1458 index = mon_event_config_index_get(mon_info->evtid); 1459 if (index == INVALID_CONFIG_INDEX) { 1460 pr_warn_once("Invalid event id %d\n", mon_info->evtid); 1461 return; 1462 } 1463 rdmsr(MSR_IA32_EVT_CFG_BASE + index, mon_info->mon_config, h); 1464 1465 /* Report only the valid event configuration bits */ 1466 mon_info->mon_config &= MAX_EVT_CONFIG_BITS; 1467 } 1468 1469 static void mondata_config_read(struct rdt_domain *d, struct mon_config_info *mon_info) 1470 { 1471 smp_call_function_any(&d->cpu_mask, mon_event_config_read, mon_info, 1); 1472 } 1473 1474 static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid) 1475 { 1476 struct mon_config_info mon_info = {0}; 1477 struct rdt_domain *dom; 1478 bool sep = false; 1479 1480 mutex_lock(&rdtgroup_mutex); 1481 1482 list_for_each_entry(dom, &r->domains, list) { 1483 if (sep) 1484 seq_puts(s, ";"); 1485 1486 memset(&mon_info, 0, sizeof(struct mon_config_info)); 1487 mon_info.evtid = evtid; 1488 mondata_config_read(dom, &mon_info); 1489 1490 seq_printf(s, "%d=0x%02x", dom->id, mon_info.mon_config); 1491 sep = true; 1492 } 1493 seq_puts(s, "\n"); 1494 1495 mutex_unlock(&rdtgroup_mutex); 1496 1497 return 0; 1498 } 1499 1500 static int mbm_total_bytes_config_show(struct kernfs_open_file *of, 1501 struct seq_file *seq, void *v) 1502 { 1503 struct rdt_resource *r = of->kn->parent->priv; 1504 1505 mbm_config_show(seq, r, QOS_L3_MBM_TOTAL_EVENT_ID); 1506 1507 return 0; 1508 } 1509 1510 static int mbm_local_bytes_config_show(struct kernfs_open_file *of, 1511 struct seq_file *seq, void *v) 1512 { 1513 struct rdt_resource *r = of->kn->parent->priv; 1514 1515 mbm_config_show(seq, r, QOS_L3_MBM_LOCAL_EVENT_ID); 1516 1517 return 0; 1518 } 1519 1520 static void mon_event_config_write(void *info) 1521 { 1522 struct mon_config_info *mon_info = info; 1523 unsigned int index; 1524 1525 index = mon_event_config_index_get(mon_info->evtid); 1526 if (index == INVALID_CONFIG_INDEX) { 1527 pr_warn_once("Invalid event id %d\n", mon_info->evtid); 1528 return; 1529 } 1530 wrmsr(MSR_IA32_EVT_CFG_BASE + index, mon_info->mon_config, 0); 1531 } 1532 1533 static int mbm_config_write_domain(struct rdt_resource *r, 1534 struct rdt_domain *d, u32 evtid, u32 val) 1535 { 1536 struct mon_config_info mon_info = {0}; 1537 int ret = 0; 1538 1539 /* mon_config cannot be more than the supported set of events */ 1540 if (val > MAX_EVT_CONFIG_BITS) { 1541 rdt_last_cmd_puts("Invalid event configuration\n"); 1542 return -EINVAL; 1543 } 1544 1545 /* 1546 * Read the current config value first. If both are the same then 1547 * no need to write it again. 1548 */ 1549 mon_info.evtid = evtid; 1550 mondata_config_read(d, &mon_info); 1551 if (mon_info.mon_config == val) 1552 goto out; 1553 1554 mon_info.mon_config = val; 1555 1556 /* 1557 * Update MSR_IA32_EVT_CFG_BASE MSR on one of the CPUs in the 1558 * domain. 
static int mbm_config_write_domain(struct rdt_resource *r,
				   struct rdt_domain *d, u32 evtid, u32 val)
{
	struct mon_config_info mon_info = {0};
	int ret = 0;

	/* mon_config cannot be more than the supported set of events */
	if (val > MAX_EVT_CONFIG_BITS) {
		rdt_last_cmd_puts("Invalid event configuration\n");
		return -EINVAL;
	}

	/*
	 * Read the current config value first. If both are the same then
	 * no need to write it again.
	 */
	mon_info.evtid = evtid;
	mondata_config_read(d, &mon_info);
	if (mon_info.mon_config == val)
		goto out;

	mon_info.mon_config = val;

	/*
	 * Update MSR_IA32_EVT_CFG_BASE MSR on one of the CPUs in the
	 * domain. The MSRs offset from MSR_IA32_EVT_CFG_BASE
	 * are scoped at the domain level. Writing any of these MSRs
	 * on one CPU is observed by all the CPUs in the domain.
	 */
	smp_call_function_any(&d->cpu_mask, mon_event_config_write,
			      &mon_info, 1);

	/*
	 * When an Event Configuration is changed, the bandwidth counters
	 * for all RMIDs and Events will be cleared by the hardware. The
	 * hardware also sets MSR_IA32_QM_CTR.Unavailable (bit 62) for
	 * every RMID on the next read to any event for every RMID.
	 * Subsequent reads will have MSR_IA32_QM_CTR.Unavailable (bit 62)
	 * cleared while it is tracked by the hardware. Clear the
	 * mbm_local and mbm_total counts for all the RMIDs.
	 */
	resctrl_arch_reset_rmid_all(r, d);

out:
	return ret;
}

static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid)
{
	char *dom_str = NULL, *id_str;
	unsigned long dom_id, val;
	struct rdt_domain *d;
	int ret = 0;

next:
	if (!tok || tok[0] == '\0')
		return 0;

	/* Start processing the strings for each domain */
	dom_str = strim(strsep(&tok, ";"));
	id_str = strsep(&dom_str, "=");

	if (!id_str || kstrtoul(id_str, 10, &dom_id)) {
		rdt_last_cmd_puts("Missing '=' or non-numeric domain id\n");
		return -EINVAL;
	}

	if (!dom_str || kstrtoul(dom_str, 16, &val)) {
		rdt_last_cmd_puts("Non-numeric event configuration value\n");
		return -EINVAL;
	}

	list_for_each_entry(d, &r->domains, list) {
		if (d->id == dom_id) {
			ret = mbm_config_write_domain(r, d, evtid, val);
			if (ret)
				return -EINVAL;
			goto next;
		}
	}

	return -EINVAL;
}
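/*
 * Example configuration string (illustrative), as parsed by
 * mon_config_write(): one "<domain id>=<hex value>" pair per domain,
 * separated by ';':
 *
 *   # echo "0=0x30;1=0x30" > /sys/fs/resctrl/info/L3_MON/mbm_total_bytes_config
 */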
static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	struct rdt_resource *r = of->kn->parent->priv;
	int ret;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;

	mutex_lock(&rdtgroup_mutex);

	rdt_last_cmd_clear();

	buf[nbytes - 1] = '\0';

	ret = mon_config_write(r, buf, QOS_L3_MBM_TOTAL_EVENT_ID);

	mutex_unlock(&rdtgroup_mutex);

	return ret ?: nbytes;
}
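/*
 * A write handler for "mbm_local_bytes_config"; a minimal sketch that
 * mirrors mbm_total_bytes_config_write() above, differing only in the
 * event id, so the 0644 file in res_common_files below has a matching
 * ->write() hook.
 */
static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	struct rdt_resource *r = of->kn->parent->priv;
	int ret;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;

	mutex_lock(&rdtgroup_mutex);

	rdt_last_cmd_clear();

	buf[nbytes - 1] = '\0';

	ret = mon_config_write(r, buf, QOS_L3_MBM_LOCAL_EVENT_ID);

	mutex_unlock(&rdtgroup_mutex);

	return ret ?: nbytes;
}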
/* rdtgroup information files for one cache resource. */
static struct rftype res_common_files[] = {
	{
		.name		= "last_cmd_status",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_last_cmd_status_show,
		.fflags		= RF_TOP_INFO,
	},
	{
		.name		= "num_closids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_closids_show,
		.fflags		= RF_CTRL_INFO,
	},
	{
		.name		= "mon_features",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_mon_features_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "num_rmids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_rmids_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "cbm_mask",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_default_ctrl_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_cbm_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_cbm_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "shareable_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_shareable_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "bit_usage",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bit_usage_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_bandwidth",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_bw_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "bandwidth_gran",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bw_gran_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "delay_linear",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_delay_linear_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	/*
	 * Platform specific which (if any) capabilities are provided by
	 * thread_throttle_mode. Defer "fflags" initialization to platform
	 * discovery.
	 */
	{
		.name		= "thread_throttle_mode",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_thread_throttle_mode_show,
	},
	{
		.name		= "max_threshold_occupancy",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= max_threshold_occ_write,
		.seq_show	= max_threshold_occ_show,
		.fflags		= RF_MON_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "mbm_total_bytes_config",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= mbm_total_bytes_config_show,
		.write		= mbm_total_bytes_config_write,
	},
	{
		.name		= "mbm_local_bytes_config",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= mbm_local_bytes_config_show,
		.write		= mbm_local_bytes_config_write,
	},
	{
		.name		= "cpus",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "cpus_list",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.flags		= RFTYPE_FLAGS_CPUS_LIST,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "tasks",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_tasks_write,
		.seq_show	= rdtgroup_tasks_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "schemata",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_schemata_write,
		.seq_show	= rdtgroup_schemata_show,
		.fflags		= RF_CTRL_BASE,
	},
	{
		.name		= "mode",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_mode_write,
		.seq_show	= rdtgroup_mode_show,
		.fflags		= RF_CTRL_BASE,
	},
	{
		.name		= "size",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdtgroup_size_show,
		.fflags		= RF_CTRL_BASE,
	},
};

static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
{
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	lockdep_assert_held(&rdtgroup_mutex);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) {
			ret = rdtgroup_add_file(kn, rft);
			if (ret)
				goto error;
		}
	}

	return 0;
error:
	pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
	while (--rft >= rfts) {
		if ((fflags & rft->fflags) == rft->fflags)
			kernfs_remove_by_name(kn, rft->name);
	}
	return ret;
}

static struct rftype *rdtgroup_get_rftype_by_name(const char *name)
{
	struct rftype *rfts, *rft;
	int len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (!strcmp(rft->name, name))
			return rft;
	}

	return NULL;
}

void __init thread_throttle_mode_init(void)
{
	struct rftype *rft;

	rft = rdtgroup_get_rftype_by_name("thread_throttle_mode");
	if (!rft)
		return;

	rft->fflags = RF_CTRL_INFO | RFTYPE_RES_MB;
}

void __init mbm_config_rftype_init(const char *config)
{
	struct rftype *rft;

	rft = rdtgroup_get_rftype_by_name(config);
	if (rft)
		rft->fflags = RF_MON_INFO | RFTYPE_RES_CACHE;
}
/**
 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 *
 * The permissions of named resctrl file, directory, or link are modified
 * to not allow read, write, or execute by any user.
 *
 * WARNING: This function is intended to communicate to the user that the
 * resctrl file has been locked down - that it is not relevant to the
 * particular state the system finds itself in. It should not be relied
 * on to protect from user access because after the file's permissions
 * are restricted the user can still change the permissions using chmod
 * from the command line.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn;
	int ret = 0;

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		iattr.ia_mode = S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode = S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode = S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}
/**
 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 * @mask: Mask of permissions that should be restored
 *
 * Restore the permissions of the named file. If @name is a directory the
 * permissions of its parent will be used.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
			     umode_t mask)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn, *parent;
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (!strcmp(rft->name, name))
			iattr.ia_mode = rft->mode & mask;
	}

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		parent = kernfs_get_parent(kn);
		if (parent) {
			iattr.ia_mode |= parent->mode;
			kernfs_put(parent);
		}
		iattr.ia_mode |= S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode |= S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode |= S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}

static int rdtgroup_mkdir_info_resdir(void *priv, char *name,
				      unsigned long fflags)
{
	struct kernfs_node *kn_subdir;
	int ret;

	kn_subdir = kernfs_create_dir(kn_info, name,
				      kn_info->mode, priv);
	if (IS_ERR(kn_subdir))
		return PTR_ERR(kn_subdir);

	ret = rdtgroup_kn_set_ugid(kn_subdir);
	if (ret)
		return ret;

	ret = rdtgroup_add_files(kn_subdir, fflags);
	if (!ret)
		kernfs_activate(kn_subdir);

	return ret;
}

static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
{
	struct resctrl_schema *s;
	struct rdt_resource *r;
	unsigned long fflags;
	char name[32];
	int ret;

	/* create the directory */
	kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
	if (IS_ERR(kn_info))
		return PTR_ERR(kn_info);

	ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
	if (ret)
		goto out_destroy;

	/* loop over enabled controls, these are all alloc_capable */
	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		fflags = r->fflags | RF_CTRL_INFO;
		ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags);
		if (ret)
			goto out_destroy;
	}

	for_each_mon_capable_rdt_resource(r) {
		fflags = r->fflags | RF_MON_INFO;
		sprintf(name, "%s_MON", r->name);
		ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
		if (ret)
			goto out_destroy;
	}

	ret = rdtgroup_kn_set_ugid(kn_info);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn_info);

	return 0;

out_destroy:
	kernfs_remove(kn_info);
	return ret;
}

static int
mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
		    char *name, struct kernfs_node **dest_kn)
{
	struct kernfs_node *kn;
	int ret;

	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	if (dest_kn)
		*dest_kn = kn;

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn);

	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}
static void l3_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}

static void l2_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
}

static inline bool is_mba_linear(void)
{
	return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.delay_linear;
}

static int set_cache_qos_cfg(int level, bool enable)
{
	void (*update)(void *arg);
	struct rdt_resource *r_l;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int cpu;

	if (level == RDT_RESOURCE_L3)
		update = l3_qos_cfg_update;
	else if (level == RDT_RESOURCE_L2)
		update = l2_qos_cfg_update;
	else
		return -EINVAL;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	r_l = &rdt_resources_all[level].r_resctrl;
	list_for_each_entry(d, &r_l->domains, list) {
		if (r_l->cache.arch_has_per_cpu_cfg)
			/* Pick all the CPUs in the domain instance */
			for_each_cpu(cpu, &d->cpu_mask)
				cpumask_set_cpu(cpu, cpu_mask);
		else
			/* Pick one CPU from each domain instance to update MSR */
			cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
	}

	/* Update QOS_CFG MSR on all the CPUs in cpu_mask */
	on_each_cpu_mask(cpu_mask, update, &enable, 1);

	free_cpumask_var(cpu_mask);

	return 0;
}

/* Restore the qos cfg state when a domain comes online */
void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	if (!r->cdp_capable)
		return;

	if (r->rid == RDT_RESOURCE_L2)
		l2_qos_cfg_update(&hw_res->cdp_enabled);

	if (r->rid == RDT_RESOURCE_L3)
		l3_qos_cfg_update(&hw_res->cdp_enabled);
}

static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_domain *d)
{
	u32 num_closid = resctrl_arch_get_num_closid(r);
	int cpu = cpumask_any(&d->cpu_mask);
	int i;

	d->mbps_val = kcalloc_node(num_closid, sizeof(*d->mbps_val),
				   GFP_KERNEL, cpu_to_node(cpu));
	if (!d->mbps_val)
		return -ENOMEM;

	for (i = 0; i < num_closid; i++)
		d->mbps_val[i] = MBA_MAX_MBPS;

	return 0;
}

static void mba_sc_domain_destroy(struct rdt_resource *r,
				  struct rdt_domain *d)
{
	kfree(d->mbps_val);
	d->mbps_val = NULL;
}

/*
 * The MBA software controller is supported only if
 * MBM is supported and MBA is in linear scale.
 */
static bool supports_mba_mbps(void)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;

	return (is_mbm_local_enabled() &&
		r->alloc_capable && is_mba_linear());
}
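/*
 * With the "mba_MBps" mount option the MB entries in the schemata file
 * are interpreted as MBps values instead of percentages. A sketch of the
 * intended usage (values illustrative):
 *
 *	# mount -t resctrl -o mba_MBps resctrl /sys/fs/resctrl
 *	# echo "MB:0=2048" > /sys/fs/resctrl/p1/schemata
 */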
/*
 * Enable or disable the MBA software controller
 * which helps the user specify bandwidth in MBps.
 */
static int set_mba_sc(bool mba_sc)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
	u32 num_closid = resctrl_arch_get_num_closid(r);
	struct rdt_domain *d;
	int i;

	if (!supports_mba_mbps() || mba_sc == is_mba_sc(r))
		return -EINVAL;

	r->membw.mba_sc = mba_sc;

	list_for_each_entry(d, &r->domains, list) {
		for (i = 0; i < num_closid; i++)
			d->mbps_val[i] = MBA_MAX_MBPS;
	}

	return 0;
}

static int cdp_enable(int level)
{
	struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl;
	int ret;

	if (!r_l->alloc_capable)
		return -EINVAL;

	ret = set_cache_qos_cfg(level, true);
	if (!ret)
		rdt_resources_all[level].cdp_enabled = true;

	return ret;
}

static void cdp_disable(int level)
{
	struct rdt_hw_resource *r_hw = &rdt_resources_all[level];

	if (r_hw->cdp_enabled) {
		set_cache_qos_cfg(level, false);
		r_hw->cdp_enabled = false;
	}
}

int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable)
{
	struct rdt_hw_resource *hw_res = &rdt_resources_all[l];

	if (!hw_res->r_resctrl.cdp_capable)
		return -EINVAL;

	if (enable)
		return cdp_enable(l);

	cdp_disable(l);

	return 0;
}

static void cdp_disable_all(void)
{
	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
		resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
		resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
}

/*
 * We don't allow rdtgroup directories to be created anywhere
 * except the root directory. Thus when looking for the rdtgroup
 * structure for a kernfs node we are either looking at a directory,
 * in which case the rdtgroup structure is pointed at by the "priv"
 * field; otherwise we have a file, and need only look to the parent
 * to find the rdtgroup.
 */
static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
{
	if (kernfs_type(kn) == KERNFS_DIR) {
		/*
		 * All the resource directories use "kn->priv"
		 * to point to the "struct rdtgroup" for the
		 * resource. "info" and its subdirectories don't
		 * have rdtgroup structures, so return NULL here.
		 */
		if (kn == kn_info || kn->parent == kn_info)
			return NULL;
		else
			return kn->priv;
	} else {
		return kn->parent->priv;
	}
}
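/*
 * The following pair of functions brackets nearly every resctrl file
 * operation. A sketch of the usual calling pattern (illustrative, not a
 * real handler in this file):
 *
 *	rdtgrp = rdtgroup_kn_lock_live(of->kn);
 *	if (rdtgrp)
 *		... operate on the live group under rdtgroup_mutex ...
 *	rdtgroup_kn_unlock(of->kn);
 *
 * The waitcount taken in rdtgroup_kn_lock_live() keeps the rdtgroup
 * structure alive even if the group is deleted while rdtgroup_mutex was
 * being acquired; the final free then happens in rdtgroup_kn_unlock().
 */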
struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return NULL;

	atomic_inc(&rdtgrp->waitcount);
	kernfs_break_active_protection(kn);

	mutex_lock(&rdtgroup_mutex);

	/* Was this group deleted while we waited? */
	if (rdtgrp->flags & RDT_DELETED)
		return NULL;

	return rdtgrp;
}

void rdtgroup_kn_unlock(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return;

	mutex_unlock(&rdtgroup_mutex);

	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);
		kernfs_unbreak_active_protection(kn);
		rdtgroup_remove(rdtgrp);
	} else {
		kernfs_unbreak_active_protection(kn);
	}
}

static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **mon_data_kn);

static int rdt_enable_ctx(struct rdt_fs_context *ctx)
{
	int ret = 0;

	if (ctx->enable_cdpl2)
		ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true);

	if (!ret && ctx->enable_cdpl3)
		ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true);

	if (!ret && ctx->enable_mba_mbps)
		ret = set_mba_sc(true);

	return ret;
}

static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type)
{
	struct resctrl_schema *s;
	const char *suffix = "";
	int ret, cl;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	s->res = r;
	s->num_closid = resctrl_arch_get_num_closid(r);
	if (resctrl_arch_get_cdp_enabled(r->rid))
		s->num_closid /= 2;

	s->conf_type = type;
	switch (type) {
	case CDP_CODE:
		suffix = "CODE";
		break;
	case CDP_DATA:
		suffix = "DATA";
		break;
	case CDP_NONE:
		suffix = "";
		break;
	}

	ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix);
	if (ret >= sizeof(s->name)) {
		kfree(s);
		return -EINVAL;
	}

	cl = strlen(s->name);

	/*
	 * If CDP is supported by this resource, but not enabled,
	 * include the suffix. This ensures the tabular format of the
	 * schemata file does not change between mounts of the filesystem.
	 */
	if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid))
		cl += 4;

	if (cl > max_name_width)
		max_name_width = cl;

	INIT_LIST_HEAD(&s->list);
	list_add(&s->list, &resctrl_schema_all);

	return 0;
}

static int schemata_list_create(void)
{
	struct rdt_resource *r;
	int ret = 0;

	for_each_alloc_capable_rdt_resource(r) {
		if (resctrl_arch_get_cdp_enabled(r->rid)) {
			ret = schemata_list_add(r, CDP_CODE);
			if (ret)
				break;

			ret = schemata_list_add(r, CDP_DATA);
		} else {
			ret = schemata_list_add(r, CDP_NONE);
		}

		if (ret)
			break;
	}

	return ret;
}

static void schemata_list_destroy(void)
{
	struct resctrl_schema *s, *tmp;

	list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) {
		list_del(&s->list);
		kfree(s);
	}
}
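/*
 * rdt_get_tree() below implements the mount; the options it consumes are
 * declared in rdt_fs_parameters and parsed by rdt_parse_param() further
 * down. Typical invocation (illustrative):
 *
 *	# mount -t resctrl resctrl [-o cdp[,cdpl2][,mba_MBps]] /sys/fs/resctrl
 */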
static int rdt_get_tree(struct fs_context *fc)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);
	struct rdt_domain *dom;
	struct rdt_resource *r;
	int ret;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);
	/*
	 * resctrl file system can only be mounted once.
	 */
	if (static_branch_unlikely(&rdt_enable_key)) {
		ret = -EBUSY;
		goto out;
	}

	ret = rdt_enable_ctx(ctx);
	if (ret < 0)
		goto out_cdp;

	ret = schemata_list_create();
	if (ret) {
		schemata_list_destroy();
		goto out_mba;
	}

	closid_init();

	ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
	if (ret < 0)
		goto out_schemata_free;

	if (rdt_mon_capable) {
		ret = mongroup_create_dir(rdtgroup_default.kn,
					  &rdtgroup_default, "mon_groups",
					  &kn_mongrp);
		if (ret < 0)
			goto out_info;

		ret = mkdir_mondata_all(rdtgroup_default.kn,
					&rdtgroup_default, &kn_mondata);
		if (ret < 0)
			goto out_mongrp;
		rdtgroup_default.mon.mon_data_kn = kn_mondata;
	}

	ret = rdt_pseudo_lock_init();
	if (ret)
		goto out_mondata;

	ret = kernfs_get_tree(fc);
	if (ret < 0)
		goto out_psl;

	if (rdt_alloc_capable)
		static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
	if (rdt_mon_capable)
		static_branch_enable_cpuslocked(&rdt_mon_enable_key);

	if (rdt_alloc_capable || rdt_mon_capable)
		static_branch_enable_cpuslocked(&rdt_enable_key);

	if (is_mbm_enabled()) {
		r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
		list_for_each_entry(dom, &r->domains, list)
			mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
	}

	goto out;

out_psl:
	rdt_pseudo_lock_release();
out_mondata:
	if (rdt_mon_capable)
		kernfs_remove(kn_mondata);
out_mongrp:
	if (rdt_mon_capable)
		kernfs_remove(kn_mongrp);
out_info:
	kernfs_remove(kn_info);
out_schemata_free:
	schemata_list_destroy();
out_mba:
	if (ctx->enable_mba_mbps)
		set_mba_sc(false);
out_cdp:
	cdp_disable_all();
out:
	rdt_last_cmd_clear();
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
	return ret;
}

enum rdt_param {
	Opt_cdp,
	Opt_cdpl2,
	Opt_mba_mbps,
	nr__rdt_params
};

static const struct fs_parameter_spec rdt_fs_parameters[] = {
	fsparam_flag("cdp", Opt_cdp),
	fsparam_flag("cdpl2", Opt_cdpl2),
	fsparam_flag("mba_MBps", Opt_mba_mbps),
	{}
};

static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, rdt_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_cdp:
		ctx->enable_cdpl3 = true;
		return 0;
	case Opt_cdpl2:
		ctx->enable_cdpl2 = true;
		return 0;
	case Opt_mba_mbps:
		if (!supports_mba_mbps())
			return -EINVAL;
		ctx->enable_mba_mbps = true;
		return 0;
	}

	return -EINVAL;
}

static void rdt_fs_context_free(struct fs_context *fc)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);

	kernfs_free_fs_context(fc);
	kfree(ctx);
}

static const struct fs_context_operations rdt_fs_context_ops = {
	.free		= rdt_fs_context_free,
	.parse_param	= rdt_parse_param,
	.get_tree	= rdt_get_tree,
};
static int rdt_init_fs_context(struct fs_context *fc)
{
	struct rdt_fs_context *ctx;

	ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->kfc.root = rdt_root;
	ctx->kfc.magic = RDTGROUP_SUPER_MAGIC;
	fc->fs_private = &ctx->kfc;
	fc->ops = &rdt_fs_context_ops;
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(&init_user_ns);
	fc->global = true;
	return 0;
}

static int reset_all_ctrls(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	struct rdt_hw_domain *hw_dom;
	struct msr_param msr_param;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int i;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	msr_param.res = r;
	msr_param.low = 0;
	msr_param.high = hw_res->num_closid;

	/*
	 * Disable resource control for this resource by setting all
	 * CBMs in all domains to the maximum mask value. Pick one CPU
	 * from each domain to update the MSRs below.
	 */
	list_for_each_entry(d, &r->domains, list) {
		hw_dom = resctrl_to_arch_dom(d);
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);

		for (i = 0; i < hw_res->num_closid; i++)
			hw_dom->ctrl_val[i] = r->default_ctrl;
	}

	/* Update CBM on all the CPUs in cpu_mask */
	on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1);

	free_cpumask_var(cpu_mask);

	return 0;
}

/*
 * Move tasks from one group to the other. If @from is NULL, then all tasks
 * in the system are moved unconditionally (used for teardown).
 *
 * If @mask is not NULL the cpus on which moved tasks are running are set
 * in that mask so the update smp function call is restricted to affected
 * cpus.
 */
static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
				 struct cpumask *mask)
{
	struct task_struct *p, *t;

	read_lock(&tasklist_lock);
	for_each_process_thread(p, t) {
		if (!from || is_closid_match(t, from) ||
		    is_rmid_match(t, from)) {
			WRITE_ONCE(t->closid, to->closid);
			WRITE_ONCE(t->rmid, to->mon.rmid);

			/*
			 * Order the closid/rmid stores above before the loads
			 * in task_curr(). This pairs with the full barrier
			 * between the rq->curr update and resctrl_sched_in()
			 * during context switch.
			 */
			smp_mb();

			/*
			 * If the task is on a CPU, set the CPU in the mask.
			 * The detection is inaccurate as tasks might move or
			 * schedule before the smp function call takes place.
			 * In such a case the function call is pointless, but
			 * there is no other side effect.
			 */
			if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
				cpumask_set_cpu(task_cpu(t), mask);
		}
	}
	read_unlock(&tasklist_lock);
}

static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
{
	struct rdtgroup *sentry, *stmp;
	struct list_head *head;

	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
		free_rmid(sentry->mon.rmid);
		list_del(&sentry->mon.crdtgrp_list);

		if (atomic_read(&sentry->waitcount) != 0)
			sentry->flags = RDT_DELETED;
		else
			rdtgroup_remove(sentry);
	}
}
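/*
 * Note the deferred-free protocol used above and in rmdir_all_sub()
 * below: a group that is still referenced via rdtgroup_kn_lock_live()
 * (waitcount != 0) is only flagged RDT_DELETED here; the matching
 * rdtgroup_kn_unlock() performs the final rdtgroup_remove().
 */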
/*
 * Forcibly remove all subdirectories under root.
 */
static void rmdir_all_sub(void)
{
	struct rdtgroup *rdtgrp, *tmp;

	/* Move all tasks to the default resource group */
	rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);

	list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
		/* Free any child rmids */
		free_all_child_rdtgrp(rdtgrp);

		/* Remove each rdtgroup other than root */
		if (rdtgrp == &rdtgroup_default)
			continue;

		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);

		/*
		 * Give any CPUs back to the default group. We cannot copy
		 * cpu_online_mask because a CPU might have executed the
		 * offline callback already, but is still marked online.
		 */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

		free_rmid(rdtgrp->mon.rmid);

		kernfs_remove(rdtgrp->kn);
		list_del(&rdtgrp->rdtgroup_list);

		if (atomic_read(&rdtgrp->waitcount) != 0)
			rdtgrp->flags = RDT_DELETED;
		else
			rdtgroup_remove(rdtgrp);
	}
	/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
	update_closid_rmid(cpu_online_mask, &rdtgroup_default);

	kernfs_remove(kn_info);
	kernfs_remove(kn_mongrp);
	kernfs_remove(kn_mondata);
}

static void rdt_kill_sb(struct super_block *sb)
{
	struct rdt_resource *r;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	set_mba_sc(false);

	/* Put everything back to default values. */
	for_each_alloc_capable_rdt_resource(r)
		reset_all_ctrls(r);
	cdp_disable_all();
	rmdir_all_sub();
	rdt_pseudo_lock_release();
	rdtgroup_default.mode = RDT_MODE_SHAREABLE;
	schemata_list_destroy();
	static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
	static_branch_disable_cpuslocked(&rdt_mon_enable_key);
	static_branch_disable_cpuslocked(&rdt_enable_key);
	kernfs_kill_sb(sb);
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
}

static struct file_system_type rdt_fs_type = {
	.name			= "resctrl",
	.init_fs_context	= rdt_init_fs_context,
	.parameters		= rdt_fs_parameters,
	.kill_sb		= rdt_kill_sb,
};

static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
		       void *priv)
{
	struct kernfs_node *kn;
	int ret = 0;

	kn = __kernfs_create_file(parent_kn, name, 0444,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
				  &kf_mondata_ops, priv, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return ret;
}
/*
 * Remove all subdirectories of mon_data of ctrl_mon groups
 * and monitor groups with the given domain id.
 */
static void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
					   unsigned int dom_id)
{
	struct rdtgroup *prgrp, *crgrp;
	char name[32];

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		sprintf(name, "mon_%s_%02d", r->name, dom_id);
		kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);

		list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
			kernfs_remove_by_name(crgrp->mon.mon_data_kn, name);
	}
}

static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
				struct rdt_domain *d,
				struct rdt_resource *r, struct rdtgroup *prgrp)
{
	union mon_data_bits priv;
	struct kernfs_node *kn;
	struct mon_evt *mevt;
	struct rmid_read rr;
	char name[32];
	int ret;

	sprintf(name, "mon_%s_%02d", r->name, d->id);
	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	if (WARN_ON(list_empty(&r->evt_list))) {
		ret = -EPERM;
		goto out_destroy;
	}

	priv.u.rid = r->rid;
	priv.u.domid = d->id;
	list_for_each_entry(mevt, &r->evt_list, list) {
		priv.u.evtid = mevt->evtid;
		ret = mon_addfile(kn, mevt->name, priv.priv);
		if (ret)
			goto out_destroy;

		if (is_mbm_event(mevt->evtid))
			mon_event_read(&rr, r, d, prgrp, mevt->evtid, true);
	}
	kernfs_activate(kn);
	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}

/*
 * Add all subdirectories of mon_data for "ctrl_mon" groups
 * and "monitor" groups with the given domain id.
 */
static void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
					   struct rdt_domain *d)
{
	struct kernfs_node *parent_kn;
	struct rdtgroup *prgrp, *crgrp;
	struct list_head *head;

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		parent_kn = prgrp->mon.mon_data_kn;
		mkdir_mondata_subdir(parent_kn, d, r, prgrp);

		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			parent_kn = crgrp->mon.mon_data_kn;
			mkdir_mondata_subdir(parent_kn, d, r, crgrp);
		}
	}
}

static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
				       struct rdt_resource *r,
				       struct rdtgroup *prgrp)
{
	struct rdt_domain *dom;
	int ret;

	list_for_each_entry(dom, &r->domains, list) {
		ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * This creates a directory mon_data which contains the monitored data.
 *
 * mon_data has one directory for each domain, each named in the format
 * mon_<domain_name>_<domain_id>. For example, mon_data with an L3 domain
 * looks like this:
 * ./mon_data:
 * mon_L3_00
 * mon_L3_01
 * mon_L3_02
 * ...
 *
 * Each domain directory has one file per event:
 * ./mon_L3_00/:
 * llc_occupancy
 *
 */
static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **dest_kn)
{
	struct rdt_resource *r;
	struct kernfs_node *kn;
	int ret;

	/*
	 * Create the mon_data directory first.
	 */
	ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn);
	if (ret)
		return ret;

	if (dest_kn)
		*dest_kn = kn;

	/*
	 * Create the subdirectories for each domain. Note that all events
	 * in a domain like L3 are grouped into a resource whose domain is L3.
	 */
	for_each_mon_capable_rdt_resource(r) {
		ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
		if (ret)
			goto out_destroy;
	}

	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}

/**
 * cbm_ensure_valid - Enforce validity on provided CBM
 * @_val:	Candidate CBM
 * @r:		RDT resource to which the CBM belongs
 *
 * The provided CBM represents all cache portions available for use. This
 * may be represented by a bitmap that does not consist of contiguous ones
 * and thus be an invalid CBM.
 * Here the provided CBM is forced to be a valid CBM by only considering
 * the first set of contiguous bits as valid and clearing all remaining
 * bits.
 * The intention here is to provide a valid default CBM with which a new
 * resource group is initialized. The user can follow this with a
 * modification to the CBM if the default does not satisfy the
 * requirements.
 */
static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
{
	unsigned int cbm_len = r->cache.cbm_len;
	unsigned long first_bit, zero_bit;
	unsigned long val = _val;

	if (!val)
		return 0;

	first_bit = find_first_bit(&val, cbm_len);
	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);

	/* Clear any remaining bits to ensure contiguous region */
	bitmap_clear(&val, zero_bit, cbm_len - zero_bit);
	return (u32)val;
}
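/*
 * Worked example for cbm_ensure_valid() above: with cbm_len = 12 and a
 * candidate CBM of 0xf0f (two runs of contiguous ones), first_bit = 0
 * and zero_bit = 4, so bits 4..11 are cleared and the result is 0x00f -
 * only the lowest contiguous run of ones survives.
 */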
/*
 * Initialize cache resources per RDT domain
 *
 * Set the RDT domain up to start off with all usable allocations. That is,
 * all shareable and unused bits. All-zero CBM is invalid.
 */
static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s,
				 u32 closid)
{
	enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
	enum resctrl_conf_type t = s->conf_type;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	u32 used_b = 0, unused_b = 0;
	unsigned long tmp_cbm;
	enum rdtgrp_mode mode;
	u32 peer_ctl, ctrl_val;
	int i;

	cfg = &d->staged_config[t];
	cfg->have_new_ctrl = false;
	cfg->new_ctrl = r->cache.shareable_bits;
	used_b = r->cache.shareable_bits;
	for (i = 0; i < closids_supported(); i++) {
		if (closid_allocated(i) && i != closid) {
			mode = rdtgroup_mode_by_closid(i);
			if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
				/*
				 * ctrl values for locksetup aren't relevant
				 * until the schemata is written, and the mode
				 * becomes RDT_MODE_PSEUDO_LOCKED.
				 */
				continue;
			/*
			 * If CDP is active include peer domain's
			 * usage to ensure there is no overlap
			 * with an exclusive group.
			 */
			if (resctrl_arch_get_cdp_enabled(r->rid))
				peer_ctl = resctrl_arch_get_config(r, d, i,
								   peer_type);
			else
				peer_ctl = 0;
			ctrl_val = resctrl_arch_get_config(r, d, i,
							   s->conf_type);
			used_b |= ctrl_val | peer_ctl;
			if (mode == RDT_MODE_SHAREABLE)
				cfg->new_ctrl |= ctrl_val | peer_ctl;
		}
	}
	if (d->plr && d->plr->cbm > 0)
		used_b |= d->plr->cbm;
	unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
	unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
	cfg->new_ctrl |= unused_b;
	/*
	 * Force the initial CBM to be valid; the user can
	 * modify the CBM based on system availability.
	 */
	cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r);
	/*
	 * Assign the u32 CBM to an unsigned long to ensure that
	 * bitmap_weight() does not access out-of-bound memory.
	 */
	tmp_cbm = cfg->new_ctrl;
	if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
		rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->id);
		return -ENOSPC;
	}
	cfg->have_new_ctrl = true;

	return 0;
}

/*
 * Initialize cache resources with default values.
 *
 * A new RDT group is being created on an allocation capable (CAT)
 * supporting system. Set this group up to start off with all usable
 * allocations.
 *
 * If there are no more shareable bits available on any domain then
 * the entire allocation will fail.
 */
static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid)
{
	struct rdt_domain *d;
	int ret;

	list_for_each_entry(d, &s->res->domains, list) {
		ret = __init_one_rdt_domain(d, s, closid);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Initialize MBA resource with default values. */
static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid)
{
	struct resctrl_staged_config *cfg;
	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list) {
		if (is_mba_sc(r)) {
			d->mbps_val[closid] = MBA_MAX_MBPS;
			continue;
		}

		cfg = &d->staged_config[CDP_NONE];
		cfg->new_ctrl = r->default_ctrl;
		cfg->have_new_ctrl = true;
	}
}
/* Initialize the RDT group's allocations. */
static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{
	struct resctrl_schema *s;
	struct rdt_resource *r;
	int ret;

	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		if (r->rid == RDT_RESOURCE_MBA ||
		    r->rid == RDT_RESOURCE_SMBA) {
			rdtgroup_init_mba(r, rdtgrp->closid);
			if (is_mba_sc(r))
				continue;
		} else {
			ret = rdtgroup_init_cat(s, rdtgrp->closid);
			if (ret < 0)
				return ret;
		}

		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
		if (ret < 0) {
			rdt_last_cmd_puts("Failed to initialize allocations\n");
			return ret;
		}
	}

	rdtgrp->mode = RDT_MODE_SHAREABLE;

	return 0;
}
static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
			     const char *name, umode_t mode,
			     enum rdt_group_type rtype, struct rdtgroup **r)
{
	struct rdtgroup *prdtgrp, *rdtgrp;
	struct kernfs_node *kn;
	uint files = 0;
	int ret;

	prdtgrp = rdtgroup_kn_lock_live(parent_kn);
	if (!prdtgrp) {
		ret = -ENODEV;
		goto out_unlock;
	}

	if (rtype == RDTMON_GROUP &&
	    (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
	     prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto out_unlock;
	}

	/* allocate the rdtgroup. */
	rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
	if (!rdtgrp) {
		ret = -ENOSPC;
		rdt_last_cmd_puts("Kernel out of memory\n");
		goto out_unlock;
	}
	*r = rdtgrp;
	rdtgrp->mon.parent = prdtgrp;
	rdtgrp->type = rtype;
	INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list);

	/* kernfs creates the directory for rdtgrp */
	kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
	if (IS_ERR(kn)) {
		ret = PTR_ERR(kn);
		rdt_last_cmd_puts("kernfs create error\n");
		goto out_free_rgrp;
	}
	rdtgrp->kn = kn;

	/*
	 * kernfs_remove() will drop the reference count on "kn" which
	 * will free it. But we still need it to stick around for the
	 * rdtgroup_kn_unlock(kn) call. Take one extra reference here,
	 * which will be dropped by kernfs_put() in rdtgroup_remove().
	 */
	kernfs_get(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		rdt_last_cmd_puts("kernfs perm error\n");
		goto out_destroy;
	}

	files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype);
	ret = rdtgroup_add_files(kn, files);
	if (ret) {
		rdt_last_cmd_puts("kernfs fill error\n");
		goto out_destroy;
	}

	if (rdt_mon_capable) {
		ret = alloc_rmid();
		if (ret < 0) {
			rdt_last_cmd_puts("Out of RMIDs\n");
			goto out_destroy;
		}
		rdtgrp->mon.rmid = ret;

		ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
		if (ret) {
			rdt_last_cmd_puts("kernfs subdir error\n");
			goto out_idfree;
		}
	}
	kernfs_activate(kn);

	/*
	 * The caller unlocks the parent_kn upon success.
	 */
	return 0;

out_idfree:
	free_rmid(rdtgrp->mon.rmid);
out_destroy:
	kernfs_put(rdtgrp->kn);
	kernfs_remove(rdtgrp->kn);
out_free_rgrp:
	kfree(rdtgrp);
out_unlock:
	rdtgroup_kn_unlock(parent_kn);
	return ret;
}

static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
{
	kernfs_remove(rgrp->kn);
	free_rmid(rgrp->mon.rmid);
	rdtgroup_remove(rgrp);
}

/*
 * Create a monitor group under the "mon_groups" directory of a control
 * and monitor group (ctrl_mon). This is a resource group
 * to monitor a subset of tasks and cpus in its parent ctrl_mon group.
 */
static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
			      const char *name, umode_t mode)
{
	struct rdtgroup *rdtgrp, *prgrp;
	int ret;

	ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp);
	if (ret)
		return ret;

	prgrp = rdtgrp->mon.parent;
	rdtgrp->closid = prgrp->closid;

	/*
	 * Add the rdtgrp to the list of rdtgrps the parent
	 * ctrl_mon group has to track.
	 */
	list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);

	rdtgroup_kn_unlock(parent_kn);
	return ret;
}

/*
 * These are rdtgroups created under the root directory. Can be used
 * to allocate and monitor resources.
 */
static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
				   const char *name, umode_t mode)
{
	struct rdtgroup *rdtgrp;
	struct kernfs_node *kn;
	u32 closid;
	int ret;

	ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp);
	if (ret)
		return ret;

	kn = rdtgrp->kn;
	ret = closid_alloc();
	if (ret < 0) {
		rdt_last_cmd_puts("Out of CLOSIDs\n");
		goto out_common_fail;
	}
	closid = ret;
	ret = 0;

	rdtgrp->closid = closid;
	ret = rdtgroup_init_alloc(rdtgrp);
	if (ret < 0)
		goto out_id_free;

	list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);

	if (rdt_mon_capable) {
		/*
		 * Create an empty mon_groups directory to hold the subset
		 * of tasks and cpus to monitor.
		 */
		ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL);
		if (ret) {
			rdt_last_cmd_puts("kernfs subdir error\n");
			goto out_del_list;
		}
	}

	goto out_unlock;

out_del_list:
	list_del(&rdtgrp->rdtgroup_list);
out_id_free:
	closid_free(closid);
out_common_fail:
	mkdir_rdt_prepare_clean(rdtgrp);
out_unlock:
	rdtgroup_kn_unlock(parent_kn);
	return ret;
}

/*
 * We allow creating mon groups only within a directory called "mon_groups"
 * which is present in every ctrl_mon group. Check if this is a valid
 * "mon_groups" directory.
 *
 * 1. The directory should be named "mon_groups".
 * 2. The mon group itself should "not" be named "mon_groups".
 *    This makes sure the "mon_groups" directory always has a ctrl_mon group
 *    as parent.
 */
static bool is_mon_groups(struct kernfs_node *kn, const char *name)
{
	return (!strcmp(kn->name, "mon_groups") &&
		strcmp(name, "mon_groups"));
}
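/*
 * User-visible effect of the mkdir hook below (illustrative):
 *
 *	# mkdir /sys/fs/resctrl/p0			<- new ctrl_mon group
 *	# mkdir /sys/fs/resctrl/p0/mon_groups/m0	<- new mon group in p0
 *
 * mkdir in any other location fails with -EPERM.
 */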
static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
			  umode_t mode)
{
	/* Do not accept '\n' to avoid an unparsable situation. */
	if (strchr(name, '\n'))
		return -EINVAL;

	/*
	 * If the parent directory is the root directory and RDT
	 * allocation is supported, add a control and monitoring
	 * subdirectory.
	 */
	if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn)
		return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode);

	/*
	 * If RDT monitoring is supported and the parent directory is a valid
	 * "mon_groups" directory, add a monitoring subdirectory.
	 */
	if (rdt_mon_capable && is_mon_groups(parent_kn, name))
		return rdtgroup_mkdir_mon(parent_kn, name, mode);

	return -EPERM;
}

static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
{
	struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
	int cpu;

	/* Give any tasks back to the parent group */
	rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);

	/* Update per cpu rmid of the moved CPUs first */
	for_each_cpu(cpu, &rdtgrp->cpu_mask)
		per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid;
	/*
	 * Update the MSR on moved CPUs and CPUs which have a moved
	 * task running on them.
	 */
	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
	update_closid_rmid(tmpmask, NULL);

	rdtgrp->flags = RDT_DELETED;
	free_rmid(rdtgrp->mon.rmid);

	/*
	 * Remove the rdtgrp from the parent ctrl_mon group's list
	 */
	WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
	list_del(&rdtgrp->mon.crdtgrp_list);

	kernfs_remove(rdtgrp->kn);

	return 0;
}

static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp)
{
	rdtgrp->flags = RDT_DELETED;
	list_del(&rdtgrp->rdtgroup_list);

	kernfs_remove(rdtgrp->kn);
	return 0;
}

static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
{
	int cpu;

	/* Give any tasks back to the default group */
	rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);

	/* Give any CPUs back to the default group */
	cpumask_or(&rdtgroup_default.cpu_mask,
		   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

	/* Update per cpu closid and rmid of the moved CPUs first */
	for_each_cpu(cpu, &rdtgrp->cpu_mask) {
		per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid;
		per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid;
	}

	/*
	 * Update the MSR on moved CPUs and CPUs which have a moved
	 * task running on them.
	 */
	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
	update_closid_rmid(tmpmask, NULL);

	closid_free(rdtgrp->closid);
	free_rmid(rdtgrp->mon.rmid);

	rdtgroup_ctrl_remove(rdtgrp);

	/*
	 * Free all the child monitor group rmids.
	 */
	free_all_child_rdtgrp(rdtgrp);

	return 0;
}
static int rdtgroup_rmdir(struct kernfs_node *kn)
{
	struct kernfs_node *parent_kn = kn->parent;
	struct rdtgroup *rdtgrp;
	cpumask_var_t tmpmask;
	int ret = 0;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	rdtgrp = rdtgroup_kn_lock_live(kn);
	if (!rdtgrp) {
		ret = -EPERM;
		goto out;
	}

	/*
	 * If the rdtgroup is a ctrl_mon group and parent directory
	 * is the root directory, remove the ctrl_mon group.
	 *
	 * If the rdtgroup is a mon group and parent directory
	 * is a valid "mon_groups" directory, remove the mon group.
	 */
	if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn &&
	    rdtgrp != &rdtgroup_default) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			ret = rdtgroup_ctrl_remove(rdtgrp);
		} else {
			ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask);
		}
	} else if (rdtgrp->type == RDTMON_GROUP &&
		   is_mon_groups(parent_kn, kn->name)) {
		ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask);
	} else {
		ret = -EPERM;
	}

out:
	rdtgroup_kn_unlock(kn);
	free_cpumask_var(tmpmask);
	return ret;
}

static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
{
	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
		seq_puts(seq, ",cdp");

	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
		seq_puts(seq, ",cdpl2");

	if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl))
		seq_puts(seq, ",mba_MBps");

	return 0;
}

static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
	.mkdir		= rdtgroup_mkdir,
	.rmdir		= rdtgroup_rmdir,
	.show_options	= rdtgroup_show_options,
};

static int __init rdtgroup_setup_root(void)
{
	int ret;

	rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
				      KERNFS_ROOT_CREATE_DEACTIVATED |
				      KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
				      &rdtgroup_default);
	if (IS_ERR(rdt_root))
		return PTR_ERR(rdt_root);

	mutex_lock(&rdtgroup_mutex);

	rdtgroup_default.closid = 0;
	rdtgroup_default.mon.rmid = 0;
	rdtgroup_default.type = RDTCTRL_GROUP;
	INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);

	list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);

	ret = rdtgroup_add_files(kernfs_root_to_node(rdt_root), RF_CTRL_BASE);
	if (ret) {
		kernfs_destroy_root(rdt_root);
		goto out;
	}

	rdtgroup_default.kn = kernfs_root_to_node(rdt_root);
	kernfs_activate(rdtgroup_default.kn);

out:
	mutex_unlock(&rdtgroup_mutex);

	return ret;
}

static void domain_destroy_mon_state(struct rdt_domain *d)
{
	bitmap_free(d->rmid_busy_llc);
	kfree(d->mbm_total);
	kfree(d->mbm_local);
}
void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d)
{
	lockdep_assert_held(&rdtgroup_mutex);

	if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA)
		mba_sc_domain_destroy(r, d);

	if (!r->mon_capable)
		return;

	/*
	 * If resctrl is mounted, remove all the
	 * per domain monitor data directories.
	 */
	if (static_branch_unlikely(&rdt_mon_enable_key))
		rmdir_mondata_subdir_allrdtgrp(r, d->id);

	if (is_mbm_enabled())
		cancel_delayed_work(&d->mbm_over);
	if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
		/*
		 * When a package is going down, forcefully
		 * decrement rmid->ebusy. There is no way to know
		 * that the L3 was flushed and hence may lead to
		 * incorrect counts in rare scenarios, but leaving
		 * the RMID as busy creates RMID leaks if the
		 * package never comes back.
		 */
		__check_limbo(d, true);
		cancel_delayed_work(&d->cqm_limbo);
	}

	domain_destroy_mon_state(d);
}

static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
{
	size_t tsize;

	if (is_llc_occupancy_enabled()) {
		d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL);
		if (!d->rmid_busy_llc)
			return -ENOMEM;
	}
	if (is_mbm_total_enabled()) {
		tsize = sizeof(*d->mbm_total);
		d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
		if (!d->mbm_total) {
			bitmap_free(d->rmid_busy_llc);
			return -ENOMEM;
		}
	}
	if (is_mbm_local_enabled()) {
		tsize = sizeof(*d->mbm_local);
		d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
		if (!d->mbm_local) {
			bitmap_free(d->rmid_busy_llc);
			kfree(d->mbm_total);
			return -ENOMEM;
		}
	}

	return 0;
}

int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d)
{
	int err;

	lockdep_assert_held(&rdtgroup_mutex);

	if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA)
		/* RDT_RESOURCE_MBA is never mon_capable */
		return mba_sc_domain_allocate(r, d);

	if (!r->mon_capable)
		return 0;

	err = domain_setup_mon_state(r, d);
	if (err)
		return err;

	if (is_mbm_enabled()) {
		INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
		mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
	}

	if (is_llc_occupancy_enabled())
		INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);

	/* If resctrl is mounted, add per domain monitor data directories. */
	if (static_branch_unlikely(&rdt_mon_enable_key))
		mkdir_mondata_subdir_allrdtgrp(r, d);

	return 0;
}
/*
 * rdtgroup_init - rdtgroup initialization
 *
 * Set up the resctrl filesystem: set up the root, create the mount point,
 * register the rdtgroup filesystem, and initialize files under the root
 * directory.
 *
 * Return: 0 on success or -errno
 */
int __init rdtgroup_init(void)
{
	int ret = 0;

	seq_buf_init(&last_cmd_status, last_cmd_status_buf,
		     sizeof(last_cmd_status_buf));

	ret = rdtgroup_setup_root();
	if (ret)
		return ret;

	ret = sysfs_create_mount_point(fs_kobj, "resctrl");
	if (ret)
		goto cleanup_root;

	ret = register_filesystem(&rdt_fs_type);
	if (ret)
		goto cleanup_mountpoint;

	/*
	 * Adding the resctrl debugfs directory here may not be ideal since
	 * it would let the resctrl debugfs directory appear on the debugfs
	 * filesystem before the resctrl filesystem is mounted.
	 * It may also be ok since that would enable debugging of RDT before
	 * resctrl is mounted.
	 * The reason why the debugfs directory is created here and not in
	 * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and
	 * during the debugfs directory creation also &sb->s_type->i_mutex_key
	 * (the lockdep class of inode->i_rwsem). Other filesystem
	 * interactions (eg. SyS_getdents) have the lock ordering:
	 * &sb->s_type->i_mutex_key --> &mm->mmap_lock
	 * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex
	 * is taken, thus creating the dependency:
	 * &mm->mmap_lock --> rdtgroup_mutex, which can cause
	 * issues considering the other two lock dependencies.
	 * By creating the debugfs directory here we avoid a dependency
	 * that may cause deadlock (even though file operations cannot
	 * occur until the filesystem is mounted, there is no way to
	 * tell lockdep that).
	 */
	debugfs_resctrl = debugfs_create_dir("resctrl", NULL);

	return 0;

cleanup_mountpoint:
	sysfs_remove_mount_point(fs_kobj, "resctrl");
cleanup_root:
	kernfs_destroy_root(rdt_root);

	return ret;
}

void __exit rdtgroup_exit(void)
{
	debugfs_remove_recursive(debugfs_resctrl);
	unregister_filesystem(&rdt_fs_type);
	sysfs_remove_mount_point(fs_kobj, "resctrl");
	kernfs_destroy_root(rdt_root);
}