// SPDX-License-Identifier: GPL-2.0-only
/*
 * User interface for Resource Allocation in Resource Director Technology(RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/fs_parser.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/task_work.h>
#include <linux/user_namespace.h>

#include <uapi/linux/magic.h>

#include <asm/resctrl.h>
#include "internal.h"

DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
static struct kernfs_root *rdt_root;
struct rdtgroup rdtgroup_default;
LIST_HEAD(rdt_all_groups);

/* list of entries for the schemata file */
LIST_HEAD(resctrl_schema_all);

/* Kernel fs node for "info" directory under root */
static struct kernfs_node *kn_info;

/* Kernel fs node for "mon_groups" directory under root */
static struct kernfs_node *kn_mongrp;

/* Kernel fs node for "mon_data" directory under root */
static struct kernfs_node *kn_mondata;

static struct seq_buf last_cmd_status;
static char last_cmd_status_buf[512];

struct dentry *debugfs_resctrl;

void rdt_last_cmd_clear(void)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_clear(&last_cmd_status);
}

void rdt_last_cmd_puts(const char *s)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_puts(&last_cmd_status, s);
}

void rdt_last_cmd_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_vprintf(&last_cmd_status, fmt, ap);
	va_end(ap);
}

/*
 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
 * we can keep a bitmap of free CLOSIDs in a single integer.
 *
 * Using a global CLOSID across all resources has some advantages and
 * some drawbacks:
 * + We can simply set "current->closid" to assign a task to a resource
 *   group.
 * + Context switch code can avoid extra memory references deciding which
 *   CLOSID to load into the PQR_ASSOC MSR
 * - We give up some options in configuring resource groups across multi-socket
 *   systems.
 * - Our choices on how to configure each resource become progressively more
 *   limited as the number of resources grows.
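 *
 * For example, if the smallest resource supports four CLOSIDs,
 * closid_init() below computes a free map of 0b1111 and then clears
 * bit 0 for the default group, leaving 0b1110: CLOSIDs 1-3 are
 * allocatable and closids_supported() reports 4.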
 */
static int closid_free_map;
static int closid_free_map_len;

int closids_supported(void)
{
	return closid_free_map_len;
}

static void closid_init(void)
{
	struct resctrl_schema *s;
	u32 rdt_min_closid = 32;

	/* Compute rdt_min_closid across all resources */
	list_for_each_entry(s, &resctrl_schema_all, list)
		rdt_min_closid = min(rdt_min_closid, s->num_closid);

	closid_free_map = BIT_MASK(rdt_min_closid) - 1;

	/* CLOSID 0 is always reserved for the default group */
	closid_free_map &= ~1;
	closid_free_map_len = rdt_min_closid;
}

static int closid_alloc(void)
{
	u32 closid = ffs(closid_free_map);

	if (closid == 0)
		return -ENOSPC;
	closid--;
	closid_free_map &= ~(1 << closid);

	return closid;
}

void closid_free(int closid)
{
	closid_free_map |= 1 << closid;
}

/**
 * closid_allocated - test if provided closid is in use
 * @closid: closid to be tested
 *
 * Return: true if @closid is currently associated with a resource group,
 * false if @closid is free
 */
static bool closid_allocated(unsigned int closid)
{
	return (closid_free_map & (1 << closid)) == 0;
}

/**
 * rdtgroup_mode_by_closid - Return mode of resource group with closid
 * @closid: closid of the resource group
 *
 * Each resource group is associated with a @closid. Here the mode
 * of a resource group can be queried by searching for it using its closid.
 *
 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
 */
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
{
	struct rdtgroup *rdtgrp;

	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (rdtgrp->closid == closid)
			return rdtgrp->mode;
	}

	return RDT_NUM_MODES;
}

static const char * const rdt_mode_str[] = {
	[RDT_MODE_SHAREABLE]		= "shareable",
	[RDT_MODE_EXCLUSIVE]		= "exclusive",
	[RDT_MODE_PSEUDO_LOCKSETUP]	= "pseudo-locksetup",
	[RDT_MODE_PSEUDO_LOCKED]	= "pseudo-locked",
};

/**
 * rdtgroup_mode_str - Return the string representation of mode
 * @mode: the resource group mode as &enum rdtgroup_mode
 *
 * Return: string representation of valid mode, "unknown" otherwise
 */
static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
{
	if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
		return "unknown";

	return rdt_mode_str[mode];
}

/* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
				.ia_uid = current_fsuid(),
				.ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}

static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
{
	struct kernfs_node *kn;
	int ret;

	kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				  0, rft->kf_ops, rft, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return 0;
}

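/*
 * rdtgroup_seqfile_show() and rdtgroup_file_write() fan the kernfs
 * callbacks out to the per-file rftype handlers stashed in kn->priv
 * by rdtgroup_add_file().
 */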
static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	struct rftype *rft = of->kn->priv;

	if (rft->seq_show)
		return rft->seq_show(of, m, arg);
	return 0;
}

static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct rftype *rft = of->kn->priv;

	if (rft->write)
		return rft->write(of, buf, nbytes, off);

	return -EINVAL;
}

static const struct kernfs_ops rdtgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= rdtgroup_file_write,
	.seq_show		= rdtgroup_seqfile_show,
};

static const struct kernfs_ops kf_mondata_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.seq_show		= rdtgroup_mondata_show,
};

static bool is_cpu_list(struct kernfs_open_file *of)
{
	struct rftype *rft = of->kn->priv;

	return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
}

static int rdtgroup_cpus_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct cpumask *mask;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);

	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				mask = &rdtgrp->plr->d->cpu_mask;
				seq_printf(s, is_cpu_list(of) ?
					   "%*pbl\n" : "%*pb\n",
					   cpumask_pr_args(mask));
			}
		} else {
			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
				   cpumask_pr_args(&rdtgrp->cpu_mask));
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

/*
 * This is safe against resctrl_sched_in() called from __switch_to()
 * because __switch_to() is executed with interrupts disabled. A local call
 * from update_closid_rmid() is protected against __switch_to() because
 * preemption is disabled.
 */
static void update_cpu_closid_rmid(void *info)
{
	struct rdtgroup *r = info;

	if (r) {
		this_cpu_write(pqr_state.default_closid, r->closid);
		this_cpu_write(pqr_state.default_rmid, r->mon.rmid);
	}

	/*
	 * We cannot unconditionally write the MSR because the current
	 * executing task might have its own closid selected. Just reuse
	 * the context switch code.
	 */
	resctrl_sched_in();
}

/*
 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
 *
 * Per task closids/rmids must have been set up before calling this function.
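 * update_cpu_closid_rmid() runs on each CPU in @cpu_mask, via IPI for
 * remote CPUs, and completion is awaited.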
 */
static void
update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
{
	on_each_cpu_mask(cpu_mask, update_cpu_closid_rmid, r, 1);
}

static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			  cpumask_var_t tmpmask)
{
	struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
	struct list_head *head;

	/* Check whether cpus belong to parent ctrl group */
	cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
	if (!cpumask_empty(tmpmask)) {
		rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
		return -EINVAL;
	}

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (!cpumask_empty(tmpmask)) {
		/* Give any dropped cpus to parent rdtgroup */
		cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, prgrp);
	}

	/*
	 * If we added cpus, remove them from previous group that owned them
	 * and update per-cpu rmid
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (!cpumask_empty(tmpmask)) {
		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			if (crgrp == rdtgrp)
				continue;
			cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
				       tmpmask);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	return 0;
}

static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
{
	struct rdtgroup *crgrp;

	cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
	/* update the child mon group masks as well */
	list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
		cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
}

static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			   cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
{
	struct rdtgroup *r, *crgrp;
	struct list_head *head;

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (!cpumask_empty(tmpmask)) {
		/* Can't drop from default group */
		if (rdtgrp == &rdtgroup_default) {
			rdt_last_cmd_puts("Can't drop CPUs from default group\n");
			return -EINVAL;
		}

		/* Give any dropped cpus to rdtgroup_default */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, &rdtgroup_default);
	}

	/*
	 * If we added cpus, remove them from previous group and
	 * the prev group's child groups that owned them
	 * and update per-cpu closid/rmid.
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (!cpumask_empty(tmpmask)) {
		list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
			if (r == rdtgrp)
				continue;
			cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
			if (!cpumask_empty(tmpmask1))
				cpumask_rdtgrp_clear(r, tmpmask1);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	/*
	 * Clear child mon group masks since there is a new parent mask
	 * now and update the rmid for the cpus the child lost.
	 */
	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
		cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
		update_closid_rmid(tmpmask, rdtgrp);
		cpumask_clear(&crgrp->cpu_mask);
	}

	return 0;
}

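/*
 * Handler for the "cpus" and "cpus_list" files. "cpus" takes a hex CPU
 * mask (e.g. "f0" for CPUs 4-7), "cpus_list" a range list (e.g. "4-7").
 * Offline CPUs are rejected.
 */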
static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	cpumask_var_t tmpmask, newmask, tmpmask1;
	struct rdtgroup *rdtgrp;
	int ret;

	if (!buf)
		return -EINVAL;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		return -ENOMEM;
	}
	if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		free_cpumask_var(newmask);
		return -ENOMEM;
	}

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto unlock;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	if (is_cpu_list(of))
		ret = cpulist_parse(buf, newmask);
	else
		ret = cpumask_parse(buf, newmask);

	if (ret) {
		rdt_last_cmd_puts("Bad CPU list/mask\n");
		goto unlock;
	}

	/* check that user didn't specify any offline cpus */
	cpumask_andnot(tmpmask, newmask, cpu_online_mask);
	if (!cpumask_empty(tmpmask)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Can only assign online CPUs\n");
		goto unlock;
	}

	if (rdtgrp->type == RDTCTRL_GROUP)
		ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
	else if (rdtgrp->type == RDTMON_GROUP)
		ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
	else
		ret = -EINVAL;

unlock:
	rdtgroup_kn_unlock(of->kn);
	free_cpumask_var(tmpmask);
	free_cpumask_var(newmask);
	free_cpumask_var(tmpmask1);

	return ret ?: nbytes;
}

/**
 * rdtgroup_remove - the helper to remove resource group safely
 * @rdtgrp: resource group to remove
 *
 * On resource group creation via a mkdir, an extra kernfs_node reference is
 * taken to ensure that the rdtgroup structure remains accessible for the
 * rdtgroup_kn_unlock() calls where it is removed.
 *
 * Drop the extra reference here, then free the rdtgroup structure.
 *
 * Return: void
 */
static void rdtgroup_remove(struct rdtgroup *rdtgrp)
{
	kernfs_put(rdtgrp->kn);
	kfree(rdtgrp);
}

static void _update_task_closid_rmid(void *task)
{
	/*
	 * If the task is still current on this CPU, update PQR_ASSOC MSR.
	 * Otherwise, the MSR is updated when the task is scheduled in.
	 */
	if (task == current)
		resctrl_sched_in();
}

static void update_task_closid_rmid(struct task_struct *t)
{
	if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
		smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
	else
		_update_task_closid_rmid(t);
}

static int __rdtgroup_move_task(struct task_struct *tsk,
				struct rdtgroup *rdtgrp)
{
	/* If the task is already in rdtgrp, no need to move the task. */
	if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid &&
	     tsk->rmid == rdtgrp->mon.rmid) ||
	    (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid &&
	     tsk->closid == rdtgrp->mon.parent->closid))
		return 0;

	/*
	 * Set the task's closid/rmid before the PQR_ASSOC MSR can be
	 * updated with them.
	 *
	 * For ctrl_mon groups, move both closid and rmid.
	 * For monitor groups, tasks can only be moved from
	 * their parent CTRL group.
	 */

	if (rdtgrp->type == RDTCTRL_GROUP) {
		WRITE_ONCE(tsk->closid, rdtgrp->closid);
		WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
	} else if (rdtgrp->type == RDTMON_GROUP) {
		if (rdtgrp->mon.parent->closid == tsk->closid) {
			WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
		} else {
			rdt_last_cmd_puts("Can't move task to different control group\n");
			return -EINVAL;
		}
	}

	/*
	 * Ensure the task's closid and rmid are written before determining if
	 * the task is current, which will decide if it needs to be interrupted.
	 * This pairs with the full barrier between the rq->curr update and
	 * resctrl_sched_in() during context switch.
	 */
	smp_mb();

	/*
	 * By now, the task's closid and rmid are set. If the task is current
	 * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
	 * group go into effect. If the task is not current, the MSR will be
	 * updated when the task is scheduled in.
	 */
	update_task_closid_rmid(tsk);

	return 0;
}

static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_alloc_capable &&
	       (r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
}

static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_mon_capable &&
	       (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
}

/**
 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
 * @r: Resource group
 *
 * Return: 1 if tasks have been assigned to @r, 0 otherwise
 */
int rdtgroup_tasks_assigned(struct rdtgroup *r)
{
	struct task_struct *p, *t;
	int ret = 0;

	lockdep_assert_held(&rdtgroup_mutex);

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (is_closid_match(t, r) || is_rmid_match(t, r)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

static int rdtgroup_task_write_permission(struct task_struct *task,
					  struct kernfs_open_file *of)
{
	const struct cred *tcred = get_task_cred(task);
	const struct cred *cred = current_cred();
	int ret = 0;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
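	 * The mover must be root or have an effective uid matching the
	 * target task's real or saved uid.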
	 */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid)) {
		rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
		ret = -EPERM;
	}

	put_cred(tcred);
	return ret;
}

static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
			      struct kernfs_open_file *of)
{
	struct task_struct *tsk;
	int ret;

	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			rdt_last_cmd_printf("No task %d\n", pid);
			return -ESRCH;
		}
	} else {
		tsk = current;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	ret = rdtgroup_task_write_permission(tsk, of);
	if (!ret)
		ret = __rdtgroup_move_task(tsk, rdtgrp);

	put_task_struct(tsk);
	return ret;
}

static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;
	pid_t pid;

	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return -EINVAL;
	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	ret = rdtgroup_move_task(pid, rdtgrp, of);

unlock:
	rdtgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
{
	struct task_struct *p, *t;

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (is_closid_match(t, r) || is_rmid_match(t, r))
			seq_printf(s, "%d\n", t->pid);
	}
	rcu_read_unlock();
}

static int rdtgroup_tasks_show(struct kernfs_open_file *of,
			       struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		show_rdt_tasks(rdtgrp, s);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

#ifdef CONFIG_PROC_CPU_RESCTRL

/*
 * A task can only be part of one resctrl control group and of one monitor
 * group which is associated to that control group.
 *
 * 1)   res:
 *      mon:
 *
 *    resctrl is not available.
 *
 * 2)   res:/
 *      mon:
 *
 *    Task is part of the root resctrl control group, and it is not associated
 *    to any monitor group.
 *
 * 3)   res:/
 *      mon:mon0
 *
 *    Task is part of the root resctrl control group and monitor group mon0.
 *
 * 4)   res:group0
 *      mon:
 *
 *    Task is part of resctrl control group group0, and it is not associated
 *    to any monitor group.
 *
 * 5)   res:group0
 *      mon:mon1
 *
 *    Task is part of resctrl control group group0 and monitor group mon1.
 */
int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns,
		      struct pid *pid, struct task_struct *tsk)
{
	struct rdtgroup *rdtg;
	int ret = 0;

	mutex_lock(&rdtgroup_mutex);

	/* Return empty if resctrl has not been mounted.
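	 * This matches case (1) in the comment above: neither a control
	 * group nor a monitor group is reported.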
	 */
	if (!static_branch_unlikely(&rdt_enable_key)) {
		seq_puts(s, "res:\nmon:\n");
		goto unlock;
	}

	list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) {
		struct rdtgroup *crg;

		/*
		 * Task information is only relevant for shareable
		 * and exclusive groups.
		 */
		if (rdtg->mode != RDT_MODE_SHAREABLE &&
		    rdtg->mode != RDT_MODE_EXCLUSIVE)
			continue;

		if (rdtg->closid != tsk->closid)
			continue;

		seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "",
			   rdtg->kn->name);
		seq_puts(s, "mon:");
		list_for_each_entry(crg, &rdtg->mon.crdtgrp_list,
				    mon.crdtgrp_list) {
			if (tsk->rmid != crg->mon.rmid)
				continue;
			seq_printf(s, "%s", crg->kn->name);
			break;
		}
		seq_putc(s, '\n');
		goto unlock;
	}
	/*
	 * The above search should succeed. Otherwise return
	 * with an error.
	 */
	ret = -ENOENT;
unlock:
	mutex_unlock(&rdtgroup_mutex);

	return ret;
}
#endif

static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
				    struct seq_file *seq, void *v)
{
	int len;

	mutex_lock(&rdtgroup_mutex);
	len = seq_buf_used(&last_cmd_status);
	if (len)
		seq_printf(seq, "%.*s", len, last_cmd_status_buf);
	else
		seq_puts(seq, "ok\n");
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}

static int rdt_num_closids_show(struct kernfs_open_file *of,
				struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;

	seq_printf(seq, "%u\n", s->num_closid);
	return 0;
}

static int rdt_default_ctrl_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%x\n", r->default_ctrl);
	return 0;
}

static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
	return 0;
}

static int rdt_shareable_bits_show(struct kernfs_open_file *of,
				   struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%x\n", r->cache.shareable_bits);
	return 0;
}

/**
 * rdt_bit_usage_show - Display current usage of resources
 *
 * A domain is a shared resource whose allocation can differ between
 * resource groups. Here we display the current regions of the domain as an
 * annotated bitmask. For each domain of this resource, its allocation
 * bitmask is annotated as below to indicate the current usage of the
 * corresponding bit:
 *   0 - currently unused
 *   X - currently available for sharing and used by software and hardware
 *   H - currently used by hardware only but available for software use
 *   S - currently used and shareable by software only
 *   E - currently used exclusively by one resource group
 *   P - currently pseudo-locked by one resource group
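 *
 * Each domain is printed as "<id>=<annotated bitmask>" with the most
 * significant bit first and domains separated by ';'. A domain with a
 * 16 bit CBM could, for example, read "0=00000000EESSSSHH".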
 */
static int rdt_bit_usage_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	/*
	 * Use unsigned long even though only 32 bits are used to ensure
	 * test_bit() is used safely.
	 */
	unsigned long sw_shareable = 0, hw_shareable = 0;
	unsigned long exclusive = 0, pseudo_locked = 0;
	struct rdt_resource *r = s->res;
	struct rdt_domain *dom;
	int i, hwb, swb, excl, psl;
	enum rdtgrp_mode mode;
	bool sep = false;
	u32 ctrl_val;

	mutex_lock(&rdtgroup_mutex);
	hw_shareable = r->cache.shareable_bits;
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_putc(seq, ';');
		sw_shareable = 0;
		exclusive = 0;
		seq_printf(seq, "%d=", dom->id);
		for (i = 0; i < closids_supported(); i++) {
			if (!closid_allocated(i))
				continue;
			ctrl_val = resctrl_arch_get_config(r, dom, i,
							   s->conf_type);
			mode = rdtgroup_mode_by_closid(i);
			switch (mode) {
			case RDT_MODE_SHAREABLE:
				sw_shareable |= ctrl_val;
				break;
			case RDT_MODE_EXCLUSIVE:
				exclusive |= ctrl_val;
				break;
			case RDT_MODE_PSEUDO_LOCKSETUP:
				/*
				 * RDT_MODE_PSEUDO_LOCKSETUP is possible
				 * here but not included since the CBM
				 * associated with this CLOSID in this mode
				 * is not initialized and no task or cpu can be
				 * assigned this CLOSID.
				 */
				break;
			case RDT_MODE_PSEUDO_LOCKED:
			case RDT_NUM_MODES:
				WARN(1,
				     "invalid mode for closid %d\n", i);
				break;
			}
		}
		for (i = r->cache.cbm_len - 1; i >= 0; i--) {
			pseudo_locked = dom->plr ? dom->plr->cbm : 0;
			hwb = test_bit(i, &hw_shareable);
			swb = test_bit(i, &sw_shareable);
			excl = test_bit(i, &exclusive);
			psl = test_bit(i, &pseudo_locked);
			if (hwb && swb)
				seq_putc(seq, 'X');
			else if (hwb && !swb)
				seq_putc(seq, 'H');
			else if (!hwb && swb)
				seq_putc(seq, 'S');
			else if (excl)
				seq_putc(seq, 'E');
			else if (psl)
				seq_putc(seq, 'P');
			else /* Unused bits remain */
				seq_putc(seq, '0');
		}
		sep = true;
	}
	seq_putc(seq, '\n');
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}

static int rdt_min_bw_show(struct kernfs_open_file *of,
			   struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.min_bw);
	return 0;
}

static int rdt_num_rmids_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_rmid);

	return 0;
}

static int rdt_mon_features_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct mon_evt *mevt;

	list_for_each_entry(mevt, &r->evt_list, list) {
		seq_printf(seq, "%s\n", mevt->name);
		if (mevt->configurable)
			seq_printf(seq, "%s_config\n", mevt->name);
	}

	return 0;
}

static int rdt_bw_gran_show(struct kernfs_open_file *of,
			    struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.bw_gran);
	return 0;
}

static int rdt_delay_linear_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.delay_linear);
	return 0;
}

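/*
 * A freed RMID stays in limbo until its cache occupancy drops below
 * resctrl_rmid_realloc_threshold. max_threshold_occ_show() and
 * max_threshold_occ_write() below expose that threshold in bytes.
 */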
static int max_threshold_occ_show(struct kernfs_open_file *of,
				  struct seq_file *seq, void *v)
{
	seq_printf(seq, "%u\n", resctrl_rmid_realloc_threshold);

	return 0;
}

static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
					 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD)
		seq_puts(seq, "per-thread\n");
	else
		seq_puts(seq, "max\n");

	return 0;
}

static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
				       char *buf, size_t nbytes, loff_t off)
{
	unsigned int bytes;
	int ret;

	ret = kstrtouint(buf, 0, &bytes);
	if (ret)
		return ret;

	if (bytes > resctrl_rmid_realloc_limit)
		return -EINVAL;

	resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(bytes);

	return nbytes;
}

/*
 * rdtgroup_mode_show - Display mode of this resource group
 */
static int rdtgroup_mode_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));

	rdtgroup_kn_unlock(of->kn);
	return 0;
}

static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
{
	switch (my_type) {
	case CDP_CODE:
		return CDP_DATA;
	case CDP_DATA:
		return CDP_CODE;
	default:
	case CDP_NONE:
		return CDP_NONE;
	}
}

/**
 * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
 * @r: Resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @type: The CDP configuration type to test.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Checks if provided @cbm intended to be used for @closid on domain
 * @d overlaps with any other closids or other hardware usage associated
 * with this domain. If @exclusive is true then only overlaps with
 * resource groups in exclusive mode will be considered. If @exclusive
 * is false then overlaps with any resource group or hardware entities
 * will be considered.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 *
 * Return: false if CBM does not overlap, true if it does.
 */
static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
				    unsigned long cbm, int closid,
				    enum resctrl_conf_type type, bool exclusive)
{
	enum rdtgrp_mode mode;
	unsigned long ctrl_b;
	int i;

	/* Check for any overlap with regions used by hardware directly */
	if (!exclusive) {
		ctrl_b = r->cache.shareable_bits;
		if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
			return true;
	}

	/* Check for overlap with other resource groups */
	for (i = 0; i < closids_supported(); i++) {
		ctrl_b = resctrl_arch_get_config(r, d, i, type);
		mode = rdtgroup_mode_by_closid(i);
		if (closid_allocated(i) && i != closid &&
		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
			if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
				if (exclusive) {
					if (mode == RDT_MODE_EXCLUSIVE)
						return true;
					continue;
				}
				return true;
			}
		}
	}

	return false;
}

/**
 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
 * @s: Schema for the resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Resources that can be allocated using a CBM can use the CBM to control
 * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
 * for overlap. The overlap test is not limited to the specific resource
 * for which the CBM is intended though - for CDP resources that share
 * the underlying hardware, the overlap check is also performed on the
 * peer CDP resource.
 *
 * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
 * overlap test.
 *
 * Return: true if CBM overlap detected, false if there is no overlap
 */
bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
			   unsigned long cbm, int closid, bool exclusive)
{
	enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
	struct rdt_resource *r = s->res;

	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type,
				    exclusive))
		return true;

	if (!resctrl_arch_get_cdp_enabled(r->rid))
		return false;
	return __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive);
}

/**
 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
 * @rdtgrp: Resource group to test.
 *
 * An exclusive resource group implies that there should be no sharing of
 * its allocated resources. At the time this group is considered to be
 * exclusive this test can determine if its current schemata supports this
 * setting by testing for overlap with all other resource groups.
 *
 * Return: true if resource group can be exclusive, false if there is overlap
 * with allocations of other resource groups and thus this resource group
 * cannot be exclusive.
 */
static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
{
	int closid = rdtgrp->closid;
	struct resctrl_schema *s;
	struct rdt_resource *r;
	bool has_cache = false;
	struct rdt_domain *d;
	u32 ctrl;

	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)
			continue;
		has_cache = true;
		list_for_each_entry(d, &r->domains, list) {
			ctrl = resctrl_arch_get_config(r, d, closid,
						       s->conf_type);
			if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) {
				rdt_last_cmd_puts("Schemata overlaps\n");
				return false;
			}
		}
	}

	if (!has_cache) {
		rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
		return false;
	}

	return true;
}

/**
 * rdtgroup_mode_write - Modify the resource group's mode
 */
static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	enum rdtgrp_mode mode;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	rdt_last_cmd_clear();

	mode = rdtgrp->mode;

	if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
	    (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
	    (!strcmp(buf, "pseudo-locksetup") &&
	     mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
	    (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
		goto out;

	if (mode == RDT_MODE_PSEUDO_LOCKED) {
		rdt_last_cmd_puts("Cannot change pseudo-locked group\n");
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(buf, "shareable")) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_SHAREABLE;
	} else if (!strcmp(buf, "exclusive")) {
		if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
			ret = -EINVAL;
			goto out;
		}
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_EXCLUSIVE;
	} else if (!strcmp(buf, "pseudo-locksetup")) {
		ret = rdtgroup_locksetup_enter(rdtgrp);
		if (ret)
			goto out;
		rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
	} else {
		rdt_last_cmd_puts("Unknown or unsupported mode\n");
		ret = -EINVAL;
	}

out:
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}

/**
 * rdtgroup_cbm_to_size - Translate CBM to size in bytes
 * @r: RDT resource to which @d belongs.
 * @d: RDT domain instance.
 * @cbm: bitmask for which the size should be computed.
 *
 * The bitmask provided, associated with the RDT domain instance @d, is
 * translated into how many bytes it represents. The size in bytes is
 * computed by first dividing the total cache size by the CBM length to
 * determine how many bytes each bit in the bitmask represents. The result
 * is multiplied by the number of bits set in the bitmask.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
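 *
 * For example, with an 8 MB cache and a 16 bit CBM each bit represents
 * 512 KB, so a CBM with four bits set translates to 2 MB.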
 */
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
				  struct rdt_domain *d, unsigned long cbm)
{
	struct cpu_cacheinfo *ci;
	unsigned int size = 0;
	int num_b, i;

	num_b = bitmap_weight(&cbm, r->cache.cbm_len);
	ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == r->cache_level) {
			size = ci->info_list[i].size / r->cache.cbm_len * num_b;
			break;
		}
	}

	return size;
}

/**
 * rdtgroup_size_show - Display size in bytes of allocated regions
 *
 * The "size" file mirrors the layout of the "schemata" file, printing the
 * size in bytes of each region instead of the capacity bitmask.
 */
static int rdtgroup_size_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct resctrl_schema *schema;
	enum resctrl_conf_type type;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	struct rdt_domain *d;
	unsigned int size;
	int ret = 0;
	u32 closid;
	bool sep;
	u32 ctrl;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		if (!rdtgrp->plr->d) {
			rdt_last_cmd_clear();
			rdt_last_cmd_puts("Cache domain offline\n");
			ret = -ENODEV;
		} else {
			seq_printf(s, "%*s:", max_name_width,
				   rdtgrp->plr->s->name);
			size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res,
						    rdtgrp->plr->d,
						    rdtgrp->plr->cbm);
			seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
		}
		goto out;
	}

	closid = rdtgrp->closid;

	list_for_each_entry(schema, &resctrl_schema_all, list) {
		r = schema->res;
		type = schema->conf_type;
		sep = false;
		seq_printf(s, "%*s:", max_name_width, schema->name);
		list_for_each_entry(d, &r->domains, list) {
			if (sep)
				seq_putc(s, ';');
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				size = 0;
			} else {
				if (is_mba_sc(r))
					ctrl = d->mbps_val[closid];
				else
					ctrl = resctrl_arch_get_config(r, d,
								       closid,
								       type);
				if (r->rid == RDT_RESOURCE_MBA ||
				    r->rid == RDT_RESOURCE_SMBA)
					size = ctrl;
				else
					size = rdtgroup_cbm_to_size(r, d, ctrl);
			}
			seq_printf(s, "%d=%u", d->id, size);
			sep = true;
		}
		seq_putc(s, '\n');
	}

out:
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

/*
 * rdtgroup information files for one cache resource.
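 *
 * rdtgroup_add_files() instantiates an entry in a directory when the
 * directory's fflags contain all of the entry's fflags: RF_TOP_INFO
 * files appear directly under info/, RF_CTRL_INFO and RF_MON_INFO files
 * in the per-resource info/ subdirectories, and RFTYPE_BASE files in
 * every resource group directory.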
 */
static struct rftype res_common_files[] = {
	{
		.name		= "last_cmd_status",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_last_cmd_status_show,
		.fflags		= RF_TOP_INFO,
	},
	{
		.name		= "num_closids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_closids_show,
		.fflags		= RF_CTRL_INFO,
	},
	{
		.name		= "mon_features",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_mon_features_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "num_rmids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_rmids_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "cbm_mask",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_default_ctrl_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_cbm_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_cbm_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "shareable_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_shareable_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "bit_usage",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bit_usage_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_bandwidth",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_bw_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "bandwidth_gran",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bw_gran_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "delay_linear",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_delay_linear_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	/*
	 * It is platform specific which (if any) capabilities are provided
	 * by thread_throttle_mode. Defer "fflags" initialization to platform
	 * discovery.
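	 * (thread_throttle_mode_init() fills in "fflags" once the
	 * platform is known.)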
	 */
	{
		.name		= "thread_throttle_mode",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_thread_throttle_mode_show,
	},
	{
		.name		= "max_threshold_occupancy",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= max_threshold_occ_write,
		.seq_show	= max_threshold_occ_show,
		.fflags		= RF_MON_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "cpus",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "cpus_list",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.flags		= RFTYPE_FLAGS_CPUS_LIST,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "tasks",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_tasks_write,
		.seq_show	= rdtgroup_tasks_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "schemata",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_schemata_write,
		.seq_show	= rdtgroup_schemata_show,
		.fflags		= RF_CTRL_BASE,
	},
	{
		.name		= "mode",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_mode_write,
		.seq_show	= rdtgroup_mode_show,
		.fflags		= RF_CTRL_BASE,
	},
	{
		.name		= "size",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdtgroup_size_show,
		.fflags		= RF_CTRL_BASE,
	},
};

static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
{
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	lockdep_assert_held(&rdtgroup_mutex);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) {
			ret = rdtgroup_add_file(kn, rft);
			if (ret)
				goto error;
		}
	}

	return 0;
error:
	pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
	while (--rft >= rfts) {
		if ((fflags & rft->fflags) == rft->fflags)
			kernfs_remove_by_name(kn, rft->name);
	}
	return ret;
}

static struct rftype *rdtgroup_get_rftype_by_name(const char *name)
{
	struct rftype *rfts, *rft;
	int len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (!strcmp(rft->name, name))
			return rft;
	}

	return NULL;
}

void __init thread_throttle_mode_init(void)
{
	struct rftype *rft;

	rft = rdtgroup_get_rftype_by_name("thread_throttle_mode");
	if (!rft)
		return;

	rft->fflags = RF_CTRL_INFO | RFTYPE_RES_MB;
}

/**
 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 *
 * The permissions of the named resctrl file, directory, or link are modified
 * to not allow read, write, or execute by any user.
 *
 * WARNING: This function is intended to communicate to the user that the
 * resctrl file has been locked down - that it is not relevant to the
 * particular state the system finds itself in.
 * It should not be relied on to protect from user access because after the
 * file's permissions are restricted the user can still change the
 * permissions using chmod from the command line.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn;
	int ret = 0;

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		iattr.ia_mode = S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode = S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode = S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}

/**
 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 * @mask: Mask of permissions that should be restored
 *
 * Restore the permissions of the named file. If @name is a directory the
 * permissions of its parent will be used.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
			     umode_t mask)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn, *parent;
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (!strcmp(rft->name, name))
			iattr.ia_mode = rft->mode & mask;
	}

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		parent = kernfs_get_parent(kn);
		if (parent) {
			iattr.ia_mode |= parent->mode;
			kernfs_put(parent);
		}
		iattr.ia_mode |= S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode |= S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode |= S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}

static int rdtgroup_mkdir_info_resdir(void *priv, char *name,
				      unsigned long fflags)
{
	struct kernfs_node *kn_subdir;
	int ret;

	kn_subdir = kernfs_create_dir(kn_info, name,
				      kn_info->mode, priv);
	if (IS_ERR(kn_subdir))
		return PTR_ERR(kn_subdir);

	ret = rdtgroup_kn_set_ugid(kn_subdir);
	if (ret)
		return ret;

	ret = rdtgroup_add_files(kn_subdir, fflags);
	if (!ret)
		kernfs_activate(kn_subdir);

	return ret;
}

static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
{
	struct resctrl_schema *s;
	struct rdt_resource *r;
	unsigned long fflags;
	char name[32];
	int ret;

	/* create the directory */
	kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
	if (IS_ERR(kn_info))
		return PTR_ERR(kn_info);

	ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
	if (ret)
		goto out_destroy;

	/* loop over enabled controls, these are all alloc_capable */
	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		fflags = r->fflags | RF_CTRL_INFO;
		ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags);
		if (ret)
			goto out_destroy;
	}

	for_each_mon_capable_rdt_resource(r) {
		fflags = r->fflags | RF_MON_INFO;
		sprintf(name, "%s_MON", r->name);
		ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
		if (ret)
			goto out_destroy;
	}

	ret = rdtgroup_kn_set_ugid(kn_info);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn_info);

	return 0;

out_destroy:
	kernfs_remove(kn_info);
	return ret;
}

static int
mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
		    char *name, struct kernfs_node **dest_kn)
{
	struct kernfs_node *kn;
	int ret;

	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	if (dest_kn)
		*dest_kn = kn;

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn);

	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}

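/*
 * CDP is enabled or disabled by flipping the enable bit in
 * MSR_IA32_L3_QOS_CFG / MSR_IA32_L2_QOS_CFG. set_cache_qos_cfg() below
 * runs these updaters on one CPU of each domain, or on every CPU when
 * the configuration register is replicated per CPU.
 */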
static void l3_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}

static void l2_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
}

static inline bool is_mba_linear(void)
{
	return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.delay_linear;
}

static int set_cache_qos_cfg(int level, bool enable)
{
	void (*update)(void *arg);
	struct rdt_resource *r_l;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int cpu;

	if (level == RDT_RESOURCE_L3)
		update = l3_qos_cfg_update;
	else if (level == RDT_RESOURCE_L2)
		update = l2_qos_cfg_update;
	else
		return -EINVAL;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	r_l = &rdt_resources_all[level].r_resctrl;
	list_for_each_entry(d, &r_l->domains, list) {
		if (r_l->cache.arch_has_per_cpu_cfg)
			/* Pick all the CPUs in the domain instance */
			for_each_cpu(cpu, &d->cpu_mask)
				cpumask_set_cpu(cpu, cpu_mask);
		else
			/* Pick one CPU from each domain instance to update MSR */
			cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
	}

	/* Update QOS_CFG MSR on all the CPUs in cpu_mask */
	on_each_cpu_mask(cpu_mask, update, &enable, 1);

	free_cpumask_var(cpu_mask);

	return 0;
}

/* Restore the qos cfg state when a domain comes online */
void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	if (!r->cdp_capable)
		return;

	if (r->rid == RDT_RESOURCE_L2)
		l2_qos_cfg_update(&hw_res->cdp_enabled);

	if (r->rid == RDT_RESOURCE_L3)
		l3_qos_cfg_update(&hw_res->cdp_enabled);
}

static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_domain *d)
{
	u32 num_closid = resctrl_arch_get_num_closid(r);
	int cpu = cpumask_any(&d->cpu_mask);
	int i;

	d->mbps_val = kcalloc_node(num_closid, sizeof(*d->mbps_val),
				   GFP_KERNEL, cpu_to_node(cpu));
	if (!d->mbps_val)
		return -ENOMEM;

	for (i = 0; i < num_closid; i++)
		d->mbps_val[i] = MBA_MAX_MBPS;

	return 0;
}

static void mba_sc_domain_destroy(struct rdt_resource *r,
				  struct rdt_domain *d)
{
	kfree(d->mbps_val);
	d->mbps_val = NULL;
}

/*
 * MBA software controller is supported only if
 * MBM is supported and MBA is in linear scale.
 */
static bool supports_mba_mbps(void)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;

	return (is_mbm_local_enabled() &&
		r->alloc_capable && is_mba_linear());
}

/*
 * Enable or disable the MBA software controller
 * which helps user specify bandwidth in MBps.
 */
static int set_mba_sc(bool mba_sc)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
	u32 num_closid = resctrl_arch_get_num_closid(r);
	struct rdt_domain *d;
	int i;

	if (!supports_mba_mbps() || mba_sc == is_mba_sc(r))
		return -EINVAL;

	r->membw.mba_sc = mba_sc;

	list_for_each_entry(d, &r->domains, list) {
		for (i = 0; i < num_closid; i++)
			d->mbps_val[i] = MBA_MAX_MBPS;
	}

	return 0;
}

static int cdp_enable(int level)
{
	struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl;
	int ret;

	if (!r_l->alloc_capable)
		return -EINVAL;

	ret = set_cache_qos_cfg(level, true);
	if (!ret)
		rdt_resources_all[level].cdp_enabled = true;

	return ret;
}

static void cdp_disable(int level)
{
	struct rdt_hw_resource *r_hw = &rdt_resources_all[level];

	if (r_hw->cdp_enabled) {
		set_cache_qos_cfg(level, false);
		r_hw->cdp_enabled = false;
	}
}

int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable)
{
	struct rdt_hw_resource *hw_res = &rdt_resources_all[l];

	if (!hw_res->r_resctrl.cdp_capable)
		return -EINVAL;

	if (enable)
		return cdp_enable(l);

	cdp_disable(l);

	return 0;
}

static void cdp_disable_all(void)
{
	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
		resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
		resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
}

/*
 * We don't allow rdtgroup directories to be created anywhere
 * except the root directory. Thus, when looking for the rdtgroup
 * structure for a kernfs node, we either have a directory, in which
 * case the rdtgroup structure is pointed at by the "priv" field, or
 * a file, in which case we need only look to the parent to find the
 * rdtgroup.
 */
static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
{
	if (kernfs_type(kn) == KERNFS_DIR) {
		/*
		 * All the resource directories use "kn->priv"
		 * to point to the "struct rdtgroup" for the
		 * resource. "info" and its subdirectories don't
		 * have rdtgroup structures, so return NULL here.
		 */
		if (kn == kn_info || kn->parent == kn_info)
			return NULL;
		else
			return kn->priv;
	} else {
		return kn->parent->priv;
	}
}

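/*
 * Taking a reference on rdtgrp->waitcount and dropping kernfs active
 * protection lets a concurrent rmdir proceed while this caller sleeps
 * on rdtgroup_mutex; the RDT_DELETED check below catches a group that
 * was removed in the meantime. rdtgroup_kn_unlock() drops the
 * reference and frees the group once the last waiter is done.
 */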
struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return NULL;

	atomic_inc(&rdtgrp->waitcount);
	kernfs_break_active_protection(kn);

	mutex_lock(&rdtgroup_mutex);

	/* Was this group deleted while we waited? */
	if (rdtgrp->flags & RDT_DELETED)
		return NULL;

	return rdtgrp;
}

void rdtgroup_kn_unlock(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return;

	mutex_unlock(&rdtgroup_mutex);

	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);
		kernfs_unbreak_active_protection(kn);
		rdtgroup_remove(rdtgrp);
	} else {
		kernfs_unbreak_active_protection(kn);
	}
}

static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **mon_data_kn);

static int rdt_enable_ctx(struct rdt_fs_context *ctx)
{
	int ret = 0;

	if (ctx->enable_cdpl2)
		ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true);

	if (!ret && ctx->enable_cdpl3)
		ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true);

	if (!ret && ctx->enable_mba_mbps)
		ret = set_mba_sc(true);

	return ret;
}

static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type)
{
	struct resctrl_schema *s;
	const char *suffix = "";
	int ret, cl;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	s->res = r;
	s->num_closid = resctrl_arch_get_num_closid(r);
	if (resctrl_arch_get_cdp_enabled(r->rid))
		s->num_closid /= 2;

	s->conf_type = type;
	switch (type) {
	case CDP_CODE:
		suffix = "CODE";
		break;
	case CDP_DATA:
		suffix = "DATA";
		break;
	case CDP_NONE:
		suffix = "";
		break;
	}

	ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix);
	if (ret >= sizeof(s->name)) {
		kfree(s);
		return -EINVAL;
	}

	cl = strlen(s->name);

	/*
	 * If CDP is supported by this resource, but not enabled,
	 * include the suffix. This ensures the tabular format of the
	 * schemata file does not change between mounts of the filesystem.
	 */
	if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid))
		cl += 4;

	if (cl > max_name_width)
		max_name_width = cl;

	INIT_LIST_HEAD(&s->list);
	list_add(&s->list, &resctrl_schema_all);

	return 0;
}

static int schemata_list_create(void)
{
	struct rdt_resource *r;
	int ret = 0;

	for_each_alloc_capable_rdt_resource(r) {
		if (resctrl_arch_get_cdp_enabled(r->rid)) {
			ret = schemata_list_add(r, CDP_CODE);
			if (ret)
				break;

			ret = schemata_list_add(r, CDP_DATA);
		} else {
			ret = schemata_list_add(r, CDP_NONE);
		}

		if (ret)
			break;
	}

	return ret;
}

static void schemata_list_destroy(void)
{
	struct resctrl_schema *s, *tmp;

	list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) {
		list_del(&s->list);
		kfree(s);
	}
}

static int rdt_get_tree(struct fs_context *fc)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);
	struct rdt_domain *dom;
	struct rdt_resource *r;
	int ret;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);
	/*
	 * resctrl file system can only be mounted once.
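	 * rdt_enable_key, set on a successful mount and cleared on
	 * unmount, doubles as the "already mounted" flag.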
static int rdt_get_tree(struct fs_context *fc)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);
	struct rdt_domain *dom;
	struct rdt_resource *r;
	int ret;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);
	/*
	 * resctrl file system can only be mounted once.
	 */
	if (static_branch_unlikely(&rdt_enable_key)) {
		ret = -EBUSY;
		goto out;
	}

	ret = rdt_enable_ctx(ctx);
	if (ret < 0)
		goto out_cdp;

	ret = schemata_list_create();
	if (ret) {
		schemata_list_destroy();
		goto out_mba;
	}

	closid_init();

	ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
	if (ret < 0)
		goto out_schemata_free;

	if (rdt_mon_capable) {
		ret = mongroup_create_dir(rdtgroup_default.kn,
					  &rdtgroup_default, "mon_groups",
					  &kn_mongrp);
		if (ret < 0)
			goto out_info;

		ret = mkdir_mondata_all(rdtgroup_default.kn,
					&rdtgroup_default, &kn_mondata);
		if (ret < 0)
			goto out_mongrp;
		rdtgroup_default.mon.mon_data_kn = kn_mondata;
	}

	ret = rdt_pseudo_lock_init();
	if (ret)
		goto out_mondata;

	ret = kernfs_get_tree(fc);
	if (ret < 0)
		goto out_psl;

	if (rdt_alloc_capable)
		static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
	if (rdt_mon_capable)
		static_branch_enable_cpuslocked(&rdt_mon_enable_key);

	if (rdt_alloc_capable || rdt_mon_capable)
		static_branch_enable_cpuslocked(&rdt_enable_key);

	if (is_mbm_enabled()) {
		r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
		list_for_each_entry(dom, &r->domains, list)
			mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
	}

	goto out;

out_psl:
	rdt_pseudo_lock_release();
out_mondata:
	if (rdt_mon_capable)
		kernfs_remove(kn_mondata);
out_mongrp:
	if (rdt_mon_capable)
		kernfs_remove(kn_mongrp);
out_info:
	kernfs_remove(kn_info);
out_schemata_free:
	schemata_list_destroy();
out_mba:
	if (ctx->enable_mba_mbps)
		set_mba_sc(false);
out_cdp:
	cdp_disable_all();
out:
	rdt_last_cmd_clear();
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
	return ret;
}

enum rdt_param {
	Opt_cdp,
	Opt_cdpl2,
	Opt_mba_mbps,
	nr__rdt_params
};

static const struct fs_parameter_spec rdt_fs_parameters[] = {
	fsparam_flag("cdp", Opt_cdp),
	fsparam_flag("cdpl2", Opt_cdpl2),
	fsparam_flag("mba_MBps", Opt_mba_mbps),
	{}
};

static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, rdt_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_cdp:
		ctx->enable_cdpl3 = true;
		return 0;
	case Opt_cdpl2:
		ctx->enable_cdpl2 = true;
		return 0;
	case Opt_mba_mbps:
		if (!supports_mba_mbps())
			return -EINVAL;
		ctx->enable_mba_mbps = true;
		return 0;
	}

	return -EINVAL;
}

static void rdt_fs_context_free(struct fs_context *fc)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);

	kernfs_free_fs_context(fc);
	kfree(ctx);
}

static const struct fs_context_operations rdt_fs_context_ops = {
	.free		= rdt_fs_context_free,
	.parse_param	= rdt_parse_param,
	.get_tree	= rdt_get_tree,
};
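/*
 * Example mount invocations exercising the parameters parsed above
 * (illustrative; the "/sys/fs/resctrl" mount point itself is created
 * in rdtgroup_init()):
 *
 *	# mount -t resctrl resctrl /sys/fs/resctrl
 *	# mount -t resctrl -o cdp,mba_MBps resctrl /sys/fs/resctrl
 */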
static int rdt_init_fs_context(struct fs_context *fc)
{
	struct rdt_fs_context *ctx;

	ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->kfc.root = rdt_root;
	ctx->kfc.magic = RDTGROUP_SUPER_MAGIC;
	fc->fs_private = &ctx->kfc;
	fc->ops = &rdt_fs_context_ops;
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(&init_user_ns);
	fc->global = true;
	return 0;
}

static int reset_all_ctrls(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	struct rdt_hw_domain *hw_dom;
	struct msr_param msr_param;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int i;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	msr_param.res = r;
	msr_param.low = 0;
	msr_param.high = hw_res->num_closid;

	/*
	 * Disable resource control for this resource by setting all
	 * CBMs in all domains to the maximum mask value. Pick one CPU
	 * from each domain to update the MSRs below.
	 */
	list_for_each_entry(d, &r->domains, list) {
		hw_dom = resctrl_to_arch_dom(d);
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);

		for (i = 0; i < hw_res->num_closid; i++)
			hw_dom->ctrl_val[i] = r->default_ctrl;
	}

	/* Update CBM on all the CPUs in cpu_mask */
	on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1);

	free_cpumask_var(cpu_mask);

	return 0;
}

/*
 * Move tasks from one to the other group. If @from is NULL, then all tasks
 * in the system are moved unconditionally (used for teardown).
 *
 * If @mask is not NULL the cpus on which moved tasks are running are set
 * in that mask so the update smp function call is restricted to affected
 * cpus.
 */
static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
				 struct cpumask *mask)
{
	struct task_struct *p, *t;

	read_lock(&tasklist_lock);
	for_each_process_thread(p, t) {
		if (!from || is_closid_match(t, from) ||
		    is_rmid_match(t, from)) {
			WRITE_ONCE(t->closid, to->closid);
			WRITE_ONCE(t->rmid, to->mon.rmid);

			/*
			 * Order the closid/rmid stores above before the loads
			 * in task_curr(). This pairs with the full barrier
			 * between the rq->curr update and resctrl_sched_in()
			 * during context switch.
			 */
			smp_mb();

			/*
			 * If the task is on a CPU, set the CPU in the mask.
			 * The detection is inaccurate as tasks might move or
			 * schedule before the smp function call takes place.
			 * In such a case the function call is pointless, but
			 * there is no other side effect.
			 */
			if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
				cpumask_set_cpu(task_cpu(t), mask);
		}
	}
	read_unlock(&tasklist_lock);
}

static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
{
	struct rdtgroup *sentry, *stmp;
	struct list_head *head;

	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
		free_rmid(sentry->mon.rmid);
		list_del(&sentry->mon.crdtgrp_list);

		if (atomic_read(&sentry->waitcount) != 0)
			sentry->flags = RDT_DELETED;
		else
			rdtgroup_remove(sentry);
	}
}
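/*
 * Note on the waitcount test above (and in rmdir_all_sub() below): a
 * group with a non-zero waitcount is still referenced by a kernfs
 * handler that dropped its active protection, so it is only flagged
 * RDT_DELETED here; the final rdtgroup_kn_unlock() performs the free.
 */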
/*
 * Forcibly remove all subdirectories under root.
 */
static void rmdir_all_sub(void)
{
	struct rdtgroup *rdtgrp, *tmp;

	/* Move all tasks to the default resource group */
	rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);

	list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
		/* Free any child rmids */
		free_all_child_rdtgrp(rdtgrp);

		/* Remove each rdtgroup other than root */
		if (rdtgrp == &rdtgroup_default)
			continue;

		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);

		/*
		 * Give any CPUs back to the default group. We cannot copy
		 * cpu_online_mask because a CPU might have executed the
		 * offline callback already, but is still marked online.
		 */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

		free_rmid(rdtgrp->mon.rmid);

		kernfs_remove(rdtgrp->kn);
		list_del(&rdtgrp->rdtgroup_list);

		if (atomic_read(&rdtgrp->waitcount) != 0)
			rdtgrp->flags = RDT_DELETED;
		else
			rdtgroup_remove(rdtgrp);
	}
	/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
	update_closid_rmid(cpu_online_mask, &rdtgroup_default);

	kernfs_remove(kn_info);
	kernfs_remove(kn_mongrp);
	kernfs_remove(kn_mondata);
}

static void rdt_kill_sb(struct super_block *sb)
{
	struct rdt_resource *r;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	set_mba_sc(false);

	/* Put everything back to default values. */
	for_each_alloc_capable_rdt_resource(r)
		reset_all_ctrls(r);
	cdp_disable_all();
	rmdir_all_sub();
	rdt_pseudo_lock_release();
	rdtgroup_default.mode = RDT_MODE_SHAREABLE;
	schemata_list_destroy();
	static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
	static_branch_disable_cpuslocked(&rdt_mon_enable_key);
	static_branch_disable_cpuslocked(&rdt_enable_key);
	kernfs_kill_sb(sb);
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
}

static struct file_system_type rdt_fs_type = {
	.name			= "resctrl",
	.init_fs_context	= rdt_init_fs_context,
	.parameters		= rdt_fs_parameters,
	.kill_sb		= rdt_kill_sb,
};

static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
		       void *priv)
{
	struct kernfs_node *kn;
	int ret = 0;

	kn = __kernfs_create_file(parent_kn, name, 0444,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
				  &kf_mondata_ops, priv, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return ret;
}
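/*
 * The read-only files created by mon_addfile() above surface event
 * counts to userspace, e.g. (illustrative):
 *
 *	# cat /sys/fs/resctrl/mon_data/mon_L3_00/llc_occupancy
 */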
/*
 * Remove all subdirectories of mon_data of ctrl_mon groups
 * and monitor groups with the given domain id.
 */
static void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
					   unsigned int dom_id)
{
	struct rdtgroup *prgrp, *crgrp;
	char name[32];

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		sprintf(name, "mon_%s_%02d", r->name, dom_id);
		kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);

		list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
			kernfs_remove_by_name(crgrp->mon.mon_data_kn, name);
	}
}

static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
				struct rdt_domain *d,
				struct rdt_resource *r, struct rdtgroup *prgrp)
{
	union mon_data_bits priv;
	struct kernfs_node *kn;
	struct mon_evt *mevt;
	struct rmid_read rr;
	char name[32];
	int ret;

	sprintf(name, "mon_%s_%02d", r->name, d->id);
	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	if (WARN_ON(list_empty(&r->evt_list))) {
		ret = -EPERM;
		goto out_destroy;
	}

	priv.u.rid = r->rid;
	priv.u.domid = d->id;
	list_for_each_entry(mevt, &r->evt_list, list) {
		priv.u.evtid = mevt->evtid;
		ret = mon_addfile(kn, mevt->name, priv.priv);
		if (ret)
			goto out_destroy;

		if (is_mbm_event(mevt->evtid))
			mon_event_read(&rr, r, d, prgrp, mevt->evtid, true);
	}
	kernfs_activate(kn);
	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}

/*
 * Add all subdirectories of mon_data for "ctrl_mon" groups
 * and "monitor" groups with the given domain id.
 */
static void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
					   struct rdt_domain *d)
{
	struct kernfs_node *parent_kn;
	struct rdtgroup *prgrp, *crgrp;
	struct list_head *head;

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		parent_kn = prgrp->mon.mon_data_kn;
		mkdir_mondata_subdir(parent_kn, d, r, prgrp);

		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			parent_kn = crgrp->mon.mon_data_kn;
			mkdir_mondata_subdir(parent_kn, d, r, crgrp);
		}
	}
}

static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
				       struct rdt_resource *r,
				       struct rdtgroup *prgrp)
{
	struct rdt_domain *dom;
	int ret;

	list_for_each_entry(dom, &r->domains, list) {
		ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * This creates a directory mon_data which contains the monitored data.
 *
 * mon_data has one directory for each domain, named in the format
 * mon_<domain_name>_<domain_id>. For example, a mon_data directory
 * with L3 domains looks as below:
 * ./mon_data:
 *	mon_L3_00
 *	mon_L3_01
 *	mon_L3_02
 * ...
 *
 * Each domain directory has one file per event:
 * ./mon_L3_00/:
 *	llc_occupancy
 *
 */
static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **dest_kn)
{
	struct rdt_resource *r;
	struct kernfs_node *kn;
	int ret;

	/*
	 * Create the mon_data directory first.
	 */
	ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn);
	if (ret)
		return ret;

	if (dest_kn)
		*dest_kn = kn;

	/*
	 * Create the subdirectories for each domain. Note that all events
	 * in a domain like L3 are grouped into a resource whose domain is L3.
	 */
	for_each_mon_capable_rdt_resource(r) {
		ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
		if (ret)
			goto out_destroy;
	}

	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}

/**
 * cbm_ensure_valid - Enforce validity on provided CBM
 * @_val: Candidate CBM
 * @r: RDT resource to which the CBM belongs
 *
 * The provided CBM represents all cache portions available for use. This
 * may be represented by a bitmap that does not consist of contiguous ones
 * and thus be an invalid CBM.
 * Here the provided CBM is forced to be a valid CBM by only considering
 * the first set of contiguous bits as valid and clearing all other bits.
 * The intention here is to provide a valid default CBM with which a new
 * resource group is initialized. The user can follow this with a
 * modification to the CBM if the default does not satisfy the
 * requirements.
 */
static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
{
	unsigned int cbm_len = r->cache.cbm_len;
	unsigned long first_bit, zero_bit;
	unsigned long val = _val;

	if (!val)
		return 0;

	first_bit = find_first_bit(&val, cbm_len);
	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);

	/* Clear any remaining bits to ensure contiguous region */
	bitmap_clear(&val, zero_bit, cbm_len - zero_bit);
	return (u32)val;
}
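/*
 * Worked example for cbm_ensure_valid() above, assuming cbm_len = 5:
 * a candidate CBM of 0b11011 keeps only the contiguous run starting
 * at its first set bit (bits 0-1), so bits 2-4 are cleared and the
 * result is 0b00011. An already-contiguous value such as 0b11100 is
 * returned unchanged.
 */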
/*
 * Initialize cache resources per RDT domain
 *
 * Set the RDT domain up to start off with all usable allocations. That is,
 * all shareable and unused bits. All-zero CBM is invalid.
 */
static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s,
				 u32 closid)
{
	enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
	enum resctrl_conf_type t = s->conf_type;
	struct resctrl_staged_config *cfg;
	struct rdt_resource *r = s->res;
	u32 used_b = 0, unused_b = 0;
	unsigned long tmp_cbm;
	enum rdtgrp_mode mode;
	u32 peer_ctl, ctrl_val;
	int i;

	cfg = &d->staged_config[t];
	cfg->have_new_ctrl = false;
	cfg->new_ctrl = r->cache.shareable_bits;
	used_b = r->cache.shareable_bits;
	for (i = 0; i < closids_supported(); i++) {
		if (closid_allocated(i) && i != closid) {
			mode = rdtgroup_mode_by_closid(i);
			if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
				/*
				 * ctrl values for locksetup aren't relevant
				 * until the schemata is written, and the mode
				 * becomes RDT_MODE_PSEUDO_LOCKED.
				 */
				continue;
			/*
			 * If CDP is active include peer domain's
			 * usage to ensure there is no overlap
			 * with an exclusive group.
			 */
			if (resctrl_arch_get_cdp_enabled(r->rid))
				peer_ctl = resctrl_arch_get_config(r, d, i,
								   peer_type);
			else
				peer_ctl = 0;
			ctrl_val = resctrl_arch_get_config(r, d, i,
							   s->conf_type);
			used_b |= ctrl_val | peer_ctl;
			if (mode == RDT_MODE_SHAREABLE)
				cfg->new_ctrl |= ctrl_val | peer_ctl;
		}
	}
	if (d->plr && d->plr->cbm > 0)
		used_b |= d->plr->cbm;
	unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
	unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
	cfg->new_ctrl |= unused_b;
	/*
	 * Force the initial CBM to be valid, user can
	 * modify the CBM based on system availability.
	 */
	cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r);
	/*
	 * Assign the u32 CBM to an unsigned long to ensure that
	 * bitmap_weight() does not access out-of-bound memory.
	 */
	tmp_cbm = cfg->new_ctrl;
	if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
		rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->id);
		return -ENOSPC;
	}
	cfg->have_new_ctrl = true;

	return 0;
}

/*
 * Initialize cache resources with default values.
 *
 * A new RDT group is being created on an allocation capable (CAT)
 * supporting system. Set this group up to start off with all usable
 * allocations.
 *
 * If there are no more shareable bits available on any domain then
 * the entire allocation will fail.
 */
static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid)
{
	struct rdt_domain *d;
	int ret;

	list_for_each_entry(d, &s->res->domains, list) {
		ret = __init_one_rdt_domain(d, s, closid);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Initialize MBA resource with default values. */
static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid)
{
	struct resctrl_staged_config *cfg;
	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list) {
		if (is_mba_sc(r)) {
			d->mbps_val[closid] = MBA_MAX_MBPS;
			continue;
		}

		cfg = &d->staged_config[CDP_NONE];
		cfg->new_ctrl = r->default_ctrl;
		cfg->have_new_ctrl = true;
	}
}
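/*
 * Example (illustrative): after rdtgroup_init_mba() above, a new
 * group's "MB" schemata line shows no throttling, e.g. "MB:0=100"
 * with the default percentage-based control, or the maximum MBps
 * value when the "mba_MBps" software controller is in use.
 */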
/* Initialize the RDT group's allocations. */
static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{
	struct resctrl_schema *s;
	struct rdt_resource *r;
	int ret;

	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		if (r->rid == RDT_RESOURCE_MBA ||
		    r->rid == RDT_RESOURCE_SMBA) {
			rdtgroup_init_mba(r, rdtgrp->closid);
			if (is_mba_sc(r))
				continue;
		} else {
			ret = rdtgroup_init_cat(s, rdtgrp->closid);
			if (ret < 0)
				return ret;
		}

		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
		if (ret < 0) {
			rdt_last_cmd_puts("Failed to initialize allocations\n");
			return ret;
		}
	}

	rdtgrp->mode = RDT_MODE_SHAREABLE;

	return 0;
}
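/*
 * Illustrative result of rdtgroup_init_alloc() above: a freshly
 * created group starts in "shareable" mode with one schemata line
 * per entry in resctrl_schema_all, for example:
 *
 *	L3:0=fff;1=fff
 *	MB:0=100;1=100
 */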
static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
			     const char *name, umode_t mode,
			     enum rdt_group_type rtype, struct rdtgroup **r)
{
	struct rdtgroup *prdtgrp, *rdtgrp;
	struct kernfs_node *kn;
	uint files = 0;
	int ret;

	prdtgrp = rdtgroup_kn_lock_live(parent_kn);
	if (!prdtgrp) {
		ret = -ENODEV;
		goto out_unlock;
	}

	if (rtype == RDTMON_GROUP &&
	    (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
	     prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto out_unlock;
	}

	/* allocate the rdtgroup. */
	rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
	if (!rdtgrp) {
		ret = -ENOSPC;
		rdt_last_cmd_puts("Kernel out of memory\n");
		goto out_unlock;
	}
	*r = rdtgrp;
	rdtgrp->mon.parent = prdtgrp;
	rdtgrp->type = rtype;
	INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list);

	/* kernfs creates the directory for rdtgrp */
	kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
	if (IS_ERR(kn)) {
		ret = PTR_ERR(kn);
		rdt_last_cmd_puts("kernfs create error\n");
		goto out_free_rgrp;
	}
	rdtgrp->kn = kn;

	/*
	 * kernfs_remove() will drop the reference count on "kn" which
	 * will free it. But we still need it to stick around for the
	 * rdtgroup_kn_unlock(kn) call. Take one extra reference here,
	 * which will be dropped by kernfs_put() in rdtgroup_remove().
	 */
	kernfs_get(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		rdt_last_cmd_puts("kernfs perm error\n");
		goto out_destroy;
	}

	files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype);
	ret = rdtgroup_add_files(kn, files);
	if (ret) {
		rdt_last_cmd_puts("kernfs fill error\n");
		goto out_destroy;
	}

	if (rdt_mon_capable) {
		ret = alloc_rmid();
		if (ret < 0) {
			rdt_last_cmd_puts("Out of RMIDs\n");
			goto out_destroy;
		}
		rdtgrp->mon.rmid = ret;

		ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
		if (ret) {
			rdt_last_cmd_puts("kernfs subdir error\n");
			goto out_idfree;
		}
	}
	kernfs_activate(kn);

	/*
	 * The caller unlocks the parent_kn upon success.
	 */
	return 0;

out_idfree:
	free_rmid(rdtgrp->mon.rmid);
out_destroy:
	kernfs_put(rdtgrp->kn);
	kernfs_remove(rdtgrp->kn);
out_free_rgrp:
	kfree(rdtgrp);
out_unlock:
	rdtgroup_kn_unlock(parent_kn);
	return ret;
}

static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
{
	kernfs_remove(rgrp->kn);
	free_rmid(rgrp->mon.rmid);
	rdtgroup_remove(rgrp);
}

/*
 * Create a monitor group under the "mon_groups" directory of a control
 * and monitor group (ctrl_mon). This is a resource group
 * to monitor a subset of tasks and cpus in its parent ctrl_mon group.
 */
static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
			      const char *name, umode_t mode)
{
	struct rdtgroup *rdtgrp, *prgrp;
	int ret;

	ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp);
	if (ret)
		return ret;

	prgrp = rdtgrp->mon.parent;
	rdtgrp->closid = prgrp->closid;

	/*
	 * Add the rdtgrp to the list of rdtgrps the parent
	 * ctrl_mon group has to track.
	 */
	list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);

	rdtgroup_kn_unlock(parent_kn);
	return ret;
}

/*
 * These are rdtgroups created under the root directory. Can be used
 * to allocate and monitor resources.
 */
static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
				   const char *name, umode_t mode)
{
	struct rdtgroup *rdtgrp;
	struct kernfs_node *kn;
	u32 closid;
	int ret;

	ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp);
	if (ret)
		return ret;

	kn = rdtgrp->kn;
	ret = closid_alloc();
	if (ret < 0) {
		rdt_last_cmd_puts("Out of CLOSIDs\n");
		goto out_common_fail;
	}
	closid = ret;
	ret = 0;

	rdtgrp->closid = closid;
	ret = rdtgroup_init_alloc(rdtgrp);
	if (ret < 0)
		goto out_id_free;

	list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);

	if (rdt_mon_capable) {
		/*
		 * Create an empty mon_groups directory to hold the subset
		 * of tasks and cpus to monitor.
		 */
		ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL);
		if (ret) {
			rdt_last_cmd_puts("kernfs subdir error\n");
			goto out_del_list;
		}
	}

	goto out_unlock;

out_del_list:
	list_del(&rdtgrp->rdtgroup_list);
out_id_free:
	closid_free(closid);
out_common_fail:
	mkdir_rdt_prepare_clean(rdtgrp);
out_unlock:
	rdtgroup_kn_unlock(parent_kn);
	return ret;
}

/*
 * We allow creating mon groups only within a directory called "mon_groups",
 * which is present in every ctrl_mon group. Check if this is a valid
 * "mon_groups" directory.
 *
 * 1. The directory should be named "mon_groups".
 * 2. The mon group itself should "not" be named "mon_groups".
 *    This makes sure "mon_groups" directory always has a ctrl_mon group
 *    as parent.
 */
static bool is_mon_groups(struct kernfs_node *kn, const char *name)
{
	return (!strcmp(kn->name, "mon_groups") &&
		strcmp(name, "mon_groups"));
}
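/*
 * Illustrative userspace view of the two mkdir paths dispatched in
 * rdtgroup_mkdir() below:
 *
 *	# mkdir /sys/fs/resctrl/g1			ctrl_mon group
 *	# mkdir /sys/fs/resctrl/g1/mon_groups/m1	mon group
 *
 * mkdir anywhere else fails with -EPERM.
 */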
static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
			  umode_t mode)
{
	/* Do not accept '\n' to avoid unparsable situation. */
	if (strchr(name, '\n'))
		return -EINVAL;

	/*
	 * If the parent directory is the root directory and RDT
	 * allocation is supported, add a control and monitoring
	 * subdirectory.
	 */
	if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn)
		return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode);

	/*
	 * If RDT monitoring is supported and the parent directory is a valid
	 * "mon_groups" directory, add a monitoring subdirectory.
	 */
	if (rdt_mon_capable && is_mon_groups(parent_kn, name))
		return rdtgroup_mkdir_mon(parent_kn, name, mode);

	return -EPERM;
}

static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
{
	struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
	int cpu;

	/* Give any tasks back to the parent group */
	rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);

	/* Update per cpu rmid of the moved CPUs first */
	for_each_cpu(cpu, &rdtgrp->cpu_mask)
		per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid;
	/*
	 * Update the MSR on moved CPUs and CPUs which have a moved
	 * task running on them.
	 */
	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
	update_closid_rmid(tmpmask, NULL);

	rdtgrp->flags = RDT_DELETED;
	free_rmid(rdtgrp->mon.rmid);

	/*
	 * Remove the rdtgrp from the parent ctrl_mon group's list
	 */
	WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
	list_del(&rdtgrp->mon.crdtgrp_list);

	kernfs_remove(rdtgrp->kn);

	return 0;
}

static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp)
{
	rdtgrp->flags = RDT_DELETED;
	list_del(&rdtgrp->rdtgroup_list);

	kernfs_remove(rdtgrp->kn);
	return 0;
}

static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
{
	int cpu;

	/* Give any tasks back to the default group */
	rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);

	/* Give any CPUs back to the default group */
	cpumask_or(&rdtgroup_default.cpu_mask,
		   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

	/* Update per cpu closid and rmid of the moved CPUs first */
	for_each_cpu(cpu, &rdtgrp->cpu_mask) {
		per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid;
		per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid;
	}

	/*
	 * Update the MSR on moved CPUs and CPUs which have a moved
	 * task running on them.
	 */
	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
	update_closid_rmid(tmpmask, NULL);

	closid_free(rdtgrp->closid);
	free_rmid(rdtgrp->mon.rmid);

	rdtgroup_ctrl_remove(rdtgrp);

	/*
	 * Free all the child monitor group rmids.
	 */
	free_all_child_rdtgrp(rdtgrp);

	return 0;
}

static int rdtgroup_rmdir(struct kernfs_node *kn)
{
	struct kernfs_node *parent_kn = kn->parent;
	struct rdtgroup *rdtgrp;
	cpumask_var_t tmpmask;
	int ret = 0;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	rdtgrp = rdtgroup_kn_lock_live(kn);
	if (!rdtgrp) {
		ret = -EPERM;
		goto out;
	}

	/*
	 * If the rdtgroup is a ctrl_mon group and parent directory
	 * is the root directory, remove the ctrl_mon group.
	 *
	 * If the rdtgroup is a mon group and parent directory
	 * is a valid "mon_groups" directory, remove the mon group.
	 */
	if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn &&
	    rdtgrp != &rdtgroup_default) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			ret = rdtgroup_ctrl_remove(rdtgrp);
		} else {
			ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask);
		}
	} else if (rdtgrp->type == RDTMON_GROUP &&
		   is_mon_groups(parent_kn, kn->name)) {
		ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask);
	} else {
		ret = -EPERM;
	}

out:
	rdtgroup_kn_unlock(kn);
	free_cpumask_var(tmpmask);
	return ret;
}

static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
{
	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
		seq_puts(seq, ",cdp");

	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
		seq_puts(seq, ",cdpl2");

	if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl))
		seq_puts(seq, ",mba_MBps");

	return 0;
}

static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
	.mkdir		= rdtgroup_mkdir,
	.rmdir		= rdtgroup_rmdir,
	.show_options	= rdtgroup_show_options,
};

static int __init rdtgroup_setup_root(void)
{
	int ret;

	rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
				      KERNFS_ROOT_CREATE_DEACTIVATED |
				      KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
				      &rdtgroup_default);
	if (IS_ERR(rdt_root))
		return PTR_ERR(rdt_root);

	mutex_lock(&rdtgroup_mutex);

	rdtgroup_default.closid = 0;
	rdtgroup_default.mon.rmid = 0;
	rdtgroup_default.type = RDTCTRL_GROUP;
	INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);

	list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);

	ret = rdtgroup_add_files(kernfs_root_to_node(rdt_root), RF_CTRL_BASE);
	if (ret) {
		kernfs_destroy_root(rdt_root);
		goto out;
	}

	rdtgroup_default.kn = kernfs_root_to_node(rdt_root);
	kernfs_activate(rdtgroup_default.kn);

out:
	mutex_unlock(&rdtgroup_mutex);

	return ret;
}

static void domain_destroy_mon_state(struct rdt_domain *d)
{
	bitmap_free(d->rmid_busy_llc);
	kfree(d->mbm_total);
	kfree(d->mbm_local);
}
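/*
 * The two domain hotplug callbacks below are invoked by the
 * architecture code with rdtgroup_mutex held (see the lockdep
 * assertions) when a monitoring/allocation domain disappears or
 * appears, typically as the last CPU of a cache domain goes offline
 * or its first CPU comes online.
 */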
void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d)
{
	lockdep_assert_held(&rdtgroup_mutex);

	if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA)
		mba_sc_domain_destroy(r, d);

	if (!r->mon_capable)
		return;

	/*
	 * If resctrl is mounted, remove all the
	 * per domain monitor data directories.
	 */
	if (static_branch_unlikely(&rdt_mon_enable_key))
		rmdir_mondata_subdir_allrdtgrp(r, d->id);

	if (is_mbm_enabled())
		cancel_delayed_work(&d->mbm_over);
	if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
		/*
		 * When a package is going down, forcefully
		 * decrement rmid->ebusy. There is no way to know
		 * that the L3 was flushed and hence may lead to
		 * incorrect counts in rare scenarios, but leaving
		 * the RMID as busy creates RMID leaks if the
		 * package never comes back.
		 */
		__check_limbo(d, true);
		cancel_delayed_work(&d->cqm_limbo);
	}

	domain_destroy_mon_state(d);
}

static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
{
	size_t tsize;

	if (is_llc_occupancy_enabled()) {
		d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL);
		if (!d->rmid_busy_llc)
			return -ENOMEM;
	}
	if (is_mbm_total_enabled()) {
		tsize = sizeof(*d->mbm_total);
		d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
		if (!d->mbm_total) {
			bitmap_free(d->rmid_busy_llc);
			return -ENOMEM;
		}
	}
	if (is_mbm_local_enabled()) {
		tsize = sizeof(*d->mbm_local);
		d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
		if (!d->mbm_local) {
			bitmap_free(d->rmid_busy_llc);
			kfree(d->mbm_total);
			return -ENOMEM;
		}
	}

	return 0;
}

int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d)
{
	int err;

	lockdep_assert_held(&rdtgroup_mutex);

	if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA)
		/* RDT_RESOURCE_MBA is never mon_capable */
		return mba_sc_domain_allocate(r, d);

	if (!r->mon_capable)
		return 0;

	err = domain_setup_mon_state(r, d);
	if (err)
		return err;

	if (is_mbm_enabled()) {
		INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
		mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
	}

	if (is_llc_occupancy_enabled())
		INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);

	/* If resctrl is mounted, add per domain monitor data directories. */
	if (static_branch_unlikely(&rdt_mon_enable_key))
		mkdir_mondata_subdir_allrdtgrp(r, d);

	return 0;
}
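/*
 * Note on the online path above: the MBM hardware counters are narrow
 * enough to wrap, so a per domain overflow worker is scheduled (every
 * MBM_OVERFLOW_INTERVAL) to fold them into the 64-bit software counts,
 * and the limbo worker re-checks freed RMIDs that still have cache
 * occupancy before they can be reallocated.
 */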
/*
 * rdtgroup_init - rdtgroup initialization
 *
 * Set up the resctrl file system: set up the root, create the mount point,
 * register the rdtgroup filesystem, and initialize files under the root
 * directory.
 *
 * Return: 0 on success or -errno
 */
int __init rdtgroup_init(void)
{
	int ret = 0;

	seq_buf_init(&last_cmd_status, last_cmd_status_buf,
		     sizeof(last_cmd_status_buf));

	ret = rdtgroup_setup_root();
	if (ret)
		return ret;

	ret = sysfs_create_mount_point(fs_kobj, "resctrl");
	if (ret)
		goto cleanup_root;

	ret = register_filesystem(&rdt_fs_type);
	if (ret)
		goto cleanup_mountpoint;

	/*
	 * Adding the resctrl debugfs directory here may not be ideal since
	 * it would let the resctrl debugfs directory appear on the debugfs
	 * filesystem before the resctrl filesystem is mounted.
	 * It may also be ok since that would enable debugging of RDT before
	 * resctrl is mounted.
	 * The reason why the debugfs directory is created here and not in
	 * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and
	 * during the debugfs directory creation also &sb->s_type->i_mutex_key
	 * (the lockdep class of inode->i_rwsem). Other filesystem
	 * interactions (eg. SyS_getdents) have the lock ordering:
	 * &sb->s_type->i_mutex_key --> &mm->mmap_lock
	 * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex
	 * is taken, thus creating dependency:
	 * &mm->mmap_lock --> rdtgroup_mutex for the latter that can cause
	 * issues considering the other two lock dependencies.
	 * By creating the debugfs directory here we avoid a dependency
	 * that may cause deadlock (even though file operations cannot
	 * occur until the filesystem is mounted, but I do not know how to
	 * tell lockdep that).
	 */
	debugfs_resctrl = debugfs_create_dir("resctrl", NULL);

	return 0;

cleanup_mountpoint:
	sysfs_remove_mount_point(fs_kobj, "resctrl");
cleanup_root:
	kernfs_destroy_root(rdt_root);

	return ret;
}

void __exit rdtgroup_exit(void)
{
	debugfs_remove_recursive(debugfs_resctrl);
	unregister_filesystem(&rdt_fs_type);
	sysfs_remove_mount_point(fs_kobj, "resctrl");
	kernfs_destroy_root(rdt_root);
}