1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * User interface for Resource Alloction in Resource Director Technology(RDT) 4 * 5 * Copyright (C) 2016 Intel Corporation 6 * 7 * Author: Fenghua Yu <fenghua.yu@intel.com> 8 * 9 * More information about RDT be found in the Intel (R) x86 Architecture 10 * Software Developer Manual. 11 */ 12 13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 15 #include <linux/cacheinfo.h> 16 #include <linux/cpu.h> 17 #include <linux/debugfs.h> 18 #include <linux/fs.h> 19 #include <linux/fs_parser.h> 20 #include <linux/sysfs.h> 21 #include <linux/kernfs.h> 22 #include <linux/seq_buf.h> 23 #include <linux/seq_file.h> 24 #include <linux/sched/signal.h> 25 #include <linux/sched/task.h> 26 #include <linux/slab.h> 27 #include <linux/task_work.h> 28 #include <linux/user_namespace.h> 29 30 #include <uapi/linux/magic.h> 31 32 #include <asm/resctrl_sched.h> 33 #include "internal.h" 34 35 DEFINE_STATIC_KEY_FALSE(rdt_enable_key); 36 DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key); 37 DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key); 38 static struct kernfs_root *rdt_root; 39 struct rdtgroup rdtgroup_default; 40 LIST_HEAD(rdt_all_groups); 41 42 /* Kernel fs node for "info" directory under root */ 43 static struct kernfs_node *kn_info; 44 45 /* Kernel fs node for "mon_groups" directory under root */ 46 static struct kernfs_node *kn_mongrp; 47 48 /* Kernel fs node for "mon_data" directory under root */ 49 static struct kernfs_node *kn_mondata; 50 51 static struct seq_buf last_cmd_status; 52 static char last_cmd_status_buf[512]; 53 54 struct dentry *debugfs_resctrl; 55 56 void rdt_last_cmd_clear(void) 57 { 58 lockdep_assert_held(&rdtgroup_mutex); 59 seq_buf_clear(&last_cmd_status); 60 } 61 62 void rdt_last_cmd_puts(const char *s) 63 { 64 lockdep_assert_held(&rdtgroup_mutex); 65 seq_buf_puts(&last_cmd_status, s); 66 } 67 68 void rdt_last_cmd_printf(const char *fmt, ...) 69 { 70 va_list ap; 71 72 va_start(ap, fmt); 73 lockdep_assert_held(&rdtgroup_mutex); 74 seq_buf_vprintf(&last_cmd_status, fmt, ap); 75 va_end(ap); 76 } 77 78 /* 79 * Trivial allocator for CLOSIDs. Since h/w only supports a small number, 80 * we can keep a bitmap of free CLOSIDs in a single integer. 81 * 82 * Using a global CLOSID across all resources has some advantages and 83 * some drawbacks: 84 * + We can simply set "current->closid" to assign a task to a resource 85 * group. 86 * + Context switch code can avoid extra memory references deciding which 87 * CLOSID to load into the PQR_ASSOC MSR 88 * - We give up some options in configuring resource groups across multi-socket 89 * systems. 90 * - Our choices on how to configure each resource become progressively more 91 * limited as the number of resources grows. 92 */ 93 static int closid_free_map; 94 static int closid_free_map_len; 95 96 int closids_supported(void) 97 { 98 return closid_free_map_len; 99 } 100 101 static void closid_init(void) 102 { 103 struct rdt_resource *r; 104 int rdt_min_closid = 32; 105 106 /* Compute rdt_min_closid across all resources */ 107 for_each_alloc_enabled_rdt_resource(r) 108 rdt_min_closid = min(rdt_min_closid, r->num_closid); 109 110 closid_free_map = BIT_MASK(rdt_min_closid) - 1; 111 112 /* CLOSID 0 is always reserved for the default group */ 113 closid_free_map &= ~1; 114 closid_free_map_len = rdt_min_closid; 115 } 116 117 static int closid_alloc(void) 118 { 119 u32 closid = ffs(closid_free_map); 120 121 if (closid == 0) 122 return -ENOSPC; 123 closid--; 124 closid_free_map &= ~(1 << closid); 125 126 return closid; 127 } 128 129 void closid_free(int closid) 130 { 131 closid_free_map |= 1 << closid; 132 } 133 134 /** 135 * closid_allocated - test if provided closid is in use 136 * @closid: closid to be tested 137 * 138 * Return: true if @closid is currently associated with a resource group, 139 * false if @closid is free 140 */ 141 static bool closid_allocated(unsigned int closid) 142 { 143 return (closid_free_map & (1 << closid)) == 0; 144 } 145 146 /** 147 * rdtgroup_mode_by_closid - Return mode of resource group with closid 148 * @closid: closid if the resource group 149 * 150 * Each resource group is associated with a @closid. Here the mode 151 * of a resource group can be queried by searching for it using its closid. 152 * 153 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid 154 */ 155 enum rdtgrp_mode rdtgroup_mode_by_closid(int closid) 156 { 157 struct rdtgroup *rdtgrp; 158 159 list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { 160 if (rdtgrp->closid == closid) 161 return rdtgrp->mode; 162 } 163 164 return RDT_NUM_MODES; 165 } 166 167 static const char * const rdt_mode_str[] = { 168 [RDT_MODE_SHAREABLE] = "shareable", 169 [RDT_MODE_EXCLUSIVE] = "exclusive", 170 [RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup", 171 [RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked", 172 }; 173 174 /** 175 * rdtgroup_mode_str - Return the string representation of mode 176 * @mode: the resource group mode as &enum rdtgroup_mode 177 * 178 * Return: string representation of valid mode, "unknown" otherwise 179 */ 180 static const char *rdtgroup_mode_str(enum rdtgrp_mode mode) 181 { 182 if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES) 183 return "unknown"; 184 185 return rdt_mode_str[mode]; 186 } 187 188 /* set uid and gid of rdtgroup dirs and files to that of the creator */ 189 static int rdtgroup_kn_set_ugid(struct kernfs_node *kn) 190 { 191 struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID, 192 .ia_uid = current_fsuid(), 193 .ia_gid = current_fsgid(), }; 194 195 if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) && 196 gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID)) 197 return 0; 198 199 return kernfs_setattr(kn, &iattr); 200 } 201 202 static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft) 203 { 204 struct kernfs_node *kn; 205 int ret; 206 207 kn = __kernfs_create_file(parent_kn, rft->name, rft->mode, 208 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 209 0, rft->kf_ops, rft, NULL, NULL); 210 if (IS_ERR(kn)) 211 return PTR_ERR(kn); 212 213 ret = rdtgroup_kn_set_ugid(kn); 214 if (ret) { 215 kernfs_remove(kn); 216 return ret; 217 } 218 219 return 0; 220 } 221 222 static int rdtgroup_seqfile_show(struct seq_file *m, void *arg) 223 { 224 struct kernfs_open_file *of = m->private; 225 struct rftype *rft = of->kn->priv; 226 227 if (rft->seq_show) 228 return rft->seq_show(of, m, arg); 229 return 0; 230 } 231 232 static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf, 233 size_t nbytes, loff_t off) 234 { 235 struct rftype *rft = of->kn->priv; 236 237 if (rft->write) 238 return rft->write(of, buf, nbytes, off); 239 240 return -EINVAL; 241 } 242 243 static struct kernfs_ops rdtgroup_kf_single_ops = { 244 .atomic_write_len = PAGE_SIZE, 245 .write = rdtgroup_file_write, 246 .seq_show = rdtgroup_seqfile_show, 247 }; 248 249 static struct kernfs_ops kf_mondata_ops = { 250 .atomic_write_len = PAGE_SIZE, 251 .seq_show = rdtgroup_mondata_show, 252 }; 253 254 static bool is_cpu_list(struct kernfs_open_file *of) 255 { 256 struct rftype *rft = of->kn->priv; 257 258 return rft->flags & RFTYPE_FLAGS_CPUS_LIST; 259 } 260 261 static int rdtgroup_cpus_show(struct kernfs_open_file *of, 262 struct seq_file *s, void *v) 263 { 264 struct rdtgroup *rdtgrp; 265 struct cpumask *mask; 266 int ret = 0; 267 268 rdtgrp = rdtgroup_kn_lock_live(of->kn); 269 270 if (rdtgrp) { 271 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { 272 if (!rdtgrp->plr->d) { 273 rdt_last_cmd_clear(); 274 rdt_last_cmd_puts("Cache domain offline\n"); 275 ret = -ENODEV; 276 } else { 277 mask = &rdtgrp->plr->d->cpu_mask; 278 seq_printf(s, is_cpu_list(of) ? 279 "%*pbl\n" : "%*pb\n", 280 cpumask_pr_args(mask)); 281 } 282 } else { 283 seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n", 284 cpumask_pr_args(&rdtgrp->cpu_mask)); 285 } 286 } else { 287 ret = -ENOENT; 288 } 289 rdtgroup_kn_unlock(of->kn); 290 291 return ret; 292 } 293 294 /* 295 * This is safe against resctrl_sched_in() called from __switch_to() 296 * because __switch_to() is executed with interrupts disabled. A local call 297 * from update_closid_rmid() is proteced against __switch_to() because 298 * preemption is disabled. 299 */ 300 static void update_cpu_closid_rmid(void *info) 301 { 302 struct rdtgroup *r = info; 303 304 if (r) { 305 this_cpu_write(pqr_state.default_closid, r->closid); 306 this_cpu_write(pqr_state.default_rmid, r->mon.rmid); 307 } 308 309 /* 310 * We cannot unconditionally write the MSR because the current 311 * executing task might have its own closid selected. Just reuse 312 * the context switch code. 313 */ 314 resctrl_sched_in(); 315 } 316 317 /* 318 * Update the PGR_ASSOC MSR on all cpus in @cpu_mask, 319 * 320 * Per task closids/rmids must have been set up before calling this function. 321 */ 322 static void 323 update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r) 324 { 325 int cpu = get_cpu(); 326 327 if (cpumask_test_cpu(cpu, cpu_mask)) 328 update_cpu_closid_rmid(r); 329 smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1); 330 put_cpu(); 331 } 332 333 static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, 334 cpumask_var_t tmpmask) 335 { 336 struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp; 337 struct list_head *head; 338 339 /* Check whether cpus belong to parent ctrl group */ 340 cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask); 341 if (cpumask_weight(tmpmask)) { 342 rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n"); 343 return -EINVAL; 344 } 345 346 /* Check whether cpus are dropped from this group */ 347 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); 348 if (cpumask_weight(tmpmask)) { 349 /* Give any dropped cpus to parent rdtgroup */ 350 cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask); 351 update_closid_rmid(tmpmask, prgrp); 352 } 353 354 /* 355 * If we added cpus, remove them from previous group that owned them 356 * and update per-cpu rmid 357 */ 358 cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); 359 if (cpumask_weight(tmpmask)) { 360 head = &prgrp->mon.crdtgrp_list; 361 list_for_each_entry(crgrp, head, mon.crdtgrp_list) { 362 if (crgrp == rdtgrp) 363 continue; 364 cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask, 365 tmpmask); 366 } 367 update_closid_rmid(tmpmask, rdtgrp); 368 } 369 370 /* Done pushing/pulling - update this group with new mask */ 371 cpumask_copy(&rdtgrp->cpu_mask, newmask); 372 373 return 0; 374 } 375 376 static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m) 377 { 378 struct rdtgroup *crgrp; 379 380 cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m); 381 /* update the child mon group masks as well*/ 382 list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list) 383 cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask); 384 } 385 386 static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, 387 cpumask_var_t tmpmask, cpumask_var_t tmpmask1) 388 { 389 struct rdtgroup *r, *crgrp; 390 struct list_head *head; 391 392 /* Check whether cpus are dropped from this group */ 393 cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); 394 if (cpumask_weight(tmpmask)) { 395 /* Can't drop from default group */ 396 if (rdtgrp == &rdtgroup_default) { 397 rdt_last_cmd_puts("Can't drop CPUs from default group\n"); 398 return -EINVAL; 399 } 400 401 /* Give any dropped cpus to rdtgroup_default */ 402 cpumask_or(&rdtgroup_default.cpu_mask, 403 &rdtgroup_default.cpu_mask, tmpmask); 404 update_closid_rmid(tmpmask, &rdtgroup_default); 405 } 406 407 /* 408 * If we added cpus, remove them from previous group and 409 * the prev group's child groups that owned them 410 * and update per-cpu closid/rmid. 411 */ 412 cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); 413 if (cpumask_weight(tmpmask)) { 414 list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) { 415 if (r == rdtgrp) 416 continue; 417 cpumask_and(tmpmask1, &r->cpu_mask, tmpmask); 418 if (cpumask_weight(tmpmask1)) 419 cpumask_rdtgrp_clear(r, tmpmask1); 420 } 421 update_closid_rmid(tmpmask, rdtgrp); 422 } 423 424 /* Done pushing/pulling - update this group with new mask */ 425 cpumask_copy(&rdtgrp->cpu_mask, newmask); 426 427 /* 428 * Clear child mon group masks since there is a new parent mask 429 * now and update the rmid for the cpus the child lost. 430 */ 431 head = &rdtgrp->mon.crdtgrp_list; 432 list_for_each_entry(crgrp, head, mon.crdtgrp_list) { 433 cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask); 434 update_closid_rmid(tmpmask, rdtgrp); 435 cpumask_clear(&crgrp->cpu_mask); 436 } 437 438 return 0; 439 } 440 441 static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, 442 char *buf, size_t nbytes, loff_t off) 443 { 444 cpumask_var_t tmpmask, newmask, tmpmask1; 445 struct rdtgroup *rdtgrp; 446 int ret; 447 448 if (!buf) 449 return -EINVAL; 450 451 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) 452 return -ENOMEM; 453 if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) { 454 free_cpumask_var(tmpmask); 455 return -ENOMEM; 456 } 457 if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) { 458 free_cpumask_var(tmpmask); 459 free_cpumask_var(newmask); 460 return -ENOMEM; 461 } 462 463 rdtgrp = rdtgroup_kn_lock_live(of->kn); 464 if (!rdtgrp) { 465 ret = -ENOENT; 466 goto unlock; 467 } 468 469 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || 470 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { 471 ret = -EINVAL; 472 rdt_last_cmd_puts("Pseudo-locking in progress\n"); 473 goto unlock; 474 } 475 476 if (is_cpu_list(of)) 477 ret = cpulist_parse(buf, newmask); 478 else 479 ret = cpumask_parse(buf, newmask); 480 481 if (ret) { 482 rdt_last_cmd_puts("Bad CPU list/mask\n"); 483 goto unlock; 484 } 485 486 /* check that user didn't specify any offline cpus */ 487 cpumask_andnot(tmpmask, newmask, cpu_online_mask); 488 if (cpumask_weight(tmpmask)) { 489 ret = -EINVAL; 490 rdt_last_cmd_puts("Can only assign online CPUs\n"); 491 goto unlock; 492 } 493 494 if (rdtgrp->type == RDTCTRL_GROUP) 495 ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1); 496 else if (rdtgrp->type == RDTMON_GROUP) 497 ret = cpus_mon_write(rdtgrp, newmask, tmpmask); 498 else 499 ret = -EINVAL; 500 501 unlock: 502 rdtgroup_kn_unlock(of->kn); 503 free_cpumask_var(tmpmask); 504 free_cpumask_var(newmask); 505 free_cpumask_var(tmpmask1); 506 507 return ret ?: nbytes; 508 } 509 510 struct task_move_callback { 511 struct callback_head work; 512 struct rdtgroup *rdtgrp; 513 }; 514 515 static void move_myself(struct callback_head *head) 516 { 517 struct task_move_callback *callback; 518 struct rdtgroup *rdtgrp; 519 520 callback = container_of(head, struct task_move_callback, work); 521 rdtgrp = callback->rdtgrp; 522 523 /* 524 * If resource group was deleted before this task work callback 525 * was invoked, then assign the task to root group and free the 526 * resource group. 527 */ 528 if (atomic_dec_and_test(&rdtgrp->waitcount) && 529 (rdtgrp->flags & RDT_DELETED)) { 530 current->closid = 0; 531 current->rmid = 0; 532 kfree(rdtgrp); 533 } 534 535 if (unlikely(current->flags & PF_EXITING)) 536 goto out; 537 538 preempt_disable(); 539 /* update PQR_ASSOC MSR to make resource group go into effect */ 540 resctrl_sched_in(); 541 preempt_enable(); 542 543 out: 544 kfree(callback); 545 } 546 547 static int __rdtgroup_move_task(struct task_struct *tsk, 548 struct rdtgroup *rdtgrp) 549 { 550 struct task_move_callback *callback; 551 int ret; 552 553 callback = kzalloc(sizeof(*callback), GFP_KERNEL); 554 if (!callback) 555 return -ENOMEM; 556 callback->work.func = move_myself; 557 callback->rdtgrp = rdtgrp; 558 559 /* 560 * Take a refcount, so rdtgrp cannot be freed before the 561 * callback has been invoked. 562 */ 563 atomic_inc(&rdtgrp->waitcount); 564 ret = task_work_add(tsk, &callback->work, true); 565 if (ret) { 566 /* 567 * Task is exiting. Drop the refcount and free the callback. 568 * No need to check the refcount as the group cannot be 569 * deleted before the write function unlocks rdtgroup_mutex. 570 */ 571 atomic_dec(&rdtgrp->waitcount); 572 kfree(callback); 573 rdt_last_cmd_puts("Task exited\n"); 574 } else { 575 /* 576 * For ctrl_mon groups move both closid and rmid. 577 * For monitor groups, can move the tasks only from 578 * their parent CTRL group. 579 */ 580 if (rdtgrp->type == RDTCTRL_GROUP) { 581 tsk->closid = rdtgrp->closid; 582 tsk->rmid = rdtgrp->mon.rmid; 583 } else if (rdtgrp->type == RDTMON_GROUP) { 584 if (rdtgrp->mon.parent->closid == tsk->closid) { 585 tsk->rmid = rdtgrp->mon.rmid; 586 } else { 587 rdt_last_cmd_puts("Can't move task to different control group\n"); 588 ret = -EINVAL; 589 } 590 } 591 } 592 return ret; 593 } 594 595 /** 596 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group 597 * @r: Resource group 598 * 599 * Return: 1 if tasks have been assigned to @r, 0 otherwise 600 */ 601 int rdtgroup_tasks_assigned(struct rdtgroup *r) 602 { 603 struct task_struct *p, *t; 604 int ret = 0; 605 606 lockdep_assert_held(&rdtgroup_mutex); 607 608 rcu_read_lock(); 609 for_each_process_thread(p, t) { 610 if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) || 611 (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid)) { 612 ret = 1; 613 break; 614 } 615 } 616 rcu_read_unlock(); 617 618 return ret; 619 } 620 621 static int rdtgroup_task_write_permission(struct task_struct *task, 622 struct kernfs_open_file *of) 623 { 624 const struct cred *tcred = get_task_cred(task); 625 const struct cred *cred = current_cred(); 626 int ret = 0; 627 628 /* 629 * Even if we're attaching all tasks in the thread group, we only 630 * need to check permissions on one of them. 631 */ 632 if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && 633 !uid_eq(cred->euid, tcred->uid) && 634 !uid_eq(cred->euid, tcred->suid)) { 635 rdt_last_cmd_printf("No permission to move task %d\n", task->pid); 636 ret = -EPERM; 637 } 638 639 put_cred(tcred); 640 return ret; 641 } 642 643 static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp, 644 struct kernfs_open_file *of) 645 { 646 struct task_struct *tsk; 647 int ret; 648 649 rcu_read_lock(); 650 if (pid) { 651 tsk = find_task_by_vpid(pid); 652 if (!tsk) { 653 rcu_read_unlock(); 654 rdt_last_cmd_printf("No task %d\n", pid); 655 return -ESRCH; 656 } 657 } else { 658 tsk = current; 659 } 660 661 get_task_struct(tsk); 662 rcu_read_unlock(); 663 664 ret = rdtgroup_task_write_permission(tsk, of); 665 if (!ret) 666 ret = __rdtgroup_move_task(tsk, rdtgrp); 667 668 put_task_struct(tsk); 669 return ret; 670 } 671 672 static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of, 673 char *buf, size_t nbytes, loff_t off) 674 { 675 struct rdtgroup *rdtgrp; 676 int ret = 0; 677 pid_t pid; 678 679 if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) 680 return -EINVAL; 681 rdtgrp = rdtgroup_kn_lock_live(of->kn); 682 if (!rdtgrp) { 683 rdtgroup_kn_unlock(of->kn); 684 return -ENOENT; 685 } 686 rdt_last_cmd_clear(); 687 688 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || 689 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { 690 ret = -EINVAL; 691 rdt_last_cmd_puts("Pseudo-locking in progress\n"); 692 goto unlock; 693 } 694 695 ret = rdtgroup_move_task(pid, rdtgrp, of); 696 697 unlock: 698 rdtgroup_kn_unlock(of->kn); 699 700 return ret ?: nbytes; 701 } 702 703 static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s) 704 { 705 struct task_struct *p, *t; 706 707 rcu_read_lock(); 708 for_each_process_thread(p, t) { 709 if ((r->type == RDTCTRL_GROUP && t->closid == r->closid) || 710 (r->type == RDTMON_GROUP && t->rmid == r->mon.rmid)) 711 seq_printf(s, "%d\n", t->pid); 712 } 713 rcu_read_unlock(); 714 } 715 716 static int rdtgroup_tasks_show(struct kernfs_open_file *of, 717 struct seq_file *s, void *v) 718 { 719 struct rdtgroup *rdtgrp; 720 int ret = 0; 721 722 rdtgrp = rdtgroup_kn_lock_live(of->kn); 723 if (rdtgrp) 724 show_rdt_tasks(rdtgrp, s); 725 else 726 ret = -ENOENT; 727 rdtgroup_kn_unlock(of->kn); 728 729 return ret; 730 } 731 732 static int rdt_last_cmd_status_show(struct kernfs_open_file *of, 733 struct seq_file *seq, void *v) 734 { 735 int len; 736 737 mutex_lock(&rdtgroup_mutex); 738 len = seq_buf_used(&last_cmd_status); 739 if (len) 740 seq_printf(seq, "%.*s", len, last_cmd_status_buf); 741 else 742 seq_puts(seq, "ok\n"); 743 mutex_unlock(&rdtgroup_mutex); 744 return 0; 745 } 746 747 static int rdt_num_closids_show(struct kernfs_open_file *of, 748 struct seq_file *seq, void *v) 749 { 750 struct rdt_resource *r = of->kn->parent->priv; 751 752 seq_printf(seq, "%d\n", r->num_closid); 753 return 0; 754 } 755 756 static int rdt_default_ctrl_show(struct kernfs_open_file *of, 757 struct seq_file *seq, void *v) 758 { 759 struct rdt_resource *r = of->kn->parent->priv; 760 761 seq_printf(seq, "%x\n", r->default_ctrl); 762 return 0; 763 } 764 765 static int rdt_min_cbm_bits_show(struct kernfs_open_file *of, 766 struct seq_file *seq, void *v) 767 { 768 struct rdt_resource *r = of->kn->parent->priv; 769 770 seq_printf(seq, "%u\n", r->cache.min_cbm_bits); 771 return 0; 772 } 773 774 static int rdt_shareable_bits_show(struct kernfs_open_file *of, 775 struct seq_file *seq, void *v) 776 { 777 struct rdt_resource *r = of->kn->parent->priv; 778 779 seq_printf(seq, "%x\n", r->cache.shareable_bits); 780 return 0; 781 } 782 783 /** 784 * rdt_bit_usage_show - Display current usage of resources 785 * 786 * A domain is a shared resource that can now be allocated differently. Here 787 * we display the current regions of the domain as an annotated bitmask. 788 * For each domain of this resource its allocation bitmask 789 * is annotated as below to indicate the current usage of the corresponding bit: 790 * 0 - currently unused 791 * X - currently available for sharing and used by software and hardware 792 * H - currently used by hardware only but available for software use 793 * S - currently used and shareable by software only 794 * E - currently used exclusively by one resource group 795 * P - currently pseudo-locked by one resource group 796 */ 797 static int rdt_bit_usage_show(struct kernfs_open_file *of, 798 struct seq_file *seq, void *v) 799 { 800 struct rdt_resource *r = of->kn->parent->priv; 801 /* 802 * Use unsigned long even though only 32 bits are used to ensure 803 * test_bit() is used safely. 804 */ 805 unsigned long sw_shareable = 0, hw_shareable = 0; 806 unsigned long exclusive = 0, pseudo_locked = 0; 807 struct rdt_domain *dom; 808 int i, hwb, swb, excl, psl; 809 enum rdtgrp_mode mode; 810 bool sep = false; 811 u32 *ctrl; 812 813 mutex_lock(&rdtgroup_mutex); 814 hw_shareable = r->cache.shareable_bits; 815 list_for_each_entry(dom, &r->domains, list) { 816 if (sep) 817 seq_putc(seq, ';'); 818 ctrl = dom->ctrl_val; 819 sw_shareable = 0; 820 exclusive = 0; 821 seq_printf(seq, "%d=", dom->id); 822 for (i = 0; i < closids_supported(); i++, ctrl++) { 823 if (!closid_allocated(i)) 824 continue; 825 mode = rdtgroup_mode_by_closid(i); 826 switch (mode) { 827 case RDT_MODE_SHAREABLE: 828 sw_shareable |= *ctrl; 829 break; 830 case RDT_MODE_EXCLUSIVE: 831 exclusive |= *ctrl; 832 break; 833 case RDT_MODE_PSEUDO_LOCKSETUP: 834 /* 835 * RDT_MODE_PSEUDO_LOCKSETUP is possible 836 * here but not included since the CBM 837 * associated with this CLOSID in this mode 838 * is not initialized and no task or cpu can be 839 * assigned this CLOSID. 840 */ 841 break; 842 case RDT_MODE_PSEUDO_LOCKED: 843 case RDT_NUM_MODES: 844 WARN(1, 845 "invalid mode for closid %d\n", i); 846 break; 847 } 848 } 849 for (i = r->cache.cbm_len - 1; i >= 0; i--) { 850 pseudo_locked = dom->plr ? dom->plr->cbm : 0; 851 hwb = test_bit(i, &hw_shareable); 852 swb = test_bit(i, &sw_shareable); 853 excl = test_bit(i, &exclusive); 854 psl = test_bit(i, &pseudo_locked); 855 if (hwb && swb) 856 seq_putc(seq, 'X'); 857 else if (hwb && !swb) 858 seq_putc(seq, 'H'); 859 else if (!hwb && swb) 860 seq_putc(seq, 'S'); 861 else if (excl) 862 seq_putc(seq, 'E'); 863 else if (psl) 864 seq_putc(seq, 'P'); 865 else /* Unused bits remain */ 866 seq_putc(seq, '0'); 867 } 868 sep = true; 869 } 870 seq_putc(seq, '\n'); 871 mutex_unlock(&rdtgroup_mutex); 872 return 0; 873 } 874 875 static int rdt_min_bw_show(struct kernfs_open_file *of, 876 struct seq_file *seq, void *v) 877 { 878 struct rdt_resource *r = of->kn->parent->priv; 879 880 seq_printf(seq, "%u\n", r->membw.min_bw); 881 return 0; 882 } 883 884 static int rdt_num_rmids_show(struct kernfs_open_file *of, 885 struct seq_file *seq, void *v) 886 { 887 struct rdt_resource *r = of->kn->parent->priv; 888 889 seq_printf(seq, "%d\n", r->num_rmid); 890 891 return 0; 892 } 893 894 static int rdt_mon_features_show(struct kernfs_open_file *of, 895 struct seq_file *seq, void *v) 896 { 897 struct rdt_resource *r = of->kn->parent->priv; 898 struct mon_evt *mevt; 899 900 list_for_each_entry(mevt, &r->evt_list, list) 901 seq_printf(seq, "%s\n", mevt->name); 902 903 return 0; 904 } 905 906 static int rdt_bw_gran_show(struct kernfs_open_file *of, 907 struct seq_file *seq, void *v) 908 { 909 struct rdt_resource *r = of->kn->parent->priv; 910 911 seq_printf(seq, "%u\n", r->membw.bw_gran); 912 return 0; 913 } 914 915 static int rdt_delay_linear_show(struct kernfs_open_file *of, 916 struct seq_file *seq, void *v) 917 { 918 struct rdt_resource *r = of->kn->parent->priv; 919 920 seq_printf(seq, "%u\n", r->membw.delay_linear); 921 return 0; 922 } 923 924 static int max_threshold_occ_show(struct kernfs_open_file *of, 925 struct seq_file *seq, void *v) 926 { 927 struct rdt_resource *r = of->kn->parent->priv; 928 929 seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale); 930 931 return 0; 932 } 933 934 static ssize_t max_threshold_occ_write(struct kernfs_open_file *of, 935 char *buf, size_t nbytes, loff_t off) 936 { 937 struct rdt_resource *r = of->kn->parent->priv; 938 unsigned int bytes; 939 int ret; 940 941 ret = kstrtouint(buf, 0, &bytes); 942 if (ret) 943 return ret; 944 945 if (bytes > (boot_cpu_data.x86_cache_size * 1024)) 946 return -EINVAL; 947 948 resctrl_cqm_threshold = bytes / r->mon_scale; 949 950 return nbytes; 951 } 952 953 /* 954 * rdtgroup_mode_show - Display mode of this resource group 955 */ 956 static int rdtgroup_mode_show(struct kernfs_open_file *of, 957 struct seq_file *s, void *v) 958 { 959 struct rdtgroup *rdtgrp; 960 961 rdtgrp = rdtgroup_kn_lock_live(of->kn); 962 if (!rdtgrp) { 963 rdtgroup_kn_unlock(of->kn); 964 return -ENOENT; 965 } 966 967 seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode)); 968 969 rdtgroup_kn_unlock(of->kn); 970 return 0; 971 } 972 973 /** 974 * rdt_cdp_peer_get - Retrieve CDP peer if it exists 975 * @r: RDT resource to which RDT domain @d belongs 976 * @d: Cache instance for which a CDP peer is requested 977 * @r_cdp: RDT resource that shares hardware with @r (RDT resource peer) 978 * Used to return the result. 979 * @d_cdp: RDT domain that shares hardware with @d (RDT domain peer) 980 * Used to return the result. 981 * 982 * RDT resources are managed independently and by extension the RDT domains 983 * (RDT resource instances) are managed independently also. The Code and 984 * Data Prioritization (CDP) RDT resources, while managed independently, 985 * could refer to the same underlying hardware. For example, 986 * RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache. 987 * 988 * When provided with an RDT resource @r and an instance of that RDT 989 * resource @d rdt_cdp_peer_get() will return if there is a peer RDT 990 * resource and the exact instance that shares the same hardware. 991 * 992 * Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists. 993 * If a CDP peer was found, @r_cdp will point to the peer RDT resource 994 * and @d_cdp will point to the peer RDT domain. 995 */ 996 static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d, 997 struct rdt_resource **r_cdp, 998 struct rdt_domain **d_cdp) 999 { 1000 struct rdt_resource *_r_cdp = NULL; 1001 struct rdt_domain *_d_cdp = NULL; 1002 int ret = 0; 1003 1004 switch (r->rid) { 1005 case RDT_RESOURCE_L3DATA: 1006 _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE]; 1007 break; 1008 case RDT_RESOURCE_L3CODE: 1009 _r_cdp = &rdt_resources_all[RDT_RESOURCE_L3DATA]; 1010 break; 1011 case RDT_RESOURCE_L2DATA: 1012 _r_cdp = &rdt_resources_all[RDT_RESOURCE_L2CODE]; 1013 break; 1014 case RDT_RESOURCE_L2CODE: 1015 _r_cdp = &rdt_resources_all[RDT_RESOURCE_L2DATA]; 1016 break; 1017 default: 1018 ret = -ENOENT; 1019 goto out; 1020 } 1021 1022 /* 1023 * When a new CPU comes online and CDP is enabled then the new 1024 * RDT domains (if any) associated with both CDP RDT resources 1025 * are added in the same CPU online routine while the 1026 * rdtgroup_mutex is held. It should thus not happen for one 1027 * RDT domain to exist and be associated with its RDT CDP 1028 * resource but there is no RDT domain associated with the 1029 * peer RDT CDP resource. Hence the WARN. 1030 */ 1031 _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL); 1032 if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) { 1033 _r_cdp = NULL; 1034 ret = -EINVAL; 1035 } 1036 1037 out: 1038 *r_cdp = _r_cdp; 1039 *d_cdp = _d_cdp; 1040 1041 return ret; 1042 } 1043 1044 /** 1045 * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other 1046 * @r: Resource to which domain instance @d belongs. 1047 * @d: The domain instance for which @closid is being tested. 1048 * @cbm: Capacity bitmask being tested. 1049 * @closid: Intended closid for @cbm. 1050 * @exclusive: Only check if overlaps with exclusive resource groups 1051 * 1052 * Checks if provided @cbm intended to be used for @closid on domain 1053 * @d overlaps with any other closids or other hardware usage associated 1054 * with this domain. If @exclusive is true then only overlaps with 1055 * resource groups in exclusive mode will be considered. If @exclusive 1056 * is false then overlaps with any resource group or hardware entities 1057 * will be considered. 1058 * 1059 * @cbm is unsigned long, even if only 32 bits are used, to make the 1060 * bitmap functions work correctly. 1061 * 1062 * Return: false if CBM does not overlap, true if it does. 1063 */ 1064 static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, 1065 unsigned long cbm, int closid, bool exclusive) 1066 { 1067 enum rdtgrp_mode mode; 1068 unsigned long ctrl_b; 1069 u32 *ctrl; 1070 int i; 1071 1072 /* Check for any overlap with regions used by hardware directly */ 1073 if (!exclusive) { 1074 ctrl_b = r->cache.shareable_bits; 1075 if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) 1076 return true; 1077 } 1078 1079 /* Check for overlap with other resource groups */ 1080 ctrl = d->ctrl_val; 1081 for (i = 0; i < closids_supported(); i++, ctrl++) { 1082 ctrl_b = *ctrl; 1083 mode = rdtgroup_mode_by_closid(i); 1084 if (closid_allocated(i) && i != closid && 1085 mode != RDT_MODE_PSEUDO_LOCKSETUP) { 1086 if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) { 1087 if (exclusive) { 1088 if (mode == RDT_MODE_EXCLUSIVE) 1089 return true; 1090 continue; 1091 } 1092 return true; 1093 } 1094 } 1095 } 1096 1097 return false; 1098 } 1099 1100 /** 1101 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware 1102 * @r: Resource to which domain instance @d belongs. 1103 * @d: The domain instance for which @closid is being tested. 1104 * @cbm: Capacity bitmask being tested. 1105 * @closid: Intended closid for @cbm. 1106 * @exclusive: Only check if overlaps with exclusive resource groups 1107 * 1108 * Resources that can be allocated using a CBM can use the CBM to control 1109 * the overlap of these allocations. rdtgroup_cmb_overlaps() is the test 1110 * for overlap. Overlap test is not limited to the specific resource for 1111 * which the CBM is intended though - when dealing with CDP resources that 1112 * share the underlying hardware the overlap check should be performed on 1113 * the CDP resource sharing the hardware also. 1114 * 1115 * Refer to description of __rdtgroup_cbm_overlaps() for the details of the 1116 * overlap test. 1117 * 1118 * Return: true if CBM overlap detected, false if there is no overlap 1119 */ 1120 bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, 1121 unsigned long cbm, int closid, bool exclusive) 1122 { 1123 struct rdt_resource *r_cdp; 1124 struct rdt_domain *d_cdp; 1125 1126 if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, exclusive)) 1127 return true; 1128 1129 if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp) < 0) 1130 return false; 1131 1132 return __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, exclusive); 1133 } 1134 1135 /** 1136 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive 1137 * 1138 * An exclusive resource group implies that there should be no sharing of 1139 * its allocated resources. At the time this group is considered to be 1140 * exclusive this test can determine if its current schemata supports this 1141 * setting by testing for overlap with all other resource groups. 1142 * 1143 * Return: true if resource group can be exclusive, false if there is overlap 1144 * with allocations of other resource groups and thus this resource group 1145 * cannot be exclusive. 1146 */ 1147 static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) 1148 { 1149 int closid = rdtgrp->closid; 1150 struct rdt_resource *r; 1151 bool has_cache = false; 1152 struct rdt_domain *d; 1153 1154 for_each_alloc_enabled_rdt_resource(r) { 1155 if (r->rid == RDT_RESOURCE_MBA) 1156 continue; 1157 has_cache = true; 1158 list_for_each_entry(d, &r->domains, list) { 1159 if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid], 1160 rdtgrp->closid, false)) { 1161 rdt_last_cmd_puts("Schemata overlaps\n"); 1162 return false; 1163 } 1164 } 1165 } 1166 1167 if (!has_cache) { 1168 rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n"); 1169 return false; 1170 } 1171 1172 return true; 1173 } 1174 1175 /** 1176 * rdtgroup_mode_write - Modify the resource group's mode 1177 * 1178 */ 1179 static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, 1180 char *buf, size_t nbytes, loff_t off) 1181 { 1182 struct rdtgroup *rdtgrp; 1183 enum rdtgrp_mode mode; 1184 int ret = 0; 1185 1186 /* Valid input requires a trailing newline */ 1187 if (nbytes == 0 || buf[nbytes - 1] != '\n') 1188 return -EINVAL; 1189 buf[nbytes - 1] = '\0'; 1190 1191 rdtgrp = rdtgroup_kn_lock_live(of->kn); 1192 if (!rdtgrp) { 1193 rdtgroup_kn_unlock(of->kn); 1194 return -ENOENT; 1195 } 1196 1197 rdt_last_cmd_clear(); 1198 1199 mode = rdtgrp->mode; 1200 1201 if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) || 1202 (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) || 1203 (!strcmp(buf, "pseudo-locksetup") && 1204 mode == RDT_MODE_PSEUDO_LOCKSETUP) || 1205 (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED)) 1206 goto out; 1207 1208 if (mode == RDT_MODE_PSEUDO_LOCKED) { 1209 rdt_last_cmd_puts("Cannot change pseudo-locked group\n"); 1210 ret = -EINVAL; 1211 goto out; 1212 } 1213 1214 if (!strcmp(buf, "shareable")) { 1215 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { 1216 ret = rdtgroup_locksetup_exit(rdtgrp); 1217 if (ret) 1218 goto out; 1219 } 1220 rdtgrp->mode = RDT_MODE_SHAREABLE; 1221 } else if (!strcmp(buf, "exclusive")) { 1222 if (!rdtgroup_mode_test_exclusive(rdtgrp)) { 1223 ret = -EINVAL; 1224 goto out; 1225 } 1226 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { 1227 ret = rdtgroup_locksetup_exit(rdtgrp); 1228 if (ret) 1229 goto out; 1230 } 1231 rdtgrp->mode = RDT_MODE_EXCLUSIVE; 1232 } else if (!strcmp(buf, "pseudo-locksetup")) { 1233 ret = rdtgroup_locksetup_enter(rdtgrp); 1234 if (ret) 1235 goto out; 1236 rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP; 1237 } else { 1238 rdt_last_cmd_puts("Unknown or unsupported mode\n"); 1239 ret = -EINVAL; 1240 } 1241 1242 out: 1243 rdtgroup_kn_unlock(of->kn); 1244 return ret ?: nbytes; 1245 } 1246 1247 /** 1248 * rdtgroup_cbm_to_size - Translate CBM to size in bytes 1249 * @r: RDT resource to which @d belongs. 1250 * @d: RDT domain instance. 1251 * @cbm: bitmask for which the size should be computed. 1252 * 1253 * The bitmask provided associated with the RDT domain instance @d will be 1254 * translated into how many bytes it represents. The size in bytes is 1255 * computed by first dividing the total cache size by the CBM length to 1256 * determine how many bytes each bit in the bitmask represents. The result 1257 * is multiplied with the number of bits set in the bitmask. 1258 * 1259 * @cbm is unsigned long, even if only 32 bits are used to make the 1260 * bitmap functions work correctly. 1261 */ 1262 unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, 1263 struct rdt_domain *d, unsigned long cbm) 1264 { 1265 struct cpu_cacheinfo *ci; 1266 unsigned int size = 0; 1267 int num_b, i; 1268 1269 num_b = bitmap_weight(&cbm, r->cache.cbm_len); 1270 ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask)); 1271 for (i = 0; i < ci->num_leaves; i++) { 1272 if (ci->info_list[i].level == r->cache_level) { 1273 size = ci->info_list[i].size / r->cache.cbm_len * num_b; 1274 break; 1275 } 1276 } 1277 1278 return size; 1279 } 1280 1281 /** 1282 * rdtgroup_size_show - Display size in bytes of allocated regions 1283 * 1284 * The "size" file mirrors the layout of the "schemata" file, printing the 1285 * size in bytes of each region instead of the capacity bitmask. 1286 * 1287 */ 1288 static int rdtgroup_size_show(struct kernfs_open_file *of, 1289 struct seq_file *s, void *v) 1290 { 1291 struct rdtgroup *rdtgrp; 1292 struct rdt_resource *r; 1293 struct rdt_domain *d; 1294 unsigned int size; 1295 int ret = 0; 1296 bool sep; 1297 u32 ctrl; 1298 1299 rdtgrp = rdtgroup_kn_lock_live(of->kn); 1300 if (!rdtgrp) { 1301 rdtgroup_kn_unlock(of->kn); 1302 return -ENOENT; 1303 } 1304 1305 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { 1306 if (!rdtgrp->plr->d) { 1307 rdt_last_cmd_clear(); 1308 rdt_last_cmd_puts("Cache domain offline\n"); 1309 ret = -ENODEV; 1310 } else { 1311 seq_printf(s, "%*s:", max_name_width, 1312 rdtgrp->plr->r->name); 1313 size = rdtgroup_cbm_to_size(rdtgrp->plr->r, 1314 rdtgrp->plr->d, 1315 rdtgrp->plr->cbm); 1316 seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size); 1317 } 1318 goto out; 1319 } 1320 1321 for_each_alloc_enabled_rdt_resource(r) { 1322 sep = false; 1323 seq_printf(s, "%*s:", max_name_width, r->name); 1324 list_for_each_entry(d, &r->domains, list) { 1325 if (sep) 1326 seq_putc(s, ';'); 1327 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { 1328 size = 0; 1329 } else { 1330 ctrl = (!is_mba_sc(r) ? 1331 d->ctrl_val[rdtgrp->closid] : 1332 d->mbps_val[rdtgrp->closid]); 1333 if (r->rid == RDT_RESOURCE_MBA) 1334 size = ctrl; 1335 else 1336 size = rdtgroup_cbm_to_size(r, d, ctrl); 1337 } 1338 seq_printf(s, "%d=%u", d->id, size); 1339 sep = true; 1340 } 1341 seq_putc(s, '\n'); 1342 } 1343 1344 out: 1345 rdtgroup_kn_unlock(of->kn); 1346 1347 return ret; 1348 } 1349 1350 /* rdtgroup information files for one cache resource. */ 1351 static struct rftype res_common_files[] = { 1352 { 1353 .name = "last_cmd_status", 1354 .mode = 0444, 1355 .kf_ops = &rdtgroup_kf_single_ops, 1356 .seq_show = rdt_last_cmd_status_show, 1357 .fflags = RF_TOP_INFO, 1358 }, 1359 { 1360 .name = "num_closids", 1361 .mode = 0444, 1362 .kf_ops = &rdtgroup_kf_single_ops, 1363 .seq_show = rdt_num_closids_show, 1364 .fflags = RF_CTRL_INFO, 1365 }, 1366 { 1367 .name = "mon_features", 1368 .mode = 0444, 1369 .kf_ops = &rdtgroup_kf_single_ops, 1370 .seq_show = rdt_mon_features_show, 1371 .fflags = RF_MON_INFO, 1372 }, 1373 { 1374 .name = "num_rmids", 1375 .mode = 0444, 1376 .kf_ops = &rdtgroup_kf_single_ops, 1377 .seq_show = rdt_num_rmids_show, 1378 .fflags = RF_MON_INFO, 1379 }, 1380 { 1381 .name = "cbm_mask", 1382 .mode = 0444, 1383 .kf_ops = &rdtgroup_kf_single_ops, 1384 .seq_show = rdt_default_ctrl_show, 1385 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, 1386 }, 1387 { 1388 .name = "min_cbm_bits", 1389 .mode = 0444, 1390 .kf_ops = &rdtgroup_kf_single_ops, 1391 .seq_show = rdt_min_cbm_bits_show, 1392 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, 1393 }, 1394 { 1395 .name = "shareable_bits", 1396 .mode = 0444, 1397 .kf_ops = &rdtgroup_kf_single_ops, 1398 .seq_show = rdt_shareable_bits_show, 1399 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, 1400 }, 1401 { 1402 .name = "bit_usage", 1403 .mode = 0444, 1404 .kf_ops = &rdtgroup_kf_single_ops, 1405 .seq_show = rdt_bit_usage_show, 1406 .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, 1407 }, 1408 { 1409 .name = "min_bandwidth", 1410 .mode = 0444, 1411 .kf_ops = &rdtgroup_kf_single_ops, 1412 .seq_show = rdt_min_bw_show, 1413 .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, 1414 }, 1415 { 1416 .name = "bandwidth_gran", 1417 .mode = 0444, 1418 .kf_ops = &rdtgroup_kf_single_ops, 1419 .seq_show = rdt_bw_gran_show, 1420 .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, 1421 }, 1422 { 1423 .name = "delay_linear", 1424 .mode = 0444, 1425 .kf_ops = &rdtgroup_kf_single_ops, 1426 .seq_show = rdt_delay_linear_show, 1427 .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, 1428 }, 1429 { 1430 .name = "max_threshold_occupancy", 1431 .mode = 0644, 1432 .kf_ops = &rdtgroup_kf_single_ops, 1433 .write = max_threshold_occ_write, 1434 .seq_show = max_threshold_occ_show, 1435 .fflags = RF_MON_INFO | RFTYPE_RES_CACHE, 1436 }, 1437 { 1438 .name = "cpus", 1439 .mode = 0644, 1440 .kf_ops = &rdtgroup_kf_single_ops, 1441 .write = rdtgroup_cpus_write, 1442 .seq_show = rdtgroup_cpus_show, 1443 .fflags = RFTYPE_BASE, 1444 }, 1445 { 1446 .name = "cpus_list", 1447 .mode = 0644, 1448 .kf_ops = &rdtgroup_kf_single_ops, 1449 .write = rdtgroup_cpus_write, 1450 .seq_show = rdtgroup_cpus_show, 1451 .flags = RFTYPE_FLAGS_CPUS_LIST, 1452 .fflags = RFTYPE_BASE, 1453 }, 1454 { 1455 .name = "tasks", 1456 .mode = 0644, 1457 .kf_ops = &rdtgroup_kf_single_ops, 1458 .write = rdtgroup_tasks_write, 1459 .seq_show = rdtgroup_tasks_show, 1460 .fflags = RFTYPE_BASE, 1461 }, 1462 { 1463 .name = "schemata", 1464 .mode = 0644, 1465 .kf_ops = &rdtgroup_kf_single_ops, 1466 .write = rdtgroup_schemata_write, 1467 .seq_show = rdtgroup_schemata_show, 1468 .fflags = RF_CTRL_BASE, 1469 }, 1470 { 1471 .name = "mode", 1472 .mode = 0644, 1473 .kf_ops = &rdtgroup_kf_single_ops, 1474 .write = rdtgroup_mode_write, 1475 .seq_show = rdtgroup_mode_show, 1476 .fflags = RF_CTRL_BASE, 1477 }, 1478 { 1479 .name = "size", 1480 .mode = 0444, 1481 .kf_ops = &rdtgroup_kf_single_ops, 1482 .seq_show = rdtgroup_size_show, 1483 .fflags = RF_CTRL_BASE, 1484 }, 1485 1486 }; 1487 1488 static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags) 1489 { 1490 struct rftype *rfts, *rft; 1491 int ret, len; 1492 1493 rfts = res_common_files; 1494 len = ARRAY_SIZE(res_common_files); 1495 1496 lockdep_assert_held(&rdtgroup_mutex); 1497 1498 for (rft = rfts; rft < rfts + len; rft++) { 1499 if ((fflags & rft->fflags) == rft->fflags) { 1500 ret = rdtgroup_add_file(kn, rft); 1501 if (ret) 1502 goto error; 1503 } 1504 } 1505 1506 return 0; 1507 error: 1508 pr_warn("Failed to add %s, err=%d\n", rft->name, ret); 1509 while (--rft >= rfts) { 1510 if ((fflags & rft->fflags) == rft->fflags) 1511 kernfs_remove_by_name(kn, rft->name); 1512 } 1513 return ret; 1514 } 1515 1516 /** 1517 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file 1518 * @r: The resource group with which the file is associated. 1519 * @name: Name of the file 1520 * 1521 * The permissions of named resctrl file, directory, or link are modified 1522 * to not allow read, write, or execute by any user. 1523 * 1524 * WARNING: This function is intended to communicate to the user that the 1525 * resctrl file has been locked down - that it is not relevant to the 1526 * particular state the system finds itself in. It should not be relied 1527 * on to protect from user access because after the file's permissions 1528 * are restricted the user can still change the permissions using chmod 1529 * from the command line. 1530 * 1531 * Return: 0 on success, <0 on failure. 1532 */ 1533 int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name) 1534 { 1535 struct iattr iattr = {.ia_valid = ATTR_MODE,}; 1536 struct kernfs_node *kn; 1537 int ret = 0; 1538 1539 kn = kernfs_find_and_get_ns(r->kn, name, NULL); 1540 if (!kn) 1541 return -ENOENT; 1542 1543 switch (kernfs_type(kn)) { 1544 case KERNFS_DIR: 1545 iattr.ia_mode = S_IFDIR; 1546 break; 1547 case KERNFS_FILE: 1548 iattr.ia_mode = S_IFREG; 1549 break; 1550 case KERNFS_LINK: 1551 iattr.ia_mode = S_IFLNK; 1552 break; 1553 } 1554 1555 ret = kernfs_setattr(kn, &iattr); 1556 kernfs_put(kn); 1557 return ret; 1558 } 1559 1560 /** 1561 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file 1562 * @r: The resource group with which the file is associated. 1563 * @name: Name of the file 1564 * @mask: Mask of permissions that should be restored 1565 * 1566 * Restore the permissions of the named file. If @name is a directory the 1567 * permissions of its parent will be used. 1568 * 1569 * Return: 0 on success, <0 on failure. 1570 */ 1571 int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, 1572 umode_t mask) 1573 { 1574 struct iattr iattr = {.ia_valid = ATTR_MODE,}; 1575 struct kernfs_node *kn, *parent; 1576 struct rftype *rfts, *rft; 1577 int ret, len; 1578 1579 rfts = res_common_files; 1580 len = ARRAY_SIZE(res_common_files); 1581 1582 for (rft = rfts; rft < rfts + len; rft++) { 1583 if (!strcmp(rft->name, name)) 1584 iattr.ia_mode = rft->mode & mask; 1585 } 1586 1587 kn = kernfs_find_and_get_ns(r->kn, name, NULL); 1588 if (!kn) 1589 return -ENOENT; 1590 1591 switch (kernfs_type(kn)) { 1592 case KERNFS_DIR: 1593 parent = kernfs_get_parent(kn); 1594 if (parent) { 1595 iattr.ia_mode |= parent->mode; 1596 kernfs_put(parent); 1597 } 1598 iattr.ia_mode |= S_IFDIR; 1599 break; 1600 case KERNFS_FILE: 1601 iattr.ia_mode |= S_IFREG; 1602 break; 1603 case KERNFS_LINK: 1604 iattr.ia_mode |= S_IFLNK; 1605 break; 1606 } 1607 1608 ret = kernfs_setattr(kn, &iattr); 1609 kernfs_put(kn); 1610 return ret; 1611 } 1612 1613 static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name, 1614 unsigned long fflags) 1615 { 1616 struct kernfs_node *kn_subdir; 1617 int ret; 1618 1619 kn_subdir = kernfs_create_dir(kn_info, name, 1620 kn_info->mode, r); 1621 if (IS_ERR(kn_subdir)) 1622 return PTR_ERR(kn_subdir); 1623 1624 kernfs_get(kn_subdir); 1625 ret = rdtgroup_kn_set_ugid(kn_subdir); 1626 if (ret) 1627 return ret; 1628 1629 ret = rdtgroup_add_files(kn_subdir, fflags); 1630 if (!ret) 1631 kernfs_activate(kn_subdir); 1632 1633 return ret; 1634 } 1635 1636 static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) 1637 { 1638 struct rdt_resource *r; 1639 unsigned long fflags; 1640 char name[32]; 1641 int ret; 1642 1643 /* create the directory */ 1644 kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL); 1645 if (IS_ERR(kn_info)) 1646 return PTR_ERR(kn_info); 1647 kernfs_get(kn_info); 1648 1649 ret = rdtgroup_add_files(kn_info, RF_TOP_INFO); 1650 if (ret) 1651 goto out_destroy; 1652 1653 for_each_alloc_enabled_rdt_resource(r) { 1654 fflags = r->fflags | RF_CTRL_INFO; 1655 ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags); 1656 if (ret) 1657 goto out_destroy; 1658 } 1659 1660 for_each_mon_enabled_rdt_resource(r) { 1661 fflags = r->fflags | RF_MON_INFO; 1662 sprintf(name, "%s_MON", r->name); 1663 ret = rdtgroup_mkdir_info_resdir(r, name, fflags); 1664 if (ret) 1665 goto out_destroy; 1666 } 1667 1668 /* 1669 * This extra ref will be put in kernfs_remove() and guarantees 1670 * that @rdtgrp->kn is always accessible. 1671 */ 1672 kernfs_get(kn_info); 1673 1674 ret = rdtgroup_kn_set_ugid(kn_info); 1675 if (ret) 1676 goto out_destroy; 1677 1678 kernfs_activate(kn_info); 1679 1680 return 0; 1681 1682 out_destroy: 1683 kernfs_remove(kn_info); 1684 return ret; 1685 } 1686 1687 static int 1688 mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, 1689 char *name, struct kernfs_node **dest_kn) 1690 { 1691 struct kernfs_node *kn; 1692 int ret; 1693 1694 /* create the directory */ 1695 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); 1696 if (IS_ERR(kn)) 1697 return PTR_ERR(kn); 1698 1699 if (dest_kn) 1700 *dest_kn = kn; 1701 1702 /* 1703 * This extra ref will be put in kernfs_remove() and guarantees 1704 * that @rdtgrp->kn is always accessible. 1705 */ 1706 kernfs_get(kn); 1707 1708 ret = rdtgroup_kn_set_ugid(kn); 1709 if (ret) 1710 goto out_destroy; 1711 1712 kernfs_activate(kn); 1713 1714 return 0; 1715 1716 out_destroy: 1717 kernfs_remove(kn); 1718 return ret; 1719 } 1720 1721 static void l3_qos_cfg_update(void *arg) 1722 { 1723 bool *enable = arg; 1724 1725 wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); 1726 } 1727 1728 static void l2_qos_cfg_update(void *arg) 1729 { 1730 bool *enable = arg; 1731 1732 wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); 1733 } 1734 1735 static inline bool is_mba_linear(void) 1736 { 1737 return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear; 1738 } 1739 1740 static int set_cache_qos_cfg(int level, bool enable) 1741 { 1742 void (*update)(void *arg); 1743 struct rdt_resource *r_l; 1744 cpumask_var_t cpu_mask; 1745 struct rdt_domain *d; 1746 int cpu; 1747 1748 if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) 1749 return -ENOMEM; 1750 1751 if (level == RDT_RESOURCE_L3) 1752 update = l3_qos_cfg_update; 1753 else if (level == RDT_RESOURCE_L2) 1754 update = l2_qos_cfg_update; 1755 else 1756 return -EINVAL; 1757 1758 r_l = &rdt_resources_all[level]; 1759 list_for_each_entry(d, &r_l->domains, list) { 1760 /* Pick one CPU from each domain instance to update MSR */ 1761 cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); 1762 } 1763 cpu = get_cpu(); 1764 /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */ 1765 if (cpumask_test_cpu(cpu, cpu_mask)) 1766 update(&enable); 1767 /* Update QOS_CFG MSR on all other cpus in cpu_mask. */ 1768 smp_call_function_many(cpu_mask, update, &enable, 1); 1769 put_cpu(); 1770 1771 free_cpumask_var(cpu_mask); 1772 1773 return 0; 1774 } 1775 1776 /* 1777 * Enable or disable the MBA software controller 1778 * which helps user specify bandwidth in MBps. 1779 * MBA software controller is supported only if 1780 * MBM is supported and MBA is in linear scale. 1781 */ 1782 static int set_mba_sc(bool mba_sc) 1783 { 1784 struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA]; 1785 struct rdt_domain *d; 1786 1787 if (!is_mbm_enabled() || !is_mba_linear() || 1788 mba_sc == is_mba_sc(r)) 1789 return -EINVAL; 1790 1791 r->membw.mba_sc = mba_sc; 1792 list_for_each_entry(d, &r->domains, list) 1793 setup_default_ctrlval(r, d->ctrl_val, d->mbps_val); 1794 1795 return 0; 1796 } 1797 1798 static int cdp_enable(int level, int data_type, int code_type) 1799 { 1800 struct rdt_resource *r_ldata = &rdt_resources_all[data_type]; 1801 struct rdt_resource *r_lcode = &rdt_resources_all[code_type]; 1802 struct rdt_resource *r_l = &rdt_resources_all[level]; 1803 int ret; 1804 1805 if (!r_l->alloc_capable || !r_ldata->alloc_capable || 1806 !r_lcode->alloc_capable) 1807 return -EINVAL; 1808 1809 ret = set_cache_qos_cfg(level, true); 1810 if (!ret) { 1811 r_l->alloc_enabled = false; 1812 r_ldata->alloc_enabled = true; 1813 r_lcode->alloc_enabled = true; 1814 } 1815 return ret; 1816 } 1817 1818 static int cdpl3_enable(void) 1819 { 1820 return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, 1821 RDT_RESOURCE_L3CODE); 1822 } 1823 1824 static int cdpl2_enable(void) 1825 { 1826 return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, 1827 RDT_RESOURCE_L2CODE); 1828 } 1829 1830 static void cdp_disable(int level, int data_type, int code_type) 1831 { 1832 struct rdt_resource *r = &rdt_resources_all[level]; 1833 1834 r->alloc_enabled = r->alloc_capable; 1835 1836 if (rdt_resources_all[data_type].alloc_enabled) { 1837 rdt_resources_all[data_type].alloc_enabled = false; 1838 rdt_resources_all[code_type].alloc_enabled = false; 1839 set_cache_qos_cfg(level, false); 1840 } 1841 } 1842 1843 static void cdpl3_disable(void) 1844 { 1845 cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE); 1846 } 1847 1848 static void cdpl2_disable(void) 1849 { 1850 cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE); 1851 } 1852 1853 static void cdp_disable_all(void) 1854 { 1855 if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled) 1856 cdpl3_disable(); 1857 if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) 1858 cdpl2_disable(); 1859 } 1860 1861 /* 1862 * We don't allow rdtgroup directories to be created anywhere 1863 * except the root directory. Thus when looking for the rdtgroup 1864 * structure for a kernfs node we are either looking at a directory, 1865 * in which case the rdtgroup structure is pointed at by the "priv" 1866 * field, otherwise we have a file, and need only look to the parent 1867 * to find the rdtgroup. 1868 */ 1869 static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn) 1870 { 1871 if (kernfs_type(kn) == KERNFS_DIR) { 1872 /* 1873 * All the resource directories use "kn->priv" 1874 * to point to the "struct rdtgroup" for the 1875 * resource. "info" and its subdirectories don't 1876 * have rdtgroup structures, so return NULL here. 1877 */ 1878 if (kn == kn_info || kn->parent == kn_info) 1879 return NULL; 1880 else 1881 return kn->priv; 1882 } else { 1883 return kn->parent->priv; 1884 } 1885 } 1886 1887 struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn) 1888 { 1889 struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); 1890 1891 if (!rdtgrp) 1892 return NULL; 1893 1894 atomic_inc(&rdtgrp->waitcount); 1895 kernfs_break_active_protection(kn); 1896 1897 mutex_lock(&rdtgroup_mutex); 1898 1899 /* Was this group deleted while we waited? */ 1900 if (rdtgrp->flags & RDT_DELETED) 1901 return NULL; 1902 1903 return rdtgrp; 1904 } 1905 1906 void rdtgroup_kn_unlock(struct kernfs_node *kn) 1907 { 1908 struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); 1909 1910 if (!rdtgrp) 1911 return; 1912 1913 mutex_unlock(&rdtgroup_mutex); 1914 1915 if (atomic_dec_and_test(&rdtgrp->waitcount) && 1916 (rdtgrp->flags & RDT_DELETED)) { 1917 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 1918 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) 1919 rdtgroup_pseudo_lock_remove(rdtgrp); 1920 kernfs_unbreak_active_protection(kn); 1921 kernfs_put(rdtgrp->kn); 1922 kfree(rdtgrp); 1923 } else { 1924 kernfs_unbreak_active_protection(kn); 1925 } 1926 } 1927 1928 static int mkdir_mondata_all(struct kernfs_node *parent_kn, 1929 struct rdtgroup *prgrp, 1930 struct kernfs_node **mon_data_kn); 1931 1932 static int rdt_enable_ctx(struct rdt_fs_context *ctx) 1933 { 1934 int ret = 0; 1935 1936 if (ctx->enable_cdpl2) 1937 ret = cdpl2_enable(); 1938 1939 if (!ret && ctx->enable_cdpl3) 1940 ret = cdpl3_enable(); 1941 1942 if (!ret && ctx->enable_mba_mbps) 1943 ret = set_mba_sc(true); 1944 1945 return ret; 1946 } 1947 1948 static int rdt_get_tree(struct fs_context *fc) 1949 { 1950 struct rdt_fs_context *ctx = rdt_fc2context(fc); 1951 struct rdt_domain *dom; 1952 struct rdt_resource *r; 1953 int ret; 1954 1955 cpus_read_lock(); 1956 mutex_lock(&rdtgroup_mutex); 1957 /* 1958 * resctrl file system can only be mounted once. 1959 */ 1960 if (static_branch_unlikely(&rdt_enable_key)) { 1961 ret = -EBUSY; 1962 goto out; 1963 } 1964 1965 ret = rdt_enable_ctx(ctx); 1966 if (ret < 0) 1967 goto out_cdp; 1968 1969 closid_init(); 1970 1971 ret = rdtgroup_create_info_dir(rdtgroup_default.kn); 1972 if (ret < 0) 1973 goto out_mba; 1974 1975 if (rdt_mon_capable) { 1976 ret = mongroup_create_dir(rdtgroup_default.kn, 1977 NULL, "mon_groups", 1978 &kn_mongrp); 1979 if (ret < 0) 1980 goto out_info; 1981 kernfs_get(kn_mongrp); 1982 1983 ret = mkdir_mondata_all(rdtgroup_default.kn, 1984 &rdtgroup_default, &kn_mondata); 1985 if (ret < 0) 1986 goto out_mongrp; 1987 kernfs_get(kn_mondata); 1988 rdtgroup_default.mon.mon_data_kn = kn_mondata; 1989 } 1990 1991 ret = rdt_pseudo_lock_init(); 1992 if (ret) 1993 goto out_mondata; 1994 1995 ret = kernfs_get_tree(fc); 1996 if (ret < 0) 1997 goto out_psl; 1998 1999 if (rdt_alloc_capable) 2000 static_branch_enable_cpuslocked(&rdt_alloc_enable_key); 2001 if (rdt_mon_capable) 2002 static_branch_enable_cpuslocked(&rdt_mon_enable_key); 2003 2004 if (rdt_alloc_capable || rdt_mon_capable) 2005 static_branch_enable_cpuslocked(&rdt_enable_key); 2006 2007 if (is_mbm_enabled()) { 2008 r = &rdt_resources_all[RDT_RESOURCE_L3]; 2009 list_for_each_entry(dom, &r->domains, list) 2010 mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL); 2011 } 2012 2013 goto out; 2014 2015 out_psl: 2016 rdt_pseudo_lock_release(); 2017 out_mondata: 2018 if (rdt_mon_capable) 2019 kernfs_remove(kn_mondata); 2020 out_mongrp: 2021 if (rdt_mon_capable) 2022 kernfs_remove(kn_mongrp); 2023 out_info: 2024 kernfs_remove(kn_info); 2025 out_mba: 2026 if (ctx->enable_mba_mbps) 2027 set_mba_sc(false); 2028 out_cdp: 2029 cdp_disable_all(); 2030 out: 2031 rdt_last_cmd_clear(); 2032 mutex_unlock(&rdtgroup_mutex); 2033 cpus_read_unlock(); 2034 return ret; 2035 } 2036 2037 enum rdt_param { 2038 Opt_cdp, 2039 Opt_cdpl2, 2040 Opt_mba_mbps, 2041 nr__rdt_params 2042 }; 2043 2044 static const struct fs_parameter_spec rdt_param_specs[] = { 2045 fsparam_flag("cdp", Opt_cdp), 2046 fsparam_flag("cdpl2", Opt_cdpl2), 2047 fsparam_flag("mba_MBps", Opt_mba_mbps), 2048 {} 2049 }; 2050 2051 static const struct fs_parameter_description rdt_fs_parameters = { 2052 .name = "rdt", 2053 .specs = rdt_param_specs, 2054 }; 2055 2056 static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param) 2057 { 2058 struct rdt_fs_context *ctx = rdt_fc2context(fc); 2059 struct fs_parse_result result; 2060 int opt; 2061 2062 opt = fs_parse(fc, &rdt_fs_parameters, param, &result); 2063 if (opt < 0) 2064 return opt; 2065 2066 switch (opt) { 2067 case Opt_cdp: 2068 ctx->enable_cdpl3 = true; 2069 return 0; 2070 case Opt_cdpl2: 2071 ctx->enable_cdpl2 = true; 2072 return 0; 2073 case Opt_mba_mbps: 2074 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) 2075 return -EINVAL; 2076 ctx->enable_mba_mbps = true; 2077 return 0; 2078 } 2079 2080 return -EINVAL; 2081 } 2082 2083 static void rdt_fs_context_free(struct fs_context *fc) 2084 { 2085 struct rdt_fs_context *ctx = rdt_fc2context(fc); 2086 2087 kernfs_free_fs_context(fc); 2088 kfree(ctx); 2089 } 2090 2091 static const struct fs_context_operations rdt_fs_context_ops = { 2092 .free = rdt_fs_context_free, 2093 .parse_param = rdt_parse_param, 2094 .get_tree = rdt_get_tree, 2095 }; 2096 2097 static int rdt_init_fs_context(struct fs_context *fc) 2098 { 2099 struct rdt_fs_context *ctx; 2100 2101 ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL); 2102 if (!ctx) 2103 return -ENOMEM; 2104 2105 ctx->kfc.root = rdt_root; 2106 ctx->kfc.magic = RDTGROUP_SUPER_MAGIC; 2107 fc->fs_private = &ctx->kfc; 2108 fc->ops = &rdt_fs_context_ops; 2109 put_user_ns(fc->user_ns); 2110 fc->user_ns = get_user_ns(&init_user_ns); 2111 fc->global = true; 2112 return 0; 2113 } 2114 2115 static int reset_all_ctrls(struct rdt_resource *r) 2116 { 2117 struct msr_param msr_param; 2118 cpumask_var_t cpu_mask; 2119 struct rdt_domain *d; 2120 int i, cpu; 2121 2122 if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) 2123 return -ENOMEM; 2124 2125 msr_param.res = r; 2126 msr_param.low = 0; 2127 msr_param.high = r->num_closid; 2128 2129 /* 2130 * Disable resource control for this resource by setting all 2131 * CBMs in all domains to the maximum mask value. Pick one CPU 2132 * from each domain to update the MSRs below. 2133 */ 2134 list_for_each_entry(d, &r->domains, list) { 2135 cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); 2136 2137 for (i = 0; i < r->num_closid; i++) 2138 d->ctrl_val[i] = r->default_ctrl; 2139 } 2140 cpu = get_cpu(); 2141 /* Update CBM on this cpu if it's in cpu_mask. */ 2142 if (cpumask_test_cpu(cpu, cpu_mask)) 2143 rdt_ctrl_update(&msr_param); 2144 /* Update CBM on all other cpus in cpu_mask. */ 2145 smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1); 2146 put_cpu(); 2147 2148 free_cpumask_var(cpu_mask); 2149 2150 return 0; 2151 } 2152 2153 static bool is_closid_match(struct task_struct *t, struct rdtgroup *r) 2154 { 2155 return (rdt_alloc_capable && 2156 (r->type == RDTCTRL_GROUP) && (t->closid == r->closid)); 2157 } 2158 2159 static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r) 2160 { 2161 return (rdt_mon_capable && 2162 (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid)); 2163 } 2164 2165 /* 2166 * Move tasks from one to the other group. If @from is NULL, then all tasks 2167 * in the systems are moved unconditionally (used for teardown). 2168 * 2169 * If @mask is not NULL the cpus on which moved tasks are running are set 2170 * in that mask so the update smp function call is restricted to affected 2171 * cpus. 2172 */ 2173 static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, 2174 struct cpumask *mask) 2175 { 2176 struct task_struct *p, *t; 2177 2178 read_lock(&tasklist_lock); 2179 for_each_process_thread(p, t) { 2180 if (!from || is_closid_match(t, from) || 2181 is_rmid_match(t, from)) { 2182 t->closid = to->closid; 2183 t->rmid = to->mon.rmid; 2184 2185 #ifdef CONFIG_SMP 2186 /* 2187 * This is safe on x86 w/o barriers as the ordering 2188 * of writing to task_cpu() and t->on_cpu is 2189 * reverse to the reading here. The detection is 2190 * inaccurate as tasks might move or schedule 2191 * before the smp function call takes place. In 2192 * such a case the function call is pointless, but 2193 * there is no other side effect. 2194 */ 2195 if (mask && t->on_cpu) 2196 cpumask_set_cpu(task_cpu(t), mask); 2197 #endif 2198 } 2199 } 2200 read_unlock(&tasklist_lock); 2201 } 2202 2203 static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp) 2204 { 2205 struct rdtgroup *sentry, *stmp; 2206 struct list_head *head; 2207 2208 head = &rdtgrp->mon.crdtgrp_list; 2209 list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) { 2210 free_rmid(sentry->mon.rmid); 2211 list_del(&sentry->mon.crdtgrp_list); 2212 kfree(sentry); 2213 } 2214 } 2215 2216 /* 2217 * Forcibly remove all of subdirectories under root. 2218 */ 2219 static void rmdir_all_sub(void) 2220 { 2221 struct rdtgroup *rdtgrp, *tmp; 2222 2223 /* Move all tasks to the default resource group */ 2224 rdt_move_group_tasks(NULL, &rdtgroup_default, NULL); 2225 2226 list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) { 2227 /* Free any child rmids */ 2228 free_all_child_rdtgrp(rdtgrp); 2229 2230 /* Remove each rdtgroup other than root */ 2231 if (rdtgrp == &rdtgroup_default) 2232 continue; 2233 2234 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 2235 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) 2236 rdtgroup_pseudo_lock_remove(rdtgrp); 2237 2238 /* 2239 * Give any CPUs back to the default group. We cannot copy 2240 * cpu_online_mask because a CPU might have executed the 2241 * offline callback already, but is still marked online. 2242 */ 2243 cpumask_or(&rdtgroup_default.cpu_mask, 2244 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); 2245 2246 free_rmid(rdtgrp->mon.rmid); 2247 2248 kernfs_remove(rdtgrp->kn); 2249 list_del(&rdtgrp->rdtgroup_list); 2250 kfree(rdtgrp); 2251 } 2252 /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ 2253 update_closid_rmid(cpu_online_mask, &rdtgroup_default); 2254 2255 kernfs_remove(kn_info); 2256 kernfs_remove(kn_mongrp); 2257 kernfs_remove(kn_mondata); 2258 } 2259 2260 static void rdt_kill_sb(struct super_block *sb) 2261 { 2262 struct rdt_resource *r; 2263 2264 cpus_read_lock(); 2265 mutex_lock(&rdtgroup_mutex); 2266 2267 set_mba_sc(false); 2268 2269 /*Put everything back to default values. */ 2270 for_each_alloc_enabled_rdt_resource(r) 2271 reset_all_ctrls(r); 2272 cdp_disable_all(); 2273 rmdir_all_sub(); 2274 rdt_pseudo_lock_release(); 2275 rdtgroup_default.mode = RDT_MODE_SHAREABLE; 2276 static_branch_disable_cpuslocked(&rdt_alloc_enable_key); 2277 static_branch_disable_cpuslocked(&rdt_mon_enable_key); 2278 static_branch_disable_cpuslocked(&rdt_enable_key); 2279 kernfs_kill_sb(sb); 2280 mutex_unlock(&rdtgroup_mutex); 2281 cpus_read_unlock(); 2282 } 2283 2284 static struct file_system_type rdt_fs_type = { 2285 .name = "resctrl", 2286 .init_fs_context = rdt_init_fs_context, 2287 .parameters = &rdt_fs_parameters, 2288 .kill_sb = rdt_kill_sb, 2289 }; 2290 2291 static int mon_addfile(struct kernfs_node *parent_kn, const char *name, 2292 void *priv) 2293 { 2294 struct kernfs_node *kn; 2295 int ret = 0; 2296 2297 kn = __kernfs_create_file(parent_kn, name, 0444, 2298 GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0, 2299 &kf_mondata_ops, priv, NULL, NULL); 2300 if (IS_ERR(kn)) 2301 return PTR_ERR(kn); 2302 2303 ret = rdtgroup_kn_set_ugid(kn); 2304 if (ret) { 2305 kernfs_remove(kn); 2306 return ret; 2307 } 2308 2309 return ret; 2310 } 2311 2312 /* 2313 * Remove all subdirectories of mon_data of ctrl_mon groups 2314 * and monitor groups with given domain id. 2315 */ 2316 void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int dom_id) 2317 { 2318 struct rdtgroup *prgrp, *crgrp; 2319 char name[32]; 2320 2321 if (!r->mon_enabled) 2322 return; 2323 2324 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { 2325 sprintf(name, "mon_%s_%02d", r->name, dom_id); 2326 kernfs_remove_by_name(prgrp->mon.mon_data_kn, name); 2327 2328 list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list) 2329 kernfs_remove_by_name(crgrp->mon.mon_data_kn, name); 2330 } 2331 } 2332 2333 static int mkdir_mondata_subdir(struct kernfs_node *parent_kn, 2334 struct rdt_domain *d, 2335 struct rdt_resource *r, struct rdtgroup *prgrp) 2336 { 2337 union mon_data_bits priv; 2338 struct kernfs_node *kn; 2339 struct mon_evt *mevt; 2340 struct rmid_read rr; 2341 char name[32]; 2342 int ret; 2343 2344 sprintf(name, "mon_%s_%02d", r->name, d->id); 2345 /* create the directory */ 2346 kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); 2347 if (IS_ERR(kn)) 2348 return PTR_ERR(kn); 2349 2350 /* 2351 * This extra ref will be put in kernfs_remove() and guarantees 2352 * that kn is always accessible. 2353 */ 2354 kernfs_get(kn); 2355 ret = rdtgroup_kn_set_ugid(kn); 2356 if (ret) 2357 goto out_destroy; 2358 2359 if (WARN_ON(list_empty(&r->evt_list))) { 2360 ret = -EPERM; 2361 goto out_destroy; 2362 } 2363 2364 priv.u.rid = r->rid; 2365 priv.u.domid = d->id; 2366 list_for_each_entry(mevt, &r->evt_list, list) { 2367 priv.u.evtid = mevt->evtid; 2368 ret = mon_addfile(kn, mevt->name, priv.priv); 2369 if (ret) 2370 goto out_destroy; 2371 2372 if (is_mbm_event(mevt->evtid)) 2373 mon_event_read(&rr, d, prgrp, mevt->evtid, true); 2374 } 2375 kernfs_activate(kn); 2376 return 0; 2377 2378 out_destroy: 2379 kernfs_remove(kn); 2380 return ret; 2381 } 2382 2383 /* 2384 * Add all subdirectories of mon_data for "ctrl_mon" groups 2385 * and "monitor" groups with given domain id. 2386 */ 2387 void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, 2388 struct rdt_domain *d) 2389 { 2390 struct kernfs_node *parent_kn; 2391 struct rdtgroup *prgrp, *crgrp; 2392 struct list_head *head; 2393 2394 if (!r->mon_enabled) 2395 return; 2396 2397 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { 2398 parent_kn = prgrp->mon.mon_data_kn; 2399 mkdir_mondata_subdir(parent_kn, d, r, prgrp); 2400 2401 head = &prgrp->mon.crdtgrp_list; 2402 list_for_each_entry(crgrp, head, mon.crdtgrp_list) { 2403 parent_kn = crgrp->mon.mon_data_kn; 2404 mkdir_mondata_subdir(parent_kn, d, r, crgrp); 2405 } 2406 } 2407 } 2408 2409 static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn, 2410 struct rdt_resource *r, 2411 struct rdtgroup *prgrp) 2412 { 2413 struct rdt_domain *dom; 2414 int ret; 2415 2416 list_for_each_entry(dom, &r->domains, list) { 2417 ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp); 2418 if (ret) 2419 return ret; 2420 } 2421 2422 return 0; 2423 } 2424 2425 /* 2426 * This creates a directory mon_data which contains the monitored data. 2427 * 2428 * mon_data has one directory for each domain whic are named 2429 * in the format mon_<domain_name>_<domain_id>. For ex: A mon_data 2430 * with L3 domain looks as below: 2431 * ./mon_data: 2432 * mon_L3_00 2433 * mon_L3_01 2434 * mon_L3_02 2435 * ... 2436 * 2437 * Each domain directory has one file per event: 2438 * ./mon_L3_00/: 2439 * llc_occupancy 2440 * 2441 */ 2442 static int mkdir_mondata_all(struct kernfs_node *parent_kn, 2443 struct rdtgroup *prgrp, 2444 struct kernfs_node **dest_kn) 2445 { 2446 struct rdt_resource *r; 2447 struct kernfs_node *kn; 2448 int ret; 2449 2450 /* 2451 * Create the mon_data directory first. 2452 */ 2453 ret = mongroup_create_dir(parent_kn, NULL, "mon_data", &kn); 2454 if (ret) 2455 return ret; 2456 2457 if (dest_kn) 2458 *dest_kn = kn; 2459 2460 /* 2461 * Create the subdirectories for each domain. Note that all events 2462 * in a domain like L3 are grouped into a resource whose domain is L3 2463 */ 2464 for_each_mon_enabled_rdt_resource(r) { 2465 ret = mkdir_mondata_subdir_alldom(kn, r, prgrp); 2466 if (ret) 2467 goto out_destroy; 2468 } 2469 2470 return 0; 2471 2472 out_destroy: 2473 kernfs_remove(kn); 2474 return ret; 2475 } 2476 2477 /** 2478 * cbm_ensure_valid - Enforce validity on provided CBM 2479 * @_val: Candidate CBM 2480 * @r: RDT resource to which the CBM belongs 2481 * 2482 * The provided CBM represents all cache portions available for use. This 2483 * may be represented by a bitmap that does not consist of contiguous ones 2484 * and thus be an invalid CBM. 2485 * Here the provided CBM is forced to be a valid CBM by only considering 2486 * the first set of contiguous bits as valid and clearing all bits. 2487 * The intention here is to provide a valid default CBM with which a new 2488 * resource group is initialized. The user can follow this with a 2489 * modification to the CBM if the default does not satisfy the 2490 * requirements. 2491 */ 2492 static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r) 2493 { 2494 unsigned int cbm_len = r->cache.cbm_len; 2495 unsigned long first_bit, zero_bit; 2496 unsigned long val = _val; 2497 2498 if (!val) 2499 return 0; 2500 2501 first_bit = find_first_bit(&val, cbm_len); 2502 zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); 2503 2504 /* Clear any remaining bits to ensure contiguous region */ 2505 bitmap_clear(&val, zero_bit, cbm_len - zero_bit); 2506 return (u32)val; 2507 } 2508 2509 /* 2510 * Initialize cache resources per RDT domain 2511 * 2512 * Set the RDT domain up to start off with all usable allocations. That is, 2513 * all shareable and unused bits. All-zero CBM is invalid. 2514 */ 2515 static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r, 2516 u32 closid) 2517 { 2518 struct rdt_resource *r_cdp = NULL; 2519 struct rdt_domain *d_cdp = NULL; 2520 u32 used_b = 0, unused_b = 0; 2521 unsigned long tmp_cbm; 2522 enum rdtgrp_mode mode; 2523 u32 peer_ctl, *ctrl; 2524 int i; 2525 2526 rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp); 2527 d->have_new_ctrl = false; 2528 d->new_ctrl = r->cache.shareable_bits; 2529 used_b = r->cache.shareable_bits; 2530 ctrl = d->ctrl_val; 2531 for (i = 0; i < closids_supported(); i++, ctrl++) { 2532 if (closid_allocated(i) && i != closid) { 2533 mode = rdtgroup_mode_by_closid(i); 2534 if (mode == RDT_MODE_PSEUDO_LOCKSETUP) 2535 /* 2536 * ctrl values for locksetup aren't relevant 2537 * until the schemata is written, and the mode 2538 * becomes RDT_MODE_PSEUDO_LOCKED. 2539 */ 2540 continue; 2541 /* 2542 * If CDP is active include peer domain's 2543 * usage to ensure there is no overlap 2544 * with an exclusive group. 2545 */ 2546 if (d_cdp) 2547 peer_ctl = d_cdp->ctrl_val[i]; 2548 else 2549 peer_ctl = 0; 2550 used_b |= *ctrl | peer_ctl; 2551 if (mode == RDT_MODE_SHAREABLE) 2552 d->new_ctrl |= *ctrl | peer_ctl; 2553 } 2554 } 2555 if (d->plr && d->plr->cbm > 0) 2556 used_b |= d->plr->cbm; 2557 unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1); 2558 unused_b &= BIT_MASK(r->cache.cbm_len) - 1; 2559 d->new_ctrl |= unused_b; 2560 /* 2561 * Force the initial CBM to be valid, user can 2562 * modify the CBM based on system availability. 2563 */ 2564 d->new_ctrl = cbm_ensure_valid(d->new_ctrl, r); 2565 /* 2566 * Assign the u32 CBM to an unsigned long to ensure that 2567 * bitmap_weight() does not access out-of-bound memory. 2568 */ 2569 tmp_cbm = d->new_ctrl; 2570 if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) { 2571 rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id); 2572 return -ENOSPC; 2573 } 2574 d->have_new_ctrl = true; 2575 2576 return 0; 2577 } 2578 2579 /* 2580 * Initialize cache resources with default values. 2581 * 2582 * A new RDT group is being created on an allocation capable (CAT) 2583 * supporting system. Set this group up to start off with all usable 2584 * allocations. 2585 * 2586 * If there are no more shareable bits available on any domain then 2587 * the entire allocation will fail. 2588 */ 2589 static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid) 2590 { 2591 struct rdt_domain *d; 2592 int ret; 2593 2594 list_for_each_entry(d, &r->domains, list) { 2595 ret = __init_one_rdt_domain(d, r, closid); 2596 if (ret < 0) 2597 return ret; 2598 } 2599 2600 return 0; 2601 } 2602 2603 /* Initialize MBA resource with default values. */ 2604 static void rdtgroup_init_mba(struct rdt_resource *r) 2605 { 2606 struct rdt_domain *d; 2607 2608 list_for_each_entry(d, &r->domains, list) { 2609 d->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl; 2610 d->have_new_ctrl = true; 2611 } 2612 } 2613 2614 /* Initialize the RDT group's allocations. */ 2615 static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) 2616 { 2617 struct rdt_resource *r; 2618 int ret; 2619 2620 for_each_alloc_enabled_rdt_resource(r) { 2621 if (r->rid == RDT_RESOURCE_MBA) { 2622 rdtgroup_init_mba(r); 2623 } else { 2624 ret = rdtgroup_init_cat(r, rdtgrp->closid); 2625 if (ret < 0) 2626 return ret; 2627 } 2628 2629 ret = update_domains(r, rdtgrp->closid); 2630 if (ret < 0) { 2631 rdt_last_cmd_puts("Failed to initialize allocations\n"); 2632 return ret; 2633 } 2634 2635 } 2636 2637 rdtgrp->mode = RDT_MODE_SHAREABLE; 2638 2639 return 0; 2640 } 2641 2642 static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, 2643 struct kernfs_node *prgrp_kn, 2644 const char *name, umode_t mode, 2645 enum rdt_group_type rtype, struct rdtgroup **r) 2646 { 2647 struct rdtgroup *prdtgrp, *rdtgrp; 2648 struct kernfs_node *kn; 2649 uint files = 0; 2650 int ret; 2651 2652 prdtgrp = rdtgroup_kn_lock_live(prgrp_kn); 2653 if (!prdtgrp) { 2654 ret = -ENODEV; 2655 goto out_unlock; 2656 } 2657 2658 if (rtype == RDTMON_GROUP && 2659 (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 2660 prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) { 2661 ret = -EINVAL; 2662 rdt_last_cmd_puts("Pseudo-locking in progress\n"); 2663 goto out_unlock; 2664 } 2665 2666 /* allocate the rdtgroup. */ 2667 rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); 2668 if (!rdtgrp) { 2669 ret = -ENOSPC; 2670 rdt_last_cmd_puts("Kernel out of memory\n"); 2671 goto out_unlock; 2672 } 2673 *r = rdtgrp; 2674 rdtgrp->mon.parent = prdtgrp; 2675 rdtgrp->type = rtype; 2676 INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list); 2677 2678 /* kernfs creates the directory for rdtgrp */ 2679 kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp); 2680 if (IS_ERR(kn)) { 2681 ret = PTR_ERR(kn); 2682 rdt_last_cmd_puts("kernfs create error\n"); 2683 goto out_free_rgrp; 2684 } 2685 rdtgrp->kn = kn; 2686 2687 /* 2688 * kernfs_remove() will drop the reference count on "kn" which 2689 * will free it. But we still need it to stick around for the 2690 * rdtgroup_kn_unlock(kn} call below. Take one extra reference 2691 * here, which will be dropped inside rdtgroup_kn_unlock(). 2692 */ 2693 kernfs_get(kn); 2694 2695 ret = rdtgroup_kn_set_ugid(kn); 2696 if (ret) { 2697 rdt_last_cmd_puts("kernfs perm error\n"); 2698 goto out_destroy; 2699 } 2700 2701 files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype); 2702 ret = rdtgroup_add_files(kn, files); 2703 if (ret) { 2704 rdt_last_cmd_puts("kernfs fill error\n"); 2705 goto out_destroy; 2706 } 2707 2708 if (rdt_mon_capable) { 2709 ret = alloc_rmid(); 2710 if (ret < 0) { 2711 rdt_last_cmd_puts("Out of RMIDs\n"); 2712 goto out_destroy; 2713 } 2714 rdtgrp->mon.rmid = ret; 2715 2716 ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn); 2717 if (ret) { 2718 rdt_last_cmd_puts("kernfs subdir error\n"); 2719 goto out_idfree; 2720 } 2721 } 2722 kernfs_activate(kn); 2723 2724 /* 2725 * The caller unlocks the prgrp_kn upon success. 2726 */ 2727 return 0; 2728 2729 out_idfree: 2730 free_rmid(rdtgrp->mon.rmid); 2731 out_destroy: 2732 kernfs_remove(rdtgrp->kn); 2733 out_free_rgrp: 2734 kfree(rdtgrp); 2735 out_unlock: 2736 rdtgroup_kn_unlock(prgrp_kn); 2737 return ret; 2738 } 2739 2740 static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp) 2741 { 2742 kernfs_remove(rgrp->kn); 2743 free_rmid(rgrp->mon.rmid); 2744 kfree(rgrp); 2745 } 2746 2747 /* 2748 * Create a monitor group under "mon_groups" directory of a control 2749 * and monitor group(ctrl_mon). This is a resource group 2750 * to monitor a subset of tasks and cpus in its parent ctrl_mon group. 2751 */ 2752 static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn, 2753 struct kernfs_node *prgrp_kn, 2754 const char *name, 2755 umode_t mode) 2756 { 2757 struct rdtgroup *rdtgrp, *prgrp; 2758 int ret; 2759 2760 ret = mkdir_rdt_prepare(parent_kn, prgrp_kn, name, mode, RDTMON_GROUP, 2761 &rdtgrp); 2762 if (ret) 2763 return ret; 2764 2765 prgrp = rdtgrp->mon.parent; 2766 rdtgrp->closid = prgrp->closid; 2767 2768 /* 2769 * Add the rdtgrp to the list of rdtgrps the parent 2770 * ctrl_mon group has to track. 2771 */ 2772 list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list); 2773 2774 rdtgroup_kn_unlock(prgrp_kn); 2775 return ret; 2776 } 2777 2778 /* 2779 * These are rdtgroups created under the root directory. Can be used 2780 * to allocate and monitor resources. 2781 */ 2782 static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, 2783 struct kernfs_node *prgrp_kn, 2784 const char *name, umode_t mode) 2785 { 2786 struct rdtgroup *rdtgrp; 2787 struct kernfs_node *kn; 2788 u32 closid; 2789 int ret; 2790 2791 ret = mkdir_rdt_prepare(parent_kn, prgrp_kn, name, mode, RDTCTRL_GROUP, 2792 &rdtgrp); 2793 if (ret) 2794 return ret; 2795 2796 kn = rdtgrp->kn; 2797 ret = closid_alloc(); 2798 if (ret < 0) { 2799 rdt_last_cmd_puts("Out of CLOSIDs\n"); 2800 goto out_common_fail; 2801 } 2802 closid = ret; 2803 ret = 0; 2804 2805 rdtgrp->closid = closid; 2806 ret = rdtgroup_init_alloc(rdtgrp); 2807 if (ret < 0) 2808 goto out_id_free; 2809 2810 list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); 2811 2812 if (rdt_mon_capable) { 2813 /* 2814 * Create an empty mon_groups directory to hold the subset 2815 * of tasks and cpus to monitor. 2816 */ 2817 ret = mongroup_create_dir(kn, NULL, "mon_groups", NULL); 2818 if (ret) { 2819 rdt_last_cmd_puts("kernfs subdir error\n"); 2820 goto out_del_list; 2821 } 2822 } 2823 2824 goto out_unlock; 2825 2826 out_del_list: 2827 list_del(&rdtgrp->rdtgroup_list); 2828 out_id_free: 2829 closid_free(closid); 2830 out_common_fail: 2831 mkdir_rdt_prepare_clean(rdtgrp); 2832 out_unlock: 2833 rdtgroup_kn_unlock(prgrp_kn); 2834 return ret; 2835 } 2836 2837 /* 2838 * We allow creating mon groups only with in a directory called "mon_groups" 2839 * which is present in every ctrl_mon group. Check if this is a valid 2840 * "mon_groups" directory. 2841 * 2842 * 1. The directory should be named "mon_groups". 2843 * 2. The mon group itself should "not" be named "mon_groups". 2844 * This makes sure "mon_groups" directory always has a ctrl_mon group 2845 * as parent. 2846 */ 2847 static bool is_mon_groups(struct kernfs_node *kn, const char *name) 2848 { 2849 return (!strcmp(kn->name, "mon_groups") && 2850 strcmp(name, "mon_groups")); 2851 } 2852 2853 static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name, 2854 umode_t mode) 2855 { 2856 /* Do not accept '\n' to avoid unparsable situation. */ 2857 if (strchr(name, '\n')) 2858 return -EINVAL; 2859 2860 /* 2861 * If the parent directory is the root directory and RDT 2862 * allocation is supported, add a control and monitoring 2863 * subdirectory 2864 */ 2865 if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn) 2866 return rdtgroup_mkdir_ctrl_mon(parent_kn, parent_kn, name, mode); 2867 2868 /* 2869 * If RDT monitoring is supported and the parent directory is a valid 2870 * "mon_groups" directory, add a monitoring subdirectory. 2871 */ 2872 if (rdt_mon_capable && is_mon_groups(parent_kn, name)) 2873 return rdtgroup_mkdir_mon(parent_kn, parent_kn->parent, name, mode); 2874 2875 return -EPERM; 2876 } 2877 2878 static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp, 2879 cpumask_var_t tmpmask) 2880 { 2881 struct rdtgroup *prdtgrp = rdtgrp->mon.parent; 2882 int cpu; 2883 2884 /* Give any tasks back to the parent group */ 2885 rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask); 2886 2887 /* Update per cpu rmid of the moved CPUs first */ 2888 for_each_cpu(cpu, &rdtgrp->cpu_mask) 2889 per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid; 2890 /* 2891 * Update the MSR on moved CPUs and CPUs which have moved 2892 * task running on them. 2893 */ 2894 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); 2895 update_closid_rmid(tmpmask, NULL); 2896 2897 rdtgrp->flags = RDT_DELETED; 2898 free_rmid(rdtgrp->mon.rmid); 2899 2900 /* 2901 * Remove the rdtgrp from the parent ctrl_mon group's list 2902 */ 2903 WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); 2904 list_del(&rdtgrp->mon.crdtgrp_list); 2905 2906 /* 2907 * one extra hold on this, will drop when we kfree(rdtgrp) 2908 * in rdtgroup_kn_unlock() 2909 */ 2910 kernfs_get(kn); 2911 kernfs_remove(rdtgrp->kn); 2912 2913 return 0; 2914 } 2915 2916 static int rdtgroup_ctrl_remove(struct kernfs_node *kn, 2917 struct rdtgroup *rdtgrp) 2918 { 2919 rdtgrp->flags = RDT_DELETED; 2920 list_del(&rdtgrp->rdtgroup_list); 2921 2922 /* 2923 * one extra hold on this, will drop when we kfree(rdtgrp) 2924 * in rdtgroup_kn_unlock() 2925 */ 2926 kernfs_get(kn); 2927 kernfs_remove(rdtgrp->kn); 2928 return 0; 2929 } 2930 2931 static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp, 2932 cpumask_var_t tmpmask) 2933 { 2934 int cpu; 2935 2936 /* Give any tasks back to the default group */ 2937 rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask); 2938 2939 /* Give any CPUs back to the default group */ 2940 cpumask_or(&rdtgroup_default.cpu_mask, 2941 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); 2942 2943 /* Update per cpu closid and rmid of the moved CPUs first */ 2944 for_each_cpu(cpu, &rdtgrp->cpu_mask) { 2945 per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid; 2946 per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid; 2947 } 2948 2949 /* 2950 * Update the MSR on moved CPUs and CPUs which have moved 2951 * task running on them. 2952 */ 2953 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); 2954 update_closid_rmid(tmpmask, NULL); 2955 2956 closid_free(rdtgrp->closid); 2957 free_rmid(rdtgrp->mon.rmid); 2958 2959 /* 2960 * Free all the child monitor group rmids. 2961 */ 2962 free_all_child_rdtgrp(rdtgrp); 2963 2964 rdtgroup_ctrl_remove(kn, rdtgrp); 2965 2966 return 0; 2967 } 2968 2969 static int rdtgroup_rmdir(struct kernfs_node *kn) 2970 { 2971 struct kernfs_node *parent_kn = kn->parent; 2972 struct rdtgroup *rdtgrp; 2973 cpumask_var_t tmpmask; 2974 int ret = 0; 2975 2976 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) 2977 return -ENOMEM; 2978 2979 rdtgrp = rdtgroup_kn_lock_live(kn); 2980 if (!rdtgrp) { 2981 ret = -EPERM; 2982 goto out; 2983 } 2984 2985 /* 2986 * If the rdtgroup is a ctrl_mon group and parent directory 2987 * is the root directory, remove the ctrl_mon group. 2988 * 2989 * If the rdtgroup is a mon group and parent directory 2990 * is a valid "mon_groups" directory, remove the mon group. 2991 */ 2992 if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn) { 2993 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || 2994 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { 2995 ret = rdtgroup_ctrl_remove(kn, rdtgrp); 2996 } else { 2997 ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask); 2998 } 2999 } else if (rdtgrp->type == RDTMON_GROUP && 3000 is_mon_groups(parent_kn, kn->name)) { 3001 ret = rdtgroup_rmdir_mon(kn, rdtgrp, tmpmask); 3002 } else { 3003 ret = -EPERM; 3004 } 3005 3006 out: 3007 rdtgroup_kn_unlock(kn); 3008 free_cpumask_var(tmpmask); 3009 return ret; 3010 } 3011 3012 static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf) 3013 { 3014 if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled) 3015 seq_puts(seq, ",cdp"); 3016 3017 if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled) 3018 seq_puts(seq, ",cdpl2"); 3019 3020 if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA])) 3021 seq_puts(seq, ",mba_MBps"); 3022 3023 return 0; 3024 } 3025 3026 static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = { 3027 .mkdir = rdtgroup_mkdir, 3028 .rmdir = rdtgroup_rmdir, 3029 .show_options = rdtgroup_show_options, 3030 }; 3031 3032 static int __init rdtgroup_setup_root(void) 3033 { 3034 int ret; 3035 3036 rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops, 3037 KERNFS_ROOT_CREATE_DEACTIVATED | 3038 KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK, 3039 &rdtgroup_default); 3040 if (IS_ERR(rdt_root)) 3041 return PTR_ERR(rdt_root); 3042 3043 mutex_lock(&rdtgroup_mutex); 3044 3045 rdtgroup_default.closid = 0; 3046 rdtgroup_default.mon.rmid = 0; 3047 rdtgroup_default.type = RDTCTRL_GROUP; 3048 INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list); 3049 3050 list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups); 3051 3052 ret = rdtgroup_add_files(rdt_root->kn, RF_CTRL_BASE); 3053 if (ret) { 3054 kernfs_destroy_root(rdt_root); 3055 goto out; 3056 } 3057 3058 rdtgroup_default.kn = rdt_root->kn; 3059 kernfs_activate(rdtgroup_default.kn); 3060 3061 out: 3062 mutex_unlock(&rdtgroup_mutex); 3063 3064 return ret; 3065 } 3066 3067 /* 3068 * rdtgroup_init - rdtgroup initialization 3069 * 3070 * Setup resctrl file system including set up root, create mount point, 3071 * register rdtgroup filesystem, and initialize files under root directory. 3072 * 3073 * Return: 0 on success or -errno 3074 */ 3075 int __init rdtgroup_init(void) 3076 { 3077 int ret = 0; 3078 3079 seq_buf_init(&last_cmd_status, last_cmd_status_buf, 3080 sizeof(last_cmd_status_buf)); 3081 3082 ret = rdtgroup_setup_root(); 3083 if (ret) 3084 return ret; 3085 3086 ret = sysfs_create_mount_point(fs_kobj, "resctrl"); 3087 if (ret) 3088 goto cleanup_root; 3089 3090 ret = register_filesystem(&rdt_fs_type); 3091 if (ret) 3092 goto cleanup_mountpoint; 3093 3094 /* 3095 * Adding the resctrl debugfs directory here may not be ideal since 3096 * it would let the resctrl debugfs directory appear on the debugfs 3097 * filesystem before the resctrl filesystem is mounted. 3098 * It may also be ok since that would enable debugging of RDT before 3099 * resctrl is mounted. 3100 * The reason why the debugfs directory is created here and not in 3101 * rdt_mount() is because rdt_mount() takes rdtgroup_mutex and 3102 * during the debugfs directory creation also &sb->s_type->i_mutex_key 3103 * (the lockdep class of inode->i_rwsem). Other filesystem 3104 * interactions (eg. SyS_getdents) have the lock ordering: 3105 * &sb->s_type->i_mutex_key --> &mm->mmap_sem 3106 * During mmap(), called with &mm->mmap_sem, the rdtgroup_mutex 3107 * is taken, thus creating dependency: 3108 * &mm->mmap_sem --> rdtgroup_mutex for the latter that can cause 3109 * issues considering the other two lock dependencies. 3110 * By creating the debugfs directory here we avoid a dependency 3111 * that may cause deadlock (even though file operations cannot 3112 * occur until the filesystem is mounted, but I do not know how to 3113 * tell lockdep that). 3114 */ 3115 debugfs_resctrl = debugfs_create_dir("resctrl", NULL); 3116 3117 return 0; 3118 3119 cleanup_mountpoint: 3120 sysfs_remove_mount_point(fs_kobj, "resctrl"); 3121 cleanup_root: 3122 kernfs_destroy_root(rdt_root); 3123 3124 return ret; 3125 } 3126 3127 void __exit rdtgroup_exit(void) 3128 { 3129 debugfs_remove_recursive(debugfs_resctrl); 3130 unregister_filesystem(&rdt_fs_type); 3131 sysfs_remove_mount_point(fs_kobj, "resctrl"); 3132 kernfs_destroy_root(rdt_root); 3133 } 3134