// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *    Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)	"resctrl: " fmt

#include <linux/slab.h>
#include <linux/err.h>
#include <linux/cacheinfo.h>
#include <linux/cpuhotplug.h>

#include <asm/intel-family.h>
#include <asm/resctrl.h>
#include "internal.h"

/* Mutex to protect rdtgroup access. */
DEFINE_MUTEX(rdtgroup_mutex);

/*
 * The cached resctrl_pqr_state is strictly per CPU and can never be
 * updated from a remote CPU. Functions which modify the state
 * are called with interrupts disabled and no preemption, which
 * is sufficient for the protection.
 */
DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state);

/*
 * Used to store the max resource name width and max resource data width
 * to display the schemata in a tabular format.
 */
int max_name_width, max_data_width;

/*
 * Global boolean for rdt_alloc which is true if any
 * resource allocation is enabled.
 */
bool rdt_alloc_capable;

static void
mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
		struct rdt_resource *r);
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
static void
mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
	      struct rdt_resource *r);

#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].r_resctrl.domains)

struct rdt_hw_resource rdt_resources_all[] = {
	[RDT_RESOURCE_L3] =
	{
		.r_resctrl = {
			.rid			= RDT_RESOURCE_L3,
			.name			= "L3",
			.cache_level		= 3,
			.cache = {
				.min_cbm_bits	= 1,
			},
			.domains		= domain_init(RDT_RESOURCE_L3),
			.parse_ctrlval		= parse_cbm,
			.format_str		= "%d=%0*x",
			.fflags			= RFTYPE_RES_CACHE,
		},
		.msr_base		= MSR_IA32_L3_CBM_BASE,
		.msr_update		= cat_wrmsr,
	},
	[RDT_RESOURCE_L2] =
	{
		.r_resctrl = {
			.rid			= RDT_RESOURCE_L2,
			.name			= "L2",
			.cache_level		= 2,
			.cache = {
				.min_cbm_bits	= 1,
			},
			.domains		= domain_init(RDT_RESOURCE_L2),
			.parse_ctrlval		= parse_cbm,
			.format_str		= "%d=%0*x",
			.fflags			= RFTYPE_RES_CACHE,
		},
		.msr_base		= MSR_IA32_L2_CBM_BASE,
		.msr_update		= cat_wrmsr,
	},
	[RDT_RESOURCE_MBA] =
	{
		.r_resctrl = {
			.rid			= RDT_RESOURCE_MBA,
			.name			= "MB",
			.cache_level		= 3,
			.domains		= domain_init(RDT_RESOURCE_MBA),
			.parse_ctrlval		= parse_bw,
			.format_str		= "%d=%*u",
			.fflags			= RFTYPE_RES_MB,
		},
	},
};

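/*
 * Layout note (a sketch of the wrapper pattern used above, not a definition
 * from this file): each struct rdt_hw_resource embeds the generic
 * struct rdt_resource as its r_resctrl member. The resctrl_to_arch_res()
 * and resctrl_to_arch_dom() helpers used throughout this file are expected
 * to be container_of() style conversions (declared in internal.h) from the
 * embedded struct back to its arch-private wrapper, roughly:
 *
 *	container_of(r, struct rdt_hw_resource, r_resctrl)
 */
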
/*
 * cache_alloc_hsw_probe() - Have to probe for Intel Haswell server CPUs
 * as they do not have CPUID enumeration support for Cache allocation.
 * The check for Vendor/Family/Model is not enough to guarantee that
 * the MSRs won't #GP fault because only the following SKUs support
 * CAT:
 *	Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
 *	Intel(R) Xeon(R) CPU E5-2648L v3 @ 1.80GHz
 *	Intel(R) Xeon(R) CPU E5-2628L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2618L v3 @ 2.30GHz
 *	Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
 *
 * Probe by trying to write the first of the L3 cache mask registers
 * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
 * is always 20 on hsw server parts. The minimum cache bitmask length
 * allowed for HSW server is always 2 bits. Hardcode all of them.
 */
static inline void cache_alloc_hsw_probe(void)
{
	struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_L3];
	struct rdt_resource *r = &hw_res->r_resctrl;
	u32 l, h, max_cbm = BIT_MASK(20) - 1;

	if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0))
		return;

	rdmsr(MSR_IA32_L3_CBM_BASE, l, h);

	/* Bail out if the bits did not stick: CAT is not supported. */
	if (l != max_cbm)
		return;

	hw_res->num_closid = 4;
	r->default_ctrl = max_cbm;
	r->cache.cbm_len = 20;
	r->cache.shareable_bits = 0xc0000;
	r->cache.min_cbm_bits = 2;
	r->alloc_capable = true;

	rdt_alloc_capable = true;
}

bool is_mba_sc(struct rdt_resource *r)
{
	if (!r)
		return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.mba_sc;

	return r->membw.mba_sc;
}

/*
 * rdt_get_mb_table() - get a mapping between the bandwidth (b/w) percentage
 * values exposed to the user interface and the h/w understandable delay
 * values.
 *
 * The non-linear delay values have power-of-two granularity, and the h/w
 * does not guarantee a fixed curve of configured delay values vs. the
 * actual b/w enforced.
 * Hence we need a pre-calibrated mapping so the user can express the
 * memory b/w as a percentage value.
 */
static inline bool rdt_get_mb_table(struct rdt_resource *r)
{
	/*
	 * There are no Intel SKUs as of now to support non-linear delay.
	 */
	pr_info("MBA b/w map not implemented for cpu:%d, model:%d\n",
		boot_cpu_data.x86, boot_cpu_data.x86_model);

	return false;
}

static bool __get_mem_config_intel(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	union cpuid_0x10_3_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx, max_delay;

	cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
	hw_res->num_closid = edx.split.cos_max + 1;
	max_delay = eax.split.max_delay + 1;
	r->default_ctrl = MAX_MBA_BW;
	r->membw.arch_needs_linear = true;
	if (ecx & MBA_IS_LINEAR) {
		r->membw.delay_linear = true;
		r->membw.min_bw = MAX_MBA_BW - max_delay;
		r->membw.bw_gran = MAX_MBA_BW - max_delay;
	} else {
		if (!rdt_get_mb_table(r))
			return false;
		r->membw.arch_needs_linear = false;
	}
	r->data_width = 3;

	if (boot_cpu_has(X86_FEATURE_PER_THREAD_MBA))
		r->membw.throttle_mode = THREAD_THROTTLE_PER_THREAD;
	else
		r->membw.throttle_mode = THREAD_THROTTLE_MAX;
	thread_throttle_mode_init();

	r->alloc_capable = true;

	return true;
}

static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	union cpuid_0x10_3_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx;

	cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full);
	hw_res->num_closid = edx.split.cos_max + 1;
	r->default_ctrl = MAX_MBA_BW_AMD;

	/* AMD does not use delay. */
	r->membw.delay_linear = false;
	r->membw.arch_needs_linear = false;

	/*
	 * AMD does not use the memory delay throttle model to control
	 * the allocation like Intel does.
	 */
	r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED;
	r->membw.min_bw = 0;
	r->membw.bw_gran = 1;
	/* Max value is 2048, so the data width should be 4 in decimal. */
	r->data_width = 4;

	r->alloc_capable = true;

	return true;
}

static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	union cpuid_0x10_1_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx;

	cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
	hw_res->num_closid = edx.split.cos_max + 1;
	r->cache.cbm_len = eax.split.cbm_len + 1;
	r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
	r->cache.shareable_bits = ebx & r->default_ctrl;
	r->data_width = (r->cache.cbm_len + 3) / 4;
	r->alloc_capable = true;
}

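/*
 * Illustrative example of the arithmetic above (the CPUID values are
 * hypothetical, not taken from any specific SKU):
 *
 * Cache: if CPUID.0x10.1 reports cbm_len = 19 and cos_max = 15, then
 * rdt_get_cache_alloc_cfg() ends up with cache.cbm_len = 20,
 * num_closid = 16, default_ctrl = BIT_MASK(20) - 1 = 0xfffff and
 * data_width = (20 + 3) / 4 = 5 hex digits in the schemata file.
 *
 * Memory b/w (Intel, linear delay, assuming MAX_MBA_BW is 100): if
 * CPUID.0x10.3 reports a max_delay field of 89, then max_delay = 90 and
 * __get_mem_config_intel() sets min_bw = bw_gran = 100 - 90 = 10, i.e.
 * bandwidth can be requested in 10% steps with a 10% minimum.
 */
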
static void rdt_get_cdp_config(int level)
{
	/*
	 * By default, CDP is disabled. CDP can be enabled by the mount
	 * parameter "cdp" at resctrl file system mount time.
	 */
	rdt_resources_all[level].cdp_enabled = false;
	rdt_resources_all[level].r_resctrl.cdp_capable = true;
}

static void rdt_get_cdp_l3_config(void)
{
	rdt_get_cdp_config(RDT_RESOURCE_L3);
}

static void rdt_get_cdp_l2_config(void)
{
	rdt_get_cdp_config(RDT_RESOURCE_L2);
}

static void
mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	for (i = m->low; i < m->high; i++)
		wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
}

/*
 * Map the memory b/w percentage value to delay values
 * that can be written to QOS_MSRs.
 * There are currently no SKUs which support non-linear delay values.
 */
static u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
{
	if (r->membw.delay_linear)
		return MAX_MBA_BW - bw;

	pr_warn_once("Non Linear delay-bw map not supported but queried\n");
	return r->default_ctrl;
}

static void
mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
		struct rdt_resource *r)
{
	unsigned int i;
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	/* Write the delay values for mba. */
	for (i = m->low; i < m->high; i++)
		wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], r));
}

static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	for (i = m->low; i < m->high; i++)
		wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
}

struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
{
	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list) {
		/* Find the domain that contains this CPU */
		if (cpumask_test_cpu(cpu, &d->cpu_mask))
			return d;
	}

	return NULL;
}

u32 resctrl_arch_get_num_closid(struct rdt_resource *r)
{
	return resctrl_to_arch_res(r)->num_closid;
}

void rdt_ctrl_update(void *arg)
{
	struct msr_param *m = arg;
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
	struct rdt_resource *r = m->res;
	int cpu = smp_processor_id();
	struct rdt_domain *d;

	d = get_domain_from_cpu(cpu, r);
	if (d) {
		hw_res->msr_update(d, m, r);
		return;
	}
	pr_warn_once("cpu %d not found in any domain for resource %s\n",
		     cpu, r->name);
}

/*
 * rdt_find_domain - Find a domain in a resource that matches input resource id
 *
 * Search resource r's domain list to find the resource id. If the resource
 * id is found in a domain, return the domain. Otherwise, if requested by
 * caller, return the first domain whose id is bigger than the input id.
 * The domain list is sorted by id in ascending order.
 */
struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
				   struct list_head **pos)
{
	struct rdt_domain *d;
	struct list_head *l;

	if (id < 0)
		return ERR_PTR(-ENODEV);

	list_for_each(l, &r->domains) {
		d = list_entry(l, struct rdt_domain, list);
		/* When id is found, return its domain. */
		if (id == d->id)
			return d;
		/* Stop searching when finding id's position in sorted list. */
		if (id < d->id)
			break;
	}

	if (pos)
		*pos = l;

	return NULL;
}

static void setup_default_ctrlval(struct rdt_resource *r, u32 *dc)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	int i;

	/*
	 * Initialize the Control MSRs to having no control.
	 * For Cache Allocation: Set all bits in cbm
	 * For Memory Allocation: Set b/w requested to 100%
	 */
	for (i = 0; i < hw_res->num_closid; i++, dc++)
		*dc = r->default_ctrl;
}

static void domain_free(struct rdt_hw_domain *hw_dom)
{
	kfree(hw_dom->arch_mbm_total);
	kfree(hw_dom->arch_mbm_local);
	kfree(hw_dom->ctrl_val);
	kfree(hw_dom);
}

static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct msr_param m;
	u32 *dc;

	dc = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->ctrl_val),
			   GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	hw_dom->ctrl_val = dc;
	setup_default_ctrlval(r, dc);

	m.low = 0;
	m.high = hw_res->num_closid;
	hw_res->msr_update(d, &m, r);
	return 0;
}

/**
 * arch_domain_mbm_alloc() - Allocate arch private storage for the MBM counters
 * @num_rmid:	The size of the MBM counter array
 * @hw_dom:	The domain that owns the allocated arrays
 */
static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
{
	size_t tsize;

	if (is_mbm_total_enabled()) {
		tsize = sizeof(*hw_dom->arch_mbm_total);
		hw_dom->arch_mbm_total = kcalloc(num_rmid, tsize, GFP_KERNEL);
		if (!hw_dom->arch_mbm_total)
			return -ENOMEM;
	}
	if (is_mbm_local_enabled()) {
		tsize = sizeof(*hw_dom->arch_mbm_local);
		hw_dom->arch_mbm_local = kcalloc(num_rmid, tsize, GFP_KERNEL);
		if (!hw_dom->arch_mbm_local) {
			kfree(hw_dom->arch_mbm_total);
			hw_dom->arch_mbm_total = NULL;
			return -ENOMEM;
		}
	}

	return 0;
}

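/*
 * Sizing note (descriptive only): ctrl_val[] allocated above is indexed by
 * CLOSID and therefore holds num_closid entries, while the arch_mbm_*
 * arrays are indexed by RMID and hold num_rmid entries each.
 */
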
/*
 * domain_add_cpu - Add a cpu to a resource's domain list.
 *
 * If an existing domain in the resource r's domain list matches the cpu's
 * resource id, add the CPU to the domain.
 *
 * Otherwise, a new domain is allocated and inserted into the right position
 * in the domain list sorted by id in ascending order.
 *
 * The order in the domain list is visible to users when we print entries
 * in the schemata file and schemata input is validated to have the same order
 * as this list.
 */
static void domain_add_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
	struct list_head *add_pos = NULL;
	struct rdt_hw_domain *hw_dom;
	struct rdt_domain *d;
	int err;

	d = rdt_find_domain(r, id, &add_pos);
	if (IS_ERR(d)) {
		pr_warn("Couldn't find cache id for CPU %d\n", cpu);
		return;
	}

	if (d) {
		cpumask_set_cpu(cpu, &d->cpu_mask);
		if (r->cache.arch_has_per_cpu_cfg)
			rdt_domain_reconfigure_cdp(r);
		return;
	}

	hw_dom = kzalloc_node(sizeof(*hw_dom), GFP_KERNEL, cpu_to_node(cpu));
	if (!hw_dom)
		return;

	d = &hw_dom->d_resctrl;
	d->id = id;
	cpumask_set_cpu(cpu, &d->cpu_mask);

	rdt_domain_reconfigure_cdp(r);

	if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
		domain_free(hw_dom);
		return;
	}

	if (r->mon_capable && arch_domain_mbm_alloc(r->num_rmid, hw_dom)) {
		domain_free(hw_dom);
		return;
	}

	list_add_tail(&d->list, add_pos);

	err = resctrl_online_domain(r, d);
	if (err) {
		list_del(&d->list);
		domain_free(hw_dom);
	}
}

static void domain_remove_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
	struct rdt_hw_domain *hw_dom;
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, NULL);
	if (IS_ERR_OR_NULL(d)) {
		pr_warn("Couldn't find cache id for CPU %d\n", cpu);
		return;
	}
	hw_dom = resctrl_to_arch_dom(d);

	cpumask_clear_cpu(cpu, &d->cpu_mask);
	if (cpumask_empty(&d->cpu_mask)) {
		resctrl_offline_domain(r, d);
		list_del(&d->list);

		/*
		 * rdt_domain "d" is going to be freed below, so clear
		 * its pointer from pseudo_lock_region struct.
		 */
		if (d->plr)
			d->plr->d = NULL;
		domain_free(hw_dom);

		return;
	}

	if (r == &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl) {
		if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
			cancel_delayed_work(&d->mbm_over);
			mbm_setup_overflow_handler(d, 0);
		}
		if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
		    has_busy_rmid(r, d)) {
			cancel_delayed_work(&d->cqm_limbo);
			cqm_setup_limbo_handler(d, 0);
		}
	}
}

static void clear_closid_rmid(int cpu)
{
	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);

	state->default_closid = 0;
	state->default_rmid = 0;
	state->cur_closid = 0;
	state->cur_rmid = 0;
	wrmsr(IA32_PQR_ASSOC, 0, 0);
}

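/*
 * Background note (hedged, not taken from this file): IA32_PQR_ASSOC
 * carries the active RMID in its lower 32 bits and the active CLOSID in
 * its upper 32 bits, so the wrmsr(IA32_PQR_ASSOC, 0, 0) above parks the
 * CPU in the default monitoring group and the default control group.
 */
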
static int resctrl_online_cpu(unsigned int cpu)
{
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_add_cpu(cpu, r);
	/* The cpu is set in default rdtgroup after online. */
	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
	clear_closid_rmid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}

static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
{
	struct rdtgroup *cr;

	list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
		if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask))
			break;
	}
}

static int resctrl_offline_cpu(unsigned int cpu)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_remove_cpu(cpu, r);
	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
			clear_childcpus(rdtgrp, cpu);
			break;
		}
	}
	clear_closid_rmid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}

/*
 * Choose a width for the resource name and resource data based on the
 * resource that has the widest name and cbm.
 */
static __init void rdt_init_padding(void)
{
	struct rdt_resource *r;

	for_each_alloc_capable_rdt_resource(r) {
		if (r->data_width > max_data_width)
			max_data_width = r->data_width;
	}
}

enum {
	RDT_FLAG_CMT,
	RDT_FLAG_MBM_TOTAL,
	RDT_FLAG_MBM_LOCAL,
	RDT_FLAG_L3_CAT,
	RDT_FLAG_L3_CDP,
	RDT_FLAG_L2_CAT,
	RDT_FLAG_L2_CDP,
	RDT_FLAG_MBA,
};

#define RDT_OPT(idx, n, f)	\
[idx] = {			\
	.name = n,		\
	.flag = f		\
}

struct rdt_options {
	char	*name;
	int	flag;
	bool	force_off, force_on;
};

static struct rdt_options rdt_options[] __initdata = {
	RDT_OPT(RDT_FLAG_CMT,	    "cmt",	X86_FEATURE_CQM_OCCUP_LLC),
	RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL),
	RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL),
	RDT_OPT(RDT_FLAG_L3_CAT,    "l3cat",	X86_FEATURE_CAT_L3),
	RDT_OPT(RDT_FLAG_L3_CDP,    "l3cdp",	X86_FEATURE_CDP_L3),
	RDT_OPT(RDT_FLAG_L2_CAT,    "l2cat",	X86_FEATURE_CAT_L2),
	RDT_OPT(RDT_FLAG_L2_CDP,    "l2cdp",	X86_FEATURE_CDP_L2),
	RDT_OPT(RDT_FLAG_MBA,	    "mba",	X86_FEATURE_MBA),
};
#define NUM_RDT_OPTIONS	ARRAY_SIZE(rdt_options)

static int __init set_rdt_options(char *str)
{
	struct rdt_options *o;
	bool force_off;
	char *tok;

	if (*str == '=')
		str++;
	while ((tok = strsep(&str, ",")) != NULL) {
		force_off = *tok == '!';
		if (force_off)
			tok++;
		for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
			if (strcmp(tok, o->name) == 0) {
				if (force_off)
					o->force_off = true;
				else
					o->force_on = true;
				break;
			}
		}
	}
	return 1;
}
__setup("rdt", set_rdt_options);

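/*
 * Usage example for the option parser above (hypothetical command line):
 * booting with "rdt=!l3cat,mbmtotal" force-disables L3 CAT and
 * force-enables MBM total monitoring. Tokens are matched against the
 * names in rdt_options[], with a leading '!' selecting force_off.
 */
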
static bool __init rdt_cpu_has(int flag)
{
	bool ret = boot_cpu_has(flag);
	struct rdt_options *o;

	if (!ret)
		return ret;

	for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
		if (flag == o->flag) {
			if (o->force_off)
				ret = false;
			if (o->force_on)
				ret = true;
			break;
		}
	}
	return ret;
}

static __init bool get_mem_config(void)
{
	struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_MBA];

	if (!rdt_cpu_has(X86_FEATURE_MBA))
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		return __get_mem_config_intel(&hw_res->r_resctrl);
	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return __rdt_get_mem_config_amd(&hw_res->r_resctrl);

	return false;
}

static __init bool get_rdt_alloc_resources(void)
{
	struct rdt_resource *r;
	bool ret = false;

	if (rdt_alloc_capable)
		return true;

	if (!boot_cpu_has(X86_FEATURE_RDT_A))
		return false;

	if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
		r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
		rdt_get_cache_alloc_cfg(1, r);
		if (rdt_cpu_has(X86_FEATURE_CDP_L3))
			rdt_get_cdp_l3_config();
		ret = true;
	}
	if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
		/* CPUID 0x10.2 fields have the same format as 0x10.1 */
		r = &rdt_resources_all[RDT_RESOURCE_L2].r_resctrl;
		rdt_get_cache_alloc_cfg(2, r);
		if (rdt_cpu_has(X86_FEATURE_CDP_L2))
			rdt_get_cdp_l2_config();
		ret = true;
	}

	if (get_mem_config())
		ret = true;

	return ret;
}

static __init bool get_rdt_mon_resources(void)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;

	if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
		rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_TOTAL_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_LOCAL_EVENT_ID);

	if (!rdt_mon_features)
		return false;

	return !rdt_get_mon_l3_config(r);
}

static __init void __check_quirks_intel(void)
{
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_HASWELL_X:
		if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
			cache_alloc_hsw_probe();
		break;
	case INTEL_FAM6_SKYLAKE_X:
		if (boot_cpu_data.x86_stepping <= 4)
			set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
		else
			set_rdt_options("!l3cat");
		fallthrough;
	case INTEL_FAM6_BROADWELL_X:
		intel_rdt_mbm_apply_quirk();
		break;
	}
}

static __init void check_quirks(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		__check_quirks_intel();
}

static __init bool get_rdt_resources(void)
{
	rdt_alloc_capable = get_rdt_alloc_resources();
	rdt_mon_capable = get_rdt_mon_resources();

	return (rdt_mon_capable || rdt_alloc_capable);
}

static __init void rdt_init_res_defs_intel(void)
{
	struct rdt_hw_resource *hw_res;
	struct rdt_resource *r;

	for_each_rdt_resource(r) {
		hw_res = resctrl_to_arch_res(r);

		if (r->rid == RDT_RESOURCE_L3 ||
		    r->rid == RDT_RESOURCE_L2) {
			r->cache.arch_has_sparse_bitmaps = false;
			r->cache.arch_has_empty_bitmaps = false;
			r->cache.arch_has_per_cpu_cfg = false;
		} else if (r->rid == RDT_RESOURCE_MBA) {
			hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE;
			hw_res->msr_update = mba_wrmsr_intel;
		}
	}
}

static __init void rdt_init_res_defs_amd(void)
{
	struct rdt_hw_resource *hw_res;
	struct rdt_resource *r;

	for_each_rdt_resource(r) {
		hw_res = resctrl_to_arch_res(r);

		if (r->rid == RDT_RESOURCE_L3 ||
		    r->rid == RDT_RESOURCE_L2) {
			r->cache.arch_has_sparse_bitmaps = true;
			r->cache.arch_has_empty_bitmaps = true;
			r->cache.arch_has_per_cpu_cfg = true;
		} else if (r->rid == RDT_RESOURCE_MBA) {
			hw_res->msr_base = MSR_IA32_MBA_BW_BASE;
			hw_res->msr_update = mba_wrmsr_amd;
		}
	}
}

static __init void rdt_init_res_defs(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		rdt_init_res_defs_intel();
	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		rdt_init_res_defs_amd();
}

static enum cpuhp_state rdt_online;

/* Runs once on the BSP during boot. */
void resctrl_cpu_detect(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
		c->x86_cache_max_rmid = -1;
		c->x86_cache_occ_scale = -1;
		c->x86_cache_mbm_width_offset = -1;
		return;
	}

	/* will be overridden if occupancy monitoring exists */
	c->x86_cache_max_rmid = cpuid_ebx(0xf);

	if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
	    cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
	    cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
		u32 eax, ebx, ecx, edx;

		/* QoS sub-leaf, EAX=0Fh, ECX=1 */
		cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);

		c->x86_cache_max_rmid = ecx;
		c->x86_cache_occ_scale = ebx;
		c->x86_cache_mbm_width_offset = eax & 0xff;

		if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset)
			c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
	}
}

static int __init resctrl_late_init(void)
{
	struct rdt_resource *r;
	int state, ret;

	/*
	 * Initialize functions (or definitions) that are different
	 * between vendors here.
	 */
	rdt_init_res_defs();

	check_quirks();

	if (!get_rdt_resources())
		return -ENODEV;

	rdt_init_padding();

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				  "x86/resctrl/cat:online:",
				  resctrl_online_cpu, resctrl_offline_cpu);
	if (state < 0)
		return state;

	ret = rdtgroup_init();
	if (ret) {
		cpuhp_remove_state(state);
		return ret;
	}
	rdt_online = state;

	for_each_alloc_capable_rdt_resource(r)
		pr_info("%s allocation detected\n", r->name);

	for_each_mon_capable_rdt_resource(r)
		pr_info("%s monitoring detected\n", r->name);

	return 0;
}

late_initcall(resctrl_late_init);

static void __exit resctrl_exit(void)
{
	cpuhp_remove_state(rdt_online);
	rdtgroup_exit();
}

__exitcall(resctrl_exit);