// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology(RDT)
 * - Monitoring code
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author:
 *    Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * This replaces the cqm.c based on perf but we reuse a lot of
 * code and data structures originally from Peter Zijlstra and Matt Fleming.
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include <asm/cpu_device_id.h>
#include <asm/resctrl.h>

#include "internal.h"

struct rmid_entry {
	u32				rmid;
	int				busy;
	struct list_head		list;
};

/**
 * @rmid_free_lru    A least recently used list of free RMIDs
 *     These RMIDs are guaranteed to have an occupancy less than the
 *     threshold occupancy
 */
static LIST_HEAD(rmid_free_lru);

/**
 * @rmid_limbo_count     count of currently unused but (potentially)
 *     dirty RMIDs.
 *     This counts RMIDs that no one is currently using but that
 *     may have an occupancy value > resctrl_rmid_realloc_threshold. User can
 *     change the threshold occupancy value.
 */
static unsigned int rmid_limbo_count;

/**
 * @rmid_entry - The entry in the limbo and free lists.
 */
static struct rmid_entry	*rmid_ptrs;

/*
 * Global boolean for rdt_monitor which is true if any
 * resource monitoring is enabled.
 */
bool rdt_mon_capable;

/*
 * Global to indicate which monitoring events are enabled.
 */
unsigned int rdt_mon_features;

/*
 * This is the threshold cache occupancy in bytes at which we will consider an
 * RMID available for re-allocation.
 */
unsigned int resctrl_rmid_realloc_threshold;

/*
 * This is the maximum value for the reallocation threshold, in bytes.
 */
unsigned int resctrl_rmid_realloc_limit;

#define CF(cf)	((unsigned long)(1048576 * (cf) + 0.5))

/*
 * The correction factor table is documented in Documentation/x86/resctrl.rst.
 * If rmid > rmid threshold, MBM total and local values should be multiplied
 * by the correction factor.
 *
 * The original table is modified for better code:
 *
 * 1. The threshold 0 is changed to rmid count - 1 so that no correction
 *    is done for that case.
 * 2. The MBM total and local correction table is indexed by the core counter,
 *    which is equal to (x86_cache_max_rmid + 1) / 8 - 1 and ranges from 0 to 27.
 * 3. The correction factor is normalized to 2^20 (1048576) so it's faster
 *    to calculate the corrected value by shifting:
 *    corrected_value = (original_value * correction_factor) >> 20
 */
static const struct mbm_correction_factor_table {
	u32 rmidthreshold;
	u64 cf;
} mbm_cf_table[] __initconst = {
	{7,	CF(1.000000)},
	{15,	CF(1.000000)},
	{15,	CF(0.969650)},
	{31,	CF(1.000000)},
	{31,	CF(1.066667)},
	{31,	CF(0.969650)},
	{47,	CF(1.142857)},
	{63,	CF(1.000000)},
	{63,	CF(1.185115)},
	{63,	CF(1.066553)},
	{79,	CF(1.454545)},
	{95,	CF(1.000000)},
	{95,	CF(1.230769)},
	{95,	CF(1.142857)},
	{95,	CF(1.066667)},
	{127,	CF(1.000000)},
	{127,	CF(1.254863)},
	{127,	CF(1.185255)},
	{151,	CF(1.000000)},
	{127,	CF(1.066667)},
	{167,	CF(1.000000)},
	{159,	CF(1.454334)},
	{183,	CF(1.000000)},
	{127,	CF(0.969744)},
	{191,	CF(1.280246)},
	{191,	CF(1.230921)},
	{215,	CF(1.000000)},
	{191,	CF(1.143118)},
};

static u32 mbm_cf_rmidthreshold __read_mostly = UINT_MAX;
static u64 mbm_cf __read_mostly;

static inline u64 get_corrected_mbm_count(u32 rmid, unsigned long val)
{
	/* Correct MBM value. */
	if (rmid > mbm_cf_rmidthreshold)
		val = (val * mbm_cf) >> 20;

	return val;
}

static inline struct rmid_entry *__rmid_entry(u32 rmid)
{
	struct rmid_entry *entry;

	entry = &rmid_ptrs[rmid];
	WARN_ON(entry->rmid != rmid);

	return entry;
}

static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom,
						 u32 rmid,
						 enum resctrl_event_id eventid)
{
	switch (eventid) {
	case QOS_L3_OCCUP_EVENT_ID:
		return NULL;
	case QOS_L3_MBM_TOTAL_EVENT_ID:
		return &hw_dom->arch_mbm_total[rmid];
	case QOS_L3_MBM_LOCAL_EVENT_ID:
		return &hw_dom->arch_mbm_local[rmid];
	}

	/* Never expect to get here */
	WARN_ON_ONCE(1);

	return NULL;
}

void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
			     u32 rmid, enum resctrl_event_id eventid)
{
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct arch_mbm_state *am;

	am = get_arch_mbm_state(hw_dom, rmid, eventid);
	if (am)
		memset(am, 0, sizeof(*am));
}

static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
{
	u64 shift = 64 - width, chunks;

	chunks = (cur_msr << shift) - (prev_msr << shift);
	return chunks >> shift;
}

int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
			   u32 rmid, enum resctrl_event_id eventid, u64 *val)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct arch_mbm_state *am;
	u64 msr_val, chunks;

	if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
		return -EINVAL;

	/*
	 * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
	 * with a valid event code for a supported resource type and the bits
	 * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with a valid RMID,
	 * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
	 * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
	 * are error bits.
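	 *
	 * (Illustrative note, not from the SDM text above: wrmsr() below
	 * takes the low and high 32 bits of the value separately, so the
	 * eventid lands in the low word and the rmid in bits 63:32, which
	 * contain the RMID field.)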
	 */
	wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
	rdmsrl(MSR_IA32_QM_CTR, msr_val);

	if (msr_val & RMID_VAL_ERROR)
		return -EIO;
	if (msr_val & RMID_VAL_UNAVAIL)
		return -EINVAL;

	am = get_arch_mbm_state(hw_dom, rmid, eventid);
	if (am) {
		am->chunks += mbm_overflow_count(am->prev_msr, msr_val,
						 hw_res->mbm_width);
		chunks = get_corrected_mbm_count(rmid, am->chunks);
		am->prev_msr = msr_val;
	} else {
		chunks = msr_val;
	}

	*val = chunks * hw_res->mon_scale;

	return 0;
}

/*
 * Check the RMIDs that are marked as busy for this domain. If the
 * reported LLC occupancy is below the threshold, clear the busy bit and
 * decrement the count. If the busy count gets to zero on an RMID, we
 * free the RMID.
 */
void __check_limbo(struct rdt_domain *d, bool force_free)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
	struct rmid_entry *entry;
	u32 crmid = 1, nrmid;
	bool rmid_dirty;
	u64 val = 0;

	/*
	 * Skip RMID 0 and start from RMID 1 and check all the RMIDs that
	 * are marked as busy for occupancy < threshold. If the occupancy
	 * is less than the threshold decrement the busy counter of the
	 * RMID and move it to the free list when the counter reaches 0.
	 */
	for (;;) {
		nrmid = find_next_bit(d->rmid_busy_llc, r->num_rmid, crmid);
		if (nrmid >= r->num_rmid)
			break;

		entry = __rmid_entry(nrmid);

		if (resctrl_arch_rmid_read(r, d, entry->rmid,
					   QOS_L3_OCCUP_EVENT_ID, &val)) {
			rmid_dirty = true;
		} else {
			rmid_dirty = (val >= resctrl_rmid_realloc_threshold);
		}

		if (force_free || !rmid_dirty) {
			clear_bit(entry->rmid, d->rmid_busy_llc);
			if (!--entry->busy) {
				rmid_limbo_count--;
				list_add_tail(&entry->list, &rmid_free_lru);
			}
		}
		crmid = nrmid + 1;
	}
}

bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d)
{
	return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid;
}

/*
 * As of now the RMIDs allocation is global.
 * However we keep track of which packages the RMIDs
 * are used on to optimize the limbo list management.
 */
int alloc_rmid(void)
{
	struct rmid_entry *entry;

	lockdep_assert_held(&rdtgroup_mutex);

	if (list_empty(&rmid_free_lru))
		return rmid_limbo_count ? -EBUSY : -ENOSPC;

	entry = list_first_entry(&rmid_free_lru,
				 struct rmid_entry, list);
	list_del(&entry->list);

	return entry->rmid;
}

static void add_rmid_to_limbo(struct rmid_entry *entry)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
	struct rdt_domain *d;
	int cpu, err;
	u64 val = 0;

	entry->busy = 0;
	cpu = get_cpu();
	list_for_each_entry(d, &r->domains, list) {
		if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
			err = resctrl_arch_rmid_read(r, d, entry->rmid,
						     QOS_L3_OCCUP_EVENT_ID,
						     &val);
			if (err || val <= resctrl_rmid_realloc_threshold)
				continue;
		}

		/*
		 * For the first limbo RMID in the domain,
		 * set up the limbo worker.
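		 * The worker re-checks the domain's busy RMIDs every
		 * CQM_LIMBOCHECK_INTERVAL via cqm_handle_limbo() and keeps
		 * rescheduling itself until none remain busy.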
		 */
		if (!has_busy_rmid(r, d))
			cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL);
		set_bit(entry->rmid, d->rmid_busy_llc);
		entry->busy++;
	}
	put_cpu();

	if (entry->busy)
		rmid_limbo_count++;
	else
		list_add_tail(&entry->list, &rmid_free_lru);
}

void free_rmid(u32 rmid)
{
	struct rmid_entry *entry;

	if (!rmid)
		return;

	lockdep_assert_held(&rdtgroup_mutex);

	entry = __rmid_entry(rmid);

	if (is_llc_occupancy_enabled())
		add_rmid_to_limbo(entry);
	else
		list_add_tail(&entry->list, &rmid_free_lru);
}

static int __mon_event_count(u32 rmid, struct rmid_read *rr)
{
	struct mbm_state *m;
	u64 tval = 0;

	if (rr->first)
		resctrl_arch_reset_rmid(rr->r, rr->d, rmid, rr->evtid);

	rr->err = resctrl_arch_rmid_read(rr->r, rr->d, rmid, rr->evtid, &tval);
	if (rr->err)
		return rr->err;

	switch (rr->evtid) {
	case QOS_L3_OCCUP_EVENT_ID:
		rr->val += tval;
		return 0;
	case QOS_L3_MBM_TOTAL_EVENT_ID:
		m = &rr->d->mbm_total[rmid];
		break;
	case QOS_L3_MBM_LOCAL_EVENT_ID:
		m = &rr->d->mbm_local[rmid];
		break;
	default:
		/*
		 * Code would never reach here because an invalid
		 * event id would fail in resctrl_arch_rmid_read().
		 */
		return -EINVAL;
	}

	if (rr->first) {
		memset(m, 0, sizeof(struct mbm_state));
		return 0;
	}

	rr->val += tval;

	return 0;
}

/*
 * mbm_bw_count() - Update bw count from values previously read by
 *		    __mon_event_count().
 * @rmid:	The rmid used to identify the cached mbm_state.
 * @rr:		The struct rmid_read populated by __mon_event_count().
 *
 * Supporting function to calculate the memory bandwidth
 * and delta bandwidth in MBps. The chunks value previously read by
 * __mon_event_count() is compared with the chunks value from the previous
 * invocation. This must be called once per second to maintain values in MBps.
 */
static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
{
	struct mbm_state *m = &rr->d->mbm_local[rmid];
	u64 cur_bw, bytes, cur_bytes;

	cur_bytes = rr->val;
	bytes = cur_bytes - m->prev_bw_bytes;
	m->prev_bw_bytes = cur_bytes;

	cur_bw = bytes / SZ_1M;

	if (m->delta_comp)
		m->delta_bw = abs(cur_bw - m->prev_bw);
	m->delta_comp = false;
	m->prev_bw = cur_bw;
}

/*
 * This is called via IPI to read the CQM/MBM counters
 * on a domain.
 */
void mon_event_count(void *info)
{
	struct rdtgroup *rdtgrp, *entry;
	struct rmid_read *rr = info;
	struct list_head *head;
	int ret;

	rdtgrp = rr->rgrp;

	ret = __mon_event_count(rdtgrp->mon.rmid, rr);

	/*
	 * For Ctrl groups read data from child monitor groups and
	 * add them together. Count events which are read successfully.
	 * Discard the rmid_read's reporting errors.
	 */
	head = &rdtgrp->mon.crdtgrp_list;

	if (rdtgrp->type == RDTCTRL_GROUP) {
		list_for_each_entry(entry, head, mon.crdtgrp_list) {
			if (__mon_event_count(entry->mon.rmid, rr) == 0)
				ret = 0;
		}
	}

	/*
	 * __mon_event_count() calls for newly created monitor groups may
	 * report -EINVAL/Unavailable if the monitor hasn't seen any traffic.
	 * Discard error if any of the monitor event reads succeeded.
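	 * (resctrl_arch_rmid_read() maps the hardware "Unavailable" bit to
	 * -EINVAL, which is why a fresh monitor group can report it.)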
	 */
	if (ret == 0)
		rr->err = 0;
}

/*
 * Feedback loop for MBA software controller (mba_sc)
 *
 * mba_sc is a feedback loop where we periodically read MBM counters and
 * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
 * that:
 *
 *   current bandwidth (cur_bw) < user specified bandwidth (user_bw)
 *
 * This uses the MBM counters to measure the bandwidth and MBA throttle
 * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
 * fact that resctrl rdtgroups have both monitoring and control.
 *
 * The checks run at a 1s interval, piggybacking on the MBM overflow timer.
 * Having a 1s interval makes the calculation of bandwidth simpler.
 *
 * Although MBA's goal is to restrict the bandwidth to a maximum, there may
 * be a need to increase the bandwidth to avoid unnecessarily restricting
 * the L2 <-> L3 traffic.
 *
 * Since MBA controls the L2 external bandwidth whereas MBM measures the
 * L3 external bandwidth, the following sequence could lead to such a
 * situation.
 *
 * Consider an rdtgroup which had high L3 <-> memory traffic in initial
 * phases -> mba_sc kicks in and reduces the bandwidth percentage values ->
 * but after some time the rdtgroup has mostly L2 <-> L3 traffic.
 *
 * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
 * throttle MSRs already have low percentage values. To avoid
 * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
 */
static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
{
	u32 closid, rmid, cur_msr_val, new_msr_val;
	struct mbm_state *pmbm_data, *cmbm_data;
	u32 cur_bw, delta_bw, user_bw;
	struct rdt_resource *r_mba;
	struct rdt_domain *dom_mba;
	struct list_head *head;
	struct rdtgroup *entry;

	if (!is_mbm_local_enabled())
		return;

	r_mba = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;

	closid = rgrp->closid;
	rmid = rgrp->mon.rmid;
	pmbm_data = &dom_mbm->mbm_local[rmid];

	dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba);
	if (!dom_mba) {
		pr_warn_once("Failure to get domain for MBA update\n");
		return;
	}

	cur_bw = pmbm_data->prev_bw;
	user_bw = dom_mba->mbps_val[closid];
	delta_bw = pmbm_data->delta_bw;

	/* MBA resource doesn't support CDP */
	cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);

	/*
	 * For Ctrl groups read data from child monitor groups.
	 */
	head = &rgrp->mon.crdtgrp_list;
	list_for_each_entry(entry, head, mon.crdtgrp_list) {
		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
		cur_bw += cmbm_data->prev_bw;
		delta_bw += cmbm_data->delta_bw;
	}

	/*
	 * Scale up/down the bandwidth linearly for the ctrl group. The
	 * bandwidth step is the bandwidth granularity specified by the
	 * hardware.
	 *
	 * The delta_bw is used when increasing the bandwidth so that we
	 * don't alternately increase and decrease the control values
	 * continuously.
	 *
	 * For example: consider cur_bw = 90MBps, user_bw = 100MBps and a
	 * bandwidth step of 20MBps (> user_bw - cur_bw); we would keep
	 * switching between 90 and 110 continuously if we only checked
	 * cur_bw < user_bw.
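	 * Requiring user_bw > cur_bw + delta_bw before stepping up (see the
	 * condition below) damps that oscillation.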
	 */
	if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
		new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
	} else if (cur_msr_val < MAX_MBA_BW &&
		   (user_bw > (cur_bw + delta_bw))) {
		new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
	} else {
		return;
	}

	resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);

	/*
	 * Delta values are updated dynamically, package-wise, for each
	 * rdtgrp every time the throttle MSR changes value.
	 *
	 * This is because (1) the increase in bandwidth is not perfectly
	 * linear and only "approximately" linear even when the hardware
	 * says it is linear, and (2) since MBA is a core-specific
	 * mechanism, the delta values vary based on the number of cores
	 * used by the rdtgrp.
	 */
	pmbm_data->delta_comp = true;
	list_for_each_entry(entry, head, mon.crdtgrp_list) {
		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
		cmbm_data->delta_comp = true;
	}
}

static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid)
{
	struct rmid_read rr;

	rr.first = false;
	rr.r = r;
	rr.d = d;

	/*
	 * This is protected from concurrent reads from user space as both
	 * the user-space reader and this code hold the global mutex.
	 */
	if (is_mbm_total_enabled()) {
		rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
		rr.val = 0;
		__mon_event_count(rmid, &rr);
	}
	if (is_mbm_local_enabled()) {
		rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
		rr.val = 0;
		__mon_event_count(rmid, &rr);

		/*
		 * Call the MBA software controller only for the
		 * control groups and when user has enabled
		 * the software controller explicitly.
		 */
		if (is_mba_sc(NULL))
			mbm_bw_count(rmid, &rr);
	}
}

/*
 * Handler to scan the limbo list and move RMIDs whose occupancy is
 * below threshold_occupancy to the free list.
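 *
 * Runs from a delayed work item pinned to one CPU of the domain and
 * reschedules itself while the domain still has busy RMIDs.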
 */
void cqm_handle_limbo(struct work_struct *work)
{
	unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
	int cpu = smp_processor_id();
	struct rdt_resource *r;
	struct rdt_domain *d;

	mutex_lock(&rdtgroup_mutex);

	r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
	d = container_of(work, struct rdt_domain, cqm_limbo.work);

	__check_limbo(d, false);

	if (has_busy_rmid(r, d))
		schedule_delayed_work_on(cpu, &d->cqm_limbo, delay);

	mutex_unlock(&rdtgroup_mutex);
}

void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
{
	unsigned long delay = msecs_to_jiffies(delay_ms);
	int cpu;

	cpu = cpumask_any(&dom->cpu_mask);
	dom->cqm_work_cpu = cpu;

	schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
}

void mbm_handle_overflow(struct work_struct *work)
{
	unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
	struct rdtgroup *prgrp, *crgrp;
	int cpu = smp_processor_id();
	struct list_head *head;
	struct rdt_resource *r;
	struct rdt_domain *d;

	mutex_lock(&rdtgroup_mutex);

	if (!static_branch_likely(&rdt_mon_enable_key))
		goto out_unlock;

	r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
	d = container_of(work, struct rdt_domain, mbm_over.work);

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		mbm_update(r, d, prgrp->mon.rmid);

		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list)
			mbm_update(r, d, crgrp->mon.rmid);

		if (is_mba_sc(NULL))
			update_mba_bw(prgrp, d);
	}

	schedule_delayed_work_on(cpu, &d->mbm_over, delay);

out_unlock:
	mutex_unlock(&rdtgroup_mutex);
}

void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
{
	unsigned long delay = msecs_to_jiffies(delay_ms);
	int cpu;

	if (!static_branch_likely(&rdt_mon_enable_key))
		return;
	cpu = cpumask_any(&dom->cpu_mask);
	dom->mbm_work_cpu = cpu;
	schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
}

static int dom_data_init(struct rdt_resource *r)
{
	struct rmid_entry *entry = NULL;
	int i, nr_rmids;

	nr_rmids = r->num_rmid;
	rmid_ptrs = kcalloc(nr_rmids, sizeof(struct rmid_entry), GFP_KERNEL);
	if (!rmid_ptrs)
		return -ENOMEM;

	for (i = 0; i < nr_rmids; i++) {
		entry = &rmid_ptrs[i];
		INIT_LIST_HEAD(&entry->list);

		entry->rmid = i;
		list_add_tail(&entry->list, &rmid_free_lru);
	}

	/*
	 * RMID 0 is special and is always allocated. It's used for all
	 * tasks that are not monitored.
	 */
	entry = __rmid_entry(0);
	list_del(&entry->list);

	return 0;
}

static struct mon_evt llc_occupancy_event = {
	.name		= "llc_occupancy",
	.evtid		= QOS_L3_OCCUP_EVENT_ID,
};

static struct mon_evt mbm_total_event = {
	.name		= "mbm_total_bytes",
	.evtid		= QOS_L3_MBM_TOTAL_EVENT_ID,
};

static struct mon_evt mbm_local_event = {
	.name		= "mbm_local_bytes",
	.evtid		= QOS_L3_MBM_LOCAL_EVENT_ID,
};

/*
 * Initialize the event list for the resource.
 *
 * Note that MBM events are also part of RDT_RESOURCE_L3 resource
 * because as per the SDM the total and local memory bandwidth
 * are enumerated as part of L3 monitoring.
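 *
 * (The event names above are also the file names exposed under each
 * mon_data/mon_L3_* directory in the resctrl filesystem.)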
 */
static void l3_mon_evt_init(struct rdt_resource *r)
{
	INIT_LIST_HEAD(&r->evt_list);

	if (is_llc_occupancy_enabled())
		list_add_tail(&llc_occupancy_event.list, &r->evt_list);
	if (is_mbm_total_enabled())
		list_add_tail(&mbm_total_event.list, &r->evt_list);
	if (is_mbm_local_enabled())
		list_add_tail(&mbm_local_event.list, &r->evt_list);
}

int rdt_get_mon_l3_config(struct rdt_resource *r)
{
	unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset;
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	unsigned int threshold;
	int ret;

	resctrl_rmid_realloc_limit = boot_cpu_data.x86_cache_size * 1024;
	hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale;
	r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
	hw_res->mbm_width = MBM_CNTR_WIDTH_BASE;

	if (mbm_offset > 0 && mbm_offset <= MBM_CNTR_WIDTH_OFFSET_MAX)
		hw_res->mbm_width += mbm_offset;
	else if (mbm_offset > MBM_CNTR_WIDTH_OFFSET_MAX)
		pr_warn("Ignoring impossible MBM counter offset\n");

	/*
	 * A reasonable upper limit on the max threshold is the number
	 * of lines tagged per RMID if all RMIDs have the same number of
	 * lines tagged in the LLC.
	 *
	 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
	 */
	threshold = resctrl_rmid_realloc_limit / r->num_rmid;

	/*
	 * Because num_rmid may not be a power of two, round the value
	 * to the nearest multiple of hw_res->mon_scale so it matches a
	 * value the hardware will measure. mon_scale may not be a power of 2.
	 */
	resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(threshold);

	ret = dom_data_init(r);
	if (ret)
		return ret;

	l3_mon_evt_init(r);

	r->mon_capable = true;

	return 0;
}

void __init intel_rdt_mbm_apply_quirk(void)
{
	int cf_index;

	cf_index = (boot_cpu_data.x86_cache_max_rmid + 1) / 8 - 1;
	if (cf_index >= ARRAY_SIZE(mbm_cf_table)) {
		pr_info("No MBM correction factor available\n");
		return;
	}

	mbm_cf_rmidthreshold = mbm_cf_table[cf_index].rmidthreshold;
	mbm_cf = mbm_cf_table[cf_index].cf;
}