/*
 * Resource Director Technology (RDT)
 * - Monitoring code
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author:
 *    Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * This replaces the cqm.c based on perf but we reuse a lot of
 * code and data structures originally from Peter Zijlstra and Matt Fleming.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include "internal.h"

struct rmid_entry {
	u32			rmid;
	int			busy;
	struct list_head	list;
};

/**
 * @rmid_free_lru	A least recently used list of free RMIDs
 *	These RMIDs are guaranteed to have an occupancy less than the
 *	threshold occupancy
 */
static LIST_HEAD(rmid_free_lru);

/**
 * @rmid_limbo_count	count of currently unused but (potentially)
 *	dirty RMIDs.
 *	This counts RMIDs that no one is currently using but that
 *	may have an occupancy value > resctrl_cqm_threshold. User can
 *	change the threshold occupancy value.
 */
static unsigned int rmid_limbo_count;

/**
 * @rmid_ptrs	Array of rmid_entry structures backing the limbo and
 *	free lists, indexed by RMID.
 */
static struct rmid_entry	*rmid_ptrs;

/*
 * Global boolean for rdt_monitor which is true if any
 * resource monitoring is enabled.
 */
bool rdt_mon_capable;

/*
 * Global to indicate which monitoring events are enabled.
 */
unsigned int rdt_mon_features;

/*
 * This is the threshold cache occupancy at which we will consider an
 * RMID available for re-allocation.
 */
unsigned int resctrl_cqm_threshold;

static inline struct rmid_entry *__rmid_entry(u32 rmid)
{
	struct rmid_entry *entry;

	entry = &rmid_ptrs[rmid];
	WARN_ON(entry->rmid != rmid);

	return entry;
}

static u64 __rmid_read(u32 rmid, u32 eventid)
{
	u64 val;

	/*
	 * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
	 * with a valid event code for supported resource type and the bits
	 * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
	 * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
	 * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
	 * are error bits.
	 */
	wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
	rdmsrl(MSR_IA32_QM_CTR, val);

	return val;
}
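
/*
 * An RMID is considered dirty if its reported LLC occupancy is still at
 * or above resctrl_cqm_threshold, i.e. enough cache lines are tagged
 * with it that reusing it now would misattribute occupancy to the new
 * owner.
 */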
static bool rmid_dirty(struct rmid_entry *entry)
{
	u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);

	return val >= resctrl_cqm_threshold;
}

/*
 * Check the RMIDs that are marked as busy for this domain. If the
 * reported LLC occupancy is below the threshold, clear the busy bit and
 * decrement the count. If the busy count gets to zero on an RMID, we
 * free the RMID.
 */
void __check_limbo(struct rdt_domain *d, bool force_free)
{
	struct rmid_entry *entry;
	struct rdt_resource *r;
	u32 crmid = 1, nrmid;

	r = &rdt_resources_all[RDT_RESOURCE_L3];

	/*
	 * Skip RMID 0 and start from RMID 1 and check all the RMIDs that
	 * are marked as busy for occupancy < threshold. If the occupancy
	 * is less than the threshold, decrement the busy counter of the
	 * RMID and move it to the free list when the counter reaches 0.
	 */
	for (;;) {
		nrmid = find_next_bit(d->rmid_busy_llc, r->num_rmid, crmid);
		if (nrmid >= r->num_rmid)
			break;

		entry = __rmid_entry(nrmid);
		if (force_free || !rmid_dirty(entry)) {
			clear_bit(entry->rmid, d->rmid_busy_llc);
			if (!--entry->busy) {
				rmid_limbo_count--;
				list_add_tail(&entry->list, &rmid_free_lru);
			}
		}
		crmid = nrmid + 1;
	}
}
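
/*
 * Illustrative walk-through: an RMID freed while still dirty on two
 * packages enters limbo with ->busy == 2 and its bit set in both
 * domains' rmid_busy_llc bitmaps (see add_rmid_to_limbo() below). Each
 * domain's limbo worker clears its bit once the occupancy reported on
 * that package drops below the threshold; the second decrement takes
 * ->busy to zero and returns the RMID to the free list.
 */
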
bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d)
{
	return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid;
}

/*
 * As of now the RMIDs allocation is global.
 * However we keep track of which packages the RMIDs
 * are used on to optimize the limbo list management.
 */
int alloc_rmid(void)
{
	struct rmid_entry *entry;

	lockdep_assert_held(&rdtgroup_mutex);

	if (list_empty(&rmid_free_lru))
		return rmid_limbo_count ? -EBUSY : -ENOSPC;

	entry = list_first_entry(&rmid_free_lru,
				 struct rmid_entry, list);
	list_del(&entry->list);

	return entry->rmid;
}

static void add_rmid_to_limbo(struct rmid_entry *entry)
{
	struct rdt_resource *r;
	struct rdt_domain *d;
	int cpu;
	u64 val;

	r = &rdt_resources_all[RDT_RESOURCE_L3];

	entry->busy = 0;
	cpu = get_cpu();
	list_for_each_entry(d, &r->domains, list) {
		if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
			val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
			if (val <= resctrl_cqm_threshold)
				continue;
		}

		/*
		 * For the first limbo RMID in the domain,
		 * set up the limbo worker.
		 */
		if (!has_busy_rmid(r, d))
			cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL);
		set_bit(entry->rmid, d->rmid_busy_llc);
		entry->busy++;
	}
	put_cpu();

	if (entry->busy)
		rmid_limbo_count++;
	else
		list_add_tail(&entry->list, &rmid_free_lru);
}

void free_rmid(u32 rmid)
{
	struct rmid_entry *entry;

	if (!rmid)
		return;

	lockdep_assert_held(&rdtgroup_mutex);

	entry = __rmid_entry(rmid);

	if (is_llc_occupancy_enabled())
		add_rmid_to_limbo(entry);
	else
		list_add_tail(&entry->list, &rmid_free_lru);
}
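
/*
 * The MBM counters are MBM_CNTR_WIDTH bits wide and wrap around.
 * Shifting both samples up by (64 - MBM_CNTR_WIDTH) bits lets the
 * unsigned 64-bit subtraction handle the wrap for free. Worked example
 * (illustrative, assuming a 24-bit counter): prev_msr = 0xfffffe and
 * cur_msr = 0x000003 yields 5 chunks, not a huge bogus delta.
 */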
static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr)
{
	u64 shift = 64 - MBM_CNTR_WIDTH, chunks;

	chunks = (cur_msr << shift) - (prev_msr << shift);
	return chunks >> shift;
}

static int __mon_event_count(u32 rmid, struct rmid_read *rr)
{
	struct mbm_state *m;
	u64 chunks, tval;

	tval = __rmid_read(rmid, rr->evtid);
	if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
		rr->val = tval;
		return -EINVAL;
	}
	switch (rr->evtid) {
	case QOS_L3_OCCUP_EVENT_ID:
		rr->val += tval;
		return 0;
	case QOS_L3_MBM_TOTAL_EVENT_ID:
		m = &rr->d->mbm_total[rmid];
		break;
	case QOS_L3_MBM_LOCAL_EVENT_ID:
		m = &rr->d->mbm_local[rmid];
		break;
	default:
		/*
		 * Code would never reach here because
		 * an invalid event id would fail the __rmid_read.
		 */
		return -EINVAL;
	}

	if (rr->first) {
		memset(m, 0, sizeof(struct mbm_state));
		m->prev_bw_msr = m->prev_msr = tval;
		return 0;
	}

	chunks = mbm_overflow_count(m->prev_msr, tval);
	m->chunks += chunks;
	m->prev_msr = tval;

	rr->val += m->chunks;
	return 0;
}

/*
 * Supporting function to calculate the memory bandwidth
 * and delta bandwidth in MBps.
 */
static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
	struct mbm_state *m = &rr->d->mbm_local[rmid];
	u64 tval, cur_bw, chunks;

	tval = __rmid_read(rmid, rr->evtid);
	if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL))
		return;

	chunks = mbm_overflow_count(m->prev_bw_msr, tval);
	m->chunks_bw += chunks;
	m->chunks = m->chunks_bw;
	cur_bw = (chunks * r->mon_scale) >> 20;

	if (m->delta_comp)
		m->delta_bw = abs(cur_bw - m->prev_bw);
	m->delta_comp = false;
	m->prev_bw = cur_bw;
	m->prev_bw_msr = tval;
}
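
/*
 * Note on the bandwidth units above: chunks * mon_scale is the number
 * of bytes counted since the previous sample, and the >> 20 converts
 * bytes to MiB. Because mbm_bw_count() runs from the overflow worker
 * on a 1s interval, the MiB delta reads directly as MBps. Illustrative
 * example (assuming mon_scale = 64): 163840 chunks -> 10485760 bytes
 * -> 10 MBps.
 */
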
/*
 * This is called via IPI to read the CQM/MBM counters
 * on a domain.
 */
void mon_event_count(void *info)
{
	struct rdtgroup *rdtgrp, *entry;
	struct rmid_read *rr = info;
	struct list_head *head;

	rdtgrp = rr->rgrp;

	if (__mon_event_count(rdtgrp->mon.rmid, rr))
		return;

	/*
	 * For Ctrl groups read data from child monitor groups.
	 */
	head = &rdtgrp->mon.crdtgrp_list;

	if (rdtgrp->type == RDTCTRL_GROUP) {
		list_for_each_entry(entry, head, mon.crdtgrp_list) {
			if (__mon_event_count(entry->mon.rmid, rr))
				return;
		}
	}
}

/*
 * Feedback loop for MBA software controller (mba_sc)
 *
 * mba_sc is a feedback loop where we periodically read MBM counters and
 * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
 * that:
 *
 *   current bandwidth (cur_bw) < user specified bandwidth (user_bw)
 *
 * This uses the MBM counters to measure the bandwidth and MBA throttle
 * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
 * fact that resctrl rdtgroups have both monitoring and control.
 *
 * The frequency of the checks is 1s and we just tag along the MBM overflow
 * timer. Having a 1s interval makes the calculation of bandwidth simpler.
 *
 * Although MBA's goal is to restrict the bandwidth to a maximum, there may
 * be a need to increase the bandwidth to avoid unnecessarily restricting
 * the L2 <-> L3 traffic.
 *
 * Since MBA controls the L2 external bandwidth, whereas MBM measures the
 * L3 external bandwidth, the following sequence could lead to such a
 * situation.
 *
 * Consider an rdtgroup which had high L3 <-> memory traffic in initial
 * phases -> mba_sc kicks in and reduces bandwidth percentage values -> but
 * after some time rdtgroup has mostly L2 <-> L3 traffic.
 *
 * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
 * throttle MSRs already have low percentage values. To avoid
 * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
 */
static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
{
	u32 closid, rmid, cur_msr, cur_msr_val, new_msr_val;
	struct mbm_state *pmbm_data, *cmbm_data;
	u32 cur_bw, delta_bw, user_bw;
	struct rdt_resource *r_mba;
	struct rdt_domain *dom_mba;
	struct list_head *head;
	struct rdtgroup *entry;

	r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
	closid = rgrp->closid;
	rmid = rgrp->mon.rmid;
	pmbm_data = &dom_mbm->mbm_local[rmid];

	dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba);
	if (!dom_mba) {
		pr_warn_once("Failure to get domain for MBA update\n");
		return;
	}

	cur_bw = pmbm_data->prev_bw;
	user_bw = dom_mba->mbps_val[closid];
	delta_bw = pmbm_data->delta_bw;
	cur_msr_val = dom_mba->ctrl_val[closid];

	/*
	 * For Ctrl groups read data from child monitor groups.
	 */
	head = &rgrp->mon.crdtgrp_list;
	list_for_each_entry(entry, head, mon.crdtgrp_list) {
		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
		cur_bw += cmbm_data->prev_bw;
		delta_bw += cmbm_data->delta_bw;
	}

	/*
	 * Scale up/down the bandwidth linearly for the ctrl group. The
	 * bandwidth step is the bandwidth granularity specified by the
	 * hardware.
	 *
	 * The delta_bw is used when increasing the bandwidth so that we
	 * don't alternately increase and decrease the control values
	 * continuously.
	 *
	 * For example, consider cur_bw = 90MBps, user_bw = 100MBps and a
	 * bandwidth step of 20MBps (> user_bw - cur_bw); we would keep
	 * switching between 90 and 110 continuously if we only checked
	 * cur_bw < user_bw.
	 */
	if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
		new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
	} else if (cur_msr_val < MAX_MBA_BW &&
		   (user_bw > (cur_bw + delta_bw))) {
		new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
	} else {
		return;
	}

	cur_msr = r_mba->msr_base + closid;
	wrmsrl(cur_msr, delay_bw_map(new_msr_val, r_mba));
	dom_mba->ctrl_val[closid] = new_msr_val;

	/*
	 * Delta values are updated dynamically package-wise for each
	 * rdtgrp every time the throttle MSR changes value.
	 *
	 * This is because (1) the increase in bandwidth is not perfectly
	 * linear and only "approximately" linear even when the hardware
	 * says it is linear. (2) Also, since MBA is a core specific
	 * mechanism, the delta values vary based on the number of cores
	 * used by the rdtgrp.
	 */
	pmbm_data->delta_comp = true;
	list_for_each_entry(entry, head, mon.crdtgrp_list) {
		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
		cmbm_data->delta_comp = true;
	}
}
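
/*
 * Read the enabled MBM events for @rmid on this domain and fold the
 * results into the cached counter state. Called from the overflow
 * worker for every monitor group, once per second.
 */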
static void mbm_update(struct rdt_domain *d, int rmid)
{
	struct rmid_read rr;

	rr.first = false;
	rr.d = d;

	/*
	 * This is protected from concurrent reads from user
	 * as both the user and we hold the global mutex.
	 */
	if (is_mbm_total_enabled()) {
		rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
		__mon_event_count(rmid, &rr);
	}
	if (is_mbm_local_enabled()) {
		rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;

		/*
		 * Call the MBA software controller only for the
		 * control groups and when user has enabled
		 * the software controller explicitly.
		 */
		if (!is_mba_sc(NULL))
			__mon_event_count(rmid, &rr);
		else
			mbm_bw_count(rmid, &rr);
	}
}

/*
 * Handler to scan the limbo list and move RMIDs whose occupancy is
 * below the threshold occupancy back to the free list.
 */
void cqm_handle_limbo(struct work_struct *work)
{
	unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
	int cpu = smp_processor_id();
	struct rdt_resource *r;
	struct rdt_domain *d;

	mutex_lock(&rdtgroup_mutex);

	r = &rdt_resources_all[RDT_RESOURCE_L3];
	d = get_domain_from_cpu(cpu, r);

	if (!d) {
		pr_warn_once("Failure to get domain for limbo worker\n");
		goto out_unlock;
	}

	__check_limbo(d, false);

	if (has_busy_rmid(r, d))
		schedule_delayed_work_on(cpu, &d->cqm_limbo, delay);

out_unlock:
	mutex_unlock(&rdtgroup_mutex);
}

void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
{
	unsigned long delay = msecs_to_jiffies(delay_ms);
	int cpu;

	cpu = cpumask_any(&dom->cpu_mask);
	dom->cqm_work_cpu = cpu;

	schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
}

void mbm_handle_overflow(struct work_struct *work)
{
	unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
	struct rdtgroup *prgrp, *crgrp;
	int cpu = smp_processor_id();
	struct list_head *head;
	struct rdt_domain *d;

	mutex_lock(&rdtgroup_mutex);

	if (!static_branch_likely(&rdt_enable_key))
		goto out_unlock;

	d = get_domain_from_cpu(cpu, &rdt_resources_all[RDT_RESOURCE_L3]);
	if (!d)
		goto out_unlock;

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		mbm_update(d, prgrp->mon.rmid);

		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list)
			mbm_update(d, crgrp->mon.rmid);

		if (is_mba_sc(NULL))
			update_mba_bw(prgrp, d);
	}

	schedule_delayed_work_on(cpu, &d->mbm_over, delay);

out_unlock:
	mutex_unlock(&rdtgroup_mutex);
}

void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
{
	unsigned long delay = msecs_to_jiffies(delay_ms);
	int cpu;

	if (!static_branch_likely(&rdt_enable_key))
		return;
	cpu = cpumask_any(&dom->cpu_mask);
	dom->mbm_work_cpu = cpu;
	schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
}

static int dom_data_init(struct rdt_resource *r)
{
	struct rmid_entry *entry = NULL;
	int i, nr_rmids;

	nr_rmids = r->num_rmid;
	rmid_ptrs = kcalloc(nr_rmids, sizeof(struct rmid_entry), GFP_KERNEL);
	if (!rmid_ptrs)
		return -ENOMEM;

	for (i = 0; i < nr_rmids; i++) {
		entry = &rmid_ptrs[i];
		INIT_LIST_HEAD(&entry->list);

		entry->rmid = i;
		list_add_tail(&entry->list, &rmid_free_lru);
	}

	/*
	 * RMID 0 is special and is always allocated. It's used for all
	 * tasks that are not monitored.
	 */
	entry = __rmid_entry(0);
	list_del(&entry->list);

	return 0;
}

static struct mon_evt llc_occupancy_event = {
	.name	= "llc_occupancy",
	.evtid	= QOS_L3_OCCUP_EVENT_ID,
};

static struct mon_evt mbm_total_event = {
	.name	= "mbm_total_bytes",
	.evtid	= QOS_L3_MBM_TOTAL_EVENT_ID,
};

static struct mon_evt mbm_local_event = {
	.name	= "mbm_local_bytes",
	.evtid	= QOS_L3_MBM_LOCAL_EVENT_ID,
};

/*
 * Initialize the event list for the resource.
 *
 * Note that MBM events are also part of RDT_RESOURCE_L3 resource
 * because as per the SDM the total and local memory bandwidth
 * are enumerated as part of L3 monitoring.
 */
static void l3_mon_evt_init(struct rdt_resource *r)
{
	INIT_LIST_HEAD(&r->evt_list);

	if (is_llc_occupancy_enabled())
		list_add_tail(&llc_occupancy_event.list, &r->evt_list);
	if (is_mbm_total_enabled())
		list_add_tail(&mbm_total_event.list, &r->evt_list);
	if (is_mbm_local_enabled())
		list_add_tail(&mbm_local_event.list, &r->evt_list);
}
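
/*
 * Worked example for the threshold computation below (illustrative;
 * assumes an occupancy scale of 64 bytes): a 35MB LLC reports
 * x86_cache_size = 35840 (KB), so with 56 RMIDs
 * resctrl_cqm_threshold = 35840 * 1024 / 56 = 655360 bytes, which
 * divided by mon_scale gives 10240 hardware units (~1.8% of the LLC).
 */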
int rdt_get_mon_l3_config(struct rdt_resource *r)
{
	unsigned int cl_size = boot_cpu_data.x86_cache_size;
	int ret;

	r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
	r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;

	/*
	 * A reasonable upper limit on the max threshold is the number
	 * of lines tagged per RMID if all RMIDs have the same number of
	 * lines tagged in the LLC.
	 *
	 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
	 */
	resctrl_cqm_threshold = cl_size * 1024 / r->num_rmid;

	/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
	resctrl_cqm_threshold /= r->mon_scale;

	ret = dom_data_init(r);
	if (ret)
		return ret;

	l3_mon_evt_init(r);

	r->mon_capable = true;
	r->mon_enabled = true;

	return 0;
}