1 /* 2 * NUMA parameter parsing routines 3 * 4 * Copyright (c) 2014 Fujitsu Ltd. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 23 */ 24 25 #include "qemu/osdep.h" 26 #include "qemu/units.h" 27 #include "sysemu/hostmem.h" 28 #include "sysemu/numa.h" 29 #include "sysemu/sysemu.h" 30 #include "exec/cpu-common.h" 31 #include "exec/ramlist.h" 32 #include "qemu/bitmap.h" 33 #include "qemu/error-report.h" 34 #include "qapi/error.h" 35 #include "qapi/opts-visitor.h" 36 #include "qapi/qapi-visit-machine.h" 37 #include "sysemu/qtest.h" 38 #include "hw/core/cpu.h" 39 #include "hw/mem/pc-dimm.h" 40 #include "migration/vmstate.h" 41 #include "hw/boards.h" 42 #include "hw/mem/memory-device.h" 43 #include "qemu/option.h" 44 #include "qemu/config-file.h" 45 #include "qemu/cutils.h" 46 47 QemuOptsList qemu_numa_opts = { 48 .name = "numa", 49 .implied_opt_name = "type", 50 .head = QTAILQ_HEAD_INITIALIZER(qemu_numa_opts.head), 51 .desc = { { 0 } } /* validated with OptsVisitor */ 52 }; 53 54 static int have_memdevs; 55 static int have_mem; 56 static int max_numa_nodeid; /* Highest specified NUMA node ID, plus one. 57 * For all nodes, nodeid < max_numa_nodeid 58 */ 59 60 static void parse_numa_node(MachineState *ms, NumaNodeOptions *node, 61 Error **errp) 62 { 63 Error *err = NULL; 64 uint16_t nodenr; 65 uint16List *cpus = NULL; 66 MachineClass *mc = MACHINE_GET_CLASS(ms); 67 unsigned int max_cpus = ms->smp.max_cpus; 68 NodeInfo *numa_info = ms->numa_state->nodes; 69 70 if (node->has_nodeid) { 71 nodenr = node->nodeid; 72 } else { 73 nodenr = ms->numa_state->num_nodes; 74 } 75 76 if (nodenr >= MAX_NODES) { 77 error_setg(errp, "Max number of NUMA nodes reached: %" 78 PRIu16 "", nodenr); 79 return; 80 } 81 82 if (numa_info[nodenr].present) { 83 error_setg(errp, "Duplicate NUMA nodeid: %" PRIu16, nodenr); 84 return; 85 } 86 87 if (!mc->cpu_index_to_instance_props || !mc->get_default_cpu_node_id) { 88 error_setg(errp, "NUMA is not supported by this machine-type"); 89 return; 90 } 91 for (cpus = node->cpus; cpus; cpus = cpus->next) { 92 CpuInstanceProperties props; 93 if (cpus->value >= max_cpus) { 94 error_setg(errp, 95 "CPU index (%" PRIu16 ")" 96 " should be smaller than maxcpus (%d)", 97 cpus->value, max_cpus); 98 return; 99 } 100 props = mc->cpu_index_to_instance_props(ms, cpus->value); 101 props.node_id = nodenr; 102 props.has_node_id = true; 103 machine_set_cpu_numa_node(ms, &props, &err); 104 if (err) { 105 error_propagate(errp, err); 106 return; 107 } 108 } 109 110 have_memdevs = have_memdevs ? : node->has_memdev; 111 have_mem = have_mem ? : node->has_mem; 112 if ((node->has_mem && have_memdevs) || (node->has_memdev && have_mem)) { 113 error_setg(errp, "numa configuration should use either mem= or memdev=," 114 "mixing both is not allowed"); 115 return; 116 } 117 118 if (node->has_mem) { 119 numa_info[nodenr].node_mem = node->mem; 120 if (!qtest_enabled()) { 121 warn_report("Parameter -numa node,mem is deprecated," 122 " use -numa node,memdev instead"); 123 } 124 } 125 if (node->has_memdev) { 126 Object *o; 127 o = object_resolve_path_type(node->memdev, TYPE_MEMORY_BACKEND, NULL); 128 if (!o) { 129 error_setg(errp, "memdev=%s is ambiguous", node->memdev); 130 return; 131 } 132 133 object_ref(o); 134 numa_info[nodenr].node_mem = object_property_get_uint(o, "size", NULL); 135 numa_info[nodenr].node_memdev = MEMORY_BACKEND(o); 136 } 137 138 /* 139 * If not set the initiator, set it to MAX_NODES. And if 140 * HMAT is enabled and this node has no cpus, QEMU will raise error. 141 */ 142 numa_info[nodenr].initiator = MAX_NODES; 143 if (node->has_initiator) { 144 if (!ms->numa_state->hmat_enabled) { 145 error_setg(errp, "ACPI Heterogeneous Memory Attribute Table " 146 "(HMAT) is disabled, enable it with -machine hmat=on " 147 "before using any of hmat specific options"); 148 return; 149 } 150 151 if (node->initiator >= MAX_NODES) { 152 error_report("The initiator id %" PRIu16 " expects an integer " 153 "between 0 and %d", node->initiator, 154 MAX_NODES - 1); 155 return; 156 } 157 158 numa_info[nodenr].initiator = node->initiator; 159 } 160 numa_info[nodenr].present = true; 161 max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1); 162 ms->numa_state->num_nodes++; 163 } 164 165 static 166 void parse_numa_distance(MachineState *ms, NumaDistOptions *dist, Error **errp) 167 { 168 uint16_t src = dist->src; 169 uint16_t dst = dist->dst; 170 uint8_t val = dist->val; 171 NodeInfo *numa_info = ms->numa_state->nodes; 172 173 if (src >= MAX_NODES || dst >= MAX_NODES) { 174 error_setg(errp, "Parameter '%s' expects an integer between 0 and %d", 175 src >= MAX_NODES ? "src" : "dst", MAX_NODES - 1); 176 return; 177 } 178 179 if (!numa_info[src].present || !numa_info[dst].present) { 180 error_setg(errp, "Source/Destination NUMA node is missing. " 181 "Please use '-numa node' option to declare it first."); 182 return; 183 } 184 185 if (val < NUMA_DISTANCE_MIN) { 186 error_setg(errp, "NUMA distance (%" PRIu8 ") is invalid, " 187 "it shouldn't be less than %d.", 188 val, NUMA_DISTANCE_MIN); 189 return; 190 } 191 192 if (src == dst && val != NUMA_DISTANCE_MIN) { 193 error_setg(errp, "Local distance of node %d should be %d.", 194 src, NUMA_DISTANCE_MIN); 195 return; 196 } 197 198 numa_info[src].distance[dst] = val; 199 ms->numa_state->have_numa_distance = true; 200 } 201 202 void parse_numa_hmat_lb(NumaState *numa_state, NumaHmatLBOptions *node, 203 Error **errp) 204 { 205 int i, first_bit, last_bit; 206 uint64_t max_entry, temp_base, bitmap_copy; 207 NodeInfo *numa_info = numa_state->nodes; 208 HMAT_LB_Info *hmat_lb = 209 numa_state->hmat_lb[node->hierarchy][node->data_type]; 210 HMAT_LB_Data lb_data = {}; 211 HMAT_LB_Data *lb_temp; 212 213 /* Error checking */ 214 if (node->initiator > numa_state->num_nodes) { 215 error_setg(errp, "Invalid initiator=%d, it should be less than %d", 216 node->initiator, numa_state->num_nodes); 217 return; 218 } 219 if (node->target > numa_state->num_nodes) { 220 error_setg(errp, "Invalid target=%d, it should be less than %d", 221 node->target, numa_state->num_nodes); 222 return; 223 } 224 if (!numa_info[node->initiator].has_cpu) { 225 error_setg(errp, "Invalid initiator=%d, it isn't an " 226 "initiator proximity domain", node->initiator); 227 return; 228 } 229 if (!numa_info[node->target].present) { 230 error_setg(errp, "The target=%d should point to an existing node", 231 node->target); 232 return; 233 } 234 235 if (!hmat_lb) { 236 hmat_lb = g_malloc0(sizeof(*hmat_lb)); 237 numa_state->hmat_lb[node->hierarchy][node->data_type] = hmat_lb; 238 hmat_lb->list = g_array_new(false, true, sizeof(HMAT_LB_Data)); 239 } 240 hmat_lb->hierarchy = node->hierarchy; 241 hmat_lb->data_type = node->data_type; 242 lb_data.initiator = node->initiator; 243 lb_data.target = node->target; 244 245 if (node->data_type <= HMATLB_DATA_TYPE_WRITE_LATENCY) { 246 /* Input latency data */ 247 248 if (!node->has_latency) { 249 error_setg(errp, "Missing 'latency' option"); 250 return; 251 } 252 if (node->has_bandwidth) { 253 error_setg(errp, "Invalid option 'bandwidth' since " 254 "the data type is latency"); 255 return; 256 } 257 258 /* Detect duplicate configuration */ 259 for (i = 0; i < hmat_lb->list->len; i++) { 260 lb_temp = &g_array_index(hmat_lb->list, HMAT_LB_Data, i); 261 262 if (node->initiator == lb_temp->initiator && 263 node->target == lb_temp->target) { 264 error_setg(errp, "Duplicate configuration of the latency for " 265 "initiator=%d and target=%d", node->initiator, 266 node->target); 267 return; 268 } 269 } 270 271 hmat_lb->base = hmat_lb->base ? hmat_lb->base : UINT64_MAX; 272 273 if (node->latency) { 274 /* Calculate the temporary base and compressed latency */ 275 max_entry = node->latency; 276 temp_base = 1; 277 while (QEMU_IS_ALIGNED(max_entry, 10)) { 278 max_entry /= 10; 279 temp_base *= 10; 280 } 281 282 /* Calculate the max compressed latency */ 283 temp_base = MIN(hmat_lb->base, temp_base); 284 max_entry = node->latency / hmat_lb->base; 285 max_entry = MAX(hmat_lb->range_bitmap, max_entry); 286 287 /* 288 * For latency hmat_lb->range_bitmap record the max compressed 289 * latency which should be less than 0xFFFF (UINT16_MAX) 290 */ 291 if (max_entry >= UINT16_MAX) { 292 error_setg(errp, "Latency %" PRIu64 " between initiator=%d and " 293 "target=%d should not differ from previously entered " 294 "min or max values on more than %d", node->latency, 295 node->initiator, node->target, UINT16_MAX - 1); 296 return; 297 } else { 298 hmat_lb->base = temp_base; 299 hmat_lb->range_bitmap = max_entry; 300 } 301 302 /* 303 * Set lb_info_provided bit 0 as 1, 304 * latency information is provided 305 */ 306 numa_info[node->target].lb_info_provided |= BIT(0); 307 } 308 lb_data.data = node->latency; 309 } else if (node->data_type >= HMATLB_DATA_TYPE_ACCESS_BANDWIDTH) { 310 /* Input bandwidth data */ 311 if (!node->has_bandwidth) { 312 error_setg(errp, "Missing 'bandwidth' option"); 313 return; 314 } 315 if (node->has_latency) { 316 error_setg(errp, "Invalid option 'latency' since " 317 "the data type is bandwidth"); 318 return; 319 } 320 if (!QEMU_IS_ALIGNED(node->bandwidth, MiB)) { 321 error_setg(errp, "Bandwidth %" PRIu64 " between initiator=%d and " 322 "target=%d should be 1MB aligned", node->bandwidth, 323 node->initiator, node->target); 324 return; 325 } 326 327 /* Detect duplicate configuration */ 328 for (i = 0; i < hmat_lb->list->len; i++) { 329 lb_temp = &g_array_index(hmat_lb->list, HMAT_LB_Data, i); 330 331 if (node->initiator == lb_temp->initiator && 332 node->target == lb_temp->target) { 333 error_setg(errp, "Duplicate configuration of the bandwidth for " 334 "initiator=%d and target=%d", node->initiator, 335 node->target); 336 return; 337 } 338 } 339 340 hmat_lb->base = hmat_lb->base ? hmat_lb->base : 1; 341 342 if (node->bandwidth) { 343 /* Keep bitmap unchanged when bandwidth out of range */ 344 bitmap_copy = hmat_lb->range_bitmap; 345 bitmap_copy |= node->bandwidth; 346 first_bit = ctz64(bitmap_copy); 347 temp_base = UINT64_C(1) << first_bit; 348 max_entry = node->bandwidth / temp_base; 349 last_bit = 64 - clz64(bitmap_copy); 350 351 /* 352 * For bandwidth, first_bit record the base unit of bandwidth bits, 353 * last_bit record the last bit of the max bandwidth. The max 354 * compressed bandwidth should be less than 0xFFFF (UINT16_MAX) 355 */ 356 if ((last_bit - first_bit) > UINT16_BITS || 357 max_entry >= UINT16_MAX) { 358 error_setg(errp, "Bandwidth %" PRIu64 " between initiator=%d " 359 "and target=%d should not differ from previously " 360 "entered values on more than %d", node->bandwidth, 361 node->initiator, node->target, UINT16_MAX - 1); 362 return; 363 } else { 364 hmat_lb->base = temp_base; 365 hmat_lb->range_bitmap = bitmap_copy; 366 } 367 368 /* 369 * Set lb_info_provided bit 1 as 1, 370 * bandwidth information is provided 371 */ 372 numa_info[node->target].lb_info_provided |= BIT(1); 373 } 374 lb_data.data = node->bandwidth; 375 } else { 376 assert(0); 377 } 378 379 g_array_append_val(hmat_lb->list, lb_data); 380 } 381 382 void parse_numa_hmat_cache(MachineState *ms, NumaHmatCacheOptions *node, 383 Error **errp) 384 { 385 int nb_numa_nodes = ms->numa_state->num_nodes; 386 NodeInfo *numa_info = ms->numa_state->nodes; 387 NumaHmatCacheOptions *hmat_cache = NULL; 388 389 if (node->node_id >= nb_numa_nodes) { 390 error_setg(errp, "Invalid node-id=%" PRIu32 ", it should be less " 391 "than %d", node->node_id, nb_numa_nodes); 392 return; 393 } 394 395 if (numa_info[node->node_id].lb_info_provided != (BIT(0) | BIT(1))) { 396 error_setg(errp, "The latency and bandwidth information of " 397 "node-id=%" PRIu32 " should be provided before memory side " 398 "cache attributes", node->node_id); 399 return; 400 } 401 402 if (node->level < 1 || node->level >= HMAT_LB_LEVELS) { 403 error_setg(errp, "Invalid level=%" PRIu8 ", it should be larger than 0 " 404 "and less than or equal to %d", node->level, 405 HMAT_LB_LEVELS - 1); 406 return; 407 } 408 409 assert(node->associativity < HMAT_CACHE_ASSOCIATIVITY__MAX); 410 assert(node->policy < HMAT_CACHE_WRITE_POLICY__MAX); 411 if (ms->numa_state->hmat_cache[node->node_id][node->level]) { 412 error_setg(errp, "Duplicate configuration of the side cache for " 413 "node-id=%" PRIu32 " and level=%" PRIu8, 414 node->node_id, node->level); 415 return; 416 } 417 418 if ((node->level > 1) && 419 ms->numa_state->hmat_cache[node->node_id][node->level - 1] && 420 (node->size >= 421 ms->numa_state->hmat_cache[node->node_id][node->level - 1]->size)) { 422 error_setg(errp, "Invalid size=%" PRIu64 ", the size of level=%" PRIu8 423 " should be less than the size(%" PRIu64 ") of " 424 "level=%u", node->size, node->level, 425 ms->numa_state->hmat_cache[node->node_id] 426 [node->level - 1]->size, 427 node->level - 1); 428 return; 429 } 430 431 if ((node->level < HMAT_LB_LEVELS - 1) && 432 ms->numa_state->hmat_cache[node->node_id][node->level + 1] && 433 (node->size <= 434 ms->numa_state->hmat_cache[node->node_id][node->level + 1]->size)) { 435 error_setg(errp, "Invalid size=%" PRIu64 ", the size of level=%" PRIu8 436 " should be larger than the size(%" PRIu64 ") of " 437 "level=%u", node->size, node->level, 438 ms->numa_state->hmat_cache[node->node_id] 439 [node->level + 1]->size, 440 node->level + 1); 441 return; 442 } 443 444 hmat_cache = g_malloc0(sizeof(*hmat_cache)); 445 memcpy(hmat_cache, node, sizeof(*hmat_cache)); 446 ms->numa_state->hmat_cache[node->node_id][node->level] = hmat_cache; 447 } 448 449 void set_numa_options(MachineState *ms, NumaOptions *object, Error **errp) 450 { 451 Error *err = NULL; 452 MachineClass *mc = MACHINE_GET_CLASS(ms); 453 454 if (!mc->numa_mem_supported) { 455 error_setg(errp, "NUMA is not supported by this machine-type"); 456 goto end; 457 } 458 459 switch (object->type) { 460 case NUMA_OPTIONS_TYPE_NODE: 461 parse_numa_node(ms, &object->u.node, &err); 462 if (err) { 463 goto end; 464 } 465 break; 466 case NUMA_OPTIONS_TYPE_DIST: 467 parse_numa_distance(ms, &object->u.dist, &err); 468 if (err) { 469 goto end; 470 } 471 break; 472 case NUMA_OPTIONS_TYPE_CPU: 473 if (!object->u.cpu.has_node_id) { 474 error_setg(&err, "Missing mandatory node-id property"); 475 goto end; 476 } 477 if (!ms->numa_state->nodes[object->u.cpu.node_id].present) { 478 error_setg(&err, "Invalid node-id=%" PRId64 ", NUMA node must be " 479 "defined with -numa node,nodeid=ID before it's used with " 480 "-numa cpu,node-id=ID", object->u.cpu.node_id); 481 goto end; 482 } 483 484 machine_set_cpu_numa_node(ms, qapi_NumaCpuOptions_base(&object->u.cpu), 485 &err); 486 break; 487 case NUMA_OPTIONS_TYPE_HMAT_LB: 488 if (!ms->numa_state->hmat_enabled) { 489 error_setg(errp, "ACPI Heterogeneous Memory Attribute Table " 490 "(HMAT) is disabled, enable it with -machine hmat=on " 491 "before using any of hmat specific options"); 492 return; 493 } 494 495 parse_numa_hmat_lb(ms->numa_state, &object->u.hmat_lb, &err); 496 if (err) { 497 goto end; 498 } 499 break; 500 case NUMA_OPTIONS_TYPE_HMAT_CACHE: 501 if (!ms->numa_state->hmat_enabled) { 502 error_setg(errp, "ACPI Heterogeneous Memory Attribute Table " 503 "(HMAT) is disabled, enable it with -machine hmat=on " 504 "before using any of hmat specific options"); 505 return; 506 } 507 508 parse_numa_hmat_cache(ms, &object->u.hmat_cache, &err); 509 if (err) { 510 goto end; 511 } 512 break; 513 default: 514 abort(); 515 } 516 517 end: 518 error_propagate(errp, err); 519 } 520 521 static int parse_numa(void *opaque, QemuOpts *opts, Error **errp) 522 { 523 NumaOptions *object = NULL; 524 MachineState *ms = MACHINE(opaque); 525 Error *err = NULL; 526 Visitor *v = opts_visitor_new(opts); 527 528 visit_type_NumaOptions(v, NULL, &object, &err); 529 visit_free(v); 530 if (err) { 531 goto end; 532 } 533 534 /* Fix up legacy suffix-less format */ 535 if ((object->type == NUMA_OPTIONS_TYPE_NODE) && object->u.node.has_mem) { 536 const char *mem_str = qemu_opt_get(opts, "mem"); 537 qemu_strtosz_MiB(mem_str, NULL, &object->u.node.mem); 538 } 539 540 set_numa_options(ms, object, &err); 541 542 end: 543 qapi_free_NumaOptions(object); 544 if (err) { 545 error_propagate(errp, err); 546 return -1; 547 } 548 549 return 0; 550 } 551 552 /* If all node pair distances are symmetric, then only distances 553 * in one direction are enough. If there is even one asymmetric 554 * pair, though, then all distances must be provided. The 555 * distance from a node to itself is always NUMA_DISTANCE_MIN, 556 * so providing it is never necessary. 557 */ 558 static void validate_numa_distance(MachineState *ms) 559 { 560 int src, dst; 561 bool is_asymmetrical = false; 562 int nb_numa_nodes = ms->numa_state->num_nodes; 563 NodeInfo *numa_info = ms->numa_state->nodes; 564 565 for (src = 0; src < nb_numa_nodes; src++) { 566 for (dst = src; dst < nb_numa_nodes; dst++) { 567 if (numa_info[src].distance[dst] == 0 && 568 numa_info[dst].distance[src] == 0) { 569 if (src != dst) { 570 error_report("The distance between node %d and %d is " 571 "missing, at least one distance value " 572 "between each nodes should be provided.", 573 src, dst); 574 exit(EXIT_FAILURE); 575 } 576 } 577 578 if (numa_info[src].distance[dst] != 0 && 579 numa_info[dst].distance[src] != 0 && 580 numa_info[src].distance[dst] != 581 numa_info[dst].distance[src]) { 582 is_asymmetrical = true; 583 } 584 } 585 } 586 587 if (is_asymmetrical) { 588 for (src = 0; src < nb_numa_nodes; src++) { 589 for (dst = 0; dst < nb_numa_nodes; dst++) { 590 if (src != dst && numa_info[src].distance[dst] == 0) { 591 error_report("At least one asymmetrical pair of " 592 "distances is given, please provide distances " 593 "for both directions of all node pairs."); 594 exit(EXIT_FAILURE); 595 } 596 } 597 } 598 } 599 } 600 601 static void complete_init_numa_distance(MachineState *ms) 602 { 603 int src, dst; 604 NodeInfo *numa_info = ms->numa_state->nodes; 605 606 /* Fixup NUMA distance by symmetric policy because if it is an 607 * asymmetric distance table, it should be a complete table and 608 * there would not be any missing distance except local node, which 609 * is verified by validate_numa_distance above. 610 */ 611 for (src = 0; src < ms->numa_state->num_nodes; src++) { 612 for (dst = 0; dst < ms->numa_state->num_nodes; dst++) { 613 if (numa_info[src].distance[dst] == 0) { 614 if (src == dst) { 615 numa_info[src].distance[dst] = NUMA_DISTANCE_MIN; 616 } else { 617 numa_info[src].distance[dst] = numa_info[dst].distance[src]; 618 } 619 } 620 } 621 } 622 } 623 624 void numa_legacy_auto_assign_ram(MachineClass *mc, NodeInfo *nodes, 625 int nb_nodes, ram_addr_t size) 626 { 627 int i; 628 uint64_t usedmem = 0; 629 630 /* Align each node according to the alignment 631 * requirements of the machine class 632 */ 633 634 for (i = 0; i < nb_nodes - 1; i++) { 635 nodes[i].node_mem = (size / nb_nodes) & 636 ~((1 << mc->numa_mem_align_shift) - 1); 637 usedmem += nodes[i].node_mem; 638 } 639 nodes[i].node_mem = size - usedmem; 640 } 641 642 void numa_default_auto_assign_ram(MachineClass *mc, NodeInfo *nodes, 643 int nb_nodes, ram_addr_t size) 644 { 645 int i; 646 uint64_t usedmem = 0, node_mem; 647 uint64_t granularity = size / nb_nodes; 648 uint64_t propagate = 0; 649 650 for (i = 0; i < nb_nodes - 1; i++) { 651 node_mem = (granularity + propagate) & 652 ~((1 << mc->numa_mem_align_shift) - 1); 653 propagate = granularity + propagate - node_mem; 654 nodes[i].node_mem = node_mem; 655 usedmem += node_mem; 656 } 657 nodes[i].node_mem = size - usedmem; 658 } 659 660 void numa_complete_configuration(MachineState *ms) 661 { 662 int i; 663 MachineClass *mc = MACHINE_GET_CLASS(ms); 664 NodeInfo *numa_info = ms->numa_state->nodes; 665 666 /* 667 * If memory hotplug is enabled (slots > 0) but without '-numa' 668 * options explicitly on CLI, guestes will break. 669 * 670 * Windows: won't enable memory hotplug without SRAT table at all 671 * 672 * Linux: if QEMU is started with initial memory all below 4Gb 673 * and no SRAT table present, guest kernel will use nommu DMA ops, 674 * which breaks 32bit hw drivers when memory is hotplugged and 675 * guest tries to use it with that drivers. 676 * 677 * Enable NUMA implicitly by adding a new NUMA node automatically. 678 * 679 * Or if MachineClass::auto_enable_numa is true and no NUMA nodes, 680 * assume there is just one node with whole RAM. 681 */ 682 if (ms->numa_state->num_nodes == 0 && 683 ((ms->ram_slots > 0 && 684 mc->auto_enable_numa_with_memhp) || 685 mc->auto_enable_numa)) { 686 NumaNodeOptions node = { }; 687 parse_numa_node(ms, &node, &error_abort); 688 numa_info[0].node_mem = ram_size; 689 } 690 691 assert(max_numa_nodeid <= MAX_NODES); 692 693 /* No support for sparse NUMA node IDs yet: */ 694 for (i = max_numa_nodeid - 1; i >= 0; i--) { 695 /* Report large node IDs first, to make mistakes easier to spot */ 696 if (!numa_info[i].present) { 697 error_report("numa: Node ID missing: %d", i); 698 exit(1); 699 } 700 } 701 702 /* This must be always true if all nodes are present: */ 703 assert(ms->numa_state->num_nodes == max_numa_nodeid); 704 705 if (ms->numa_state->num_nodes > 0) { 706 uint64_t numa_total; 707 708 if (ms->numa_state->num_nodes > MAX_NODES) { 709 ms->numa_state->num_nodes = MAX_NODES; 710 } 711 712 /* If no memory size is given for any node, assume the default case 713 * and distribute the available memory equally across all nodes 714 */ 715 for (i = 0; i < ms->numa_state->num_nodes; i++) { 716 if (numa_info[i].node_mem != 0) { 717 break; 718 } 719 } 720 if (i == ms->numa_state->num_nodes) { 721 assert(mc->numa_auto_assign_ram); 722 mc->numa_auto_assign_ram(mc, numa_info, 723 ms->numa_state->num_nodes, ram_size); 724 if (!qtest_enabled()) { 725 warn_report("Default splitting of RAM between nodes is deprecated," 726 " Use '-numa node,memdev' to explictly define RAM" 727 " allocation per node"); 728 } 729 } 730 731 numa_total = 0; 732 for (i = 0; i < ms->numa_state->num_nodes; i++) { 733 numa_total += numa_info[i].node_mem; 734 } 735 if (numa_total != ram_size) { 736 error_report("total memory for NUMA nodes (0x%" PRIx64 ")" 737 " should equal RAM size (0x" RAM_ADDR_FMT ")", 738 numa_total, ram_size); 739 exit(1); 740 } 741 742 /* QEMU needs at least all unique node pair distances to build 743 * the whole NUMA distance table. QEMU treats the distance table 744 * as symmetric by default, i.e. distance A->B == distance B->A. 745 * Thus, QEMU is able to complete the distance table 746 * initialization even though only distance A->B is provided and 747 * distance B->A is not. QEMU knows the distance of a node to 748 * itself is always 10, so A->A distances may be omitted. When 749 * the distances of two nodes of a pair differ, i.e. distance 750 * A->B != distance B->A, then that means the distance table is 751 * asymmetric. In this case, the distances for both directions 752 * of all node pairs are required. 753 */ 754 if (ms->numa_state->have_numa_distance) { 755 /* Validate enough NUMA distance information was provided. */ 756 validate_numa_distance(ms); 757 758 /* Validation succeeded, now fill in any missing distances. */ 759 complete_init_numa_distance(ms); 760 } 761 } 762 } 763 764 void parse_numa_opts(MachineState *ms) 765 { 766 qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, ms, &error_fatal); 767 } 768 769 void numa_cpu_pre_plug(const CPUArchId *slot, DeviceState *dev, Error **errp) 770 { 771 int node_id = object_property_get_int(OBJECT(dev), "node-id", &error_abort); 772 773 if (node_id == CPU_UNSET_NUMA_NODE_ID) { 774 /* due to bug in libvirt, it doesn't pass node-id from props on 775 * device_add as expected, so we have to fix it up here */ 776 if (slot->props.has_node_id) { 777 object_property_set_int(OBJECT(dev), slot->props.node_id, 778 "node-id", errp); 779 } 780 } else if (node_id != slot->props.node_id) { 781 error_setg(errp, "invalid node-id, must be %"PRId64, 782 slot->props.node_id); 783 } 784 } 785 786 static void allocate_system_memory_nonnuma(MemoryRegion *mr, Object *owner, 787 const char *name, 788 uint64_t ram_size) 789 { 790 if (mem_path) { 791 #ifdef __linux__ 792 Error *err = NULL; 793 memory_region_init_ram_from_file(mr, owner, name, ram_size, 0, 0, 794 mem_path, &err); 795 if (err) { 796 error_report_err(err); 797 if (mem_prealloc) { 798 exit(1); 799 } 800 warn_report("falling back to regular RAM allocation"); 801 error_printf("This is deprecated. Make sure that -mem-path " 802 " specified path has sufficient resources to allocate" 803 " -m specified RAM amount\n"); 804 /* Legacy behavior: if allocation failed, fall back to 805 * regular RAM allocation. 806 */ 807 mem_path = NULL; 808 memory_region_init_ram_nomigrate(mr, owner, name, ram_size, &error_fatal); 809 } 810 #else 811 fprintf(stderr, "-mem-path not supported on this host\n"); 812 exit(1); 813 #endif 814 } else { 815 memory_region_init_ram_nomigrate(mr, owner, name, ram_size, &error_fatal); 816 } 817 vmstate_register_ram_global(mr); 818 } 819 820 void memory_region_allocate_system_memory(MemoryRegion *mr, Object *owner, 821 const char *name, 822 uint64_t ram_size) 823 { 824 uint64_t addr = 0; 825 int i; 826 MachineState *ms = MACHINE(qdev_get_machine()); 827 828 if (ms->numa_state == NULL || 829 ms->numa_state->num_nodes == 0 || !have_memdevs) { 830 allocate_system_memory_nonnuma(mr, owner, name, ram_size); 831 return; 832 } 833 834 memory_region_init(mr, owner, name, ram_size); 835 for (i = 0; i < ms->numa_state->num_nodes; i++) { 836 uint64_t size = ms->numa_state->nodes[i].node_mem; 837 HostMemoryBackend *backend = ms->numa_state->nodes[i].node_memdev; 838 if (!backend) { 839 continue; 840 } 841 MemoryRegion *seg = host_memory_backend_get_memory(backend); 842 843 if (memory_region_is_mapped(seg)) { 844 char *path = object_get_canonical_path_component(OBJECT(backend)); 845 error_report("memory backend %s is used multiple times. Each " 846 "-numa option must use a different memdev value.", 847 path); 848 g_free(path); 849 exit(1); 850 } 851 852 host_memory_backend_set_mapped(backend, true); 853 memory_region_add_subregion(mr, addr, seg); 854 vmstate_register_ram_global(seg); 855 addr += size; 856 } 857 } 858 859 static void numa_stat_memory_devices(NumaNodeMem node_mem[]) 860 { 861 MemoryDeviceInfoList *info_list = qmp_memory_device_list(); 862 MemoryDeviceInfoList *info; 863 PCDIMMDeviceInfo *pcdimm_info; 864 VirtioPMEMDeviceInfo *vpi; 865 866 for (info = info_list; info; info = info->next) { 867 MemoryDeviceInfo *value = info->value; 868 869 if (value) { 870 switch (value->type) { 871 case MEMORY_DEVICE_INFO_KIND_DIMM: 872 case MEMORY_DEVICE_INFO_KIND_NVDIMM: 873 pcdimm_info = value->type == MEMORY_DEVICE_INFO_KIND_DIMM ? 874 value->u.dimm.data : value->u.nvdimm.data; 875 node_mem[pcdimm_info->node].node_mem += pcdimm_info->size; 876 node_mem[pcdimm_info->node].node_plugged_mem += 877 pcdimm_info->size; 878 break; 879 case MEMORY_DEVICE_INFO_KIND_VIRTIO_PMEM: 880 vpi = value->u.virtio_pmem.data; 881 /* TODO: once we support numa, assign to right node */ 882 node_mem[0].node_mem += vpi->size; 883 node_mem[0].node_plugged_mem += vpi->size; 884 break; 885 default: 886 g_assert_not_reached(); 887 } 888 } 889 } 890 qapi_free_MemoryDeviceInfoList(info_list); 891 } 892 893 void query_numa_node_mem(NumaNodeMem node_mem[], MachineState *ms) 894 { 895 int i; 896 897 if (ms->numa_state == NULL || ms->numa_state->num_nodes <= 0) { 898 return; 899 } 900 901 numa_stat_memory_devices(node_mem); 902 for (i = 0; i < ms->numa_state->num_nodes; i++) { 903 node_mem[i].node_mem += ms->numa_state->nodes[i].node_mem; 904 } 905 } 906 907 void ram_block_notifier_add(RAMBlockNotifier *n) 908 { 909 QLIST_INSERT_HEAD(&ram_list.ramblock_notifiers, n, next); 910 } 911 912 void ram_block_notifier_remove(RAMBlockNotifier *n) 913 { 914 QLIST_REMOVE(n, next); 915 } 916 917 void ram_block_notify_add(void *host, size_t size) 918 { 919 RAMBlockNotifier *notifier; 920 921 QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) { 922 notifier->ram_block_added(notifier, host, size); 923 } 924 } 925 926 void ram_block_notify_remove(void *host, size_t size) 927 { 928 RAMBlockNotifier *notifier; 929 930 QLIST_FOREACH(notifier, &ram_list.ramblock_notifiers, next) { 931 notifier->ram_block_removed(notifier, host, size); 932 } 933 } 934