xref: /openbmc/qemu/hw/ppc/spapr_numa.c (revision 3b880445e61b6509a9a5b4236eaf07718ae4a51a)
/*
 * QEMU PowerPC pSeries Logical Partition NUMA associativity handling
 *
 * Copyright IBM Corp. 2020
 *
 * Authors:
 *  Daniel Henrique Barboza      <danielhb413@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "hw/ppc/spapr_numa.h"
#include "hw/pci-host/spapr.h"
#include "hw/ppc/fdt.h"

/* Moved from hw/ppc/spapr_pci_nvlink2.c */
#define SPAPR_GPU_NUMA_ID           (cpu_to_be32(1))

static bool spapr_machine_using_legacy_numa(SpaprMachineState *spapr)
{
    MachineState *machine = MACHINE(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);

    return smc->pre_5_2_numa_associativity ||
           machine->numa_state->num_nodes <= 1;
}

static bool spapr_numa_is_symmetrical(MachineState *ms)
{
    int src, dst;
    int nb_numa_nodes = ms->numa_state->num_nodes;
    NodeInfo *numa_info = ms->numa_state->nodes;

    for (src = 0; src < nb_numa_nodes; src++) {
        for (dst = src; dst < nb_numa_nodes; dst++) {
            if (numa_info[src].distance[dst] !=
                numa_info[dst].distance[src]) {
                return false;
            }
        }
    }

    return true;
}

/*
 * This function translates the user-provided distances into
 * the values the kernel understands: 10 (local distance),
 * 20, 40, 80 and 160, and returns the equivalent NUMA level
 * for each. The current heuristic is:
 *  - local distance (10) returns numa_level = 0x4, meaning there is
 *    no rounding for local distance
 *  - distances between 11 and 30 inclusive -> rounded to 20,
 *    numa_level = 0x3
 *  - distances between 31 and 60 inclusive -> rounded to 40,
 *    numa_level = 0x2
 *  - distances between 61 and 120 inclusive -> rounded to 80,
 *    numa_level = 0x1
 *  - everything above 120 returns numa_level = 0 to indicate that
 *    there is no match. This will be calculated as distance = 160
 *    by the kernel (as of v5.9)
 */
static uint8_t spapr_numa_get_numa_level(uint8_t distance)
{
    if (distance == 10) {
        return 0x4;
    } else if (distance >= 11 && distance <= 30) {
        return 0x3;
    } else if (distance >= 31 && distance <= 60) {
        return 0x2;
    } else if (distance >= 61 && distance <= 120) {
        return 0x1;
    }

    return 0;
}
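
/*
 * A minimal, compiled-out sketch (not part of the original file) of the
 * heuristic above: a few hypothetical user-provided distances and the
 * NUMA level we expect back. spapr_numa_level_example() is a made-up
 * name used purely for illustration.
 */
#if 0
static void spapr_numa_level_example(void)
{
    g_assert(spapr_numa_get_numa_level(10) == 0x4);  /* local, no rounding */
    g_assert(spapr_numa_get_numa_level(25) == 0x3);  /* rounded to 20 */
    g_assert(spapr_numa_get_numa_level(40) == 0x2);  /* exact kernel value */
    g_assert(spapr_numa_get_numa_level(90) == 0x1);  /* rounded to 80 */
    g_assert(spapr_numa_get_numa_level(200) == 0);   /* no match -> 160 */
}
#endif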

static void spapr_numa_define_associativity_domains(SpaprMachineState *spapr)
{
    MachineState *ms = MACHINE(spapr);
    NodeInfo *numa_info = ms->numa_state->nodes;
    int nb_numa_nodes = ms->numa_state->num_nodes;
    int src, dst, i;

    for (src = 0; src < nb_numa_nodes; src++) {
        for (dst = src; dst < nb_numa_nodes; dst++) {
            /*
             * This is how the associativity domain between A and B
             * is calculated:
             *
             * - get the distance D between them
             * - get the corresponding NUMA level 'n_level' for D
             * - all associativity arrays were initialized with their own
             * numa_ids, and we're calculating the distance in node_id
             * ascending order, starting from node id 0 (the first node
             * retrieved by numa_state). This will have a cascade effect in
             * the algorithm because the associativity domains that node 0
             * defines will be carried over to other nodes, and node 1
             * associativities will be carried over after taking node 0
             * associativities into account, and so on. This happens because
             * we'll assign assoc_src as the associativity domain of dst
             * as well, for every NUMA level from n_level down to 0x1.
             *
             * The PPC kernel expects the associativity domains of node 0 to
             * always be 0, and this algorithm guarantees that by default.
             */
            uint8_t distance = numa_info[src].distance[dst];
            uint8_t n_level = spapr_numa_get_numa_level(distance);
            uint32_t assoc_src;

            /*
             * n_level = 0 means that the distance is greater than our last
             * rounded value (120). In this case there is no NUMA level match
             * between src and dst, and we can skip the rest of this loop
             * iteration.
             *
             * The Linux kernel will assume that the distance between src and
             * dst, in this case of no match, is 10 (local distance) doubled
             * for each NUMA level it didn't match. We have
             * MAX_DISTANCE_REF_POINTS levels (4), so this gives us
             * 10*2*2*2*2 = 160.
             *
             * This logic can be seen in the Linux kernel source code, as of
             * v5.9, in arch/powerpc/mm/numa.c, function __node_distance().
             */
            if (n_level == 0) {
                continue;
            }

            /*
             * We must assign all assoc_src to dst, starting from n_level
             * and going up to 0x1.
             */
            for (i = n_level; i > 0; i--) {
                assoc_src = spapr->numa_assoc_array[src][i];
                spapr->numa_assoc_array[dst][i] = assoc_src;
            }
        }
    }
}
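
/*
 * Worked example (hypothetical 2-node guest, not from the original file):
 * with "-numa dist,src=0,dst=1,val=40", src=0/dst=1 yields n_level = 0x2,
 * so node 1 inherits node 0's domains at positions 2 and 1 only:
 *
 *   numa_assoc_array[0] = { 4, 0, 0, 0, 0 }
 *   numa_assoc_array[1] = { 4, 0, 0, 1, 1 }
 *
 * (values shown in host byte order for readability; the arrays store them
 * big-endian). Walking the reference points {0x4, 0x3, 0x2, 0x1}, the
 * guest kernel sees two mismatches before the first match and computes
 * 10 * 2 * 2 = 40, reproducing the user-provided distance.
 */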

void spapr_numa_associativity_init(SpaprMachineState *spapr,
                                   MachineState *machine)
{
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    int nb_numa_nodes = machine->numa_state->num_nodes;
    int i, j, max_nodes_with_gpus;
    bool using_legacy_numa = spapr_machine_using_legacy_numa(spapr);

    /*
     * For all associativity arrays: the first position is the size,
     * and position MAX_DISTANCE_REF_POINTS is always the numa_id,
     * represented by the index 'i'.
     *
     * This will break on sparse NUMA setups, when/if QEMU starts
     * to support them, because there will no longer be any guarantee
     * that 'i' is a valid node_id set by the user.
     */
    for (i = 0; i < nb_numa_nodes; i++) {
        spapr->numa_assoc_array[i][0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS);
        spapr->numa_assoc_array[i][MAX_DISTANCE_REF_POINTS] = cpu_to_be32(i);

        /*
         * Fill all associativity domains of non-zero NUMA nodes with
         * node_id. This is required because the default value (0) is
         * considered a match with associativity domains of node 0.
         */
        if (!using_legacy_numa && i != 0) {
            for (j = 1; j < MAX_DISTANCE_REF_POINTS; j++) {
                spapr->numa_assoc_array[i][j] = cpu_to_be32(i);
            }
        }
    }

    /*
     * Initialize NVLink GPU associativity arrays. We know that
     * the first GPU will take the first available NUMA id, and
     * we'll have a maximum of NVGPU_MAX_NUM GPUs in the machine.
     * At this point we're not sure if there are GPUs or not, but
     * let's initialize the associativity arrays and allow NVLink
     * GPUs to be handled like regular NUMA nodes later on.
     */
    max_nodes_with_gpus = nb_numa_nodes + NVGPU_MAX_NUM;

    for (i = nb_numa_nodes; i < max_nodes_with_gpus; i++) {
        spapr->numa_assoc_array[i][0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS);

        for (j = 1; j < MAX_DISTANCE_REF_POINTS; j++) {
            uint32_t gpu_assoc = smc->pre_5_1_assoc_refpoints ?
                                 SPAPR_GPU_NUMA_ID : cpu_to_be32(i);
            spapr->numa_assoc_array[i][j] = gpu_assoc;
        }

        spapr->numa_assoc_array[i][MAX_DISTANCE_REF_POINTS] = cpu_to_be32(i);
    }

    /*
     * Legacy NUMA guests (pseries-5.1 and older, or guests with only
     * 1 NUMA node) will not benefit from anything we're going to do
     * after this point.
     */
    if (using_legacy_numa) {
        return;
    }

    if (!spapr_numa_is_symmetrical(machine)) {
        error_report("Asymmetrical NUMA topologies aren't supported "
                     "in the pSeries machine");
        exit(EXIT_FAILURE);
    }

    spapr_numa_define_associativity_domains(spapr);
}
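
/*
 * Illustrative snapshot (hypothetical values, not from the original file)
 * of what the init above produces for a non-legacy guest with two NUMA
 * nodes, right before spapr_numa_define_associativity_domains() runs:
 *
 *   numa_assoc_array[0] = { 4, 0, 0, 0, 0 }    user node 0
 *   numa_assoc_array[1] = { 4, 1, 1, 1, 1 }    user node 1
 *   numa_assoc_array[i] = { 4, i, i, i, i }    NVGPU_MAX_NUM GPU slots,
 *                                              i = 2, 3, ...
 *
 * (host byte order for readability; stored big-endian). For machines with
 * pre_5_1_assoc_refpoints, the GPU slots use SPAPR_GPU_NUMA_ID in the
 * middle positions instead of 'i'.
 */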

void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt,
                                       int offset, int nodeid)
{
    _FDT((fdt_setprop(fdt, offset, "ibm,associativity",
                      spapr->numa_assoc_array[nodeid],
                      sizeof(spapr->numa_assoc_array[nodeid]))));
}

static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr,
                                           PowerPCCPU *cpu)
{
    uint32_t *vcpu_assoc = g_new(uint32_t, VCPU_ASSOC_SIZE);
    int index = spapr_get_vcpu_id(cpu);

    /*
     * VCPUs have an extra 'cpu_id' value in ibm,associativity
     * compared to other resources. Increment the size at index
     * 0, put cpu_id last, then copy the remaining associativity
     * domains.
     */
    vcpu_assoc[0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS + 1);
    vcpu_assoc[VCPU_ASSOC_SIZE - 1] = cpu_to_be32(index);
    memcpy(vcpu_assoc + 1, spapr->numa_assoc_array[cpu->node_id] + 1,
           (VCPU_ASSOC_SIZE - 2) * sizeof(uint32_t));

    return vcpu_assoc;
}
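
/*
 * Example of the returned array (hypothetical values, not from the
 * original file): a vCPU with spapr_get_vcpu_id() == 8 on node 1 of the
 * 2-node topology used in the examples above would get:
 *
 *   vcpu_assoc = { 5, 0, 0, 1, 1, 8 }
 *
 * i.e. the size at index 0 grows by one, the node's domains are copied
 * verbatim, and the vcpu id is appended as the most fine-grained domain
 * (host byte order for readability; stored big-endian).
 */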

int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt,
                            int offset, PowerPCCPU *cpu)
{
    g_autofree uint32_t *vcpu_assoc = NULL;

    vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, cpu);

    /* Advertise NUMA via ibm,associativity */
    return fdt_setprop(fdt, offset, "ibm,associativity", vcpu_assoc,
                       VCPU_ASSOC_SIZE * sizeof(uint32_t));
}

int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt,
                                         int offset)
{
    MachineState *machine = MACHINE(spapr);
    int nb_numa_nodes = machine->numa_state->num_nodes;
    int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1;
    uint32_t *int_buf, *cur_index, buf_len;
    int ret, i;

    /* ibm,associativity-lookup-arrays */
    buf_len = (nr_nodes * MAX_DISTANCE_REF_POINTS + 2) * sizeof(uint32_t);
    cur_index = int_buf = g_malloc0(buf_len);
    int_buf[0] = cpu_to_be32(nr_nodes);
    /* Number of entries per associativity list */
    int_buf[1] = cpu_to_be32(MAX_DISTANCE_REF_POINTS);
    cur_index += 2;
    for (i = 0; i < nr_nodes; i++) {
        /*
         * For the lookup-array we use the ibm,associativity array
         * from numa_assoc_array, without the first element (size).
         */
        uint32_t *associativity = spapr->numa_assoc_array[i];
        memcpy(cur_index, ++associativity,
               sizeof(uint32_t) * MAX_DISTANCE_REF_POINTS);
        cur_index += MAX_DISTANCE_REF_POINTS;
    }
    ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf,
                      (cur_index - int_buf) * sizeof(uint32_t));
    g_free(int_buf);

    return ret;
}
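
/*
 * Resulting property (hypothetical values, not from the original file)
 * for the 2-node example used above, assuming MAX_DISTANCE_REF_POINTS
 * is 4:
 *
 *   ibm,associativity-lookup-arrays = < 2 4  0 0 0 0  0 0 1 1 >
 *
 * i.e. the number of arrays, the entries per array, and then each node's
 * associativity domains with the leading size word dropped.
 */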

/*
 * Helper that writes ibm,associativity-reference-points and
 * ibm,max-associativity-domains in the RTAS node pointed to by
 * @rtas in the DT @fdt.
 */
void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas)
{
    MachineState *ms = MACHINE(spapr);
    SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
    uint32_t refpoints[] = {
        cpu_to_be32(0x4),
        cpu_to_be32(0x3),
        cpu_to_be32(0x2),
        cpu_to_be32(0x1),
    };
    uint32_t nr_refpoints = ARRAY_SIZE(refpoints);
    uint32_t maxdomain = ms->numa_state->num_nodes + spapr->gpu_numa_id;
    uint32_t maxdomains[] = {
        cpu_to_be32(4),
        cpu_to_be32(maxdomain),
        cpu_to_be32(maxdomain),
        cpu_to_be32(maxdomain),
        cpu_to_be32(maxdomain)
    };

    if (spapr_machine_using_legacy_numa(spapr)) {
        uint32_t legacy_refpoints[] = {
            cpu_to_be32(0x4),
            cpu_to_be32(0x4),
            cpu_to_be32(0x2),
        };
        uint32_t legacy_maxdomain = spapr->gpu_numa_id > 1 ? 1 : 0;
        uint32_t legacy_maxdomains[] = {
            cpu_to_be32(4),
            cpu_to_be32(legacy_maxdomain),
            cpu_to_be32(legacy_maxdomain),
            cpu_to_be32(legacy_maxdomain),
            cpu_to_be32(spapr->gpu_numa_id),
        };

        G_STATIC_ASSERT(sizeof(legacy_refpoints) <= sizeof(refpoints));
        G_STATIC_ASSERT(sizeof(legacy_maxdomains) <= sizeof(maxdomains));

        nr_refpoints = 3;

        memcpy(refpoints, legacy_refpoints, sizeof(legacy_refpoints));
        memcpy(maxdomains, legacy_maxdomains, sizeof(legacy_maxdomains));

        /* pseries-5.0 and older reference-points array is {0x4, 0x4} */
        if (smc->pre_5_1_assoc_refpoints) {
            nr_refpoints = 2;
        }
    }

    _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points",
                     refpoints, nr_refpoints * sizeof(refpoints[0])));

    _FDT(fdt_setprop(fdt, rtas, "ibm,max-associativity-domains",
                     maxdomains, sizeof(maxdomains)));
}
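
/*
 * Example output (hypothetical values, not from the original file) for a
 * non-legacy guest with two NUMA nodes and no GPUs, assuming
 * spapr->gpu_numa_id is 2 (the next free NUMA id after the user nodes),
 * which makes maxdomain = 2 + 2 = 4:
 *
 *   ibm,associativity-reference-points = < 0x4 0x3 0x2 0x1 >
 *   ibm,max-associativity-domains = < 4 4 4 4 4 >
 */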

static target_ulong h_home_node_associativity(PowerPCCPU *cpu,
                                              SpaprMachineState *spapr,
                                              target_ulong opcode,
                                              target_ulong *args)
{
    g_autofree uint32_t *vcpu_assoc = NULL;
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    PowerPCCPU *tcpu;
    int idx, assoc_idx;

    /* only support procno from H_REGISTER_VPA */
    if (flags != 0x1) {
        return H_FUNCTION;
    }

    tcpu = spapr_find_cpu(procno);
    if (tcpu == NULL) {
        return H_P2;
    }

    /*
     * Given that we want to be flexible with the sizes and indexes,
     * we must consider that there is a hard limit on how many
     * associativity domains we can fit in R4 up to R9: 12 for
     * vcpus. Assert and bail if that's not the case.
     */
    G_STATIC_ASSERT((VCPU_ASSOC_SIZE - 1) <= 12);

    vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, tcpu);
    /* assoc_idx starts at 1 to skip associativity size */
    assoc_idx = 1;

#define ASSOCIATIVITY(a, b) (((uint64_t)(a) << 32) | \
                             ((uint64_t)(b) & 0xffffffff))

    for (idx = 0; idx < 6; idx++) {
        int32_t a, b;

        /*
         * vcpu_assoc[] will contain the associativity domains for tcpu,
         * including tcpu->node_id and procno, meaning that we don't
         * need to use these variables here.
         *
         * We'll read 2 values at a time to fill up the ASSOCIATIVITY()
         * macro. The ternary will fill the remaining registers with -1
         * once we've gone through vcpu_assoc[].
         */
        a = assoc_idx < VCPU_ASSOC_SIZE ?
            be32_to_cpu(vcpu_assoc[assoc_idx++]) : -1;
        b = assoc_idx < VCPU_ASSOC_SIZE ?
            be32_to_cpu(vcpu_assoc[assoc_idx++]) : -1;

        args[idx] = ASSOCIATIVITY(a, b);
    }
#undef ASSOCIATIVITY

    return H_SUCCESS;
}
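
/*
 * Register packing sketch (hypothetical values, not from the original
 * file), reusing vcpu_assoc = { 5, 0, 0, 1, 1, 8 } from the
 * spapr_numa_get_vcpu_assoc() example: the loop above returns two domains
 * per register and pads with -1 once vcpu_assoc[] is exhausted:
 *
 *   args[0] (R4) = 0x0000000000000000      domains 0, 0
 *   args[1] (R5) = 0x0000000100000001      domains 1, 1
 *   args[2] (R6) = 0x00000008ffffffff      vcpu id 8, then padding
 *   args[3..5] (R7-R9) = 0xffffffffffffffff
 */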

static void spapr_numa_register_types(void)
{
    /* Virtual Processor Home Node */
    spapr_register_hypercall(H_HOME_NODE_ASSOCIATIVITY,
                             h_home_node_associativity);
}

type_init(spapr_numa_register_types)