/*
 * QEMU RISC-V NUMA Helper
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"
#include "hw/riscv/numa.h"
#include "sysemu/device_tree.h"

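/* True when the machine was configured with at least one NUMA node. */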
static bool numa_enabled(const MachineState *ms)
{
    return ms->numa_state && ms->numa_state->num_nodes;
}

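/*
 * Number of sockets exposed by the machine: one per NUMA node when
 * NUMA is configured, otherwise a single socket.
 */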
int riscv_socket_count(const MachineState *ms)
{
    return numa_enabled(ms) ? ms->numa_state->num_nodes : 1;
}

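/*
 * Lowest hart ID belonging to @socket_id, or -1 if the socket owns no
 * harts.  Without NUMA there is a single socket 0 holding every hart.
 */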
int riscv_socket_first_hartid(const MachineState *ms, int socket_id)
{
    int i, first_hartid = ms->smp.cpus;

    if (!numa_enabled(ms)) {
        return (!socket_id) ? 0 : -1;
    }

    for (i = 0; i < ms->smp.cpus; i++) {
        if (ms->possible_cpus->cpus[i].props.node_id != socket_id) {
            continue;
        }
        if (i < first_hartid) {
            first_hartid = i;
        }
    }

    return (first_hartid < ms->smp.cpus) ? first_hartid : -1;
}

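/*
 * Highest hart ID belonging to @socket_id, or -1 if the socket owns no
 * harts.
 */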
int riscv_socket_last_hartid(const MachineState *ms, int socket_id)
{
    int i, last_hartid = -1;

    if (!numa_enabled(ms)) {
        return (!socket_id) ? ms->smp.cpus - 1 : -1;
    }

    for (i = 0; i < ms->smp.cpus; i++) {
        if (ms->possible_cpus->cpus[i].props.node_id != socket_id) {
            continue;
        }
        if (i > last_hartid) {
            last_hartid = i;
        }
    }

    return (last_hartid < ms->smp.cpus) ? last_hartid : -1;
}

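/*
 * Number of harts in @socket_id, computed from its first and last hart
 * IDs; -1 if the socket is out of range or owns no harts.
 */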
int riscv_socket_hart_count(const MachineState *ms, int socket_id)
{
    int first_hartid, last_hartid;

    if (!numa_enabled(ms)) {
        return (!socket_id) ? ms->smp.cpus : -1;
    }

    first_hartid = riscv_socket_first_hartid(ms, socket_id);
    if (first_hartid < 0) {
        return -1;
    }

    last_hartid = riscv_socket_last_hartid(ms, socket_id);
    if (last_hartid < 0) {
        return -1;
    }

    if (first_hartid > last_hartid) {
        return -1;
    }

    return last_hartid - first_hartid + 1;
}

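/*
 * Check that the harts of @socket_id form one contiguous range of hart
 * IDs, with no hart of another socket interleaved, so that the socket
 * can be described by its first hart ID and hart count alone.
 */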
bool riscv_socket_check_hartids(const MachineState *ms, int socket_id)
{
    int i, first_hartid, last_hartid;

    if (!numa_enabled(ms)) {
        return (!socket_id) ? true : false;
    }

    first_hartid = riscv_socket_first_hartid(ms, socket_id);
    if (first_hartid < 0) {
        return false;
    }

    last_hartid = riscv_socket_last_hartid(ms, socket_id);
    if (last_hartid < 0) {
        return false;
    }

    for (i = first_hartid; i <= last_hartid; i++) {
        if (ms->possible_cpus->cpus[i].props.node_id != socket_id) {
            return false;
        }
    }

    return true;
}

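/*
 * Byte offset of @socket_id's RAM within guest memory: the sum of the
 * memory sizes of all lower-numbered nodes.  Returns 0 for an
 * out-of-range socket.
 */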
uint64_t riscv_socket_mem_offset(const MachineState *ms, int socket_id)
{
    int i;
    uint64_t mem_offset = 0;

    if (!numa_enabled(ms)) {
        return 0;
    }

    for (i = 0; i < ms->numa_state->num_nodes; i++) {
        if (i == socket_id) {
            break;
        }
        mem_offset += ms->numa_state->nodes[i].node_mem;
    }

    return (i == socket_id) ? mem_offset : 0;
}

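/*
 * RAM size assigned to @socket_id.  Without NUMA, socket 0 owns all of
 * guest memory; invalid sockets get 0.
 */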
uint64_t riscv_socket_mem_size(const MachineState *ms, int socket_id)
{
    if (!numa_enabled(ms)) {
        return (!socket_id) ? ms->ram_size : 0;
    }

    return (socket_id < ms->numa_state->num_nodes) ?
            ms->numa_state->nodes[socket_id].node_mem : 0;
}

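/* Tag the FDT node @node_name with its "numa-node-id" when NUMA is enabled. */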
void riscv_socket_fdt_write_id(const MachineState *ms, const char *node_name,
                               int socket_id)
{
    if (numa_enabled(ms)) {
        qemu_fdt_setprop_cell(ms->fdt, node_name, "numa-node-id", socket_id);
    }
}

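/*
 * Emit the devicetree NUMA distance map: a /distance-map node,
 * compatible with "numa-distance-map-v1", whose "distance-matrix"
 * property lists one big-endian <source destination distance> cell
 * triplet per socket pair.  Nothing is emitted unless the user
 * actually supplied distances.
 *
 * Illustrative output (assuming two sockets with the conventional
 * local distance of 10 and a remote distance of 20):
 *
 *     distance-map {
 *         compatible = "numa-distance-map-v1";
 *         distance-matrix = <0 0 10>, <0 1 20>,
 *                           <1 0 20>, <1 1 10>;
 *     };
 */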
void riscv_socket_fdt_write_distance_matrix(const MachineState *ms)
{
    int i, j, idx;
    uint32_t *dist_matrix, dist_matrix_size;

    if (numa_enabled(ms) && ms->numa_state->have_numa_distance) {
        dist_matrix_size = riscv_socket_count(ms) * riscv_socket_count(ms);
        dist_matrix_size *= (3 * sizeof(uint32_t));
        dist_matrix = g_malloc0(dist_matrix_size);

        for (i = 0; i < riscv_socket_count(ms); i++) {
            for (j = 0; j < riscv_socket_count(ms); j++) {
                idx = (i * riscv_socket_count(ms) + j) * 3;
                dist_matrix[idx + 0] = cpu_to_be32(i);
                dist_matrix[idx + 1] = cpu_to_be32(j);
                dist_matrix[idx + 2] =
                    cpu_to_be32(ms->numa_state->nodes[i].distance[j]);
            }
        }

        qemu_fdt_add_subnode(ms->fdt, "/distance-map");
        qemu_fdt_setprop_string(ms->fdt, "/distance-map", "compatible",
                                "numa-distance-map-v1");
        qemu_fdt_setprop(ms->fdt, "/distance-map", "distance-matrix",
                         dist_matrix, dist_matrix_size);
        g_free(dist_matrix);
    }
}

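/*
 * Machine-class cpu_index_to_instance_props hook: map a flat CPU index
 * to the properties of the corresponding possible CPU.
 */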
CpuInstanceProperties
riscv_numa_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms);

    assert(cpu_index < possible_cpus->len);
    return possible_cpus->cpus[cpu_index].props;
}

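/*
 * Machine-class get_default_cpu_node_id hook: when the user does not
 * pin CPUs to nodes, split the harts into equal contiguous blocks, one
 * block per node.  For example, 8 CPUs on 2 nodes places harts 0-3 on
 * node 0 and harts 4-7 on node 1; any remainder lands in the last node.
 */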
int64_t riscv_numa_get_default_cpu_node_id(const MachineState *ms, int idx)
{
    int64_t nidx = 0;

    if (ms->numa_state->num_nodes > ms->smp.cpus) {
        error_report("Number of NUMA nodes (%d)"
                     " cannot exceed the number of available CPUs (%d).",
                     ms->numa_state->num_nodes, ms->smp.cpus);
        exit(EXIT_FAILURE);
    }
    if (ms->numa_state->num_nodes) {
        nidx = idx / (ms->smp.cpus / ms->numa_state->num_nodes);
        if (ms->numa_state->num_nodes <= nidx) {
            nidx = ms->numa_state->num_nodes - 1;
        }
    }

    return nidx;
}

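/*
 * Machine-class possible_cpu_arch_ids hook: lazily build and cache the
 * list of possible CPUs.  Hart @n gets arch_id @n and core_id @n; the
 * node_id property is filled in later by the generic NUMA code.
 */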
const CPUArchIdList *riscv_numa_possible_cpu_arch_ids(MachineState *ms)
{
    int n;
    unsigned int max_cpus = ms->smp.max_cpus;

    if (ms->possible_cpus) {
        assert(ms->possible_cpus->len == max_cpus);
        return ms->possible_cpus;
    }

    ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
                                  sizeof(CPUArchId) * max_cpus);
    ms->possible_cpus->len = max_cpus;
    for (n = 0; n < ms->possible_cpus->len; n++) {
        ms->possible_cpus->cpus[n].type = ms->cpu_type;
        ms->possible_cpus->cpus[n].arch_id = n;
        ms->possible_cpus->cpus[n].props.has_core_id = true;
        ms->possible_cpus->cpus[n].props.core_id = n;
    }

    return ms->possible_cpus;
}
248