/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU S390x CPU Topology
 *
 * Copyright IBM Corp. 2022, 2023
 * Author(s): Pierre Morel <pmorel@linux.ibm.com>
 *
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/cpu-topology.h"

QEMU_BUILD_BUG_ON(S390_CPU_ENTITLEMENT_LOW != 1);
QEMU_BUILD_BUG_ON(S390_CPU_ENTITLEMENT_MEDIUM != 2);
QEMU_BUILD_BUG_ON(S390_CPU_ENTITLEMENT_HIGH != 3);

/**
 * fill_container:
 * @p: The address of the container TLE to fill
 * @level: The level of nesting for this container
 * @id: A unique ID for the container within its parent container
 *
 * Returns the next free TLE entry.
 */
static char *fill_container(char *p, int level, int id)
{
    SYSIBContainerListEntry *tle = (SYSIBContainerListEntry *)p;

    tle->nl = level;
    tle->id = id;
    return p + sizeof(*tle);
}

/**
 * fill_tle_cpu:
 * @p: The address of the CPU TLE to fill
 * @entry: a pointer to the S390TopologyEntry defining this
 *         CPU container.
 *
 * Returns the next free TLE entry.
 */
static char *fill_tle_cpu(char *p, S390TopologyEntry *entry)
{
    SysIBCPUListEntry *tle = (SysIBCPUListEntry *)p;
    S390TopologyId topology_id = entry->id;

    tle->nl = 0;
    tle->flags = 0;
    if (topology_id.vertical) {
        tle->flags |= topology_id.entitlement;
    }
    if (topology_id.dedicated) {
        tle->flags |= SYSIB_TLE_DEDICATED;
    }
    tle->type = topology_id.type;
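    /*
     * A CPU TLE describes a group of up to 64 cores: "origin" is the
     * number of the first core of the group and each bit of "mask",
     * most significant bit first, stands for one core of that group.
     */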
    tle->origin = cpu_to_be16(topology_id.origin * 64);
    tle->mask = cpu_to_be64(entry->mask);
    return p + sizeof(*tle);
}

/*
 * Macro to check that the size of data after increment
 * will not get bigger than the size of the SysIB.
 */
#define SYSIB_GUARD(data, x) do { \
        data += x; \
        if (data > sizeof(SysIB)) { \
            return 0; \
        } \
    } while (0)

/**
 * stsi_topology_fill_sysib:
 * @topology_list: ordered list of groups of CPUs with same properties
 * @p: A pointer to the position of the first TLE
 * @level: The nested level wanted by the guest
 *
 * Fill the SYSIB with the topology information as described in
 * the PoP, nesting containers as appropriate, with the maximum
 * nesting limited by @level.
 *
 * Return value:
 * On success: the size of the SysIB_151x after being filled with TLEs.
 * On error: 0 if the TLEs would overrun the end of the SysIB.
 */
static int stsi_topology_fill_sysib(S390TopologyList *topology_list,
                                    char *p, int level)
{
    S390TopologyEntry *entry;
    int last_drawer = -1;
    int last_book = -1;
    int last_socket = -1;
    int drawer_id = 0;
    int book_id = 0;
    int socket_id = 0;
    int n = sizeof(SysIB_151x);

    QTAILQ_FOREACH(entry, topology_list, next) {
        bool drawer_change = last_drawer != entry->id.drawer;
        bool book_change = drawer_change || last_book != entry->id.book;
        bool socket_change = book_change || last_socket != entry->id.socket;

        if (level > 3 && drawer_change) {
            SYSIB_GUARD(n, sizeof(SYSIBContainerListEntry));
            p = fill_container(p, 3, drawer_id++);
            book_id = 0;
        }
        if (level > 2 && book_change) {
            SYSIB_GUARD(n, sizeof(SYSIBContainerListEntry));
            p = fill_container(p, 2, book_id++);
            socket_id = 0;
        }
        if (socket_change) {
            SYSIB_GUARD(n, sizeof(SYSIBContainerListEntry));
            p = fill_container(p, 1, socket_id++);
        }

        SYSIB_GUARD(n, sizeof(SysIBCPUListEntry));
        p = fill_tle_cpu(p, entry);
        last_drawer = entry->id.drawer;
        last_book = entry->id.book;
        last_socket = entry->id.socket;
    }

    return n;
}

/**
 * setup_stsi:
 * @topology_list: ordered list of groups of CPUs with same properties
 * @sysib: pointer to a SysIB to be filled with SysIB_151x data
 * @level: Nested level specified by the guest
 *
 * Set up the SYSIB for STSI 15.1.x: the header as well as the description
 * of the topology.
 */
static int setup_stsi(S390TopologyList *topology_list, SysIB_151x *sysib,
                      int level)
{
    sysib->mnest = level;
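    /*
     * When the guest asks for fewer nesting levels than the machine
     * provides, the counts of the levels that are not reported are
     * folded into the topmost reported magnitude (e.g. for level 3,
     * MAG3 covers the books of all drawers).
     */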
    switch (level) {
    case 4:
        sysib->mag[S390_TOPOLOGY_MAG4] = current_machine->smp.drawers;
        sysib->mag[S390_TOPOLOGY_MAG3] = current_machine->smp.books;
        sysib->mag[S390_TOPOLOGY_MAG2] = current_machine->smp.sockets;
        sysib->mag[S390_TOPOLOGY_MAG1] = current_machine->smp.cores;
        break;
    case 3:
        sysib->mag[S390_TOPOLOGY_MAG3] = current_machine->smp.drawers *
                                         current_machine->smp.books;
        sysib->mag[S390_TOPOLOGY_MAG2] = current_machine->smp.sockets;
        sysib->mag[S390_TOPOLOGY_MAG1] = current_machine->smp.cores;
        break;
    case 2:
        sysib->mag[S390_TOPOLOGY_MAG2] = current_machine->smp.drawers *
                                         current_machine->smp.books *
                                         current_machine->smp.sockets;
        sysib->mag[S390_TOPOLOGY_MAG1] = current_machine->smp.cores;
        break;
    }

    return stsi_topology_fill_sysib(topology_list, sysib->tle, level);
}

/**
 * s390_topology_add_cpu_to_entry:
 * @entry: Topology entry to setup
 * @cpu: the S390CPU to add
 *
 * Set the core bit inside the topology mask.
 */
static void s390_topology_add_cpu_to_entry(S390TopologyEntry *entry,
                                           S390CPU *cpu)
{
    set_bit(63 - (cpu->env.core_id % 64), &entry->mask);
}

/**
 * s390_topology_from_cpu:
 * @cpu: the S390CPU to calculate the topology id for
 *
 * Initialize the topology id from the CPU environment.
 */
static S390TopologyId s390_topology_from_cpu(S390CPU *cpu)
{
    S390TopologyId topology_id = {
        .drawer = cpu->env.drawer_id,
        .book = cpu->env.book_id,
        .socket = cpu->env.socket_id,
        .type = S390_TOPOLOGY_CPU_IFL,
        .vertical = s390_topology.polarization == S390_CPU_POLARIZATION_VERTICAL,
        .entitlement = cpu->env.entitlement,
        .dedicated = cpu->env.dedicated,
        .origin = cpu->env.core_id / 64,
    };

    return topology_id;
}

/**
 * s390_topology_id_cmp:
 * @l: first S390TopologyId
 * @r: second S390TopologyId
 *
 * Compare two topology ids according to the sorting order specified by the PoP.
 *
 * Returns a negative number if the first id is less than the second,
 * 0 if they are equal and a positive number if the first is greater
 * than the second.
 */
static int s390_topology_id_cmp(const S390TopologyId *l,
                                const S390TopologyId *r)
{
    int l_polarization = l->vertical ? l->entitlement : 0;
    int r_polarization = r->vertical ? r->entitlement : 0;

    /*
     * lexical order, compare less significant values only if more significant
     * ones are equal
     */
    return l->sentinel - r->sentinel ?:
           l->drawer - r->drawer ?:
           l->book - r->book ?:
           l->socket - r->socket ?:
           l->type - r->type ?:
           /* logic is inverted for the next two */
           r_polarization - l_polarization ?:
           r->dedicated - l->dedicated ?:
           l->origin - r->origin;
}

static bool s390_topology_id_eq(const S390TopologyId *l,
                                const S390TopologyId *r)
{
    return !s390_topology_id_cmp(l, r);
}

static bool s390_topology_id_lt(const S390TopologyId *l,
                                const S390TopologyId *r)
{
    return s390_topology_id_cmp(l, r) < 0;
}

/**
 * s390_topology_fill_list_sorted:
 * @topology_list: list to fill
 *
 * Create S390TopologyEntries as appropriate from all CPUs and fill the
 * topology_list with the entries according to the order specified by the PoP.
 */
static void s390_topology_fill_list_sorted(S390TopologyList *topology_list)
{
    CPUState *cs;
    S390TopologyEntry sentinel = { .id.sentinel = 1 };

    QTAILQ_INIT(topology_list);

    QTAILQ_INSERT_HEAD(topology_list, &sentinel, next);

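    /*
     * The sentinel is the only element of the otherwise empty list and
     * compares greater than any real topology id, because its sentinel
     * field is the most significant key of s390_topology_id_cmp().
     * The insertion loop below is therefore guaranteed to find a
     * position for every CPU, so the assertion on entry cannot fail.
     */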
    CPU_FOREACH(cs) {
        S390TopologyId id = s390_topology_from_cpu(S390_CPU(cs));
        S390TopologyEntry *entry = NULL, *tmp;

        QTAILQ_FOREACH(tmp, topology_list, next) {
            if (s390_topology_id_eq(&id, &tmp->id)) {
                entry = tmp;
                break;
            } else if (s390_topology_id_lt(&id, &tmp->id)) {
                entry = g_malloc0(sizeof(*entry));
                entry->id = id;
                QTAILQ_INSERT_BEFORE(tmp, entry, next);
                break;
            }
        }
        assert(entry);
        s390_topology_add_cpu_to_entry(entry, S390_CPU(cs));
    }

    QTAILQ_REMOVE(topology_list, &sentinel, next);
}

/**
 * s390_topology_empty_list:
 * @topology_list: the list to clear
 *
 * Clear all entries in the S390Topology list.
 */
static void s390_topology_empty_list(S390TopologyList *topology_list)
{
    S390TopologyEntry *entry = NULL;
    S390TopologyEntry *tmp = NULL;

    QTAILQ_FOREACH_SAFE(entry, topology_list, next, tmp) {
        QTAILQ_REMOVE(topology_list, entry, next);
        g_free(entry);
    }
}

/**
 * insert_stsi_15_1_x:
 * @cpu: the CPU doing the call for which we set CC
 * @sel2: the selector 2, containing the nested level
 * @addr: Guest logical address of the guest SysIB
 * @ar: the access register number
 * @ra: the return address
 *
 * Emulate STSI 15.1.x, that is, perform all necessary checks and
 * fill the SYSIB.
 * In case the topology description is too long to fit into the SYSIB,
 * set CC=3 and abort without writing the SYSIB.
 */
void insert_stsi_15_1_x(S390CPU *cpu, int sel2, uint64_t addr, uint8_t ar, uintptr_t ra)
{
    S390TopologyList topology_list;
    SysIB sysib = {0};
    int length;

    if (!s390_has_topology() || sel2 < 2 || sel2 > SCLP_READ_SCP_INFO_MNEST) {
        setcc(cpu, 3);
        return;
    }

    s390_topology_fill_list_sorted(&topology_list);
    length = setup_stsi(&topology_list, &sysib.sysib_151x, sel2);
    s390_topology_empty_list(&topology_list);

    if (!length) {
        setcc(cpu, 3);
        return;
    }

    sysib.sysib_151x.length = cpu_to_be16(length);
    if (!s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, length)) {
        setcc(cpu, 0);
    } else {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
    }
}