/*
 * s390 PCI instructions
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "s390-pci-inst.h"
#include "s390-pci-bus.h"
#include "exec/memop.h"
#include "exec/memory-internal.h"
#include "qemu/error-report.h"
#include "sysemu/hw_accel.h"
#include "hw/s390x/tod.h"

#ifndef DEBUG_S390PCI_INST
#define DEBUG_S390PCI_INST 0
#endif

#define DPRINTF(fmt, ...)                                          \
    do {                                                           \
        if (DEBUG_S390PCI_INST) {                                  \
            fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__); \
        }                                                          \
    } while (0)

static void s390_set_status_code(CPUS390XState *env,
                                 uint8_t r, uint64_t status_code)
{
    env->regs[r] &= ~0xff000000ULL;
    env->regs[r] |= (status_code & 0xff) << 24;
}

static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
{
    S390PCIBusDevice *pbdev = NULL;
    S390pciState *s = s390_get_phb();
    uint32_t res_code, initial_l2, g_l2;
    int rc, i;
    uint64_t resume_token;

    rc = 0;
    if (lduw_p(&rrb->request.hdr.len) != 32) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
        res_code = CLP_RC_FMT;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
        ldq_p(&rrb->request.reserved1) != 0) {
        res_code = CLP_RC_RESNOT0;
        rc = -EINVAL;
        goto out;
    }

    resume_token = ldq_p(&rrb->request.resume_token);

    if (resume_token) {
        pbdev = s390_pci_find_dev_by_idx(s, resume_token);
        if (!pbdev) {
            res_code = CLP_RC_LISTPCI_BADRT;
            rc = -EINVAL;
            goto out;
        }
    } else {
        pbdev = s390_pci_find_next_avail_dev(s, NULL);
    }

    if (lduw_p(&rrb->response.hdr.len) < 48) {
        res_code = CLP_RC_8K;
        rc = -EINVAL;
        goto out;
    }

    initial_l2 = lduw_p(&rrb->response.hdr.len);
    if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry)
        != 0) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        *cc = 3;
        goto out;
    }

    stl_p(&rrb->response.fmt, 0);
    stq_p(&rrb->response.reserved1, 0);
    stl_p(&rrb->response.mdd, FH_MASK_SHM);
    stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
    rrb->response.flags = UID_CHECKING_ENABLED;
    rrb->response.entry_size = sizeof(ClpFhListEntry);

    i = 0;
    g_l2 = LIST_PCI_HDR_LEN;
    while (g_l2 < initial_l2 && pbdev) {
        stw_p(&rrb->response.fh_list[i].device_id,
              pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
        stw_p(&rrb->response.fh_list[i].vendor_id,
              pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
        /* Ignore RESERVED devices. */
        stl_p(&rrb->response.fh_list[i].config,
              pbdev->state == ZPCI_FS_STANDBY ? 0 : 1 << 31);
        stl_p(&rrb->response.fh_list[i].fid, pbdev->fid);
        stl_p(&rrb->response.fh_list[i].fh, pbdev->fh);

        g_l2 += sizeof(ClpFhListEntry);
        /* Add endian check for DPRINTF? */
        DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n",
                g_l2,
                lduw_p(&rrb->response.fh_list[i].vendor_id),
                lduw_p(&rrb->response.fh_list[i].device_id),
                ldl_p(&rrb->response.fh_list[i].fid),
                ldl_p(&rrb->response.fh_list[i].fh));
        pbdev = s390_pci_find_next_avail_dev(s, pbdev);
        i++;
    }

    if (!pbdev) {
        resume_token = 0;
    } else {
        resume_token = pbdev->fh & FH_MASK_INDEX;
    }
    stq_p(&rrb->response.resume_token, resume_token);
    stw_p(&rrb->response.hdr.len, g_l2);
    stw_p(&rrb->response.hdr.rsp, CLP_RC_OK);
out:
    if (rc) {
        DPRINTF("list pci failed rc 0x%x\n", rc);
        stw_p(&rrb->response.hdr.rsp, res_code);
    }
    return rc;
}
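
/*
 * Handler for the CLP (Call Logical Processor) service call: copies the
 * request and response blocks from guest memory, dispatches on the CLP
 * command (list/set/query PCI function, query function group) and writes
 * the completed response block back to the guest.
 */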
int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
{
    ClpReqHdr *reqh;
    ClpRspHdr *resh;
    S390PCIBusDevice *pbdev;
    uint32_t req_len;
    uint32_t res_len;
    uint8_t buffer[4096 * 2];
    uint8_t cc = 0;
    CPUS390XState *env = &cpu->env;
    S390pciState *s = s390_get_phb();
    int i;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    reqh = (ClpReqHdr *)buffer;
    req_len = lduw_p(&reqh->len);
    if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + sizeof(*resh))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    resh = (ClpRspHdr *)(buffer + req_len);
    res_len = lduw_p(&resh->len);
    if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }
    if ((req_len + res_len) > 8192) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + res_len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    if (req_len != 32) {
        stw_p(&resh->rsp, CLP_RC_LEN);
        goto out;
    }

    switch (lduw_p(&reqh->cmd)) {
    case CLP_LIST_PCI: {
        ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
        list_pci(rrb, &cc);
        break;
    }
    case CLP_SET_PCI_FN: {
        ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
        ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqsetpci->fh));
        if (!pbdev) {
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        switch (reqsetpci->oc) {
        case CLP_SET_ENABLE_PCI_FN:
            switch (reqsetpci->ndas) {
            case 0:
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_DMAAS);
                goto out;
            case 1:
                break;
            default:
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_RES);
                goto out;
            }

            if (pbdev->fh & FH_MASK_ENABLE) {
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
                goto out;
            }

            pbdev->fh |= FH_MASK_ENABLE;
            pbdev->state = ZPCI_FS_ENABLED;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        case CLP_SET_DISABLE_PCI_FN:
            if (!(pbdev->fh & FH_MASK_ENABLE)) {
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
                goto out;
            }
            device_reset(DEVICE(pbdev));
            pbdev->fh &= ~FH_MASK_ENABLE;
            pbdev->state = ZPCI_FS_DISABLED;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        default:
            DPRINTF("unknown set pci command\n");
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
            break;
        }
        break;
    }
    case CLP_QUERY_PCI_FN: {
        ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
        ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqquery->fh));
        if (!pbdev) {
            DPRINTF("query pci no pci dev\n");
            stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        for (i = 0; i < PCI_BAR_COUNT; i++) {
            uint32_t data = pci_get_long(pbdev->pdev->config +
                PCI_BASE_ADDRESS_0 + (i * 4));

            stl_p(&resquery->bar[i], data);
            resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
                                    ctz64(pbdev->pdev->io_regions[i].size) : 0;
            DPRINTF("bar %d addr 0x%x size 0x%" PRIx64 "barsize 0x%x\n", i,
                    ldl_p(&resquery->bar[i]),
                    pbdev->pdev->io_regions[i].size,
                    resquery->bar_size[i]);
        }

        stq_p(&resquery->sdma, ZPCI_SDMA_ADDR);
        stq_p(&resquery->edma, ZPCI_EDMA_ADDR);
        stl_p(&resquery->fid, pbdev->fid);
        stw_p(&resquery->pchid, 0);
        stw_p(&resquery->ug, 1);
        stl_p(&resquery->uid, pbdev->uid);
        stw_p(&resquery->hdr.rsp, CLP_RC_OK);
        break;
    }
    case CLP_QUERY_PCI_FNGRP: {
        ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;
        resgrp->fr = 1;
        stq_p(&resgrp->dasm, 0);
        stq_p(&resgrp->msia, ZPCI_MSI_ADDR);
        stw_p(&resgrp->mui, DEFAULT_MUI);
        stw_p(&resgrp->i, 128);
        stw_p(&resgrp->maxstbl, 128);
        resgrp->version = 0;

        stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
        break;
    }
    default:
        DPRINTF("unknown clp command\n");
        stw_p(&resh->rsp, CLP_RC_CMD);
        break;
    }

out:
    if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer,
                                req_len + res_len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    setcc(cpu, cc);
    return 0;
}

/**
 * Swap data contained in s390x big endian registers to little endian
 * PCI bars.
 *
 * @ptr: a pointer to a uint64_t data field
 * @len: the length of the valid data, must be 1,2,4 or 8
 */
static int zpci_endian_swap(uint64_t *ptr, uint8_t len)
{
    uint64_t data = *ptr;

    switch (len) {
    case 1:
        break;
    case 2:
        data = bswap16(data);
        break;
    case 4:
        data = bswap32(data);
        break;
    case 8:
        data = bswap64(data);
        break;
    default:
        return -EINVAL;
    }
    *ptr = data;
    return 0;
}
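
/*
 * Find the subregion of @mr that fully contains the access at @offset with
 * length @len; if no subregion matches, the original region is returned.
 */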
static MemoryRegion *s390_get_subregion(MemoryRegion *mr, uint64_t offset,
                                        uint8_t len)
{
    MemoryRegion *subregion;
    uint64_t subregion_size;

    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        subregion_size = int128_get64(subregion->size);
        if ((offset >= subregion->addr) &&
            (offset + len) <= (subregion->addr + subregion_size)) {
            mr = subregion;
            break;
        }
    }
    return mr;
}

static MemTxResult zpci_read_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
                                 uint64_t offset, uint64_t *data, uint8_t len)
{
    MemoryRegion *mr;

    mr = pbdev->pdev->io_regions[pcias].memory;
    mr = s390_get_subregion(mr, offset, len);
    offset -= mr->addr;
    return memory_region_dispatch_read(mr, offset, data,
                                       size_memop(len) | MO_BE,
                                       MEMTXATTRS_UNSPECIFIED);
}
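
/*
 * PCILG (PCI load) handler: reads up to 8 bytes from an I/O BAR or from the
 * configuration space pseudo BAR of the function addressed by the handle in
 * r2 and returns the data in r1.
 */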
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    uint64_t offset;
    uint64_t data;
    MemTxResult result;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("pcilg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    switch (pcias) {
    case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
        if (!len || (len > (8 - (offset & 0x7)))) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        result = zpci_read_bar(pbdev, pcias, offset, &data, len);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    case ZPCI_CONFIG_BAR:
        if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        data = pci_host_config_read_common(
                   pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);

        if (zpci_endian_swap(&data, len)) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    default:
        DPRINTF("pcilg invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    pbdev->fmb.counter[ZPCI_FMB_CNT_LD]++;

    env->regs[r1] = data;
    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

static MemTxResult zpci_write_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
                                  uint64_t offset, uint64_t data, uint8_t len)
{
    MemoryRegion *mr;

    mr = pbdev->pdev->io_regions[pcias].memory;
    mr = s390_get_subregion(mr, offset, len);
    offset -= mr->addr;
    return memory_region_dispatch_write(mr, offset, data,
                                        size_memop(len) | MO_BE,
                                        MEMTXATTRS_UNSPECIFIED);
}
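
/*
 * PCISTG (PCI store) handler: writes up to 8 bytes from r1 to an I/O BAR or
 * to the configuration space pseudo BAR of the function addressed by the
 * handle in r2.
 */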
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint64_t offset, data;
    S390PCIBusDevice *pbdev;
    MemTxResult result;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];
    data = env->regs[r1];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("pcistg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    /* ZPCI_FS_RESERVED, ZPCI_FS_STANDBY and ZPCI_FS_DISABLED
     * are already covered by the FH_MASK_ENABLE check above
     */
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    switch (pcias) {
        /* A ZPCI PCI card may use any BAR from BAR 0 to BAR 5 */
    case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
        /* Check length:
         * A length of 0 is invalid and length should not cross a double word
         */
        if (!len || (len > (8 - (offset & 0x7)))) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }

        result = zpci_write_bar(pbdev, pcias, offset, data, len);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    case ZPCI_CONFIG_BAR:
        /* ZPCI uses the pseudo BAR number 15 as configuration space */
        /* possible access lengths are 1,2,4 and must not cross a word */
        if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        /* len = 1,2,4 so we do not need to test */
        zpci_endian_swap(&data, len);
        pci_host_config_write_common(pbdev->pdev, offset,
                                     pci_config_size(pbdev->pdev),
                                     data, len);
        break;
    default:
        DPRINTF("pcistg invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    pbdev->fmb.counter[ZPCI_FMB_CNT_ST]++;

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}
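
/*
 * Update the cached IOTLB entry for one guest DMA page: remove it on unmap,
 * install or replace it on a new mapping, and send the corresponding IOMMU
 * notifications. A mapping identical to the cached one is ignored.
 */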
static void s390_pci_update_iotlb(S390PCIIOMMU *iommu, S390IOTLBEntry *entry)
{
    S390IOTLBEntry *cache = g_hash_table_lookup(iommu->iotlb, &entry->iova);
    IOMMUTLBEntry notify = {
        .target_as = &address_space_memory,
        .iova = entry->iova,
        .translated_addr = entry->translated_addr,
        .perm = entry->perm,
        .addr_mask = ~PAGE_MASK,
    };

    if (entry->perm == IOMMU_NONE) {
        if (!cache) {
            return;
        }
        g_hash_table_remove(iommu->iotlb, &entry->iova);
    } else {
        if (cache) {
            if (cache->perm == entry->perm &&
                cache->translated_addr == entry->translated_addr) {
                return;
            }

            notify.perm = IOMMU_NONE;
            memory_region_notify_iommu(&iommu->iommu_mr, 0, notify);
            notify.perm = entry->perm;
        }

        cache = g_new(S390IOTLBEntry, 1);
        cache->iova = entry->iova;
        cache->translated_addr = entry->translated_addr;
        cache->len = PAGE_SIZE;
        cache->perm = entry->perm;
        g_hash_table_replace(iommu->iotlb, &cache->iova, cache);
    }

    memory_region_notify_iommu(&iommu->iommu_mr, 0, notify);
}
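
/*
 * RPCIT (refresh PCI translations) handler: walks the guest I/O translation
 * tables for the range described by r2/r2+1 and synchronizes the IOTLB and
 * IOMMU notifiers with the guest view, reporting an error event on failure.
 */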
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint32_t fh;
    uint16_t error = 0;
    S390PCIBusDevice *pbdev;
    S390PCIIOMMU *iommu;
    S390IOTLBEntry entry;
    hwaddr start, end;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    start = env->regs[r2];
    end = start + env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("rpcit no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_MOD_ST_ERROR_RECOVER);
        return 0;
    default:
        break;
    }

    iommu = pbdev->iommu;
    if (!iommu->g_iota) {
        error = ERR_EVENT_INVALAS;
        goto err;
    }

    if (end < iommu->pba || start > iommu->pal) {
        error = ERR_EVENT_OORANGE;
        goto err;
    }

    while (start < end) {
        error = s390_guest_io_table_walk(iommu->g_iota, start, &entry);
        if (error) {
            break;
        }

        start += entry.len;
        while (entry.iova < start && entry.iova < end) {
            s390_pci_update_iotlb(iommu, &entry);
            entry.iova += PAGE_SIZE;
            entry.translated_addr += PAGE_SIZE;
        }
    }
err:
    if (error) {
        pbdev->state = ZPCI_FS_ERROR;
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_FUNC_IN_ERR);
        s390_pci_generate_error_event(error, pbdev->fh, pbdev->fid, start, 0);
    } else {
        pbdev->fmb.counter[ZPCI_FMB_CNT_RPCIT]++;
        setcc(cpu, ZPCI_PCI_LS_OK);
    }
    return 0;
}
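
/*
 * PCISTB (PCI store block) handler: copies a block of up to maxstbl bytes
 * from guest memory at gaddr to an I/O BAR of the addressed function, in
 * 8-byte pieces, after validating alignment, length and 4K-boundary rules.
 */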
int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
                        uint8_t ar, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    MemoryRegion *mr;
    MemTxResult result;
    uint64_t offset;
    int i;
    uint32_t fh;
    uint8_t pcias;
    uint8_t len;
    uint8_t buffer[128];

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    pcias = (env->regs[r1] >> 16) & 0xf;
    len = env->regs[r1] & 0xff;
    offset = env->regs[r3];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    if (pcias > ZPCI_IO_BAR_MAX) {
        DPRINTF("pcistb invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    /* Verify the address, offset and length */
    /* offset must be a multiple of 8 */
    if (offset % 8) {
        goto specification_error;
    }
    /* Length must be greater than 8, a multiple of 8 */
    /* and not greater than maxstbl */
    if ((len <= 8) || (len % 8) || (len > pbdev->maxstbl)) {
        goto specification_error;
    }
    /* Do not cross a 4K-byte boundary */
    if (((offset & 0xfff) + len) > 0x1000) {
        goto specification_error;
    }
    /* Guest address must be double word aligned */
    if (gaddr & 0x07UL) {
        goto specification_error;
    }

    mr = pbdev->pdev->io_regions[pcias].memory;
    mr = s390_get_subregion(mr, offset, len);
    offset -= mr->addr;

    if (!memory_region_access_valid(mr, offset, len, true,
                                    MEMTXATTRS_UNSPECIFIED)) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    for (i = 0; i < len / 8; i++) {
        result = memory_region_dispatch_write(mr, offset + i * 8,
                                              ldq_p(buffer + i * 8),
                                              MO_64, MEMTXATTRS_UNSPECIFIED);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
    }

    pbdev->fmb.counter[ZPCI_FMB_CNT_STB]++;

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;

specification_error:
    s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    return 0;
}

static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    int ret, len;
    uint8_t isc = FIB_DATA_ISC(ldl_p(&fib.data));

    pbdev->routes.adapter.adapter_id = css_get_adapter_id(
                                       CSS_IO_ADAPTER_PCI, isc);
    pbdev->summary_ind = get_indicator(ldq_p(&fib.aisb), sizeof(uint64_t));
    len = BITS_TO_LONGS(FIB_DATA_NOI(ldl_p(&fib.data))) * sizeof(unsigned long);
    pbdev->indicator = get_indicator(ldq_p(&fib.aibv), len);

    ret = map_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    if (ret) {
        goto out;
    }

    ret = map_indicator(&pbdev->routes.adapter, pbdev->indicator);
    if (ret) {
        goto out;
    }

    pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb);
    pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data));
    pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv);
    pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data));
    pbdev->isc = isc;
    pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data));
    pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data));

    DPRINTF("reg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
out:
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);
    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;
    return ret;
}

int pci_dereg_irqs(S390PCIBusDevice *pbdev)
{
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);

    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;
    pbdev->routes.adapter.summary_addr = 0;
    pbdev->routes.adapter.summary_offset = 0;
    pbdev->routes.adapter.ind_addr = 0;
    pbdev->routes.adapter.ind_offset = 0;
    pbdev->isc = 0;
    pbdev->noi = 0;
    pbdev->sum = 0;

    DPRINTF("dereg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
}

static int reg_ioat(CPUS390XState *env, S390PCIIOMMU *iommu, ZpciFib fib,
                    uintptr_t ra)
{
    uint64_t pba = ldq_p(&fib.pba);
    uint64_t pal = ldq_p(&fib.pal);
    uint64_t g_iota = ldq_p(&fib.iota);
    uint8_t dt = (g_iota >> 2) & 0x7;
    uint8_t t = (g_iota >> 11) & 0x1;

    pba &= ~0xfff;
    pal |= 0xfff;
    if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return -EINVAL;
    }

    /* currently we only support designation type 1 with translation */
    if (!(dt == ZPCI_IOTA_RTTO && t)) {
        error_report("unsupported ioat dt %d t %d", dt, t);
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return -EINVAL;
    }

    iommu->pba = pba;
    iommu->pal = pal;
    iommu->g_iota = g_iota;

    s390_pci_iommu_enable(iommu);

    return 0;
}

void pci_dereg_ioat(S390PCIIOMMU *iommu)
{
    s390_pci_iommu_disable(iommu);
    iommu->pba = 0;
    iommu->pal = 0;
    iommu->g_iota = 0;
}

void fmb_timer_free(S390PCIBusDevice *pbdev)
{
    if (pbdev->fmb_timer) {
        timer_del(pbdev->fmb_timer);
        timer_free(pbdev->fmb_timer);
        pbdev->fmb_timer = NULL;
    }
    pbdev->fmb_addr = 0;
    memset(&pbdev->fmb, 0, sizeof(ZpciFmb));
}

static int fmb_do_update(S390PCIBusDevice *pbdev, int offset, uint64_t val,
                         int len)
{
    MemTxResult ret;
    uint64_t dst = pbdev->fmb_addr + offset;

    switch (len) {
    case 8:
        address_space_stq_be(&address_space_memory, dst, val,
                             MEMTXATTRS_UNSPECIFIED,
                             &ret);
        break;
    case 4:
        address_space_stl_be(&address_space_memory, dst, val,
                             MEMTXATTRS_UNSPECIFIED,
                             &ret);
        break;
    case 2:
        address_space_stw_be(&address_space_memory, dst, val,
                             MEMTXATTRS_UNSPECIFIED,
                             &ret);
        break;
    case 1:
        address_space_stb(&address_space_memory, dst, val,
                          MEMTXATTRS_UNSPECIFIED,
                          &ret);
        break;
    default:
        ret = MEMTX_ERROR;
        break;
    }
    if (ret != MEMTX_OK) {
        s390_pci_generate_error_event(ERR_EVENT_FMBA, pbdev->fh, pbdev->fid,
                                      pbdev->fmb_addr, 0);
        fmb_timer_free(pbdev);
    }

    return ret;
}
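
/*
 * Timer callback that copies the current function measurement block (FMB)
 * counters to the guest-provided FMB address, toggling the update (U) bit
 * around the update and re-arming the timer for the next interval.
 */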
static void fmb_update(void *opaque)
{
    S390PCIBusDevice *pbdev = opaque;
    int64_t t = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    int i;

    /* Update U bit */
    pbdev->fmb.last_update *= 2;
    pbdev->fmb.last_update |= UPDATE_U_BIT;
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, last_update),
                      pbdev->fmb.last_update,
                      sizeof(pbdev->fmb.last_update))) {
        return;
    }

    /* Update FMB sample count */
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, sample),
                      pbdev->fmb.sample++,
                      sizeof(pbdev->fmb.sample))) {
        return;
    }

    /* Update FMB counters */
    for (i = 0; i < ZPCI_FMB_CNT_MAX; i++) {
        if (fmb_do_update(pbdev, offsetof(ZpciFmb, counter[i]),
                          pbdev->fmb.counter[i],
                          sizeof(pbdev->fmb.counter[0]))) {
            return;
        }
    }

    /* Clear U bit and update the time */
    pbdev->fmb.last_update = time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
    pbdev->fmb.last_update *= 2;
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, last_update),
                      pbdev->fmb.last_update,
                      sizeof(pbdev->fmb.last_update))) {
        return;
    }
    timer_mod(pbdev->fmb_timer, t + DEFAULT_MUI);
}
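
/*
 * MPCIFC (modify PCI function controls) handler: dispatches on the operation
 * control in r1 to register/deregister interrupts or I/O address translation,
 * reset error/blocked states, or set up the measurement block update timer.
 */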
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
                        uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint8_t oc, dmaas;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    oc = env->regs[r1] & 0xff;
    dmaas = (env->regs[r1] >> 16) & 0xff;
    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("mpcifc no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    default:
        break;
    }

    if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    if (fib.fmt != 0) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    switch (oc) {
    case ZPCI_MOD_FC_REG_INT:
        if (pbdev->summary_ind) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_irqs(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_RES_NOT_AVAIL);
        }
        break;
    case ZPCI_MOD_FC_DEREG_INT:
        if (!pbdev->summary_ind) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_irqs(pbdev);
        }
        break;
    case ZPCI_MOD_FC_REG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_ioat(env, pbdev->iommu, fib, ra)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
        }
        break;
    case ZPCI_MOD_FC_DEREG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev->iommu);
        }
        break;
    case ZPCI_MOD_FC_REREG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev->iommu);
            if (reg_ioat(env, pbdev->iommu, fib, ra)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
            }
        }
        break;
    case ZPCI_MOD_FC_RESET_ERROR:
        switch (pbdev->state) {
        case ZPCI_FS_BLOCKED:
        case ZPCI_FS_ERROR:
            pbdev->state = ZPCI_FS_ENABLED;
            break;
        default:
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        }
        break;
    case ZPCI_MOD_FC_RESET_BLOCK:
        switch (pbdev->state) {
        case ZPCI_FS_ERROR:
            pbdev->state = ZPCI_FS_BLOCKED;
            break;
        default:
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        }
        break;
    case ZPCI_MOD_FC_SET_MEASURE: {
        uint64_t fmb_addr = ldq_p(&fib.fmb_addr);

        if (fmb_addr & FMBK_MASK) {
            cc = ZPCI_PCI_LS_ERR;
            s390_pci_generate_error_event(ERR_EVENT_FMBPRO, pbdev->fh,
                                          pbdev->fid, fmb_addr, 0);
            fmb_timer_free(pbdev);
            break;
        }

        if (!fmb_addr) {
            /* Stop updating FMB. */
            fmb_timer_free(pbdev);
            break;
        }

        if (!pbdev->fmb_timer) {
            pbdev->fmb_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                            fmb_update, pbdev);
        } else if (timer_pending(pbdev->fmb_timer)) {
            /* Remove pending timer to update FMB address. */
            timer_del(pbdev->fmb_timer);
        }
        pbdev->fmb_addr = fmb_addr;
        timer_mod(pbdev->fmb_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + DEFAULT_MUI);
        break;
    }
    default:
        s390_program_interrupt(&cpu->env, PGM_OPERAND, ra);
        cc = ZPCI_PCI_LS_ERR;
    }

    setcc(cpu, cc);
    return 0;
}
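
/*
 * STPCIFC (store PCI function controls) handler: fills a function information
 * block describing the current state of the addressed function (DMA ranges,
 * interrupt routing, FMB address, status flags) and stores it at fiba.
 */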
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
                         uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint8_t dmaas;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint32_t data;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    dmaas = (env->regs[r1] >> 16) & 0xff;

    if (dmaas) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_INVAL_DMAAS);
        return 0;
    }

    if (fiba & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_idx(s390_get_phb(), fh & FH_MASK_INDEX);
    if (!pbdev) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    memset(&fib, 0, sizeof(fib));

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_DISABLED:
        if (fh & FH_MASK_ENABLE) {
            setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
            return 0;
        }
        goto out;
    /* BLOCKED bit is set to one coincident with the setting of ERROR bit.
     * FH Enabled bit is set to one in states of ENABLED, BLOCKED or ERROR.
     */
    case ZPCI_FS_ERROR:
        fib.fc |= 0x20;
        /* fallthrough */
    case ZPCI_FS_BLOCKED:
        fib.fc |= 0x40;
        /* fallthrough */
    case ZPCI_FS_ENABLED:
        fib.fc |= 0x80;
        if (pbdev->iommu->enabled) {
            fib.fc |= 0x10;
        }
        if (!(fh & FH_MASK_ENABLE)) {
            env->regs[r1] |= 1ULL << 63;
        }
        break;
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_PERM_ERROR);
        return 0;
    }

    stq_p(&fib.pba, pbdev->iommu->pba);
    stq_p(&fib.pal, pbdev->iommu->pal);
    stq_p(&fib.iota, pbdev->iommu->g_iota);
    stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
    stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
    stq_p(&fib.fmb_addr, pbdev->fmb_addr);

    data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
           ((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
           ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
    stl_p(&fib.data, data);

out:
    if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    setcc(cpu, cc);
    return 0;
}