/*
 * s390 PCI instructions
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "s390-pci-inst.h"
#include "s390-pci-bus.h"
#include "exec/memory-internal.h"
#include "qemu/error-report.h"

/* #define DEBUG_S390PCI_INST */
#ifdef DEBUG_S390PCI_INST
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

static void s390_set_status_code(CPUS390XState *env,
                                 uint8_t r, uint64_t status_code)
{
    env->regs[r] &= ~0xff000000ULL;
    env->regs[r] |= (status_code & 0xff) << 24;
}

static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
{
    S390PCIBusDevice *pbdev = NULL;
    uint32_t res_code, initial_l2, g_l2;
    int rc, i;
    uint64_t resume_token;

    rc = 0;
    if (lduw_p(&rrb->request.hdr.len) != 32) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
        res_code = CLP_RC_FMT;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
        ldq_p(&rrb->request.reserved1) != 0) {
        res_code = CLP_RC_RESNOT0;
        rc = -EINVAL;
        goto out;
    }

    resume_token = ldq_p(&rrb->request.resume_token);

    if (resume_token) {
        pbdev = s390_pci_find_dev_by_idx(resume_token);
        if (!pbdev) {
            res_code = CLP_RC_LISTPCI_BADRT;
            rc = -EINVAL;
            goto out;
        }
    } else {
        pbdev = s390_pci_find_next_avail_dev(NULL);
    }

    if (lduw_p(&rrb->response.hdr.len) < 48) {
        res_code = CLP_RC_8K;
        rc = -EINVAL;
        goto out;
    }

    initial_l2 = lduw_p(&rrb->response.hdr.len);
    if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry)
        != 0) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        *cc = 3;
        goto out;
    }

    stl_p(&rrb->response.fmt, 0);
    stq_p(&rrb->response.reserved1, 0);
    stl_p(&rrb->response.mdd, FH_MASK_SHM);
    stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
    rrb->response.flags = UID_CHECKING_ENABLED;
    rrb->response.entry_size = sizeof(ClpFhListEntry);

    i = 0;
    g_l2 = LIST_PCI_HDR_LEN;
    while (g_l2 < initial_l2 && pbdev) {
        stw_p(&rrb->response.fh_list[i].device_id,
            pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
        stw_p(&rrb->response.fh_list[i].vendor_id,
            pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
        /* Ignore RESERVED devices. */
        stl_p(&rrb->response.fh_list[i].config,
            pbdev->state == ZPCI_FS_STANDBY ? 0 : 1 << 31);
        stl_p(&rrb->response.fh_list[i].fid, pbdev->fid);
        stl_p(&rrb->response.fh_list[i].fh, pbdev->fh);

        g_l2 += sizeof(ClpFhListEntry);
        /* Add endian check for DPRINTF? */
        DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n",
            g_l2,
            lduw_p(&rrb->response.fh_list[i].vendor_id),
            lduw_p(&rrb->response.fh_list[i].device_id),
            ldl_p(&rrb->response.fh_list[i].fid),
            ldl_p(&rrb->response.fh_list[i].fh));
        pbdev = s390_pci_find_next_avail_dev(pbdev);
        i++;
    }

    if (!pbdev) {
        resume_token = 0;
    } else {
        resume_token = pbdev->fh & FH_MASK_INDEX;
    }
    stq_p(&rrb->response.resume_token, resume_token);
    stw_p(&rrb->response.hdr.len, g_l2);
    stw_p(&rrb->response.hdr.rsp, CLP_RC_OK);
out:
    if (rc) {
        DPRINTF("list pci failed rc 0x%x\n", rc);
        stw_p(&rrb->response.hdr.rsp, res_code);
    }
    return rc;
}

int clp_service_call(S390CPU *cpu, uint8_t r2)
{
    ClpReqHdr *reqh;
    ClpRspHdr *resh;
    S390PCIBusDevice *pbdev;
    uint32_t req_len;
    uint32_t res_len;
    uint8_t buffer[4096 * 2];
    uint8_t cc = 0;
    CPUS390XState *env = &cpu->env;
    int i;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) {
        return 0;
    }
    reqh = (ClpReqHdr *)buffer;
    req_len = lduw_p(&reqh->len);
    if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + sizeof(*resh))) {
        return 0;
    }
    resh = (ClpRspHdr *)(buffer + req_len);
    res_len = lduw_p(&resh->len);
    if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }
    if ((req_len + res_len) > 8192) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + res_len)) {
        return 0;
    }

    if (req_len != 32) {
        stw_p(&resh->rsp, CLP_RC_LEN);
        goto out;
    }

    switch (lduw_p(&reqh->cmd)) {
    case CLP_LIST_PCI: {
        ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
        list_pci(rrb, &cc);
        break;
    }
    case CLP_SET_PCI_FN: {
        ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
        ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqsetpci->fh));
        if (!pbdev) {
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        switch (reqsetpci->oc) {
        case CLP_SET_ENABLE_PCI_FN:
            switch (reqsetpci->ndas) {
            case 0:
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_DMAAS);
                goto out;
            case 1:
                break;
            default:
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_RES);
                goto out;
            }

            if (pbdev->fh & FH_MASK_ENABLE) {
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
                goto out;
            }

            pbdev->fh |= FH_MASK_ENABLE;
            pbdev->state = ZPCI_FS_ENABLED;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        case CLP_SET_DISABLE_PCI_FN:
            if (!(pbdev->fh & FH_MASK_ENABLE)) {
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
                goto out;
            }
            device_reset(DEVICE(pbdev));
            pbdev->fh &= ~FH_MASK_ENABLE;
            pbdev->state = ZPCI_FS_DISABLED;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        default:
            DPRINTF("unknown set pci command\n");
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
            break;
        }
        break;
    }
    case CLP_QUERY_PCI_FN: {
        ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
        ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqquery->fh));
        if (!pbdev) {
            DPRINTF("query pci no pci dev\n");
            stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        for (i = 0; i < PCI_BAR_COUNT; i++) {
            uint32_t data = pci_get_long(pbdev->pdev->config +
                PCI_BASE_ADDRESS_0 + (i * 4));

            stl_p(&resquery->bar[i], data);
            resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
                                    ctz64(pbdev->pdev->io_regions[i].size) : 0;
            DPRINTF("bar %d addr 0x%x size 0x%" PRIx64 "barsize 0x%x\n", i,
                    ldl_p(&resquery->bar[i]),
                    pbdev->pdev->io_regions[i].size,
                    resquery->bar_size[i]);
        }

        stq_p(&resquery->sdma, ZPCI_SDMA_ADDR);
        stq_p(&resquery->edma, ZPCI_EDMA_ADDR);
        stl_p(&resquery->fid, pbdev->fid);
        stw_p(&resquery->pchid, 0);
        stw_p(&resquery->ug, 1);
        stl_p(&resquery->uid, pbdev->uid);
        stw_p(&resquery->hdr.rsp, CLP_RC_OK);
        break;
    }
    case CLP_QUERY_PCI_FNGRP: {
        ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;
        resgrp->fr = 1;
        stq_p(&resgrp->dasm, 0);
        stq_p(&resgrp->msia, ZPCI_MSI_ADDR);
        stw_p(&resgrp->mui, 0);
        stw_p(&resgrp->i, 128);
        resgrp->version = 0;

        stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
        break;
    }
    default:
        DPRINTF("unknown clp command\n");
        stw_p(&resh->rsp, CLP_RC_CMD);
        break;
    }

out:
    if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer,
                                req_len + res_len)) {
        return 0;
    }
    setcc(cpu, cc);
    return 0;
}

int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    uint64_t offset;
    uint64_t data;
    MemoryRegion *mr;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("pcilg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    if (pcias < 6) {
        if ((8 - (offset & 0x7)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        mr = pbdev->pdev->io_regions[pcias].memory;
        memory_region_dispatch_read(mr, offset, &data, len,
                                    MEMTXATTRS_UNSPECIFIED);
    } else if (pcias == 15) {
        if ((4 - (offset & 0x3)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        data = pci_host_config_read_common(
                   pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);

        switch (len) {
        case 1:
            break;
        case 2:
            data = bswap16(data);
            break;
        case 4:
            data = bswap32(data);
            break;
        case 8:
            data = bswap64(data);
            break;
        default:
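            /* Only 1-, 2-, 4- and 8-byte config-space accesses are valid. */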
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
    } else {
        DPRINTF("invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    env->regs[r1] = data;
    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

static void update_msix_table_msg_data(S390PCIBusDevice *pbdev, uint64_t offset,
                                       uint64_t *data, uint8_t len)
{
    uint32_t val;
    uint8_t *msg_data;

    if (offset % PCI_MSIX_ENTRY_SIZE != 8) {
        return;
    }

    if (len != 4) {
        DPRINTF("access msix table msg data but len is %d\n", len);
        return;
    }

    msg_data = (uint8_t *)data - offset % PCI_MSIX_ENTRY_SIZE +
               PCI_MSIX_ENTRY_VECTOR_CTRL;
    val = pci_get_long(msg_data) |
        ((pbdev->fh & FH_MASK_INDEX) << ZPCI_MSI_VEC_BITS);
    pci_set_long(msg_data, val);
    DPRINTF("update msix msg_data to 0x%" PRIx64 "\n", *data);
}

static int trap_msix(S390PCIBusDevice *pbdev, uint64_t offset, uint8_t pcias)
{
    if (pbdev->msix.available && pbdev->msix.table_bar == pcias &&
        offset >= pbdev->msix.table_offset &&
        offset <= pbdev->msix.table_offset +
                  (pbdev->msix.entries - 1) * PCI_MSIX_ENTRY_SIZE) {
        return 1;
    } else {
        return 0;
    }
}

int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    uint64_t offset, data;
    S390PCIBusDevice *pbdev;
    MemoryRegion *mr;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("pcistg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    data = env->regs[r1];
    if (pcias < 6) {
        if ((8 - (offset & 0x7)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }

        if (trap_msix(pbdev, offset, pcias)) {
            offset = offset - pbdev->msix.table_offset;
            mr = &pbdev->pdev->msix_table_mmio;
            update_msix_table_msg_data(pbdev, offset, &data, len);
        } else {
            mr = pbdev->pdev->io_regions[pcias].memory;
        }

        memory_region_dispatch_write(mr, offset, data, len,
                                     MEMTXATTRS_UNSPECIFIED);
    } else if (pcias == 15) {
        if ((4 - (offset & 0x3)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        switch (len) {
        case 1:
            break;
        case 2:
            data = bswap16(data);
            break;
        case 4:
            data = bswap32(data);
            break;
        case 8:
            data = bswap64(data);
            break;
        default:
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }

        pci_host_config_write_common(pbdev->pdev, offset,
                                     pci_config_size(pbdev->pdev),
                                     data, len);
    } else {
        DPRINTF("pcistg invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    uint32_t fh;
    S390PCIBusDevice *pbdev;
    hwaddr start, end;
    IOMMUTLBEntry entry;
    MemoryRegion *mr;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        goto out;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        goto out;
    }

    fh = env->regs[r1] >> 32;
    start = env->regs[r2];
    end = start + env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("rpcit no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        goto out;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_MOD_ST_ERROR_RECOVER);
        return 0;
    default:
        break;
    }

    if (!pbdev->g_iota) {
        pbdev->state = ZPCI_FS_ERROR;
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INSUF_RES);
        s390_pci_generate_error_event(ERR_EVENT_INVALAS, pbdev->fh, pbdev->fid,
                                      start, 0);
        goto out;
    }

    if (end < pbdev->pba || start > pbdev->pal) {
        pbdev->state = ZPCI_FS_ERROR;
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INSUF_RES);
        s390_pci_generate_error_event(ERR_EVENT_OORANGE, pbdev->fh, pbdev->fid,
                                      start, 0);
        goto out;
    }

    mr = &pbdev->iommu_mr;
    while (start < end) {
        entry = mr->iommu_ops->translate(mr, start, 0);

        if (!entry.translated_addr) {
            pbdev->state = ZPCI_FS_ERROR;
            setcc(cpu, ZPCI_PCI_LS_ERR);
            s390_set_status_code(env, r1, ZPCI_PCI_ST_INSUF_RES);
            s390_pci_generate_error_event(ERR_EVENT_SERR, pbdev->fh, pbdev->fid,
                                          start, ERR_EVENT_Q_BIT);
            goto out;
        }

        memory_region_notify_iommu(mr, entry);
        start += entry.addr_mask + 1;
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
out:
    return 0;
}

int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
                        uint8_t ar)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    MemoryRegion *mr;
    int i;
    uint32_t fh;
    uint8_t pcias;
    uint8_t len;
    uint8_t buffer[128];

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    pcias = (env->regs[r1] >> 16) & 0xf;
    len = env->regs[r1] & 0xff;

    if (pcias > 5) {
        DPRINTF("pcistb invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    switch (len) {
    case 16:
    case 32:
    case 64:
    case 128:
        break;
    default:
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
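    /* A function in the error state reports the store block as blocked. */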
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    mr = pbdev->pdev->io_regions[pcias].memory;
    if (!memory_region_access_valid(mr, env->regs[r3], len, true)) {
        program_interrupt(env, PGM_ADDRESSING, 6);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
        return 0;
    }

    for (i = 0; i < len / 8; i++) {
        memory_region_dispatch_write(mr, env->regs[r3] + i * 8,
                                     ldq_p(buffer + i * 8), 8,
                                     MEMTXATTRS_UNSPECIFIED);
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    int ret, len;

    ret = css_register_io_adapter(S390_PCIPT_ADAPTER,
                                  FIB_DATA_ISC(ldl_p(&fib.data)), true, false,
                                  &pbdev->routes.adapter.adapter_id);
    assert(ret == 0);

    pbdev->summary_ind = get_indicator(ldq_p(&fib.aisb), sizeof(uint64_t));
    len = BITS_TO_LONGS(FIB_DATA_NOI(ldl_p(&fib.data))) * sizeof(unsigned long);
    pbdev->indicator = get_indicator(ldq_p(&fib.aibv), len);

    ret = map_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    if (ret) {
        goto out;
    }

    ret = map_indicator(&pbdev->routes.adapter, pbdev->indicator);
    if (ret) {
        goto out;
    }

    pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb);
    pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data));
    pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv);
    pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data));
    pbdev->isc = FIB_DATA_ISC(ldl_p(&fib.data));
    pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data));
    pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data));

    DPRINTF("reg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
out:
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);
    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;
    return ret;
}

int pci_dereg_irqs(S390PCIBusDevice *pbdev)
{
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);

    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;
    pbdev->routes.adapter.summary_addr = 0;
    pbdev->routes.adapter.summary_offset = 0;
    pbdev->routes.adapter.ind_addr = 0;
    pbdev->routes.adapter.ind_offset = 0;
    pbdev->isc = 0;
    pbdev->noi = 0;
    pbdev->sum = 0;

    DPRINTF("dereg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
}

static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    uint64_t pba = ldq_p(&fib.pba);
    uint64_t pal = ldq_p(&fib.pal);
    uint64_t g_iota = ldq_p(&fib.iota);
    uint8_t dt = (g_iota >> 2) & 0x7;
    uint8_t t = (g_iota >> 11) & 0x1;

    if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) {
        program_interrupt(env, PGM_OPERAND, 6);
        return -EINVAL;
    }

    /* currently we only support designation type 1 with translation */
    if (!(dt == ZPCI_IOTA_RTTO && t)) {
        error_report("unsupported ioat dt %d t %d", dt, t);
        program_interrupt(env, PGM_OPERAND, 6);
        return -EINVAL;
    }

    pbdev->pba = pba;
    pbdev->pal = pal;
    pbdev->g_iota = g_iota;

    s390_pci_iommu_enable(pbdev);

    return 0;
}

void pci_dereg_ioat(S390PCIBusDevice *pbdev)
{
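    /* Disable the IOMMU region and clear the cached DMA translation parameters. */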
    s390_pci_iommu_disable(pbdev);
    pbdev->pba = 0;
    pbdev->pal = 0;
    pbdev->g_iota = 0;
}

int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
{
    CPUS390XState *env = &cpu->env;
    uint8_t oc, dmaas;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    oc = env->regs[r1] & 0xff;
    dmaas = (env->regs[r1] >> 16) & 0xff;
    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("mpcifc no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    default:
        break;
    }

    if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        return 0;
    }

    if (fib.fmt != 0) {
        program_interrupt(env, PGM_OPERAND, 6);
        return 0;
    }

    switch (oc) {
    case ZPCI_MOD_FC_REG_INT:
        if (pbdev->summary_ind) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_irqs(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_RES_NOT_AVAIL);
        }
        break;
    case ZPCI_MOD_FC_DEREG_INT:
        if (!pbdev->summary_ind) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_irqs(pbdev);
        }
        break;
    case ZPCI_MOD_FC_REG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (pbdev->iommu_enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_ioat(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
        }
        break;
    case ZPCI_MOD_FC_DEREG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu_enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev);
        }
        break;
    case ZPCI_MOD_FC_REREG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu_enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev);
            if (reg_ioat(env, pbdev, fib)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
            }
        }
        break;
    case ZPCI_MOD_FC_RESET_ERROR:
        switch (pbdev->state) {
        case ZPCI_FS_BLOCKED:
        case ZPCI_FS_ERROR:
            pbdev->state = ZPCI_FS_ENABLED;
            break;
        default:
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        }
        break;
    case ZPCI_MOD_FC_RESET_BLOCK:
        switch (pbdev->state) {
        case ZPCI_FS_ERROR:
            pbdev->state = ZPCI_FS_BLOCKED;
            break;
        default:
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        }
        break;
    case ZPCI_MOD_FC_SET_MEASURE:
        pbdev->fmb_addr = ldq_p(&fib.fmb_addr);
        break;
    default:
        program_interrupt(&cpu->env, PGM_OPERAND, 6);
        cc = ZPCI_PCI_LS_ERR;
    }

    setcc(cpu, cc);
    return 0;
}

int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
{
    CPUS390XState *env = &cpu->env;
    uint8_t dmaas;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint32_t data;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    dmaas = (env->regs[r1] >> 16) & 0xff;

    if (dmaas) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_INVAL_DMAAS);
        return 0;
    }

    if (fiba & 0x7) {
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_idx(fh & FH_MASK_INDEX);
    if (!pbdev) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    memset(&fib, 0, sizeof(fib));

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_DISABLED:
        if (fh & FH_MASK_ENABLE) {
            setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
            return 0;
        }
        goto out;
    /* BLOCKED bit is set to one coincident with the setting of ERROR bit.
     * FH Enabled bit is set to one in states of ENABLED, BLOCKED or ERROR. */
    case ZPCI_FS_ERROR:
        fib.fc |= 0x20;
    case ZPCI_FS_BLOCKED:
        fib.fc |= 0x40;
    case ZPCI_FS_ENABLED:
        fib.fc |= 0x80;
        if (pbdev->iommu_enabled) {
            fib.fc |= 0x10;
        }
        if (!(fh & FH_MASK_ENABLE)) {
            env->regs[r1] |= 1ULL << 63;
        }
        break;
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_PERM_ERROR);
        return 0;
    }

    stq_p(&fib.pba, pbdev->pba);
    stq_p(&fib.pal, pbdev->pal);
    stq_p(&fib.iota, pbdev->g_iota);
    stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
    stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
    stq_p(&fib.fmb_addr, pbdev->fmb_addr);

    data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
           ((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
           ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
    stl_p(&fib.data, data);

out:
    if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        return 0;
    }

    setcc(cpu, cc);
    return 0;
}