/*
 * s390 PCI instructions
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "s390-pci-inst.h"
#include "s390-pci-bus.h"
#include <exec/memory-internal.h>
#include <qemu/error-report.h>

/* #define DEBUG_S390PCI_INST */
#ifdef DEBUG_S390PCI_INST
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* Place an 8-bit status code into bits 24-31 of general register r. */
static void s390_set_status_code(CPUS390XState *env,
                                 uint8_t r, uint64_t status_code)
{
    env->regs[r] &= ~0xff000000ULL;
    env->regs[r] |= (status_code & 0xff) << 24;
}

/*
 * CLP List PCI Functions: fill the response block with one entry per zPCI
 * function, resuming from resume_token if the caller passed one.
 */
static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
{
    S390PCIBusDevice *pbdev;
    uint32_t res_code, initial_l2, g_l2, finish;
    int rc, idx;
    uint64_t resume_token;

    rc = 0;
    if (lduw_p(&rrb->request.hdr.len) != 32) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
        res_code = CLP_RC_FMT;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
        ldq_p(&rrb->request.reserved1) != 0 ||
        ldq_p(&rrb->request.reserved2) != 0) {
        res_code = CLP_RC_RESNOT0;
        rc = -EINVAL;
        goto out;
    }

    resume_token = ldq_p(&rrb->request.resume_token);

    if (resume_token) {
        pbdev = s390_pci_find_dev_by_idx(resume_token);
        if (!pbdev) {
            res_code = CLP_RC_LISTPCI_BADRT;
            rc = -EINVAL;
            goto out;
        }
    }

    if (lduw_p(&rrb->response.hdr.len) < 48) {
        res_code = CLP_RC_8K;
        rc = -EINVAL;
        goto out;
    }

    initial_l2 = lduw_p(&rrb->response.hdr.len);
    if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry)
        != 0) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        *cc = 3;
        goto out;
    }

    stl_p(&rrb->response.fmt, 0);
    stq_p(&rrb->response.reserved1, 0);
    stq_p(&rrb->response.reserved2, 0);
    stl_p(&rrb->response.mdd, FH_VIRT);
    stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
    rrb->response.entry_size = sizeof(ClpFhListEntry);
    finish = 0;
    idx = resume_token;
    g_l2 = LIST_PCI_HDR_LEN;
    do {
        pbdev = s390_pci_find_dev_by_idx(idx);
        if (!pbdev) {
            finish = 1;
            break;
        }
        stw_p(&rrb->response.fh_list[idx - resume_token].device_id,
              pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
        stw_p(&rrb->response.fh_list[idx - resume_token].vendor_id,
              pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
        stl_p(&rrb->response.fh_list[idx - resume_token].config, 0x80000000);
        stl_p(&rrb->response.fh_list[idx - resume_token].fid, pbdev->fid);
        stl_p(&rrb->response.fh_list[idx - resume_token].fh, pbdev->fh);

        g_l2 += sizeof(ClpFhListEntry);
        /* Add endian check for DPRINTF? */
        DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n",
                g_l2,
                lduw_p(&rrb->response.fh_list[idx - resume_token].vendor_id),
                lduw_p(&rrb->response.fh_list[idx - resume_token].device_id),
                ldl_p(&rrb->response.fh_list[idx - resume_token].fid),
                ldl_p(&rrb->response.fh_list[idx - resume_token].fh));
        idx++;
    } while (g_l2 < initial_l2);

    if (finish == 1) {
        resume_token = 0;
    } else {
        resume_token = idx;
    }
    stq_p(&rrb->response.resume_token, resume_token);
    stw_p(&rrb->response.hdr.len, g_l2);
    stw_p(&rrb->response.hdr.rsp, CLP_RC_OK);
out:
    if (rc) {
        DPRINTF("list pci failed rc 0x%x\n", rc);
        stw_p(&rrb->response.hdr.rsp, res_code);
    }
    return rc;
}

/*
 * CLP service call: read the CLP request block from guest memory, dispatch
 * on the command code and write the response block back to the guest.
 */
int clp_service_call(S390CPU *cpu, uint8_t r2)
{
    ClpReqHdr *reqh;
    ClpRspHdr *resh;
    S390PCIBusDevice *pbdev;
    uint32_t req_len;
    uint32_t res_len;
    uint8_t buffer[4096 * 2];
    uint8_t cc = 0;
    CPUS390XState *env = &cpu->env;
    int i;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], buffer, sizeof(*reqh))) {
        return 0;
    }
    reqh = (ClpReqHdr *)buffer;
    req_len = lduw_p(&reqh->len);
    if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], buffer,
                               req_len + sizeof(*resh))) {
        return 0;
    }
    resh = (ClpRspHdr *)(buffer + req_len);
    res_len = lduw_p(&resh->len);
    if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }
    if ((req_len + res_len) > 8192) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], buffer,
                               req_len + res_len)) {
        return 0;
    }

    if (req_len != 32) {
        stw_p(&resh->rsp, CLP_RC_LEN);
        goto out;
    }

    switch (lduw_p(&reqh->cmd)) {
    case CLP_LIST_PCI: {
        ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
        list_pci(rrb, &cc);
        break;
    }
    case CLP_SET_PCI_FN: {
        ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
        ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqsetpci->fh));
        if (!pbdev) {
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        switch (reqsetpci->oc) {
        case CLP_SET_ENABLE_PCI_FN:
            pbdev->fh = pbdev->fh | 1 << ENABLE_BIT_OFFSET;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        case CLP_SET_DISABLE_PCI_FN:
            pbdev->fh = pbdev->fh & ~(1 << ENABLE_BIT_OFFSET);
            pbdev->error_state = false;
            pbdev->lgstg_blocked = false;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        default:
            DPRINTF("unknown set pci command\n");
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
            break;
        }
        break;
    }
    case CLP_QUERY_PCI_FN: {
        ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
        ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqquery->fh));
        if (!pbdev) {
            DPRINTF("query pci no pci dev\n");
            stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        for (i = 0; i < PCI_BAR_COUNT; i++) {
            uint32_t data = pci_get_long(pbdev->pdev->config +
                                         PCI_BASE_ADDRESS_0 + (i * 4));

            stl_p(&resquery->bar[i], data);
            resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
                                    ctz64(pbdev->pdev->io_regions[i].size) : 0;
            DPRINTF("bar %d addr 0x%x size 0x%" PRIx64 " barsize 0x%x\n", i,
                    ldl_p(&resquery->bar[i]),
                    pbdev->pdev->io_regions[i].size,
                    resquery->bar_size[i]);
        }

        stq_p(&resquery->sdma, ZPCI_SDMA_ADDR);
        stq_p(&resquery->edma, ZPCI_EDMA_ADDR);
        stw_p(&resquery->pchid, 0);
        stw_p(&resquery->ug, 1);
        stl_p(&resquery->uid, pbdev->fid);
        stw_p(&resquery->hdr.rsp, CLP_RC_OK);
        break;
    }
    case CLP_QUERY_PCI_FNGRP: {
        ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;
        resgrp->fr = 1;
        stq_p(&resgrp->dasm, 0);
        stq_p(&resgrp->msia, ZPCI_MSI_ADDR);
        stw_p(&resgrp->mui, 0);
        stw_p(&resgrp->i, 128);
        resgrp->version = 0;

        stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
        break;
    }
    default:
        DPRINTF("unknown clp command\n");
        stw_p(&resh->rsp, CLP_RC_CMD);
        break;
    }

out:
    if (s390_cpu_virt_mem_write(cpu, env->regs[r2], buffer,
                                req_len + res_len)) {
        return 0;
    }
    setcc(cpu, cc);
    return 0;
}

/*
 * PCI Load (PCILG): read up to 8 bytes from a BAR space or from the
 * configuration space of the function addressed by the handle in r2,
 * returning the data in r1.
 */
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    uint64_t offset;
    uint64_t data;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("pcilg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (pbdev->lgstg_blocked) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    }

    if (pcias < 6) {
        if ((8 - (offset & 0x7)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        MemoryRegion *mr = pbdev->pdev->io_regions[pcias].memory;
        memory_region_dispatch_read(mr, offset, &data, len,
                                    MEMTXATTRS_UNSPECIFIED);
    } else if (pcias == 15) {
        if ((4 - (offset & 0x3)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        data = pci_host_config_read_common(
                   pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);

        switch (len) {
        case 1:
            break;
        case 2:
            data = bswap16(data);
            break;
        case 4:
            data = bswap32(data);
            break;
        case 8:
            data = bswap64(data);
            break;
        default:
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
    } else {
        DPRINTF("invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    env->regs[r1] = data;
    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

/*
 * When the guest writes the message-data word of an MSI-X table entry,
 * fold the function id into the value so the resulting interrupt can be
 * mapped back to this device.
 */
static void update_msix_table_msg_data(S390PCIBusDevice *pbdev, uint64_t offset,
                                       uint64_t *data, uint8_t len)
{
    uint32_t val;
    uint8_t *msg_data;

    if (offset % PCI_MSIX_ENTRY_SIZE != 8) {
        return;
    }

    if (len != 4) {
        DPRINTF("access msix table msg data but len is %d\n", len);
        return;
    }

    msg_data = (uint8_t *)data - offset % PCI_MSIX_ENTRY_SIZE +
               PCI_MSIX_ENTRY_VECTOR_CTRL;
    val = pci_get_long(msg_data) | (pbdev->fid << ZPCI_MSI_VEC_BITS);
    pci_set_long(msg_data, val);
    DPRINTF("update msix msg_data to 0x%" PRIx64 "\n", *data);
}

/* Return 1 if the store targets the MSI-X table of the function. */
static int trap_msix(S390PCIBusDevice *pbdev, uint64_t offset, uint8_t pcias)
{
    if (pbdev->msix.available && pbdev->msix.table_bar == pcias &&
        offset >= pbdev->msix.table_offset &&
        offset <= pbdev->msix.table_offset +
        (pbdev->msix.entries - 1) * PCI_MSIX_ENTRY_SIZE) {
        return 1;
    } else {
        return 0;
    }
}

/*
 * PCI Store (PCISTG): write up to 8 bytes from r1 to a BAR space or to the
 * configuration space of the function addressed by the handle in r2.
 */
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    uint64_t offset, data;
    S390PCIBusDevice *pbdev;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("pcistg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (pbdev->lgstg_blocked) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    }

    data = env->regs[r1];
    if (pcias < 6) {
        if ((8 - (offset & 0x7)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        MemoryRegion *mr;
        if (trap_msix(pbdev, offset, pcias)) {
            offset = offset - pbdev->msix.table_offset;
            mr = &pbdev->pdev->msix_table_mmio;
            update_msix_table_msg_data(pbdev, offset, &data, len);
        } else {
            mr = pbdev->pdev->io_regions[pcias].memory;
        }

        memory_region_dispatch_write(mr, offset, data, len,
                                     MEMTXATTRS_UNSPECIFIED);
    } else if (pcias == 15) {
        if ((4 - (offset & 0x3)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        switch (len) {
        case 1:
            break;
        case 2:
            data = bswap16(data);
            break;
        case 4:
            data = bswap32(data);
            break;
        case 8:
            data = bswap64(data);
            break;
        default:
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }

        pci_host_config_write_common(pbdev->pdev, offset,
                                     pci_config_size(pbdev->pdev),
                                     data, len);
    } else {
        DPRINTF("pcistg invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

/*
 * Refresh PCI Translations (RPCIT): walk the guest DMA range in r2/r2+1 and
 * replay the IOMMU translation for each page so that mappings the guest
 * updated in its translation tables take effect.
 */
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    uint32_t fh;
    S390PCIBusDevice *pbdev;
    hwaddr start, end;
    IOMMUTLBEntry entry;
    MemoryRegion *mr;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        goto out;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        goto out;
    }

    fh = env->regs[r1] >> 32;
    start = env->regs[r2];
    end = start + env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);

    if (!pbdev) {
        DPRINTF("rpcit no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        goto out;
    }

    mr = pci_device_iommu_address_space(pbdev->pdev)->root;
    while (start < end) {
        entry = mr->iommu_ops->translate(mr, start, 0);

        if (!entry.translated_addr) {
            setcc(cpu, ZPCI_PCI_LS_ERR);
            goto out;
        }

        memory_region_notify_iommu(mr, entry);
        start += entry.addr_mask + 1;
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
out:
    return 0;
}

/*
 * PCI Store Block (PCISTB): copy a block of 16, 32, 64 or 128 bytes from
 * guest memory to a BAR space of the function addressed by the handle in r1.
 */
int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    MemoryRegion *mr;
    int i;
    uint32_t fh;
    uint8_t pcias;
    uint8_t len;
    uint8_t buffer[128];

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    pcias = (env->regs[r1] >> 16) & 0xf;
    len = env->regs[r1] & 0xff;

    if (pcias > 5) {
        DPRINTF("pcistb invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    switch (len) {
    case 16:
    case 32:
    case 64:
    case 128:
        break;
    default:
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (pbdev->lgstg_blocked) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
        return 0;
    }

    mr = pbdev->pdev->io_regions[pcias].memory;
    if (!memory_region_access_valid(mr, env->regs[r3], len, true)) {
        program_interrupt(env, PGM_ADDRESSING, 6);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, gaddr, buffer, len)) {
        return 0;
    }

    for (i = 0; i < len / 8; i++) {
        memory_region_dispatch_write(mr, env->regs[r3] + i * 8,
                                     ldq_p(buffer + i * 8), 8,
                                     MEMTXATTRS_UNSPECIFIED);
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

/* Register the adapter interrupt bit vectors described by the FIB. */
static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    int ret;
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);

    ret = css_register_io_adapter(S390_PCIPT_ADAPTER,
                                  FIB_DATA_ISC(ldl_p(&fib.data)), true, false,
                                  &pbdev->routes.adapter.adapter_id);
    assert(ret == 0);

    fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
                        ldq_p(&fib.aisb), true);
    fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
                        ldq_p(&fib.aibv), true);

    pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb);
    pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data));
    pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv);
    pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data));
    pbdev->isc = FIB_DATA_ISC(ldl_p(&fib.data));
    pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data));
    pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data));

    DPRINTF("reg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
}

/* Unmap the adapter interrupt indicators and clear the routing state. */
static int dereg_irqs(S390PCIBusDevice *pbdev)
{
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);

    fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
                        pbdev->routes.adapter.ind_addr, false);

    pbdev->routes.adapter.summary_addr = 0;
    pbdev->routes.adapter.summary_offset = 0;
    pbdev->routes.adapter.ind_addr = 0;
    pbdev->routes.adapter.ind_offset = 0;
    pbdev->isc = 0;
    pbdev->noi = 0;
    pbdev->sum = 0;

    DPRINTF("dereg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
}

/*
 * Register the I/O address translation parameters (guest IOTA and the PCI
 * base/limit addresses) taken from the FIB.
 */
static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    uint64_t pba = ldq_p(&fib.pba);
    uint64_t pal = ldq_p(&fib.pal);
    uint64_t g_iota = ldq_p(&fib.iota);
    uint8_t dt = (g_iota >> 2) & 0x7;
    uint8_t t = (g_iota >> 11) & 0x1;

    if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) {
        program_interrupt(env, PGM_OPERAND, 6);
        return -EINVAL;
    }

    /* currently we only support designation type 1 with translation */
    if (!(dt == ZPCI_IOTA_RTTO && t)) {
        error_report("unsupported ioat dt %d t %d", dt, t);
        program_interrupt(env, PGM_OPERAND, 6);
        return -EINVAL;
    }

    pbdev->pba = pba;
    pbdev->pal = pal;
    pbdev->g_iota = g_iota;
    return 0;
}

static void dereg_ioat(S390PCIBusDevice *pbdev)
{
    pbdev->pba = 0;
    pbdev->pal = 0;
    pbdev->g_iota = 0;
}

/*
 * Modify PCI Function Controls (MPCIFC): apply the operation control in r1
 * to the function, using the FIB at fiba as the operand.
 */
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba)
{
    CPUS390XState *env = &cpu->env;
    uint8_t oc;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    oc = env->regs[r1] & 0xff;
    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("mpcifc no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, fiba, (uint8_t *)&fib, sizeof(fib))) {
        return 0;
    }

    switch (oc) {
    case ZPCI_MOD_FC_REG_INT:
        if (reg_irqs(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
        }
        break;
    case ZPCI_MOD_FC_DEREG_INT:
        dereg_irqs(pbdev);
        break;
    case ZPCI_MOD_FC_REG_IOAT:
        if (reg_ioat(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
        }
        break;
    case ZPCI_MOD_FC_DEREG_IOAT:
        dereg_ioat(pbdev);
        break;
    case ZPCI_MOD_FC_REREG_IOAT:
        dereg_ioat(pbdev);
        if (reg_ioat(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
        }
        break;
    case ZPCI_MOD_FC_RESET_ERROR:
        pbdev->error_state = false;
        pbdev->lgstg_blocked = false;
        break;
    case ZPCI_MOD_FC_RESET_BLOCK:
        pbdev->lgstg_blocked = false;
        break;
    case ZPCI_MOD_FC_SET_MEASURE:
        pbdev->fmb_addr = ldq_p(&fib.fmb_addr);
        break;
    default:
        program_interrupt(env, PGM_OPERAND, 6);
        cc = ZPCI_PCI_LS_ERR;
    }

    setcc(cpu, cc);
    return 0;
}

/*
 * Store PCI Function Controls (STPCIFC): build a FIB describing the current
 * state of the function and store it at fiba.
 */
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba)
{
    CPUS390XState *env = &cpu->env;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint32_t data;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    memset(&fib, 0, sizeof(fib));
    stq_p(&fib.pba, pbdev->pba);
    stq_p(&fib.pal, pbdev->pal);
    stq_p(&fib.iota, pbdev->g_iota);
    stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
    stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
    stq_p(&fib.fmb_addr, pbdev->fmb_addr);

    data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
           ((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
           ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
    stl_p(&fib.data, data);

    if (pbdev->fh >> ENABLE_BIT_OFFSET) {
        fib.fc |= 0x80;
    }

    if (pbdev->error_state) {
        fib.fc |= 0x40;
    }

    if (pbdev->lgstg_blocked) {
        fib.fc |= 0x20;
    }

    if (pbdev->g_iota) {
        fib.fc |= 0x10;
    }

    if (s390_cpu_virt_mem_write(cpu, fiba, (uint8_t *)&fib, sizeof(fib))) {
        return 0;
    }

    setcc(cpu, cc);
    return 0;
}