/*
 * s390 PCI instructions
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "s390-pci-inst.h"
#include "s390-pci-bus.h"
#include <exec/memory-internal.h>
#include <qemu/error-report.h>

/* #define DEBUG_S390PCI_INST */
#ifdef DEBUG_S390PCI_INST
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

static void s390_set_status_code(CPUS390XState *env,
                                 uint8_t r, uint64_t status_code)
{
    env->regs[r] &= ~0xff000000ULL;
    env->regs[r] |= (status_code & 0xff) << 24;
}

static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
{
    S390PCIBusDevice *pbdev;
    uint32_t res_code, initial_l2, g_l2, finish;
    int rc, idx;
    uint64_t resume_token;

    rc = 0;
    if (lduw_p(&rrb->request.hdr.len) != 32) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
        res_code = CLP_RC_FMT;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
        ldq_p(&rrb->request.reserved1) != 0 ||
        ldq_p(&rrb->request.reserved2) != 0) {
        res_code = CLP_RC_RESNOT0;
        rc = -EINVAL;
        goto out;
    }

    resume_token = ldq_p(&rrb->request.resume_token);

    if (resume_token) {
        pbdev = s390_pci_find_dev_by_idx(resume_token);
        if (!pbdev) {
            res_code = CLP_RC_LISTPCI_BADRT;
            rc = -EINVAL;
            goto out;
        }
    }

    if (lduw_p(&rrb->response.hdr.len) < 48) {
        res_code = CLP_RC_8K;
        rc = -EINVAL;
        goto out;
    }

    initial_l2 = lduw_p(&rrb->response.hdr.len);
    if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry)
        != 0) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        *cc = 3;
        goto out;
    }

    stl_p(&rrb->response.fmt, 0);
    stq_p(&rrb->response.reserved1, 0);
    stq_p(&rrb->response.reserved2, 0);
    stl_p(&rrb->response.mdd, FH_VIRT);
    stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
    rrb->response.entry_size = sizeof(ClpFhListEntry);
    finish = 0;
    idx = resume_token;
    g_l2 = LIST_PCI_HDR_LEN;
    do {
        pbdev = s390_pci_find_dev_by_idx(idx);
        if (!pbdev) {
            finish = 1;
            break;
        }
        stw_p(&rrb->response.fh_list[idx - resume_token].device_id,
            pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
        stw_p(&rrb->response.fh_list[idx - resume_token].vendor_id,
            pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
        stl_p(&rrb->response.fh_list[idx - resume_token].config,
            pbdev->configured << 31);
        stl_p(&rrb->response.fh_list[idx - resume_token].fid, pbdev->fid);
        stl_p(&rrb->response.fh_list[idx - resume_token].fh, pbdev->fh);

        g_l2 += sizeof(ClpFhListEntry);
        /* Add endian check for DPRINTF? */
        DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n",
            g_l2,
            lduw_p(&rrb->response.fh_list[idx - resume_token].vendor_id),
            lduw_p(&rrb->response.fh_list[idx - resume_token].device_id),
            ldl_p(&rrb->response.fh_list[idx - resume_token].fid),
            ldl_p(&rrb->response.fh_list[idx - resume_token].fh));
        idx++;
    } while (g_l2 < initial_l2);

    if (finish == 1) {
        resume_token = 0;
    } else {
        resume_token = idx;
    }
    stq_p(&rrb->response.resume_token, resume_token);
    stw_p(&rrb->response.hdr.len, g_l2);
    stw_p(&rrb->response.hdr.rsp, CLP_RC_OK);
out:
    if (rc) {
        DPRINTF("list pci failed rc 0x%x\n", rc);
        stw_p(&rrb->response.hdr.rsp, res_code);
    }
    return rc;
}

int clp_service_call(S390CPU *cpu, uint8_t r2)
{
    ClpReqHdr *reqh;
    ClpRspHdr *resh;
    S390PCIBusDevice *pbdev;
    uint32_t req_len;
    uint32_t res_len;
    uint8_t buffer[4096 * 2];
    uint8_t cc = 0;
    CPUS390XState *env = &cpu->env;
    int i;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) {
        return 0;
    }
    reqh = (ClpReqHdr *)buffer;
    req_len = lduw_p(&reqh->len);
    if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + sizeof(*resh))) {
        return 0;
    }
    resh = (ClpRspHdr *)(buffer + req_len);
    res_len = lduw_p(&resh->len);
    if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }
    if ((req_len + res_len) > 8192) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + res_len)) {
        return 0;
    }

    if (req_len != 32) {
        stw_p(&resh->rsp, CLP_RC_LEN);
        goto out;
    }

    switch (lduw_p(&reqh->cmd)) {
    case CLP_LIST_PCI: {
        ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
        list_pci(rrb, &cc);
        break;
    }
    case CLP_SET_PCI_FN: {
        ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
        ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqsetpci->fh));
        if (!pbdev) {
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        switch (reqsetpci->oc) {
        case CLP_SET_ENABLE_PCI_FN:
            pbdev->fh = pbdev->fh | FH_ENABLED;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        case CLP_SET_DISABLE_PCI_FN:
            pbdev->fh = pbdev->fh & ~FH_ENABLED;
            pbdev->error_state = false;
            pbdev->lgstg_blocked = false;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        default:
            DPRINTF("unknown set pci command\n");
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
            break;
        }
        break;
    }
    case CLP_QUERY_PCI_FN: {
        ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
        ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqquery->fh));
        if (!pbdev) {
            DPRINTF("query pci no pci dev\n");
            stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        for (i = 0; i < PCI_BAR_COUNT; i++) {
            uint32_t data = pci_get_long(pbdev->pdev->config +
                PCI_BASE_ADDRESS_0 + (i * 4));

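            /*
             * The raw BAR value is reported back to the guest unchanged;
             * bar_size holds log2 of the region size (0 for an
             * unimplemented BAR).
             */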
            stl_p(&resquery->bar[i], data);
            resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
                                    ctz64(pbdev->pdev->io_regions[i].size) : 0;
            DPRINTF("bar %d addr 0x%x size 0x%" PRIx64 " barsize 0x%x\n", i,
                    ldl_p(&resquery->bar[i]),
                    pbdev->pdev->io_regions[i].size,
                    resquery->bar_size[i]);
        }

        stq_p(&resquery->sdma, ZPCI_SDMA_ADDR);
        stq_p(&resquery->edma, ZPCI_EDMA_ADDR);
        stw_p(&resquery->pchid, 0);
        stw_p(&resquery->ug, 1);
        stl_p(&resquery->uid, pbdev->fid);
        stw_p(&resquery->hdr.rsp, CLP_RC_OK);
        break;
    }
    case CLP_QUERY_PCI_FNGRP: {
        ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;
        resgrp->fr = 1;
        stq_p(&resgrp->dasm, 0);
        stq_p(&resgrp->msia, ZPCI_MSI_ADDR);
        stw_p(&resgrp->mui, 0);
        stw_p(&resgrp->i, 128);
        resgrp->version = 0;

        stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
        break;
    }
    default:
        DPRINTF("unknown clp command\n");
        stw_p(&resh->rsp, CLP_RC_CMD);
        break;
    }

out:
    if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer,
                                req_len + res_len)) {
        return 0;
    }
    setcc(cpu, cc);
    return 0;
}

int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    uint64_t offset;
    uint64_t data;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev || !(pbdev->fh & FH_ENABLED)) {
        DPRINTF("pcilg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (pbdev->lgstg_blocked) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    }

    if (pcias < 6) {
        if ((8 - (offset & 0x7)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        MemoryRegion *mr = pbdev->pdev->io_regions[pcias].memory;
        memory_region_dispatch_read(mr, offset, &data, len,
                                    MEMTXATTRS_UNSPECIFIED);
    } else if (pcias == 15) {
        if ((4 - (offset & 0x3)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        data = pci_host_config_read_common(
                   pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);

        switch (len) {
        case 1:
            break;
        case 2:
            data = bswap16(data);
            break;
        case 4:
            data = bswap32(data);
            break;
        case 8:
            data = bswap64(data);
            break;
        default:
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
    } else {
        DPRINTF("invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    env->regs[r1] = data;
    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

static void update_msix_table_msg_data(S390PCIBusDevice *pbdev, uint64_t offset,
                                       uint64_t *data, uint8_t len)
{
    uint32_t val;
    uint8_t *msg_data;

    if (offset % PCI_MSIX_ENTRY_SIZE != 8) {
        return;
    }

    if (len != 4) {
        DPRINTF("access msix table msg data but len is %d\n", len);
        return;
    }

    msg_data = (uint8_t *)data - offset % PCI_MSIX_ENTRY_SIZE +
               PCI_MSIX_ENTRY_VECTOR_CTRL;
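    /*
     * offset % PCI_MSIX_ENTRY_SIZE was checked to be 8 (the message data
     * word) above, so msg_data ends up pointing 4 bytes past the start of
     * the 64-bit data value passed in by the caller.
     */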
    val = pci_get_long(msg_data) | (pbdev->fid << ZPCI_MSI_VEC_BITS);
    pci_set_long(msg_data, val);
    DPRINTF("update msix msg_data to 0x%" PRIx64 "\n", *data);
}

static int trap_msix(S390PCIBusDevice *pbdev, uint64_t offset, uint8_t pcias)
{
    if (pbdev->msix.available && pbdev->msix.table_bar == pcias &&
        offset >= pbdev->msix.table_offset &&
        offset <= pbdev->msix.table_offset +
                  (pbdev->msix.entries - 1) * PCI_MSIX_ENTRY_SIZE) {
        return 1;
    } else {
        return 0;
    }
}

int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    uint64_t offset, data;
    S390PCIBusDevice *pbdev;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev || !(pbdev->fh & FH_ENABLED)) {
        DPRINTF("pcistg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (pbdev->lgstg_blocked) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    }

    data = env->regs[r1];
    if (pcias < 6) {
        if ((8 - (offset & 0x7)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        MemoryRegion *mr;
        if (trap_msix(pbdev, offset, pcias)) {
            offset = offset - pbdev->msix.table_offset;
            mr = &pbdev->pdev->msix_table_mmio;
            update_msix_table_msg_data(pbdev, offset, &data, len);
        } else {
            mr = pbdev->pdev->io_regions[pcias].memory;
        }

        memory_region_dispatch_write(mr, offset, data, len,
                                     MEMTXATTRS_UNSPECIFIED);
    } else if (pcias == 15) {
        if ((4 - (offset & 0x3)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        switch (len) {
        case 1:
            break;
        case 2:
            data = bswap16(data);
            break;
        case 4:
            data = bswap32(data);
            break;
        case 8:
            data = bswap64(data);
            break;
        default:
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }

        pci_host_config_write_common(pbdev->pdev, offset,
                                     pci_config_size(pbdev->pdev),
                                     data, len);
    } else {
        DPRINTF("pcistg invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    uint32_t fh;
    S390PCIBusDevice *pbdev;
    hwaddr start, end;
    IOMMUTLBEntry entry;
    MemoryRegion *mr;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        goto out;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        goto out;
    }

    fh = env->regs[r1] >> 32;
    start = env->regs[r2];
    end = start + env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev || !(pbdev->fh & FH_ENABLED)) {
        DPRINTF("rpcit no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        goto out;
    }

    mr = &pbdev->iommu_mr;
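    /*
     * Walk the guest DMA range and replay each translation through the
     * IOMMU notifiers, so any listener on this region can refresh its
     * mapping for the device.
     */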
    while (start < end) {
        entry = mr->iommu_ops->translate(mr, start, 0);

        if (!entry.translated_addr) {
            setcc(cpu, ZPCI_PCI_LS_ERR);
            goto out;
        }

        memory_region_notify_iommu(mr, entry);
        start += entry.addr_mask + 1;
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
out:
    return 0;
}

int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
                        uint8_t ar)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    MemoryRegion *mr;
    int i;
    uint32_t fh;
    uint8_t pcias;
    uint8_t len;
    uint8_t buffer[128];

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    pcias = (env->regs[r1] >> 16) & 0xf;
    len = env->regs[r1] & 0xff;

    if (pcias > 5) {
        DPRINTF("pcistb invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    switch (len) {
    case 16:
    case 32:
    case 64:
    case 128:
        break;
    default:
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev || !(pbdev->fh & FH_ENABLED)) {
        DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (pbdev->lgstg_blocked) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
        return 0;
    }

    mr = pbdev->pdev->io_regions[pcias].memory;
    if (!memory_region_access_valid(mr, env->regs[r3], len, true)) {
        program_interrupt(env, PGM_ADDRESSING, 6);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
        return 0;
    }

    for (i = 0; i < len / 8; i++) {
        memory_region_dispatch_write(mr, env->regs[r3] + i * 8,
                                     ldq_p(buffer + i * 8), 8,
                                     MEMTXATTRS_UNSPECIFIED);
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    int ret;
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);

    ret = css_register_io_adapter(S390_PCIPT_ADAPTER,
                                  FIB_DATA_ISC(ldl_p(&fib.data)), true, false,
                                  &pbdev->routes.adapter.adapter_id);
    assert(ret == 0);

    fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
                        ldq_p(&fib.aisb), true);
    fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
                        ldq_p(&fib.aibv), true);

    pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb);
    pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data));
    pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv);
    pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data));
    pbdev->isc = FIB_DATA_ISC(ldl_p(&fib.data));
    pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data));
    pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data));

    DPRINTF("reg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
}

static int dereg_irqs(S390PCIBusDevice *pbdev)
{
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);

    fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
                        pbdev->routes.adapter.ind_addr, false);

    pbdev->routes.adapter.summary_addr = 0;
    pbdev->routes.adapter.summary_offset = 0;
    pbdev->routes.adapter.ind_addr = 0;
    pbdev->routes.adapter.ind_offset = 0;
    pbdev->isc = 0;
    pbdev->noi = 0;
    pbdev->sum = 0;

    DPRINTF("dereg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
}

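/*
 * Register the I/O address translation parameters taken from the FIB:
 * the PCI base/limit addresses and the guest I/O translation anchor.
 */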
static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    uint64_t pba = ldq_p(&fib.pba);
    uint64_t pal = ldq_p(&fib.pal);
    uint64_t g_iota = ldq_p(&fib.iota);
    uint8_t dt = (g_iota >> 2) & 0x7;
    uint8_t t = (g_iota >> 11) & 0x1;

    if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) {
        program_interrupt(env, PGM_OPERAND, 6);
        return -EINVAL;
    }

    /* currently we only support designation type 1 with translation */
    if (!(dt == ZPCI_IOTA_RTTO && t)) {
        error_report("unsupported ioat dt %d t %d", dt, t);
        program_interrupt(env, PGM_OPERAND, 6);
        return -EINVAL;
    }

    pbdev->pba = pba;
    pbdev->pal = pal;
    pbdev->g_iota = g_iota;

    s390_pcihost_iommu_configure(pbdev, true);

    return 0;
}

static void dereg_ioat(S390PCIBusDevice *pbdev)
{
    pbdev->pba = 0;
    pbdev->pal = 0;
    pbdev->g_iota = 0;

    s390_pcihost_iommu_configure(pbdev, false);
}

int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
{
    CPUS390XState *env = &cpu->env;
    uint8_t oc;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    oc = env->regs[r1] & 0xff;
    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev || !(pbdev->fh & FH_ENABLED)) {
        DPRINTF("mpcifc no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        return 0;
    }

    switch (oc) {
    case ZPCI_MOD_FC_REG_INT:
        if (reg_irqs(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
        }
        break;
    case ZPCI_MOD_FC_DEREG_INT:
        dereg_irqs(pbdev);
        break;
    case ZPCI_MOD_FC_REG_IOAT:
        if (reg_ioat(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
        }
        break;
    case ZPCI_MOD_FC_DEREG_IOAT:
        dereg_ioat(pbdev);
        break;
    case ZPCI_MOD_FC_REREG_IOAT:
        dereg_ioat(pbdev);
        if (reg_ioat(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
        }
        break;
    case ZPCI_MOD_FC_RESET_ERROR:
        pbdev->error_state = false;
        pbdev->lgstg_blocked = false;
        break;
    case ZPCI_MOD_FC_RESET_BLOCK:
        pbdev->lgstg_blocked = false;
        break;
    case ZPCI_MOD_FC_SET_MEASURE:
        pbdev->fmb_addr = ldq_p(&fib.fmb_addr);
        break;
    default:
        program_interrupt(env, PGM_OPERAND, 6);
        cc = ZPCI_PCI_LS_ERR;
    }

    setcc(cpu, cc);
    return 0;
}

int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint32_t data;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    memset(&fib, 0, sizeof(fib));
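    /*
     * Rebuild the function information block from the current device state
     * before copying it back to the guest.
     */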
    stq_p(&fib.pba, pbdev->pba);
    stq_p(&fib.pal, pbdev->pal);
    stq_p(&fib.iota, pbdev->g_iota);
    stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
    stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
    stq_p(&fib.fmb_addr, pbdev->fmb_addr);

    data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
           ((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
           ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
    stl_p(&fib.data, data);

    if (pbdev->fh & FH_ENABLED) {
        fib.fc |= 0x80;
    }

    if (pbdev->error_state) {
        fib.fc |= 0x40;
    }

    if (pbdev->lgstg_blocked) {
        fib.fc |= 0x20;
    }

    if (pbdev->g_iota) {
        fib.fc |= 0x10;
    }

    if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        return 0;
    }

    setcc(cpu, cc);
    return 0;
}