xref: /openbmc/qemu/hw/s390x/s390-pci-inst.c (revision 30b6852c)
1 /*
2  * s390 PCI instructions
3  *
4  * Copyright 2014 IBM Corp.
5  * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
6  *            Hong Bo Li <lihbbj@cn.ibm.com>
7  *            Yi Min Zhao <zyimin@cn.ibm.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or (at
10  * your option) any later version. See the COPYING file in the top-level
11  * directory.
12  */
13 
14 #include "qemu/osdep.h"
15 #include "exec/memop.h"
16 #include "exec/memory-internal.h"
17 #include "qemu/error-report.h"
18 #include "sysemu/hw_accel.h"
19 #include "hw/s390x/s390-pci-inst.h"
20 #include "hw/s390x/s390-pci-bus.h"
21 #include "hw/s390x/tod.h"
22 
23 #ifndef DEBUG_S390PCI_INST
24 #define DEBUG_S390PCI_INST  0
25 #endif
26 
27 #define DPRINTF(fmt, ...)                                          \
28     do {                                                           \
29         if (DEBUG_S390PCI_INST) {                                  \
30             fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__); \
31         }                                                          \
32     } while (0)
33 
34 static inline void inc_dma_avail(S390PCIIOMMU *iommu)
35 {
36     if (iommu->dma_limit) {
37         iommu->dma_limit->avail++;
38     }
39 }
40 
41 static inline void dec_dma_avail(S390PCIIOMMU *iommu)
42 {
43     if (iommu->dma_limit) {
44         iommu->dma_limit->avail--;
45     }
46 }
47 
48 static void s390_set_status_code(CPUS390XState *env,
49                                  uint8_t r, uint64_t status_code)
50 {
51     env->regs[r] &= ~0xff000000ULL;
52     env->regs[r] |= (status_code & 0xff) << 24;
53 }
54 
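/*
 * CLP List PCI: fill the response block with one ClpFhListEntry per
 * available zPCI function, starting either from the first function or from
 * the one identified by the guest-supplied resume token.  On success the
 * header response code is CLP_RC_OK; otherwise it describes the error.
 */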
55 static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
56 {
57     S390PCIBusDevice *pbdev = NULL;
58     S390pciState *s = s390_get_phb();
59     uint32_t res_code, initial_l2, g_l2;
60     int rc, i;
61     uint64_t resume_token;
62 
63     rc = 0;
64     if (lduw_p(&rrb->request.hdr.len) != 32) {
65         res_code = CLP_RC_LEN;
66         rc = -EINVAL;
67         goto out;
68     }
69 
70     if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
71         res_code = CLP_RC_FMT;
72         rc = -EINVAL;
73         goto out;
74     }
75 
76     if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
77         ldq_p(&rrb->request.reserved1) != 0) {
78         res_code = CLP_RC_RESNOT0;
79         rc = -EINVAL;
80         goto out;
81     }
82 
83     resume_token = ldq_p(&rrb->request.resume_token);
84 
85     if (resume_token) {
86         pbdev = s390_pci_find_dev_by_idx(s, resume_token);
87         if (!pbdev) {
88             res_code = CLP_RC_LISTPCI_BADRT;
89             rc = -EINVAL;
90             goto out;
91         }
92     } else {
93         pbdev = s390_pci_find_next_avail_dev(s, NULL);
94     }
95 
96     if (lduw_p(&rrb->response.hdr.len) < 48) {
97         res_code = CLP_RC_8K;
98         rc = -EINVAL;
99         goto out;
100     }
101 
102     initial_l2 = lduw_p(&rrb->response.hdr.len);
103     if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry)
104         != 0) {
105         res_code = CLP_RC_LEN;
106         rc = -EINVAL;
107         *cc = 3;
108         goto out;
109     }
110 
111     stl_p(&rrb->response.fmt, 0);
112     stq_p(&rrb->response.reserved1, 0);
113     stl_p(&rrb->response.mdd, FH_MASK_SHM);
114     stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
115     rrb->response.flags = UID_CHECKING_ENABLED;
116     rrb->response.entry_size = sizeof(ClpFhListEntry);
117 
118     i = 0;
119     g_l2 = LIST_PCI_HDR_LEN;
120     while (g_l2 < initial_l2 && pbdev) {
121         stw_p(&rrb->response.fh_list[i].device_id,
122             pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
123         stw_p(&rrb->response.fh_list[i].vendor_id,
124             pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
125         /* Ignore RESERVED devices. */
126         stl_p(&rrb->response.fh_list[i].config,
127             pbdev->state == ZPCI_FS_STANDBY ? 0 : 1 << 31);
128         stl_p(&rrb->response.fh_list[i].fid, pbdev->fid);
129         stl_p(&rrb->response.fh_list[i].fh, pbdev->fh);
130 
131         g_l2 += sizeof(ClpFhListEntry);
132         /* The entries above were stored big-endian; reload them for DPRINTF */
133         DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n",
134                 g_l2,
135                 lduw_p(&rrb->response.fh_list[i].vendor_id),
136                 lduw_p(&rrb->response.fh_list[i].device_id),
137                 ldl_p(&rrb->response.fh_list[i].fid),
138                 ldl_p(&rrb->response.fh_list[i].fh));
139         pbdev = s390_pci_find_next_avail_dev(s, pbdev);
140         i++;
141     }
142 
143     if (!pbdev) {
144         resume_token = 0;
145     } else {
146         resume_token = pbdev->fh & FH_MASK_INDEX;
147     }
148     stq_p(&rrb->response.resume_token, resume_token);
149     stw_p(&rrb->response.hdr.len, g_l2);
150     stw_p(&rrb->response.hdr.rsp, CLP_RC_OK);
151 out:
152     if (rc) {
153         DPRINTF("list pci failed rc 0x%x\n", rc);
154         stw_p(&rrb->response.hdr.rsp, res_code);
155     }
156     return rc;
157 }
158 
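/*
 * Handler for the CLP (Call Logical Processor) instruction.  The request
 * and response blocks are read from guest memory at the address in r2, the
 * request is dispatched on its command code (list / set / query function /
 * query function group), and the updated response is written back.
 */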
159 int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
160 {
161     ClpReqHdr *reqh;
162     ClpRspHdr *resh;
163     S390PCIBusDevice *pbdev;
164     uint32_t req_len;
165     uint32_t res_len;
166     uint8_t buffer[4096 * 2];
167     uint8_t cc = 0;
168     CPUS390XState *env = &cpu->env;
169     S390pciState *s = s390_get_phb();
170     int i;
171 
172     if (env->psw.mask & PSW_MASK_PSTATE) {
173         s390_program_interrupt(env, PGM_PRIVILEGED, ra);
174         return 0;
175     }
176 
177     if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) {
178         s390_cpu_virt_mem_handle_exc(cpu, ra);
179         return 0;
180     }
181     reqh = (ClpReqHdr *)buffer;
182     req_len = lduw_p(&reqh->len);
183     if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
184         s390_program_interrupt(env, PGM_OPERAND, ra);
185         return 0;
186     }
187 
188     if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
189                                req_len + sizeof(*resh))) {
190         s390_cpu_virt_mem_handle_exc(cpu, ra);
191         return 0;
192     }
193     resh = (ClpRspHdr *)(buffer + req_len);
194     res_len = lduw_p(&resh->len);
195     if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
196         s390_program_interrupt(env, PGM_OPERAND, ra);
197         return 0;
198     }
199     if ((req_len + res_len) > 8192) {
200         s390_program_interrupt(env, PGM_OPERAND, ra);
201         return 0;
202     }
203 
204     if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
205                                req_len + res_len)) {
206         s390_cpu_virt_mem_handle_exc(cpu, ra);
207         return 0;
208     }
209 
210     if (req_len != 32) {
211         stw_p(&resh->rsp, CLP_RC_LEN);
212         goto out;
213     }
214 
215     switch (lduw_p(&reqh->cmd)) {
216     case CLP_LIST_PCI: {
217         ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
218         list_pci(rrb, &cc);
219         break;
220     }
221     case CLP_SET_PCI_FN: {
222         ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
223         ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;
224 
225         pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqsetpci->fh));
226         if (!pbdev) {
227             stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
228             goto out;
229         }
230 
231         switch (reqsetpci->oc) {
232         case CLP_SET_ENABLE_PCI_FN:
233             switch (reqsetpci->ndas) {
234             case 0:
235                 stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_DMAAS);
236                 goto out;
237             case 1:
238                 break;
239             default:
240                 stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_RES);
241                 goto out;
242             }
243 
244             if (pbdev->fh & FH_MASK_ENABLE) {
245                 stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
246                 goto out;
247             }
248 
249             pbdev->fh |= FH_MASK_ENABLE;
250             pbdev->state = ZPCI_FS_ENABLED;
251             stl_p(&ressetpci->fh, pbdev->fh);
252             stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
253             break;
254         case CLP_SET_DISABLE_PCI_FN:
255             if (!(pbdev->fh & FH_MASK_ENABLE)) {
256                 stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
257                 goto out;
258             }
259             device_legacy_reset(DEVICE(pbdev));
260             pbdev->fh &= ~FH_MASK_ENABLE;
261             pbdev->state = ZPCI_FS_DISABLED;
262             stl_p(&ressetpci->fh, pbdev->fh);
263             stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
264             break;
265         default:
266             DPRINTF("unknown set pci command\n");
267             stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
268             break;
269         }
270         break;
271     }
272     case CLP_QUERY_PCI_FN: {
273         ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
274         ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;
275 
276         pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqquery->fh));
277         if (!pbdev) {
278             DPRINTF("query pci no pci dev\n");
279             stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
280             goto out;
281         }
282 
283         stq_p(&resquery->sdma, pbdev->zpci_fn.sdma);
284         stq_p(&resquery->edma, pbdev->zpci_fn.edma);
285         stw_p(&resquery->pchid, pbdev->zpci_fn.pchid);
286         stw_p(&resquery->vfn, pbdev->zpci_fn.vfn);
287         resquery->flags = pbdev->zpci_fn.flags;
288         resquery->pfgid = pbdev->zpci_fn.pfgid;
289         resquery->pft = pbdev->zpci_fn.pft;
290         resquery->fmbl = pbdev->zpci_fn.fmbl;
291         stl_p(&resquery->fid, pbdev->zpci_fn.fid);
292         stl_p(&resquery->uid, pbdev->zpci_fn.uid);
293         memcpy(resquery->pfip, pbdev->zpci_fn.pfip, CLP_PFIP_NR_SEGMENTS);
294         memcpy(resquery->util_str, pbdev->zpci_fn.util_str, CLP_UTIL_STR_LEN);
295 
296         for (i = 0; i < PCI_BAR_COUNT; i++) {
297             uint32_t data = pci_get_long(pbdev->pdev->config +
298                 PCI_BASE_ADDRESS_0 + (i * 4));
299 
300             stl_p(&resquery->bar[i], data);
301             resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
302                                     ctz64(pbdev->pdev->io_regions[i].size) : 0;
303             DPRINTF("bar %d addr 0x%x size 0x%" PRIx64 " barsize 0x%x\n", i,
304                     ldl_p(&resquery->bar[i]),
305                     pbdev->pdev->io_regions[i].size,
306                     resquery->bar_size[i]);
307         }
308 
309         stw_p(&resquery->hdr.rsp, CLP_RC_OK);
310         break;
311     }
312     case CLP_QUERY_PCI_FNGRP: {
313         ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;
314 
315         ClpReqQueryPciGrp *reqgrp = (ClpReqQueryPciGrp *)reqh;
316         S390PCIGroup *group;
317 
318         group = s390_group_find(reqgrp->g);
319         if (!group) {
320             /* We do not allow access to unknown groups */
321             /* The group must have been obtained with a vfio device */
322             stw_p(&resgrp->hdr.rsp, CLP_RC_QUERYPCIFG_PFGID);
323             goto out;
324         }
325         resgrp->fr = group->zpci_group.fr;
326         stq_p(&resgrp->dasm, group->zpci_group.dasm);
327         stq_p(&resgrp->msia, group->zpci_group.msia);
328         stw_p(&resgrp->mui, group->zpci_group.mui);
329         stw_p(&resgrp->i, group->zpci_group.i);
330         stw_p(&resgrp->maxstbl, group->zpci_group.maxstbl);
331         resgrp->version = group->zpci_group.version;
332         stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
333         break;
334     }
335     default:
336         DPRINTF("unknown clp command\n");
337         stw_p(&resh->rsp, CLP_RC_CMD);
338         break;
339     }
340 
341 out:
342     if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer,
343                                 req_len + res_len)) {
344         s390_cpu_virt_mem_handle_exc(cpu, ra);
345         return 0;
346     }
347     setcc(cpu, cc);
348     return 0;
349 }
350 
351 /**
352  * Swap data between the s390x big-endian register representation and the
353  * little-endian byte order used for PCI configuration space accesses.
354  *
355  * @ptr: a pointer to a uint64_t data field
356  * @len: the length of the valid data, must be 1,2,4 or 8
357  */
358 static int zpci_endian_swap(uint64_t *ptr, uint8_t len)
359 {
360     uint64_t data = *ptr;
361 
362     switch (len) {
363     case 1:
364         break;
365     case 2:
366         data = bswap16(data);
367         break;
368     case 4:
369         data = bswap32(data);
370         break;
371     case 8:
372         data = bswap64(data);
373         break;
374     default:
375         return -EINVAL;
376     }
377     *ptr = data;
378     return 0;
379 }
380 
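/*
 * If the access at [offset, offset + len) lies entirely within one of mr's
 * subregions, return that subregion so the access is dispatched to the
 * region that actually implements it; otherwise return mr unchanged.
 */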
381 static MemoryRegion *s390_get_subregion(MemoryRegion *mr, uint64_t offset,
382                                         uint8_t len)
383 {
384     MemoryRegion *subregion;
385     uint64_t subregion_size;
386 
387     QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
388         subregion_size = int128_get64(subregion->size);
389         if ((offset >= subregion->addr) &&
390             (offset + len) <= (subregion->addr + subregion_size)) {
391             mr = subregion;
392             break;
393         }
394     }
395     return mr;
396 }
397 
398 static MemTxResult zpci_read_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
399                                  uint64_t offset, uint64_t *data, uint8_t len)
400 {
401     MemoryRegion *mr;
402 
403     mr = pbdev->pdev->io_regions[pcias].memory;
404     mr = s390_get_subregion(mr, offset, len);
405     offset -= mr->addr;
406     return memory_region_dispatch_read(mr, offset, data,
407                                        size_memop(len) | MO_BE,
408                                        MEMTXATTRS_UNSPECIFIED);
409 }
410 
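/*
 * PCI Load (PCILG): r2 carries the function handle (upper 32 bits), the
 * PCI address space and the access length; r2 + 1 carries the offset.  The
 * value read from the BAR or from configuration space is returned in r1
 * and the condition code reports success or failure.
 */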
411 int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
412 {
413     CPUS390XState *env = &cpu->env;
414     S390PCIBusDevice *pbdev;
415     uint64_t offset;
416     uint64_t data;
417     MemTxResult result;
418     uint8_t len;
419     uint32_t fh;
420     uint8_t pcias;
421 
422     if (env->psw.mask & PSW_MASK_PSTATE) {
423         s390_program_interrupt(env, PGM_PRIVILEGED, ra);
424         return 0;
425     }
426 
427     if (r2 & 0x1) {
428         s390_program_interrupt(env, PGM_SPECIFICATION, ra);
429         return 0;
430     }
431 
432     fh = env->regs[r2] >> 32;
433     pcias = (env->regs[r2] >> 16) & 0xf;
434     len = env->regs[r2] & 0xf;
435     offset = env->regs[r2 + 1];
436 
437     if (!(fh & FH_MASK_ENABLE)) {
438         setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
439         return 0;
440     }
441 
442     pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
443     if (!pbdev) {
444         DPRINTF("pcilg no pci dev\n");
445         setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
446         return 0;
447     }
448 
449     switch (pbdev->state) {
450     case ZPCI_FS_PERMANENT_ERROR:
451     case ZPCI_FS_ERROR:
452         setcc(cpu, ZPCI_PCI_LS_ERR);
453         s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
454         return 0;
455     default:
456         break;
457     }
458 
459     switch (pcias) {
460     case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
461         if (!len || (len > (8 - (offset & 0x7)))) {
462             s390_program_interrupt(env, PGM_OPERAND, ra);
463             return 0;
464         }
465         result = zpci_read_bar(pbdev, pcias, offset, &data, len);
466         if (result != MEMTX_OK) {
467             s390_program_interrupt(env, PGM_OPERAND, ra);
468             return 0;
469         }
470         break;
471     case ZPCI_CONFIG_BAR:
472         if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
473             s390_program_interrupt(env, PGM_OPERAND, ra);
474             return 0;
475         }
476         data = pci_host_config_read_common(
477                    pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);
478 
479         if (zpci_endian_swap(&data, len)) {
480             s390_program_interrupt(env, PGM_OPERAND, ra);
481             return 0;
482         }
483         break;
484     default:
485         DPRINTF("pcilg invalid space\n");
486         setcc(cpu, ZPCI_PCI_LS_ERR);
487         s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
488         return 0;
489     }
490 
491     pbdev->fmb.counter[ZPCI_FMB_CNT_LD]++;
492 
493     env->regs[r1] = data;
494     setcc(cpu, ZPCI_PCI_LS_OK);
495     return 0;
496 }
497 
498 static MemTxResult zpci_write_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
499                                   uint64_t offset, uint64_t data, uint8_t len)
500 {
501     MemoryRegion *mr;
502 
503     mr = pbdev->pdev->io_regions[pcias].memory;
504     mr = s390_get_subregion(mr, offset, len);
505     offset -= mr->addr;
506     return memory_region_dispatch_write(mr, offset, data,
507                                         size_memop(len) | MO_BE,
508                                         MEMTXATTRS_UNSPECIFIED);
509 }
510 
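/*
 * PCI Store (PCISTG): mirror of PCILG.  The data to store comes from r1,
 * while r2 supplies the function handle, address space and length and
 * r2 + 1 the offset within the BAR or configuration space.
 */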
511 int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
512 {
513     CPUS390XState *env = &cpu->env;
514     uint64_t offset, data;
515     S390PCIBusDevice *pbdev;
516     MemTxResult result;
517     uint8_t len;
518     uint32_t fh;
519     uint8_t pcias;
520 
521     if (env->psw.mask & PSW_MASK_PSTATE) {
522         s390_program_interrupt(env, PGM_PRIVILEGED, ra);
523         return 0;
524     }
525 
526     if (r2 & 0x1) {
527         s390_program_interrupt(env, PGM_SPECIFICATION, ra);
528         return 0;
529     }
530 
531     fh = env->regs[r2] >> 32;
532     pcias = (env->regs[r2] >> 16) & 0xf;
533     len = env->regs[r2] & 0xf;
534     offset = env->regs[r2 + 1];
535     data = env->regs[r1];
536 
537     if (!(fh & FH_MASK_ENABLE)) {
538         setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
539         return 0;
540     }
541 
542     pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
543     if (!pbdev) {
544         DPRINTF("pcistg no pci dev\n");
545         setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
546         return 0;
547     }
548 
549     switch (pbdev->state) {
550     /* ZPCI_FS_RESERVED, ZPCI_FS_STANDBY and ZPCI_FS_DISABLED
551      * are already covered by the FH_MASK_ENABLE check above
552      */
553     case ZPCI_FS_PERMANENT_ERROR:
554     case ZPCI_FS_ERROR:
555         setcc(cpu, ZPCI_PCI_LS_ERR);
556         s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
557         return 0;
558     default:
559         break;
560     }
561 
562     switch (pcias) {
563         /* A ZPCI PCI card may use any BAR from BAR 0 to BAR 5 */
564     case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
565         /* Check length:
566          * A length of 0 is invalid and length should not cross a double word
567          */
568         if (!len || (len > (8 - (offset & 0x7)))) {
569             s390_program_interrupt(env, PGM_OPERAND, ra);
570             return 0;
571         }
572 
573         result = zpci_write_bar(pbdev, pcias, offset, data, len);
574         if (result != MEMTX_OK) {
575             s390_program_interrupt(env, PGM_OPERAND, ra);
576             return 0;
577         }
578         break;
579     case ZPCI_CONFIG_BAR:
580         /* ZPCI uses the pseudo BAR number 15 as configuration space */
581         /* possible access lengths are 1,2,4 and must not cross a word */
582         if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
583             s390_program_interrupt(env, PGM_OPERAND, ra);
584             return 0;
585         }
586         /* len is 1, 2 or 4 here, so zpci_endian_swap() cannot fail */
587         zpci_endian_swap(&data, len);
588         pci_host_config_write_common(pbdev->pdev, offset,
589                                      pci_config_size(pbdev->pdev),
590                                      data, len);
591         break;
592     default:
593         DPRINTF("pcistg invalid space\n");
594         setcc(cpu, ZPCI_PCI_LS_ERR);
595         s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
596         return 0;
597     }
598 
599     pbdev->fmb.counter[ZPCI_FMB_CNT_ST]++;
600 
601     setcc(cpu, ZPCI_PCI_LS_OK);
602     return 0;
603 }
604 
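/*
 * Apply one guest IOTLB entry to the shadow table kept in iommu->iotlb and
 * notify the IOMMU memory region (and thereby vfio) of the map or unmap.
 * A cached mapping whose translation or permissions changed is unmapped
 * before the new mapping is installed.  Returns the number of DMA mappings
 * still available, or 1 if no limit is being tracked.
 */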
605 static uint32_t s390_pci_update_iotlb(S390PCIIOMMU *iommu,
606                                       S390IOTLBEntry *entry)
607 {
608     S390IOTLBEntry *cache = g_hash_table_lookup(iommu->iotlb, &entry->iova);
609     IOMMUTLBEvent event = {
610         .type = entry->perm ? IOMMU_NOTIFIER_MAP : IOMMU_NOTIFIER_UNMAP,
611         .entry = {
612             .target_as = &address_space_memory,
613             .iova = entry->iova,
614             .translated_addr = entry->translated_addr,
615             .perm = entry->perm,
616             .addr_mask = ~TARGET_PAGE_MASK,
617         },
618     };
619 
620     if (event.type == IOMMU_NOTIFIER_UNMAP) {
621         if (!cache) {
622             goto out;
623         }
624         g_hash_table_remove(iommu->iotlb, &entry->iova);
625         inc_dma_avail(iommu);
626     } else {
627         if (cache) {
628             if (cache->perm == entry->perm &&
629                 cache->translated_addr == entry->translated_addr) {
630                 goto out;
631             }
632 
633             event.type = IOMMU_NOTIFIER_UNMAP;
634             event.entry.perm = IOMMU_NONE;
635             memory_region_notify_iommu(&iommu->iommu_mr, 0, event);
636             event.type = IOMMU_NOTIFIER_MAP;
637             event.entry.perm = entry->perm;
638         }
639 
640         cache = g_new(S390IOTLBEntry, 1);
641         cache->iova = entry->iova;
642         cache->translated_addr = entry->translated_addr;
643         cache->len = TARGET_PAGE_SIZE;
644         cache->perm = entry->perm;
645         g_hash_table_replace(iommu->iotlb, &cache->iova, cache);
646         dec_dma_avail(iommu);
647     }
648 
649     memory_region_notify_iommu(&iommu->iommu_mr, 0, event);
650 
651 out:
652     return iommu->dma_limit ? iommu->dma_limit->avail : 1;
653 }
654 
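/*
 * Refresh PCI Translations (RPCIT): walk the guest I/O translation tables
 * for the range given by r2 (start) and r2 + 1 (length) and replay the
 * resulting mappings into the IOMMU.  A translation error puts the
 * function into the error state and raises an error event; exhausting the
 * host DMA mappings is reported as insufficient resources.
 */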
655 int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
656 {
657     CPUS390XState *env = &cpu->env;
658     uint32_t fh;
659     uint16_t error = 0;
660     S390PCIBusDevice *pbdev;
661     S390PCIIOMMU *iommu;
662     S390IOTLBEntry entry;
663     hwaddr start, end;
664     uint32_t dma_avail;
665 
666     if (env->psw.mask & PSW_MASK_PSTATE) {
667         s390_program_interrupt(env, PGM_PRIVILEGED, ra);
668         return 0;
669     }
670 
671     if (r2 & 0x1) {
672         s390_program_interrupt(env, PGM_SPECIFICATION, ra);
673         return 0;
674     }
675 
676     fh = env->regs[r1] >> 32;
677     start = env->regs[r2];
678     end = start + env->regs[r2 + 1];
679 
680     pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
681     if (!pbdev) {
682         DPRINTF("rpcit no pci dev\n");
683         setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
684         return 0;
685     }
686 
687     switch (pbdev->state) {
688     case ZPCI_FS_RESERVED:
689     case ZPCI_FS_STANDBY:
690     case ZPCI_FS_DISABLED:
691     case ZPCI_FS_PERMANENT_ERROR:
692         setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
693         return 0;
694     case ZPCI_FS_ERROR:
695         setcc(cpu, ZPCI_PCI_LS_ERR);
696         s390_set_status_code(env, r1, ZPCI_MOD_ST_ERROR_RECOVER);
697         return 0;
698     default:
699         break;
700     }
701 
702     iommu = pbdev->iommu;
703     if (iommu->dma_limit) {
704         dma_avail = iommu->dma_limit->avail;
705     } else {
706         dma_avail = 1;
707     }
708     if (!iommu->g_iota) {
709         error = ERR_EVENT_INVALAS;
710         goto err;
711     }
712 
713     if (end < iommu->pba || start > iommu->pal) {
714         error = ERR_EVENT_OORANGE;
715         goto err;
716     }
717 
718     while (start < end) {
719         error = s390_guest_io_table_walk(iommu->g_iota, start, &entry);
720         if (error) {
721             break;
722         }
723 
724         start += entry.len;
725         while (entry.iova < start && entry.iova < end &&
726                (dma_avail > 0 || entry.perm == IOMMU_NONE)) {
727             dma_avail = s390_pci_update_iotlb(iommu, &entry);
728             entry.iova += TARGET_PAGE_SIZE;
729             entry.translated_addr += TARGET_PAGE_SIZE;
730         }
731     }
732 err:
733     if (error) {
734         pbdev->state = ZPCI_FS_ERROR;
735         setcc(cpu, ZPCI_PCI_LS_ERR);
736         s390_set_status_code(env, r1, ZPCI_PCI_ST_FUNC_IN_ERR);
737         s390_pci_generate_error_event(error, pbdev->fh, pbdev->fid, start, 0);
738     } else {
739         pbdev->fmb.counter[ZPCI_FMB_CNT_RPCIT]++;
740         if (dma_avail > 0) {
741             setcc(cpu, ZPCI_PCI_LS_OK);
742         } else {
743             /* vfio DMA mappings are exhausted; report it so the guest frees some */
744             setcc(cpu, ZPCI_PCI_LS_ERR);
745             s390_set_status_code(env, r1, ZPCI_RPCIT_ST_INSUFF_RES);
746         }
747     }
748     return 0;
749 }
750 
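/*
 * PCI Store Block (PCISTB): copy len bytes (a multiple of 8, at most
 * maxstbl, not crossing a 4K boundary of the BAR) from guest memory at
 * gaddr to the I/O region selected by the address space in r1, using
 * 8-byte writes at the offset taken from r3.
 */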
751 int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
752                         uint8_t ar, uintptr_t ra)
753 {
754     CPUS390XState *env = &cpu->env;
755     S390PCIBusDevice *pbdev;
756     MemoryRegion *mr;
757     MemTxResult result;
758     uint64_t offset;
759     int i;
760     uint32_t fh;
761     uint8_t pcias;
762     uint16_t len;
763     uint8_t buffer[128];
764 
765     if (env->psw.mask & PSW_MASK_PSTATE) {
766         s390_program_interrupt(env, PGM_PRIVILEGED, ra);
767         return 0;
768     }
769 
770     fh = env->regs[r1] >> 32;
771     pcias = (env->regs[r1] >> 16) & 0xf;
772     len = env->regs[r1] & 0x1fff;
773     offset = env->regs[r3];
774 
775     if (!(fh & FH_MASK_ENABLE)) {
776         setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
777         return 0;
778     }
779 
780     pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
781     if (!pbdev) {
782         DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
783         setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
784         return 0;
785     }
786 
787     switch (pbdev->state) {
788     case ZPCI_FS_PERMANENT_ERROR:
789     case ZPCI_FS_ERROR:
790         setcc(cpu, ZPCI_PCI_LS_ERR);
791         s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
792         return 0;
793     default:
794         break;
795     }
796 
797     if (pcias > ZPCI_IO_BAR_MAX) {
798         DPRINTF("pcistb invalid space\n");
799         setcc(cpu, ZPCI_PCI_LS_ERR);
800         s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
801         return 0;
802     }
803 
804     /* Verify the address, offset and length */
805     /* offset must be a multiple of 8 */
806     if (offset % 8) {
807         goto specification_error;
808     }
809     /* Length must be greater than 8, a multiple of 8 */
810     /* and not greater than maxstbl */
811     if ((len <= 8) || (len % 8) ||
812         (len > pbdev->pci_group->zpci_group.maxstbl)) {
813         goto specification_error;
814     }
815     /* Do not cross a 4K-byte boundary */
816     if (((offset & 0xfff) + len) > 0x1000) {
817         goto specification_error;
818     }
819     /* Guest address must be double word aligned */
820     if (gaddr & 0x07UL) {
821         goto specification_error;
822     }
823 
824     mr = pbdev->pdev->io_regions[pcias].memory;
825     mr = s390_get_subregion(mr, offset, len);
826     offset -= mr->addr;
827 
828     for (i = 0; i < len; i += 8) {
829         if (!memory_region_access_valid(mr, offset + i, 8, true,
830                                         MEMTXATTRS_UNSPECIFIED)) {
831             s390_program_interrupt(env, PGM_OPERAND, ra);
832             return 0;
833         }
834     }
835 
836     if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
837         s390_cpu_virt_mem_handle_exc(cpu, ra);
838         return 0;
839     }
840 
841     for (i = 0; i < len / 8; i++) {
842         result = memory_region_dispatch_write(mr, offset + i * 8,
843                                               ldq_p(buffer + i * 8),
844                                               MO_64, MEMTXATTRS_UNSPECIFIED);
845         if (result != MEMTX_OK) {
846             s390_program_interrupt(env, PGM_OPERAND, ra);
847             return 0;
848         }
849     }
850 
851     pbdev->fmb.counter[ZPCI_FMB_CNT_STB]++;
852 
853     setcc(cpu, ZPCI_PCI_LS_OK);
854     return 0;
855 
856 specification_error:
857     s390_program_interrupt(env, PGM_SPECIFICATION, ra);
858     return 0;
859 }
860 
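/*
 * Register adapter interrupts for the function: obtain and map the summary
 * and per-function indicator areas described by the FIB and record the
 * adapter routing information (ISC, number of interrupts, offsets).
 */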
861 static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
862 {
863     int ret, len;
864     uint8_t isc = FIB_DATA_ISC(ldl_p(&fib.data));
865 
866     pbdev->routes.adapter.adapter_id = css_get_adapter_id(
867                                        CSS_IO_ADAPTER_PCI, isc);
868     pbdev->summary_ind = get_indicator(ldq_p(&fib.aisb), sizeof(uint64_t));
869     len = BITS_TO_LONGS(FIB_DATA_NOI(ldl_p(&fib.data))) * sizeof(unsigned long);
870     pbdev->indicator = get_indicator(ldq_p(&fib.aibv), len);
871 
872     ret = map_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
873     if (ret) {
874         goto out;
875     }
876 
877     ret = map_indicator(&pbdev->routes.adapter, pbdev->indicator);
878     if (ret) {
879         goto out;
880     }
881 
882     pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb);
883     pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data));
884     pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv);
885     pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data));
886     pbdev->isc = isc;
887     pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data));
888     pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data));
889 
890     DPRINTF("reg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
891     return 0;
892 out:
893     release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
894     release_indicator(&pbdev->routes.adapter, pbdev->indicator);
895     pbdev->summary_ind = NULL;
896     pbdev->indicator = NULL;
897     return ret;
898 }
899 
900 int pci_dereg_irqs(S390PCIBusDevice *pbdev)
901 {
902     release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
903     release_indicator(&pbdev->routes.adapter, pbdev->indicator);
904 
905     pbdev->summary_ind = NULL;
906     pbdev->indicator = NULL;
907     pbdev->routes.adapter.summary_addr = 0;
908     pbdev->routes.adapter.summary_offset = 0;
909     pbdev->routes.adapter.ind_addr = 0;
910     pbdev->routes.adapter.ind_offset = 0;
911     pbdev->isc = 0;
912     pbdev->noi = 0;
913     pbdev->sum = 0;
914 
915     DPRINTF("dereg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
916     return 0;
917 }
918 
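/*
 * Register the guest's I/O address translation parameters (PCI base and
 * limit addresses plus the IOTA designation) with the IOMMU and enable it.
 * Only a designation of type ZPCI_IOTA_RTTO with translation enabled is
 * accepted.
 */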
919 static int reg_ioat(CPUS390XState *env, S390PCIIOMMU *iommu, ZpciFib fib,
920                     uintptr_t ra)
921 {
922     uint64_t pba = ldq_p(&fib.pba);
923     uint64_t pal = ldq_p(&fib.pal);
924     uint64_t g_iota = ldq_p(&fib.iota);
925     uint8_t dt = (g_iota >> 2) & 0x7;
926     uint8_t t = (g_iota >> 11) & 0x1;
927 
928     pba &= ~0xfff;
929     pal |= 0xfff;
930     if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) {
931         s390_program_interrupt(env, PGM_OPERAND, ra);
932         return -EINVAL;
933     }
934 
935     /* currently we only support designation type 1 with translation */
936     if (!(dt == ZPCI_IOTA_RTTO && t)) {
937         error_report("unsupported ioat dt %d t %d", dt, t);
938         s390_program_interrupt(env, PGM_OPERAND, ra);
939         return -EINVAL;
940     }
941 
942     iommu->pba = pba;
943     iommu->pal = pal;
944     iommu->g_iota = g_iota;
945 
946     s390_pci_iommu_enable(iommu);
947 
948     return 0;
949 }
950 
951 void pci_dereg_ioat(S390PCIIOMMU *iommu)
952 {
953     s390_pci_iommu_disable(iommu);
954     iommu->pba = 0;
955     iommu->pal = 0;
956     iommu->g_iota = 0;
957 }
958 
959 void fmb_timer_free(S390PCIBusDevice *pbdev)
960 {
961     if (pbdev->fmb_timer) {
962         timer_free(pbdev->fmb_timer);
963         pbdev->fmb_timer = NULL;
964     }
965     pbdev->fmb_addr = 0;
966     memset(&pbdev->fmb, 0, sizeof(ZpciFmb));
967 }
968 
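/*
 * Store a single FMB field of the given length to guest memory at
 * fmb_addr + offset.  On failure an FMBA error event is generated and
 * measurement block updates are stopped.
 */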
969 static int fmb_do_update(S390PCIBusDevice *pbdev, int offset, uint64_t val,
970                          int len)
971 {
972     MemTxResult ret;
973     uint64_t dst = pbdev->fmb_addr + offset;
974 
975     switch (len) {
976     case 8:
977         address_space_stq_be(&address_space_memory, dst, val,
978                              MEMTXATTRS_UNSPECIFIED,
979                              &ret);
980         break;
981     case 4:
982         address_space_stl_be(&address_space_memory, dst, val,
983                              MEMTXATTRS_UNSPECIFIED,
984                              &ret);
985         break;
986     case 2:
987         address_space_stw_be(&address_space_memory, dst, val,
988                              MEMTXATTRS_UNSPECIFIED,
989                              &ret);
990         break;
991     case 1:
992         address_space_stb(&address_space_memory, dst, val,
993                           MEMTXATTRS_UNSPECIFIED,
994                           &ret);
995         break;
996     default:
997         ret = MEMTX_ERROR;
998         break;
999     }
1000     if (ret != MEMTX_OK) {
1001         s390_pci_generate_error_event(ERR_EVENT_FMBA, pbdev->fh, pbdev->fid,
1002                                       pbdev->fmb_addr, 0);
1003         fmb_timer_free(pbdev);
1004     }
1005 
1006     return ret;
1007 }
1008 
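/*
 * Timer callback that copies the function measurement block to the guest:
 * the update bit is set while the block is written, the sample count and
 * counters are stored, and finally a fresh TOD timestamp is written with
 * the update bit cleared.  Rearms itself after DEFAULT_MUI milliseconds.
 */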
1009 static void fmb_update(void *opaque)
1010 {
1011     S390PCIBusDevice *pbdev = opaque;
1012     int64_t t = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
1013     int i;
1014 
1015     /* Update U bit */
1016     pbdev->fmb.last_update *= 2;
1017     pbdev->fmb.last_update |= UPDATE_U_BIT;
1018     if (fmb_do_update(pbdev, offsetof(ZpciFmb, last_update),
1019                       pbdev->fmb.last_update,
1020                       sizeof(pbdev->fmb.last_update))) {
1021         return;
1022     }
1023 
1024     /* Update FMB sample count */
1025     if (fmb_do_update(pbdev, offsetof(ZpciFmb, sample),
1026                       pbdev->fmb.sample++,
1027                       sizeof(pbdev->fmb.sample))) {
1028         return;
1029     }
1030 
1031     /* Update FMB counters */
1032     for (i = 0; i < ZPCI_FMB_CNT_MAX; i++) {
1033         if (fmb_do_update(pbdev, offsetof(ZpciFmb, counter[i]),
1034                           pbdev->fmb.counter[i],
1035                           sizeof(pbdev->fmb.counter[0]))) {
1036             return;
1037         }
1038     }
1039 
1040     /* Clear U bit and update the time */
1041     pbdev->fmb.last_update = time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
1042     pbdev->fmb.last_update *= 2;
1043     if (fmb_do_update(pbdev, offsetof(ZpciFmb, last_update),
1044                       pbdev->fmb.last_update,
1045                       sizeof(pbdev->fmb.last_update))) {
1046         return;
1047     }
1048     timer_mod(pbdev->fmb_timer, t + DEFAULT_MUI);
1049 }
1050 
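/*
 * Modify PCI Function Controls (MPCIFC): dispatch on the operation control
 * in r1 to register or deregister adapter interrupts, register, deregister
 * or re-register the I/O address translation tables, reset the error or
 * blocked state, or set up the function measurement block.  The FIB is
 * read from guest memory at fiba.
 */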
1051 int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
1052                         uintptr_t ra)
1053 {
1054     CPUS390XState *env = &cpu->env;
1055     uint8_t oc, dmaas;
1056     uint32_t fh;
1057     ZpciFib fib;
1058     S390PCIBusDevice *pbdev;
1059     uint64_t cc = ZPCI_PCI_LS_OK;
1060 
1061     if (env->psw.mask & PSW_MASK_PSTATE) {
1062         s390_program_interrupt(env, PGM_PRIVILEGED, ra);
1063         return 0;
1064     }
1065 
1066     oc = env->regs[r1] & 0xff;
1067     dmaas = (env->regs[r1] >> 16) & 0xff;
1068     fh = env->regs[r1] >> 32;
1069 
1070     if (fiba & 0x7) {
1071         s390_program_interrupt(env, PGM_SPECIFICATION, ra);
1072         return 0;
1073     }
1074 
1075     pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
1076     if (!pbdev) {
1077         DPRINTF("mpcifc no pci dev fh 0x%x\n", fh);
1078         setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
1079         return 0;
1080     }
1081 
1082     switch (pbdev->state) {
1083     case ZPCI_FS_RESERVED:
1084     case ZPCI_FS_STANDBY:
1085     case ZPCI_FS_DISABLED:
1086     case ZPCI_FS_PERMANENT_ERROR:
1087         setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
1088         return 0;
1089     default:
1090         break;
1091     }
1092 
1093     if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
1094         s390_cpu_virt_mem_handle_exc(cpu, ra);
1095         return 0;
1096     }
1097 
1098     if (fib.fmt != 0) {
1099         s390_program_interrupt(env, PGM_OPERAND, ra);
1100         return 0;
1101     }
1102 
1103     switch (oc) {
1104     case ZPCI_MOD_FC_REG_INT:
1105         if (pbdev->summary_ind) {
1106             cc = ZPCI_PCI_LS_ERR;
1107             s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
1108         } else if (reg_irqs(env, pbdev, fib)) {
1109             cc = ZPCI_PCI_LS_ERR;
1110             s390_set_status_code(env, r1, ZPCI_MOD_ST_RES_NOT_AVAIL);
1111         }
1112         break;
1113     case ZPCI_MOD_FC_DEREG_INT:
1114         if (!pbdev->summary_ind) {
1115             cc = ZPCI_PCI_LS_ERR;
1116             s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
1117         } else {
1118             pci_dereg_irqs(pbdev);
1119         }
1120         break;
1121     case ZPCI_MOD_FC_REG_IOAT:
1122         if (dmaas != 0) {
1123             cc = ZPCI_PCI_LS_ERR;
1124             s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
1125         } else if (pbdev->iommu->enabled) {
1126             cc = ZPCI_PCI_LS_ERR;
1127             s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
1128         } else if (reg_ioat(env, pbdev->iommu, fib, ra)) {
1129             cc = ZPCI_PCI_LS_ERR;
1130             s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
1131         }
1132         break;
1133     case ZPCI_MOD_FC_DEREG_IOAT:
1134         if (dmaas != 0) {
1135             cc = ZPCI_PCI_LS_ERR;
1136             s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
1137         } else if (!pbdev->iommu->enabled) {
1138             cc = ZPCI_PCI_LS_ERR;
1139             s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
1140         } else {
1141             pci_dereg_ioat(pbdev->iommu);
1142         }
1143         break;
1144     case ZPCI_MOD_FC_REREG_IOAT:
1145         if (dmaas != 0) {
1146             cc = ZPCI_PCI_LS_ERR;
1147             s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
1148         } else if (!pbdev->iommu->enabled) {
1149             cc = ZPCI_PCI_LS_ERR;
1150             s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
1151         } else {
1152             pci_dereg_ioat(pbdev->iommu);
1153             if (reg_ioat(env, pbdev->iommu, fib, ra)) {
1154                 cc = ZPCI_PCI_LS_ERR;
1155                 s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
1156             }
1157         }
1158         break;
1159     case ZPCI_MOD_FC_RESET_ERROR:
1160         switch (pbdev->state) {
1161         case ZPCI_FS_BLOCKED:
1162         case ZPCI_FS_ERROR:
1163             pbdev->state = ZPCI_FS_ENABLED;
1164             break;
1165         default:
1166             cc = ZPCI_PCI_LS_ERR;
1167             s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
1168         }
1169         break;
1170     case ZPCI_MOD_FC_RESET_BLOCK:
1171         switch (pbdev->state) {
1172         case ZPCI_FS_ERROR:
1173             pbdev->state = ZPCI_FS_BLOCKED;
1174             break;
1175         default:
1176             cc = ZPCI_PCI_LS_ERR;
1177             s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
1178         }
1179         break;
1180     case ZPCI_MOD_FC_SET_MEASURE: {
1181         uint64_t fmb_addr = ldq_p(&fib.fmb_addr);
1182 
1183         if (fmb_addr & FMBK_MASK) {
1184             cc = ZPCI_PCI_LS_ERR;
1185             s390_pci_generate_error_event(ERR_EVENT_FMBPRO, pbdev->fh,
1186                                           pbdev->fid, fmb_addr, 0);
1187             fmb_timer_free(pbdev);
1188             break;
1189         }
1190 
1191         if (!fmb_addr) {
1192             /* Stop updating FMB. */
1193             fmb_timer_free(pbdev);
1194             break;
1195         }
1196 
1197         if (!pbdev->fmb_timer) {
1198             pbdev->fmb_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
1199                                             fmb_update, pbdev);
1200         } else if (timer_pending(pbdev->fmb_timer)) {
1201             /* Remove pending timer to update FMB address. */
1202             timer_del(pbdev->fmb_timer);
1203         }
1204         pbdev->fmb_addr = fmb_addr;
1205         timer_mod(pbdev->fmb_timer,
1206                   qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + DEFAULT_MUI);
1207         break;
1208     }
1209     default:
1210         s390_program_interrupt(&cpu->env, PGM_OPERAND, ra);
1211         cc = ZPCI_PCI_LS_ERR;
1212     }
1213 
1214     setcc(cpu, cc);
1215     return 0;
1216 }
1217 
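/*
 * Store PCI Function Controls (STPCIFC): build a FIB describing the
 * current function state (enabled/error/blocked bits, IOAT parameters,
 * interrupt routing, FMB address) and store it to guest memory at fiba.
 */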
1218 int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
1219                          uintptr_t ra)
1220 {
1221     CPUS390XState *env = &cpu->env;
1222     uint8_t dmaas;
1223     uint32_t fh;
1224     ZpciFib fib;
1225     S390PCIBusDevice *pbdev;
1226     uint32_t data;
1227     uint64_t cc = ZPCI_PCI_LS_OK;
1228 
1229     if (env->psw.mask & PSW_MASK_PSTATE) {
1230         s390_program_interrupt(env, PGM_PRIVILEGED, ra);
1231         return 0;
1232     }
1233 
1234     fh = env->regs[r1] >> 32;
1235     dmaas = (env->regs[r1] >> 16) & 0xff;
1236 
1237     if (dmaas) {
1238         setcc(cpu, ZPCI_PCI_LS_ERR);
1239         s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_INVAL_DMAAS);
1240         return 0;
1241     }
1242 
1243     if (fiba & 0x7) {
1244         s390_program_interrupt(env, PGM_SPECIFICATION, ra);
1245         return 0;
1246     }
1247 
1248     pbdev = s390_pci_find_dev_by_idx(s390_get_phb(), fh & FH_MASK_INDEX);
1249     if (!pbdev) {
1250         setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
1251         return 0;
1252     }
1253 
1254     memset(&fib, 0, sizeof(fib));
1255 
1256     switch (pbdev->state) {
1257     case ZPCI_FS_RESERVED:
1258     case ZPCI_FS_STANDBY:
1259         setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
1260         return 0;
1261     case ZPCI_FS_DISABLED:
1262         if (fh & FH_MASK_ENABLE) {
1263             setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
1264             return 0;
1265         }
1266         goto out;
1267     /* BLOCKED bit is set to one coincident with the setting of ERROR bit.
1268      * FH Enabled bit is set to one in states of ENABLED, BLOCKED or ERROR. */
1269     case ZPCI_FS_ERROR:
1270         fib.fc |= 0x20;
1271         /* fallthrough */
1272     case ZPCI_FS_BLOCKED:
1273         fib.fc |= 0x40;
1274         /* fallthrough */
1275     case ZPCI_FS_ENABLED:
1276         fib.fc |= 0x80;
1277         if (pbdev->iommu->enabled) {
1278             fib.fc |= 0x10;
1279         }
1280         if (!(fh & FH_MASK_ENABLE)) {
1281             env->regs[r1] |= 1ULL << 63;
1282         }
1283         break;
1284     case ZPCI_FS_PERMANENT_ERROR:
1285         setcc(cpu, ZPCI_PCI_LS_ERR);
1286         s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_PERM_ERROR);
1287         return 0;
1288     }
1289 
1290     stq_p(&fib.pba, pbdev->iommu->pba);
1291     stq_p(&fib.pal, pbdev->iommu->pal);
1292     stq_p(&fib.iota, pbdev->iommu->g_iota);
1293     stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
1294     stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
1295     stq_p(&fib.fmb_addr, pbdev->fmb_addr);
1296 
1297     data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
1298            ((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
1299            ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
1300     stl_p(&fib.data, data);
1301 
1302 out:
1303     if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
1304         s390_cpu_virt_mem_handle_exc(cpu, ra);
1305         return 0;
1306     }
1307 
1308     setcc(cpu, cc);
1309     return 0;
1310 }
1311