/*
 * s390 PCI instructions
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "exec/memop.h"
#include "exec/target_page.h"
#include "system/memory.h"
#include "qemu/error-report.h"
#include "qemu/bswap.h"
#include "system/hw_accel.h"
#include "hw/boards.h"
#include "hw/pci/pci_device.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/s390-pci-kvm.h"
#include "hw/s390x/s390-pci-vfio.h"
#include "hw/s390x/tod.h"

#include "trace.h"

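/*
 * When a vfio DMA limit is in effect, these helpers track how many host DMA
 * mappings remain available as entries are removed from or added to the
 * shadow IOTLB; without a limit they are no-ops.
 */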
static inline void inc_dma_avail(S390PCIIOMMU *iommu)
{
    if (iommu->dma_limit) {
        iommu->dma_limit->avail++;
    }
}

static inline void dec_dma_avail(S390PCIIOMMU *iommu)
{
    if (iommu->dma_limit) {
        iommu->dma_limit->avail--;
    }
}

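/* Store the 8-bit status code into bits 31:24 of the designated register. */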
static void s390_set_status_code(CPUS390XState *env,
                                 uint8_t r, uint64_t status_code)
{
    env->regs[r] &= ~0xff000000ULL;
    env->regs[r] |= (status_code & 0xff) << 24;
}

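/*
 * CLP List PCI: fill the response with one entry per available zPCI
 * function, starting at the requested resume token, and hand back a new
 * resume token if the response buffer cannot hold all remaining entries.
 */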
static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
{
    S390PCIBusDevice *pbdev = NULL;
    S390pciState *s = s390_get_phb();
    uint32_t res_code, initial_l2, g_l2;
    int rc, i;
    uint64_t resume_token;

    rc = 0;
    if (lduw_be_p(&rrb->request.hdr.len) != 32) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_be_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
        res_code = CLP_RC_FMT;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_be_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
        ldq_be_p(&rrb->request.reserved1) != 0) {
        res_code = CLP_RC_RESNOT0;
        rc = -EINVAL;
        goto out;
    }

    resume_token = ldq_be_p(&rrb->request.resume_token);

    if (resume_token) {
        pbdev = s390_pci_find_dev_by_idx(s, resume_token);
        if (!pbdev) {
            res_code = CLP_RC_LISTPCI_BADRT;
            rc = -EINVAL;
            goto out;
        }
    } else {
        pbdev = s390_pci_find_next_avail_dev(s, NULL);
    }

    if (lduw_be_p(&rrb->response.hdr.len) < 48) {
        res_code = CLP_RC_8K;
        rc = -EINVAL;
        goto out;
    }

    initial_l2 = lduw_be_p(&rrb->response.hdr.len);
    if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry) != 0) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        *cc = 3;
        goto out;
    }

    stl_be_p(&rrb->response.fmt, 0);
    stq_be_p(&rrb->response.reserved1, 0);
    stl_be_p(&rrb->response.mdd, FH_MASK_SHM);
    stw_be_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
    rrb->response.flags = UID_CHECKING_ENABLED;
    rrb->response.entry_size = sizeof(ClpFhListEntry);

    i = 0;
    g_l2 = LIST_PCI_HDR_LEN;
    while (g_l2 < initial_l2 && pbdev) {
        stw_be_p(&rrb->response.fh_list[i].device_id,
                 pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
        stw_be_p(&rrb->response.fh_list[i].vendor_id,
                 pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
        /* Ignore RESERVED devices. */
        stl_be_p(&rrb->response.fh_list[i].config,
                 pbdev->state == ZPCI_FS_STANDBY ? 0 : 1 << 31);
        stl_be_p(&rrb->response.fh_list[i].fid, pbdev->fid);
        stl_be_p(&rrb->response.fh_list[i].fh, pbdev->fh);

        g_l2 += sizeof(ClpFhListEntry);
        trace_s390_pci_list_entry(g_l2,
                                  lduw_be_p(&rrb->response.fh_list[i].vendor_id),
                                  lduw_be_p(&rrb->response.fh_list[i].device_id),
                                  ldl_be_p(&rrb->response.fh_list[i].fid),
                                  ldl_be_p(&rrb->response.fh_list[i].fh));
        pbdev = s390_pci_find_next_avail_dev(s, pbdev);
        i++;
    }

    if (!pbdev) {
        resume_token = 0;
    } else {
        resume_token = pbdev->fh & FH_MASK_INDEX;
    }
    stq_be_p(&rrb->response.resume_token, resume_token);
    stw_be_p(&rrb->response.hdr.len, g_l2);
    stw_be_p(&rrb->response.hdr.rsp, CLP_RC_OK);
out:
    if (rc) {
        trace_s390_pci_list(rc);
        stw_be_p(&rrb->response.hdr.rsp, res_code);
    }
    return rc;
}

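/*
 * Handler for the CLP (Call Logical Processor) instruction: copy the request
 * and response blocks in from guest memory, dispatch on the command code and
 * copy the completed response back out.
 */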
int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
{
    ClpReqHdr *reqh;
    ClpRspHdr *resh;
    S390PCIBusDevice *pbdev;
    uint32_t req_len;
    uint32_t res_len;
    uint8_t buffer[4096 * 2];
    uint8_t cc = 0;
    CPUS390XState *env = &cpu->env;
    S390pciState *s = s390_get_phb();
    int i;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    reqh = (ClpReqHdr *)buffer;
    req_len = lduw_be_p(&reqh->len);
    if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + sizeof(*resh))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    resh = (ClpRspHdr *)(buffer + req_len);
    res_len = lduw_be_p(&resh->len);
    if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }
    if ((req_len + res_len) > 8192) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + res_len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    if (req_len != 32) {
        stw_be_p(&resh->rsp, CLP_RC_LEN);
        goto out;
    }

    switch (lduw_be_p(&reqh->cmd)) {
    case CLP_LIST_PCI: {
        ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
        list_pci(rrb, &cc);
        break;
    }
    case CLP_SET_PCI_FN: {
        ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
        ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(s, ldl_be_p(&reqsetpci->fh));
        if (!pbdev) {
            stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        switch (reqsetpci->oc) {
        case CLP_SET_ENABLE_PCI_FN:
            switch (reqsetpci->ndas) {
            case 0:
                stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_DMAAS);
                goto out;
            case 1:
                break;
            default:
                stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_RES);
                goto out;
            }

            if (pbdev->fh & FH_MASK_ENABLE) {
                stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
                goto out;
            }

            /*
             * Take this opportunity to make sure we still have an accurate
             * host fh. It's possible part of the handle changed while the
             * device was disabled to the guest (e.g. vfio hot reset for
             * ISM during plug)
             */
            if (pbdev->interp) {
                if (!s390_pci_get_host_fh(pbdev, &pbdev->fh) ||
                    !(pbdev->fh & FH_MASK_ENABLE)) {
                    stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
                    goto out;
                }
            }
            pbdev->fh |= FH_MASK_ENABLE;
            pbdev->state = ZPCI_FS_ENABLED;
            stl_be_p(&ressetpci->fh, pbdev->fh);
            stw_be_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        case CLP_SET_DISABLE_PCI_FN:
            if (!(pbdev->fh & FH_MASK_ENABLE)) {
                stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
                goto out;
            }
            device_cold_reset(DEVICE(pbdev));
            pbdev->fh &= ~FH_MASK_ENABLE;
            pbdev->state = ZPCI_FS_DISABLED;
            stl_be_p(&ressetpci->fh, pbdev->fh);
            stw_be_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        default:
            trace_s390_pci_unknown("set-pci", reqsetpci->oc);
            stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
            break;
        }
        break;
    }
    case CLP_QUERY_PCI_FN: {
        ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
        ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(s, ldl_be_p(&reqquery->fh));
        if (!pbdev) {
            trace_s390_pci_nodev("query", ldl_be_p(&reqquery->fh));
            stw_be_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        stq_be_p(&resquery->sdma, pbdev->zpci_fn.sdma);
        stq_be_p(&resquery->edma, pbdev->zpci_fn.edma);
        stw_be_p(&resquery->pchid, pbdev->zpci_fn.pchid);
        stw_be_p(&resquery->vfn, pbdev->zpci_fn.vfn);
        resquery->flags = pbdev->zpci_fn.flags;
        resquery->pfgid = pbdev->zpci_fn.pfgid;
        resquery->pft = pbdev->zpci_fn.pft;
        resquery->fmbl = pbdev->zpci_fn.fmbl;
        stl_be_p(&resquery->fid, pbdev->zpci_fn.fid);
        stl_be_p(&resquery->uid, pbdev->zpci_fn.uid);
        memcpy(resquery->pfip, pbdev->zpci_fn.pfip, CLP_PFIP_NR_SEGMENTS);
        memcpy(resquery->util_str, pbdev->zpci_fn.util_str, CLP_UTIL_STR_LEN);

        for (i = 0; i < PCI_BAR_COUNT; i++) {
            uint32_t data = pci_get_long(pbdev->pdev->config +
                                         PCI_BASE_ADDRESS_0 + (i * 4));

            stl_be_p(&resquery->bar[i], data);
            resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
                                    ctz64(pbdev->pdev->io_regions[i].size) : 0;
            trace_s390_pci_bar(i,
                               ldl_be_p(&resquery->bar[i]),
                               pbdev->pdev->io_regions[i].size,
                               resquery->bar_size[i]);
        }

        stw_be_p(&resquery->hdr.rsp, CLP_RC_OK);
        break;
    }
    case CLP_QUERY_PCI_FNGRP: {
        ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;
        ClpReqQueryPciGrp *reqgrp = (ClpReqQueryPciGrp *)reqh;
        S390PCIGroup *group;

        group = s390_group_find(reqgrp->g);
        if (!group) {
            /*
             * We do not allow access to unknown groups; the group must have
             * been obtained with a vfio device.
             */
            stw_be_p(&resgrp->hdr.rsp, CLP_RC_QUERYPCIFG_PFGID);
            goto out;
        }
        resgrp->fr = group->zpci_group.fr;
        stq_be_p(&resgrp->dasm, group->zpci_group.dasm);
        stq_be_p(&resgrp->msia, group->zpci_group.msia);
        stw_be_p(&resgrp->mui, group->zpci_group.mui);
        stw_be_p(&resgrp->i, group->zpci_group.i);
        stw_be_p(&resgrp->maxstbl, group->zpci_group.maxstbl);
        resgrp->version = group->zpci_group.version;
        resgrp->dtsm = group->zpci_group.dtsm;
        stw_be_p(&resgrp->hdr.rsp, CLP_RC_OK);
        break;
    }
    default:
        trace_s390_pci_unknown("clp", lduw_be_p(&reqh->cmd));
        stw_be_p(&resh->rsp, CLP_RC_CMD);
        break;
    }

out:
    if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer,
                                req_len + res_len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    setcc(cpu, cc);
    return 0;
}

/**
 * Swap data contained in s390x big endian registers to little endian
 * PCI bars.
 *
 * @ptr: a pointer to a uint64_t data field
 * @len: the length of the valid data, must be 1, 2, 4 or 8
 */
static int zpci_endian_swap(uint64_t *ptr, uint8_t len)
{
    uint64_t data = *ptr;

    switch (len) {
    case 1:
        break;
    case 2:
        data = bswap16(data);
        break;
    case 4:
        data = bswap32(data);
        break;
    case 8:
        data = bswap64(data);
        break;
    default:
        return -EINVAL;
    }
    *ptr = data;
    return 0;
}

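/*
 * Return the first direct subregion of @mr that fully contains the access
 * window [offset, offset + len), or @mr itself if no subregion does.
 */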
static MemoryRegion *s390_get_subregion(MemoryRegion *mr, uint64_t offset,
                                        uint8_t len)
{
    MemoryRegion *subregion;
    uint64_t subregion_size;

    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        subregion_size = int128_get64(subregion->size);
        if ((offset >= subregion->addr) &&
            (offset + len) <= (subregion->addr + subregion_size)) {
            mr = subregion;
            break;
        }
    }
    return mr;
}

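/*
 * Read @len bytes at @offset from the BAR selected by @pcias, dispatching
 * the access to the memory region that backs it.
 */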
static MemTxResult zpci_read_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
                                 uint64_t offset, uint64_t *data, uint8_t len)
{
    MemoryRegion *mr;

    mr = pbdev->pdev->io_regions[pcias].memory;
    mr = s390_get_subregion(mr, offset, len);
    offset -= mr->addr;
    return memory_region_dispatch_read(mr, offset, data,
                                       size_memop(len) | MO_BE,
                                       MEMTXATTRS_UNSPECIFIED);
}

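/*
 * Handler for the PCILG (PCI Load) instruction: read up to 8 bytes from a
 * BAR, or from the configuration space pseudo-BAR, into guest register r1.
 */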
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    uint64_t offset;
    uint64_t data;
    MemTxResult result;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        trace_s390_pci_nodev("pcilg", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    switch (pcias) {
    case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
        if (!len || (len > (8 - (offset & 0x7)))) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        result = zpci_read_bar(pbdev, pcias, offset, &data, len);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    case ZPCI_CONFIG_BAR:
        if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        data = pci_host_config_read_common(
                   pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);

        if (zpci_endian_swap(&data, len)) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    default:
        trace_s390_pci_invalid("pcilg", fh);
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    pbdev->fmb.counter[ZPCI_FMB_CNT_LD]++;

    env->regs[r1] = data;
    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

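/*
 * Write @len bytes of @data at @offset to the BAR selected by @pcias,
 * dispatching the access to the memory region that backs it.
 */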
static MemTxResult zpci_write_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
                                  uint64_t offset, uint64_t data, uint8_t len)
{
    MemoryRegion *mr;

    mr = pbdev->pdev->io_regions[pcias].memory;
    mr = s390_get_subregion(mr, offset, len);
    offset -= mr->addr;
    return memory_region_dispatch_write(mr, offset, data,
                                        size_memop(len) | MO_BE,
                                        MEMTXATTRS_UNSPECIFIED);
}

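/*
 * Handler for the PCISTG (PCI Store) instruction: write up to 8 bytes from
 * guest register r1 to a BAR or to the configuration space pseudo-BAR.
 */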
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint64_t offset, data;
    S390PCIBusDevice *pbdev;
    MemTxResult result;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];
    data = env->regs[r1];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        trace_s390_pci_nodev("pcistg", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    /*
     * ZPCI_FS_RESERVED, ZPCI_FS_STANDBY and ZPCI_FS_DISABLED
     * are already covered by the FH_MASK_ENABLE check above
     */
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    switch (pcias) {
    /* A ZPCI PCI card may use any BAR from BAR 0 to BAR 5 */
    case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
        /*
         * Check length:
         * A length of 0 is invalid and length should not cross a double word
         */
        if (!len || (len > (8 - (offset & 0x7)))) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }

        result = zpci_write_bar(pbdev, pcias, offset, data, len);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    case ZPCI_CONFIG_BAR:
        /*
         * ZPCI uses the pseudo BAR number 15 as configuration space.
         * Possible access lengths are 1, 2 and 4; the access must not
         * cross a word boundary.
         */
        if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        /* len is 1, 2 or 4 here, so the endian swap cannot fail */
        zpci_endian_swap(&data, len);
        pci_host_config_write_common(pbdev->pdev, offset,
                                     pci_config_size(pbdev->pdev),
                                     data, len);
        break;
    default:
        trace_s390_pci_invalid("pcistg", fh);
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    pbdev->fmb.counter[ZPCI_FMB_CNT_ST]++;

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

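/*
 * Apply @entry to the shadow IOTLB and notify the IOMMU region of the
 * resulting map (unmaps are left to be batched by the caller). Returns the
 * number of DMA mappings still available, or a nonzero dummy value when no
 * vfio DMA limit applies.
 */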
static uint32_t s390_pci_update_iotlb(S390PCIIOMMU *iommu,
                                      S390IOTLBEntry *entry)
{
    S390IOTLBEntry *cache = g_hash_table_lookup(iommu->iotlb, &entry->iova);
    IOMMUTLBEvent event = {
        .type = entry->perm ? IOMMU_NOTIFIER_MAP : IOMMU_NOTIFIER_UNMAP,
        .entry = {
            .target_as = &address_space_memory,
            .iova = entry->iova,
            .translated_addr = entry->translated_addr,
            .perm = entry->perm,
            .addr_mask = ~TARGET_PAGE_MASK,
        },
    };

    if (event.type == IOMMU_NOTIFIER_UNMAP) {
        if (!cache) {
            goto out;
        }
        g_hash_table_remove(iommu->iotlb, &entry->iova);
        inc_dma_avail(iommu);
        /* Don't notify the iommu yet, maybe we can bundle contiguous unmaps */
        goto out;
    } else {
        if (cache) {
            if (cache->perm == entry->perm &&
                cache->translated_addr == entry->translated_addr) {
                goto out;
            }

            event.type = IOMMU_NOTIFIER_UNMAP;
            event.entry.perm = IOMMU_NONE;
            memory_region_notify_iommu(&iommu->iommu_mr, 0, event);
            event.type = IOMMU_NOTIFIER_MAP;
            event.entry.perm = entry->perm;
        }

        cache = g_new(S390IOTLBEntry, 1);
        cache->iova = entry->iova;
        cache->translated_addr = entry->translated_addr;
        cache->len = TARGET_PAGE_SIZE;
        cache->perm = entry->perm;
        g_hash_table_replace(iommu->iotlb, &cache->iova, cache);
        dec_dma_avail(iommu);
    }

    /*
     * Any stale mapping was already notified as unmapped above; trigger the
     * map notification.
     */
    memory_region_notify_iommu(&iommu->iommu_mr, 0, event);

out:
    return iommu->dma_limit ? iommu->dma_limit->avail : 1;
}

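/*
 * Notify an unmap of the range [iova, iova + len), split into the largest
 * power-of-two aligned chunks that dma_aligned_pow2_mask() yields.
 */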
static void s390_pci_batch_unmap(S390PCIIOMMU *iommu, uint64_t iova,
                                 uint64_t len)
{
    uint64_t remain = len, start = iova, end = start + len - 1, mask, size;
    IOMMUTLBEvent event = {
        .type = IOMMU_NOTIFIER_UNMAP,
        .entry = {
            .target_as = &address_space_memory,
            .translated_addr = 0,
            .perm = IOMMU_NONE,
        },
    };

    while (remain >= TARGET_PAGE_SIZE) {
        mask = dma_aligned_pow2_mask(start, end, 64);
        size = mask + 1;
        event.entry.iova = start;
        event.entry.addr_mask = mask;
        memory_region_notify_iommu(&iommu->iommu_mr, 0, event);
        start += size;
        remain -= size;
    }
}

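/*
 * Handler for the RPCIT (Refresh PCI Translations) instruction: re-walk the
 * guest DMA translation tables for the given range and bring the shadow
 * IOTLB, and with it the host IOMMU mappings, back in sync with the guest.
 */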
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint64_t iova, coalesce = 0;
    uint32_t fh;
    uint16_t error = 0;
    S390PCIBusDevice *pbdev;
    S390PCIIOMMU *iommu;
    S390IOTLBEntry entry;
    hwaddr start, end, sstart;
    uint32_t dma_avail;
    bool again;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    sstart = start = env->regs[r2];
    end = start + env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        trace_s390_pci_nodev("rpcit", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_MOD_ST_ERROR_RECOVER);
        return 0;
    default:
        break;
    }

    iommu = pbdev->iommu;
    if (iommu->dma_limit) {
        dma_avail = iommu->dma_limit->avail;
    } else {
        dma_avail = 1;
    }
    if (!iommu->g_iota) {
        error = ERR_EVENT_INVALAS;
        goto err;
    }

    if (end < iommu->pba || start > iommu->pal) {
        error = ERR_EVENT_OORANGE;
        goto err;
    }

retry:
    start = sstart;
    again = false;
    while (start < end) {
        error = s390_guest_io_table_walk(iommu->g_iota, start, &entry);
        if (error) {
            break;
        }

        /*
         * If this is an unmap of a PTE, let's try to coalesce multiple unmaps
         * into as few notifier events as possible.
         */
        if (entry.perm == IOMMU_NONE && entry.len == TARGET_PAGE_SIZE) {
            if (coalesce == 0) {
                iova = entry.iova;
            }
            coalesce += entry.len;
        } else if (coalesce > 0) {
            /* Unleash the coalesced unmap before processing a new map */
            s390_pci_batch_unmap(iommu, iova, coalesce);
            coalesce = 0;
        }

        start += entry.len;
        while (entry.iova < start && entry.iova < end) {
            if (dma_avail > 0 || entry.perm == IOMMU_NONE) {
                dma_avail = s390_pci_update_iotlb(iommu, &entry);
                entry.iova += TARGET_PAGE_SIZE;
                entry.translated_addr += TARGET_PAGE_SIZE;
            } else {
                /*
                 * We are unable to make a new mapping at this time, continue
                 * on and hopefully free up more space. Then attempt another
                 * pass.
                 */
                again = true;
                break;
            }
        }
    }
    if (coalesce) {
        /* Unleash the coalesced unmap before finishing rpcit */
        s390_pci_batch_unmap(iommu, iova, coalesce);
        coalesce = 0;
    }
    if (again && dma_avail > 0) {
        goto retry;
    }
err:
    if (error) {
        pbdev->state = ZPCI_FS_ERROR;
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_FUNC_IN_ERR);
        s390_pci_generate_error_event(error, pbdev->fh, pbdev->fid, start, 0);
    } else {
        pbdev->fmb.counter[ZPCI_FMB_CNT_RPCIT]++;
        if (dma_avail > 0) {
            setcc(cpu, ZPCI_PCI_LS_OK);
        } else {
            /* vfio DMA mappings are exhausted, report insufficient resources */
            setcc(cpu, ZPCI_PCI_LS_ERR);
            s390_set_status_code(env, r1, ZPCI_RPCIT_ST_INSUFF_RES);
        }
    }
    return 0;
}

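/*
 * Handler for the PCISTB (PCI Store Block) instruction: copy a block of up
 * to maxstbl bytes from guest memory to a device BAR in 8-byte writes.
 */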
int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
                        uint8_t ar, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    MemoryRegion *mr;
    MemTxResult result;
    uint64_t offset;
    int i;
    uint32_t fh;
    uint8_t pcias;
    uint16_t len;
    uint8_t buffer[128];

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    pcias = (env->regs[r1] >> 16) & 0xf;
    len = env->regs[r1] & 0x1fff;
    offset = env->regs[r3];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        trace_s390_pci_nodev("pcistb", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    if (pcias > ZPCI_IO_BAR_MAX) {
        trace_s390_pci_invalid("pcistb", fh);
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    /* Verify the address, offset and length */
    /* offset must be a multiple of 8 */
    if (offset % 8) {
        goto specification_error;
    }
    /* Length must be greater than 8, a multiple of 8 */
    /* and not greater than maxstbl */
    if ((len <= 8) || (len % 8) ||
        (len > pbdev->pci_group->zpci_group.maxstbl)) {
        goto specification_error;
    }
    /* Do not cross a 4K-byte boundary */
    if (((offset & 0xfff) + len) > 0x1000) {
        goto specification_error;
    }
    /* Guest address must be double word aligned */
    if (gaddr & 0x07UL) {
        goto specification_error;
    }

    mr = pbdev->pdev->io_regions[pcias].memory;
    mr = s390_get_subregion(mr, offset, len);
    offset -= mr->addr;

    for (i = 0; i < len; i += 8) {
        if (!memory_region_access_valid(mr, offset + i, 8, true,
                                        MEMTXATTRS_UNSPECIFIED)) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
    }

    if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    for (i = 0; i < len / 8; i++) {
        result = memory_region_dispatch_write(mr, offset + i * 8,
                                              ldq_be_p(buffer + i * 8),
                                              MO_64, MEMTXATTRS_UNSPECIFIED);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
    }

    pbdev->fmb.counter[ZPCI_FMB_CNT_STB]++;

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;

specification_error:
    s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    return 0;
}

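/*
 * Register adapter interrupts from the FIB: map the summary and per-device
 * indicator areas and record the routing data needed to deliver adapter
 * interrupts for this function.
 */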
static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    int ret, len;
    uint8_t isc = FIB_DATA_ISC(ldl_be_p(&fib.data));

    pbdev->routes.adapter.adapter_id = css_get_adapter_id(
                                       CSS_IO_ADAPTER_PCI, isc);
    pbdev->summary_ind = get_indicator(ldq_be_p(&fib.aisb), sizeof(uint64_t));
    len = BITS_TO_LONGS(FIB_DATA_NOI(ldl_be_p(&fib.data))) * sizeof(unsigned long);
    pbdev->indicator = get_indicator(ldq_be_p(&fib.aibv), len);

    ret = map_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    if (ret) {
        goto out;
    }

    ret = map_indicator(&pbdev->routes.adapter, pbdev->indicator);
    if (ret) {
        goto out;
    }

    pbdev->routes.adapter.summary_addr = ldq_be_p(&fib.aisb);
    pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_be_p(&fib.data));
    pbdev->routes.adapter.ind_addr = ldq_be_p(&fib.aibv);
    pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_be_p(&fib.data));
    pbdev->isc = isc;
    pbdev->noi = FIB_DATA_NOI(ldl_be_p(&fib.data));
    pbdev->sum = FIB_DATA_SUM(ldl_be_p(&fib.data));

    trace_s390_pci_irqs("register", pbdev->routes.adapter.adapter_id);
    return 0;
out:
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);
    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;
    return ret;
}

int pci_dereg_irqs(S390PCIBusDevice *pbdev)
{
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);

    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;
    pbdev->routes.adapter.summary_addr = 0;
    pbdev->routes.adapter.summary_offset = 0;
    pbdev->routes.adapter.ind_addr = 0;
    pbdev->routes.adapter.ind_offset = 0;
    pbdev->isc = 0;
    pbdev->noi = 0;
    pbdev->sum = 0;

    trace_s390_pci_irqs("unregister", pbdev->routes.adapter.adapter_id);
    return 0;
}

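/*
 * Register guest I/O address translation parameters from the FIB: validate
 * the PCI base/limit addresses against the function's DMA window, then
 * enable either translated or direct-mapped IOMMU handling.
 */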
static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib,
                    uintptr_t ra)
{
    S390PCIIOMMU *iommu = pbdev->iommu;
    uint64_t pba = ldq_be_p(&fib.pba);
    uint64_t pal = ldq_be_p(&fib.pal);
    uint64_t g_iota = ldq_be_p(&fib.iota);
    uint8_t dt = (g_iota >> 2) & 0x7;
    uint8_t t = (g_iota >> 11) & 0x1;

    pba &= ~0xfff;
    pal |= 0xfff;
    if (pba > pal || pba < pbdev->zpci_fn.sdma || pal > pbdev->zpci_fn.edma) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return -EINVAL;
    }

    /* currently we only support designation type 1 with translation */
    if (t && dt != ZPCI_IOTA_RTTO) {
        error_report("unsupported ioat dt %d t %d", dt, t);
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return -EINVAL;
    } else if (!t && !pbdev->rtr_avail) {
        error_report("relaxed translation not allowed");
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return -EINVAL;
    }

    iommu->pba = pba;
    iommu->pal = pal;
    iommu->g_iota = g_iota;

    if (t) {
        s390_pci_iommu_enable(iommu);
    } else {
        s390_pci_iommu_direct_map_enable(iommu);
    }

    return 0;
}

void pci_dereg_ioat(S390PCIIOMMU *iommu)
{
    s390_pci_iommu_disable(iommu);
    iommu->pba = 0;
    iommu->pal = 0;
    iommu->g_iota = 0;
}

void fmb_timer_free(S390PCIBusDevice *pbdev)
{
    if (pbdev->fmb_timer) {
        timer_free(pbdev->fmb_timer);
        pbdev->fmb_timer = NULL;
    }
    pbdev->fmb_addr = 0;
    memset(&pbdev->fmb, 0, sizeof(ZpciFmb));
}

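/*
 * Store a single function measurement block (FMB) field at @offset from the
 * registered FMB address, raising an error event and stopping measurement
 * if the write fails.
 */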
static int fmb_do_update(S390PCIBusDevice *pbdev, int offset, uint64_t val,
                         int len)
{
    MemTxResult ret;
    uint64_t dst = pbdev->fmb_addr + offset;

    switch (len) {
    case 8:
        address_space_stq_be(&address_space_memory, dst, val,
                             MEMTXATTRS_UNSPECIFIED,
                             &ret);
        break;
    case 4:
        address_space_stl_be(&address_space_memory, dst, val,
                             MEMTXATTRS_UNSPECIFIED,
                             &ret);
        break;
    case 2:
        address_space_stw_be(&address_space_memory, dst, val,
                             MEMTXATTRS_UNSPECIFIED,
                             &ret);
        break;
    case 1:
        address_space_stb(&address_space_memory, dst, val,
                          MEMTXATTRS_UNSPECIFIED,
                          &ret);
        break;
    default:
        ret = MEMTX_ERROR;
        break;
    }
    if (ret != MEMTX_OK) {
        s390_pci_generate_error_event(ERR_EVENT_FMBA, pbdev->fh, pbdev->fid,
                                      pbdev->fmb_addr, 0);
        fmb_timer_free(pbdev);
    }

    return ret;
}

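/*
 * Timer callback that publishes the current FMB contents to the guest,
 * bracketing the writes with the update-in-progress (U) bit, and re-arms
 * the timer with the group's measurement update interval.
 */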
static void fmb_update(void *opaque)
{
    S390PCIBusDevice *pbdev = opaque;
    int64_t t = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    int i;

    /* Update U bit */
    pbdev->fmb.last_update *= 2;
    pbdev->fmb.last_update |= UPDATE_U_BIT;
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, last_update),
                      pbdev->fmb.last_update,
                      sizeof(pbdev->fmb.last_update))) {
        return;
    }

    /* Update FMB sample count */
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, sample),
                      pbdev->fmb.sample++,
                      sizeof(pbdev->fmb.sample))) {
        return;
    }

    /* Update FMB counters */
    for (i = 0; i < ZPCI_FMB_CNT_MAX; i++) {
        if (fmb_do_update(pbdev, offsetof(ZpciFmb, counter[i]),
                          pbdev->fmb.counter[i],
                          sizeof(pbdev->fmb.counter[0]))) {
            return;
        }
    }

    /* Clear U bit and update the time */
    pbdev->fmb.last_update = time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
    pbdev->fmb.last_update *= 2;
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, last_update),
                      pbdev->fmb.last_update,
                      sizeof(pbdev->fmb.last_update))) {
        return;
    }
    timer_mod(pbdev->fmb_timer, t + pbdev->pci_group->zpci_group.mui);
}

static int mpcifc_reg_int_interp(S390PCIBusDevice *pbdev, ZpciFib *fib)
{
    int rc;

    rc = s390_pci_kvm_aif_enable(pbdev, fib, pbdev->forwarding_assist);
    if (rc) {
        trace_s390_pci_kvm_aif("enable");
        return rc;
    }

    return 0;
}

static int mpcifc_dereg_int_interp(S390PCIBusDevice *pbdev, ZpciFib *fib)
{
    int rc;

    rc = s390_pci_kvm_aif_disable(pbdev);
    if (rc) {
        trace_s390_pci_kvm_aif("disable");
        return rc;
    }

    return 0;
}

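/*
 * Handler for the MPCIFC (Modify PCI Function Controls) instruction:
 * dispatch on the operation control to register or deregister interrupts
 * and I/O address translation, reset error/blocked state, or set up
 * function measurement.
 */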
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
                        uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint8_t oc, dmaas;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    oc = env->regs[r1] & 0xff;
    dmaas = (env->regs[r1] >> 16) & 0xff;
    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        trace_s390_pci_nodev("mpcifc", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    default:
        break;
    }

    if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    if (fib.fmt != 0) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    switch (oc) {
    case ZPCI_MOD_FC_REG_INT:
        if (pbdev->interp) {
            if (mpcifc_reg_int_interp(pbdev, &fib)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
            }
        } else if (pbdev->summary_ind) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_irqs(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_RES_NOT_AVAIL);
        }
        break;
    case ZPCI_MOD_FC_DEREG_INT:
        if (pbdev->interp) {
            if (mpcifc_dereg_int_interp(pbdev, &fib)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
            }
        } else if (!pbdev->summary_ind) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_irqs(pbdev);
        }
        break;
    case ZPCI_MOD_FC_REG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_ioat(env, pbdev, fib, ra)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
        }
        break;
    case ZPCI_MOD_FC_DEREG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev->iommu);
        }
        break;
    case ZPCI_MOD_FC_REREG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev->iommu);
            if (reg_ioat(env, pbdev, fib, ra)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
            }
        }
        break;
    case ZPCI_MOD_FC_RESET_ERROR:
        switch (pbdev->state) {
        case ZPCI_FS_BLOCKED:
        case ZPCI_FS_ERROR:
            pbdev->state = ZPCI_FS_ENABLED;
            break;
        default:
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        }
        break;
    case ZPCI_MOD_FC_RESET_BLOCK:
        switch (pbdev->state) {
        case ZPCI_FS_ERROR:
            pbdev->state = ZPCI_FS_BLOCKED;
            break;
        default:
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        }
        break;
    case ZPCI_MOD_FC_SET_MEASURE: {
        uint64_t fmb_addr = ldq_be_p(&fib.fmb_addr);

        if (fmb_addr & FMBK_MASK) {
            cc = ZPCI_PCI_LS_ERR;
            s390_pci_generate_error_event(ERR_EVENT_FMBPRO, pbdev->fh,
                                          pbdev->fid, fmb_addr, 0);
            fmb_timer_free(pbdev);
            break;
        }

        if (!fmb_addr) {
            /* Stop updating FMB. */
            fmb_timer_free(pbdev);
            break;
        }

        if (!pbdev->fmb_timer) {
            pbdev->fmb_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                            fmb_update, pbdev);
        } else if (timer_pending(pbdev->fmb_timer)) {
            /* Remove pending timer to update FMB address. */
            timer_del(pbdev->fmb_timer);
        }
        pbdev->fmb_addr = fmb_addr;
        timer_mod(pbdev->fmb_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                  pbdev->pci_group->zpci_group.mui);
        break;
    }
    default:
        s390_program_interrupt(&cpu->env, PGM_OPERAND, ra);
        cc = ZPCI_PCI_LS_ERR;
    }

    setcc(cpu, cc);
    return 0;
}

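/*
 * Handler for the STPCIFC (Store PCI Function Controls) instruction: build
 * a FIB reflecting the function's current state and controls and store it
 * to the guest address in fiba.
 */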
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
                         uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint8_t dmaas;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint32_t data;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    dmaas = (env->regs[r1] >> 16) & 0xff;

    if (dmaas) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_INVAL_DMAAS);
        return 0;
    }

    if (fiba & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_idx(s390_get_phb(), fh & FH_MASK_INDEX);
    if (!pbdev) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    memset(&fib, 0, sizeof(fib));

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_DISABLED:
        if (fh & FH_MASK_ENABLE) {
            setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
            return 0;
        }
        goto out;
    /*
     * BLOCKED bit is set to one coincident with the setting of ERROR bit.
     * FH Enabled bit is set to one in states of ENABLED, BLOCKED or ERROR.
     */
    case ZPCI_FS_ERROR:
        fib.fc |= 0x20;
        /* fallthrough */
    case ZPCI_FS_BLOCKED:
        fib.fc |= 0x40;
        /* fallthrough */
    case ZPCI_FS_ENABLED:
        fib.fc |= 0x80;
        if (pbdev->iommu->enabled) {
            fib.fc |= 0x10;
        }
        if (!(fh & FH_MASK_ENABLE)) {
            env->regs[r1] |= 1ULL << 63;
        }
        break;
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_PERM_ERROR);
        return 0;
    }

    stq_be_p(&fib.pba, pbdev->iommu->pba);
    stq_be_p(&fib.pal, pbdev->iommu->pal);
    stq_be_p(&fib.iota, pbdev->iommu->g_iota);
    stq_be_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
    stq_be_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
    stq_be_p(&fib.fmb_addr, pbdev->fmb_addr);

    data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
           ((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
           ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
    stl_be_p(&fib.data, data);

out:
    if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    setcc(cpu, cc);
    return 0;
}