1 /*
2 * s390 PCI instructions
3 *
4 * Copyright 2014 IBM Corp.
5 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
6 * Hong Bo Li <lihbbj@cn.ibm.com>
7 * Yi Min Zhao <zyimin@cn.ibm.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2 or (at
10 * your option) any later version. See the COPYING file in the top-level
11 * directory.
12 */
13
14 #include "qemu/osdep.h"
15 #include "exec/memop.h"
16 #include "exec/memory.h"
17 #include "qemu/error-report.h"
18 #include "system/hw_accel.h"
19 #include "hw/boards.h"
20 #include "hw/pci/pci_device.h"
21 #include "hw/s390x/s390-pci-inst.h"
22 #include "hw/s390x/s390-pci-bus.h"
23 #include "hw/s390x/s390-pci-kvm.h"
24 #include "hw/s390x/s390-pci-vfio.h"
25 #include "hw/s390x/tod.h"
26
27 #include "trace.h"
28
/* Return one mapping's worth of credit to the shared vfio DMA limit, if set. */
static inline void inc_dma_avail(S390PCIIOMMU *iommu)
{
    if (iommu->dma_limit != NULL) {
        iommu->dma_limit->avail += 1;
    }
}
35
/* Consume one mapping's worth of credit from the shared vfio DMA limit, if set. */
static inline void dec_dma_avail(S390PCIIOMMU *iommu)
{
    if (iommu->dma_limit != NULL) {
        iommu->dma_limit->avail -= 1;
    }
}
42
/* Store an 8-bit status code into bits 24-31 of general register r. */
static void s390_set_status_code(CPUS390XState *env,
                                 uint8_t r, uint64_t status_code)
{
    uint64_t reg = env->regs[r];

    reg &= ~0xff000000ULL;
    reg |= (status_code & 0xff) << 24;
    env->regs[r] = reg;
}
49
/*
 * CLP List PCI: fill the response area with one ClpFhListEntry per available
 * zPCI function, resuming at the function-handle index given by the resume
 * token when one is supplied.
 *
 * Returns 0 on success or -EINVAL on a malformed request; on error the
 * matching CLP response code is stored in the response header.
 */
static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
{
    S390PCIBusDevice *pbdev = NULL;
    S390pciState *s = s390_get_phb();
    uint32_t res_code, initial_l2, g_l2;
    int rc, i;
    uint64_t resume_token;

    rc = 0;
    /* The LIST PCI request header must be exactly 32 bytes. */
    if (lduw_be_p(&rrb->request.hdr.len) != 32) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        goto out;
    }

    /* Only request format 0 is supported. */
    if ((ldl_be_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
        res_code = CLP_RC_FMT;
        rc = -EINVAL;
        goto out;
    }

    /* Reserved fields must be zero. */
    if ((ldl_be_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
        ldq_be_p(&rrb->request.reserved1) != 0) {
        res_code = CLP_RC_RESNOT0;
        rc = -EINVAL;
        goto out;
    }

    resume_token = ldq_be_p(&rrb->request.resume_token);

    if (resume_token) {
        /* Continue a previously truncated listing at the saved index. */
        pbdev = s390_pci_find_dev_by_idx(s, resume_token);
        if (!pbdev) {
            res_code = CLP_RC_LISTPCI_BADRT;
            rc = -EINVAL;
            goto out;
        }
    } else {
        pbdev = s390_pci_find_next_avail_dev(s, NULL);
    }

    /* The response area must at least hold the header plus one entry. */
    if (lduw_be_p(&rrb->response.hdr.len) < 48) {
        res_code = CLP_RC_8K;
        rc = -EINVAL;
        goto out;
    }

    initial_l2 = lduw_be_p(&rrb->response.hdr.len);
    /* Space after the header must hold a whole number of entries. */
    if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry)
        != 0) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        *cc = 3;
        goto out;
    }

    stl_be_p(&rrb->response.fmt, 0);
    stq_be_p(&rrb->response.reserved1, 0);
    stl_be_p(&rrb->response.mdd, FH_MASK_SHM);
    stw_be_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
    rrb->response.flags = UID_CHECKING_ENABLED;
    rrb->response.entry_size = sizeof(ClpFhListEntry);

    /* Emit entries until the response area is full or devices run out. */
    i = 0;
    g_l2 = LIST_PCI_HDR_LEN;
    while (g_l2 < initial_l2 && pbdev) {
        stw_be_p(&rrb->response.fh_list[i].device_id,
            pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
        stw_be_p(&rrb->response.fh_list[i].vendor_id,
            pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
        /* Ignore RESERVED devices. */
        stl_be_p(&rrb->response.fh_list[i].config,
            pbdev->state == ZPCI_FS_STANDBY ? 0 : 1 << 31);
        stl_be_p(&rrb->response.fh_list[i].fid, pbdev->fid);
        stl_be_p(&rrb->response.fh_list[i].fh, pbdev->fh);

        g_l2 += sizeof(ClpFhListEntry);
        /* Add endian check for DPRINTF? */
        trace_s390_pci_list_entry(g_l2,
            lduw_be_p(&rrb->response.fh_list[i].vendor_id),
            lduw_be_p(&rrb->response.fh_list[i].device_id),
            ldl_be_p(&rrb->response.fh_list[i].fid),
            ldl_be_p(&rrb->response.fh_list[i].fh));
        pbdev = s390_pci_find_next_avail_dev(s, pbdev);
        i++;
    }

    /* If devices remain, hand the guest a token to resume the listing. */
    if (!pbdev) {
        resume_token = 0;
    } else {
        resume_token = pbdev->fh & FH_MASK_INDEX;
    }
    stq_be_p(&rrb->response.resume_token, resume_token);
    stw_be_p(&rrb->response.hdr.len, g_l2);
    stw_be_p(&rrb->response.hdr.rsp, CLP_RC_OK);
out:
    if (rc) {
        trace_s390_pci_list(rc);
        stw_be_p(&rrb->response.hdr.rsp, res_code);
    }
    return rc;
}
152
/*
 * CLP (Call Logical Processor) service-call handler.
 *
 * Reads a CLP request block from guest memory at regs[r2], dispatches on the
 * command code (list / set function / query function / query group), builds
 * the response in a scratch buffer, and writes the request + response area
 * back to the guest. Always returns 0; failures are reported to the guest
 * via a program interrupt or a CLP response code.
 */
int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
{
    ClpReqHdr *reqh;
    ClpRspHdr *resh;
    S390PCIBusDevice *pbdev;
    uint32_t req_len;
    uint32_t res_len;
    uint8_t buffer[4096 * 2];
    uint8_t cc = 0;
    CPUS390XState *env = &cpu->env;
    S390pciState *s = s390_get_phb();
    int i;

    /* CLP is a privileged instruction. */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    /* First fetch just the request header to learn the request length. */
    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    reqh = (ClpReqHdr *)buffer;
    req_len = lduw_be_p(&reqh->len);
    if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    /* The response header immediately follows the request in the block. */
    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + sizeof(*resh))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    resh = (ClpRspHdr *)(buffer + req_len);
    res_len = lduw_be_p(&resh->len);
    if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }
    /* Request plus response must fit in the 8K request block. */
    if ((req_len + res_len) > 8192) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    /* Now pull in the complete request + response area. */
    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + res_len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    /* All currently supported commands use a 32-byte request. */
    if (req_len != 32) {
        stw_be_p(&resh->rsp, CLP_RC_LEN);
        goto out;
    }

    switch (lduw_be_p(&reqh->cmd)) {
    case CLP_LIST_PCI: {
        ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
        list_pci(rrb, &cc);
        break;
    }
    case CLP_SET_PCI_FN: {
        ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
        ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(s, ldl_be_p(&reqsetpci->fh));
        if (!pbdev) {
            stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        switch (reqsetpci->oc) {
        case CLP_SET_ENABLE_PCI_FN:
            /* Exactly one DMA address space must be requested. */
            switch (reqsetpci->ndas) {
            case 0:
                stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_DMAAS);
                goto out;
            case 1:
                break;
            default:
                stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_RES);
                goto out;
            }

            /* Enabling an already-enabled function is an error. */
            if (pbdev->fh & FH_MASK_ENABLE) {
                stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
                goto out;
            }

            /*
             * Take this opportunity to make sure we still have an accurate
             * host fh. It's possible part of the handle changed while the
             * device was disabled to the guest (e.g. vfio hot reset for
             * ISM during plug)
             */
            if (pbdev->interp) {
                /* Take this opportunity to make sure we are sync'd with host */
                if (!s390_pci_get_host_fh(pbdev, &pbdev->fh) ||
                    !(pbdev->fh & FH_MASK_ENABLE)) {
                    stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
                    goto out;
                }
            }
            pbdev->fh |= FH_MASK_ENABLE;
            pbdev->state = ZPCI_FS_ENABLED;
            stl_be_p(&ressetpci->fh, pbdev->fh);
            stw_be_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        case CLP_SET_DISABLE_PCI_FN:
            /* Disabling an already-disabled function is an error. */
            if (!(pbdev->fh & FH_MASK_ENABLE)) {
                stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
                goto out;
            }
            device_cold_reset(DEVICE(pbdev));
            pbdev->fh &= ~FH_MASK_ENABLE;
            pbdev->state = ZPCI_FS_DISABLED;
            stl_be_p(&ressetpci->fh, pbdev->fh);
            stw_be_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        default:
            trace_s390_pci_unknown("set-pci", reqsetpci->oc);
            stw_be_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
            break;
        }
        break;
    }
    case CLP_QUERY_PCI_FN: {
        ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
        ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(s, ldl_be_p(&reqquery->fh));
        if (!pbdev) {
            trace_s390_pci_nodev("query", ldl_be_p(&reqquery->fh));
            stw_be_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        /* Copy the cached zPCI function attributes into the response. */
        stq_be_p(&resquery->sdma, pbdev->zpci_fn.sdma);
        stq_be_p(&resquery->edma, pbdev->zpci_fn.edma);
        stw_be_p(&resquery->pchid, pbdev->zpci_fn.pchid);
        stw_be_p(&resquery->vfn, pbdev->zpci_fn.vfn);
        resquery->flags = pbdev->zpci_fn.flags;
        resquery->pfgid = pbdev->zpci_fn.pfgid;
        resquery->pft = pbdev->zpci_fn.pft;
        resquery->fmbl = pbdev->zpci_fn.fmbl;
        stl_be_p(&resquery->fid, pbdev->zpci_fn.fid);
        stl_be_p(&resquery->uid, pbdev->zpci_fn.uid);
        memcpy(resquery->pfip, pbdev->zpci_fn.pfip, CLP_PFIP_NR_SEGMENTS);
        memcpy(resquery->util_str, pbdev->zpci_fn.util_str, CLP_UTIL_STR_LEN);

        /* Report each BAR's value and its size as a power of two. */
        for (i = 0; i < PCI_BAR_COUNT; i++) {
            uint32_t data = pci_get_long(pbdev->pdev->config +
                PCI_BASE_ADDRESS_0 + (i * 4));

            stl_be_p(&resquery->bar[i], data);
            resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
                                    ctz64(pbdev->pdev->io_regions[i].size) : 0;
            trace_s390_pci_bar(i,
                    ldl_be_p(&resquery->bar[i]),
                    pbdev->pdev->io_regions[i].size,
                    resquery->bar_size[i]);
        }

        stw_be_p(&resquery->hdr.rsp, CLP_RC_OK);
        break;
    }
    case CLP_QUERY_PCI_FNGRP: {
        ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;

        ClpReqQueryPciGrp *reqgrp = (ClpReqQueryPciGrp *)reqh;
        S390PCIGroup *group;

        group = s390_group_find(reqgrp->g);
        if (!group) {
            /* We do not allow access to unknown groups */
            /* The group must have been obtained with a vfio device */
            stw_be_p(&resgrp->hdr.rsp, CLP_RC_QUERYPCIFG_PFGID);
            goto out;
        }
        resgrp->fr = group->zpci_group.fr;
        stq_be_p(&resgrp->dasm, group->zpci_group.dasm);
        stq_be_p(&resgrp->msia, group->zpci_group.msia);
        stw_be_p(&resgrp->mui, group->zpci_group.mui);
        stw_be_p(&resgrp->i, group->zpci_group.i);
        stw_be_p(&resgrp->maxstbl, group->zpci_group.maxstbl);
        resgrp->version = group->zpci_group.version;
        resgrp->dtsm = group->zpci_group.dtsm;
        stw_be_p(&resgrp->hdr.rsp, CLP_RC_OK);
        break;
    }
    default:
        trace_s390_pci_unknown("clp", lduw_be_p(&reqh->cmd));
        stw_be_p(&resh->rsp, CLP_RC_CMD);
        break;
    }

out:
    /* Write the whole request + response area back to the guest. */
    if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer,
                                req_len + res_len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    setcc(cpu, cc);
    return 0;
}
359
360 /**
361 * Swap data contained in s390x big endian registers to little endian
362 * PCI bars.
363 *
364 * @ptr: a pointer to a uint64_t data field
365 * @len: the length of the valid data, must be 1,2,4 or 8
366 */
/**
 * Swap data contained in s390x big endian registers to little endian
 * PCI bars.
 *
 * @ptr: a pointer to a uint64_t data field
 * @len: the length of the valid data, must be 1,2,4 or 8
 *
 * Returns 0 on success, -EINVAL for an unsupported length; *ptr is left
 * untouched on failure. Only the low @len bytes of *ptr are significant
 * afterwards (wider input bits are discarded, matching a narrowing swap).
 */
static int zpci_endian_swap(uint64_t *ptr, uint8_t len)
{
    uint64_t v = *ptr;

    switch (len) {
    case 1:
        /* A single byte has no endianness. */
        break;
    case 2:
        v = ((v & 0x00ffULL) << 8) |
            ((v >> 8) & 0x00ffULL);
        break;
    case 4:
        v = ((v & 0x000000ffULL) << 24) |
            ((v & 0x0000ff00ULL) << 8)  |
            ((v >> 8) & 0x0000ff00ULL)  |
            ((v >> 24) & 0x000000ffULL);
        break;
    case 8:
        v = ((v & 0x00000000000000ffULL) << 56) |
            ((v & 0x000000000000ff00ULL) << 40) |
            ((v & 0x0000000000ff0000ULL) << 24) |
            ((v & 0x00000000ff000000ULL) << 8)  |
            ((v >> 8)  & 0x00000000ff000000ULL) |
            ((v >> 24) & 0x0000000000ff0000ULL) |
            ((v >> 40) & 0x000000000000ff00ULL) |
            ((v >> 56) & 0x00000000000000ffULL);
        break;
    default:
        return -EINVAL;
    }
    *ptr = v;
    return 0;
}
389
/*
 * Resolve an access of @len bytes at @offset to the child region that
 * fully contains it, if any; otherwise return @mr itself.
 */
static MemoryRegion *s390_get_subregion(MemoryRegion *mr, uint64_t offset,
                                        uint8_t len)
{
    MemoryRegion *child;

    QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
        uint64_t child_size = int128_get64(child->size);

        if (offset >= child->addr &&
            (offset + len) <= (child->addr + child_size)) {
            return child;
        }
    }

    /* No matching child; the access targets the region itself. */
    return mr;
}
406
zpci_read_bar(S390PCIBusDevice * pbdev,uint8_t pcias,uint64_t offset,uint64_t * data,uint8_t len)407 static MemTxResult zpci_read_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
408 uint64_t offset, uint64_t *data, uint8_t len)
409 {
410 MemoryRegion *mr;
411
412 mr = pbdev->pdev->io_regions[pcias].memory;
413 mr = s390_get_subregion(mr, offset, len);
414 offset -= mr->addr;
415 return memory_region_dispatch_read(mr, offset, data,
416 size_memop(len) | MO_BE,
417 MEMTXATTRS_UNSPECIFIED);
418 }
419
/*
 * PCILG (PCI Load) handler: read up to 8 bytes from a zPCI function's BAR
 * or configuration space into regs[r1].
 *
 * regs[r2] encodes the function handle (bits 63-32), the PCI address space
 * (pcias) and the access length; regs[r2 + 1] holds the offset, so r2 must
 * designate an even/odd register pair. Always returns 0; outcomes are
 * reported via the condition code, status code or a program interrupt.
 */
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    uint64_t offset;
    uint64_t data;
    MemTxResult result;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    /* Privileged instruction. */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    /* r2 must be an even register (start of a register pair). */
    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        trace_s390_pci_nodev("pcilg", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    switch (pcias) {
    case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
        /* Length must be non-zero and not cross a doubleword boundary. */
        if (!len || (len > (8 - (offset & 0x7)))) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        result = zpci_read_bar(pbdev, pcias, offset, &data, len);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    case ZPCI_CONFIG_BAR:
        /* Config space: length 1, 2 or 4, must not cross a word boundary. */
        if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        data =  pci_host_config_read_common(
                   pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);

        if (zpci_endian_swap(&data, len)) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    default:
        trace_s390_pci_invalid("pcilg", fh);
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    /* Account the load in the function measurement block counters. */
    pbdev->fmb.counter[ZPCI_FMB_CNT_LD]++;

    env->regs[r1] = data;
    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}
506
zpci_write_bar(S390PCIBusDevice * pbdev,uint8_t pcias,uint64_t offset,uint64_t data,uint8_t len)507 static MemTxResult zpci_write_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
508 uint64_t offset, uint64_t data, uint8_t len)
509 {
510 MemoryRegion *mr;
511
512 mr = pbdev->pdev->io_regions[pcias].memory;
513 mr = s390_get_subregion(mr, offset, len);
514 offset -= mr->addr;
515 return memory_region_dispatch_write(mr, offset, data,
516 size_memop(len) | MO_BE,
517 MEMTXATTRS_UNSPECIFIED);
518 }
519
/*
 * PCISTG (PCI Store) handler: write up to 8 bytes from regs[r1] to a zPCI
 * function's BAR or configuration space.
 *
 * regs[r2] encodes the function handle (bits 63-32), the PCI address space
 * (pcias) and the access length; regs[r2 + 1] holds the offset, so r2 must
 * designate an even/odd register pair. Always returns 0; outcomes are
 * reported via the condition code, status code or a program interrupt.
 */
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint64_t offset, data;
    S390PCIBusDevice *pbdev;
    MemTxResult result;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    /* Privileged instruction. */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    /* r2 must be an even register (start of a register pair). */
    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];
    data = env->regs[r1];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        trace_s390_pci_nodev("pcistg", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    /* ZPCI_FS_RESERVED, ZPCI_FS_STANDBY and ZPCI_FS_DISABLED
     * are already covered by the FH_MASK_ENABLE check above
     */
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    switch (pcias) {
    /* A ZPCI PCI card may use any BAR from BAR 0 to BAR 5 */
    case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
        /* Check length:
         * A length of 0 is invalid and length should not cross a double word
         */
        if (!len || (len > (8 - (offset & 0x7)))) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }

        result = zpci_write_bar(pbdev, pcias, offset, data, len);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    case ZPCI_CONFIG_BAR:
        /* ZPCI uses the pseudo BAR number 15 as configuration space */
        /* possible access lengths are 1,2,4 and must not cross a word */
        if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        /* len = 1,2,4 so we do not need to test */
        zpci_endian_swap(&data, len);
        pci_host_config_write_common(pbdev->pdev, offset,
                                     pci_config_size(pbdev->pdev),
                                     data, len);
        break;
    default:
        trace_s390_pci_invalid("pcistg", fh);
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    /* Account the store in the function measurement block counters. */
    pbdev->fmb.counter[ZPCI_FMB_CNT_ST]++;

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}
613
/*
 * Apply one guest IOTLB entry to the shadow IOTLB cache and notify the
 * IOMMU region as needed.
 *
 * Unmaps only remove the cached entry here (notification is deferred so the
 * caller can coalesce contiguous unmaps); maps notify immediately, emitting
 * an unmap first if a different mapping for the same iova is cached.
 *
 * Returns the remaining vfio DMA mapping budget, or 1 if no limit applies.
 */
static uint32_t s390_pci_update_iotlb(S390PCIIOMMU *iommu,
                                      S390IOTLBEntry *entry)
{
    S390IOTLBEntry *cache = g_hash_table_lookup(iommu->iotlb, &entry->iova);
    IOMMUTLBEvent event = {
        .type = entry->perm ? IOMMU_NOTIFIER_MAP : IOMMU_NOTIFIER_UNMAP,
        .entry = {
            .target_as = &address_space_memory,
            .iova = entry->iova,
            .translated_addr = entry->translated_addr,
            .perm = entry->perm,
            .addr_mask = ~TARGET_PAGE_MASK,
        },
    };

    if (event.type == IOMMU_NOTIFIER_UNMAP) {
        /* Unmapping something that was never cached is a no-op. */
        if (!cache) {
            goto out;
        }
        g_hash_table_remove(iommu->iotlb, &entry->iova);
        inc_dma_avail(iommu);
        /* Don't notify the iommu yet, maybe we can bundle contiguous unmaps */
        goto out;
    } else {
        if (cache) {
            /* Identical mapping already cached: nothing to do. */
            if (cache->perm == entry->perm &&
                cache->translated_addr == entry->translated_addr) {
                goto out;
            }

            /* Mapping changed: unmap the stale entry before re-mapping. */
            event.type = IOMMU_NOTIFIER_UNMAP;
            event.entry.perm = IOMMU_NONE;
            memory_region_notify_iommu(&iommu->iommu_mr, 0, event);
            event.type = IOMMU_NOTIFIER_MAP;
            event.entry.perm = entry->perm;
        }

        /* Cache the new page-sized mapping; consumes one DMA credit. */
        cache = g_new(S390IOTLBEntry, 1);
        cache->iova = entry->iova;
        cache->translated_addr = entry->translated_addr;
        cache->len = TARGET_PAGE_SIZE;
        cache->perm = entry->perm;
        g_hash_table_replace(iommu->iotlb, &cache->iova, cache);
        dec_dma_avail(iommu);
    }

    /*
     * All associated iotlb entries have already been cleared, trigger the
     * unmaps.
     */
    memory_region_notify_iommu(&iommu->iommu_mr, 0, event);

out:
    return iommu->dma_limit ? iommu->dma_limit->avail : 1;
}
669
/*
 * Notify the IOMMU of an unmap covering [@iova, @iova + @len), split into
 * the largest naturally-aligned power-of-two chunks possible.
 */
static void s390_pci_batch_unmap(S390PCIIOMMU *iommu, uint64_t iova,
                                 uint64_t len)
{
    uint64_t cur = iova;
    uint64_t last = iova + len - 1;
    uint64_t left = len;
    IOMMUTLBEvent event = {
        .type = IOMMU_NOTIFIER_UNMAP,
        .entry = {
            .target_as = &address_space_memory,
            .translated_addr = 0,
            .perm = IOMMU_NONE,
        },
    };

    while (left >= TARGET_PAGE_SIZE) {
        uint64_t mask = dma_aligned_pow2_mask(cur, last, 64);
        uint64_t chunk = mask + 1;

        event.entry.iova = cur;
        event.entry.addr_mask = mask;
        memory_region_notify_iommu(&iommu->iommu_mr, 0, event);
        cur += chunk;
        left -= chunk;
    }
}
693
/*
 * RPCIT (Refresh PCI Translations) handler: walk the guest's I/O translation
 * tables for the range [regs[r2], regs[r2] + regs[r2 + 1]) and replay the
 * resulting map/unmap operations into the shadow IOTLB and IOMMU notifiers.
 *
 * Contiguous page unmaps are coalesced into batched notifications. If the
 * vfio DMA mapping budget runs out mid-pass, unmaps continue (freeing
 * budget) and the walk is retried. Always returns 0; outcomes are reported
 * via the condition code, status code or an error event.
 */
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint64_t iova, coalesce = 0;
    uint32_t fh;
    uint16_t error = 0;
    S390PCIBusDevice *pbdev;
    S390PCIIOMMU *iommu;
    S390IOTLBEntry entry;
    hwaddr start, end, sstart;
    uint32_t dma_avail;
    bool again;

    /* Privileged instruction. */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    /* r2 must be an even register (start of a register pair). */
    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    sstart = start = env->regs[r2];
    end = start + env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        trace_s390_pci_nodev("rpcit", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_MOD_ST_ERROR_RECOVER);
        return 0;
    default:
        break;
    }

    iommu = pbdev->iommu;
    if (iommu->dma_limit) {
        dma_avail = iommu->dma_limit->avail;
    } else {
        dma_avail = 1;
    }
    /* An I/O address translation table must have been registered. */
    if (!iommu->g_iota) {
        error = ERR_EVENT_INVALAS;
        goto err;
    }

    /* The range must intersect the registered PCI base/limit window. */
    if (end < iommu->pba || start > iommu->pal) {
        error = ERR_EVENT_OORANGE;
        goto err;
    }

 retry:
    start = sstart;
    again = false;
    while (start < end) {
        error = s390_guest_io_table_walk(iommu->g_iota, start, &entry);
        if (error) {
            break;
        }

        /*
         * If this is an unmap of a PTE, let's try to coalesce multiple unmaps
         * into as few notifier events as possible.
         */
        if (entry.perm == IOMMU_NONE && entry.len == TARGET_PAGE_SIZE) {
            if (coalesce == 0) {
                iova = entry.iova;
            }
            coalesce += entry.len;
        } else if (coalesce > 0) {
            /* Unleash the coalesced unmap before processing a new map */
            s390_pci_batch_unmap(iommu, iova, coalesce);
            coalesce = 0;
        }

        start += entry.len;
        while (entry.iova < start && entry.iova < end) {
            if (dma_avail > 0 || entry.perm == IOMMU_NONE) {
                dma_avail = s390_pci_update_iotlb(iommu, &entry);
                entry.iova += TARGET_PAGE_SIZE;
                entry.translated_addr += TARGET_PAGE_SIZE;
            } else {
                /*
                 * We are unable to make a new mapping at this time, continue
                 * on and hopefully free up more space. Then attempt another
                 * pass.
                 */
                again = true;
                break;
            }
        }
    }
    if (coalesce) {
        /* Unleash the coalesced unmap before finishing rpcit */
        s390_pci_batch_unmap(iommu, iova, coalesce);
        coalesce = 0;
    }
    if (again && dma_avail > 0) {
        goto retry;
    }
 err:
    if (error) {
        pbdev->state = ZPCI_FS_ERROR;
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_FUNC_IN_ERR);
        s390_pci_generate_error_event(error, pbdev->fh, pbdev->fid, start, 0);
    } else {
        pbdev->fmb.counter[ZPCI_FMB_CNT_RPCIT]++;
        if (dma_avail > 0) {
            setcc(cpu, ZPCI_PCI_LS_OK);
        } else {
            /* vfio DMA mappings are exhausted, trigger a RPCIT */
            setcc(cpu, ZPCI_PCI_LS_ERR);
            s390_set_status_code(env, r1, ZPCI_RPCIT_ST_INSUFF_RES);
        }
    }
    return 0;
}
825
/*
 * PCISTB (PCI Store Block) handler: copy a block of up to maxstbl bytes
 * from guest memory at @gaddr to a zPCI function's BAR space, in 8-byte
 * big-endian chunks.
 *
 * regs[r1] encodes the function handle (bits 63-32), the PCI address space
 * and the length; regs[r3] holds the BAR offset. Always returns 0; outcomes
 * are reported via the condition code, status code or a program interrupt.
 */
int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
                        uint8_t ar, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    MemoryRegion *mr;
    MemTxResult result;
    uint64_t offset;
    int i;
    uint32_t fh;
    uint8_t pcias;
    uint16_t len;
    /*
     * NOTE(review): 128-byte staging buffer; the length check below only
     * bounds len by the group's maxstbl — presumably group setup caps
     * maxstbl at 128. Confirm against the group initialization code.
     */
    uint8_t buffer[128];

    /* Privileged instruction. */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    pcias = (env->regs[r1] >> 16) & 0xf;
    len = env->regs[r1] & 0x1fff;
    offset = env->regs[r3];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        trace_s390_pci_nodev("pcistb", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    /* Block stores target real BARs only, not the config pseudo-BAR. */
    if (pcias > ZPCI_IO_BAR_MAX) {
        trace_s390_pci_invalid("pcistb", fh);
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    /* Verify the address, offset and length */
    /* offset must be a multiple of 8 */
    if (offset % 8) {
        goto specification_error;
    }
    /* Length must be greater than 8, a multiple of 8 */
    /* and not greater than maxstbl */
    if ((len <= 8) || (len % 8) ||
        (len > pbdev->pci_group->zpci_group.maxstbl)) {
        goto specification_error;
    }
    /* Do not cross a 4K-byte boundary */
    if (((offset & 0xfff) + len) > 0x1000) {
        goto specification_error;
    }
    /* Guest address must be double word aligned */
    if (gaddr & 0x07UL) {
        goto specification_error;
    }

    mr = pbdev->pdev->io_regions[pcias].memory;
    mr = s390_get_subregion(mr, offset, len);
    offset -= mr->addr;

    /* Validate the whole destination range before touching any of it. */
    for (i = 0; i < len; i += 8) {
        if (!memory_region_access_valid(mr, offset + i, 8, true,
                                        MEMTXATTRS_UNSPECIFIED)) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
    }

    if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    /* Copy the staged data to the device in 8-byte big-endian stores. */
    for (i = 0; i < len / 8; i++) {
        result = memory_region_dispatch_write(mr, offset + i * 8,
                                              ldq_be_p(buffer + i * 8),
                                              MO_64, MEMTXATTRS_UNSPECIFIED);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
    }

    /* Account the block store in the function measurement block counters. */
    pbdev->fmb.counter[ZPCI_FMB_CNT_STB]++;

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;

specification_error:
    s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    return 0;
}
935
/*
 * Register adapter interrupt indicators for @pbdev from the guest FIB:
 * map the summary indicator (AISB) and the per-vector indicator area
 * (AIBV), then record the routing information on the device.
 *
 * Returns 0 on success or a negative value from map_indicator(); on
 * failure both indicators are released again.
 */
static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    int ret, len;
    uint8_t isc = FIB_DATA_ISC(ldl_be_p(&fib.data));

    pbdev->routes.adapter.adapter_id = css_get_adapter_id(
                                       CSS_IO_ADAPTER_PCI, isc);
    pbdev->summary_ind = get_indicator(ldq_be_p(&fib.aisb), sizeof(uint64_t));
    /* Size the indicator area by the number of interrupts (NOI). */
    len = BITS_TO_LONGS(FIB_DATA_NOI(ldl_be_p(&fib.data))) * sizeof(unsigned long);
    pbdev->indicator = get_indicator(ldq_be_p(&fib.aibv), len);

    ret = map_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    if (ret) {
        goto out;
    }

    ret = map_indicator(&pbdev->routes.adapter, pbdev->indicator);
    if (ret) {
        goto out;
    }

    /* Both mappings succeeded: commit the routing info to the device. */
    pbdev->routes.adapter.summary_addr = ldq_be_p(&fib.aisb);
    pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_be_p(&fib.data));
    pbdev->routes.adapter.ind_addr = ldq_be_p(&fib.aibv);
    pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_be_p(&fib.data));
    pbdev->isc = isc;
    pbdev->noi = FIB_DATA_NOI(ldl_be_p(&fib.data));
    pbdev->sum = FIB_DATA_SUM(ldl_be_p(&fib.data));

    trace_s390_pci_irqs("register", pbdev->routes.adapter.adapter_id);
    return 0;
out:
    /* Undo partial registration. */
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);
    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;
    return ret;
}
974
/*
 * Undo reg_irqs(): release both interrupt indicators and clear every piece
 * of adapter-interrupt routing state on the device. Always returns 0.
 */
int pci_dereg_irqs(S390PCIBusDevice *pbdev)
{
    /* Release and forget the mapped indicators. */
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);
    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;

    /* Wipe the recorded routing information. */
    pbdev->routes.adapter.summary_addr = 0;
    pbdev->routes.adapter.summary_offset = 0;
    pbdev->routes.adapter.ind_addr = 0;
    pbdev->routes.adapter.ind_offset = 0;

    /* And the cached FIB-derived interrupt parameters. */
    pbdev->isc = 0;
    pbdev->noi = 0;
    pbdev->sum = 0;

    trace_s390_pci_irqs("unregister", pbdev->routes.adapter.adapter_id);
    return 0;
}
993
/*
 * Register the guest's I/O address translation parameters from the FIB:
 * PCI base/limit addresses (pba/pal) and the I/O translation anchor
 * (g_iota), then enable either translated or direct-mapped IOMMU mode.
 *
 * Returns 0 on success or -EINVAL (after raising a program interrupt) on
 * invalid parameters.
 */
static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib,
                    uintptr_t ra)
{
    S390PCIIOMMU *iommu = pbdev->iommu;
    uint64_t pba = ldq_be_p(&fib.pba);
    uint64_t pal = ldq_be_p(&fib.pal);
    uint64_t g_iota = ldq_be_p(&fib.iota);
    uint8_t dt = (g_iota >> 2) & 0x7;   /* designation type */
    uint8_t t = (g_iota >> 11) & 0x1;   /* translation enabled bit */

    /* Align the window to page granularity: pba down, pal up. */
    pba &= ~0xfff;
    pal |= 0xfff;
    /* The window must be ordered and lie within the function's DMA range. */
    if (pba > pal || pba < pbdev->zpci_fn.sdma || pal > pbdev->zpci_fn.edma) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return -EINVAL;
    }

    /* currently we only support designation type 1 with translation */
    if (t && dt != ZPCI_IOTA_RTTO) {
        error_report("unsupported ioat dt %d t %d", dt, t);
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return -EINVAL;
    } else if (!t && !pbdev->rtr_avail) {
        error_report("relaxed translation not allowed");
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return -EINVAL;
    }

    iommu->pba = pba;
    iommu->pal = pal;
    iommu->g_iota = g_iota;

    if (t) {
        s390_pci_iommu_enable(iommu);
    } else {
        s390_pci_iommu_direct_map_enable(iommu);
    }

    return 0;
}
1034
/* Undo reg_ioat(): disable the IOMMU and clear the translation window. */
void pci_dereg_ioat(S390PCIIOMMU *iommu)
{
    s390_pci_iommu_disable(iommu);
    iommu->g_iota = 0;
    iommu->pal = 0;
    iommu->pba = 0;
}
1042
/* Tear down function measurement: stop the timer and reset the FMB state. */
void fmb_timer_free(S390PCIBusDevice *pbdev)
{
    if (pbdev->fmb_timer != NULL) {
        timer_free(pbdev->fmb_timer);
        pbdev->fmb_timer = NULL;
    }
    memset(&pbdev->fmb, 0, sizeof(ZpciFmb));
    pbdev->fmb_addr = 0;
}
1052
/*
 * Store @val (@len bytes, big-endian) at @offset within the guest's
 * function measurement block. On any store failure an FMBA error event is
 * raised and measurement is torn down. Returns the MemTxResult.
 */
static int fmb_do_update(S390PCIBusDevice *pbdev, int offset, uint64_t val,
                         int len)
{
    MemTxResult res;
    uint64_t addr = pbdev->fmb_addr + offset;

    switch (len) {
    case 8:
        address_space_stq_be(&address_space_memory, addr, val,
                             MEMTXATTRS_UNSPECIFIED,
                             &res);
        break;
    case 4:
        address_space_stl_be(&address_space_memory, addr, val,
                             MEMTXATTRS_UNSPECIFIED,
                             &res);
        break;
    case 2:
        address_space_stw_be(&address_space_memory, addr, val,
                             MEMTXATTRS_UNSPECIFIED,
                             &res);
        break;
    case 1:
        address_space_stb(&address_space_memory, addr, val,
                          MEMTXATTRS_UNSPECIFIED,
                          &res);
        break;
    default:
        res = MEMTX_ERROR;
        break;
    }

    if (res != MEMTX_OK) {
        /* Tell the guest the FMB address is bad and stop measuring. */
        s390_pci_generate_error_event(ERR_EVENT_FMBA, pbdev->fh, pbdev->fid,
                                      pbdev->fmb_addr, 0);
        fmb_timer_free(pbdev);
    }

    return res;
}
1092
/*
 * Periodic timer callback: push the current function measurement block
 * (sample count and counters) out to guest memory.
 *
 * The U (update-in-progress) bit is set in last_update while the block is
 * being written and cleared with a fresh TOD timestamp afterwards. Any
 * failed store aborts the update (fmb_do_update tears measurement down).
 */
static void fmb_update(void *opaque)
{
    S390PCIBusDevice *pbdev = opaque;
    int64_t t = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    int i;

    /* Update U bit */
    pbdev->fmb.last_update *= 2;
    pbdev->fmb.last_update |= UPDATE_U_BIT;
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, last_update),
                      pbdev->fmb.last_update,
                      sizeof(pbdev->fmb.last_update))) {
        return;
    }

    /* Update FMB sample count */
    /* Note: stores the pre-increment value, then bumps the local count. */
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, sample),
                      pbdev->fmb.sample++,
                      sizeof(pbdev->fmb.sample))) {
        return;
    }

    /* Update FMB counters */
    for (i = 0; i < ZPCI_FMB_CNT_MAX; i++) {
        if (fmb_do_update(pbdev, offsetof(ZpciFmb, counter[i]),
                          pbdev->fmb.counter[i],
                          sizeof(pbdev->fmb.counter[0]))) {
            return;
        }
    }

    /* Clear U bit and update the time */
    pbdev->fmb.last_update = time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
    pbdev->fmb.last_update *= 2;
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, last_update),
                      pbdev->fmb.last_update,
                      sizeof(pbdev->fmb.last_update))) {
        return;
    }
    /* Re-arm for the group's measurement update interval (mui). */
    timer_mod(pbdev->fmb_timer, t + pbdev->pci_group->zpci_group.mui);
}
1134
/*
 * Register interrupt forwarding for an interpreted (KVM-assisted)
 * zPCI function.  Traces on failure and returns the enable result
 * (0 on success).
 */
static int mpcifc_reg_int_interp(S390PCIBusDevice *pbdev, ZpciFib *fib)
{
    int rc = s390_pci_kvm_aif_enable(pbdev, fib, pbdev->forwarding_assist);

    if (rc) {
        trace_s390_pci_kvm_aif("enable");
    }

    return rc;
}
1147
/*
 * Deregister interrupt forwarding for an interpreted (KVM-assisted)
 * zPCI function.  The fib argument is unused but kept for symmetry
 * with mpcifc_reg_int_interp().  Traces on failure and returns the
 * disable result (0 on success).
 */
static int mpcifc_dereg_int_interp(S390PCIBusDevice *pbdev, ZpciFib *fib)
{
    int rc = s390_pci_kvm_aif_disable(pbdev);

    if (rc) {
        trace_s390_pci_kvm_aif("disable");
    }

    return rc;
}
1160
/*
 * MPCIFC (Modify PCI Function Controls) instruction handler.
 *
 * Register r1 encodes: operation control (oc) in the low byte, the DMA
 * address space id (dmaas) in bits 16-23 (counting from the low end),
 * and the function handle (fh) in the upper 32 bits.  fiba/ar locate
 * the guest Function Information Block (FIB) describing the request.
 *
 * Injects a program exception for privileged-state, misaligned fiba,
 * or invalid FIB format / unknown oc; otherwise sets a condition code
 * (and, on error, a status code in r1) and returns 0.
 */
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
                        uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint8_t oc, dmaas;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint64_t cc = ZPCI_PCI_LS_OK;

    /* MPCIFC is a privileged instruction. */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    oc = env->regs[r1] & 0xff;
    dmaas = (env->regs[r1] >> 16) & 0xff;
    fh = env->regs[r1] >> 32;

    /* The FIB address must be doubleword aligned. */
    if (fiba & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        trace_s390_pci_nodev("mpcifc", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    /* Function controls can only be modified in an operational state. */
    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    default:
        break;
    }

    /* Fetch the FIB from guest memory. */
    if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    /* Only FIB format 0 is supported. */
    if (fib.fmt != 0) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    switch (oc) {
    case ZPCI_MOD_FC_REG_INT:
        /* Register interrupts: via KVM interpretation when enabled for
         * this device, otherwise emulated (reg_irqs). */
        if (pbdev->interp) {
            if (mpcifc_reg_int_interp(pbdev, &fib)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
            }
        } else if (pbdev->summary_ind) {
            /* Already registered: sequence error. */
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_irqs(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_RES_NOT_AVAIL);
        }
        break;
    case ZPCI_MOD_FC_DEREG_INT:
        if (pbdev->interp) {
            if (mpcifc_dereg_int_interp(pbdev, &fib)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
            }
        } else if (!pbdev->summary_ind) {
            /* Not registered: sequence error. */
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_irqs(pbdev);
        }
        break;
    case ZPCI_MOD_FC_REG_IOAT:
        /* Register I/O address translation parameters; only DMA address
         * space 0 is supported, and only when not already registered. */
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_ioat(env, pbdev, fib, ra)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
        }
        break;
    case ZPCI_MOD_FC_DEREG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev->iommu);
        }
        break;
    case ZPCI_MOD_FC_REREG_IOAT:
        /* Re-register: deregister, then register with the new FIB. */
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev->iommu);
            if (reg_ioat(env, pbdev, fib, ra)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
            }
        }
        break;
    case ZPCI_MOD_FC_RESET_ERROR:
        /* Recover a blocked or errored function back to enabled. */
        switch (pbdev->state) {
        case ZPCI_FS_BLOCKED:
        case ZPCI_FS_ERROR:
            pbdev->state = ZPCI_FS_ENABLED;
            break;
        default:
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        }
        break;
    case ZPCI_MOD_FC_RESET_BLOCK:
        /* Move a function in the error state to blocked. */
        switch (pbdev->state) {
        case ZPCI_FS_ERROR:
            pbdev->state = ZPCI_FS_BLOCKED;
            break;
        default:
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        }
        break;
    case ZPCI_MOD_FC_SET_MEASURE: {
        /* Register (or deregister, when zero) the guest FMB address and
         * arm the periodic FMB update timer. */
        uint64_t fmb_addr = ldq_be_p(&fib.fmb_addr);

        if (fmb_addr & FMBK_MASK) {
            /* Reserved bits set in the FMB address: protocol error. */
            cc = ZPCI_PCI_LS_ERR;
            s390_pci_generate_error_event(ERR_EVENT_FMBPRO, pbdev->fh,
                                          pbdev->fid, fmb_addr, 0);
            fmb_timer_free(pbdev);
            break;
        }

        if (!fmb_addr) {
            /* Stop updating FMB. */
            fmb_timer_free(pbdev);
            break;
        }

        if (!pbdev->fmb_timer) {
            pbdev->fmb_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                            fmb_update, pbdev);
        } else if (timer_pending(pbdev->fmb_timer)) {
            /* Remove pending timer to update FMB address. */
            timer_del(pbdev->fmb_timer);
        }
        pbdev->fmb_addr = fmb_addr;
        timer_mod(pbdev->fmb_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                                    pbdev->pci_group->zpci_group.mui);
        break;
    }
    default:
        s390_program_interrupt(&cpu->env, PGM_OPERAND, ra);
        cc = ZPCI_PCI_LS_ERR;
    }

    setcc(cpu, cc);
    return 0;
}
1338
/*
 * STPCIFC (Store PCI Function Controls) instruction handler.
 *
 * Register r1 encodes the function handle in its upper 32 bits and the
 * DMA address space id in bits 16-23 (only dmaas 0 is accepted).  A FIB
 * describing the current state of the function is built and stored at
 * the doubleword-aligned guest address fiba (access register ar).
 *
 * Sets a condition code (and, on error, a status code in r1) and
 * returns 0; program exceptions are injected for privileged-state and
 * misaligned fiba.
 */
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
                         uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint8_t dmaas;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint32_t data;
    uint64_t cc = ZPCI_PCI_LS_OK;

    /* STPCIFC is a privileged instruction. */
    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    dmaas = (env->regs[r1] >> 16) & 0xff;

    if (dmaas) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_INVAL_DMAAS);
        return 0;
    }

    /* The FIB address must be doubleword aligned. */
    if (fiba & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    /* Look up by handle index so disabled functions can be queried. */
    pbdev = s390_pci_find_dev_by_idx(s390_get_phb(), fh & FH_MASK_INDEX);
    if (!pbdev) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    memset(&fib, 0, sizeof(fib));

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_DISABLED:
        if (fh & FH_MASK_ENABLE) {
            setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
            return 0;
        }
        /* Disabled: store the zeroed FIB. */
        goto out;
    /* BLOCKED bit is set to one coincident with the setting of ERROR bit.
     * FH Enabled bit is set to one in states of ENABLED, BLOCKED or ERROR. */
    case ZPCI_FS_ERROR:
        fib.fc |= 0x20;
        /* fallthrough */
    case ZPCI_FS_BLOCKED:
        fib.fc |= 0x40;
        /* fallthrough */
    case ZPCI_FS_ENABLED:
        fib.fc |= 0x80;
        if (pbdev->iommu->enabled) {
            fib.fc |= 0x10;
        }
        if (!(fh & FH_MASK_ENABLE)) {
            /* Queried with the disabled handle while the function is
             * enabled: flag this in the leftmost bit of r1. */
            env->regs[r1] |= 1ULL << 63;
        }
        break;
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_PERM_ERROR);
        return 0;
    }

    /* Fill in the IOAT, interrupt-indicator and FMB fields (big-endian). */
    stq_be_p(&fib.pba, pbdev->iommu->pba);
    stq_be_p(&fib.pal, pbdev->iommu->pal);
    stq_be_p(&fib.iota, pbdev->iommu->g_iota);
    stq_be_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
    stq_be_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
    stq_be_p(&fib.fmb_addr, pbdev->fmb_addr);

    /* Pack isc/noi/offsets/sum into the FIB data word. */
    data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
           ((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
           ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
    stl_be_p(&fib.data, data);

out:
    if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    setcc(cpu, cc);
    return 0;
}
1432