/*
 * Channel subsystem base support.
 *
 * Copyright 2012 IBM Corp.
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "hw/qdev.h"
#include "qemu/bitops.h"
#include "exec/address-spaces.h"
#include "cpu.h"
#include "hw/s390x/ioinst.h"
#include "hw/s390x/css.h"
#include "trace.h"
#include "hw/s390x/s390_flic.h"

typedef struct CrwContainer {
    CRW crw;
    QTAILQ_ENTRY(CrwContainer) sibling;
} CrwContainer;

typedef struct ChpInfo {
    uint8_t in_use;
    uint8_t type;
    uint8_t is_virtual;
} ChpInfo;

typedef struct SubchSet {
    SubchDev *sch[MAX_SCHID + 1];
    unsigned long schids_used[BITS_TO_LONGS(MAX_SCHID + 1)];
    unsigned long devnos_used[BITS_TO_LONGS(MAX_SCHID + 1)];
} SubchSet;

typedef struct CssImage {
    SubchSet *sch_set[MAX_SSID + 1];
    ChpInfo chpids[MAX_CHPID + 1];
} CssImage;

typedef struct IoAdapter {
    uint32_t id;
    uint8_t type;
    uint8_t isc;
    QTAILQ_ENTRY(IoAdapter) sibling;
} IoAdapter;

typedef struct ChannelSubSys {
    QTAILQ_HEAD(, CrwContainer) pending_crws;
    bool sei_pending;
    bool do_crw_mchk;
    bool crws_lost;
    uint8_t max_cssid;
    uint8_t max_ssid;
    bool chnmon_active;
    uint64_t chnmon_area;
    CssImage *css[MAX_CSSID + 1];
    uint8_t default_cssid;
    QTAILQ_HEAD(, IoAdapter) io_adapters;
    QTAILQ_HEAD(, IndAddr) indicator_addresses;
} ChannelSubSys;

static ChannelSubSys channel_subsys = {
    .pending_crws = QTAILQ_HEAD_INITIALIZER(channel_subsys.pending_crws),
    .do_crw_mchk = true,
    .sei_pending = false,
    .crws_lost = false,
    .chnmon_active = false,
    .io_adapters = QTAILQ_HEAD_INITIALIZER(channel_subsys.io_adapters),
    .indicator_addresses =
        QTAILQ_HEAD_INITIALIZER(channel_subsys.indicator_addresses),
};

IndAddr *get_indicator(hwaddr ind_addr, int len)
{
    IndAddr *indicator;

    QTAILQ_FOREACH(indicator, &channel_subsys.indicator_addresses, sibling) {
        if (indicator->addr == ind_addr) {
            indicator->refcnt++;
            return indicator;
        }
    }
    indicator = g_new0(IndAddr, 1);
    indicator->addr = ind_addr;
    indicator->len = len;
    indicator->refcnt = 1;
    QTAILQ_INSERT_TAIL(&channel_subsys.indicator_addresses,
                       indicator, sibling);
    return indicator;
}

static int s390_io_adapter_map(AdapterInfo *adapter, uint64_t map_addr,
                               bool do_map)
{
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);

    return fsc->io_adapter_map(fs, adapter->adapter_id, map_addr, do_map);
}

void release_indicator(AdapterInfo *adapter, IndAddr *indicator)
{
    assert(indicator->refcnt > 0);
    indicator->refcnt--;
    if (indicator->refcnt > 0) {
        return;
    }
    QTAILQ_REMOVE(&channel_subsys.indicator_addresses, indicator, sibling);
    if (indicator->map) {
        s390_io_adapter_map(adapter, indicator->map, false);
    }
    g_free(indicator);
}
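/*
 * Editor's sketch (not part of the original code): how a caller might be
 * expected to use the indicator helpers above and map_indicator() below,
 * assuming a hypothetical device with an AdapterInfo "info" and a guest
 * address "guest_addr" it obtained from a channel command:
 *
 *     IndAddr *ind = get_indicator(guest_addr, sizeof(uint64_t));
 *     if (map_indicator(&info, ind)) {
 *         release_indicator(&info, ind);   // drop the reference on failure
 *     }
 *     ...
 *     release_indicator(&info, ind);       // unmaps once refcnt hits zero
 *
 * get_indicator() either returns an existing entry with its reference
 * count bumped or allocates a new one, so every successful call must be
 * paired with exactly one release_indicator().
 */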
int map_indicator(AdapterInfo *adapter, IndAddr *indicator)
{
    int ret;

    if (indicator->map) {
        return 0; /* already mapped is not an error */
    }
    indicator->map = indicator->addr;
    ret = s390_io_adapter_map(adapter, indicator->map, true);
    if ((ret != 0) && (ret != -ENOSYS)) {
        goto out_err;
    }
    return 0;

out_err:
    indicator->map = 0;
    return ret;
}

int css_create_css_image(uint8_t cssid, bool default_image)
{
    trace_css_new_image(cssid, default_image ? "(default)" : "");
    if (cssid > MAX_CSSID) {
        return -EINVAL;
    }
    if (channel_subsys.css[cssid]) {
        return -EBUSY;
    }
    channel_subsys.css[cssid] = g_malloc0(sizeof(CssImage));
    if (default_image) {
        channel_subsys.default_cssid = cssid;
    }
    return 0;
}

int css_register_io_adapter(uint8_t type, uint8_t isc, bool swap,
                            bool maskable, uint32_t *id)
{
    IoAdapter *adapter;
    bool found = false;
    int ret;
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);

    *id = 0;
    QTAILQ_FOREACH(adapter, &channel_subsys.io_adapters, sibling) {
        if ((adapter->type == type) && (adapter->isc == isc)) {
            *id = adapter->id;
            found = true;
            ret = 0;
            break;
        }
        if (adapter->id >= *id) {
            *id = adapter->id + 1;
        }
    }
    if (found) {
        goto out;
    }
    adapter = g_new0(IoAdapter, 1);
    ret = fsc->register_io_adapter(fs, *id, isc, swap, maskable);
    if (ret == 0) {
        adapter->id = *id;
        adapter->isc = isc;
        adapter->type = type;
        QTAILQ_INSERT_TAIL(&channel_subsys.io_adapters, adapter, sibling);
    } else {
        g_free(adapter);
        fprintf(stderr, "Unexpected error %d when registering adapter %d\n",
                ret, *id);
    }
out:
    return ret;
}

static void css_clear_io_interrupt(uint16_t subchannel_id,
                                   uint16_t subchannel_nr)
{
    Error *err = NULL;
    static bool no_clear_irq;
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);
    int r;

    if (unlikely(no_clear_irq)) {
        return;
    }
    r = fsc->clear_io_irq(fs, subchannel_id, subchannel_nr);
    switch (r) {
    case 0:
        break;
    case -ENOSYS:
        no_clear_irq = true;
        /*
         * Ignore unavailability, as the user can't do anything
         * about it anyway.
         */
        break;
    default:
        error_setg_errno(&err, -r, "unexpected error condition");
        error_propagate(&error_abort, err);
    }
}

static inline uint16_t css_do_build_subchannel_id(uint8_t cssid, uint8_t ssid)
{
    if (channel_subsys.max_cssid > 0) {
        return (cssid << 8) | (1 << 3) | (ssid << 1) | 1;
    }
    return (ssid << 1) | 1;
}

uint16_t css_build_subchannel_id(SubchDev *sch)
{
    return css_do_build_subchannel_id(sch->cssid, sch->ssid);
}

static void css_inject_io_interrupt(SubchDev *sch)
{
    uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;

    trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
                           sch->curr_status.pmcw.intparm, isc, "");
    s390_io_interrupt(css_build_subchannel_id(sch),
                      sch->schid,
                      sch->curr_status.pmcw.intparm,
                      isc << 27);
}
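/*
 * Editor's note (illustration, not original code): the interruption
 * identification built above packs its fields as follows. With the
 * multiple-css facility enabled (max_cssid > 0), css_do_build_subchannel_id()
 * places the cssid in the high byte, a one at bit 3, the ssid at bits 2-1
 * and a one at bit 0, so e.g. cssid 0xfe, ssid 1 yields 0xfe0b. The 32-bit
 * I/O interruption word passed to s390_io_interrupt() carries the 3-bit ISC
 * shifted into bits 29-27 (hence "isc << 27"); css_adapter_interrupt()
 * below additionally sets IO_INT_WORD_AI to mark an adapter (thin)
 * interrupt that is not tied to a particular subchannel.
 */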
void css_conditional_io_interrupt(SubchDev *sch)
{
    /*
     * If the subchannel is not currently status pending, make it pending
     * with alert status.
     */
    if (!(sch->curr_status.scsw.ctrl & SCSW_STCTL_STATUS_PEND)) {
        uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;

        trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
                               sch->curr_status.pmcw.intparm, isc,
                               "(unsolicited)");
        sch->curr_status.scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
        sch->curr_status.scsw.ctrl |=
            SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
        /* Inject an I/O interrupt. */
        s390_io_interrupt(css_build_subchannel_id(sch),
                          sch->schid,
                          sch->curr_status.pmcw.intparm,
                          isc << 27);
    }
}

void css_adapter_interrupt(uint8_t isc)
{
    uint32_t io_int_word = (isc << 27) | IO_INT_WORD_AI;

    trace_css_adapter_interrupt(isc);
    s390_io_interrupt(0, 0, 0, io_int_word);
}

static void sch_handle_clear_func(SubchDev *sch)
{
    PMCW *p = &sch->curr_status.pmcw;
    SCSW *s = &sch->curr_status.scsw;
    int path;

    /* Path management: In our simple css, we always choose the only path. */
    path = 0x80;

    /* Reset values prior to 'issuing the clear signal'. */
    p->lpum = 0;
    p->pom = 0xff;
    s->flags &= ~SCSW_FLAGS_MASK_PNO;

    /* We always 'attempt to issue the clear signal', and we always succeed. */
    sch->channel_prog = 0x0;
    sch->last_cmd_valid = false;
    s->ctrl &= ~SCSW_ACTL_CLEAR_PEND;
    s->ctrl |= SCSW_STCTL_STATUS_PEND;

    s->dstat = 0;
    s->cstat = 0;
    p->lpum = path;
}

static void sch_handle_halt_func(SubchDev *sch)
{
    PMCW *p = &sch->curr_status.pmcw;
    SCSW *s = &sch->curr_status.scsw;
    hwaddr curr_ccw = sch->channel_prog;
    int path;

    /* Path management: In our simple css, we always choose the only path. */
    path = 0x80;

    /* We always 'attempt to issue the halt signal', and we always succeed. */
    sch->channel_prog = 0x0;
    sch->last_cmd_valid = false;
    s->ctrl &= ~SCSW_ACTL_HALT_PEND;
    s->ctrl |= SCSW_STCTL_STATUS_PEND;

    if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) ||
        !((s->ctrl & SCSW_ACTL_START_PEND) ||
          (s->ctrl & SCSW_ACTL_SUSP))) {
        s->dstat = SCSW_DSTAT_DEVICE_END;
    }
    if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) ||
        (s->ctrl & SCSW_ACTL_SUSP)) {
        s->cpa = curr_ccw + 8;
    }
    s->cstat = 0;
    p->lpum = path;
}

static void copy_sense_id_to_guest(SenseId *dest, SenseId *src)
{
    int i;

    dest->reserved = src->reserved;
    dest->cu_type = cpu_to_be16(src->cu_type);
    dest->cu_model = src->cu_model;
    dest->dev_type = cpu_to_be16(src->dev_type);
    dest->dev_model = src->dev_model;
    dest->unused = src->unused;
    for (i = 0; i < ARRAY_SIZE(dest->ciw); i++) {
        dest->ciw[i].type = src->ciw[i].type;
        dest->ciw[i].command = src->ciw[i].command;
        dest->ciw[i].count = cpu_to_be16(src->ciw[i].count);
    }
}

static CCW1 copy_ccw_from_guest(hwaddr addr, bool fmt1)
{
    CCW0 tmp0;
    CCW1 tmp1;
    CCW1 ret;

    if (fmt1) {
        cpu_physical_memory_read(addr, &tmp1, sizeof(tmp1));
        ret.cmd_code = tmp1.cmd_code;
        ret.flags = tmp1.flags;
        ret.count = be16_to_cpu(tmp1.count);
        ret.cda = be32_to_cpu(tmp1.cda);
    } else {
        cpu_physical_memory_read(addr, &tmp0, sizeof(tmp0));
        ret.cmd_code = tmp0.cmd_code;
        ret.flags = tmp0.flags;
        ret.count = be16_to_cpu(tmp0.count);
        ret.cda = be16_to_cpu(tmp0.cda1) | (tmp0.cda0 << 16);
        if ((ret.cmd_code & 0x0f) == CCW_CMD_TIC) {
            ret.cmd_code &= 0x0f;
        }
    }
    return ret;
}

static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr,
                             bool suspend_allowed)
{
    int ret;
    bool check_len;
    int len;
    CCW1 ccw;

    if (!ccw_addr) {
        return -EIO;
    }

    /* Translate everything to format-1 ccws - the information is the same. */
    ccw = copy_ccw_from_guest(ccw_addr, sch->ccw_fmt_1);

    /* Check for invalid command codes. */
    if ((ccw.cmd_code & 0x0f) == 0) {
        return -EINVAL;
    }
    if (((ccw.cmd_code & 0x0f) == CCW_CMD_TIC) &&
        ((ccw.cmd_code & 0xf0) != 0)) {
        return -EINVAL;
    }
    if (!sch->ccw_fmt_1 && (ccw.count == 0) &&
        (ccw.cmd_code != CCW_CMD_TIC)) {
        return -EINVAL;
    }

    if (ccw.flags & CCW_FLAG_SUSPEND) {
        return suspend_allowed ? -EINPROGRESS : -EINVAL;
    }

    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));

    if (!ccw.cda) {
        if (sch->ccw_no_data_cnt == 255) {
            return -EINVAL;
        }
        sch->ccw_no_data_cnt++;
    }

    /* Look at the command. */
    switch (ccw.cmd_code) {
    case CCW_CMD_NOOP:
        /* Nothing to do. */
        ret = 0;
        break;
    case CCW_CMD_BASIC_SENSE:
        if (check_len) {
            if (ccw.count != sizeof(sch->sense_data)) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, sizeof(sch->sense_data));
        cpu_physical_memory_write(ccw.cda, sch->sense_data, len);
        sch->curr_status.scsw.count = ccw.count - len;
        memset(sch->sense_data, 0, sizeof(sch->sense_data));
        ret = 0;
        break;
    case CCW_CMD_SENSE_ID:
    {
        SenseId sense_id;

        copy_sense_id_to_guest(&sense_id, &sch->id);
        /* Sense ID information is device specific. */
        if (check_len) {
            if (ccw.count != sizeof(sense_id)) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, sizeof(sense_id));
        /*
         * Only indicate 0xff in the first sense byte if we actually
         * have enough place to store at least bytes 0-3.
         */
        if (len >= 4) {
            sense_id.reserved = 0xff;
        } else {
            sense_id.reserved = 0;
        }
        cpu_physical_memory_write(ccw.cda, &sense_id, len);
        sch->curr_status.scsw.count = ccw.count - len;
        ret = 0;
        break;
    }
    case CCW_CMD_TIC:
        if (sch->last_cmd_valid && (sch->last_cmd.cmd_code == CCW_CMD_TIC)) {
            ret = -EINVAL;
            break;
        }
        if (ccw.flags & (CCW_FLAG_CC | CCW_FLAG_DC)) {
            ret = -EINVAL;
            break;
        }
        sch->channel_prog = ccw.cda;
        ret = -EAGAIN;
        break;
    default:
        if (sch->ccw_cb) {
            /* Handle device specific commands. */
            ret = sch->ccw_cb(sch, ccw);
        } else {
            ret = -ENOSYS;
        }
        break;
    }
    sch->last_cmd = ccw;
    sch->last_cmd_valid = true;
    if (ret == 0) {
        if (ccw.flags & CCW_FLAG_CC) {
            sch->channel_prog += 8;
            ret = -EAGAIN;
        }
    }

    return ret;
}

static void sch_handle_start_func(SubchDev *sch, ORB *orb)
{
    PMCW *p = &sch->curr_status.pmcw;
    SCSW *s = &sch->curr_status.scsw;
    int path;
    int ret;
    bool suspend_allowed;

    /* Path management: In our simple css, we always choose the only path. */
    path = 0x80;

    if (!(s->ctrl & SCSW_ACTL_SUSP)) {
        s->cstat = 0;
        s->dstat = 0;
        /* Look at the orb and try to execute the channel program. */
        assert(orb != NULL); /* resume does not pass an orb */
        p->intparm = orb->intparm;
        if (!(orb->lpm & path)) {
            /* Generate a deferred cc 3 condition. */
            s->flags |= SCSW_FLAGS_MASK_CC;
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
            return;
        }
        sch->ccw_fmt_1 = !!(orb->ctrl0 & ORB_CTRL0_MASK_FMT);
        sch->ccw_no_data_cnt = 0;
        suspend_allowed = !!(orb->ctrl0 & ORB_CTRL0_MASK_SPND);
    } else {
        s->ctrl &= ~(SCSW_ACTL_SUSP | SCSW_ACTL_RESUME_PEND);
        /* The channel program had been suspended before. */
        suspend_allowed = true;
    }
    sch->last_cmd_valid = false;
    do {
        ret = css_interpret_ccw(sch, sch->channel_prog, suspend_allowed);
        switch (ret) {
        case -EAGAIN:
            /* ccw chain, continue processing */
            break;
        case 0:
            /* success */
            s->ctrl &= ~SCSW_ACTL_START_PEND;
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                       SCSW_STCTL_STATUS_PEND;
            s->dstat = SCSW_DSTAT_CHANNEL_END | SCSW_DSTAT_DEVICE_END;
            s->cpa = sch->channel_prog + 8;
            break;
        case -ENOSYS:
            /* unsupported command, generate unit check (command reject) */
            s->ctrl &= ~SCSW_ACTL_START_PEND;
            s->dstat = SCSW_DSTAT_UNIT_CHECK;
            /* Set sense bit 0 in ecw0. */
            sch->sense_data[0] = 0x80;
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                       SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            s->cpa = sch->channel_prog + 8;
            break;
        case -EFAULT:
            /* memory problem, generate channel data check */
            s->ctrl &= ~SCSW_ACTL_START_PEND;
            s->cstat = SCSW_CSTAT_DATA_CHECK;
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                       SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            s->cpa = sch->channel_prog + 8;
            break;
        case -EBUSY:
            /* subchannel busy, generate deferred cc 1 */
            s->flags &= ~SCSW_FLAGS_MASK_CC;
            s->flags |= (1 << 8);
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            break;
        case -EINPROGRESS:
            /* channel program has been suspended */
            s->ctrl &= ~SCSW_ACTL_START_PEND;
            s->ctrl |= SCSW_ACTL_SUSP;
            break;
        default:
            /* error, generate channel program check */
            s->ctrl &= ~SCSW_ACTL_START_PEND;
            s->cstat = SCSW_CSTAT_PROG_CHECK;
            s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
            s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                       SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            s->cpa = sch->channel_prog + 8;
            break;
        }
    } while (ret == -EAGAIN);
}

/*
 * On real machines, this would run asynchronously to the main vcpus.
 * We might want to make some parts of the ssch handling (interpreting
 * read/writes) asynchronous later on if we start supporting more than
 * our current very simple devices.
 */
static void do_subchannel_work(SubchDev *sch, ORB *orb)
{
    SCSW *s = &sch->curr_status.scsw;

    if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) {
        sch_handle_clear_func(sch);
    } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) {
        sch_handle_halt_func(sch);
    } else if (s->ctrl & SCSW_FCTL_START_FUNC) {
        sch_handle_start_func(sch, orb);
    } else {
        /* Cannot happen. */
        return;
    }
    css_inject_io_interrupt(sch);
}

static void copy_pmcw_to_guest(PMCW *dest, const PMCW *src)
{
    int i;

    dest->intparm = cpu_to_be32(src->intparm);
    dest->flags = cpu_to_be16(src->flags);
    dest->devno = cpu_to_be16(src->devno);
    dest->lpm = src->lpm;
    dest->pnom = src->pnom;
    dest->lpum = src->lpum;
    dest->pim = src->pim;
    dest->mbi = cpu_to_be16(src->mbi);
    dest->pom = src->pom;
    dest->pam = src->pam;
    for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
        dest->chpid[i] = src->chpid[i];
    }
    dest->chars = cpu_to_be32(src->chars);
}

static void copy_scsw_to_guest(SCSW *dest, const SCSW *src)
{
    dest->flags = cpu_to_be16(src->flags);
    dest->ctrl = cpu_to_be16(src->ctrl);
    dest->cpa = cpu_to_be32(src->cpa);
    dest->dstat = src->dstat;
    dest->cstat = src->cstat;
    dest->count = cpu_to_be16(src->count);
}

static void copy_schib_to_guest(SCHIB *dest, const SCHIB *src)
{
    int i;

    copy_pmcw_to_guest(&dest->pmcw, &src->pmcw);
    copy_scsw_to_guest(&dest->scsw, &src->scsw);
    dest->mba = cpu_to_be64(src->mba);
    for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
        dest->mda[i] = src->mda[i];
    }
}

int css_do_stsch(SubchDev *sch, SCHIB *schib)
{
    /* Use current status. */
    copy_schib_to_guest(schib, &sch->curr_status);
    return 0;
}

static void copy_pmcw_from_guest(PMCW *dest, const PMCW *src)
{
    int i;

    dest->intparm = be32_to_cpu(src->intparm);
    dest->flags = be16_to_cpu(src->flags);
    dest->devno = be16_to_cpu(src->devno);
    dest->lpm = src->lpm;
    dest->pnom = src->pnom;
    dest->lpum = src->lpum;
    dest->pim = src->pim;
    dest->mbi = be16_to_cpu(src->mbi);
    dest->pom = src->pom;
    dest->pam = src->pam;
    for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
        dest->chpid[i] = src->chpid[i];
    }
    dest->chars = be32_to_cpu(src->chars);
}

static void copy_scsw_from_guest(SCSW *dest, const SCSW *src)
{
    dest->flags = be16_to_cpu(src->flags);
    dest->ctrl = be16_to_cpu(src->ctrl);
    dest->cpa = be32_to_cpu(src->cpa);
    dest->dstat = src->dstat;
    dest->cstat = src->cstat;
    dest->count = be16_to_cpu(src->count);
}

static void copy_schib_from_guest(SCHIB *dest, const SCHIB *src)
{
    int i;

    copy_pmcw_from_guest(&dest->pmcw, &src->pmcw);
    copy_scsw_from_guest(&dest->scsw, &src->scsw);
    dest->mba = be64_to_cpu(src->mba);
    for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
        dest->mda[i] = src->mda[i];
    }
}

int css_do_msch(SubchDev *sch, const SCHIB *orig_schib)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    uint16_t oldflags;
    int ret;
    SCHIB schib;

    if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_DNV)) {
        ret = 0;
        goto out;
    }

    if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
        ret = -EINPROGRESS;
        goto out;
    }

    if (s->ctrl &
        (SCSW_FCTL_START_FUNC|SCSW_FCTL_HALT_FUNC|SCSW_FCTL_CLEAR_FUNC)) {
        ret = -EBUSY;
        goto out;
    }

    copy_schib_from_guest(&schib, orig_schib);
    /* Only update the program-modifiable fields. */
    p->intparm = schib.pmcw.intparm;
    oldflags = p->flags;
    p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
                  PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
                  PMCW_FLAGS_MASK_MP);
    p->flags |= schib.pmcw.flags &
            (PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
             PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
             PMCW_FLAGS_MASK_MP);
    p->lpm = schib.pmcw.lpm;
    p->mbi = schib.pmcw.mbi;
    p->pom = schib.pmcw.pom;
    p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
    p->chars |= schib.pmcw.chars &
            (PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
    sch->curr_status.mba = schib.mba;

    /* Has the channel been disabled? */
    if (sch->disable_cb && (oldflags & PMCW_FLAGS_MASK_ENA) != 0
        && (p->flags & PMCW_FLAGS_MASK_ENA) == 0) {
        sch->disable_cb(sch);
    }

    ret = 0;

out:
    return ret;
}

int css_do_xsch(SubchDev *sch)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    int ret;

    if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
        ret = -ENODEV;
        goto out;
    }

    if (!(s->ctrl & SCSW_CTRL_MASK_FCTL) ||
        ((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
        (!(s->ctrl &
           (SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_SUSP))) ||
        (s->ctrl & SCSW_ACTL_SUBCH_ACTIVE)) {
        ret = -EINPROGRESS;
        goto out;
    }

    if (s->ctrl & SCSW_CTRL_MASK_STCTL) {
        ret = -EBUSY;
        goto out;
    }

    /* Cancel the current operation. */
    s->ctrl &= ~(SCSW_FCTL_START_FUNC |
                 SCSW_ACTL_RESUME_PEND |
                 SCSW_ACTL_START_PEND |
                 SCSW_ACTL_SUSP);
    sch->channel_prog = 0x0;
    sch->last_cmd_valid = false;
    s->dstat = 0;
    s->cstat = 0;
    ret = 0;

out:
    return ret;
}

int css_do_csch(SubchDev *sch)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    int ret;

    if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
        ret = -ENODEV;
        goto out;
    }

    /* Trigger the clear function. */
    s->ctrl &= ~(SCSW_CTRL_MASK_FCTL | SCSW_CTRL_MASK_ACTL);
    s->ctrl |= SCSW_FCTL_CLEAR_FUNC | SCSW_ACTL_CLEAR_PEND;

    do_subchannel_work(sch, NULL);
    ret = 0;

out:
    return ret;
}

int css_do_hsch(SubchDev *sch)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    int ret;

    if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
        ret = -ENODEV;
        goto out;
    }

    if (((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_STATUS_PEND) ||
        (s->ctrl & (SCSW_STCTL_PRIMARY |
                    SCSW_STCTL_SECONDARY |
                    SCSW_STCTL_ALERT))) {
        ret = -EINPROGRESS;
        goto out;
    }

    if (s->ctrl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
        ret = -EBUSY;
        goto out;
    }

    /* Trigger the halt function. */
    s->ctrl |= SCSW_FCTL_HALT_FUNC;
    s->ctrl &= ~SCSW_FCTL_START_FUNC;
    if (((s->ctrl & SCSW_CTRL_MASK_ACTL) ==
         (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) &&
        ((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_INTERMEDIATE)) {
        s->ctrl &= ~SCSW_STCTL_STATUS_PEND;
    }
    s->ctrl |= SCSW_ACTL_HALT_PEND;

    do_subchannel_work(sch, NULL);
    ret = 0;

out:
    return ret;
}

static void css_update_chnmon(SubchDev *sch)
{
    if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_MME)) {
        /* Not active. */
        return;
    }
    /* The counter is conveniently located at the beginning of the struct. */
    if (sch->curr_status.pmcw.chars & PMCW_CHARS_MASK_MBFC) {
        /* Format 1, per-subchannel area. */
        uint32_t count;

        count = address_space_ldl(&address_space_memory,
                                  sch->curr_status.mba,
                                  MEMTXATTRS_UNSPECIFIED,
                                  NULL);
        count++;
        address_space_stl(&address_space_memory, sch->curr_status.mba, count,
                          MEMTXATTRS_UNSPECIFIED, NULL);
    } else {
        /* Format 0, global area. */
        uint32_t offset;
        uint16_t count;

        offset = sch->curr_status.pmcw.mbi << 5;
        count = address_space_lduw(&address_space_memory,
                                   channel_subsys.chnmon_area + offset,
                                   MEMTXATTRS_UNSPECIFIED,
                                   NULL);
        count++;
        address_space_stw(&address_space_memory,
                          channel_subsys.chnmon_area + offset, count,
                          MEMTXATTRS_UNSPECIFIED, NULL);
    }
}
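/*
 * Editor's note (illustration, not original code): in the format-0 case
 * above, each subchannel owns a 32-byte slot in the global measurement
 * area the guest configured via SCHM, so the slot address works out to
 *
 *     chnmon_area + (pmcw.mbi << 5)
 *
 * e.g. a measurement-block index of 3 lands at byte offset 96. Only the
 * first halfword (the ssch/rsch count) is bumped here; the remaining
 * architected counters are left at zero by this simple emulation.
 */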
int css_do_ssch(SubchDev *sch, ORB *orb)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    int ret;

    if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
        ret = -ENODEV;
        goto out;
    }

    if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
        ret = -EINPROGRESS;
        goto out;
    }

    if (s->ctrl & (SCSW_FCTL_START_FUNC |
                   SCSW_FCTL_HALT_FUNC |
                   SCSW_FCTL_CLEAR_FUNC)) {
        ret = -EBUSY;
        goto out;
    }

    /* If monitoring is active, update counter. */
    if (channel_subsys.chnmon_active) {
        css_update_chnmon(sch);
    }
    sch->channel_prog = orb->cpa;
    /* Trigger the start function. */
    s->ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND);
    s->flags &= ~SCSW_FLAGS_MASK_PNO;

    do_subchannel_work(sch, orb);
    ret = 0;

out:
    return ret;
}

static void copy_irb_to_guest(IRB *dest, const IRB *src, PMCW *pmcw,
                              int *irb_len)
{
    int i;
    uint16_t stctl = src->scsw.ctrl & SCSW_CTRL_MASK_STCTL;
    uint16_t actl = src->scsw.ctrl & SCSW_CTRL_MASK_ACTL;

    copy_scsw_to_guest(&dest->scsw, &src->scsw);

    for (i = 0; i < ARRAY_SIZE(dest->esw); i++) {
        dest->esw[i] = cpu_to_be32(src->esw[i]);
    }
    for (i = 0; i < ARRAY_SIZE(dest->ecw); i++) {
        dest->ecw[i] = cpu_to_be32(src->ecw[i]);
    }
    *irb_len = sizeof(*dest) - sizeof(dest->emw);

    /* extended measurements enabled? */
    if ((src->scsw.flags & SCSW_FLAGS_MASK_ESWF) ||
        !(pmcw->flags & PMCW_FLAGS_MASK_TF) ||
        !(pmcw->chars & PMCW_CHARS_MASK_XMWME)) {
        return;
    }
    /* extended measurements pending? */
    if (!(stctl & SCSW_STCTL_STATUS_PEND)) {
        return;
    }
    if ((stctl & SCSW_STCTL_PRIMARY) ||
        (stctl == SCSW_STCTL_SECONDARY) ||
        ((stctl & SCSW_STCTL_INTERMEDIATE) && (actl & SCSW_ACTL_SUSP))) {
        for (i = 0; i < ARRAY_SIZE(dest->emw); i++) {
            dest->emw[i] = cpu_to_be32(src->emw[i]);
        }
    }
    *irb_len = sizeof(*dest);
}

int css_do_tsch_get_irb(SubchDev *sch, IRB *target_irb, int *irb_len)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    uint16_t stctl;
    IRB irb;

    if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
        return 3;
    }

    stctl = s->ctrl & SCSW_CTRL_MASK_STCTL;

    /* Prepare the irb for the guest. */
    memset(&irb, 0, sizeof(IRB));

    /* Copy scsw from current status. */
    memcpy(&irb.scsw, s, sizeof(SCSW));
    if (stctl & SCSW_STCTL_STATUS_PEND) {
        if (s->cstat & (SCSW_CSTAT_DATA_CHECK |
                        SCSW_CSTAT_CHN_CTRL_CHK |
                        SCSW_CSTAT_INTF_CTRL_CHK)) {
            irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF;
            irb.esw[0] = 0x04804000;
        } else {
            irb.esw[0] = 0x00800000;
        }
        /* If a unit check is pending, copy sense data. */
        if ((s->dstat & SCSW_DSTAT_UNIT_CHECK) &&
            (p->chars & PMCW_CHARS_MASK_CSENSE)) {
            int i;

            irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF | SCSW_FLAGS_MASK_ECTL;
            /* Attention: sense_data is already BE! */
            memcpy(irb.ecw, sch->sense_data, sizeof(sch->sense_data));
            for (i = 0; i < ARRAY_SIZE(irb.ecw); i++) {
                irb.ecw[i] = be32_to_cpu(irb.ecw[i]);
            }
            irb.esw[1] = 0x01000000 | (sizeof(sch->sense_data) << 8);
        }
    }
    /* Store the irb to the guest. */
    copy_irb_to_guest(target_irb, &irb, p, irb_len);

    return ((stctl & SCSW_STCTL_STATUS_PEND) == 0);
}

void css_do_tsch_update_subch(SubchDev *sch)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    uint16_t stctl;
    uint16_t fctl;
    uint16_t actl;

    stctl = s->ctrl & SCSW_CTRL_MASK_STCTL;
    fctl = s->ctrl & SCSW_CTRL_MASK_FCTL;
    actl = s->ctrl & SCSW_CTRL_MASK_ACTL;

    /* Clear conditions on subchannel, if applicable. */
    if (stctl & SCSW_STCTL_STATUS_PEND) {
        s->ctrl &= ~SCSW_CTRL_MASK_STCTL;
        if ((stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) ||
            ((fctl & SCSW_FCTL_HALT_FUNC) &&
             (actl & SCSW_ACTL_SUSP))) {
            s->ctrl &= ~SCSW_CTRL_MASK_FCTL;
        }
        if (stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) {
            s->flags &= ~SCSW_FLAGS_MASK_PNO;
            s->ctrl &= ~(SCSW_ACTL_RESUME_PEND |
                         SCSW_ACTL_START_PEND |
                         SCSW_ACTL_HALT_PEND |
                         SCSW_ACTL_CLEAR_PEND |
                         SCSW_ACTL_SUSP);
        } else {
            if ((actl & SCSW_ACTL_SUSP) &&
                (fctl & SCSW_FCTL_START_FUNC)) {
                s->flags &= ~SCSW_FLAGS_MASK_PNO;
                if (fctl & SCSW_FCTL_HALT_FUNC) {
                    s->ctrl &= ~(SCSW_ACTL_RESUME_PEND |
                                 SCSW_ACTL_START_PEND |
                                 SCSW_ACTL_HALT_PEND |
                                 SCSW_ACTL_CLEAR_PEND |
                                 SCSW_ACTL_SUSP);
                } else {
                    s->ctrl &= ~SCSW_ACTL_RESUME_PEND;
                }
            }
        }
        /* Clear pending sense data. */
        if (p->chars & PMCW_CHARS_MASK_CSENSE) {
            memset(sch->sense_data, 0, sizeof(sch->sense_data));
        }
    }
}

static void copy_crw_to_guest(CRW *dest, const CRW *src)
{
    dest->flags = cpu_to_be16(src->flags);
    dest->rsid = cpu_to_be16(src->rsid);
}

int css_do_stcrw(CRW *crw)
{
    CrwContainer *crw_cont;
    int ret;

    crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws);
    if (crw_cont) {
        QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling);
        copy_crw_to_guest(crw, &crw_cont->crw);
        g_free(crw_cont);
        ret = 0;
    } else {
        /* List was empty, turn crw machine checks on again. */
        memset(crw, 0, sizeof(*crw));
        channel_subsys.do_crw_mchk = true;
        ret = 1;
    }

    return ret;
}

static void copy_crw_from_guest(CRW *dest, const CRW *src)
{
    dest->flags = be16_to_cpu(src->flags);
    dest->rsid = be16_to_cpu(src->rsid);
}

void css_undo_stcrw(CRW *crw)
{
    CrwContainer *crw_cont;

    crw_cont = g_try_malloc0(sizeof(CrwContainer));
    if (!crw_cont) {
        channel_subsys.crws_lost = true;
        return;
    }
    copy_crw_from_guest(&crw_cont->crw, crw);

    QTAILQ_INSERT_HEAD(&channel_subsys.pending_crws, crw_cont, sibling);
}
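/*
 * Editor's sketch (not part of the original code): css_do_stcrw() and
 * css_undo_stcrw() are intended to be used as a pair by a STORE CHANNEL
 * REPORT WORD handler. A hypothetical instruction handler might look
 * roughly like this, assuming it has already computed "crw_addr" and can
 * write the CRW back to guest memory:
 *
 *     CRW crw;
 *     int cc = css_do_stcrw(&crw);          // cc 1 means the queue was empty
 *     if (s390_cpu_virt_mem_write(cpu, crw_addr, ar, &crw, sizeof(crw))) {
 *         css_undo_stcrw(&crw);             // re-queue on access exception
 *         return;
 *     }
 *     setcc(cpu, cc);
 *
 * The names cpu, crw_addr and ar above are placeholders; the real caller
 * lives in the ioinst handling code, not in this file.
 */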
int css_do_tpi(IOIntCode *int_code, int lowcore)
{
    /* No pending interrupts for !KVM. */
    return 0;
}

int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
                         int rfmt, void *buf)
{
    int i, desc_size;
    uint32_t words[8];
    uint32_t chpid_type_word;
    CssImage *css;

    if (!m && !cssid) {
        css = channel_subsys.css[channel_subsys.default_cssid];
    } else {
        css = channel_subsys.css[cssid];
    }
    if (!css) {
        return 0;
    }
    desc_size = 0;
    for (i = f_chpid; i <= l_chpid; i++) {
        if (css->chpids[i].in_use) {
            chpid_type_word = 0x80000000 | (css->chpids[i].type << 8) | i;
            if (rfmt == 0) {
                words[0] = cpu_to_be32(chpid_type_word);
                words[1] = 0;
                memcpy(buf + desc_size, words, 8);
                desc_size += 8;
            } else if (rfmt == 1) {
                words[0] = cpu_to_be32(chpid_type_word);
                words[1] = 0;
                words[2] = 0;
                words[3] = 0;
                words[4] = 0;
                words[5] = 0;
                words[6] = 0;
                words[7] = 0;
                memcpy(buf + desc_size, words, 32);
                desc_size += 32;
            }
        }
    }
    return desc_size;
}

void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo)
{
    /* dct is currently ignored (not really meaningful for our devices) */
    /* TODO: Don't ignore mbk. */
    if (update && !channel_subsys.chnmon_active) {
        /* Enable measuring. */
        channel_subsys.chnmon_area = mbo;
        channel_subsys.chnmon_active = true;
    }
    if (!update && channel_subsys.chnmon_active) {
        /* Disable measuring. */
        channel_subsys.chnmon_area = 0;
        channel_subsys.chnmon_active = false;
    }
}

int css_do_rsch(SubchDev *sch)
{
    SCSW *s = &sch->curr_status.scsw;
    PMCW *p = &sch->curr_status.pmcw;
    int ret;

    if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) {
        ret = -ENODEV;
        goto out;
    }

    if (s->ctrl & SCSW_STCTL_STATUS_PEND) {
        ret = -EINPROGRESS;
        goto out;
    }

    if (((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
        (s->ctrl & SCSW_ACTL_RESUME_PEND) ||
        (!(s->ctrl & SCSW_ACTL_SUSP))) {
        ret = -EINVAL;
        goto out;
    }

    /* If monitoring is active, update counter. */
    if (channel_subsys.chnmon_active) {
        css_update_chnmon(sch);
    }

    s->ctrl |= SCSW_ACTL_RESUME_PEND;
    do_subchannel_work(sch, NULL);
    ret = 0;

out:
    return ret;
}

int css_do_rchp(uint8_t cssid, uint8_t chpid)
{
    uint8_t real_cssid;

    if (cssid > channel_subsys.max_cssid) {
        return -EINVAL;
    }
    if (channel_subsys.max_cssid == 0) {
        real_cssid = channel_subsys.default_cssid;
    } else {
        real_cssid = cssid;
    }
    if (!channel_subsys.css[real_cssid]) {
        return -EINVAL;
    }

    if (!channel_subsys.css[real_cssid]->chpids[chpid].in_use) {
        return -ENODEV;
    }

    if (!channel_subsys.css[real_cssid]->chpids[chpid].is_virtual) {
        fprintf(stderr,
                "rchp unsupported for non-virtual chpid %x.%02x!\n",
                real_cssid, chpid);
        return -ENODEV;
    }

    /* We don't really use a channel path, so we're done here. */
    css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT,
                  channel_subsys.max_cssid > 0 ? 1 : 0, chpid);
    if (channel_subsys.max_cssid > 0) {
        css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 0, real_cssid << 8);
    }
    return 0;
}

bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid)
{
    SubchSet *set;
    uint8_t real_cssid;

    real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid;
    if (real_cssid > MAX_CSSID || ssid > MAX_SSID ||
        !channel_subsys.css[real_cssid] ||
        !channel_subsys.css[real_cssid]->sch_set[ssid]) {
        return true;
    }
    set = channel_subsys.css[real_cssid]->sch_set[ssid];
    /* find_last_bit() takes the size in bits, not in unsigned longs. */
    return schid > find_last_bit(set->schids_used, MAX_SCHID + 1);
}

static int css_add_virtual_chpid(uint8_t cssid, uint8_t chpid, uint8_t type)
{
    CssImage *css;

    trace_css_chpid_add(cssid, chpid, type);
    if (cssid > MAX_CSSID) {
        return -EINVAL;
    }
    css = channel_subsys.css[cssid];
    if (!css) {
        return -EINVAL;
    }
    if (css->chpids[chpid].in_use) {
        return -EEXIST;
    }
    css->chpids[chpid].in_use = 1;
    css->chpids[chpid].type = type;
    css->chpids[chpid].is_virtual = 1;

    css_generate_chp_crws(cssid, chpid);

    return 0;
}

void css_sch_build_virtual_schib(SubchDev *sch, uint8_t chpid, uint8_t type)
{
    PMCW *p = &sch->curr_status.pmcw;
    SCSW *s = &sch->curr_status.scsw;
    int i;
    CssImage *css = channel_subsys.css[sch->cssid];

    assert(css != NULL);
    memset(p, 0, sizeof(PMCW));
    p->flags |= PMCW_FLAGS_MASK_DNV;
    p->devno = sch->devno;
    /* single path */
    p->pim = 0x80;
    p->pom = 0xff;
    p->pam = 0x80;
    p->chpid[0] = chpid;
    if (!css->chpids[chpid].in_use) {
        css_add_virtual_chpid(sch->cssid, chpid, type);
    }

    memset(s, 0, sizeof(SCSW));
    sch->curr_status.mba = 0;
    for (i = 0; i < ARRAY_SIZE(sch->curr_status.mda); i++) {
        sch->curr_status.mda[i] = 0;
    }
}

SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid, uint16_t schid)
{
    uint8_t real_cssid;

    real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid;

    if (!channel_subsys.css[real_cssid]) {
        return NULL;
    }

    if (!channel_subsys.css[real_cssid]->sch_set[ssid]) {
        return NULL;
    }

    return channel_subsys.css[real_cssid]->sch_set[ssid]->sch[schid];
}

bool css_subch_visible(SubchDev *sch)
{
    if (sch->ssid > channel_subsys.max_ssid) {
        return false;
    }

    if (sch->cssid != channel_subsys.default_cssid) {
        return (channel_subsys.max_cssid > 0);
    }

    return true;
}

bool css_present(uint8_t cssid)
{
    return (channel_subsys.css[cssid] != NULL);
}

bool css_devno_used(uint8_t cssid, uint8_t ssid, uint16_t devno)
{
    if (!channel_subsys.css[cssid]) {
        return false;
    }
    if (!channel_subsys.css[cssid]->sch_set[ssid]) {
        return false;
    }

    return !!test_bit(devno,
                      channel_subsys.css[cssid]->sch_set[ssid]->devnos_used);
}

void css_subch_assign(uint8_t cssid, uint8_t ssid, uint16_t schid,
                      uint16_t devno, SubchDev *sch)
{
    CssImage *css;
    SubchSet *s_set;

    trace_css_assign_subch(sch ? "assign" : "deassign", cssid, ssid, schid,
                           devno);
    if (!channel_subsys.css[cssid]) {
        fprintf(stderr,
                "Suspicious call to %s (%x.%x.%04x) for non-existing css!\n",
                __func__, cssid, ssid, schid);
        return;
    }
    css = channel_subsys.css[cssid];

    if (!css->sch_set[ssid]) {
        css->sch_set[ssid] = g_malloc0(sizeof(SubchSet));
    }
    s_set = css->sch_set[ssid];

    s_set->sch[schid] = sch;
    if (sch) {
        set_bit(schid, s_set->schids_used);
        set_bit(devno, s_set->devnos_used);
    } else {
        clear_bit(schid, s_set->schids_used);
        clear_bit(devno, s_set->devnos_used);
    }
}

void css_queue_crw(uint8_t rsc, uint8_t erc, int chain, uint16_t rsid)
{
    CrwContainer *crw_cont;

    trace_css_crw(rsc, erc, rsid, chain ? "(chained)" : "");
    /* TODO: Maybe use a static crw pool? */
    crw_cont = g_try_malloc0(sizeof(CrwContainer));
    if (!crw_cont) {
        channel_subsys.crws_lost = true;
        return;
    }
    crw_cont->crw.flags = (rsc << 8) | erc;
    if (chain) {
        crw_cont->crw.flags |= CRW_FLAGS_MASK_C;
    }
    crw_cont->crw.rsid = rsid;
    if (channel_subsys.crws_lost) {
        crw_cont->crw.flags |= CRW_FLAGS_MASK_R;
        channel_subsys.crws_lost = false;
    }

    QTAILQ_INSERT_TAIL(&channel_subsys.pending_crws, crw_cont, sibling);

    if (channel_subsys.do_crw_mchk) {
        channel_subsys.do_crw_mchk = false;
        /* Inject crw pending machine check. */
        s390_crw_mchk();
    }
}

void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid,
                           int hotplugged, int add)
{
    uint8_t guest_cssid;
    bool chain_crw;

    if (add && !hotplugged) {
        return;
    }
    if (channel_subsys.max_cssid == 0) {
        /* Default cssid shows up as 0. */
        guest_cssid = (cssid == channel_subsys.default_cssid) ? 0 : cssid;
    } else {
        /* Show real cssid to the guest. */
        guest_cssid = cssid;
    }
    /*
     * Only notify for higher subchannel sets/channel subsystems if the
     * guest has enabled it.
     */
    if ((ssid > channel_subsys.max_ssid) ||
        (guest_cssid > channel_subsys.max_cssid) ||
        ((channel_subsys.max_cssid == 0) &&
         (cssid != channel_subsys.default_cssid))) {
        return;
    }
    chain_crw = (channel_subsys.max_ssid > 0) ||
                (channel_subsys.max_cssid > 0);
    css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, chain_crw ? 1 : 0, schid);
    if (chain_crw) {
        css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0,
                      (guest_cssid << 8) | (ssid << 4));
    }
    /* CRW_ERC_IPI --> clear pending interrupts */
    css_clear_io_interrupt(css_do_build_subchannel_id(cssid, ssid), schid);
}

void css_generate_chp_crws(uint8_t cssid, uint8_t chpid)
{
    /* TODO */
}

void css_generate_css_crws(uint8_t cssid)
{
    if (!channel_subsys.sei_pending) {
        css_queue_crw(CRW_RSC_CSS, 0, 0, cssid);
    }
    channel_subsys.sei_pending = true;
}

void css_clear_sei_pending(void)
{
    channel_subsys.sei_pending = false;
}

int css_enable_mcsse(void)
{
    trace_css_enable_facility("mcsse");
    channel_subsys.max_cssid = MAX_CSSID;
    return 0;
}

int css_enable_mss(void)
{
    trace_css_enable_facility("mss");
    channel_subsys.max_ssid = MAX_SSID;
    return 0;
}

void subch_device_save(SubchDev *s, QEMUFile *f)
{
    int i;

    qemu_put_byte(f, s->cssid);
    qemu_put_byte(f, s->ssid);
    qemu_put_be16(f, s->schid);
    qemu_put_be16(f, s->devno);
    qemu_put_byte(f, s->thinint_active);
    /* SCHIB */
    /* PMCW */
    qemu_put_be32(f, s->curr_status.pmcw.intparm);
    qemu_put_be16(f, s->curr_status.pmcw.flags);
    qemu_put_be16(f, s->curr_status.pmcw.devno);
    qemu_put_byte(f, s->curr_status.pmcw.lpm);
    qemu_put_byte(f, s->curr_status.pmcw.pnom);
    qemu_put_byte(f, s->curr_status.pmcw.lpum);
    qemu_put_byte(f, s->curr_status.pmcw.pim);
    qemu_put_be16(f, s->curr_status.pmcw.mbi);
    qemu_put_byte(f, s->curr_status.pmcw.pom);
    qemu_put_byte(f, s->curr_status.pmcw.pam);
    qemu_put_buffer(f, s->curr_status.pmcw.chpid, 8);
    qemu_put_be32(f, s->curr_status.pmcw.chars);
    /* SCSW */
    qemu_put_be16(f, s->curr_status.scsw.flags);
    qemu_put_be16(f, s->curr_status.scsw.ctrl);
    qemu_put_be32(f, s->curr_status.scsw.cpa);
    qemu_put_byte(f, s->curr_status.scsw.dstat);
    qemu_put_byte(f, s->curr_status.scsw.cstat);
    qemu_put_be16(f, s->curr_status.scsw.count);
    qemu_put_be64(f, s->curr_status.mba);
    qemu_put_buffer(f, s->curr_status.mda, 4);
    /* end SCHIB */
    qemu_put_buffer(f, s->sense_data, 32);
    qemu_put_be64(f, s->channel_prog);
    /* last cmd */
    qemu_put_byte(f, s->last_cmd.cmd_code);
    qemu_put_byte(f, s->last_cmd.flags);
    qemu_put_be16(f, s->last_cmd.count);
    qemu_put_be32(f, s->last_cmd.cda);
    qemu_put_byte(f, s->last_cmd_valid);
    qemu_put_byte(f, s->id.reserved);
    qemu_put_be16(f, s->id.cu_type);
    qemu_put_byte(f, s->id.cu_model);
    qemu_put_be16(f, s->id.dev_type);
    qemu_put_byte(f, s->id.dev_model);
    qemu_put_byte(f, s->id.unused);
    for (i = 0; i < ARRAY_SIZE(s->id.ciw); i++) {
        qemu_put_byte(f, s->id.ciw[i].type);
        qemu_put_byte(f, s->id.ciw[i].command);
        qemu_put_be16(f, s->id.ciw[i].count);
    }
    qemu_put_byte(f, s->ccw_fmt_1);
    qemu_put_byte(f, s->ccw_no_data_cnt);
}
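/*
 * Editor's note (not part of the original code): subch_device_save() above
 * and subch_device_load() below must write and read exactly the same
 * fields in exactly the same order, since this hand-rolled stream carries
 * no field markers. Any new subchannel state would presumably have to be
 * appended at the end of both functions (and gated by a versioning or
 * compatibility mechanism at the device level) to keep migration from
 * older streams working; that policy is an assumption of this note, not
 * something enforced here.
 */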
int subch_device_load(SubchDev *s, QEMUFile *f)
{
    int i;

    s->cssid = qemu_get_byte(f);
    s->ssid = qemu_get_byte(f);
    s->schid = qemu_get_be16(f);
    s->devno = qemu_get_be16(f);
    s->thinint_active = qemu_get_byte(f);
    /* SCHIB */
    /* PMCW */
    s->curr_status.pmcw.intparm = qemu_get_be32(f);
    s->curr_status.pmcw.flags = qemu_get_be16(f);
    s->curr_status.pmcw.devno = qemu_get_be16(f);
    s->curr_status.pmcw.lpm = qemu_get_byte(f);
    s->curr_status.pmcw.pnom = qemu_get_byte(f);
    s->curr_status.pmcw.lpum = qemu_get_byte(f);
    s->curr_status.pmcw.pim = qemu_get_byte(f);
    s->curr_status.pmcw.mbi = qemu_get_be16(f);
    s->curr_status.pmcw.pom = qemu_get_byte(f);
    s->curr_status.pmcw.pam = qemu_get_byte(f);
    qemu_get_buffer(f, s->curr_status.pmcw.chpid, 8);
    s->curr_status.pmcw.chars = qemu_get_be32(f);
    /* SCSW */
    s->curr_status.scsw.flags = qemu_get_be16(f);
    s->curr_status.scsw.ctrl = qemu_get_be16(f);
    s->curr_status.scsw.cpa = qemu_get_be32(f);
    s->curr_status.scsw.dstat = qemu_get_byte(f);
    s->curr_status.scsw.cstat = qemu_get_byte(f);
    s->curr_status.scsw.count = qemu_get_be16(f);
    s->curr_status.mba = qemu_get_be64(f);
    qemu_get_buffer(f, s->curr_status.mda, 4);
    /* end SCHIB */
    qemu_get_buffer(f, s->sense_data, 32);
    s->channel_prog = qemu_get_be64(f);
    /* last cmd */
    s->last_cmd.cmd_code = qemu_get_byte(f);
    s->last_cmd.flags = qemu_get_byte(f);
    s->last_cmd.count = qemu_get_be16(f);
    s->last_cmd.cda = qemu_get_be32(f);
    s->last_cmd_valid = qemu_get_byte(f);
    s->id.reserved = qemu_get_byte(f);
    s->id.cu_type = qemu_get_be16(f);
    s->id.cu_model = qemu_get_byte(f);
    s->id.dev_type = qemu_get_be16(f);
    s->id.dev_model = qemu_get_byte(f);
    s->id.unused = qemu_get_byte(f);
    for (i = 0; i < ARRAY_SIZE(s->id.ciw); i++) {
        s->id.ciw[i].type = qemu_get_byte(f);
        s->id.ciw[i].command = qemu_get_byte(f);
        s->id.ciw[i].count = qemu_get_be16(f);
    }
    s->ccw_fmt_1 = qemu_get_byte(f);
    s->ccw_no_data_cnt = qemu_get_byte(f);
    /*
     * Hack alert. We don't migrate the channel subsystem status (no
     * device!), but we need to find out if the guest enabled mss/mcss-e.
     * If the subchannel is enabled, it certainly was able to access it,
     * so adjust the max_ssid/max_cssid values for relevant ssid/cssid
     * values. This is not watertight, but better than nothing.
     */
    if (s->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA) {
        if (s->ssid) {
            channel_subsys.max_ssid = MAX_SSID;
        }
        if (s->cssid != channel_subsys.default_cssid) {
            channel_subsys.max_cssid = MAX_CSSID;
        }
    }
    return 0;
}

void css_reset_sch(SubchDev *sch)
{
    PMCW *p = &sch->curr_status.pmcw;

    if ((p->flags & PMCW_FLAGS_MASK_ENA) != 0 && sch->disable_cb) {
        sch->disable_cb(sch);
    }

    p->intparm = 0;
    p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
                  PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
                  PMCW_FLAGS_MASK_MP | PMCW_FLAGS_MASK_TF);
    p->flags |= PMCW_FLAGS_MASK_DNV;
    p->devno = sch->devno;
    p->pim = 0x80;
    p->lpm = p->pim;
    p->pnom = 0;
    p->lpum = 0;
    p->mbi = 0;
    p->pom = 0xff;
    p->pam = 0x80;
    p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_XMWME |
                  PMCW_CHARS_MASK_CSENSE);

    memset(&sch->curr_status.scsw, 0, sizeof(sch->curr_status.scsw));
    sch->curr_status.mba = 0;

    sch->channel_prog = 0x0;
    sch->last_cmd_valid = false;
    sch->thinint_active = false;
}

void css_reset(void)
{
    CrwContainer *crw_cont;

    /* Clean up monitoring. */
    channel_subsys.chnmon_active = false;
    channel_subsys.chnmon_area = 0;

    /* Clear pending CRWs. */
    while ((crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws))) {
        QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling);
        g_free(crw_cont);
    }
    channel_subsys.sei_pending = false;
    channel_subsys.do_crw_mchk = true;
    channel_subsys.crws_lost = false;

    /* Reset maximum ids. */
    channel_subsys.max_cssid = 0;
    channel_subsys.max_ssid = 0;
}

static void get_css_devid(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    Property *prop = opaque;
    CssDevId *dev_id = qdev_get_prop_ptr(dev, prop);
    char buffer[] = "xx.x.xxxx";
    char *p = buffer;
    int r;

    if (dev_id->valid) {
        r = snprintf(buffer, sizeof(buffer), "%02x.%1x.%04x", dev_id->cssid,
                     dev_id->ssid, dev_id->devid);
        assert(r == sizeof(buffer) - 1);

        /* drop leading zero */
        if (dev_id->cssid <= 0xf) {
            p++;
        }
    } else {
        snprintf(buffer, sizeof(buffer), "<unset>");
    }

    visit_type_str(v, name, &p, errp);
}

/*
 * parse <cssid>.<ssid>.<devid> and assert valid range for cssid/ssid
 */
static void set_css_devid(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    Property *prop = opaque;
    CssDevId *dev_id = qdev_get_prop_ptr(dev, prop);
    Error *local_err = NULL;
    char *str;
    int num, n1, n2;
    unsigned int cssid, ssid, devid;

    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    visit_type_str(v, name, &str, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    num = sscanf(str, "%2x.%1x%n.%4x%n", &cssid, &ssid, &n1, &devid, &n2);
    if (num != 3 || (n2 - n1) != 5 || strlen(str) != n2) {
        error_set_from_qdev_prop_error(errp, EINVAL, dev, prop, str);
        goto out;
    }
    if ((cssid > MAX_CSSID) || (ssid > MAX_SSID)) {
        error_setg(errp, "Invalid cssid or ssid: cssid %x, ssid %x",
                   cssid, ssid);
        goto out;
    }

    dev_id->cssid = cssid;
    dev_id->ssid = ssid;
    dev_id->devid = devid;
    dev_id->valid = true;

out:
    g_free(str);
}

PropertyInfo css_devid_propinfo = {
    .name = "str",
    .description = "Identifier of an I/O device in the channel "
                   "subsystem, example: fe.1.23ab",
    .get = get_css_devid,
    .set = set_css_devid,
};
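/*
 * Editor's sketch (not part of the original code): css_devid_propinfo is
 * meant to back a qdev property whose backing field has type CssDevId. A
 * device wanting such a "devno" property would presumably declare it along
 * these lines (the macro name, device type and field below are
 * illustrative placeholders, patterned after the usual DEFINE_PROP_*
 * helpers):
 *
 *     #define DEFINE_PROP_CSS_DEV_ID(_name, _state, _field) \
 *         DEFINE_PROP(_name, _state, _field, css_devid_propinfo, CssDevId)
 *
 *     DEFINE_PROP_CSS_DEV_ID("devno", MyCcwDevice, bus_id),
 *
 * which would let users specify e.g. -device my-ccw-dev,devno=fe.0.0001
 * and have set_css_devid() above parse and range-check the triple.
 */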