1 /* 2 * Channel subsystem base support. 3 * 4 * Copyright 2012 IBM Corp. 5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> 6 * 7 * This work is licensed under the terms of the GNU GPL, version 2 or (at 8 * your option) any later version. See the COPYING file in the top-level 9 * directory. 10 */ 11 12 #include "qemu/osdep.h" 13 #include <hw/qdev.h> 14 #include "qemu/bitops.h" 15 #include "exec/address-spaces.h" 16 #include "cpu.h" 17 #include "ioinst.h" 18 #include "css.h" 19 #include "trace.h" 20 #include "hw/s390x/s390_flic.h" 21 22 typedef struct CrwContainer { 23 CRW crw; 24 QTAILQ_ENTRY(CrwContainer) sibling; 25 } CrwContainer; 26 27 typedef struct ChpInfo { 28 uint8_t in_use; 29 uint8_t type; 30 uint8_t is_virtual; 31 } ChpInfo; 32 33 typedef struct SubchSet { 34 SubchDev *sch[MAX_SCHID + 1]; 35 unsigned long schids_used[BITS_TO_LONGS(MAX_SCHID + 1)]; 36 unsigned long devnos_used[BITS_TO_LONGS(MAX_SCHID + 1)]; 37 } SubchSet; 38 39 typedef struct CssImage { 40 SubchSet *sch_set[MAX_SSID + 1]; 41 ChpInfo chpids[MAX_CHPID + 1]; 42 } CssImage; 43 44 typedef struct IoAdapter { 45 uint32_t id; 46 uint8_t type; 47 uint8_t isc; 48 QTAILQ_ENTRY(IoAdapter) sibling; 49 } IoAdapter; 50 51 typedef struct ChannelSubSys { 52 QTAILQ_HEAD(, CrwContainer) pending_crws; 53 bool sei_pending; 54 bool do_crw_mchk; 55 bool crws_lost; 56 uint8_t max_cssid; 57 uint8_t max_ssid; 58 bool chnmon_active; 59 uint64_t chnmon_area; 60 CssImage *css[MAX_CSSID + 1]; 61 uint8_t default_cssid; 62 QTAILQ_HEAD(, IoAdapter) io_adapters; 63 } ChannelSubSys; 64 65 static ChannelSubSys *channel_subsys; 66 67 int css_create_css_image(uint8_t cssid, bool default_image) 68 { 69 trace_css_new_image(cssid, default_image ? "(default)" : ""); 70 if (cssid > MAX_CSSID) { 71 return -EINVAL; 72 } 73 if (channel_subsys->css[cssid]) { 74 return -EBUSY; 75 } 76 channel_subsys->css[cssid] = g_malloc0(sizeof(CssImage)); 77 if (default_image) { 78 channel_subsys->default_cssid = cssid; 79 } 80 return 0; 81 } 82 83 int css_register_io_adapter(uint8_t type, uint8_t isc, bool swap, 84 bool maskable, uint32_t *id) 85 { 86 IoAdapter *adapter; 87 bool found = false; 88 int ret; 89 S390FLICState *fs = s390_get_flic(); 90 S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs); 91 92 *id = 0; 93 QTAILQ_FOREACH(adapter, &channel_subsys->io_adapters, sibling) { 94 if ((adapter->type == type) && (adapter->isc == isc)) { 95 *id = adapter->id; 96 found = true; 97 ret = 0; 98 break; 99 } 100 if (adapter->id >= *id) { 101 *id = adapter->id + 1; 102 } 103 } 104 if (found) { 105 goto out; 106 } 107 adapter = g_new0(IoAdapter, 1); 108 ret = fsc->register_io_adapter(fs, *id, isc, swap, maskable); 109 if (ret == 0) { 110 adapter->id = *id; 111 adapter->isc = isc; 112 adapter->type = type; 113 QTAILQ_INSERT_TAIL(&channel_subsys->io_adapters, adapter, sibling); 114 } else { 115 g_free(adapter); 116 fprintf(stderr, "Unexpected error %d when registering adapter %d\n", 117 ret, *id); 118 } 119 out: 120 return ret; 121 } 122 123 uint16_t css_build_subchannel_id(SubchDev *sch) 124 { 125 if (channel_subsys->max_cssid > 0) { 126 return (sch->cssid << 8) | (1 << 3) | (sch->ssid << 1) | 1; 127 } 128 return (sch->ssid << 1) | 1; 129 } 130 131 static void css_inject_io_interrupt(SubchDev *sch) 132 { 133 uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11; 134 135 trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid, 136 sch->curr_status.pmcw.intparm, isc, ""); 137 s390_io_interrupt(css_build_subchannel_id(sch), 138 sch->schid, 139 sch->curr_status.pmcw.intparm, 140 isc << 27); 141 } 142 143 void css_conditional_io_interrupt(SubchDev *sch) 144 { 145 /* 146 * If the subchannel is not currently status pending, make it pending 147 * with alert status. 148 */ 149 if (!(sch->curr_status.scsw.ctrl & SCSW_STCTL_STATUS_PEND)) { 150 uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11; 151 152 trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid, 153 sch->curr_status.pmcw.intparm, isc, 154 "(unsolicited)"); 155 sch->curr_status.scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL; 156 sch->curr_status.scsw.ctrl |= 157 SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; 158 /* Inject an I/O interrupt. */ 159 s390_io_interrupt(css_build_subchannel_id(sch), 160 sch->schid, 161 sch->curr_status.pmcw.intparm, 162 isc << 27); 163 } 164 } 165 166 void css_adapter_interrupt(uint8_t isc) 167 { 168 uint32_t io_int_word = (isc << 27) | IO_INT_WORD_AI; 169 170 trace_css_adapter_interrupt(isc); 171 s390_io_interrupt(0, 0, 0, io_int_word); 172 } 173 174 static void sch_handle_clear_func(SubchDev *sch) 175 { 176 PMCW *p = &sch->curr_status.pmcw; 177 SCSW *s = &sch->curr_status.scsw; 178 int path; 179 180 /* Path management: In our simple css, we always choose the only path. */ 181 path = 0x80; 182 183 /* Reset values prior to 'issuing the clear signal'. */ 184 p->lpum = 0; 185 p->pom = 0xff; 186 s->flags &= ~SCSW_FLAGS_MASK_PNO; 187 188 /* We always 'attempt to issue the clear signal', and we always succeed. */ 189 sch->channel_prog = 0x0; 190 sch->last_cmd_valid = false; 191 s->ctrl &= ~SCSW_ACTL_CLEAR_PEND; 192 s->ctrl |= SCSW_STCTL_STATUS_PEND; 193 194 s->dstat = 0; 195 s->cstat = 0; 196 p->lpum = path; 197 198 } 199 200 static void sch_handle_halt_func(SubchDev *sch) 201 { 202 203 PMCW *p = &sch->curr_status.pmcw; 204 SCSW *s = &sch->curr_status.scsw; 205 hwaddr curr_ccw = sch->channel_prog; 206 int path; 207 208 /* Path management: In our simple css, we always choose the only path. */ 209 path = 0x80; 210 211 /* We always 'attempt to issue the halt signal', and we always succeed. */ 212 sch->channel_prog = 0x0; 213 sch->last_cmd_valid = false; 214 s->ctrl &= ~SCSW_ACTL_HALT_PEND; 215 s->ctrl |= SCSW_STCTL_STATUS_PEND; 216 217 if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) || 218 !((s->ctrl & SCSW_ACTL_START_PEND) || 219 (s->ctrl & SCSW_ACTL_SUSP))) { 220 s->dstat = SCSW_DSTAT_DEVICE_END; 221 } 222 if ((s->ctrl & (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) || 223 (s->ctrl & SCSW_ACTL_SUSP)) { 224 s->cpa = curr_ccw + 8; 225 } 226 s->cstat = 0; 227 p->lpum = path; 228 229 } 230 231 static void copy_sense_id_to_guest(SenseId *dest, SenseId *src) 232 { 233 int i; 234 235 dest->reserved = src->reserved; 236 dest->cu_type = cpu_to_be16(src->cu_type); 237 dest->cu_model = src->cu_model; 238 dest->dev_type = cpu_to_be16(src->dev_type); 239 dest->dev_model = src->dev_model; 240 dest->unused = src->unused; 241 for (i = 0; i < ARRAY_SIZE(dest->ciw); i++) { 242 dest->ciw[i].type = src->ciw[i].type; 243 dest->ciw[i].command = src->ciw[i].command; 244 dest->ciw[i].count = cpu_to_be16(src->ciw[i].count); 245 } 246 } 247 248 static CCW1 copy_ccw_from_guest(hwaddr addr, bool fmt1) 249 { 250 CCW0 tmp0; 251 CCW1 tmp1; 252 CCW1 ret; 253 254 if (fmt1) { 255 cpu_physical_memory_read(addr, &tmp1, sizeof(tmp1)); 256 ret.cmd_code = tmp1.cmd_code; 257 ret.flags = tmp1.flags; 258 ret.count = be16_to_cpu(tmp1.count); 259 ret.cda = be32_to_cpu(tmp1.cda); 260 } else { 261 cpu_physical_memory_read(addr, &tmp0, sizeof(tmp0)); 262 ret.cmd_code = tmp0.cmd_code; 263 ret.flags = tmp0.flags; 264 ret.count = be16_to_cpu(tmp0.count); 265 ret.cda = be16_to_cpu(tmp0.cda1) | (tmp0.cda0 << 16); 266 if ((ret.cmd_code & 0x0f) == CCW_CMD_TIC) { 267 ret.cmd_code &= 0x0f; 268 } 269 } 270 return ret; 271 } 272 273 static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr) 274 { 275 int ret; 276 bool check_len; 277 int len; 278 CCW1 ccw; 279 280 if (!ccw_addr) { 281 return -EIO; 282 } 283 284 /* Translate everything to format-1 ccws - the information is the same. */ 285 ccw = copy_ccw_from_guest(ccw_addr, sch->ccw_fmt_1); 286 287 /* Check for invalid command codes. */ 288 if ((ccw.cmd_code & 0x0f) == 0) { 289 return -EINVAL; 290 } 291 if (((ccw.cmd_code & 0x0f) == CCW_CMD_TIC) && 292 ((ccw.cmd_code & 0xf0) != 0)) { 293 return -EINVAL; 294 } 295 if (!sch->ccw_fmt_1 && (ccw.count == 0) && 296 (ccw.cmd_code != CCW_CMD_TIC)) { 297 return -EINVAL; 298 } 299 300 if (ccw.flags & CCW_FLAG_SUSPEND) { 301 return -EINPROGRESS; 302 } 303 304 check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC)); 305 306 if (!ccw.cda) { 307 if (sch->ccw_no_data_cnt == 255) { 308 return -EINVAL; 309 } 310 sch->ccw_no_data_cnt++; 311 } 312 313 /* Look at the command. */ 314 switch (ccw.cmd_code) { 315 case CCW_CMD_NOOP: 316 /* Nothing to do. */ 317 ret = 0; 318 break; 319 case CCW_CMD_BASIC_SENSE: 320 if (check_len) { 321 if (ccw.count != sizeof(sch->sense_data)) { 322 ret = -EINVAL; 323 break; 324 } 325 } 326 len = MIN(ccw.count, sizeof(sch->sense_data)); 327 cpu_physical_memory_write(ccw.cda, sch->sense_data, len); 328 sch->curr_status.scsw.count = ccw.count - len; 329 memset(sch->sense_data, 0, sizeof(sch->sense_data)); 330 ret = 0; 331 break; 332 case CCW_CMD_SENSE_ID: 333 { 334 SenseId sense_id; 335 336 copy_sense_id_to_guest(&sense_id, &sch->id); 337 /* Sense ID information is device specific. */ 338 if (check_len) { 339 if (ccw.count != sizeof(sense_id)) { 340 ret = -EINVAL; 341 break; 342 } 343 } 344 len = MIN(ccw.count, sizeof(sense_id)); 345 /* 346 * Only indicate 0xff in the first sense byte if we actually 347 * have enough place to store at least bytes 0-3. 348 */ 349 if (len >= 4) { 350 sense_id.reserved = 0xff; 351 } else { 352 sense_id.reserved = 0; 353 } 354 cpu_physical_memory_write(ccw.cda, &sense_id, len); 355 sch->curr_status.scsw.count = ccw.count - len; 356 ret = 0; 357 break; 358 } 359 case CCW_CMD_TIC: 360 if (sch->last_cmd_valid && (sch->last_cmd.cmd_code == CCW_CMD_TIC)) { 361 ret = -EINVAL; 362 break; 363 } 364 if (ccw.flags & (CCW_FLAG_CC | CCW_FLAG_DC)) { 365 ret = -EINVAL; 366 break; 367 } 368 sch->channel_prog = ccw.cda; 369 ret = -EAGAIN; 370 break; 371 default: 372 if (sch->ccw_cb) { 373 /* Handle device specific commands. */ 374 ret = sch->ccw_cb(sch, ccw); 375 } else { 376 ret = -ENOSYS; 377 } 378 break; 379 } 380 sch->last_cmd = ccw; 381 sch->last_cmd_valid = true; 382 if (ret == 0) { 383 if (ccw.flags & CCW_FLAG_CC) { 384 sch->channel_prog += 8; 385 ret = -EAGAIN; 386 } 387 } 388 389 return ret; 390 } 391 392 static void sch_handle_start_func(SubchDev *sch, ORB *orb) 393 { 394 395 PMCW *p = &sch->curr_status.pmcw; 396 SCSW *s = &sch->curr_status.scsw; 397 int path; 398 int ret; 399 400 /* Path management: In our simple css, we always choose the only path. */ 401 path = 0x80; 402 403 if (!(s->ctrl & SCSW_ACTL_SUSP)) { 404 s->cstat = 0; 405 s->dstat = 0; 406 /* Look at the orb and try to execute the channel program. */ 407 assert(orb != NULL); /* resume does not pass an orb */ 408 p->intparm = orb->intparm; 409 if (!(orb->lpm & path)) { 410 /* Generate a deferred cc 3 condition. */ 411 s->flags |= SCSW_FLAGS_MASK_CC; 412 s->ctrl &= ~SCSW_CTRL_MASK_STCTL; 413 s->ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND); 414 return; 415 } 416 sch->ccw_fmt_1 = !!(orb->ctrl0 & ORB_CTRL0_MASK_FMT); 417 sch->ccw_no_data_cnt = 0; 418 } else { 419 s->ctrl &= ~(SCSW_ACTL_SUSP | SCSW_ACTL_RESUME_PEND); 420 } 421 sch->last_cmd_valid = false; 422 do { 423 ret = css_interpret_ccw(sch, sch->channel_prog); 424 switch (ret) { 425 case -EAGAIN: 426 /* ccw chain, continue processing */ 427 break; 428 case 0: 429 /* success */ 430 s->ctrl &= ~SCSW_ACTL_START_PEND; 431 s->ctrl &= ~SCSW_CTRL_MASK_STCTL; 432 s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | 433 SCSW_STCTL_STATUS_PEND; 434 s->dstat = SCSW_DSTAT_CHANNEL_END | SCSW_DSTAT_DEVICE_END; 435 s->cpa = sch->channel_prog + 8; 436 break; 437 case -ENOSYS: 438 /* unsupported command, generate unit check (command reject) */ 439 s->ctrl &= ~SCSW_ACTL_START_PEND; 440 s->dstat = SCSW_DSTAT_UNIT_CHECK; 441 /* Set sense bit 0 in ecw0. */ 442 sch->sense_data[0] = 0x80; 443 s->ctrl &= ~SCSW_CTRL_MASK_STCTL; 444 s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | 445 SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; 446 s->cpa = sch->channel_prog + 8; 447 break; 448 case -EFAULT: 449 /* memory problem, generate channel data check */ 450 s->ctrl &= ~SCSW_ACTL_START_PEND; 451 s->cstat = SCSW_CSTAT_DATA_CHECK; 452 s->ctrl &= ~SCSW_CTRL_MASK_STCTL; 453 s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | 454 SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; 455 s->cpa = sch->channel_prog + 8; 456 break; 457 case -EBUSY: 458 /* subchannel busy, generate deferred cc 1 */ 459 s->flags &= ~SCSW_FLAGS_MASK_CC; 460 s->flags |= (1 << 8); 461 s->ctrl &= ~SCSW_CTRL_MASK_STCTL; 462 s->ctrl |= SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; 463 break; 464 case -EINPROGRESS: 465 /* channel program has been suspended */ 466 s->ctrl &= ~SCSW_ACTL_START_PEND; 467 s->ctrl |= SCSW_ACTL_SUSP; 468 break; 469 default: 470 /* error, generate channel program check */ 471 s->ctrl &= ~SCSW_ACTL_START_PEND; 472 s->cstat = SCSW_CSTAT_PROG_CHECK; 473 s->ctrl &= ~SCSW_CTRL_MASK_STCTL; 474 s->ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY | 475 SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND; 476 s->cpa = sch->channel_prog + 8; 477 break; 478 } 479 } while (ret == -EAGAIN); 480 481 } 482 483 /* 484 * On real machines, this would run asynchronously to the main vcpus. 485 * We might want to make some parts of the ssch handling (interpreting 486 * read/writes) asynchronous later on if we start supporting more than 487 * our current very simple devices. 488 */ 489 static void do_subchannel_work(SubchDev *sch, ORB *orb) 490 { 491 492 SCSW *s = &sch->curr_status.scsw; 493 494 if (s->ctrl & SCSW_FCTL_CLEAR_FUNC) { 495 sch_handle_clear_func(sch); 496 } else if (s->ctrl & SCSW_FCTL_HALT_FUNC) { 497 sch_handle_halt_func(sch); 498 } else if (s->ctrl & SCSW_FCTL_START_FUNC) { 499 sch_handle_start_func(sch, orb); 500 } else { 501 /* Cannot happen. */ 502 return; 503 } 504 css_inject_io_interrupt(sch); 505 } 506 507 static void copy_pmcw_to_guest(PMCW *dest, const PMCW *src) 508 { 509 int i; 510 511 dest->intparm = cpu_to_be32(src->intparm); 512 dest->flags = cpu_to_be16(src->flags); 513 dest->devno = cpu_to_be16(src->devno); 514 dest->lpm = src->lpm; 515 dest->pnom = src->pnom; 516 dest->lpum = src->lpum; 517 dest->pim = src->pim; 518 dest->mbi = cpu_to_be16(src->mbi); 519 dest->pom = src->pom; 520 dest->pam = src->pam; 521 for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) { 522 dest->chpid[i] = src->chpid[i]; 523 } 524 dest->chars = cpu_to_be32(src->chars); 525 } 526 527 static void copy_scsw_to_guest(SCSW *dest, const SCSW *src) 528 { 529 dest->flags = cpu_to_be16(src->flags); 530 dest->ctrl = cpu_to_be16(src->ctrl); 531 dest->cpa = cpu_to_be32(src->cpa); 532 dest->dstat = src->dstat; 533 dest->cstat = src->cstat; 534 dest->count = cpu_to_be16(src->count); 535 } 536 537 static void copy_schib_to_guest(SCHIB *dest, const SCHIB *src) 538 { 539 int i; 540 541 copy_pmcw_to_guest(&dest->pmcw, &src->pmcw); 542 copy_scsw_to_guest(&dest->scsw, &src->scsw); 543 dest->mba = cpu_to_be64(src->mba); 544 for (i = 0; i < ARRAY_SIZE(dest->mda); i++) { 545 dest->mda[i] = src->mda[i]; 546 } 547 } 548 549 int css_do_stsch(SubchDev *sch, SCHIB *schib) 550 { 551 /* Use current status. */ 552 copy_schib_to_guest(schib, &sch->curr_status); 553 return 0; 554 } 555 556 static void copy_pmcw_from_guest(PMCW *dest, const PMCW *src) 557 { 558 int i; 559 560 dest->intparm = be32_to_cpu(src->intparm); 561 dest->flags = be16_to_cpu(src->flags); 562 dest->devno = be16_to_cpu(src->devno); 563 dest->lpm = src->lpm; 564 dest->pnom = src->pnom; 565 dest->lpum = src->lpum; 566 dest->pim = src->pim; 567 dest->mbi = be16_to_cpu(src->mbi); 568 dest->pom = src->pom; 569 dest->pam = src->pam; 570 for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) { 571 dest->chpid[i] = src->chpid[i]; 572 } 573 dest->chars = be32_to_cpu(src->chars); 574 } 575 576 static void copy_scsw_from_guest(SCSW *dest, const SCSW *src) 577 { 578 dest->flags = be16_to_cpu(src->flags); 579 dest->ctrl = be16_to_cpu(src->ctrl); 580 dest->cpa = be32_to_cpu(src->cpa); 581 dest->dstat = src->dstat; 582 dest->cstat = src->cstat; 583 dest->count = be16_to_cpu(src->count); 584 } 585 586 static void copy_schib_from_guest(SCHIB *dest, const SCHIB *src) 587 { 588 int i; 589 590 copy_pmcw_from_guest(&dest->pmcw, &src->pmcw); 591 copy_scsw_from_guest(&dest->scsw, &src->scsw); 592 dest->mba = be64_to_cpu(src->mba); 593 for (i = 0; i < ARRAY_SIZE(dest->mda); i++) { 594 dest->mda[i] = src->mda[i]; 595 } 596 } 597 598 int css_do_msch(SubchDev *sch, const SCHIB *orig_schib) 599 { 600 SCSW *s = &sch->curr_status.scsw; 601 PMCW *p = &sch->curr_status.pmcw; 602 uint16_t oldflags; 603 int ret; 604 SCHIB schib; 605 606 if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_DNV)) { 607 ret = 0; 608 goto out; 609 } 610 611 if (s->ctrl & SCSW_STCTL_STATUS_PEND) { 612 ret = -EINPROGRESS; 613 goto out; 614 } 615 616 if (s->ctrl & 617 (SCSW_FCTL_START_FUNC|SCSW_FCTL_HALT_FUNC|SCSW_FCTL_CLEAR_FUNC)) { 618 ret = -EBUSY; 619 goto out; 620 } 621 622 copy_schib_from_guest(&schib, orig_schib); 623 /* Only update the program-modifiable fields. */ 624 p->intparm = schib.pmcw.intparm; 625 oldflags = p->flags; 626 p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA | 627 PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME | 628 PMCW_FLAGS_MASK_MP); 629 p->flags |= schib.pmcw.flags & 630 (PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA | 631 PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME | 632 PMCW_FLAGS_MASK_MP); 633 p->lpm = schib.pmcw.lpm; 634 p->mbi = schib.pmcw.mbi; 635 p->pom = schib.pmcw.pom; 636 p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE); 637 p->chars |= schib.pmcw.chars & 638 (PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE); 639 sch->curr_status.mba = schib.mba; 640 641 /* Has the channel been disabled? */ 642 if (sch->disable_cb && (oldflags & PMCW_FLAGS_MASK_ENA) != 0 643 && (p->flags & PMCW_FLAGS_MASK_ENA) == 0) { 644 sch->disable_cb(sch); 645 } 646 647 ret = 0; 648 649 out: 650 return ret; 651 } 652 653 int css_do_xsch(SubchDev *sch) 654 { 655 SCSW *s = &sch->curr_status.scsw; 656 PMCW *p = &sch->curr_status.pmcw; 657 int ret; 658 659 if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) { 660 ret = -ENODEV; 661 goto out; 662 } 663 664 if (!(s->ctrl & SCSW_CTRL_MASK_FCTL) || 665 ((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) || 666 (!(s->ctrl & 667 (SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_SUSP))) || 668 (s->ctrl & SCSW_ACTL_SUBCH_ACTIVE)) { 669 ret = -EINPROGRESS; 670 goto out; 671 } 672 673 if (s->ctrl & SCSW_CTRL_MASK_STCTL) { 674 ret = -EBUSY; 675 goto out; 676 } 677 678 /* Cancel the current operation. */ 679 s->ctrl &= ~(SCSW_FCTL_START_FUNC | 680 SCSW_ACTL_RESUME_PEND | 681 SCSW_ACTL_START_PEND | 682 SCSW_ACTL_SUSP); 683 sch->channel_prog = 0x0; 684 sch->last_cmd_valid = false; 685 s->dstat = 0; 686 s->cstat = 0; 687 ret = 0; 688 689 out: 690 return ret; 691 } 692 693 int css_do_csch(SubchDev *sch) 694 { 695 SCSW *s = &sch->curr_status.scsw; 696 PMCW *p = &sch->curr_status.pmcw; 697 int ret; 698 699 if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) { 700 ret = -ENODEV; 701 goto out; 702 } 703 704 /* Trigger the clear function. */ 705 s->ctrl &= ~(SCSW_CTRL_MASK_FCTL | SCSW_CTRL_MASK_ACTL); 706 s->ctrl |= SCSW_FCTL_CLEAR_FUNC | SCSW_ACTL_CLEAR_PEND; 707 708 do_subchannel_work(sch, NULL); 709 ret = 0; 710 711 out: 712 return ret; 713 } 714 715 int css_do_hsch(SubchDev *sch) 716 { 717 SCSW *s = &sch->curr_status.scsw; 718 PMCW *p = &sch->curr_status.pmcw; 719 int ret; 720 721 if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) { 722 ret = -ENODEV; 723 goto out; 724 } 725 726 if (((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_STATUS_PEND) || 727 (s->ctrl & (SCSW_STCTL_PRIMARY | 728 SCSW_STCTL_SECONDARY | 729 SCSW_STCTL_ALERT))) { 730 ret = -EINPROGRESS; 731 goto out; 732 } 733 734 if (s->ctrl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) { 735 ret = -EBUSY; 736 goto out; 737 } 738 739 /* Trigger the halt function. */ 740 s->ctrl |= SCSW_FCTL_HALT_FUNC; 741 s->ctrl &= ~SCSW_FCTL_START_FUNC; 742 if (((s->ctrl & SCSW_CTRL_MASK_ACTL) == 743 (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) && 744 ((s->ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_INTERMEDIATE)) { 745 s->ctrl &= ~SCSW_STCTL_STATUS_PEND; 746 } 747 s->ctrl |= SCSW_ACTL_HALT_PEND; 748 749 do_subchannel_work(sch, NULL); 750 ret = 0; 751 752 out: 753 return ret; 754 } 755 756 static void css_update_chnmon(SubchDev *sch) 757 { 758 if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_MME)) { 759 /* Not active. */ 760 return; 761 } 762 /* The counter is conveniently located at the beginning of the struct. */ 763 if (sch->curr_status.pmcw.chars & PMCW_CHARS_MASK_MBFC) { 764 /* Format 1, per-subchannel area. */ 765 uint32_t count; 766 767 count = address_space_ldl(&address_space_memory, 768 sch->curr_status.mba, 769 MEMTXATTRS_UNSPECIFIED, 770 NULL); 771 count++; 772 address_space_stl(&address_space_memory, sch->curr_status.mba, count, 773 MEMTXATTRS_UNSPECIFIED, NULL); 774 } else { 775 /* Format 0, global area. */ 776 uint32_t offset; 777 uint16_t count; 778 779 offset = sch->curr_status.pmcw.mbi << 5; 780 count = address_space_lduw(&address_space_memory, 781 channel_subsys->chnmon_area + offset, 782 MEMTXATTRS_UNSPECIFIED, 783 NULL); 784 count++; 785 address_space_stw(&address_space_memory, 786 channel_subsys->chnmon_area + offset, count, 787 MEMTXATTRS_UNSPECIFIED, NULL); 788 } 789 } 790 791 int css_do_ssch(SubchDev *sch, ORB *orb) 792 { 793 SCSW *s = &sch->curr_status.scsw; 794 PMCW *p = &sch->curr_status.pmcw; 795 int ret; 796 797 if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) { 798 ret = -ENODEV; 799 goto out; 800 } 801 802 if (s->ctrl & SCSW_STCTL_STATUS_PEND) { 803 ret = -EINPROGRESS; 804 goto out; 805 } 806 807 if (s->ctrl & (SCSW_FCTL_START_FUNC | 808 SCSW_FCTL_HALT_FUNC | 809 SCSW_FCTL_CLEAR_FUNC)) { 810 ret = -EBUSY; 811 goto out; 812 } 813 814 /* If monitoring is active, update counter. */ 815 if (channel_subsys->chnmon_active) { 816 css_update_chnmon(sch); 817 } 818 sch->channel_prog = orb->cpa; 819 /* Trigger the start function. */ 820 s->ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND); 821 s->flags &= ~SCSW_FLAGS_MASK_PNO; 822 823 do_subchannel_work(sch, orb); 824 ret = 0; 825 826 out: 827 return ret; 828 } 829 830 static void copy_irb_to_guest(IRB *dest, const IRB *src, PMCW *pmcw, 831 int *irb_len) 832 { 833 int i; 834 uint16_t stctl = src->scsw.ctrl & SCSW_CTRL_MASK_STCTL; 835 uint16_t actl = src->scsw.ctrl & SCSW_CTRL_MASK_ACTL; 836 837 copy_scsw_to_guest(&dest->scsw, &src->scsw); 838 839 for (i = 0; i < ARRAY_SIZE(dest->esw); i++) { 840 dest->esw[i] = cpu_to_be32(src->esw[i]); 841 } 842 for (i = 0; i < ARRAY_SIZE(dest->ecw); i++) { 843 dest->ecw[i] = cpu_to_be32(src->ecw[i]); 844 } 845 *irb_len = sizeof(*dest) - sizeof(dest->emw); 846 847 /* extended measurements enabled? */ 848 if ((src->scsw.flags & SCSW_FLAGS_MASK_ESWF) || 849 !(pmcw->flags & PMCW_FLAGS_MASK_TF) || 850 !(pmcw->chars & PMCW_CHARS_MASK_XMWME)) { 851 return; 852 } 853 /* extended measurements pending? */ 854 if (!(stctl & SCSW_STCTL_STATUS_PEND)) { 855 return; 856 } 857 if ((stctl & SCSW_STCTL_PRIMARY) || 858 (stctl == SCSW_STCTL_SECONDARY) || 859 ((stctl & SCSW_STCTL_INTERMEDIATE) && (actl & SCSW_ACTL_SUSP))) { 860 for (i = 0; i < ARRAY_SIZE(dest->emw); i++) { 861 dest->emw[i] = cpu_to_be32(src->emw[i]); 862 } 863 } 864 *irb_len = sizeof(*dest); 865 } 866 867 int css_do_tsch_get_irb(SubchDev *sch, IRB *target_irb, int *irb_len) 868 { 869 SCSW *s = &sch->curr_status.scsw; 870 PMCW *p = &sch->curr_status.pmcw; 871 uint16_t stctl; 872 IRB irb; 873 874 if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) { 875 return 3; 876 } 877 878 stctl = s->ctrl & SCSW_CTRL_MASK_STCTL; 879 880 /* Prepare the irb for the guest. */ 881 memset(&irb, 0, sizeof(IRB)); 882 883 /* Copy scsw from current status. */ 884 memcpy(&irb.scsw, s, sizeof(SCSW)); 885 if (stctl & SCSW_STCTL_STATUS_PEND) { 886 if (s->cstat & (SCSW_CSTAT_DATA_CHECK | 887 SCSW_CSTAT_CHN_CTRL_CHK | 888 SCSW_CSTAT_INTF_CTRL_CHK)) { 889 irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF; 890 irb.esw[0] = 0x04804000; 891 } else { 892 irb.esw[0] = 0x00800000; 893 } 894 /* If a unit check is pending, copy sense data. */ 895 if ((s->dstat & SCSW_DSTAT_UNIT_CHECK) && 896 (p->chars & PMCW_CHARS_MASK_CSENSE)) { 897 int i; 898 899 irb.scsw.flags |= SCSW_FLAGS_MASK_ESWF | SCSW_FLAGS_MASK_ECTL; 900 /* Attention: sense_data is already BE! */ 901 memcpy(irb.ecw, sch->sense_data, sizeof(sch->sense_data)); 902 for (i = 0; i < ARRAY_SIZE(irb.ecw); i++) { 903 irb.ecw[i] = be32_to_cpu(irb.ecw[i]); 904 } 905 irb.esw[1] = 0x01000000 | (sizeof(sch->sense_data) << 8); 906 } 907 } 908 /* Store the irb to the guest. */ 909 copy_irb_to_guest(target_irb, &irb, p, irb_len); 910 911 return ((stctl & SCSW_STCTL_STATUS_PEND) == 0); 912 } 913 914 void css_do_tsch_update_subch(SubchDev *sch) 915 { 916 SCSW *s = &sch->curr_status.scsw; 917 PMCW *p = &sch->curr_status.pmcw; 918 uint16_t stctl; 919 uint16_t fctl; 920 uint16_t actl; 921 922 stctl = s->ctrl & SCSW_CTRL_MASK_STCTL; 923 fctl = s->ctrl & SCSW_CTRL_MASK_FCTL; 924 actl = s->ctrl & SCSW_CTRL_MASK_ACTL; 925 926 /* Clear conditions on subchannel, if applicable. */ 927 if (stctl & SCSW_STCTL_STATUS_PEND) { 928 s->ctrl &= ~SCSW_CTRL_MASK_STCTL; 929 if ((stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) || 930 ((fctl & SCSW_FCTL_HALT_FUNC) && 931 (actl & SCSW_ACTL_SUSP))) { 932 s->ctrl &= ~SCSW_CTRL_MASK_FCTL; 933 } 934 if (stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) { 935 s->flags &= ~SCSW_FLAGS_MASK_PNO; 936 s->ctrl &= ~(SCSW_ACTL_RESUME_PEND | 937 SCSW_ACTL_START_PEND | 938 SCSW_ACTL_HALT_PEND | 939 SCSW_ACTL_CLEAR_PEND | 940 SCSW_ACTL_SUSP); 941 } else { 942 if ((actl & SCSW_ACTL_SUSP) && 943 (fctl & SCSW_FCTL_START_FUNC)) { 944 s->flags &= ~SCSW_FLAGS_MASK_PNO; 945 if (fctl & SCSW_FCTL_HALT_FUNC) { 946 s->ctrl &= ~(SCSW_ACTL_RESUME_PEND | 947 SCSW_ACTL_START_PEND | 948 SCSW_ACTL_HALT_PEND | 949 SCSW_ACTL_CLEAR_PEND | 950 SCSW_ACTL_SUSP); 951 } else { 952 s->ctrl &= ~SCSW_ACTL_RESUME_PEND; 953 } 954 } 955 } 956 /* Clear pending sense data. */ 957 if (p->chars & PMCW_CHARS_MASK_CSENSE) { 958 memset(sch->sense_data, 0 , sizeof(sch->sense_data)); 959 } 960 } 961 } 962 963 static void copy_crw_to_guest(CRW *dest, const CRW *src) 964 { 965 dest->flags = cpu_to_be16(src->flags); 966 dest->rsid = cpu_to_be16(src->rsid); 967 } 968 969 int css_do_stcrw(CRW *crw) 970 { 971 CrwContainer *crw_cont; 972 int ret; 973 974 crw_cont = QTAILQ_FIRST(&channel_subsys->pending_crws); 975 if (crw_cont) { 976 QTAILQ_REMOVE(&channel_subsys->pending_crws, crw_cont, sibling); 977 copy_crw_to_guest(crw, &crw_cont->crw); 978 g_free(crw_cont); 979 ret = 0; 980 } else { 981 /* List was empty, turn crw machine checks on again. */ 982 memset(crw, 0, sizeof(*crw)); 983 channel_subsys->do_crw_mchk = true; 984 ret = 1; 985 } 986 987 return ret; 988 } 989 990 static void copy_crw_from_guest(CRW *dest, const CRW *src) 991 { 992 dest->flags = be16_to_cpu(src->flags); 993 dest->rsid = be16_to_cpu(src->rsid); 994 } 995 996 void css_undo_stcrw(CRW *crw) 997 { 998 CrwContainer *crw_cont; 999 1000 crw_cont = g_try_malloc0(sizeof(CrwContainer)); 1001 if (!crw_cont) { 1002 channel_subsys->crws_lost = true; 1003 return; 1004 } 1005 copy_crw_from_guest(&crw_cont->crw, crw); 1006 1007 QTAILQ_INSERT_HEAD(&channel_subsys->pending_crws, crw_cont, sibling); 1008 } 1009 1010 int css_do_tpi(IOIntCode *int_code, int lowcore) 1011 { 1012 /* No pending interrupts for !KVM. */ 1013 return 0; 1014 } 1015 1016 int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid, 1017 int rfmt, void *buf) 1018 { 1019 int i, desc_size; 1020 uint32_t words[8]; 1021 uint32_t chpid_type_word; 1022 CssImage *css; 1023 1024 if (!m && !cssid) { 1025 css = channel_subsys->css[channel_subsys->default_cssid]; 1026 } else { 1027 css = channel_subsys->css[cssid]; 1028 } 1029 if (!css) { 1030 return 0; 1031 } 1032 desc_size = 0; 1033 for (i = f_chpid; i <= l_chpid; i++) { 1034 if (css->chpids[i].in_use) { 1035 chpid_type_word = 0x80000000 | (css->chpids[i].type << 8) | i; 1036 if (rfmt == 0) { 1037 words[0] = cpu_to_be32(chpid_type_word); 1038 words[1] = 0; 1039 memcpy(buf + desc_size, words, 8); 1040 desc_size += 8; 1041 } else if (rfmt == 1) { 1042 words[0] = cpu_to_be32(chpid_type_word); 1043 words[1] = 0; 1044 words[2] = 0; 1045 words[3] = 0; 1046 words[4] = 0; 1047 words[5] = 0; 1048 words[6] = 0; 1049 words[7] = 0; 1050 memcpy(buf + desc_size, words, 32); 1051 desc_size += 32; 1052 } 1053 } 1054 } 1055 return desc_size; 1056 } 1057 1058 void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo) 1059 { 1060 /* dct is currently ignored (not really meaningful for our devices) */ 1061 /* TODO: Don't ignore mbk. */ 1062 if (update && !channel_subsys->chnmon_active) { 1063 /* Enable measuring. */ 1064 channel_subsys->chnmon_area = mbo; 1065 channel_subsys->chnmon_active = true; 1066 } 1067 if (!update && channel_subsys->chnmon_active) { 1068 /* Disable measuring. */ 1069 channel_subsys->chnmon_area = 0; 1070 channel_subsys->chnmon_active = false; 1071 } 1072 } 1073 1074 int css_do_rsch(SubchDev *sch) 1075 { 1076 SCSW *s = &sch->curr_status.scsw; 1077 PMCW *p = &sch->curr_status.pmcw; 1078 int ret; 1079 1080 if (!(p->flags & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA))) { 1081 ret = -ENODEV; 1082 goto out; 1083 } 1084 1085 if (s->ctrl & SCSW_STCTL_STATUS_PEND) { 1086 ret = -EINPROGRESS; 1087 goto out; 1088 } 1089 1090 if (((s->ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) || 1091 (s->ctrl & SCSW_ACTL_RESUME_PEND) || 1092 (!(s->ctrl & SCSW_ACTL_SUSP))) { 1093 ret = -EINVAL; 1094 goto out; 1095 } 1096 1097 /* If monitoring is active, update counter. */ 1098 if (channel_subsys->chnmon_active) { 1099 css_update_chnmon(sch); 1100 } 1101 1102 s->ctrl |= SCSW_ACTL_RESUME_PEND; 1103 do_subchannel_work(sch, NULL); 1104 ret = 0; 1105 1106 out: 1107 return ret; 1108 } 1109 1110 int css_do_rchp(uint8_t cssid, uint8_t chpid) 1111 { 1112 uint8_t real_cssid; 1113 1114 if (cssid > channel_subsys->max_cssid) { 1115 return -EINVAL; 1116 } 1117 if (channel_subsys->max_cssid == 0) { 1118 real_cssid = channel_subsys->default_cssid; 1119 } else { 1120 real_cssid = cssid; 1121 } 1122 if (!channel_subsys->css[real_cssid]) { 1123 return -EINVAL; 1124 } 1125 1126 if (!channel_subsys->css[real_cssid]->chpids[chpid].in_use) { 1127 return -ENODEV; 1128 } 1129 1130 if (!channel_subsys->css[real_cssid]->chpids[chpid].is_virtual) { 1131 fprintf(stderr, 1132 "rchp unsupported for non-virtual chpid %x.%02x!\n", 1133 real_cssid, chpid); 1134 return -ENODEV; 1135 } 1136 1137 /* We don't really use a channel path, so we're done here. */ 1138 css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 1139 channel_subsys->max_cssid > 0 ? 1 : 0, chpid); 1140 if (channel_subsys->max_cssid > 0) { 1141 css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 0, real_cssid << 8); 1142 } 1143 return 0; 1144 } 1145 1146 bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid) 1147 { 1148 SubchSet *set; 1149 uint8_t real_cssid; 1150 1151 real_cssid = (!m && (cssid == 0)) ? channel_subsys->default_cssid : cssid; 1152 if (real_cssid > MAX_CSSID || ssid > MAX_SSID || 1153 !channel_subsys->css[real_cssid] || 1154 !channel_subsys->css[real_cssid]->sch_set[ssid]) { 1155 return true; 1156 } 1157 set = channel_subsys->css[real_cssid]->sch_set[ssid]; 1158 return schid > find_last_bit(set->schids_used, 1159 (MAX_SCHID + 1) / sizeof(unsigned long)); 1160 } 1161 1162 static int css_add_virtual_chpid(uint8_t cssid, uint8_t chpid, uint8_t type) 1163 { 1164 CssImage *css; 1165 1166 trace_css_chpid_add(cssid, chpid, type); 1167 if (cssid > MAX_CSSID) { 1168 return -EINVAL; 1169 } 1170 css = channel_subsys->css[cssid]; 1171 if (!css) { 1172 return -EINVAL; 1173 } 1174 if (css->chpids[chpid].in_use) { 1175 return -EEXIST; 1176 } 1177 css->chpids[chpid].in_use = 1; 1178 css->chpids[chpid].type = type; 1179 css->chpids[chpid].is_virtual = 1; 1180 1181 css_generate_chp_crws(cssid, chpid); 1182 1183 return 0; 1184 } 1185 1186 void css_sch_build_virtual_schib(SubchDev *sch, uint8_t chpid, uint8_t type) 1187 { 1188 PMCW *p = &sch->curr_status.pmcw; 1189 SCSW *s = &sch->curr_status.scsw; 1190 int i; 1191 CssImage *css = channel_subsys->css[sch->cssid]; 1192 1193 assert(css != NULL); 1194 memset(p, 0, sizeof(PMCW)); 1195 p->flags |= PMCW_FLAGS_MASK_DNV; 1196 p->devno = sch->devno; 1197 /* single path */ 1198 p->pim = 0x80; 1199 p->pom = 0xff; 1200 p->pam = 0x80; 1201 p->chpid[0] = chpid; 1202 if (!css->chpids[chpid].in_use) { 1203 css_add_virtual_chpid(sch->cssid, chpid, type); 1204 } 1205 1206 memset(s, 0, sizeof(SCSW)); 1207 sch->curr_status.mba = 0; 1208 for (i = 0; i < ARRAY_SIZE(sch->curr_status.mda); i++) { 1209 sch->curr_status.mda[i] = 0; 1210 } 1211 } 1212 1213 SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid, uint16_t schid) 1214 { 1215 uint8_t real_cssid; 1216 1217 real_cssid = (!m && (cssid == 0)) ? channel_subsys->default_cssid : cssid; 1218 1219 if (!channel_subsys->css[real_cssid]) { 1220 return NULL; 1221 } 1222 1223 if (!channel_subsys->css[real_cssid]->sch_set[ssid]) { 1224 return NULL; 1225 } 1226 1227 return channel_subsys->css[real_cssid]->sch_set[ssid]->sch[schid]; 1228 } 1229 1230 bool css_subch_visible(SubchDev *sch) 1231 { 1232 if (sch->ssid > channel_subsys->max_ssid) { 1233 return false; 1234 } 1235 1236 if (sch->cssid != channel_subsys->default_cssid) { 1237 return (channel_subsys->max_cssid > 0); 1238 } 1239 1240 return true; 1241 } 1242 1243 bool css_present(uint8_t cssid) 1244 { 1245 return (channel_subsys->css[cssid] != NULL); 1246 } 1247 1248 bool css_devno_used(uint8_t cssid, uint8_t ssid, uint16_t devno) 1249 { 1250 if (!channel_subsys->css[cssid]) { 1251 return false; 1252 } 1253 if (!channel_subsys->css[cssid]->sch_set[ssid]) { 1254 return false; 1255 } 1256 1257 return !!test_bit(devno, 1258 channel_subsys->css[cssid]->sch_set[ssid]->devnos_used); 1259 } 1260 1261 void css_subch_assign(uint8_t cssid, uint8_t ssid, uint16_t schid, 1262 uint16_t devno, SubchDev *sch) 1263 { 1264 CssImage *css; 1265 SubchSet *s_set; 1266 1267 trace_css_assign_subch(sch ? "assign" : "deassign", cssid, ssid, schid, 1268 devno); 1269 if (!channel_subsys->css[cssid]) { 1270 fprintf(stderr, 1271 "Suspicious call to %s (%x.%x.%04x) for non-existing css!\n", 1272 __func__, cssid, ssid, schid); 1273 return; 1274 } 1275 css = channel_subsys->css[cssid]; 1276 1277 if (!css->sch_set[ssid]) { 1278 css->sch_set[ssid] = g_malloc0(sizeof(SubchSet)); 1279 } 1280 s_set = css->sch_set[ssid]; 1281 1282 s_set->sch[schid] = sch; 1283 if (sch) { 1284 set_bit(schid, s_set->schids_used); 1285 set_bit(devno, s_set->devnos_used); 1286 } else { 1287 clear_bit(schid, s_set->schids_used); 1288 clear_bit(devno, s_set->devnos_used); 1289 } 1290 } 1291 1292 void css_queue_crw(uint8_t rsc, uint8_t erc, int chain, uint16_t rsid) 1293 { 1294 CrwContainer *crw_cont; 1295 1296 trace_css_crw(rsc, erc, rsid, chain ? "(chained)" : ""); 1297 /* TODO: Maybe use a static crw pool? */ 1298 crw_cont = g_try_malloc0(sizeof(CrwContainer)); 1299 if (!crw_cont) { 1300 channel_subsys->crws_lost = true; 1301 return; 1302 } 1303 crw_cont->crw.flags = (rsc << 8) | erc; 1304 if (chain) { 1305 crw_cont->crw.flags |= CRW_FLAGS_MASK_C; 1306 } 1307 crw_cont->crw.rsid = rsid; 1308 if (channel_subsys->crws_lost) { 1309 crw_cont->crw.flags |= CRW_FLAGS_MASK_R; 1310 channel_subsys->crws_lost = false; 1311 } 1312 1313 QTAILQ_INSERT_TAIL(&channel_subsys->pending_crws, crw_cont, sibling); 1314 1315 if (channel_subsys->do_crw_mchk) { 1316 channel_subsys->do_crw_mchk = false; 1317 /* Inject crw pending machine check. */ 1318 s390_crw_mchk(); 1319 } 1320 } 1321 1322 void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid, 1323 int hotplugged, int add) 1324 { 1325 uint8_t guest_cssid; 1326 bool chain_crw; 1327 1328 if (add && !hotplugged) { 1329 return; 1330 } 1331 if (channel_subsys->max_cssid == 0) { 1332 /* Default cssid shows up as 0. */ 1333 guest_cssid = (cssid == channel_subsys->default_cssid) ? 0 : cssid; 1334 } else { 1335 /* Show real cssid to the guest. */ 1336 guest_cssid = cssid; 1337 } 1338 /* 1339 * Only notify for higher subchannel sets/channel subsystems if the 1340 * guest has enabled it. 1341 */ 1342 if ((ssid > channel_subsys->max_ssid) || 1343 (guest_cssid > channel_subsys->max_cssid) || 1344 ((channel_subsys->max_cssid == 0) && 1345 (cssid != channel_subsys->default_cssid))) { 1346 return; 1347 } 1348 chain_crw = (channel_subsys->max_ssid > 0) || 1349 (channel_subsys->max_cssid > 0); 1350 css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, chain_crw ? 1 : 0, schid); 1351 if (chain_crw) { 1352 css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0, 1353 (guest_cssid << 8) | (ssid << 4)); 1354 } 1355 } 1356 1357 void css_generate_chp_crws(uint8_t cssid, uint8_t chpid) 1358 { 1359 /* TODO */ 1360 } 1361 1362 void css_generate_css_crws(uint8_t cssid) 1363 { 1364 if (!channel_subsys->sei_pending) { 1365 css_queue_crw(CRW_RSC_CSS, 0, 0, cssid); 1366 } 1367 channel_subsys->sei_pending = true; 1368 } 1369 1370 void css_clear_sei_pending(void) 1371 { 1372 channel_subsys->sei_pending = false; 1373 } 1374 1375 int css_enable_mcsse(void) 1376 { 1377 trace_css_enable_facility("mcsse"); 1378 channel_subsys->max_cssid = MAX_CSSID; 1379 return 0; 1380 } 1381 1382 int css_enable_mss(void) 1383 { 1384 trace_css_enable_facility("mss"); 1385 channel_subsys->max_ssid = MAX_SSID; 1386 return 0; 1387 } 1388 1389 void subch_device_save(SubchDev *s, QEMUFile *f) 1390 { 1391 int i; 1392 1393 qemu_put_byte(f, s->cssid); 1394 qemu_put_byte(f, s->ssid); 1395 qemu_put_be16(f, s->schid); 1396 qemu_put_be16(f, s->devno); 1397 qemu_put_byte(f, s->thinint_active); 1398 /* SCHIB */ 1399 /* PMCW */ 1400 qemu_put_be32(f, s->curr_status.pmcw.intparm); 1401 qemu_put_be16(f, s->curr_status.pmcw.flags); 1402 qemu_put_be16(f, s->curr_status.pmcw.devno); 1403 qemu_put_byte(f, s->curr_status.pmcw.lpm); 1404 qemu_put_byte(f, s->curr_status.pmcw.pnom); 1405 qemu_put_byte(f, s->curr_status.pmcw.lpum); 1406 qemu_put_byte(f, s->curr_status.pmcw.pim); 1407 qemu_put_be16(f, s->curr_status.pmcw.mbi); 1408 qemu_put_byte(f, s->curr_status.pmcw.pom); 1409 qemu_put_byte(f, s->curr_status.pmcw.pam); 1410 qemu_put_buffer(f, s->curr_status.pmcw.chpid, 8); 1411 qemu_put_be32(f, s->curr_status.pmcw.chars); 1412 /* SCSW */ 1413 qemu_put_be16(f, s->curr_status.scsw.flags); 1414 qemu_put_be16(f, s->curr_status.scsw.ctrl); 1415 qemu_put_be32(f, s->curr_status.scsw.cpa); 1416 qemu_put_byte(f, s->curr_status.scsw.dstat); 1417 qemu_put_byte(f, s->curr_status.scsw.cstat); 1418 qemu_put_be16(f, s->curr_status.scsw.count); 1419 qemu_put_be64(f, s->curr_status.mba); 1420 qemu_put_buffer(f, s->curr_status.mda, 4); 1421 /* end SCHIB */ 1422 qemu_put_buffer(f, s->sense_data, 32); 1423 qemu_put_be64(f, s->channel_prog); 1424 /* last cmd */ 1425 qemu_put_byte(f, s->last_cmd.cmd_code); 1426 qemu_put_byte(f, s->last_cmd.flags); 1427 qemu_put_be16(f, s->last_cmd.count); 1428 qemu_put_be32(f, s->last_cmd.cda); 1429 qemu_put_byte(f, s->last_cmd_valid); 1430 qemu_put_byte(f, s->id.reserved); 1431 qemu_put_be16(f, s->id.cu_type); 1432 qemu_put_byte(f, s->id.cu_model); 1433 qemu_put_be16(f, s->id.dev_type); 1434 qemu_put_byte(f, s->id.dev_model); 1435 qemu_put_byte(f, s->id.unused); 1436 for (i = 0; i < ARRAY_SIZE(s->id.ciw); i++) { 1437 qemu_put_byte(f, s->id.ciw[i].type); 1438 qemu_put_byte(f, s->id.ciw[i].command); 1439 qemu_put_be16(f, s->id.ciw[i].count); 1440 } 1441 qemu_put_byte(f, s->ccw_fmt_1); 1442 qemu_put_byte(f, s->ccw_no_data_cnt); 1443 } 1444 1445 int subch_device_load(SubchDev *s, QEMUFile *f) 1446 { 1447 int i; 1448 1449 s->cssid = qemu_get_byte(f); 1450 s->ssid = qemu_get_byte(f); 1451 s->schid = qemu_get_be16(f); 1452 s->devno = qemu_get_be16(f); 1453 s->thinint_active = qemu_get_byte(f); 1454 /* SCHIB */ 1455 /* PMCW */ 1456 s->curr_status.pmcw.intparm = qemu_get_be32(f); 1457 s->curr_status.pmcw.flags = qemu_get_be16(f); 1458 s->curr_status.pmcw.devno = qemu_get_be16(f); 1459 s->curr_status.pmcw.lpm = qemu_get_byte(f); 1460 s->curr_status.pmcw.pnom = qemu_get_byte(f); 1461 s->curr_status.pmcw.lpum = qemu_get_byte(f); 1462 s->curr_status.pmcw.pim = qemu_get_byte(f); 1463 s->curr_status.pmcw.mbi = qemu_get_be16(f); 1464 s->curr_status.pmcw.pom = qemu_get_byte(f); 1465 s->curr_status.pmcw.pam = qemu_get_byte(f); 1466 qemu_get_buffer(f, s->curr_status.pmcw.chpid, 8); 1467 s->curr_status.pmcw.chars = qemu_get_be32(f); 1468 /* SCSW */ 1469 s->curr_status.scsw.flags = qemu_get_be16(f); 1470 s->curr_status.scsw.ctrl = qemu_get_be16(f); 1471 s->curr_status.scsw.cpa = qemu_get_be32(f); 1472 s->curr_status.scsw.dstat = qemu_get_byte(f); 1473 s->curr_status.scsw.cstat = qemu_get_byte(f); 1474 s->curr_status.scsw.count = qemu_get_be16(f); 1475 s->curr_status.mba = qemu_get_be64(f); 1476 qemu_get_buffer(f, s->curr_status.mda, 4); 1477 /* end SCHIB */ 1478 qemu_get_buffer(f, s->sense_data, 32); 1479 s->channel_prog = qemu_get_be64(f); 1480 /* last cmd */ 1481 s->last_cmd.cmd_code = qemu_get_byte(f); 1482 s->last_cmd.flags = qemu_get_byte(f); 1483 s->last_cmd.count = qemu_get_be16(f); 1484 s->last_cmd.cda = qemu_get_be32(f); 1485 s->last_cmd_valid = qemu_get_byte(f); 1486 s->id.reserved = qemu_get_byte(f); 1487 s->id.cu_type = qemu_get_be16(f); 1488 s->id.cu_model = qemu_get_byte(f); 1489 s->id.dev_type = qemu_get_be16(f); 1490 s->id.dev_model = qemu_get_byte(f); 1491 s->id.unused = qemu_get_byte(f); 1492 for (i = 0; i < ARRAY_SIZE(s->id.ciw); i++) { 1493 s->id.ciw[i].type = qemu_get_byte(f); 1494 s->id.ciw[i].command = qemu_get_byte(f); 1495 s->id.ciw[i].count = qemu_get_be16(f); 1496 } 1497 s->ccw_fmt_1 = qemu_get_byte(f); 1498 s->ccw_no_data_cnt = qemu_get_byte(f); 1499 /* 1500 * Hack alert. We don't migrate the channel subsystem status (no 1501 * device!), but we need to find out if the guest enabled mss/mcss-e. 1502 * If the subchannel is enabled, it certainly was able to access it, 1503 * so adjust the max_ssid/max_cssid values for relevant ssid/cssid 1504 * values. This is not watertight, but better than nothing. 1505 */ 1506 if (s->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA) { 1507 if (s->ssid) { 1508 channel_subsys->max_ssid = MAX_SSID; 1509 } 1510 if (s->cssid != channel_subsys->default_cssid) { 1511 channel_subsys->max_cssid = MAX_CSSID; 1512 } 1513 } 1514 return 0; 1515 } 1516 1517 1518 static void css_init(void) 1519 { 1520 channel_subsys = g_malloc0(sizeof(*channel_subsys)); 1521 QTAILQ_INIT(&channel_subsys->pending_crws); 1522 channel_subsys->sei_pending = false; 1523 channel_subsys->do_crw_mchk = true; 1524 channel_subsys->crws_lost = false; 1525 channel_subsys->chnmon_active = false; 1526 QTAILQ_INIT(&channel_subsys->io_adapters); 1527 } 1528 machine_init(css_init); 1529 1530 void css_reset_sch(SubchDev *sch) 1531 { 1532 PMCW *p = &sch->curr_status.pmcw; 1533 1534 if ((p->flags & PMCW_FLAGS_MASK_ENA) != 0 && sch->disable_cb) { 1535 sch->disable_cb(sch); 1536 } 1537 1538 p->intparm = 0; 1539 p->flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA | 1540 PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME | 1541 PMCW_FLAGS_MASK_MP | PMCW_FLAGS_MASK_TF); 1542 p->flags |= PMCW_FLAGS_MASK_DNV; 1543 p->devno = sch->devno; 1544 p->pim = 0x80; 1545 p->lpm = p->pim; 1546 p->pnom = 0; 1547 p->lpum = 0; 1548 p->mbi = 0; 1549 p->pom = 0xff; 1550 p->pam = 0x80; 1551 p->chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_XMWME | 1552 PMCW_CHARS_MASK_CSENSE); 1553 1554 memset(&sch->curr_status.scsw, 0, sizeof(sch->curr_status.scsw)); 1555 sch->curr_status.mba = 0; 1556 1557 sch->channel_prog = 0x0; 1558 sch->last_cmd_valid = false; 1559 sch->thinint_active = false; 1560 } 1561 1562 void css_reset(void) 1563 { 1564 CrwContainer *crw_cont; 1565 1566 /* Clean up monitoring. */ 1567 channel_subsys->chnmon_active = false; 1568 channel_subsys->chnmon_area = 0; 1569 1570 /* Clear pending CRWs. */ 1571 while ((crw_cont = QTAILQ_FIRST(&channel_subsys->pending_crws))) { 1572 QTAILQ_REMOVE(&channel_subsys->pending_crws, crw_cont, sibling); 1573 g_free(crw_cont); 1574 } 1575 channel_subsys->sei_pending = false; 1576 channel_subsys->do_crw_mchk = true; 1577 channel_subsys->crws_lost = false; 1578 1579 /* Reset maximum ids. */ 1580 channel_subsys->max_cssid = 0; 1581 channel_subsys->max_ssid = 0; 1582 } 1583