/*
 * S/390 common I/O routines -- channel subsystem call
 *
 * Copyright IBM Corp. 1999,2012
 * Author(s): Ingo Adlung (adlung@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Arnd Bergmann (arndb@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);

/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:
		return -EINVAL;
	case 0x0004:
		return -EOPNOTSUPP;
	case 0x000b:
		return -EBUSY;
	case 0x0100:
	case 0x0102:
		return -ENOMEM;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);
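
/*
 * Note on the calling convention used throughout this file.  This is a
 * sketch of the recurring pattern, not an additional API: chsc() returns
 * the condition code of the instruction; cc > 0 means the request was
 * not performed, with cc 3 mapped to -ENODEV and everything else to
 * -EBUSY.  Only on cc 0 is the response code in the command block valid
 * and translated via chsc_error_from_response():
 *
 *	ccode = chsc(area);
 *	if (ccode > 0)
 *		return (ccode == 3) ? -ENODEV : -EBUSY;
 *	ret = chsc_error_from_response(area->response.code);
 */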

struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8 unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));

int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
			goto out_unreg;
	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}

static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

	return 0;
}
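
/*
 * for_each_subchannel_staged() applies __s390_process_res_acc() to each
 * registered subchannel and s390_process_res_acc_new_sch() to each
 * subchannel ID without a registered subchannel, so both known devices
 * and not-yet-recognized subchannels are re-evaluated.
 */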

static void s390_process_res_acc(struct chp_link *link)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * we will have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, link);
}

static int __get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0] & 0x000000ff);
}

struct chsc_sei_nt0_area {
	u8  flags;
	u8  vf;				/* validity flags */
	u8  rs;				/* reporting source */
	u8  cc;				/* content code */
	u16 fla;			/* full link address */
	u16 rsid;			/* reporting source id */
	u32 reserved1;
	u32 reserved2;
	/* ccdf has to be big enough for a link-incident record */
	u8  ccdf[PAGE_SIZE - 24 - 16];	/* content-code dependent field */
} __packed;

struct chsc_sei_nt2_area {
	u8  flags;			/* p and v bit */
	u8  reserved1;
	u8  reserved2;
	u8  cc;				/* content code */
	u32 reserved3[13];
	u8  ccdf[PAGE_SIZE - 24 - 56];	/* content-code dependent field */
} __packed;

#define CHSC_SEI_NT0	(1ULL << 63)
#define CHSC_SEI_NT2	(1ULL << 61)

struct chsc_sei {
	struct chsc_header request;
	u32 reserved1;
	u64 ntsm;			/* notification type mask */
	struct chsc_header response;
	u32 :24;
	u8  nt;
	union {
		struct chsc_sei_nt0_area nt0_area;
		struct chsc_sei_nt2_area nt2_area;
		u8 nt_area[PAGE_SIZE - 24];
	} u;
} __packed;

static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}

static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_link link;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;
	}
	s390_process_res_acc(&link);
}
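
/*
 * Process channel-path availability information (content code 7): the
 * content-code dependent field carries a bit map of affected CHPIDs;
 * refresh the descriptor of each known channel path and register new
 * ones.
 */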

static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
{
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *data;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	if (sei_area->rs != 0)
		return;
	data = sei_area->ccdf;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))
			continue;
		chpid.id = num;

		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);
		if (!chp) {
			chp_new(chpid);
			continue;
		}
		mutex_lock(&chp->lock);
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		mutex_unlock(&chp->lock);
	}
}

struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
{
	int ret;

	CIO_CRW_EVENT(4, "chsc: scm change notification\n");
	if (sei_area->rs != 7)
		return;

	ret = scm_update_information();
	if (ret)
		CIO_CRW_EVENT(0, "chsc: updating change notification"
			      " failed (rc=%d).\n", ret);
}

static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
	switch (sei_area->cc) {
	case 1:
		zpci_event_error(sei_area->ccdf);
		break;
	case 2:
		zpci_event_availability(sei_area->ccdf);
		break;
	default:
		CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
			      sei_area->cc);
		break;
	}
}
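
/*
 * Dispatch notification-type 0 events by content code.  The codes
 * handled here are 1 (link incident), 2 (resource accessibility),
 * 7 (channel-path availability), 8 (channel-path configuration) and
 * 12 (SCM change); anything else is only traced.
 */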

static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
{
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	case 12: /* scm change notification */
		chsc_process_sei_scm_change(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
			      sei_area->cc);
		break;
	}

	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
}

static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
{
	do {
		memset(sei, 0, sizeof(*sei));
		sei->request.length = 0x0010;
		sei->request.code = 0x000e;
		sei->ntsm = ntsm;

		if (chsc(sei))
			break;

		if (sei->response.code != 0x0001) {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei->response.code);
			break;
		}

		CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
		switch (sei->nt) {
		case 0:
			chsc_process_sei_nt0(&sei->u.nt0_area);
			break;
		case 2:
			chsc_process_sei_nt2(&sei->u.nt2_area);
			break;
		default:
			CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
			break;
		}
	} while (sei->u.nt0_area.flags & 0x80);
}

/*
 * Handle channel subsystem related CRWs.
 * Use store event information to find out what's going on.
 *
 * Note: Access to sei_page is serialized through machine check handler
 * thread, so no need for locking.
 */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei *sei = sei_page;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);

	CIO_TRACE_EVENT(2, "prcss");
	chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
}

void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &link);
	}
}
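
/*
 * Helpers for chsc_chp_vary(): deliver a CHP_VARY_ON or CHP_VARY_OFF
 * event to the subchannel driver while holding the subchannel lock.
 */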

static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	unsigned long flags;
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &link,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int __s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	struct channel_path *chp = chpid_to_chp(chpid);

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/* Redo PathVerification on the devices the chpid connects to. */
	if (on) {
		/* Try to update the channel path descriptor. */
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &chpid);
	} else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);

	return 0;
}

static void chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

static int chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}
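
/*
 * Issue the chsc command that enables or disables the channel-
 * measurement facility (request code 0x0016, operation code 0 to
 * enable, 1 to disable).  cub_addr1/cub_addr2 carry the addresses of
 * the two measurement pages allocated by chsc_secm().
 */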

int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY >> 4;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

int chsc_secm(struct channel_subsystem *css, int enable)
{
	int ret;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				__chsc_do_secm(css, 0);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	return ret;
}
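
/*
 * Issue the store-channel-path-description command (request code
 * 0x0002) for @chpid and leave the raw response in @page.  Response
 * format 1 requires the fcs facility and response format 2 the cib
 * facility, hence the up-front checks.
 */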

int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m, void *page)
{
	struct chsc_scpd *scpd_area;
	int ccode, ret;

	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;

	memset(page, 0, PAGE_SIZE);
	scpd_area = page;
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret)
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt1 *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	int ret;

	spin_lock_irq(&chsc_page_lock);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

static void chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
				      struct cmg_chars *chars)
{
	struct cmg_chars *cmg_chars;
	int i, mask;

	cmg_chars = chp->cmg_chars;
	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		mask = 0x80 >> (i + 3);
		if (cmcv & mask)
			cmg_chars->values[i] = chars->values[i];
		else
			cmg_chars->values[i] = 0;
	}
}
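
/*
 * Read the channel-measurement characteristics for @chp (request code
 * 0x0022).  Characteristics data is only provided for channel-
 * measurement groups 2 and 3; for all other groups chp->cmg_chars
 * stays NULL and the temporary buffer is freed again.
 */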

int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	struct cmg_chars *cmg_chars;
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	chp->cmg_chars = NULL;
	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
	if (!cmg_chars)
		return -ENOMEM;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret) {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
		goto out;
	}
	if (scmc_area->not_valid) {
		chp->cmg = -1;
		chp->shared = -1;
		goto out;
	}
	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
		goto out;
	}
	chp->cmg_chars = cmg_chars;
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
out:
	spin_unlock_irq(&chsc_page_lock);
	if (!chp->cmg_chars)
		kfree(cmg_chars);

	return ret;
}

int __init chsc_init(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {
		ret = -ENOMEM;
		goto out_err;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		goto out_err;
	return ret;
out_err:
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
	return ret;
}

void __init chsc_init_cleanup(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
}

int chsc_enable_facility(int operation_code)
{
	unsigned long flags;
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
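
/*
 * Query the general and chsc characteristics of the channel subsystem
 * (request code 0x0010) and cache them in the two global structures
 * above.  Marked __init since it is intended to run once while the
 * channel subsystem is brought up, before any facility checks rely on
 * the cached values.
 */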

int __init chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[508];
	} __attribute__ ((packed)) *scsc_area;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	spin_unlock_irq(&chsc_page_lock);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);

/*
 * chsc_sstpc() and chsc_sstpi() below are used by the Server Time
 * Protocol (STP) support code: set STP controls (request code 0x0033)
 * and store STP information (request code 0x0038), respectively.
 */
int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	return rc;
}

int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}

int chsc_siosl(struct subchannel_id schid)
{
	struct {
		struct chsc_header request;
		u32 word1;
		struct subchannel_id sid;
		u32 word3;
		struct chsc_header response;
		u32 word[11];
	} __attribute__ ((packed)) *siosl_area;
	unsigned long flags;
	int ccode;
	int rc;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	siosl_area = chsc_page;
	siosl_area->request.length = 0x0010;
	siosl_area->request.code = 0x0046;
	siosl_area->word1 = 0x80000000;
	siosl_area->sid = schid;

	ccode = chsc(siosl_area);
	if (ccode > 0) {
		if (ccode == 3)
			rc = -ENODEV;
		else
			rc = -EBUSY;
		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, ccode);
		goto out;
	}
	rc = chsc_error_from_response(siosl_area->response.code);
	if (rc)
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      siosl_area->response.code);
	else
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);
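
/*
 * Usage sketch for chsc_scm_info() below, assuming the caller follows
 * the @token convention documented there: pass 0 on the first call and
 * the continuation token carried in the response on subsequent calls,
 * until the response no longer indicates continuation data.
 */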

/**
 * chsc_scm_info() - store SCM information (SSI)
 * @scm_area: request and response block for SSI
 * @token: continuation token
 *
 * Returns 0 on success.
 */
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
{
	int ccode, ret;

	memset(scm_area, 0, sizeof(*scm_area));
	scm_area->request.length = 0x0020;
	scm_area->request.code = 0x004C;
	scm_area->reqtok = token;

	ccode = chsc(scm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(scm_area->response.code);
	if (ret != 0)
		CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
			      scm_area->response.code);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_scm_info);