/*
 * S/390 common I/O routines -- channel subsystem call
 *
 * Copyright IBM Corp. 1999,2012
 * Author(s): Ingo Adlung (adlung@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Arnd Bergmann (arndb@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);

/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:
		return -EINVAL;
	case 0x0004:
		return -EOPNOTSUPP;
	case 0x000b:
		return -EBUSY;
	case 0x0100:
	case 0x0102:
		return -ENOMEM;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);

struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	/* first subchannel */
	u16 :16;
	u16 l_sch;	/* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3;	/* subchannel type */
	u8 zeroes    : 3;
	u8 unit_addr;		/* unit address */
	u16 devno;		/* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;		/* subchannel */
	u8 chpid[8];		/* chpids 0-7 */
	u16 fla[8];		/* full link addresses 0-7 */
} __attribute__ ((packed));
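
/**
 * chsc_get_ssd_info() - store subchannel description data
 * @schid: id of the subchannel to query
 * @ssd: target buffer for path mask, chpids and full link addresses
 *
 * Issue a "store subchannel description" chsc for @schid and copy the
 * path-related fields of the response into @ssd. Returns 0 on success
 * (also for subchannels that are neither I/O nor message type, in which
 * case @ssd is left zeroed), -ENODEV if the subchannel is not valid or
 * not provided, and another negative error code otherwise.
 */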
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
			goto out_unreg;
	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}

static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

	return 0;
}

static void s390_process_res_acc(struct chp_link *link)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * we will have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, link);
}
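
/*
 * Extract the chpid from a link-incident record. Returns the chpid as a
 * non-negative value, or -EINVAL if the record is a NULL record, its
 * incident-node descriptor is not valid, or it describes a device-type
 * node (which is not handled here).
 */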
static int __get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0] & 0x000000ff);
}

struct chsc_sei_nt0_area {
	u8  flags;
	u8  vf;				/* validity flags */
	u8  rs;				/* reporting source */
	u8  cc;				/* content code */
	u16 fla;			/* full link address */
	u16 rsid;			/* reporting source id */
	u32 reserved1;
	u32 reserved2;
	/* ccdf has to be big enough for a link-incident record */
	u8  ccdf[PAGE_SIZE - 24 - 16];	/* content-code dependent field */
} __packed;

struct chsc_sei_nt2_area {
	u8  flags;			/* p and v bit */
	u8  reserved1;
	u8  reserved2;
	u8  cc;				/* content code */
	u32 reserved3[13];
	u8  ccdf[PAGE_SIZE - 24 - 56];	/* content-code dependent field */
} __packed;

#define CHSC_SEI_NT0	(1ULL << 63)
#define CHSC_SEI_NT2	(1ULL << 61)

struct chsc_sei {
	struct chsc_header request;
	u32 reserved1;
	u64 ntsm;			/* notification type mask */
	struct chsc_header response;
	u32 :24;
	u8 nt;
	union {
		struct chsc_sei_nt0_area nt0_area;
		struct chsc_sei_nt2_area nt2_area;
		u8 nt_area[PAGE_SIZE - 24];
	} u;
} __packed;

static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}

static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_link link;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;
	}
	s390_process_res_acc(&link);
}
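
/*
 * Process channel-path availability information: update the descriptor of
 * every channel path flagged in the ccdf bitmap, creating channel-path
 * structures for paths that are not yet known.
 */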
static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
{
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *data;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	if (sei_area->rs != 0)
		return;
	data = sei_area->ccdf;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))
			continue;
		chpid.id = num;

		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);
		if (!chp) {
			chp_new(chpid);
			continue;
		}
		mutex_lock(&chp->lock);
		chp_update_desc(chp);
		mutex_unlock(&chp->lock);
	}
}

struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
{
	int ret;

	CIO_CRW_EVENT(4, "chsc: scm change notification\n");
	if (sei_area->rs != 7)
		return;

	ret = scm_update_information();
	if (ret)
		CIO_CRW_EVENT(0, "chsc: updating change notification"
			      " failed (rc=%d).\n", ret);
}

static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
{
	int ret;

	CIO_CRW_EVENT(4, "chsc: scm available information\n");
	if (sei_area->rs != 7)
		return;

	ret = scm_process_availability_information();
	if (ret)
		CIO_CRW_EVENT(0, "chsc: process availability information"
			      " failed (rc=%d).\n", ret);
}

static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
	switch (sei_area->cc) {
	case 1:
		zpci_event_error(sei_area->ccdf);
		break;
	case 2:
		zpci_event_availability(sei_area->ccdf);
		break;
	default:
		CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
			      sei_area->cc);
		break;
	}
}

static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
{
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	case 12: /* scm change notification */
		chsc_process_sei_scm_change(sei_area);
		break;
	case 14: /* scm available notification */
		chsc_process_sei_scm_avail(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
			      sei_area->cc);
		break;
	}

	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
}
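
/*
 * Repeatedly issue "store event information" chscs, restricted to the
 * notification types in @ntsm, and dispatch each stored event to the
 * matching nt0/nt2 handler. The loop continues for as long as the
 * response flags indicate that further event information is pending
 * (bit 0x80).
 */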
static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
{
	do {
		memset(sei, 0, sizeof(*sei));
		sei->request.length = 0x0010;
		sei->request.code = 0x000e;
		sei->ntsm = ntsm;

		if (chsc(sei))
			break;

		if (sei->response.code != 0x0001) {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei->response.code);
			break;
		}

		CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
		switch (sei->nt) {
		case 0:
			chsc_process_sei_nt0(&sei->u.nt0_area);
			break;
		case 2:
			chsc_process_sei_nt2(&sei->u.nt2_area);
			break;
		default:
			CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
			break;
		}
	} while (sei->u.nt0_area.flags & 0x80);
}

/*
 * Handle channel subsystem related CRWs.
 * Use store event information to find out what's going on.
 *
 * Note: Access to sei_page is serialized through machine check handler
 * thread, so no need for locking.
 */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei *sei = sei_page;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);

	CIO_TRACE_EVENT(2, "prcss");
	chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
}

void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &link);
	}
}
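
/*
 * Notify the driver of @sch that @chpid has been varied on or off, so
 * that it can adjust the path state of its devices accordingly.
 */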
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	unsigned long flags;
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &link,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int __s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	struct channel_path *chp = chpid_to_chp(chpid);

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to.
	 */
	if (on) {
		/* Try to update the channel path description. */
		chp_update_desc(chp);
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &chpid);
	} else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);

	return 0;
}

static void chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

static int chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}
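
/*
 * Issue a "set channel monitor" (secm) chsc for @css: operation code 0
 * enables channel measurement using the two pages at css->cub_addr1 and
 * css->cub_addr2, operation code 1 disables it. The caller must have set
 * up both pages before enabling.
 */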
int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY >> 4;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

int chsc_secm(struct channel_subsystem *css, int enable)
{
	int ret;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				__chsc_do_secm(css, 0);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	return ret;
}
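
/*
 * Issue a "store channel-path description" (scpd) chsc for @chpid into
 * @page. @fmt and @rfmt select the request and response formats (rfmt 1
 * requires the fcs facility, rfmt 2 the cib facility); @c and @m are
 * passed through as request-control bits. Returns 0 on success.
 */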
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m, void *page)
{
	struct chsc_scpd *scpd_area;
	int ccode, ret;

	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;

	memset(page, 0, PAGE_SIZE);
	scpd_area = page;
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret)
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt1 *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

static void chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
				      struct cmg_chars *chars)
{
	struct cmg_chars *cmg_chars;
	int i, mask;

	cmg_chars = chp->cmg_chars;
	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		mask = 0x80 >> (i + 3);
		if (cmcv & mask)
			cmg_chars->values[i] = chars->values[i];
		else
			cmg_chars->values[i] = 0;
	}
}
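
/*
 * Issue an scmc chsc for @chp's channel path and, on success, store its
 * cmg and shared state in @chp. For cmg 2 and 3, the measurement
 * characteristics flagged valid by the cmcv mask are copied into a newly
 * allocated chp->cmg_chars; if the response is marked not valid, cmg and
 * shared are set to -1 instead. Returns 0 on success.
 */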
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	struct cmg_chars *cmg_chars;
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	chp->cmg_chars = NULL;
	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
	if (!cmg_chars)
		return -ENOMEM;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret) {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
		goto out;
	}
	if (scmc_area->not_valid) {
		chp->cmg = -1;
		chp->shared = -1;
		goto out;
	}
	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
		goto out;
	}
	chp->cmg_chars = cmg_chars;
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
out:
	spin_unlock_irq(&chsc_page_lock);
	if (!chp->cmg_chars)
		kfree(cmg_chars);

	return ret;
}

int __init chsc_init(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {
		ret = -ENOMEM;
		goto out_err;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		goto out_err;
	return ret;
out_err:
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
	return ret;
}

void __init chsc_init_cleanup(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
}

int chsc_enable_facility(int operation_code)
{
	unsigned long flags;
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
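
/*
 * Issue a "store channel-subsystem characteristics" (scsc) chsc and cache
 * the general and chsc characteristics in the global structures above.
 */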
int __init chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[508];
	} __attribute__ ((packed)) *scsc_area;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	spin_unlock_irq(&chsc_page_lock);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);

int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	return rc;
}

int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}

int chsc_siosl(struct subchannel_id schid)
{
	struct {
		struct chsc_header request;
		u32 word1;
		struct subchannel_id sid;
		u32 word3;
		struct chsc_header response;
		u32 word[11];
	} __attribute__ ((packed)) *siosl_area;
	unsigned long flags;
	int ccode;
	int rc;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	siosl_area = chsc_page;
	siosl_area->request.length = 0x0010;
	siosl_area->request.code = 0x0046;
	siosl_area->word1 = 0x80000000;
	siosl_area->sid = schid;

	ccode = chsc(siosl_area);
	if (ccode > 0) {
		if (ccode == 3)
			rc = -ENODEV;
		else
			rc = -EBUSY;
		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, ccode);
		goto out;
	}
	rc = chsc_error_from_response(siosl_area->response.code);
	if (rc)
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      siosl_area->response.code);
	else
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);

/**
 * chsc_scm_info() - store SCM information (SSI)
 * @scm_area: request and response block for SSI
 * @token: continuation token
 *
 * Returns 0 on success.
 */
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
{
	int ccode, ret;

	memset(scm_area, 0, sizeof(*scm_area));
	scm_area->request.length = 0x0020;
	scm_area->request.code = 0x004C;
	scm_area->reqtok = token;

	ccode = chsc(scm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(scm_area->response.code);
	if (ret != 0)
		CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
			      scm_area->response.code);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_scm_info);