/*
 * S/390 common I/O routines -- channel subsystem call
 *
 * Copyright IBM Corp. 1999,2012
 * Author(s): Ingo Adlung (adlung@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Arnd Bergmann (arndb@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);

/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:
		return -EINVAL;
	case 0x0004:
		return -EOPNOTSUPP;
	case 0x000b:
		return -EBUSY;
	case 0x0100:
	case 0x0102:
		return -ENOMEM;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);
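/*
 * Illustrative sketch only (no caller uses this verbatim): the CHSC
 * helpers below all follow the same request/response pattern. Under
 * chsc_page_lock the shared page is cleared, a request header
 * (length and command code) plus command-specific fields are filled
 * in, chsc() is issued, and either the positive condition code or the
 * response code is mapped to an errno:
 *
 *	spin_lock_irq(&chsc_page_lock);
 *	memset(chsc_page, 0, PAGE_SIZE);
 *	area = chsc_page;
 *	area->request.length = ...;
 *	area->request.code = ...;
 *	ccode = chsc(area);
 *	if (ccode > 0)
 *		ret = (ccode == 3) ? -ENODEV : -EBUSY;
 *	else
 *		ret = chsc_error_from_response(area->response.code);
 *	spin_unlock_irq(&chsc_page_lock);
 */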
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	/* first subchannel */
	u16 :16;
	u16 l_sch;	/* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3;	/* subchannel type */
	u8 zeroes    : 3;
	u8 unit_addr;	/* unit address */
	u16 devno;	/* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	/* subchannel */
	u8 chpid[8];	/* chpids 0-7 */
	u16 fla[8];	/* full link addresses 0-7 */
} __attribute__ ((packed));

int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}
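/*
 * Illustrative sketch (not a caller in this file): consumers of the
 * chsc_ssd_info filled in above index chpid[] and fla[] with the same
 * big-endian bit convention used by the copy loop, i.e. mask
 * 0x80 >> i selects entry i; handle_chpid() is a placeholder:
 *
 *	for (i = 0; i < 8; i++)
 *		if (ssd->path_mask & (0x80 >> i))
 *			handle_chpid(ssd->chpid[i]);
 */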
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
			goto out_unreg;
	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}

static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

	return 0;
}

static void s390_process_res_acc(struct chp_link *link)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have, the less scanning
	 * we will have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, link);
}

static int __get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0] & 0x000000ff);
}

struct chsc_sei_nt0_area {
	u8  flags;
	u8  vf;				/* validity flags */
	u8  rs;				/* reporting source */
	u8  cc;				/* content code */
	u16 fla;			/* full link address */
	u16 rsid;			/* reporting source id */
	u32 reserved1;
	u32 reserved2;
	/* ccdf has to be big enough for a link-incident record */
	u8  ccdf[PAGE_SIZE - 24 - 16];	/* content-code dependent field */
} __packed;

struct chsc_sei_nt2_area {
	u8  flags;			/* p and v bit */
	u8  reserved1;
	u8  reserved2;
	u8  cc;				/* content code */
	u32 reserved3[13];
	u8  ccdf[PAGE_SIZE - 24 - 56];	/* content-code dependent field */
} __packed;

#define CHSC_SEI_NT0	(1ULL << 63)
#define CHSC_SEI_NT2	(1ULL << 61)

struct chsc_sei {
	struct chsc_header request;
	u32 reserved1;
	u64 ntsm;			/* notification type mask */
	struct chsc_header response;
	u32 :24;
	u8 nt;
	union {
		struct chsc_sei_nt0_area nt0_area;
		struct chsc_sei_nt2_area nt2_area;
		u8 nt_area[PAGE_SIZE - 24];
	} u;
} __packed;
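/*
 * The ntsm field selects which notification types the store-event-
 * information command may return: CHSC_SEI_NT0 (bit 63) requests NT0
 * events, CHSC_SEI_NT2 (bit 61) requests NT2 events. chsc_process_crw()
 * below passes CHSC_SEI_NT0 | CHSC_SEI_NT2 and dispatches on the nt
 * value found in the response.
 */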
static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}

static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_link link;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;
	}
	s390_process_res_acc(&link);
}

static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
{
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *data;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	if (sei_area->rs != 0)
		return;
	data = sei_area->ccdf;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))
			continue;
		chpid.id = num;

		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);
		if (!chp) {
			chp_new(chpid);
			continue;
		}
		mutex_lock(&chp->lock);
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		mutex_unlock(&chp->lock);
	}
}

struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
{
	int ret;

	CIO_CRW_EVENT(4, "chsc: scm change notification\n");
	if (sei_area->rs != 7)
		return;

	ret = scm_update_information();
	if (ret)
		CIO_CRW_EVENT(0, "chsc: updating change notification"
			      " failed (rc=%d).\n", ret);
}
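/*
 * NT2 events describe PCI functions; they are forwarded to the zPCI
 * layer when PCI support is built in and silently ignored otherwise.
 */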
static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
#ifdef CONFIG_PCI
	switch (sei_area->cc) {
	case 1:
		zpci_event_error(sei_area->ccdf);
		break;
	case 2:
		zpci_event_availability(sei_area->ccdf);
		break;
	default:
		CIO_CRW_EVENT(2, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
#endif
}

static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
{
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	case 12: /* scm change notification */
		chsc_process_sei_scm_change(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}
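/*
 * Repeatedly issue store event information (command 0x000e) for the
 * notification types in @ntsm and dispatch each stored event, until
 * the flags field no longer reports further pending events (bit 0x80).
 * A set overflow bit (0x40) means events were lost, in which case all
 * subchannels are scheduled for re-evaluation.
 */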
static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
{
	do {
		memset(sei, 0, sizeof(*sei));
		sei->request.length = 0x0010;
		sei->request.code = 0x000e;
		sei->ntsm = ntsm;

		if (chsc(sei))
			break;

		if (sei->response.code == 0x0001) {
			CIO_CRW_EVENT(2, "chsc: sei successful\n");

			/* Check if we might have lost some information. */
			if (sei->u.nt0_area.flags & 0x40) {
				CIO_CRW_EVENT(2, "chsc: event overflow\n");
				css_schedule_eval_all();
			}

			switch (sei->nt) {
			case 0:
				chsc_process_sei_nt0(&sei->u.nt0_area);
				break;
			case 2:
				chsc_process_sei_nt2(&sei->u.nt2_area);
				break;
			default:
				CIO_CRW_EVENT(2, "chsc: unhandled nt=%d\n",
					      sei->nt);
				break;
			}
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei->response.code);
			break;
		}
	} while (sei->u.nt0_area.flags & 0x80);

	return 0;
}

static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei *sei;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (!sei_page)
		return;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	__chsc_process_crw(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
}

void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &link);
	}
}

static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	unsigned long flags;
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &link,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int __s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	struct channel_path *chp = chpid_to_chp(chpid);

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */
	if (on) {
		/* Try to update the channel path descriptor. */
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &chpid);
	} else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);

	return 0;
}
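/*
 * Remove the measurement-related sysfs attributes from every known
 * channel path of @css. Counterpart to chsc_add_cmg_attr(), which
 * unwinds already-added attributes itself if one addition fails.
 */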
static void chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

static int chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}

int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY >> 4;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

int chsc_secm(struct channel_subsystem *css, int enable)
{
	int ret;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				__chsc_do_secm(css, 0);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	return ret;
}
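/*
 * Issue a store-channel-path-description chsc (command 0x0002) for
 * @chpid into @page. @fmt, @rfmt, @c and @m are copied verbatim into
 * the request block; response format 1 requires the fcs facility and
 * response format 2 the cib facility. @page must be a 4K buffer.
 */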
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m, void *page)
{
	struct chsc_scpd *scpd_area;
	int ccode, ret;

	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;

	memset(page, 0, PAGE_SIZE);
	scpd_area = page;
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret)
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt1 *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	int ret;

	spin_lock_irq(&chsc_page_lock);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}
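/*
 * Copy only those measurement characteristics that the cmcv mask marks
 * as valid; all other slots are cleared.
 */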
static void chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
				      struct cmg_chars *chars)
{
	struct cmg_chars *cmg_chars;
	int i, mask;

	cmg_chars = chp->cmg_chars;
	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		mask = 0x80 >> (i + 3);
		if (cmcv & mask)
			cmg_chars->values[i] = chars->values[i];
		else
			cmg_chars->values[i] = 0;
	}
}

int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	struct cmg_chars *cmg_chars;
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	chp->cmg_chars = NULL;
	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
	if (!cmg_chars)
		return -ENOMEM;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret) {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
		goto out;
	}
	if (scmc_area->not_valid) {
		chp->cmg = -1;
		chp->shared = -1;
		goto out;
	}
	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
		goto out;
	}
	chp->cmg_chars = cmg_chars;
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
out:
	spin_unlock_irq(&chsc_page_lock);
	if (!chp->cmg_chars)
		kfree(cmg_chars);

	return ret;
}

int __init chsc_init(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {
		ret = -ENOMEM;
		goto out_err;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		goto out_err;
	return ret;
out_err:
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
	return ret;
}

void __init chsc_init_cleanup(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
}
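/*
 * Enable an optional facility via the SDA chsc (command 0x0031); the
 * facility is selected by @operation_code. A response code of 0x0101
 * means the facility is not supported.
 */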
int chsc_enable_facility(int operation_code)
{
	unsigned long flags;
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[508];
	} __attribute__ ((packed)) *scsc_area;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	spin_unlock_irq(&chsc_page_lock);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);

int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	return rc;
}

int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}
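/*
 * Ask the channel subsystem to initiate logging for @schid (command
 * 0x0046), typically to capture hardware debug data for a misbehaving
 * subchannel. Returns 0 if the request was accepted.
 */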
int chsc_siosl(struct subchannel_id schid)
{
	struct {
		struct chsc_header request;
		u32 word1;
		struct subchannel_id sid;
		u32 word3;
		struct chsc_header response;
		u32 word[11];
	} __attribute__ ((packed)) *siosl_area;
	unsigned long flags;
	int ccode;
	int rc;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	siosl_area = chsc_page;
	siosl_area->request.length = 0x0010;
	siosl_area->request.code = 0x0046;
	siosl_area->word1 = 0x80000000;
	siosl_area->sid = schid;

	ccode = chsc(siosl_area);
	if (ccode > 0) {
		if (ccode == 3)
			rc = -ENODEV;
		else
			rc = -EBUSY;
		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, ccode);
		goto out;
	}
	rc = chsc_error_from_response(siosl_area->response.code);
	if (rc)
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      siosl_area->response.code);
	else
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);

/**
 * chsc_scm_info() - store SCM information (SSI)
 * @scm_area: request and response block for SSI
 * @token: continuation token
 *
 * Returns 0 on success.
 */
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
{
	int ccode, ret;

	memset(scm_area, 0, sizeof(*scm_area));
	scm_area->request.length = 0x0020;
	scm_area->request.code = 0x004C;
	scm_area->reqtok = token;

	ccode = chsc(scm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(scm_area->response.code);
	if (ret != 0)
		CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
			      scm_area->response.code);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_scm_info);