/*
 * drivers/s390/cio/chsc.c
 *  S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright IBM Corp. 1999,2010
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);

/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:
		return -EINVAL;
	case 0x0004:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);

struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8 unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));

int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}
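
/*
 * Illustrative sketch, not part of this driver: a caller with a
 * subchannel ID would typically retrieve the SSD info and walk the
 * path mask for installed channel paths, e.g.:
 *
 *	struct chsc_ssd_info ssd;
 *	int i;
 *
 *	if (chsc_get_ssd_info(schid, &ssd) == 0)
 *		for (i = 0; i < 8; i++)
 *			if (ssd.path_mask & (0x80 >> i))
 *				handle_chpid(ssd.chpid[i]);
 *
 * (handle_chpid is a hypothetical consumer of the CHPID.)
 */
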
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
			goto out_unreg;
	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}

static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

	return 0;
}

static void s390_process_res_acc(struct chp_link *link)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * we will have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, link);
}
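
/*
 * Note on the for_each_subchannel_staged() calls in this file: the
 * first callback is applied to subchannels already registered with the
 * css driver, the second to subchannel IDs for which no subchannel is
 * registered yet, and the last argument is handed to both callbacks as
 * their data pointer. The callbacks here take sch->lock themselves.
 */
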
static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0] & 0x000000ff);
}

struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}

static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
	struct chp_link link;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;
	}
	s390_process_res_acc(&link);
}

static void chsc_process_sei_chp_avail(struct chsc_sei_area *sei_area)
{
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *data;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	if (sei_area->rs != 0)
		return;
	data = sei_area->ccdf;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))
			continue;
		chpid.id = num;

		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);
		if (!chp) {
			chp_new(chpid);
			continue;
		}
		mutex_lock(&chp->lock);
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		mutex_unlock(&chp->lock);
	}
}
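
/*
 * Content-code-dependent field layout for channel-path-configuration
 * notifications (content code 8): @map is a bitmap of affected CHPIDs;
 * @op selects the operation (0 = configure, 1 = deconfigure,
 * 2 = cancel deconfigure), as dispatched in
 * chsc_process_sei_chp_config() below.
 */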
struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}

static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei_area *sei_area;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (!sei_page)
		return;
	/*
	 * Access to sei_page is serialized through the machine check
	 * handler thread, so no need for locking.
	 */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			chsc_process_sei(sei_area);
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);
}

void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &link);
	}
}
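
/*
 * The helpers below propagate a channel-path vary event to the
 * affected subchannels. Illustrative sketch of a caller, not part of
 * this file; 0x40 is an arbitrary example CHPID:
 *
 *	struct chp_id chpid;
 *
 *	chp_id_init(&chpid);
 *	chpid.id = 0x40;
 *	chsc_chp_vary(chpid, 1);	(1 = vary online, 0 = offline)
 */
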
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	unsigned long flags;
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &link,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	struct channel_path *chp = chpid_to_chp(chpid);

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to.
	 */
	if (on) {
		/* Try to update the channel path descriptor. */
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &chpid);
	} else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);

	return 0;
}

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}
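
/*
 * __chsc_do_secm() issues the secm chsc command (code 0x0016) to
 * switch channel measurement on (operation code 0) or off (operation
 * code 1), passing the two measurement-block addresses that
 * chsc_secm() set up in css->cub_addr1 and css->cub_addr2.
 */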
int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY >> 4;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

int
chsc_secm(struct channel_subsystem *css, int enable)
{
	int ret;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				__chsc_do_secm(css, 0);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	return ret;
}
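
/*
 * chsc_determine_channel_path_desc() issues a store-channel-path-
 * description chsc (code 0x0002) into the caller-provided @page; the
 * rfmt checks guard the response formats that require the fcs/cib
 * facilities. The two wrappers below show typical use with the shared
 * chsc_page.
 */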
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m, void *page)
{
	struct chsc_scpd *scpd_area;
	int ccode, ret;

	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;

	memset(page, 0, PAGE_SIZE);
	scpd_area = page;
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret)
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt1 *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	int ret;

	spin_lock_irq(&chsc_page_lock);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	struct cmg_chars *cmg_chars;
	int i, mask;

	cmg_chars = chp->cmg_chars;
	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		mask = 0x80 >> (i + 3);
		if (cmcv & mask)
			cmg_chars->values[i] = chars->values[i];
		else
			cmg_chars->values[i] = 0;
	}
}

int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	struct cmg_chars *cmg_chars;
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	chp->cmg_chars = NULL;
	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
	if (!cmg_chars)
		return -ENOMEM;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret) {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
		goto out;
	}
	if (scmc_area->not_valid) {
		chp->cmg = -1;
		chp->shared = -1;
		goto out;
	}
	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
		goto out;
	}
	chp->cmg_chars = cmg_chars;
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
out:
	spin_unlock_irq(&chsc_page_lock);
	if (!chp->cmg_chars)
		kfree(cmg_chars);

	return ret;
}
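
/*
 * chsc_init() allocates the two DMA-capable pages used throughout this
 * file (sei_page for store-event-information, chsc_page, guarded by
 * chsc_page_lock, for all other commands) and registers the CRW
 * handler for channel-subsystem reports.
 */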
int __init chsc_init(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {
		ret = -ENOMEM;
		goto out_err;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		goto out_err;
	return ret;
out_err:
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
	return ret;
}

void __init chsc_init_cleanup(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
}

int chsc_enable_facility(int operation_code)
{
	unsigned long flags;
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}
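
/*
 * Illustrative sketch, assuming an operation-code constant such as
 * CHSC_SDA_OC_MSS: early css setup might enable the
 * multiple-subchannel-sets facility and fall back gracefully if it is
 * unavailable:
 *
 *	if (chsc_enable_facility(CHSC_SDA_OC_MSS) == 0)
 *		max_ssid = __MAX_SSID;	(hypothetical consumer)
 */
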
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[508];
	} __attribute__ ((packed)) *scsc_area;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	spin_unlock_irq(&chsc_page_lock);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);

int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	return rc;
}

int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}

int chsc_siosl(struct subchannel_id schid)
{
	struct {
		struct chsc_header request;
		u32 word1;
		struct subchannel_id sid;
		u32 word3;
		struct chsc_header response;
		u32 word[11];
	} __attribute__ ((packed)) *siosl_area;
	unsigned long flags;
	int ccode;
	int rc;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	siosl_area = chsc_page;
	siosl_area->request.length = 0x0010;
	siosl_area->request.code = 0x0046;
	siosl_area->word1 = 0x80000000;
	siosl_area->sid = schid;

	ccode = chsc(siosl_area);
	if (ccode > 0) {
		if (ccode == 3)
			rc = -ENODEV;
		else
			rc = -EBUSY;
		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, ccode);
		goto out;
	}
	rc = chsc_error_from_response(siosl_area->response.code);
	if (rc)
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      siosl_area->response.code);
	else
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);