/*
 * drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright IBM Corp. 1999,2008
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;

/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
		return -EINVAL;
	case 0x0004:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);

struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8 unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));

int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	unsigned long page;
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	ssd_area = (struct chsc_ssd_area *) page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out_free;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out_free;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out_free;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out_free;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out_free:
	free_page(page);
	return ret;
}

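/*
 * Notify the driver of a subchannel that one of its channel paths has
 * gone offline.  If the driver's chp_event callback returns non-zero,
 * clear the subchannel's logical path mask and schedule the subchannel
 * for re-evaluation on the slow path.
 */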
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
			goto out_unreg;
	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}

static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

	return 0;
}

static void s390_process_res_acc(struct chp_link *link)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * we will have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, link);
}

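/*
 * Extract the channel-path ID from a link-incident record (LIR).
 * Returns the chpid from byte 3 of the incident-node descriptor, or
 * -EINVAL if the record or its node descriptor is not valid (or if it
 * describes a device-type node, which is not handled here).
 */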
static int __get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0] & 0x000000ff);
}

struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}

static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
	struct chp_link link;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;
	}
	s390_process_res_acc(&link);
}

struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

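/*
 * Dispatch one store-event-information result to the handler for its
 * content code.  If the overflow flag is set, events may have been
 * lost, so all subchannels are scheduled for re-evaluation first.
 */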
static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}

static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei_area *sei_area;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (!sei_page)
		return;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			chsc_process_sei(sei_area);
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);
}

void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &link);
	}
}

static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	unsigned long flags;
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &link,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int __s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */
	if (on)
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &link);
	else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &link);

	return 0;
}

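/*
 * Add or remove the channel-measurement sysfs attributes for all
 * channel paths of a channel subsystem.  chsc_add_cmg_attr() rolls
 * back the already-created attributes if one of them cannot be added.
 */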
static void chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

static int chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}

int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	secm_area = page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
	return ret;
}

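/*
 * Enable or disable channel measurement for a channel subsystem.  When
 * enabling, the two control blocks (cub_addr1/cub_addr2) are allocated
 * first and the measurement attributes are created; on failure, or
 * when disabling, they are released again.
 */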
int chsc_secm(struct channel_subsystem *css, int enable)
{
	void *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	free_page((unsigned long)secm_area);
	return ret;
}

int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m,
				     struct chsc_response_struct *resp)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 c : 1;
		u32 fmt : 4;
		u32 cssid : 8;
		u32 : 4;
		u32 rfmt : 4;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scpd_area;

	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;
	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret == 0)
		/* Success. */
		memcpy(resp, &scpd_area->response, scpd_area->response.length);
	else
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
out:
	free_page((unsigned long)scpd_area);
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
	int ret;

	chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL);
	if (!chsc_resp)
		return -ENOMEM;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
	if (ret)
		goto out_free;
	memcpy(desc, &chsc_resp->data, chsc_resp->length);
out_free:
	kfree(chsc_resp);
	return ret;
}

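/*
 * Cache the channel-measurement characteristics for a channel path.
 * Only CMG 2 and 3 carry characteristics; a value is copied only if
 * its validity bit in @cmcv is set, otherwise it is stored as zero.
 */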
static void chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
				      struct cmg_chars *chars)
{
	switch (chp->cmg) {
	case 2:
	case 3:
		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
					 GFP_KERNEL);
		if (chp->cmg_chars) {
			int i, mask;
			struct cmg_chars *cmg_chars;

			cmg_chars = chp->cmg_chars;
			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
				mask = 0x80 >> (i + 3);
				if (cmcv & mask)
					cmg_chars->values[i] = chars->values[i];
				else
					cmg_chars->values[i] = 0;
			}
		}
		break;
	default:
		/* No cmg-dependent data. */
		break;
	}
}

int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret == 0) {
		/* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			chp->cmg = -1;
			chp->shared = -1;
		}
	} else {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}

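/*
 * The page used for store-event-information requests is allocated once
 * at boot, when the CRW handler for the channel subsystem is
 * registered, and is freed again by chsc_free_sei_area().
 */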
int __init chsc_alloc_sei_area(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page) {
		CIO_MSG_EVENT(0, "Can't allocate page for processing of "
			      "chsc machine checks!\n");
		return -ENOMEM;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		free_page((unsigned long)sei_page);
	return ret;
}

void __init chsc_free_sei_area(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)sei_page);
}

int __init chsc_enable_facility(int operation_code)
{
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
out:
	free_page((unsigned long)sda_area);
	return ret;
}

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} __attribute__ ((packed)) *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area)
		return -ENOMEM;

	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	free_page((unsigned long)scsc_area);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);

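/*
 * The two helpers below are used by the Server Time Protocol (STP)
 * support code: chsc_sstpc() issues a set-STP-controls request
 * (command code 0x0033), chsc_sstpi() stores STP information (command
 * code 0x0038) into a caller-provided buffer.
 */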
int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	return rc;
}

int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}