/*
 * drivers/s390/cio/chsc.c
 *  S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright IBM Corp. 1999,2008
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;

/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
		return -EINVAL;
	case 0x0004:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);

struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	/* first subchannel */
	u16 :16;
	u16 l_sch;	/* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8 unit_addr;	/* unit address */
	u16 devno;	/* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	/* subchannel */
	u8 chpid[8];	/* chpids 0-7 */
	u16 fla[8];	/* full link addresses 0-7 */
} __attribute__ ((packed));
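
/**
 * chsc_get_ssd_info() - store subchannel description data
 * @schid: id of the subchannel the description is requested for
 * @ssd: working area the description is copied to
 *
 * Issues a store-subchannel-description chsc (request code 0x0004) for
 * @schid and, on success, copies the path mask, the chpids and the full
 * link addresses of the valid paths into @ssd.  Returns 0 on success,
 * -ENOMEM if no scratch page could be allocated, -ENODEV/-EBUSY for
 * unfavourable condition codes, and an error code translated from the
 * chsc response otherwise.
 */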
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	unsigned long page;
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	ssd_area = (struct chsc_ssd_area *) page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out_free;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out_free;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out_free;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out_free;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out_free:
	free_page(page);
	return ret;
}

static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
			goto out_unreg;
	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}

static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

	return 0;
}

static void s390_process_res_acc(struct chp_link *link)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * we will have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, link);
}
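
/*
 * Extract the chpid from a link-incident record.  Returns the chpid on
 * success, or a negative error code if the record is a null record or
 * does not carry a valid node descriptor.
 */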
static int __get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0] & 0x000000ff);
}

struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}

static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
	struct chp_link link;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;
	}
	s390_process_res_acc(&link);
}
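
/*
 * Layout of the content-code dependent field for a channel-path-
 * configuration notification: @map is a bitmap of the affected chpids
 * and @op selects the operation (0 = configure, 1 = deconfigure,
 * 2 = cancel deconfigure), matching the events[] strings below.
 */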
struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	/* Ignore unknown operation codes; they would index past events[]. */
	if (data->op > 2)
		return;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}

static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei_area *sei_area;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (!sei_page)
		return;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			chsc_process_sei(sei_area);
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);
}
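
/*
 * chsc_chp_online() - process a channel path that has become available.
 * If the channel path is in a usable state, re-evaluate all subchannels
 * that may now have gained a path through it.
 */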
void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &link);
	}
}

static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	unsigned long flags;
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &link,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int __s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */
	if (on)
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &link);
	else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &link);

	return 0;
}

static void chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

static int chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}
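
/*
 * Issue the set-channel-monitor chsc (request code 0x0016) on behalf of
 * chsc_secm().  A non-zero @enable selects operation code 0 (start
 * channel measurement, passing the css cub pages), zero selects
 * operation code 1 (stop channel measurement).
 */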
static int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	secm_area = page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
	return ret;
}

int chsc_secm(struct channel_subsystem *css, int enable)
{
	void *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	free_page((unsigned long)secm_area);
	return ret;
}
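
/**
 * chsc_determine_channel_path_desc() - store channel-path description
 * @chpid: channel path to describe
 * @fmt: command format
 * @rfmt: requested response format
 * @c: request-format bit c, passed through to the command
 * @m: request-format bit m, passed through to the command
 * @resp: buffer the chsc response block is copied to
 *
 * Issues a store-channel-path-description chsc (request code 0x0002)
 * for @chpid.  Response format 1 requires the fcs and response format 2
 * the cib general characteristic; -EINVAL is returned otherwise.
 */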
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m,
				     struct chsc_response_struct *resp)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 2;
		u32 m : 1;
		u32 c : 1;
		u32 fmt : 4;
		u32 cssid : 8;
		u32 : 4;
		u32 rfmt : 4;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u8 data[PAGE_SIZE - 20];
	} __attribute__ ((packed)) *scpd_area;

	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;
	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret == 0)
		/* Success. */
		memcpy(resp, &scpd_area->response, scpd_area->response.length);
	else
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
out:
	free_page((unsigned long)scpd_area);
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
	int ret;

	chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL);
	if (!chsc_resp)
		return -ENOMEM;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
	if (ret)
		goto out_free;
	memcpy(desc, &chsc_resp->data, chsc_resp->length);
out_free:
	kfree(chsc_resp);
	return ret;
}

static void chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
				      struct cmg_chars *chars)
{
	switch (chp->cmg) {
	case 2:
	case 3:
		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
					 GFP_KERNEL);
		if (chp->cmg_chars) {
			int i, mask;
			struct cmg_chars *cmg_chars;

			cmg_chars = chp->cmg_chars;
			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
				mask = 0x80 >> (i + 3);
				if (cmcv & mask)
					cmg_chars->values[i] = chars->values[i];
				else
					cmg_chars->values[i] = 0;
			}
		}
		break;
	default:
		/* No cmg-dependent data. */
		break;
	}
}
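
/*
 * Retrieve the channel-measurement characteristics for @chp via the
 * scmc chsc (request code 0x0022) and cache the cmg-dependent values.
 * If the response flags the data as not valid, cmg and shared are set
 * to -1.
 */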
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret == 0) {
		/* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			chp->cmg = -1;
			chp->shared = -1;
		}
	} else {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}

int __init chsc_alloc_sei_area(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page) {
		CIO_MSG_EVENT(0, "Can't allocate page for processing of "
			      "chsc machine checks!\n");
		return -ENOMEM;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		/* The page came from get_zeroed_page(), so use free_page(). */
		free_page((unsigned long)sei_page);
	return ret;
}

void __init chsc_free_sei_area(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)sei_page);
}
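
/**
 * chsc_enable_facility() - enable an optional facility
 * @operation_code: operation code identifying the facility to enable
 *
 * Issues an sda chsc (request code 0x0031) with @operation_code.
 * Returns 0 on success, -EOPNOTSUPP if the chsc rejects the operation
 * code, and a negative error code otherwise.
 */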
int __init chsc_enable_facility(int operation_code)
{
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
out:
	free_page((unsigned long)sda_area);
	return ret;
}

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} __attribute__ ((packed)) *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area)
		return -ENOMEM;

	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	free_page((unsigned long)scsc_area);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);

int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	return rc;
}

int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}