/*
 * S/390 common I/O routines -- channel subsystem call
 *
 * Copyright IBM Corp. 1999,2012
 * Author(s): Ingo Adlung (adlung@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Arnd Bergmann (arndb@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>
#include <asm/isc.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);

/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:
		return -EINVAL;
	case 0x0004:
		return -EOPNOTSUPP;
	case 0x000b:
		return -EBUSY;
	case 0x0100:
	case 0x0102:
		return -ENOMEM;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);
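
/*
 * Illustrative sketch (not part of this file): the CHSC helpers below
 * all follow one pattern -- build a request block, issue chsc(), map a
 * non-zero condition code to -ENODEV/-EBUSY, and translate the response
 * code via chsc_error_from_response(). The name "area" is hypothetical:
 *
 *	memset(area, 0, PAGE_SIZE);
 *	area->request.length = ...;
 *	area->request.code = ...;
 *	ccode = chsc(area);
 *	if (ccode > 0)
 *		return (ccode == 3) ? -ENODEV : -EBUSY;
 *	return chsc_error_from_response(area->response.code);
 */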

struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8 unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));

int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}
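
/*
 * Illustrative sketch (hypothetical caller): consuming the result of
 * chsc_get_ssd_info(). Bit 7 - i of path_mask says whether chpid[i] is
 * valid, matching the 0x80 >> i masking above; use_chpid() is made up:
 *
 *	struct chsc_ssd_info ssd;
 *	int i;
 *
 *	if (chsc_get_ssd_info(schid, &ssd) == 0)
 *		for (i = 0; i < 8; i++)
 *			if (ssd.path_mask & (0x80 >> i))
 *				use_chpid(ssd.chpid[i]);
 */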

/**
 * chsc_ssqd() - store subchannel QDIO data (SSQD)
 * @schid: id of the subchannel on which SSQD is performed
 * @ssqd: request and response block for SSQD
 *
 * Returns 0 on success.
 */
int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd)
{
	memset(ssqd, 0, sizeof(*ssqd));
	ssqd->request.length = 0x0010;
	ssqd->request.code = 0x0024;
	ssqd->first_sch = schid.sch_no;
	ssqd->last_sch = schid.sch_no;
	ssqd->ssid = schid.ssid;

	if (chsc(ssqd))
		return -EIO;

	return chsc_error_from_response(ssqd->response.code);
}
EXPORT_SYMBOL_GPL(chsc_ssqd);

/**
 * chsc_sadc() - set adapter device controls (SADC)
 * @schid: id of the subchannel on which SADC is performed
 * @scssc: request and response block for SADC
 * @summary_indicator_addr: summary indicator address
 * @subchannel_indicator_addr: subchannel indicator address
 *
 * Returns 0 on success.
 */
int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
	      u64 summary_indicator_addr, u64 subchannel_indicator_addr)
{
	memset(scssc, 0, sizeof(*scssc));
	scssc->request.length = 0x0fe0;
	scssc->request.code = 0x0021;
	scssc->operation_code = 0;

	scssc->summary_indicator_addr = summary_indicator_addr;
	scssc->subchannel_indicator_addr = subchannel_indicator_addr;

	scssc->ks = PAGE_DEFAULT_KEY >> 4;
	scssc->kc = PAGE_DEFAULT_KEY >> 4;
	scssc->isc = QDIO_AIRQ_ISC;
	scssc->schid = schid;

	/* enable the time delay disablement facility */
	if (css_general_characteristics.aif_tdd)
		scssc->word_with_d_bit = 0x10000000;

	if (chsc(scssc))
		return -EIO;

	return chsc_error_from_response(scssc->response.code);
}
EXPORT_SYMBOL_GPL(chsc_sadc);

static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
			goto out_unreg;
	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}

static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

	return 0;
}

static void s390_process_res_acc(struct chp_link *link)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * we will have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, link);
}
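
/*
 * Illustrative sketch (hypothetical values): how a chp_event handler
 * matches a subchannel's stored link address against link->fla and
 * link->fla_mask as set up by chsc_process_sei_res_acc() below. With
 * only the link address valid, fla_mask is 0xff00 and only the high
 * byte compares; with a full link address, fla_mask is 0xffff.
 * sch_fla stands in for the subchannel's stored link address:
 *
 *	if ((sch_fla & link->fla_mask) == link->fla)
 *		... subchannel is affected by the event ...
 */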

static int __get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0] & 0x000000ff);
}

struct chsc_sei_nt0_area {
	u8  flags;
	u8  vf;				/* validity flags */
	u8  rs;				/* reporting source */
	u8  cc;				/* content code */
	u16 fla;			/* full link address */
	u16 rsid;			/* reporting source id */
	u32 reserved1;
	u32 reserved2;
	/* ccdf has to be big enough for a link-incident record */
	u8  ccdf[PAGE_SIZE - 24 - 16];	/* content-code dependent field */
} __packed;

struct chsc_sei_nt2_area {
	u8  flags;			/* p and v bit */
	u8  reserved1;
	u8  reserved2;
	u8  cc;				/* content code */
	u32 reserved3[13];
	u8  ccdf[PAGE_SIZE - 24 - 56];	/* content-code dependent field */
} __packed;

#define CHSC_SEI_NT0	(1ULL << 63)
#define CHSC_SEI_NT2	(1ULL << 61)

struct chsc_sei {
	struct chsc_header request;
	u32 reserved1;
	u64 ntsm;			/* notification type mask */
	struct chsc_header response;
	u32 :24;
	u8  nt;
	union {
		struct chsc_sei_nt0_area nt0_area;
		struct chsc_sei_nt2_area nt2_area;
		u8 nt_area[PAGE_SIZE - 24];
	} u;
} __packed;
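
/*
 * Illustrative note: the ntsm bits select which notification types a
 * store-event-information request may return. Bit 0 (the 1ULL << 63
 * mask above) selects NT0, the classic channel-subsystem events; bit 2
 * selects NT2, the PCI events. chsc_process_crw() below asks for both:
 *
 *	u64 ntsm = CHSC_SEI_NT0 | CHSC_SEI_NT2;
 */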

static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}

static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_link link;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;
	}
	s390_process_res_acc(&link);
}

static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
{
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *data;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	if (sei_area->rs != 0)
		return;
	data = sei_area->ccdf;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))
			continue;
		chpid.id = num;

		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);
		if (!chp) {
			chp_new(chpid);
			continue;
		}
		mutex_lock(&chp->lock);
		chp_update_desc(chp);
		mutex_unlock(&chp->lock);
	}
}

struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
{
	int ret;

	CIO_CRW_EVENT(4, "chsc: scm change notification\n");
	if (sei_area->rs != 7)
		return;

	ret = scm_update_information();
	if (ret)
		CIO_CRW_EVENT(0, "chsc: updating change notification"
			      " failed (rc=%d).\n", ret);
}

static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
{
	int ret;

	CIO_CRW_EVENT(4, "chsc: scm available information\n");
	if (sei_area->rs != 7)
		return;

	ret = scm_process_availability_information();
	if (ret)
		CIO_CRW_EVENT(0, "chsc: process availability information"
			      " failed (rc=%d).\n", ret);
}

static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
	switch (sei_area->cc) {
	case 1:
		zpci_event_error(sei_area->ccdf);
		break;
	case 2:
		zpci_event_availability(sei_area->ccdf);
		break;
	default:
		CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
			      sei_area->cc);
		break;
	}
}

static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
{
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	case 12: /* scm change notification */
		chsc_process_sei_scm_change(sei_area);
		break;
	case 14: /* scm available notification */
		chsc_process_sei_scm_avail(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
			      sei_area->cc);
		break;
	}

	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
}
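
/*
 * Illustrative note on the NT0 flags byte, taken from the checks above
 * and from chsc_process_event_information() below: bit 0 (0x80) means
 * more event information is pending and another SEI should be issued;
 * bit 1 (0x40) means the event queue overflowed and events may have
 * been lost, so everything is re-evaluated:
 *
 *	if (sei_area->flags & 0x40)	... overflow, rescan everything ...
 *	while (sei_area->flags & 0x80)	... keep draining events ...
 */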

static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
{
	do {
		memset(sei, 0, sizeof(*sei));
		sei->request.length = 0x0010;
		sei->request.code = 0x000e;
		sei->ntsm = ntsm;

		if (chsc(sei))
			break;

		if (sei->response.code != 0x0001) {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei->response.code);
			break;
		}

		CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
		switch (sei->nt) {
		case 0:
			chsc_process_sei_nt0(&sei->u.nt0_area);
			break;
		case 2:
			chsc_process_sei_nt2(&sei->u.nt2_area);
			break;
		default:
			CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
			break;
		}
	} while (sei->u.nt0_area.flags & 0x80);
}

/*
 * Handle channel subsystem related CRWs.
 * Use store event information to find out what's going on.
 *
 * Note: Access to sei_page is serialized through machine check handler
 * thread, so no need for locking.
 */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei *sei = sei_page;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);

	CIO_TRACE_EVENT(2, "prcss");
	chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
}

void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &link);
	}
}

static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	unsigned long flags;
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &link,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int __s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

/**
 * chsc_chp_vary() - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	struct channel_path *chp = chpid_to_chp(chpid);

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */
	if (on) {
		/* Try to update the channel path description. */
		chp_update_desc(chp);
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &chpid);
	} else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);

	return 0;
}
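
/*
 * Illustrative sketch (hypothetical sysfs store handler, modelled on
 * the vary attribute in chp.c): user space varies a path off and back
 * on by driving chsc_chp_vary() with the parsed CHPID:
 *
 *	chsc_chp_vary(chpid, 0);	... vary offline ...
 *	chsc_chp_vary(chpid, 1);	... vary online ...
 */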

static void chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

static int chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}

int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY >> 4;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

int chsc_secm(struct channel_subsystem *css, int enable)
{
	int ret;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				__chsc_do_secm(css, 0);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	return ret;
}
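
/*
 * Illustrative sketch (modelled on the cm_enable attribute in css.c):
 * channel measurement is switched on or off for a channel subsystem by
 * calling chsc_secm() with 1 or 0; the cub pages and the cmg sysfs
 * attributes are managed inside:
 *
 *	ret = chsc_secm(css, 1);	... enable measurements ...
 *	ret = chsc_secm(css, 0);	... disable measurements ...
 */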

int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m, void *page)
{
	struct chsc_scpd *scpd_area;
	int ccode, ret;

	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;

	memset(page, 0, PAGE_SIZE);
	scpd_area = page;
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret)
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt1 *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

static void chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
				      struct cmg_chars *chars)
{
	struct cmg_chars *cmg_chars;
	int i, mask;

	cmg_chars = chp->cmg_chars;
	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		mask = 0x80 >> (i + 3);
		if (cmcv & mask)
			cmg_chars->values[i] = chars->values[i];
		else
			cmg_chars->values[i] = 0;
	}
}

int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	struct cmg_chars *cmg_chars;
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	chp->cmg_chars = NULL;
	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
	if (!cmg_chars)
		return -ENOMEM;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret) {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
		goto out;
	}
	if (scmc_area->not_valid) {
		chp->cmg = -1;
		chp->shared = -1;
		goto out;
	}
	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
		goto out;
	}
	chp->cmg_chars = cmg_chars;
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
out:
	spin_unlock_irq(&chsc_page_lock);
	if (!chp->cmg_chars)
		kfree(cmg_chars);

	return ret;
}
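
/*
 * Illustrative worked example for chsc_initialize_cmg_chars(): cmcv is
 * a 5-bit validity vector whose bits correspond to the measurement
 * characteristics, offset past the three unused high bits of the byte.
 * For i = 0 the mask is 0x80 >> 3 = 0x10, for i = 4 it is
 * 0x80 >> 7 = 0x01; a characteristic whose bit is clear is stored as 0.
 */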

int __init chsc_init(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {
		ret = -ENOMEM;
		goto out_err;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		goto out_err;
	return ret;
out_err:
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
	return ret;
}

void __init chsc_init_cleanup(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
}

int chsc_enable_facility(int operation_code)
{
	unsigned long flags;
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}
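
/*
 * Illustrative sketch (assuming the CHSC_SDA_OC_MSS operation code from
 * chsc.h): css initialization enables the multiple-subchannel-set
 * facility this way, treating -EOPNOTSUPP as "facility not installed":
 *
 *	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
 *	if (ret == -EOPNOTSUPP)
 *		... run without multiple subchannel sets ...
 */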

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[508];
	} __attribute__ ((packed)) *scsc_area;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	spin_unlock_irq(&chsc_page_lock);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);

int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	return rc;
}

int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}

int chsc_siosl(struct subchannel_id schid)
{
	struct {
		struct chsc_header request;
		u32 word1;
		struct subchannel_id sid;
		u32 word3;
		struct chsc_header response;
		u32 word[11];
	} __attribute__ ((packed)) *siosl_area;
	unsigned long flags;
	int ccode;
	int rc;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	siosl_area = chsc_page;
	siosl_area->request.length = 0x0010;
	siosl_area->request.code = 0x0046;
	siosl_area->word1 = 0x80000000;
	siosl_area->sid = schid;

	ccode = chsc(siosl_area);
	if (ccode > 0) {
		if (ccode == 3)
			rc = -ENODEV;
		else
			rc = -EBUSY;
		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, ccode);
		goto out;
	}
	rc = chsc_error_from_response(siosl_area->response.code);
	if (rc)
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      siosl_area->response.code);
	else
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);
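
/*
 * Illustrative sketch (modelled on ccw_device_siosl() in device_ops.c):
 * a device driver triggers channel-subsystem logging for the subchannel
 * its device sits on:
 *
 *	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 *
 *	return chsc_siosl(sch->schid);
 */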

/**
 * chsc_scm_info() - store SCM information (SSI)
 * @scm_area: request and response block for SSI
 * @token: continuation token
 *
 * Returns 0 on success.
 */
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
{
	int ccode, ret;

	memset(scm_area, 0, sizeof(*scm_area));
	scm_area->request.length = 0x0020;
	scm_area->request.code = 0x004C;
	scm_area->reqtok = token;

	ccode = chsc(scm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(scm_area->response.code);
	if (ret != 0)
		CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
			      scm_area->response.code);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_scm_info);
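
/*
 * Illustrative sketch (modelled on the caller in scm.c; restok is
 * assumed to be the response-token field of struct chsc_scm_info): when
 * the response does not fit into one block, the returned token is fed
 * back in until it comes back as zero:
 *
 *	u64 token = 0;
 *
 *	do {
 *		ret = chsc_scm_info(scm_info, token);
 *		if (ret)
 *			break;
 *		... consume scm_info ...
 *		token = scm_info->restok;
 *	} while (token);
 */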