1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * S/390 common I/O routines -- channel subsystem call 4 * 5 * Copyright IBM Corp. 1999,2012 6 * Author(s): Ingo Adlung (adlung@de.ibm.com) 7 * Cornelia Huck (cornelia.huck@de.ibm.com) 8 * Arnd Bergmann (arndb@de.ibm.com) 9 */ 10 11 #define KMSG_COMPONENT "cio" 12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 14 #include <linux/module.h> 15 #include <linux/slab.h> 16 #include <linux/init.h> 17 #include <linux/device.h> 18 #include <linux/mutex.h> 19 #include <linux/pci.h> 20 21 #include <asm/cio.h> 22 #include <asm/chpid.h> 23 #include <asm/chsc.h> 24 #include <asm/crw.h> 25 #include <asm/isc.h> 26 #include <asm/ebcdic.h> 27 #include <asm/ap.h> 28 29 #include "css.h" 30 #include "cio.h" 31 #include "cio_debug.h" 32 #include "ioasm.h" 33 #include "chp.h" 34 #include "chsc.h" 35 36 static void *sei_page; 37 static void *chsc_page; 38 static DEFINE_SPINLOCK(chsc_page_lock); 39 40 #define SEI_VF_FLA 0xc0 /* VF flag for Full Link Address */ 41 #define SEI_RS_CHPID 0x4 /* 4 in RS field indicates CHPID */ 42 43 /** 44 * chsc_error_from_response() - convert a chsc response to an error 45 * @response: chsc response code 46 * 47 * Returns an appropriate Linux error code for @response. 48 */ 49 int chsc_error_from_response(int response) 50 { 51 switch (response) { 52 case 0x0001: 53 return 0; 54 case 0x0002: 55 case 0x0003: 56 case 0x0006: 57 case 0x0007: 58 case 0x0008: 59 case 0x000a: 60 case 0x0104: 61 return -EINVAL; 62 case 0x0004: 63 case 0x0106: /* "Wrong Channel Parm" for the op 0x003d */ 64 return -EOPNOTSUPP; 65 case 0x000b: 66 case 0x0107: /* "Channel busy" for the op 0x003d */ 67 return -EBUSY; 68 case 0x0100: 69 case 0x0102: 70 return -ENOMEM; 71 case 0x0108: /* "HW limit exceeded" for the op 0x003d */ 72 return -EUSERS; 73 default: 74 return -EIO; 75 } 76 } 77 EXPORT_SYMBOL_GPL(chsc_error_from_response); 78 79 struct chsc_ssd_area { 80 struct chsc_header request; 81 u16 :10; 82 u16 ssid:2; 83 u16 :4; 84 u16 f_sch; /* first subchannel */ 85 u16 :16; 86 u16 l_sch; /* last subchannel */ 87 u32 :32; 88 struct chsc_header response; 89 u32 :32; 90 u8 sch_valid : 1; 91 u8 dev_valid : 1; 92 u8 st : 3; /* subchannel type */ 93 u8 zeroes : 3; 94 u8 unit_addr; /* unit address */ 95 u16 devno; /* device number */ 96 u8 path_mask; 97 u8 fla_valid_mask; 98 u16 sch; /* subchannel */ 99 u8 chpid[8]; /* chpids 0-7 */ 100 u16 fla[8]; /* full link addresses 0-7 */ 101 } __packed __aligned(PAGE_SIZE); 102 103 int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) 104 { 105 struct chsc_ssd_area *ssd_area; 106 unsigned long flags; 107 int ccode; 108 int ret; 109 int i; 110 int mask; 111 112 spin_lock_irqsave(&chsc_page_lock, flags); 113 memset(chsc_page, 0, PAGE_SIZE); 114 ssd_area = chsc_page; 115 ssd_area->request.length = 0x0010; 116 ssd_area->request.code = 0x0004; 117 ssd_area->ssid = schid.ssid; 118 ssd_area->f_sch = schid.sch_no; 119 ssd_area->l_sch = schid.sch_no; 120 121 ccode = chsc(ssd_area); 122 /* Check response. */ 123 if (ccode > 0) { 124 ret = (ccode == 3) ? -ENODEV : -EBUSY; 125 goto out; 126 } 127 ret = chsc_error_from_response(ssd_area->response.code); 128 if (ret != 0) { 129 CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n", 130 schid.ssid, schid.sch_no, 131 ssd_area->response.code); 132 goto out; 133 } 134 if (!ssd_area->sch_valid) { 135 ret = -ENODEV; 136 goto out; 137 } 138 /* Copy data */ 139 ret = 0; 140 memset(ssd, 0, sizeof(struct chsc_ssd_info)); 141 if ((ssd_area->st != SUBCHANNEL_TYPE_IO) && 142 (ssd_area->st != SUBCHANNEL_TYPE_MSG)) 143 goto out; 144 ssd->path_mask = ssd_area->path_mask; 145 ssd->fla_valid_mask = ssd_area->fla_valid_mask; 146 for (i = 0; i < 8; i++) { 147 mask = 0x80 >> i; 148 if (ssd_area->path_mask & mask) { 149 chp_id_init(&ssd->chpid[i]); 150 ssd->chpid[i].id = ssd_area->chpid[i]; 151 } 152 if (ssd_area->fla_valid_mask & mask) 153 ssd->fla[i] = ssd_area->fla[i]; 154 } 155 out: 156 spin_unlock_irqrestore(&chsc_page_lock, flags); 157 return ret; 158 } 159 160 /** 161 * chsc_ssqd() - store subchannel QDIO data (SSQD) 162 * @schid: id of the subchannel on which SSQD is performed 163 * @ssqd: request and response block for SSQD 164 * 165 * Returns 0 on success. 166 */ 167 int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd) 168 { 169 memset(ssqd, 0, sizeof(*ssqd)); 170 ssqd->request.length = 0x0010; 171 ssqd->request.code = 0x0024; 172 ssqd->first_sch = schid.sch_no; 173 ssqd->last_sch = schid.sch_no; 174 ssqd->ssid = schid.ssid; 175 176 if (chsc(ssqd)) 177 return -EIO; 178 179 return chsc_error_from_response(ssqd->response.code); 180 } 181 EXPORT_SYMBOL_GPL(chsc_ssqd); 182 183 /** 184 * chsc_sadc() - set adapter device controls (SADC) 185 * @schid: id of the subchannel on which SADC is performed 186 * @scssc: request and response block for SADC 187 * @summary_indicator_addr: summary indicator address 188 * @subchannel_indicator_addr: subchannel indicator address 189 * @isc: Interruption Subclass for this subchannel 190 * 191 * Returns 0 on success. 192 */ 193 int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc, 194 u64 summary_indicator_addr, u64 subchannel_indicator_addr, u8 isc) 195 { 196 memset(scssc, 0, sizeof(*scssc)); 197 scssc->request.length = 0x0fe0; 198 scssc->request.code = 0x0021; 199 scssc->operation_code = 0; 200 201 scssc->summary_indicator_addr = summary_indicator_addr; 202 scssc->subchannel_indicator_addr = subchannel_indicator_addr; 203 204 scssc->ks = PAGE_DEFAULT_KEY >> 4; 205 scssc->kc = PAGE_DEFAULT_KEY >> 4; 206 scssc->isc = isc; 207 scssc->schid = schid; 208 209 /* enable the time delay disablement facility */ 210 if (css_general_characteristics.aif_tdd) 211 scssc->word_with_d_bit = 0x10000000; 212 213 if (chsc(scssc)) 214 return -EIO; 215 216 return chsc_error_from_response(scssc->response.code); 217 } 218 EXPORT_SYMBOL_GPL(chsc_sadc); 219 220 static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data) 221 { 222 spin_lock_irq(sch->lock); 223 if (sch->driver && sch->driver->chp_event) 224 if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0) 225 goto out_unreg; 226 spin_unlock_irq(sch->lock); 227 return 0; 228 229 out_unreg: 230 sch->lpm = 0; 231 spin_unlock_irq(sch->lock); 232 css_schedule_eval(sch->schid); 233 return 0; 234 } 235 236 void chsc_chp_offline(struct chp_id chpid) 237 { 238 struct channel_path *chp = chpid_to_chp(chpid); 239 struct chp_link link; 240 char dbf_txt[15]; 241 242 sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); 243 CIO_TRACE_EVENT(2, dbf_txt); 244 245 if (chp_get_status(chpid) <= 0) 246 return; 247 memset(&link, 0, sizeof(struct chp_link)); 248 link.chpid = chpid; 249 /* Wait until previous actions have settled. */ 250 css_wait_for_slow_path(); 251 252 mutex_lock(&chp->lock); 253 chp_update_desc(chp); 254 mutex_unlock(&chp->lock); 255 256 for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link); 257 } 258 259 static int __s390_process_res_acc(struct subchannel *sch, void *data) 260 { 261 spin_lock_irq(sch->lock); 262 if (sch->driver && sch->driver->chp_event) 263 sch->driver->chp_event(sch, data, CHP_ONLINE); 264 spin_unlock_irq(sch->lock); 265 266 return 0; 267 } 268 269 static void s390_process_res_acc(struct chp_link *link) 270 { 271 char dbf_txt[15]; 272 273 sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid, 274 link->chpid.id); 275 CIO_TRACE_EVENT( 2, dbf_txt); 276 if (link->fla != 0) { 277 sprintf(dbf_txt, "fla%x", link->fla); 278 CIO_TRACE_EVENT( 2, dbf_txt); 279 } 280 /* Wait until previous actions have settled. */ 281 css_wait_for_slow_path(); 282 /* 283 * I/O resources may have become accessible. 284 * Scan through all subchannels that may be concerned and 285 * do a validation on those. 286 * The more information we have (info), the less scanning 287 * will we have to do. 288 */ 289 for_each_subchannel_staged(__s390_process_res_acc, NULL, link); 290 css_schedule_reprobe(); 291 } 292 293 static int process_fces_event(struct subchannel *sch, void *data) 294 { 295 spin_lock_irq(sch->lock); 296 if (sch->driver && sch->driver->chp_event) 297 sch->driver->chp_event(sch, data, CHP_FCES_EVENT); 298 spin_unlock_irq(sch->lock); 299 return 0; 300 } 301 302 struct chsc_sei_nt0_area { 303 u8 flags; 304 u8 vf; /* validity flags */ 305 u8 rs; /* reporting source */ 306 u8 cc; /* content code */ 307 u16 fla; /* full link address */ 308 u16 rsid; /* reporting source id */ 309 u32 reserved1; 310 u32 reserved2; 311 /* ccdf has to be big enough for a link-incident record */ 312 u8 ccdf[PAGE_SIZE - 24 - 16]; /* content-code dependent field */ 313 } __packed; 314 315 struct chsc_sei_nt2_area { 316 u8 flags; /* p and v bit */ 317 u8 reserved1; 318 u8 reserved2; 319 u8 cc; /* content code */ 320 u32 reserved3[13]; 321 u8 ccdf[PAGE_SIZE - 24 - 56]; /* content-code dependent field */ 322 } __packed; 323 324 #define CHSC_SEI_NT0 (1ULL << 63) 325 #define CHSC_SEI_NT2 (1ULL << 61) 326 327 struct chsc_sei { 328 struct chsc_header request; 329 u32 reserved1; 330 u64 ntsm; /* notification type mask */ 331 struct chsc_header response; 332 u32 :24; 333 u8 nt; 334 union { 335 struct chsc_sei_nt0_area nt0_area; 336 struct chsc_sei_nt2_area nt2_area; 337 u8 nt_area[PAGE_SIZE - 24]; 338 } u; 339 } __packed __aligned(PAGE_SIZE); 340 341 /* 342 * Link Incident Record as defined in SA22-7202, "ESCON I/O Interface" 343 */ 344 345 #define LIR_IQ_CLASS_INFO 0 346 #define LIR_IQ_CLASS_DEGRADED 1 347 #define LIR_IQ_CLASS_NOT_OPERATIONAL 2 348 349 struct lir { 350 struct { 351 u32 null:1; 352 u32 reserved:3; 353 u32 class:2; 354 u32 reserved2:2; 355 } __packed iq; 356 u32 ic:8; 357 u32 reserved:16; 358 struct node_descriptor incident_node; 359 struct node_descriptor attached_node; 360 u8 reserved2[32]; 361 } __packed; 362 363 #define PARAMS_LEN 10 /* PARAMS=xx,xxxxxx */ 364 #define NODEID_LEN 35 /* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */ 365 366 /* Copy EBCIDC text, convert to ASCII and optionally add delimiter. */ 367 static char *store_ebcdic(char *dest, const char *src, unsigned long len, 368 char delim) 369 { 370 memcpy(dest, src, len); 371 EBCASC(dest, len); 372 373 if (delim) 374 dest[len++] = delim; 375 376 return dest + len; 377 } 378 379 static void chsc_link_from_sei(struct chp_link *link, 380 struct chsc_sei_nt0_area *sei_area) 381 { 382 if ((sei_area->vf & SEI_VF_FLA) != 0) { 383 link->fla = sei_area->fla; 384 link->fla_mask = ((sei_area->vf & SEI_VF_FLA) == SEI_VF_FLA) ? 385 0xffff : 0xff00; 386 } 387 } 388 389 /* Format node ID and parameters for output in LIR log message. */ 390 static void format_node_data(char *params, char *id, struct node_descriptor *nd) 391 { 392 memset(params, 0, PARAMS_LEN); 393 memset(id, 0, NODEID_LEN); 394 395 if (nd->validity != ND_VALIDITY_VALID) { 396 strncpy(params, "n/a", PARAMS_LEN - 1); 397 strncpy(id, "n/a", NODEID_LEN - 1); 398 return; 399 } 400 401 /* PARAMS=xx,xxxxxx */ 402 snprintf(params, PARAMS_LEN, "%02x,%06x", nd->byte0, nd->params); 403 /* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */ 404 id = store_ebcdic(id, nd->type, sizeof(nd->type), '/'); 405 id = store_ebcdic(id, nd->model, sizeof(nd->model), ','); 406 id = store_ebcdic(id, nd->manufacturer, sizeof(nd->manufacturer), '.'); 407 id = store_ebcdic(id, nd->plant, sizeof(nd->plant), 0); 408 id = store_ebcdic(id, nd->seq, sizeof(nd->seq), ','); 409 sprintf(id, "%04X", nd->tag); 410 } 411 412 static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area) 413 { 414 struct lir *lir = (struct lir *) &sei_area->ccdf; 415 char iuparams[PARAMS_LEN], iunodeid[NODEID_LEN], auparams[PARAMS_LEN], 416 aunodeid[NODEID_LEN]; 417 418 CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x, iq=%02x)\n", 419 sei_area->rs, sei_area->rsid, sei_area->ccdf[0]); 420 421 /* Ignore NULL Link Incident Records. */ 422 if (lir->iq.null) 423 return; 424 425 /* Inform user that a link requires maintenance actions because it has 426 * become degraded or not operational. Note that this log message is 427 * the primary intention behind a Link Incident Record. */ 428 429 format_node_data(iuparams, iunodeid, &lir->incident_node); 430 format_node_data(auparams, aunodeid, &lir->attached_node); 431 432 switch (lir->iq.class) { 433 case LIR_IQ_CLASS_DEGRADED: 434 pr_warn("Link degraded: RS=%02x RSID=%04x IC=%02x " 435 "IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n", 436 sei_area->rs, sei_area->rsid, lir->ic, iuparams, 437 iunodeid, auparams, aunodeid); 438 break; 439 case LIR_IQ_CLASS_NOT_OPERATIONAL: 440 pr_err("Link stopped: RS=%02x RSID=%04x IC=%02x " 441 "IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n", 442 sei_area->rs, sei_area->rsid, lir->ic, iuparams, 443 iunodeid, auparams, aunodeid); 444 break; 445 default: 446 break; 447 } 448 } 449 450 static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area) 451 { 452 struct channel_path *chp; 453 struct chp_link link; 454 struct chp_id chpid; 455 int status; 456 457 CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " 458 "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); 459 if (sei_area->rs != 4) 460 return; 461 chp_id_init(&chpid); 462 chpid.id = sei_area->rsid; 463 /* allocate a new channel path structure, if needed */ 464 status = chp_get_status(chpid); 465 if (!status) 466 return; 467 468 if (status < 0) { 469 chp_new(chpid); 470 } else { 471 chp = chpid_to_chp(chpid); 472 mutex_lock(&chp->lock); 473 chp_update_desc(chp); 474 mutex_unlock(&chp->lock); 475 } 476 memset(&link, 0, sizeof(struct chp_link)); 477 link.chpid = chpid; 478 chsc_link_from_sei(&link, sei_area); 479 s390_process_res_acc(&link); 480 } 481 482 static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area) 483 { 484 struct channel_path *chp; 485 struct chp_id chpid; 486 u8 *data; 487 int num; 488 489 CIO_CRW_EVENT(4, "chsc: channel path availability information\n"); 490 if (sei_area->rs != 0) 491 return; 492 data = sei_area->ccdf; 493 chp_id_init(&chpid); 494 for (num = 0; num <= __MAX_CHPID; num++) { 495 if (!chp_test_bit(data, num)) 496 continue; 497 chpid.id = num; 498 499 CIO_CRW_EVENT(4, "Update information for channel path " 500 "%x.%02x\n", chpid.cssid, chpid.id); 501 chp = chpid_to_chp(chpid); 502 if (!chp) { 503 chp_new(chpid); 504 continue; 505 } 506 mutex_lock(&chp->lock); 507 chp_update_desc(chp); 508 mutex_unlock(&chp->lock); 509 } 510 } 511 512 struct chp_config_data { 513 u8 map[32]; 514 u8 op; 515 u8 pc; 516 }; 517 518 static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area) 519 { 520 struct chp_config_data *data; 521 struct chp_id chpid; 522 int num; 523 char *events[3] = {"configure", "deconfigure", "cancel deconfigure"}; 524 525 CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n"); 526 if (sei_area->rs != 0) 527 return; 528 data = (struct chp_config_data *) &(sei_area->ccdf); 529 chp_id_init(&chpid); 530 for (num = 0; num <= __MAX_CHPID; num++) { 531 if (!chp_test_bit(data->map, num)) 532 continue; 533 chpid.id = num; 534 pr_notice("Processing %s for channel path %x.%02x\n", 535 events[data->op], chpid.cssid, chpid.id); 536 switch (data->op) { 537 case 0: 538 chp_cfg_schedule(chpid, 1); 539 break; 540 case 1: 541 chp_cfg_schedule(chpid, 0); 542 break; 543 case 2: 544 chp_cfg_cancel_deconfigure(chpid); 545 break; 546 } 547 } 548 } 549 550 static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area) 551 { 552 int ret; 553 554 CIO_CRW_EVENT(4, "chsc: scm change notification\n"); 555 if (sei_area->rs != 7) 556 return; 557 558 ret = scm_update_information(); 559 if (ret) 560 CIO_CRW_EVENT(0, "chsc: updating change notification" 561 " failed (rc=%d).\n", ret); 562 } 563 564 static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area) 565 { 566 int ret; 567 568 CIO_CRW_EVENT(4, "chsc: scm available information\n"); 569 if (sei_area->rs != 7) 570 return; 571 572 ret = scm_process_availability_information(); 573 if (ret) 574 CIO_CRW_EVENT(0, "chsc: process availability information" 575 " failed (rc=%d).\n", ret); 576 } 577 578 static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area) 579 { 580 CIO_CRW_EVENT(3, "chsc: ap config changed\n"); 581 if (sei_area->rs != 5) 582 return; 583 584 ap_bus_cfg_chg(); 585 } 586 587 static void chsc_process_sei_fces_event(struct chsc_sei_nt0_area *sei_area) 588 { 589 struct chp_link link; 590 struct chp_id chpid; 591 struct channel_path *chp; 592 593 CIO_CRW_EVENT(4, 594 "chsc: FCES status notification (rs=%02x, rs_id=%04x, FCES-status=%x)\n", 595 sei_area->rs, sei_area->rsid, sei_area->ccdf[0]); 596 597 if (sei_area->rs != SEI_RS_CHPID) 598 return; 599 chp_id_init(&chpid); 600 chpid.id = sei_area->rsid; 601 602 /* Ignore the event on unknown/invalid chp */ 603 chp = chpid_to_chp(chpid); 604 if (!chp) 605 return; 606 607 memset(&link, 0, sizeof(struct chp_link)); 608 link.chpid = chpid; 609 chsc_link_from_sei(&link, sei_area); 610 611 for_each_subchannel_staged(process_fces_event, NULL, &link); 612 } 613 614 static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) 615 { 616 switch (sei_area->cc) { 617 case 1: 618 zpci_event_error(sei_area->ccdf); 619 break; 620 case 2: 621 zpci_event_availability(sei_area->ccdf); 622 break; 623 default: 624 CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n", 625 sei_area->cc); 626 break; 627 } 628 } 629 630 static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) 631 { 632 /* which kind of information was stored? */ 633 switch (sei_area->cc) { 634 case 1: /* link incident*/ 635 chsc_process_sei_link_incident(sei_area); 636 break; 637 case 2: /* i/o resource accessibility */ 638 chsc_process_sei_res_acc(sei_area); 639 break; 640 case 3: /* ap config changed */ 641 chsc_process_sei_ap_cfg_chg(sei_area); 642 break; 643 case 7: /* channel-path-availability information */ 644 chsc_process_sei_chp_avail(sei_area); 645 break; 646 case 8: /* channel-path-configuration notification */ 647 chsc_process_sei_chp_config(sei_area); 648 break; 649 case 12: /* scm change notification */ 650 chsc_process_sei_scm_change(sei_area); 651 break; 652 case 14: /* scm available notification */ 653 chsc_process_sei_scm_avail(sei_area); 654 break; 655 case 15: /* FCES event notification */ 656 chsc_process_sei_fces_event(sei_area); 657 break; 658 default: /* other stuff */ 659 CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n", 660 sei_area->cc); 661 break; 662 } 663 664 /* Check if we might have lost some information. */ 665 if (sei_area->flags & 0x40) { 666 CIO_CRW_EVENT(2, "chsc: event overflow\n"); 667 css_schedule_eval_all(); 668 } 669 } 670 671 static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm) 672 { 673 static int ntsm_unsupported; 674 675 while (true) { 676 memset(sei, 0, sizeof(*sei)); 677 sei->request.length = 0x0010; 678 sei->request.code = 0x000e; 679 if (!ntsm_unsupported) 680 sei->ntsm = ntsm; 681 682 if (chsc(sei)) 683 break; 684 685 if (sei->response.code != 0x0001) { 686 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n", 687 sei->response.code, sei->ntsm); 688 689 if (sei->response.code == 3 && sei->ntsm) { 690 /* Fallback for old firmware. */ 691 ntsm_unsupported = 1; 692 continue; 693 } 694 break; 695 } 696 697 CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt); 698 switch (sei->nt) { 699 case 0: 700 chsc_process_sei_nt0(&sei->u.nt0_area); 701 break; 702 case 2: 703 chsc_process_sei_nt2(&sei->u.nt2_area); 704 break; 705 default: 706 CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt); 707 break; 708 } 709 710 if (!(sei->u.nt0_area.flags & 0x80)) 711 break; 712 } 713 } 714 715 /* 716 * Handle channel subsystem related CRWs. 717 * Use store event information to find out what's going on. 718 * 719 * Note: Access to sei_page is serialized through machine check handler 720 * thread, so no need for locking. 721 */ 722 static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) 723 { 724 struct chsc_sei *sei = sei_page; 725 726 if (overflow) { 727 css_schedule_eval_all(); 728 return; 729 } 730 CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, " 731 "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", 732 crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc, 733 crw0->erc, crw0->rsid); 734 735 CIO_TRACE_EVENT(2, "prcss"); 736 chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2); 737 } 738 739 void chsc_chp_online(struct chp_id chpid) 740 { 741 struct channel_path *chp = chpid_to_chp(chpid); 742 struct chp_link link; 743 char dbf_txt[15]; 744 745 sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); 746 CIO_TRACE_EVENT(2, dbf_txt); 747 748 if (chp_get_status(chpid) != 0) { 749 memset(&link, 0, sizeof(struct chp_link)); 750 link.chpid = chpid; 751 /* Wait until previous actions have settled. */ 752 css_wait_for_slow_path(); 753 754 mutex_lock(&chp->lock); 755 chp_update_desc(chp); 756 mutex_unlock(&chp->lock); 757 758 for_each_subchannel_staged(__s390_process_res_acc, NULL, 759 &link); 760 css_schedule_reprobe(); 761 } 762 } 763 764 static void __s390_subchannel_vary_chpid(struct subchannel *sch, 765 struct chp_id chpid, int on) 766 { 767 unsigned long flags; 768 struct chp_link link; 769 770 memset(&link, 0, sizeof(struct chp_link)); 771 link.chpid = chpid; 772 spin_lock_irqsave(sch->lock, flags); 773 if (sch->driver && sch->driver->chp_event) 774 sch->driver->chp_event(sch, &link, 775 on ? CHP_VARY_ON : CHP_VARY_OFF); 776 spin_unlock_irqrestore(sch->lock, flags); 777 } 778 779 static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data) 780 { 781 struct chp_id *chpid = data; 782 783 __s390_subchannel_vary_chpid(sch, *chpid, 0); 784 return 0; 785 } 786 787 static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data) 788 { 789 struct chp_id *chpid = data; 790 791 __s390_subchannel_vary_chpid(sch, *chpid, 1); 792 return 0; 793 } 794 795 /** 796 * chsc_chp_vary - propagate channel-path vary operation to subchannels 797 * @chpid: channl-path ID 798 * @on: non-zero for vary online, zero for vary offline 799 */ 800 int chsc_chp_vary(struct chp_id chpid, int on) 801 { 802 struct channel_path *chp = chpid_to_chp(chpid); 803 804 /* 805 * Redo PathVerification on the devices the chpid connects to 806 */ 807 if (on) { 808 /* Try to update the channel path description. */ 809 chp_update_desc(chp); 810 for_each_subchannel_staged(s390_subchannel_vary_chpid_on, 811 NULL, &chpid); 812 css_schedule_reprobe(); 813 } else 814 for_each_subchannel_staged(s390_subchannel_vary_chpid_off, 815 NULL, &chpid); 816 817 return 0; 818 } 819 820 static void 821 chsc_remove_cmg_attr(struct channel_subsystem *css) 822 { 823 int i; 824 825 for (i = 0; i <= __MAX_CHPID; i++) { 826 if (!css->chps[i]) 827 continue; 828 chp_remove_cmg_attr(css->chps[i]); 829 } 830 } 831 832 static int 833 chsc_add_cmg_attr(struct channel_subsystem *css) 834 { 835 int i, ret; 836 837 ret = 0; 838 for (i = 0; i <= __MAX_CHPID; i++) { 839 if (!css->chps[i]) 840 continue; 841 ret = chp_add_cmg_attr(css->chps[i]); 842 if (ret) 843 goto cleanup; 844 } 845 return ret; 846 cleanup: 847 for (--i; i >= 0; i--) { 848 if (!css->chps[i]) 849 continue; 850 chp_remove_cmg_attr(css->chps[i]); 851 } 852 return ret; 853 } 854 855 int __chsc_do_secm(struct channel_subsystem *css, int enable) 856 { 857 struct { 858 struct chsc_header request; 859 u32 operation_code : 2; 860 u32 : 30; 861 u32 key : 4; 862 u32 : 28; 863 u32 zeroes1; 864 u32 cub_addr1; 865 u32 zeroes2; 866 u32 cub_addr2; 867 u32 reserved[13]; 868 struct chsc_header response; 869 u32 status : 8; 870 u32 : 4; 871 u32 fmt : 4; 872 u32 : 16; 873 } *secm_area; 874 unsigned long flags; 875 int ret, ccode; 876 877 spin_lock_irqsave(&chsc_page_lock, flags); 878 memset(chsc_page, 0, PAGE_SIZE); 879 secm_area = chsc_page; 880 secm_area->request.length = 0x0050; 881 secm_area->request.code = 0x0016; 882 883 secm_area->key = PAGE_DEFAULT_KEY >> 4; 884 secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1; 885 secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2; 886 887 secm_area->operation_code = enable ? 0 : 1; 888 889 ccode = chsc(secm_area); 890 if (ccode > 0) { 891 ret = (ccode == 3) ? -ENODEV : -EBUSY; 892 goto out; 893 } 894 895 switch (secm_area->response.code) { 896 case 0x0102: 897 case 0x0103: 898 ret = -EINVAL; 899 break; 900 default: 901 ret = chsc_error_from_response(secm_area->response.code); 902 } 903 if (ret != 0) 904 CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n", 905 secm_area->response.code); 906 out: 907 spin_unlock_irqrestore(&chsc_page_lock, flags); 908 return ret; 909 } 910 911 int 912 chsc_secm(struct channel_subsystem *css, int enable) 913 { 914 int ret; 915 916 if (enable && !css->cm_enabled) { 917 css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 918 css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 919 if (!css->cub_addr1 || !css->cub_addr2) { 920 free_page((unsigned long)css->cub_addr1); 921 free_page((unsigned long)css->cub_addr2); 922 return -ENOMEM; 923 } 924 } 925 ret = __chsc_do_secm(css, enable); 926 if (!ret) { 927 css->cm_enabled = enable; 928 if (css->cm_enabled) { 929 ret = chsc_add_cmg_attr(css); 930 if (ret) { 931 __chsc_do_secm(css, 0); 932 css->cm_enabled = 0; 933 } 934 } else 935 chsc_remove_cmg_attr(css); 936 } 937 if (!css->cm_enabled) { 938 free_page((unsigned long)css->cub_addr1); 939 free_page((unsigned long)css->cub_addr2); 940 } 941 return ret; 942 } 943 944 int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, 945 int c, int m, void *page) 946 { 947 struct chsc_scpd *scpd_area; 948 int ccode, ret; 949 950 if ((rfmt == 1 || rfmt == 0) && c == 1 && 951 !css_general_characteristics.fcs) 952 return -EINVAL; 953 if ((rfmt == 2) && !css_general_characteristics.cib) 954 return -EINVAL; 955 if ((rfmt == 3) && !css_general_characteristics.util_str) 956 return -EINVAL; 957 958 memset(page, 0, PAGE_SIZE); 959 scpd_area = page; 960 scpd_area->request.length = 0x0010; 961 scpd_area->request.code = 0x0002; 962 scpd_area->cssid = chpid.cssid; 963 scpd_area->first_chpid = chpid.id; 964 scpd_area->last_chpid = chpid.id; 965 scpd_area->m = m; 966 scpd_area->c = c; 967 scpd_area->fmt = fmt; 968 scpd_area->rfmt = rfmt; 969 970 ccode = chsc(scpd_area); 971 if (ccode > 0) 972 return (ccode == 3) ? -ENODEV : -EBUSY; 973 974 ret = chsc_error_from_response(scpd_area->response.code); 975 if (ret) 976 CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", 977 scpd_area->response.code); 978 return ret; 979 } 980 EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc); 981 982 #define chsc_det_chp_desc(FMT, c) \ 983 int chsc_determine_fmt##FMT##_channel_path_desc( \ 984 struct chp_id chpid, struct channel_path_desc_fmt##FMT *desc) \ 985 { \ 986 struct chsc_scpd *scpd_area; \ 987 unsigned long flags; \ 988 int ret; \ 989 \ 990 spin_lock_irqsave(&chsc_page_lock, flags); \ 991 scpd_area = chsc_page; \ 992 ret = chsc_determine_channel_path_desc(chpid, 0, FMT, c, 0, \ 993 scpd_area); \ 994 if (ret) \ 995 goto out; \ 996 \ 997 memcpy(desc, scpd_area->data, sizeof(*desc)); \ 998 out: \ 999 spin_unlock_irqrestore(&chsc_page_lock, flags); \ 1000 return ret; \ 1001 } 1002 1003 chsc_det_chp_desc(0, 0) 1004 chsc_det_chp_desc(1, 1) 1005 chsc_det_chp_desc(3, 0) 1006 1007 static void 1008 chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, 1009 struct cmg_chars *chars) 1010 { 1011 int i, mask; 1012 1013 for (i = 0; i < NR_MEASUREMENT_CHARS; i++) { 1014 mask = 0x80 >> (i + 3); 1015 if (cmcv & mask) 1016 chp->cmg_chars.values[i] = chars->values[i]; 1017 else 1018 chp->cmg_chars.values[i] = 0; 1019 } 1020 } 1021 1022 int chsc_get_channel_measurement_chars(struct channel_path *chp) 1023 { 1024 unsigned long flags; 1025 int ccode, ret; 1026 1027 struct { 1028 struct chsc_header request; 1029 u32 : 24; 1030 u32 first_chpid : 8; 1031 u32 : 24; 1032 u32 last_chpid : 8; 1033 u32 zeroes1; 1034 struct chsc_header response; 1035 u32 zeroes2; 1036 u32 not_valid : 1; 1037 u32 shared : 1; 1038 u32 : 22; 1039 u32 chpid : 8; 1040 u32 cmcv : 5; 1041 u32 : 11; 1042 u32 cmgq : 8; 1043 u32 cmg : 8; 1044 u32 zeroes3; 1045 u32 data[NR_MEASUREMENT_CHARS]; 1046 } *scmc_area; 1047 1048 chp->shared = -1; 1049 chp->cmg = -1; 1050 1051 if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm) 1052 return -EINVAL; 1053 1054 spin_lock_irqsave(&chsc_page_lock, flags); 1055 memset(chsc_page, 0, PAGE_SIZE); 1056 scmc_area = chsc_page; 1057 scmc_area->request.length = 0x0010; 1058 scmc_area->request.code = 0x0022; 1059 scmc_area->first_chpid = chp->chpid.id; 1060 scmc_area->last_chpid = chp->chpid.id; 1061 1062 ccode = chsc(scmc_area); 1063 if (ccode > 0) { 1064 ret = (ccode == 3) ? -ENODEV : -EBUSY; 1065 goto out; 1066 } 1067 1068 ret = chsc_error_from_response(scmc_area->response.code); 1069 if (ret) { 1070 CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n", 1071 scmc_area->response.code); 1072 goto out; 1073 } 1074 if (scmc_area->not_valid) 1075 goto out; 1076 1077 chp->cmg = scmc_area->cmg; 1078 chp->shared = scmc_area->shared; 1079 if (chp->cmg != 2 && chp->cmg != 3) { 1080 /* No cmg-dependent data. */ 1081 goto out; 1082 } 1083 chsc_initialize_cmg_chars(chp, scmc_area->cmcv, 1084 (struct cmg_chars *) &scmc_area->data); 1085 out: 1086 spin_unlock_irqrestore(&chsc_page_lock, flags); 1087 return ret; 1088 } 1089 1090 int __init chsc_init(void) 1091 { 1092 int ret; 1093 1094 sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 1095 chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 1096 if (!sei_page || !chsc_page) { 1097 ret = -ENOMEM; 1098 goto out_err; 1099 } 1100 ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw); 1101 if (ret) 1102 goto out_err; 1103 return ret; 1104 out_err: 1105 free_page((unsigned long)chsc_page); 1106 free_page((unsigned long)sei_page); 1107 return ret; 1108 } 1109 1110 void __init chsc_init_cleanup(void) 1111 { 1112 crw_unregister_handler(CRW_RSC_CSS); 1113 free_page((unsigned long)chsc_page); 1114 free_page((unsigned long)sei_page); 1115 } 1116 1117 int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code) 1118 { 1119 int ret; 1120 1121 sda_area->request.length = 0x0400; 1122 sda_area->request.code = 0x0031; 1123 sda_area->operation_code = operation_code; 1124 1125 ret = chsc(sda_area); 1126 if (ret > 0) { 1127 ret = (ret == 3) ? -ENODEV : -EBUSY; 1128 goto out; 1129 } 1130 1131 switch (sda_area->response.code) { 1132 case 0x0101: 1133 ret = -EOPNOTSUPP; 1134 break; 1135 default: 1136 ret = chsc_error_from_response(sda_area->response.code); 1137 } 1138 out: 1139 return ret; 1140 } 1141 1142 int chsc_enable_facility(int operation_code) 1143 { 1144 struct chsc_sda_area *sda_area; 1145 unsigned long flags; 1146 int ret; 1147 1148 spin_lock_irqsave(&chsc_page_lock, flags); 1149 memset(chsc_page, 0, PAGE_SIZE); 1150 sda_area = chsc_page; 1151 1152 ret = __chsc_enable_facility(sda_area, operation_code); 1153 if (ret != 0) 1154 CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", 1155 operation_code, sda_area->response.code); 1156 1157 spin_unlock_irqrestore(&chsc_page_lock, flags); 1158 return ret; 1159 } 1160 1161 int __init chsc_get_cssid_iid(int idx, u8 *cssid, u8 *iid) 1162 { 1163 struct { 1164 struct chsc_header request; 1165 u8 atype; 1166 u32 : 24; 1167 u32 reserved1[6]; 1168 struct chsc_header response; 1169 u32 reserved2[3]; 1170 struct { 1171 u8 cssid; 1172 u8 iid; 1173 u32 : 16; 1174 } list[]; 1175 } *sdcal_area; 1176 int ret; 1177 1178 spin_lock_irq(&chsc_page_lock); 1179 memset(chsc_page, 0, PAGE_SIZE); 1180 sdcal_area = chsc_page; 1181 sdcal_area->request.length = 0x0020; 1182 sdcal_area->request.code = 0x0034; 1183 sdcal_area->atype = 4; 1184 1185 ret = chsc(sdcal_area); 1186 if (ret) { 1187 ret = (ret == 3) ? -ENODEV : -EBUSY; 1188 goto exit; 1189 } 1190 1191 ret = chsc_error_from_response(sdcal_area->response.code); 1192 if (ret) { 1193 CIO_CRW_EVENT(2, "chsc: sdcal failed (rc=%04x)\n", 1194 sdcal_area->response.code); 1195 goto exit; 1196 } 1197 1198 if ((addr_t) &sdcal_area->list[idx] < 1199 (addr_t) &sdcal_area->response + sdcal_area->response.length) { 1200 *cssid = sdcal_area->list[idx].cssid; 1201 *iid = sdcal_area->list[idx].iid; 1202 } 1203 else 1204 ret = -ENODEV; 1205 exit: 1206 spin_unlock_irq(&chsc_page_lock); 1207 return ret; 1208 } 1209 1210 struct css_general_char css_general_characteristics; 1211 struct css_chsc_char css_chsc_characteristics; 1212 1213 int __init 1214 chsc_determine_css_characteristics(void) 1215 { 1216 unsigned long flags; 1217 int result; 1218 struct { 1219 struct chsc_header request; 1220 u32 reserved1; 1221 u32 reserved2; 1222 u32 reserved3; 1223 struct chsc_header response; 1224 u32 reserved4; 1225 u32 general_char[510]; 1226 u32 chsc_char[508]; 1227 } *scsc_area; 1228 1229 spin_lock_irqsave(&chsc_page_lock, flags); 1230 memset(chsc_page, 0, PAGE_SIZE); 1231 scsc_area = chsc_page; 1232 scsc_area->request.length = 0x0010; 1233 scsc_area->request.code = 0x0010; 1234 1235 result = chsc(scsc_area); 1236 if (result) { 1237 result = (result == 3) ? -ENODEV : -EBUSY; 1238 goto exit; 1239 } 1240 1241 result = chsc_error_from_response(scsc_area->response.code); 1242 if (result == 0) { 1243 memcpy(&css_general_characteristics, scsc_area->general_char, 1244 sizeof(css_general_characteristics)); 1245 memcpy(&css_chsc_characteristics, scsc_area->chsc_char, 1246 sizeof(css_chsc_characteristics)); 1247 } else 1248 CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n", 1249 scsc_area->response.code); 1250 exit: 1251 spin_unlock_irqrestore(&chsc_page_lock, flags); 1252 return result; 1253 } 1254 1255 EXPORT_SYMBOL_GPL(css_general_characteristics); 1256 EXPORT_SYMBOL_GPL(css_chsc_characteristics); 1257 1258 int chsc_sstpc(void *page, unsigned int op, u16 ctrl, long *clock_delta) 1259 { 1260 struct { 1261 struct chsc_header request; 1262 unsigned int rsvd0; 1263 unsigned int op : 8; 1264 unsigned int rsvd1 : 8; 1265 unsigned int ctrl : 16; 1266 unsigned int rsvd2[5]; 1267 struct chsc_header response; 1268 unsigned int rsvd3[3]; 1269 s64 clock_delta; 1270 unsigned int rsvd4[2]; 1271 } *rr; 1272 int rc; 1273 1274 memset(page, 0, PAGE_SIZE); 1275 rr = page; 1276 rr->request.length = 0x0020; 1277 rr->request.code = 0x0033; 1278 rr->op = op; 1279 rr->ctrl = ctrl; 1280 rc = chsc(rr); 1281 if (rc) 1282 return -EIO; 1283 rc = (rr->response.code == 0x0001) ? 0 : -EIO; 1284 if (clock_delta) 1285 *clock_delta = rr->clock_delta; 1286 return rc; 1287 } 1288 1289 int chsc_sstpi(void *page, void *result, size_t size) 1290 { 1291 struct { 1292 struct chsc_header request; 1293 unsigned int rsvd0[3]; 1294 struct chsc_header response; 1295 char data[]; 1296 } *rr; 1297 int rc; 1298 1299 memset(page, 0, PAGE_SIZE); 1300 rr = page; 1301 rr->request.length = 0x0010; 1302 rr->request.code = 0x0038; 1303 rc = chsc(rr); 1304 if (rc) 1305 return -EIO; 1306 memcpy(result, &rr->data, size); 1307 return (rr->response.code == 0x0001) ? 0 : -EIO; 1308 } 1309 1310 int chsc_stzi(void *page, void *result, size_t size) 1311 { 1312 struct { 1313 struct chsc_header request; 1314 unsigned int rsvd0[3]; 1315 struct chsc_header response; 1316 char data[]; 1317 } *rr; 1318 int rc; 1319 1320 memset(page, 0, PAGE_SIZE); 1321 rr = page; 1322 rr->request.length = 0x0010; 1323 rr->request.code = 0x003e; 1324 rc = chsc(rr); 1325 if (rc) 1326 return -EIO; 1327 memcpy(result, &rr->data, size); 1328 return (rr->response.code == 0x0001) ? 0 : -EIO; 1329 } 1330 1331 int chsc_siosl(struct subchannel_id schid) 1332 { 1333 struct { 1334 struct chsc_header request; 1335 u32 word1; 1336 struct subchannel_id sid; 1337 u32 word3; 1338 struct chsc_header response; 1339 u32 word[11]; 1340 } *siosl_area; 1341 unsigned long flags; 1342 int ccode; 1343 int rc; 1344 1345 spin_lock_irqsave(&chsc_page_lock, flags); 1346 memset(chsc_page, 0, PAGE_SIZE); 1347 siosl_area = chsc_page; 1348 siosl_area->request.length = 0x0010; 1349 siosl_area->request.code = 0x0046; 1350 siosl_area->word1 = 0x80000000; 1351 siosl_area->sid = schid; 1352 1353 ccode = chsc(siosl_area); 1354 if (ccode > 0) { 1355 if (ccode == 3) 1356 rc = -ENODEV; 1357 else 1358 rc = -EBUSY; 1359 CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n", 1360 schid.ssid, schid.sch_no, ccode); 1361 goto out; 1362 } 1363 rc = chsc_error_from_response(siosl_area->response.code); 1364 if (rc) 1365 CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n", 1366 schid.ssid, schid.sch_no, 1367 siosl_area->response.code); 1368 else 1369 CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n", 1370 schid.ssid, schid.sch_no); 1371 out: 1372 spin_unlock_irqrestore(&chsc_page_lock, flags); 1373 return rc; 1374 } 1375 EXPORT_SYMBOL_GPL(chsc_siosl); 1376 1377 /** 1378 * chsc_scm_info() - store SCM information (SSI) 1379 * @scm_area: request and response block for SSI 1380 * @token: continuation token 1381 * 1382 * Returns 0 on success. 1383 */ 1384 int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token) 1385 { 1386 int ccode, ret; 1387 1388 memset(scm_area, 0, sizeof(*scm_area)); 1389 scm_area->request.length = 0x0020; 1390 scm_area->request.code = 0x004C; 1391 scm_area->reqtok = token; 1392 1393 ccode = chsc(scm_area); 1394 if (ccode > 0) { 1395 ret = (ccode == 3) ? -ENODEV : -EBUSY; 1396 goto out; 1397 } 1398 ret = chsc_error_from_response(scm_area->response.code); 1399 if (ret != 0) 1400 CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n", 1401 scm_area->response.code); 1402 out: 1403 return ret; 1404 } 1405 EXPORT_SYMBOL_GPL(chsc_scm_info); 1406 1407 /** 1408 * chsc_pnso() - Perform Network-Subchannel Operation 1409 * @schid: id of the subchannel on which PNSO is performed 1410 * @pnso_area: request and response block for the operation 1411 * @oc: Operation Code 1412 * @resume_token: resume token for multiblock response 1413 * @cnc: Boolean change-notification control 1414 * 1415 * pnso_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL) 1416 * 1417 * Returns 0 on success. 1418 */ 1419 int chsc_pnso(struct subchannel_id schid, struct chsc_pnso_area *pnso_area, 1420 u8 oc, struct chsc_pnso_resume_token resume_token, int cnc) 1421 { 1422 memset(pnso_area, 0, sizeof(*pnso_area)); 1423 pnso_area->request.length = 0x0030; 1424 pnso_area->request.code = 0x003d; /* network-subchannel operation */ 1425 pnso_area->m = schid.m; 1426 pnso_area->ssid = schid.ssid; 1427 pnso_area->sch = schid.sch_no; 1428 pnso_area->cssid = schid.cssid; 1429 pnso_area->oc = oc; 1430 pnso_area->resume_token = resume_token; 1431 pnso_area->n = (cnc != 0); 1432 if (chsc(pnso_area)) 1433 return -EIO; 1434 return chsc_error_from_response(pnso_area->response.code); 1435 } 1436 1437 int chsc_sgib(u32 origin) 1438 { 1439 struct { 1440 struct chsc_header request; 1441 u16 op; 1442 u8 reserved01[2]; 1443 u8 reserved02:4; 1444 u8 fmt:4; 1445 u8 reserved03[7]; 1446 /* operation data area begin */ 1447 u8 reserved04[4]; 1448 u32 gib_origin; 1449 u8 reserved05[10]; 1450 u8 aix; 1451 u8 reserved06[4029]; 1452 struct chsc_header response; 1453 u8 reserved07[4]; 1454 } *sgib_area; 1455 int ret; 1456 1457 spin_lock_irq(&chsc_page_lock); 1458 memset(chsc_page, 0, PAGE_SIZE); 1459 sgib_area = chsc_page; 1460 sgib_area->request.length = 0x0fe0; 1461 sgib_area->request.code = 0x0021; 1462 sgib_area->op = 0x1; 1463 sgib_area->gib_origin = origin; 1464 1465 ret = chsc(sgib_area); 1466 if (ret == 0) 1467 ret = chsc_error_from_response(sgib_area->response.code); 1468 spin_unlock_irq(&chsc_page_lock); 1469 1470 return ret; 1471 } 1472 EXPORT_SYMBOL_GPL(chsc_sgib); 1473 1474 #define SCUD_REQ_LEN 0x10 /* SCUD request block length */ 1475 #define SCUD_REQ_CMD 0x4b /* SCUD Command Code */ 1476 1477 struct chse_cudb { 1478 u16 flags:8; 1479 u16 chp_valid:8; 1480 u16 cu; 1481 u32 esm_valid:8; 1482 u32:24; 1483 u8 chpid[8]; 1484 u32:32; 1485 u32:32; 1486 u8 esm[8]; 1487 u32 efla[8]; 1488 } __packed; 1489 1490 struct chsc_scud { 1491 struct chsc_header request; 1492 u16:4; 1493 u16 fmt:4; 1494 u16 cssid:8; 1495 u16 first_cu; 1496 u16:16; 1497 u16 last_cu; 1498 u32:32; 1499 struct chsc_header response; 1500 u16:4; 1501 u16 fmt_resp:4; 1502 u32:24; 1503 struct chse_cudb cudb[]; 1504 } __packed; 1505 1506 /** 1507 * chsc_scud() - Store control-unit description. 1508 * @cu: number of the control-unit 1509 * @esm: 8 1-byte endpoint security mode values 1510 * @esm_valid: validity mask for @esm 1511 * 1512 * Interface to retrieve information about the endpoint security 1513 * modes for up to 8 paths of a control unit. 1514 * 1515 * Returns 0 on success. 1516 */ 1517 int chsc_scud(u16 cu, u64 *esm, u8 *esm_valid) 1518 { 1519 struct chsc_scud *scud = chsc_page; 1520 int ret; 1521 1522 spin_lock_irq(&chsc_page_lock); 1523 memset(chsc_page, 0, PAGE_SIZE); 1524 scud->request.length = SCUD_REQ_LEN; 1525 scud->request.code = SCUD_REQ_CMD; 1526 scud->fmt = 0; 1527 scud->cssid = 0; 1528 scud->first_cu = cu; 1529 scud->last_cu = cu; 1530 1531 ret = chsc(scud); 1532 if (!ret) 1533 ret = chsc_error_from_response(scud->response.code); 1534 1535 if (!ret && (scud->response.length <= 8 || scud->fmt_resp != 0 1536 || !(scud->cudb[0].flags & 0x80) 1537 || scud->cudb[0].cu != cu)) { 1538 1539 CIO_MSG_EVENT(2, "chsc: scud failed rc=%04x, L2=%04x " 1540 "FMT=%04x, cudb.flags=%02x, cudb.cu=%04x", 1541 scud->response.code, scud->response.length, 1542 scud->fmt_resp, scud->cudb[0].flags, scud->cudb[0].cu); 1543 ret = -EINVAL; 1544 } 1545 1546 if (ret) 1547 goto out; 1548 1549 memcpy(esm, scud->cudb[0].esm, sizeof(*esm)); 1550 *esm_valid = scud->cudb[0].esm_valid; 1551 out: 1552 spin_unlock_irq(&chsc_page_lock); 1553 return ret; 1554 } 1555 EXPORT_SYMBOL_GPL(chsc_scud); 1556