/*
 * Copyright IBM Corp. 2007, 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/platform_device.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include <asm/setup.h>

#include "sclp.h"

#define SCLP_CMDW_READ_SCP_INFO		0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001

struct read_info_sccb {
	struct	sccb_header header;	/* 0-7 */
	u16	rnmax;			/* 8-9 */
	u8	rnsize;			/* 10 */
	u8	_reserved0[24 - 11];	/* 11-23 */
	u8	loadparm[8];		/* 24-31 */
	u8	_reserved1[48 - 32];	/* 32-47 */
	u64	facilities;		/* 48-55 */
	u8	_reserved2[84 - 56];	/* 56-83 */
	u8	fac84;			/* 84 */
	u8	_reserved3[91 - 85];	/* 85-90 */
	u8	flags;			/* 91 */
	u8	_reserved4[100 - 92];	/* 92-99 */
	u32	rnsize2;		/* 100-103 */
	u64	rnmax2;			/* 104-111 */
	u8	_reserved5[4096 - 112];	/* 112-4095 */
} __attribute__((packed, aligned(PAGE_SIZE)));

static struct read_info_sccb __initdata early_read_info_sccb;
static int __initdata early_read_info_sccb_valid;

u64 sclp_facilities;
static u8 sclp_fac84;
static unsigned long long rzm;
static unsigned long long rnmax;

static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	__ctl_set_bit(0, 9);
	rc = sclp_service_call(cmd, sccb);
	if (rc)
		goto out;
	__load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
			PSW_MASK_WAIT | PSW_DEFAULT_KEY);
	local_irq_disable();
out:
	/* Contents of the sccb might have changed. */
	barrier();
	__ctl_clear_bit(0, 9);
	return rc;
}

static void __init sclp_read_info_early(void)
{
	int rc;
	int i;
	struct read_info_sccb *sccb;
	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
				  SCLP_CMDW_READ_SCP_INFO};

	sccb = &early_read_info_sccb;
	for (i = 0; i < ARRAY_SIZE(commands); i++) {
		do {
			memset(sccb, 0, sizeof(*sccb));
			sccb->header.length = sizeof(*sccb);
			sccb->header.function_code = 0x80;
			sccb->header.control_mask[2] = 0x80;
			rc = sclp_cmd_sync_early(commands[i], sccb);
		} while (rc == -EBUSY);

		if (rc)
			break;
		if (sccb->header.response_code == 0x10) {
			early_read_info_sccb_valid = 1;
			break;
		}
		if (sccb->header.response_code != 0x1f0)
			break;
	}
}

void __init sclp_facilities_detect(void)
{
	struct read_info_sccb *sccb;

	sclp_read_info_early();
	if (!early_read_info_sccb_valid)
		return;

	sccb = &early_read_info_sccb;
	sclp_facilities = sccb->facilities;
	sclp_fac84 = sccb->fac84;
	rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
	rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
	rzm <<= 20;
}

unsigned long long sclp_get_rnmax(void)
{
	return rnmax;
}

unsigned long long sclp_get_rzm(void)
{
	return rzm;
}
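
/*
 * Note on the two values above: the SCLP reports memory in units of
 * storage increments. rzm is the size of one increment in bytes (the
 * interface reports it in megabytes, hence the shift by 20 in
 * sclp_facilities_detect()), and rnmax is the highest increment number.
 * Increment numbers start at 1, so increment rn covers the address range
 * [(rn - 1) * rzm, rn * rzm); with rzm = 256 MB, for example, increment 3
 * covers 0x20000000 - 0x2fffffff. See also rn2addr() below.
 */
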
/*
 * This function will be called after sclp_facilities_detect(), which gets
 * called from early.c code. Therefore the sccb should have valid contents.
 */
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
	struct read_info_sccb *sccb;

	if (!early_read_info_sccb_valid)
		return;
	sccb = &early_read_info_sccb;
	info->is_valid = 1;
	if (sccb->flags & 0x2)
		info->has_dump = 1;
	memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
}

static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

	complete(completion);
}

static int do_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		pr_warning("sync request failed (cmd=0x%08x, "
			   "status=0x%02x)\n", cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}
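
/*
 * Note that do_sync_request() only tells the caller whether the request
 * itself completed (SCLP_REQ_DONE); the response code in the sccb header
 * still has to be checked by each caller, since the accepted values
 * differ per command (e.g. 0x0010 for the read commands below,
 * 0x0020/0x0120 for the configure and assign commands).
 */
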
/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_READ_CPU_INFO		0x00010001
#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001

struct read_cpu_info_sccb {
	struct	sccb_header header;
	u16	nr_configured;
	u16	offset_configured;
	u16	nr_standby;
	u16	offset_standby;
	u8	reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));

static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
			       struct read_cpu_info_sccb *sccb)
{
	char *page = (char *) sccb;

	memset(info, 0, sizeof(*info));
	info->configured = sccb->nr_configured;
	info->standby = sccb->nr_standby;
	info->combined = sccb->nr_configured + sccb->nr_standby;
	info->has_cpu_type = sclp_fac84 & 0x1;
	memcpy(&info->cpu, page + sccb->offset_configured,
	       info->combined * sizeof(struct sclp_cpu_entry));
}

int sclp_get_cpu_info(struct sclp_cpu_info *info)
{
	int rc;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;
	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warning("readcpuinfo failed (response=0x%04x)\n",
			   sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_cpu_info(info, sccb);
out:
	free_page((unsigned long) sccb);
	return rc;
}

struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));

static int do_cpu_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warning("configure cpu failed (cmd=0x%08x, "
			   "response=0x%04x)\n", cmd,
			   sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}

int sclp_cpu_configure(u8 cpu)
{
	return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
}

int sclp_cpu_deconfigure(u8 cpu)
{
	return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
}

#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
static int sclp_mem_state_changed;

struct memory_increment {
	struct list_head list;
	u16 rn;
	int standby;
	int usecount;
};

struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;

static unsigned long long rn2addr(u16 rn)
{
	return (unsigned long long) (rn - 1) * rzm;
}

static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warning("assign storage failed (cmd=0x%08x, "
			   "response=0x%04x, rn=0x%04x)\n", cmd,
			   sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

static int sclp_assign_storage(u16 rn)
{
	return do_assign_storage(0x000d0001, rn);
}

static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(0x000c0001, rn);
}

struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[0];
} __packed;

static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	rc = do_sync_request(0x00080001 | id << 8, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++)
			sclp_unassign_storage(sccb->entries[i] >> 16);
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
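
/*
 * A memory section passed in by the memory hotplug notifier may span
 * several storage increments, and one increment may back more than one
 * section. sclp_mem_change_state() therefore walks the rn-sorted
 * increment list and uses the per-increment usecount to assign an
 * increment for its first user and to unassign it only when the last
 * user goes away.
 */
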
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + rzm - 1)
			continue;
		if (online) {
			if (incr->usecount++)
				continue;
			/*
			 * Don't break the loop if one assign fails. The loop
			 * may be walked again on CANCEL and we can't tell
			 * whether the state changed before or not. So
			 * continue and increase the usecount for all
			 * increments.
			 */
			rc |= sclp_assign_storage(incr->rn);
		} else {
			if (--incr->usecount)
				continue;
			sclp_unassign_storage(incr->rn);
		}
	}
	return rc ? -EIO : 0;
}

static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	for (id = 0; id <= sclp_max_storage_id; id++)
		if (!test_bit(id, sclp_storage_ids))
			sclp_attach_storage(id);
	switch (action) {
	case MEM_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	case MEM_GOING_ONLINE:
		rc = sclp_mem_change_state(start, size, 1);
		break;
	case MEM_CANCEL_ONLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	case MEM_OFFLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	if (!rc)
		sclp_mem_state_changed = 1;
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};

static void __init add_memory_merged(u16 rn)
{
	static u16 first_rn, num;
	unsigned long long start, size;

	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * rzm;
	if (start >= VMEM_MAX_PHYS)
		goto skip_add;
	if (start + size > VMEM_MAX_PHYS)
		size = VMEM_MAX_PHYS - start;
	if (memory_end_set && (start >= memory_end))
		goto skip_add;
	if (memory_end_set && (start + size > memory_end))
		size = memory_end - start;
	add_memory(0, start, size);
skip_add:
	first_rn = rn;
	num = 1;
}

static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}

static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}
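
/*
 * Hibernation is refused once the memory notifier has changed the assign
 * state of any storage increment at runtime: sclp_mem_notifier() sets
 * sclp_mem_state_changed, which the freeze callback below checks.
 */
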
static int sclp_mem_freeze(struct device *dev)
{
	if (!sclp_mem_state_changed)
		return 0;
	pr_err("Memory hotplug state changed, suspend refused.\n");
	return -EPERM;
}

struct read_storage_sccb {
	struct sccb_header header;
	u16 max_id;
	u16 assigned;
	u16 standby;
	u16 :16;
	u32 entries[0];
} __packed;

static const struct dev_pm_ops sclp_mem_pm_ops = {
	.freeze		= sclp_mem_freeze,
};

static struct platform_driver sclp_mem_pdrv = {
	.driver = {
		.name	= "sclp_mem",
		.pm	= &sclp_mem_pm_ops,
	},
};

static int __init sclp_detect_standby_memory(void)
{
	struct platform_device *sclp_pdev;
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (!early_read_info_sccb_valid)
		return 0;
	if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = do_sync_request(0x00040001 | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	for (i = 1; i <= rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	rc = platform_driver_register(&sclp_mem_pdrv);
	if (rc)
		goto out;
	sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
	rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
	if (rc)
		goto out_driver;
	sclp_add_standby_memory();
	goto out;
out_driver:
	platform_driver_unregister(&sclp_mem_pdrv);
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001

struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
	case 0x0440:
	case 0x0450:
		break;
	default:
		pr_warning("configure channel-path failed "
			   "(cmd=0x%08x, response=0x%04x)\n", cmd,
			   sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
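
/*
 * As with CPU reconfiguration above, the target channel path is folded
 * into the command word itself (cmd | chpid.id << 8): configuring
 * channel path 0x42, for example, issues command word 0x000f4201.
 */
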
/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the configure channel-path sclp command for the specified chpid
 * and wait for completion. Return 0 on success, non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}

/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the deconfigure channel-path sclp command for the specified chpid
 * and wait for completion. Return 0 on success, non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}

struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform read channel-path information sclp command and wait for completion.
 * On success, store channel-path information in @info and return 0. Return
 * non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
	struct chp_info_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_INFO)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warning("read channel-path info failed "
			   "(response=0x%04x)\n", sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
	free_page((unsigned long) sccb);
	return rc;
}