/*
 * drivers/s390/char/sclp_cmd.c
 *
 * Copyright IBM Corp. 2007
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include "sclp.h"

#define TAG	"sclp_cmd: "

#define SCLP_CMDW_READ_SCP_INFO		0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001

struct read_info_sccb {
	struct sccb_header header;	/* 0-7 */
	u16	rnmax;			/* 8-9 */
	u8	rnsize;			/* 10 */
	u8	_reserved0[24 - 11];	/* 11-23 */
	u8	loadparm[8];		/* 24-31 */
	u8	_reserved1[48 - 32];	/* 32-47 */
	u64	facilities;		/* 48-55 */
	u8	_reserved2[84 - 56];	/* 56-83 */
	u8	fac84;			/* 84 */
	u8	_reserved3[91 - 85];	/* 85-90 */
	u8	flags;			/* 91 */
	u8	_reserved4[100 - 92];	/* 92-99 */
	u32	rnsize2;		/* 100-103 */
	u64	rnmax2;			/* 104-111 */
	u8	_reserved5[4096 - 112];	/* 112-4095 */
} __attribute__((packed, aligned(PAGE_SIZE)));

static struct read_info_sccb __initdata early_read_info_sccb;
static int __initdata early_read_info_sccb_valid;

u64 sclp_facilities;
static u8 sclp_fac84;
static unsigned long long rzm;
static unsigned long long rnmax;

static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	__ctl_set_bit(0, 9);
	rc = sclp_service_call(cmd, sccb);
	if (rc)
		goto out;
	__load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
			PSW_MASK_WAIT | PSW_DEFAULT_KEY);
	local_irq_disable();
out:
	/* Contents of the sccb might have changed. */
	barrier();
	__ctl_clear_bit(0, 9);
	return rc;
}

static void __init sclp_read_info_early(void)
{
	int rc;
	int i;
	struct read_info_sccb *sccb;
	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
				  SCLP_CMDW_READ_SCP_INFO};

	sccb = &early_read_info_sccb;
	for (i = 0; i < ARRAY_SIZE(commands); i++) {
		do {
			memset(sccb, 0, sizeof(*sccb));
			sccb->header.length = sizeof(*sccb);
			sccb->header.control_mask[2] = 0x80;
			rc = sclp_cmd_sync_early(commands[i], sccb);
		} while (rc == -EBUSY);

		if (rc)
			break;
		if (sccb->header.response_code == 0x10) {
			early_read_info_sccb_valid = 1;
			break;
		}
		if (sccb->header.response_code != 0x1f0)
			break;
	}
}

void __init sclp_facilities_detect(void)
{
	struct read_info_sccb *sccb;

	sclp_read_info_early();
	if (!early_read_info_sccb_valid)
		return;

	sccb = &early_read_info_sccb;
	sclp_facilities = sccb->facilities;
	sclp_fac84 = sccb->fac84;
	rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
	rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
	rzm <<= 20;
}

unsigned long long sclp_get_rnmax(void)
{
	return rnmax;
}

unsigned long long sclp_get_rzm(void)
{
	return rzm;
}
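
/*
 * Note on the two values above: rzm is the storage increment size in
 * bytes (rnsize/rnsize2 are reported in MB, hence the shift by 20 in
 * sclp_facilities_detect()) and rnmax is the number of the highest
 * storage increment. The maximum amount of addressable storage thus
 * follows as rnmax * rzm; e.g. rnmax = 512 with rzm = 256MB yields
 * 128GB.
 */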

/*
 * This function will be called after sclp_facilities_detect(), which gets
 * called from early.c code. Therefore the sccb should have valid contents.
 */
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
	struct read_info_sccb *sccb;

	if (!early_read_info_sccb_valid)
		return;
	sccb = &early_read_info_sccb;
	info->is_valid = 1;
	if (sccb->flags & 0x2)
		info->has_dump = 1;
	memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
}

static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

	complete(completion);
}

static int do_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		printk(KERN_WARNING TAG "sync request failed "
		       "(cmd=0x%08x, status=0x%02x)\n", cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}
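
/*
 * All callers of do_sync_request() below follow the same pattern
 * (sketch, assuming a single 4K sccb suffices for the command at hand):
 *
 *	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 *	sccb->header.length = PAGE_SIZE;
 *	rc = do_sync_request(cmd, sccb);
 *	if (!rc && sccb->header.response_code != <expected>)
 *		rc = -EIO;
 *	free_page((unsigned long) sccb);
 *
 * The sccb must reside below 2GB (GFP_DMA on s390) and must not cross a
 * page boundary, which a freshly allocated page guarantees.
 */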

/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_READ_CPU_INFO		0x00010001
#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001

struct read_cpu_info_sccb {
	struct sccb_header header;
	u16	nr_configured;
	u16	offset_configured;
	u16	nr_standby;
	u16	offset_standby;
	u8	reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));

static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
			       struct read_cpu_info_sccb *sccb)
{
	char *page = (char *) sccb;

	memset(info, 0, sizeof(*info));
	info->configured = sccb->nr_configured;
	info->standby = sccb->nr_standby;
	info->combined = sccb->nr_configured + sccb->nr_standby;
	info->has_cpu_type = sclp_fac84 & 0x1;
	memcpy(&info->cpu, page + sccb->offset_configured,
	       info->combined * sizeof(struct sclp_cpu_entry));
}

int sclp_get_cpu_info(struct sclp_cpu_info *info)
{
	int rc;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;
	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		printk(KERN_WARNING TAG "readcpuinfo failed "
		       "(response=0x%04x)\n", sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_cpu_info(info, sccb);
out:
	free_page((unsigned long) sccb);
	return rc;
}

struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));

static int do_cpu_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		printk(KERN_WARNING TAG "configure cpu failed (cmd=0x%08x, "
		       "response=0x%04x)\n", cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}

int sclp_cpu_configure(u8 cpu)
{
	return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
}

int sclp_cpu_deconfigure(u8 cpu)
{
	return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
}
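
/*
 * Command word layout used above: bits 8-15 of the 32-bit command word
 * carry the id of the target object, so e.g. deconfiguring CPU 3
 * issues 0x00100001 | 3 << 8 == 0x00100301. The storage and
 * channel-path commands further down use the same "id << 8" encoding.
 */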

#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];

struct memory_increment {
	struct list_head list;
	u16 rn;
	int standby;
	int usecount;
};

struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;

static unsigned long long rn2addr(u16 rn)
{
	return (unsigned long long) (rn - 1) * rzm;
}

static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

static int sclp_assign_storage(u16 rn)
{
	return do_assign_storage(0x000d0001, rn);
}

static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(0x000c0001, rn);
}

struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[0];
} __packed;

static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	rc = do_sync_request(0x00080001 | id << 8, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++)
			sclp_unassign_storage(sccb->entries[i] >> 16);
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + rzm - 1)
			continue;
		if (online) {
			if (incr->usecount++)
				continue;
			/*
			 * Don't break out of the loop if one assign fails.
			 * The loop may be walked again on CANCEL and we can't
			 * tell which increments were already changed, so
			 * continue and increase the usecount for all
			 * increments.
			 */
			rc |= sclp_assign_storage(incr->rn);
		} else {
			if (--incr->usecount)
				continue;
			sclp_unassign_storage(incr->rn);
		}
	}
	return rc ? -EIO : 0;
}

static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	for (id = 0; id <= sclp_max_storage_id; id++)
		if (!test_bit(id, sclp_storage_ids))
			sclp_attach_storage(id);
	switch (action) {
	case MEM_ONLINE:
		break;
	case MEM_GOING_ONLINE:
		rc = sclp_mem_change_state(start, size, 1);
		break;
	case MEM_CANCEL_ONLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};

static void __init add_memory_merged(u16 rn)
{
	static u16 first_rn, num;
	unsigned long long start, size;

	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * rzm;
	if (start >= VMEM_MAX_PHYS)
		goto skip_add;
	if (start + size > VMEM_MAX_PHYS)
		size = VMEM_MAX_PHYS - start;
	add_memory(0, start, size);
skip_add:
	first_rn = rn;
	num = 1;
}
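
/*
 * add_memory_merged() coalesces runs of consecutive increment numbers
 * into a single add_memory() call. For example, standby increments with
 * rn = 5, 6, 7 and rzm = 256MB yield one region starting at
 * rn2addr(5) = 4 * 256MB = 1GB with a size of 3 * 256MB = 768MB. The
 * final add_memory_merged(0) call in sclp_add_standby_memory() below
 * flushes the last pending run.
 */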

static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}

static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}

struct read_storage_sccb {
	struct sccb_header header;
	u16 max_id;
	u16 assigned;
	u16 standby;
	u16 :16;
	u32 entries[0];
} __packed;

static int __init sclp_detect_standby_memory(void)
{
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (!early_read_info_sccb_valid)
		return 0;
	if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = do_sync_request(0x00040001 | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	for (i = 1; i <= rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	sclp_add_standby_memory();
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001

struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
	case 0x0440:
	case 0x0450:
		break;
	default:
		printk(KERN_WARNING TAG "configure channel-path failed "
		       "(cmd=0x%08x, response=0x%04x)\n", cmd,
		       sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform configure channel-path sclp command for the specified chpid and
 * wait for completion. On success return 0. Return non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}

/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform deconfigure channel-path sclp command for the specified chpid
 * and wait for completion. On success return 0. Return non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}
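
/*
 * Usage sketch (illustrative; chp_id_init() is provided by
 * <asm/chpid.h>):
 *
 *	struct chp_id chpid;
 *
 *	chp_id_init(&chpid);
 *	chpid.id = 0x40;
 *	if (sclp_chp_configure(chpid))
 *		// handle error
 */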

struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform read channel-path information sclp command and wait for completion.
 * On success, store channel-path information in @info and return 0. Return
 * non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
	struct chp_info_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_INFO)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = do_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		printk(KERN_WARNING TAG "read channel-path info failed "
		       "(response=0x%04x)\n", sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
	free_page((unsigned long) sccb);
	return rc;
}
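
/*
 * Usage sketch (illustrative): a caller interested in channel-path
 * states might do
 *
 *	struct sclp_chp_info info;
 *	int rc;
 *
 *	rc = sclp_chp_read_info(&info);
 *	if (rc)
 *		return rc;
 *	// inspect info.recognized / info.standby / info.configured
 */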