/*
 * Copyright IBM Corp. 2007,2012
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/ctl_reg.h>
#include <asm/chpid.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sclp.h>
#include <asm/numa.h>

#include "sclp.h"

static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

	complete(completion);
}

int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	return sclp_sync_request_timeout(cmd, sccb, 0);
}

int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	if (timeout)
		request->queue_timeout = timeout;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
			cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}

/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001

int _sclp_get_core_info(struct sclp_core_info *info)
{
	int rc;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;
	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("readcpuinfo failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_core_info(info, sccb);
out:
	free_page((unsigned long) sccb);
	return rc;
}

struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));

static int do_core_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("configure cpu failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}

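/*
 * A note on the command-word encoding used by the wrappers below: the
 * core id is placed in the second-lowest byte of the command word
 * (cmd | core << 8). For example, sclp_core_configure(5) should issue
 * command word 0x00110001 | 5 << 8 == 0x00110501.
 */
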
int sclp_core_configure(u8 core)
{
	return do_core_configure(SCLP_CMDW_CONFIGURE_CPU | core << 8);
}

int sclp_core_deconfigure(u8 core)
{
	return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
}

#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static DECLARE_BITMAP(sclp_storage_ids, 256);
static int sclp_mem_state_changed;

struct memory_increment {
	struct list_head list;
	u16 rn;
	int standby;
};

struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;

int arch_get_memory_phys_device(unsigned long start_pfn)
{
	if (!sclp.rzm)
		return 0;
	return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
}

static unsigned long long rn2addr(u16 rn)
{
	return (unsigned long long) (rn - 1) * sclp.rzm;
}

static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
			cmd, sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

static int sclp_assign_storage(u16 rn)
{
	unsigned long long start;
	int rc;

	rc = do_assign_storage(0x000d0001, rn);
	if (rc)
		return rc;
	start = rn2addr(rn);
	storage_key_init_range(start, start + sclp.rzm);
	return 0;
}

static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(0x000c0001, rn);
}

struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[0];
} __packed;

static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->header.function_code = 0x40;
	rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++) {
			if (sccb->entries[i])
				sclp_unassign_storage(sccb->entries[i] >> 16);
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

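/*
 * Increment numbers are 1-based: increment rn covers the address range
 * [(rn - 1) * sclp.rzm, rn * sclp.rzm - 1]. A worked example, assuming a
 * (machine-dependent) increment size sclp.rzm of 256M: rn 3 maps to
 * 0x20000000-0x2fffffff, so a request with start=0x28000000 and
 * size=0x10000000 overlaps increments 3 and 4, and both get (un)assigned
 * by the loop below.
 */
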
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (online)
			rc |= sclp_assign_storage(incr->rn);
		else
			sclp_unassign_storage(incr->rn);
		if (rc == 0)
			incr->standby = online ? 0 : 1;
	}
	return rc ? -EIO : 0;
}

static bool contains_standby_increment(unsigned long start, unsigned long end)
{
	struct memory_increment *incr;
	unsigned long istart;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (end - 1 < istart)
			continue;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (incr->standby)
			return true;
	}
	return false;
}

static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
		sclp_attach_storage(id);
	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * We do not allow memory blocks that contain standby
		 * memory to be set offline. This is done to simplify the
		 * "memory online" case.
		 */
		if (contains_standby_increment(start, start + size))
			rc = -EPERM;
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	case MEM_GOING_ONLINE:
		rc = sclp_mem_change_state(start, size, 1);
		break;
	case MEM_CANCEL_ONLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	case MEM_OFFLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	if (!rc)
		sclp_mem_state_changed = 1;
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};

static void __init align_to_block_size(unsigned long long *start,
				       unsigned long long *size,
				       unsigned long long alignment)
{
	unsigned long long start_align, size_align;

	start_align = roundup(*start, alignment);
	size_align = rounddown(*start + *size, alignment) - start_align;

	pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
		*start, size_align >> 20, *size >> 20);
	*start = start_align;
	*size = size_align;
}

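/*
 * A worked example for align_to_block_size(), assuming a (machine-dependent)
 * memory block size of 256M: start=0x08000000, size=0x38000000 is rounded
 * inward to start_align=0x10000000 and size_align=0x30000000, i.e. 768M of
 * the 896M standby range remain usable.
 */
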
static void __init add_memory_merged(u16 rn)
{
	unsigned long long start, size, addr, block_size;
	static u16 first_rn, num;

	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * sclp.rzm;
	if (start >= VMEM_MAX_PHYS)
		goto skip_add;
	if (start + size > VMEM_MAX_PHYS)
		size = VMEM_MAX_PHYS - start;
	if (memory_end_set && (start >= memory_end))
		goto skip_add;
	if (memory_end_set && (start + size > memory_end))
		size = memory_end - start;
	block_size = memory_block_size_bytes();
	align_to_block_size(&start, &size, block_size);
	if (!size)
		goto skip_add;
	for (addr = start; addr < start + size; addr += block_size)
		add_memory(numa_pfn_to_nid(PFN_DOWN(addr)), addr, block_size);
skip_add:
	first_rn = rn;
	num = 1;
}

static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}

static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > sclp.rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}

static int sclp_mem_freeze(struct device *dev)
{
	if (!sclp_mem_state_changed)
		return 0;
	pr_err("Memory hotplug state changed, suspend refused.\n");
	return -EPERM;
}

struct read_storage_sccb {
	struct sccb_header header;
	u16 max_id;
	u16 assigned;
	u16 standby;
	u16 :16;
	u32 entries[0];
} __packed;

static const struct dev_pm_ops sclp_mem_pm_ops = {
	.freeze		= sclp_mem_freeze,
};

static struct platform_driver sclp_mem_pdrv = {
	.driver = {
		.name	= "sclp_mem",
		.pm	= &sclp_mem_pm_ops,
	},
};

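/*
 * In the detection loop below, a response code of 0x0010 lists increments
 * that are treated as assigned, 0x0410 lists increments treated as standby,
 * and 0x0310 is accepted without adding any entries. Each 32-bit entry
 * carries the increment number in its upper halfword, hence the ">> 16"
 * shifts when inserting increments.
 */
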
static int __init sclp_detect_standby_memory(void)
{
	struct platform_device *sclp_pdev;
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (OLDMEM_BASE) /* No standby memory in kdump mode */
		return 0;
	if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = sclp_sync_request(0x00040001 | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	for (i = 1; i <= sclp.rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	rc = platform_driver_register(&sclp_mem_pdrv);
	if (rc)
		goto out;
	sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
	rc = PTR_ERR_OR_ZERO(sclp_pdev);
	if (rc)
		goto out_driver;
	sclp_add_standby_memory();
	goto out;
out_driver:
	platform_driver_unregister(&sclp_mem_pdrv);
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001

struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
	case 0x0440:
	case 0x0450:
		break;
	default:
		pr_warn("configure channel-path failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

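/*
 * As with CPU reconfiguration above, the channel-path id is encoded in the
 * second-lowest byte of the command word. For example, configuring chpid
 * 0x2f should issue command word
 * SCLP_CMDW_CONFIGURE_CHPATH | 0x2f << 8 == 0x000f2f01.
 */
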
621 */ 622 int sclp_chp_deconfigure(struct chp_id chpid) 623 { 624 return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8); 625 } 626 627 struct chp_info_sccb { 628 struct sccb_header header; 629 u8 recognized[SCLP_CHP_INFO_MASK_SIZE]; 630 u8 standby[SCLP_CHP_INFO_MASK_SIZE]; 631 u8 configured[SCLP_CHP_INFO_MASK_SIZE]; 632 u8 ccm; 633 u8 reserved[6]; 634 u8 cssid; 635 } __attribute__((packed)); 636 637 /** 638 * sclp_chp_read_info - perform read channel-path information sclp command 639 * @info: resulting channel-path information data 640 * 641 * Perform read channel-path information sclp command and wait for completion. 642 * On success, store channel-path information in @info and return 0. Return 643 * non-zero otherwise. 644 */ 645 int sclp_chp_read_info(struct sclp_chp_info *info) 646 { 647 struct chp_info_sccb *sccb; 648 int rc; 649 650 if (!SCLP_HAS_CHP_INFO) 651 return -EOPNOTSUPP; 652 /* Prepare sccb. */ 653 sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 654 if (!sccb) 655 return -ENOMEM; 656 sccb->header.length = sizeof(*sccb); 657 rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb); 658 if (rc) 659 goto out; 660 if (sccb->header.response_code != 0x0010) { 661 pr_warn("read channel-path info failed (response=0x%04x)\n", 662 sccb->header.response_code); 663 rc = -EIO; 664 goto out; 665 } 666 memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE); 667 memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE); 668 memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE); 669 out: 670 free_page((unsigned long) sccb); 671 return rc; 672 } 673