/*
 * Copyright IBM Corp. 2007,2012
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/ctl_reg.h>
#include <asm/chpid.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sclp.h>
#include <asm/numa.h>

#include "sclp.h"

static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

	complete(completion);
}

int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	return sclp_sync_request_timeout(cmd, sccb, 0);
}

int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	if (timeout)
		request->queue_timeout = timeout;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
			cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}

/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001

int _sclp_get_core_info(struct sclp_core_info *info)
{
	int rc;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;
	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("readcpuinfo failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_core_info(info, sccb);
out:
	free_page((unsigned long) sccb);
	return rc;
}

struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));
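/*
 * Issue a configure/deconfigure CPU command and evaluate the response.
 * The response codes 0x0020 and 0x0120 are treated as success; any
 * other code is reported and mapped to -EIO.
 */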
static int do_core_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("configure cpu failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}

int sclp_core_configure(u8 core)
{
	return do_core_configure(SCLP_CMDW_CONFIGURE_CPU | core << 8);
}

int sclp_core_deconfigure(u8 core)
{
	return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
}

#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static DECLARE_BITMAP(sclp_storage_ids, 256);
static int sclp_mem_state_changed;

struct memory_increment {
	struct list_head list;
	u16 rn;
	int standby;
};

struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;

int arch_get_memory_phys_device(unsigned long start_pfn)
{
	if (!sclp.rzm)
		return 0;
	return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
}

static unsigned long long rn2addr(u16 rn)
{
	return (unsigned long long) (rn - 1) * sclp.rzm;
}

static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
			cmd, sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

static int sclp_assign_storage(u16 rn)
{
	unsigned long long start;
	int rc;

	rc = do_assign_storage(0x000d0001, rn);
	if (rc)
		return rc;
	start = rn2addr(rn);
	storage_key_init_range(start, start + sclp.rzm);
	return 0;
}

static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(0x000c0001, rn);
}

struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[0];
} __packed;

static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++) {
			if (sccb->entries[i])
				sclp_unassign_storage(sccb->entries[i] >> 16);
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
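/*
 * Assign (online) or unassign (offline) every storage increment that
 * overlaps the range [start, start + size). The standby flag of each
 * touched increment is updated to match the new state.
 */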
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (online)
			rc |= sclp_assign_storage(incr->rn);
		else
			sclp_unassign_storage(incr->rn);
		if (rc == 0)
			incr->standby = online ? 0 : 1;
	}
	return rc ? -EIO : 0;
}

static bool contains_standby_increment(unsigned long start, unsigned long end)
{
	struct memory_increment *incr;
	unsigned long istart;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (end - 1 < istart)
			continue;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (incr->standby)
			return true;
	}
	return false;
}

static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
		sclp_attach_storage(id);
	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * Do not allow memory blocks that contain standby memory to
		 * be set offline. This simplifies the "memory online" case.
		 */
		if (contains_standby_increment(start, start + size))
			rc = -EPERM;
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	case MEM_GOING_ONLINE:
		rc = sclp_mem_change_state(start, size, 1);
		break;
	case MEM_CANCEL_ONLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	case MEM_OFFLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	if (!rc)
		sclp_mem_state_changed = 1;
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};
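/*
 * Trim a standby memory range so that its start and end fall on memory
 * block boundaries, since memory can only be hotplugged in whole blocks.
 * For example, with a (hypothetical) 256M block size, a range starting
 * at 0x1f000000 with size 0x11000000 becomes start 0x20000000 with size
 * 0x10000000; the unaligned remainder is reported as unusable.
 */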
static void __init align_to_block_size(unsigned long long *start,
				       unsigned long long *size,
				       unsigned long long alignment)
{
	unsigned long long start_align, size_align;

	start_align = roundup(*start, alignment);
	size_align = rounddown(*start + *size, alignment) - start_align;

	pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
		*start, size_align >> 20, *size >> 20);
	*start = start_align;
	*size = size_align;
}

static void __init add_memory_merged(u16 rn)
{
	unsigned long long start, size, addr, block_size;
	static u16 first_rn, num;

	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * sclp.rzm;
	if (start >= VMEM_MAX_PHYS)
		goto skip_add;
	if (start + size > VMEM_MAX_PHYS)
		size = VMEM_MAX_PHYS - start;
	if (memory_end_set && (start >= memory_end))
		goto skip_add;
	if (memory_end_set && (start + size > memory_end))
		size = memory_end - start;
	block_size = memory_block_size_bytes();
	align_to_block_size(&start, &size, block_size);
	if (!size)
		goto skip_add;
	for (addr = start; addr < start + size; addr += block_size)
		add_memory(numa_pfn_to_nid(PFN_DOWN(addr)), addr, block_size);
skip_add:
	first_rn = rn;
	num = 1;
}

static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}

static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > sclp.rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}

static int sclp_mem_freeze(struct device *dev)
{
	if (!sclp_mem_state_changed)
		return 0;
	pr_err("Memory hotplug state changed, suspend refused.\n");
	return -EPERM;
}

struct read_storage_sccb {
	struct sccb_header header;
	u16 max_id;
	u16 assigned;
	u16 standby;
	u16 :16;
	u32 entries[0];
} __packed;

static const struct dev_pm_ops sclp_mem_pm_ops = {
	.freeze		= sclp_mem_freeze,
};

static struct platform_driver sclp_mem_pdrv = {
	.driver = {
		.name = "sclp_mem",
		.pm = &sclp_mem_pm_ops,
	},
};
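/*
 * Read the storage information for all storage IDs at boot, build the
 * increment list and register the memory notifier as well as the
 * "sclp_mem" platform driver and device used for suspend handling.
 */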
static int __init sclp_detect_standby_memory(void)
{
	struct platform_device *sclp_pdev;
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (OLDMEM_BASE) /* No standby memory in kdump mode */
		return 0;
	if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = sclp_sync_request(0x00040001 | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	for (i = 1; i <= sclp.rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	rc = platform_driver_register(&sclp_mem_pdrv);
	if (rc)
		goto out;
	sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
	rc = PTR_ERR_OR_ZERO(sclp_pdev);
	if (rc)
		goto out_driver;
	sclp_add_standby_memory();
	goto out;
out_driver:
	platform_driver_unregister(&sclp_mem_pdrv);
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001

struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
	case 0x0440:
	case 0x0450:
		break;
	default:
		pr_warn("configure channel-path failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the configure channel-path sclp command for the specified chpid
 * and wait for completion. Return 0 on success, non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}
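/*
 * A minimal usage sketch for sclp_chp_configure (hypothetical caller,
 * not part of this file):
 *
 *	struct chp_id chpid;
 *
 *	chp_id_init(&chpid);
 *	chpid.id = 0x50;
 *	if (sclp_chp_configure(chpid))
 *		pr_warn("could not configure chpid 0x%02x\n", chpid.id);
 */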
/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the deconfigure channel-path sclp command for the specified chpid
 * and wait for completion. On success return 0. Return non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}

struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform the read channel-path information sclp command and wait for
 * completion. On success, store the channel-path information in @info and
 * return 0. Return non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
	struct chp_info_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_INFO)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("read channel-path info failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
	free_page((unsigned long) sccb);
	return rc;
}
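/*
 * A minimal usage sketch for sclp_chp_read_info (hypothetical caller,
 * not part of this file):
 *
 *	struct sclp_chp_info info;
 *	int rc;
 *
 *	rc = sclp_chp_read_info(&info);
 *	if (rc)
 *		return rc;
 *
 * On success, info.recognized, info.standby and info.configured each hold
 * a bit mask with one bit per channel-path ID.
 */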