// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007,2012
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <asm/ctl_reg.h>
#include <asm/chpid.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sclp.h>
#include <asm/numa.h>
#include <asm/facility.h>

#include "sclp.h"

static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

	complete(completion);
}

int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	return sclp_sync_request_timeout(cmd, sccb, 0);
}

int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	if (timeout)
		request->queue_timeout = timeout;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
			cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}

/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001

int _sclp_get_core_info(struct sclp_core_info *info)
{
	int rc;
	int length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;

	sccb = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA | __GFP_ZERO, get_order(length));
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = length;
	sccb->header.control_mask[2] = 0x80;
	rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("readcpuinfo failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_core_info(info, sccb);
out:
	free_pages((unsigned long) sccb, get_order(length));
	return rc;
}

struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));
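/*
 * Common worker for CPU (de)configuration. The caller encodes the core ID
 * into bits 8-15 of the command word (cmd | core << 8); the SCCB consists
 * of the header only. Response codes 0x0020 and 0x0120 are treated as
 * success, everything else as -EIO.
 */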
static int do_core_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("configure cpu failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}

int sclp_core_configure(u8 core)
{
	return do_core_configure(SCLP_CMDW_CONFIGURE_CPU | core << 8);
}

int sclp_core_deconfigure(u8 core)
{
	return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
}

#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static DECLARE_BITMAP(sclp_storage_ids, 256);

struct memory_increment {
	struct list_head list;
	u16 rn;
	int standby;
};

struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;

int arch_get_memory_phys_device(unsigned long start_pfn)
{
	if (!sclp.rzm)
		return 0;
	return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
}

/* Convert a 1-based increment number to its physical start address. */
static unsigned long long rn2addr(u16 rn)
{
	return (unsigned long long) (rn - 1) * sclp.rzm;
}

static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
			cmd, sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

static int sclp_assign_storage(u16 rn)
{
	unsigned long long start;
	int rc;

	rc = do_assign_storage(0x000d0001, rn);
	if (rc)
		return rc;
	start = rn2addr(rn);
	storage_key_init_range(start, start + sclp.rzm);
	return 0;
}

static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(0x000c0001, rn);
}

struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[0];
} __packed;

static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->header.function_code = 0x40;
	rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++) {
			if (sccb->entries[i])
				sclp_unassign_storage(sccb->entries[i] >> 16);
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
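/*
 * Assign (online != 0) or unassign all memory increments that intersect the
 * range [start, start + size). The increment list is sorted by address, so
 * the walk stops at the first increment that starts beyond the range.
 * Assign failures are accumulated and reported as -EIO; unassign failures
 * are ignored.
 */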
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (online)
			rc |= sclp_assign_storage(incr->rn);
		else
			sclp_unassign_storage(incr->rn);
		if (rc == 0)
			incr->standby = online ? 0 : 1;
	}
	return rc ? -EIO : 0;
}

static bool contains_standby_increment(unsigned long start, unsigned long end)
{
	struct memory_increment *incr;
	unsigned long istart;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (end - 1 < istart)
			continue;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (incr->standby)
			return true;
	}
	return false;
}
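/*
 * Memory hotplug notifier: assign standby increments when a memory block
 * goes online, and unassign them again when onlining is cancelled or the
 * block goes offline. Offlining a block that still contains standby memory
 * is rejected with -EPERM to keep the online path simple.
 */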
static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
		sclp_attach_storage(id);
	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * Do not allow memory blocks that contain standby memory to
		 * be set offline. This is done to simplify the "memory
		 * online" case.
		 */
		if (contains_standby_increment(start, start + size))
			rc = -EPERM;
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	case MEM_GOING_ONLINE:
		rc = sclp_mem_change_state(start, size, 1);
		break;
	case MEM_CANCEL_ONLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	case MEM_OFFLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};

static void __init align_to_block_size(unsigned long long *start,
				       unsigned long long *size,
				       unsigned long long alignment)
{
	unsigned long long start_align, size_align;

	start_align = roundup(*start, alignment);
	size_align = rounddown(*start + *size, alignment) - start_align;

	pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
		*start, size_align >> 20, *size >> 20);
	*start = start_align;
	*size = size_align;
}

/* Merge consecutive standby increments and add them as one memory range. */
static void __init add_memory_merged(u16 rn)
{
	unsigned long long start, size, addr, block_size;
	static u16 first_rn, num;

	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * sclp.rzm;
	if (start >= VMEM_MAX_PHYS)
		goto skip_add;
	if (start + size > VMEM_MAX_PHYS)
		size = VMEM_MAX_PHYS - start;
	if (start >= ident_map_size)
		goto skip_add;
	if (start + size > ident_map_size)
		size = ident_map_size - start;
	block_size = memory_block_size_bytes();
	align_to_block_size(&start, &size, block_size);
	if (!size)
		goto skip_add;
	for (addr = start; addr < start + size; addr += block_size)
		add_memory(0, addr, block_size, MHP_NONE);
skip_add:
	first_rn = rn;
	num = 1;
}

static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}

static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > sclp.rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}
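/*
 * Detect standby memory at boot: read the storage information for each
 * storage element, insert assigned and standby increments into the sorted
 * increment list, register the hotplug notifier, and add the standby
 * ranges via add_memory(). Skipped in kdump mode and when the required
 * SCLP facilities are not present.
 */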
static int __init sclp_detect_standby_memory(void)
{
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (oldmem_data.start)	/* No standby memory in kdump mode */
		return 0;
	if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	for (i = 1; i <= sclp.rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	sclp_add_standby_memory();
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001

struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
	case 0x0440:
	case 0x0450:
		break;
	default:
		pr_warn("configure channel-path failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the configure channel-path sclp command for the specified chpid
 * and wait for completion. Return 0 on success, non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}

/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the deconfigure channel-path sclp command for the specified chpid
 * and wait for completion. Return 0 on success, non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}
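/*
 * Response layout for read channel-path information: bit masks of the
 * recognized, standby, and configured channel paths, followed by the ccm
 * byte and the CSS ID.
 */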
struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform read channel-path information sclp command and wait for completion.
 * On success, store channel-path information in @info and return 0. Return
 * non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
	struct chp_info_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_INFO)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("read channel-path info failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
	free_page((unsigned long) sccb);
	return rc;
}