// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015 IBM Corp.
 */

#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>

#include "cxl.h"
#include "hcalls.h"
#include "trace.h"

#define CXL_ERROR_DETECTED_EVENT	1
#define CXL_SLOT_RESET_EVENT		2
#define CXL_RESUME_EVENT		3

static void pci_error_handlers(struct cxl_afu *afu,
				int bus_error_event,
				pci_channel_state_t state)
{
	struct pci_dev *afu_dev;

	if (afu->phb == NULL)
		return;

	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
		if (!afu_dev->driver)
			continue;

		switch (bus_error_event) {
		case CXL_ERROR_DETECTED_EVENT:
			afu_dev->error_state = state;

			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->error_detected)
				afu_dev->driver->err_handler->error_detected(afu_dev, state);
			break;
		case CXL_SLOT_RESET_EVENT:
			afu_dev->error_state = state;

			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->slot_reset)
				afu_dev->driver->err_handler->slot_reset(afu_dev);
			break;
		case CXL_RESUME_EVENT:
			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->resume)
				afu_dev->driver->err_handler->resume(afu_dev);
			break;
		}
	}
}

static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr,
						u64 errstat)
{
	pr_devel("in %s\n", __func__);
	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);

	return cxl_ops->ack_irq(ctx, 0, errstat);
}

static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu,
			void *buf, size_t len)
{
	unsigned int entries, mod;
	unsigned long **vpd_buf = NULL;
	struct sg_list *le;
	int rc = 0, i, tocopy;
	u64 out = 0;

	if (buf == NULL)
		return -EINVAL;

	/* number of entries in the list */
	entries = len / SG_BUFFER_SIZE;
	mod = len % SG_BUFFER_SIZE;
	if (mod)
		entries++;

	if (entries > SG_MAX_ENTRIES) {
		entries = SG_MAX_ENTRIES;
		len = SG_MAX_ENTRIES * SG_BUFFER_SIZE;
		mod = 0;
	}

	vpd_buf = kcalloc(entries, sizeof(unsigned long *), GFP_KERNEL);
	if (!vpd_buf)
		return -ENOMEM;

	le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
	if (!le) {
		rc = -ENOMEM;
		goto err1;
	}

	for (i = 0; i < entries; i++) {
		vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
		if (!vpd_buf[i]) {
			rc = -ENOMEM;
			goto err2;
		}
		le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i]));
		le[i].len = cpu_to_be64(SG_BUFFER_SIZE);
		if ((i == (entries - 1)) && mod)
			le[i].len = cpu_to_be64(mod);
	}

	if (adapter)
		rc = cxl_h_collect_vpd_adapter(adapter->guest->handle,
					virt_to_phys(le), entries, &out);
	else
		rc = cxl_h_collect_vpd(afu->guest->handle, 0,
				virt_to_phys(le), entries, &out);
	pr_devel("length of available (entries: %i), vpd: %#llx\n",
		entries, out);

	if (!rc) {
		/*
		 * hcall returns in 'out' the size of available VPDs.
		 * It fills the buffer with as much data as possible.
		 */
		if (out < len)
			len = out;
		rc = len;
		if (out) {
			for (i = 0; i < entries; i++) {
				if (len < SG_BUFFER_SIZE)
					tocopy = len;
				else
					tocopy = SG_BUFFER_SIZE;
				memcpy(buf, vpd_buf[i], tocopy);
				buf += tocopy;
				len -= tocopy;
			}
		}
	}
err2:
	for (i = 0; i < entries; i++) {
		if (vpd_buf[i])
			free_page((unsigned long) vpd_buf[i]);
	}
	free_page((unsigned long) le);
err1:
	kfree(vpd_buf);
	return rc;
}

static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info)
{
	return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info);
}

static irqreturn_t guest_psl_irq(int irq, void *data)
{
	struct cxl_context *ctx = data;
	struct cxl_irq_info irq_info;
	int rc;

	pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq);
	rc = guest_get_irq_info(ctx, &irq_info);
	if (rc) {
		WARN(1, "Unable to get IRQ info: %i\n", rc);
		return IRQ_HANDLED;
	}

	rc = cxl_irq_psl8(irq, ctx, &irq_info);
	return rc;
}

static int afu_read_error_state(struct cxl_afu *afu, int *state_out)
{
	u64 state;
	int rc = 0;

	if (!afu)
		return -EIO;

	rc = cxl_h_read_error_state(afu->guest->handle, &state);
	if (!rc) {
		WARN_ON(state != H_STATE_NORMAL &&
			state != H_STATE_DISABLE &&
			state != H_STATE_TEMP_UNAVAILABLE &&
			state != H_STATE_PERM_UNAVAILABLE);
		*state_out = state & 0xffffffff;
	}
	return rc;
}

static irqreturn_t guest_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	int rc;
	u64 serr, afu_error, dsisr;

	rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
	if (rc) {
		dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
		return IRQ_HANDLED;
	}
	afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_afu_decode_psl_serr(afu, serr);
	dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
	dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);

	rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
	if (rc)
		dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n",
			rc);

	return IRQ_HANDLED;
}


static int irq_alloc_range(struct cxl *adapter, int len, int *irq)
{
	int i, n;
	struct irq_avail *cur;

	for (i = 0; i < adapter->guest->irq_nranges; i++) {
		cur = &adapter->guest->irq_avail[i];
		n = bitmap_find_next_zero_area(cur->bitmap, cur->range,
					0, len, 0);
		if (n < cur->range) {
			bitmap_set(cur->bitmap, n, len);
			*irq = cur->offset + n;
			pr_devel("guest: allocate IRQs %#x->%#x\n",
				*irq, *irq + len - 1);

			return 0;
		}
	}
	return -ENOSPC;
}

static int irq_free_range(struct cxl *adapter, int irq, int len)
{
	int i, n;
	struct irq_avail *cur;

	if (len == 0)
		return -ENOENT;

	for (i = 0; i < adapter->guest->irq_nranges; i++) {
		cur = &adapter->guest->irq_avail[i];
		if (irq >= cur->offset &&
			(irq + len) <= (cur->offset + cur->range)) {
			n = irq - cur->offset;
			bitmap_clear(cur->bitmap, n, len);
			pr_devel("guest: release IRQs %#x->%#x\n",
				irq, irq + len - 1);
			return 0;
		}
	}
	return -ENOENT;
}

static int guest_reset(struct cxl *adapter)
{
	struct cxl_afu *afu = NULL;
	int i, rc;

	pr_devel("Adapter reset request\n");
	spin_lock(&adapter->afu_list_lock);
	for (i = 0; i < adapter->slices; i++) {
		if ((afu = adapter->afu[i])) {
			pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
					pci_channel_io_frozen);
			cxl_context_detach_all(afu);
		}
	}

	rc = cxl_h_reset_adapter(adapter->guest->handle);
	for (i = 0; i < adapter->slices; i++) {
		if (!rc && (afu = adapter->afu[i])) {
			pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
					pci_channel_io_normal);
			pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
		}
	}
	spin_unlock(&adapter->afu_list_lock);
	return rc;
}

static int guest_alloc_one_irq(struct cxl *adapter)
{
	int irq;

	spin_lock(&adapter->guest->irq_alloc_lock);
	if (irq_alloc_range(adapter, 1, &irq))
		irq = -ENOSPC;
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return irq;
}

static void guest_release_one_irq(struct cxl *adapter, int irq)
{
	spin_lock(&adapter->guest->irq_alloc_lock);
	irq_free_range(adapter, irq, 1);
	spin_unlock(&adapter->guest->irq_alloc_lock);
}

static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
				struct cxl *adapter, unsigned int num)
{
	int i, try, irq;

	memset(irqs, 0, sizeof(struct cxl_irq_ranges));

	spin_lock(&adapter->guest->irq_alloc_lock);
	for (i = 0; i < CXL_IRQ_RANGES && num; i++) {
		try = num;
		while (try) {
			if (irq_alloc_range(adapter, try, &irq) == 0)
				break;
			try /= 2;
		}
		if (!try)
			goto error;
		irqs->offset[i] = irq;
		irqs->range[i] = try;
		num -= try;
	}
	if (num)
		goto error;
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return 0;

error:
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return -ENOSPC;
}

static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs,
				struct cxl *adapter)
{
	int i;

	spin_lock(&adapter->guest->irq_alloc_lock);
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
	spin_unlock(&adapter->guest->irq_alloc_lock);
}

static int guest_register_serr_irq(struct cxl_afu *afu)
{
	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq,
				guest_slice_irq_err, afu, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void guest_release_serr_irq(struct cxl_afu *afu)
{
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
}

static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token,
				tfc >> 32, (psl_reset_mask != 0));
}

static void disable_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice);
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			disable_irq(virq);
		}
	}
}

static void enable_afu_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice);
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			enable_irq(virq);
		}
	}
}

static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx,
			u64 offset, u64 *val)
{
	unsigned long cr;
	char c;
	int rc = 0;

	if (afu->crs_len < sz)
		return -ENOENT;

	if (unlikely(offset >= afu->crs_len))
		return -ERANGE;

	cr = get_zeroed_page(GFP_KERNEL);
	if (!cr)
		return -ENOMEM;

	rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset,
			virt_to_phys((void *)cr), sz);
	if (rc)
		goto err;

	switch (sz) {
	case 1:
		c = *((char *) cr);
		*val = c;
		break;
	case 2:
		*val = in_le16((u16 *)cr);
		break;
	case 4:
		*val = in_le32((unsigned *)cr);
		break;
	case 8:
		*val = in_le64((u64 *)cr);
		break;
	default:
		WARN_ON(1);
	}
err:
	free_page(cr);
	return rc;
}

static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset,
			u32 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u32) val;
	return rc;
}

static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset,
			u16 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u16) val;
	return rc;
}

static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset,
			u8 *out)
{
	int rc;
	u64 val;

	rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val);
	if (!rc)
		*out = (u8) val;
	return rc;
}

static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset,
			u64 *out)
{
	return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out);
}

static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}

static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}

static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}

static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_process_element_hcall *elem;
	struct cxl *adapter = ctx->afu->adapter;
	const struct cred *cred;
	u32 pid, idx;
	int rc, r, i;
	u64 mmio_addr, mmio_size;
	__be64 flags = 0;

	/* Must be 8 byte aligned and cannot cross a 4096 byte boundary */
	if (!(elem = (struct cxl_process_element_hcall *)
			get_zeroed_page(GFP_KERNEL)))
		return -ENOMEM;

	elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION);
	if (ctx->kernel) {
		pid = 0;
		flags |= CXL_PE_TRANSLATION_ENABLED;
		flags |= CXL_PE_PRIVILEGED_PROCESS;
		if (mfmsr() & MSR_SF)
			flags |= CXL_PE_64_BIT;
	} else {
		pid = current->pid;
		flags |= CXL_PE_PROBLEM_STATE;
		flags |= CXL_PE_TRANSLATION_ENABLED;
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			flags |= CXL_PE_64_BIT;
		cred = get_current_cred();
		if (uid_eq(cred->euid, GLOBAL_ROOT_UID))
			flags |= CXL_PE_PRIVILEGED_PROCESS;
		put_cred(cred);
	}
	elem->flags = cpu_to_be64(flags);
	elem->common.tid = cpu_to_be32(0); /* Unused */
	elem->common.pid = cpu_to_be32(pid);
	elem->common.csrp = cpu_to_be64(0); /* disable */
	elem->common.u.psl8.aurp0 = cpu_to_be64(0); /* disable */
	elem->common.u.psl8.aurp1 = cpu_to_be64(0); /* disable */

	cxl_prefault(ctx, wed);

	elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
	elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);

	/*
	 * Ensure we have at least one interrupt allocated to take faults for
	 * kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		rc = afu_register_irqs(ctx, 0);
		if (rc)
			goto out_free;
	}

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		for (i = 0; i < ctx->irqs.range[r]; i++) {
			if (r == 0 && i == 0) {
				elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]);
			} else {
				idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset;
				elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8);
			}
		}
	}
	elem->common.amr = cpu_to_be64(amr);
	elem->common.wed = cpu_to_be64(wed);

	disable_afu_irqs(ctx);

	rc = cxl_h_attach_process(ctx->afu->guest->handle, elem,
				&ctx->process_token, &mmio_addr, &mmio_size);
	if (rc == H_SUCCESS) {
		if (ctx->master || !ctx->afu->pp_psa) {
			ctx->psn_phys = ctx->afu->psn_phys;
			ctx->psn_size = ctx->afu->adapter->ps_size;
		} else {
			ctx->psn_phys = mmio_addr;
			ctx->psn_size = mmio_size;
		}
		if (ctx->afu->pp_psa && mmio_size &&
			ctx->afu->pp_size == 0) {
			/*
			 * There's no property in the device tree to read the
			 * pp_size. We only find out at the 1st attach.
			 * Compared to bare-metal, it is too late and we
			 * should really lock here. However, on powerVM,
			 * pp_size is really only used to display in /sys.
			 * Being discussed with pHyp for their next release.
			 */
			ctx->afu->pp_size = mmio_size;
		}
		/* from PAPR: process element is bytes 4-7 of process token */
		ctx->external_pe = ctx->process_token & 0xFFFFFFFF;
		pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx",
			ctx->pe, ctx->external_pe, ctx->psn_size);
		ctx->pe_inserted = true;
		enable_afu_irqs(ctx);
	}

out_free:
	free_page((u64)elem);
	return rc;
}

static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
{
	pr_devel("in %s\n", __func__);

	ctx->kernel = kernel;
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return attach_afu_directed(ctx, wed, amr);

	/* dedicated mode not supported on FW840 */

	return -EINVAL;
}

static int detach_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token))
		return -1;
	return 0;
}

static int guest_detach_process(struct cxl_context *ctx)
{
	pr_devel("in %s\n", __func__);
	trace_cxl_detach(ctx);

	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		return -EIO;

	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return detach_afu_directed(ctx);

	return -EINVAL;
}

static void guest_release_afu(struct device *dev)
{
	struct cxl_afu *afu = to_cxl_afu(dev);

	pr_devel("%s\n", __func__);

	idr_destroy(&afu->contexts_idr);

	kfree(afu->guest);
	kfree(afu);
}

ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len)
{
	return guest_collect_vpd(NULL, afu, buf, len);
}

#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
					loff_t off, size_t count)
{
	void *tbuf = NULL;
	int rc = 0;

	tbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!tbuf)
		return -ENOMEM;

	rc = cxl_h_get_afu_err(afu->guest->handle,
			       off & 0x7,
			       virt_to_phys(tbuf),
			       count);
	if (rc)
		goto err;

	if (count > ERR_BUFF_MAX_COPY_SIZE)
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	memcpy(buf, tbuf, count);
err:
	free_page((u64)tbuf);

	return rc;
}

static int guest_afu_check_and_enable(struct cxl_afu *afu)
{
	return 0;
}

static bool guest_support_attributes(const char *attr_name,
				     enum cxl_attrs type)
{
	switch (type) {
	case CXL_ADAPTER_ATTRS:
		if ((strcmp(attr_name, "base_image") == 0) ||
			(strcmp(attr_name, "load_image_on_perst") == 0) ||
			(strcmp(attr_name, "perst_reloads_same_image") == 0) ||
			(strcmp(attr_name, "image_loaded") == 0))
			return false;
		break;
	case CXL_AFU_MASTER_ATTRS:
		if ((strcmp(attr_name, "pp_mmio_off") == 0))
			return false;
		break;
	case CXL_AFU_ATTRS:
		break;
	default:
		break;
	}

	return true;
}

static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice);

	afu->current_mode = CXL_MODE_DIRECTED;

	afu->num_procs = afu->max_procs_virtualised;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}

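/*
 * Backend hook for switching the AFU programming model. Only AFU
 * directed mode is supported on a PowerVM guest; dedicated mode
 * requests are rejected below.
 */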
static int guest_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);

	if (mode == CXL_MODE_DEDICATED)
		dev_err(&afu->dev, "Dedicated mode not supported\n");

	return -EINVAL;
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice);

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	cxl_ops->afu_reset(afu);

	return 0;
}

static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	return 0;
}

static int guest_afu_reset(struct cxl_afu *afu)
{
	pr_devel("AFU(%d) reset request\n", afu->slice);
	return cxl_h_reset_afu(afu->guest->handle);
}

static int guest_map_slice_regs(struct cxl_afu *afu)
{
	if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) {
		dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n",
			afu->slice);
		return -ENOMEM;
	}
	return 0;
}

static void guest_unmap_slice_regs(struct cxl_afu *afu)
{
	if (afu->p2n_mmio)
		iounmap(afu->p2n_mmio);
}

static int afu_update_state(struct cxl_afu *afu)
{
	int rc, cur_state;

	rc = afu_read_error_state(afu, &cur_state);
	if (rc)
		return rc;

	if (afu->guest->previous_state == cur_state)
		return 0;

	pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state);

	switch (cur_state) {
	case H_STATE_NORMAL:
		afu->guest->previous_state = cur_state;
		break;

	case H_STATE_DISABLE:
		pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
				pci_channel_io_frozen);

		cxl_context_detach_all(afu);
		if ((rc = cxl_ops->afu_reset(afu)))
			pr_devel("reset hcall failed %d\n", rc);

		rc = afu_read_error_state(afu, &cur_state);
		if (!rc && cur_state == H_STATE_NORMAL) {
			pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
					pci_channel_io_normal);
			pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
		}
		afu->guest->previous_state = 0;
		break;

	case H_STATE_TEMP_UNAVAILABLE:
		afu->guest->previous_state = cur_state;
		break;

	case H_STATE_PERM_UNAVAILABLE:
		dev_err(&afu->dev, "AFU is in permanent error state\n");
		pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
				pci_channel_io_perm_failure);
		afu->guest->previous_state = cur_state;
		break;

	default:
		pr_err("Unexpected AFU(%d) error state: %#x\n",
		       afu->slice, cur_state);
		return -EINVAL;
	}

	return rc;
}

static void afu_handle_errstate(struct work_struct *work)
{
	struct cxl_afu_guest *afu_guest =
		container_of(to_delayed_work(work), struct cxl_afu_guest, work_err);

	if (!afu_update_state(afu_guest->parent) &&
	    afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE)
		return;

	if (afu_guest->handle_err)
		schedule_delayed_work(&afu_guest->work_err,
				      msecs_to_jiffies(3000));
}

static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu)
{
	int state;

	if (afu && (!afu_read_error_state(afu, &state))) {
		if (state == H_STATE_NORMAL)
			return true;
	}

	return false;
}

static int afu_properties_look_ok(struct cxl_afu *afu)
{
	if (afu->pp_irqs < 0) {
		dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n");
		return -EINVAL;
	}

	if (afu->max_procs_virtualised < 1) {
		dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n");
		return -EINVAL;
	}

	return 0;
}

int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np)
{
	struct cxl_afu *afu;
	bool free = true;
	int rc;

	pr_devel("in %s - AFU(%d)\n", __func__, slice);
	if (!(afu = cxl_alloc_afu(adapter, slice)))
		return -ENOMEM;

	if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) {
		kfree(afu);
		return -ENOMEM;
	}

	if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
			       adapter->adapter_num,
			       slice)))
		goto err1;

	adapter->slices++;

	if ((rc = cxl_of_read_afu_handle(afu, afu_np)))
		goto err1;

	if ((rc = cxl_ops->afu_reset(afu)))
		goto err1;

	if ((rc = cxl_of_read_afu_properties(afu, afu_np)))
		goto err1;

	if ((rc = afu_properties_look_ok(afu)))
		goto err1;

	if ((rc = guest_map_slice_regs(afu)))
		goto err1;

	if ((rc = guest_register_serr_irq(afu)))
		goto err2;

	/*
	 * After we call this function we must not free the afu directly, even
	 * if it returns an error!
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	/*
	 * pHyp doesn't expose the programming models supported by the
	 * AFU. pHyp currently only supports directed mode. If it adds
	 * dedicated mode later, this version of cxl has no way to
	 * detect it. So we'll initialize the driver, but the first
	 * attach will fail.
	 * Being discussed with pHyp to do better (likely new property)
	 */
	if (afu->max_procs_virtualised == 1)
		afu->modes_supported = CXL_MODE_DEDICATED;
	else
		afu->modes_supported = CXL_MODE_DIRECTED;

	if ((rc = cxl_afu_select_best_mode(afu)))
		goto err_put2;

	adapter->afu[afu->slice] = afu;

	afu->enabled = true;

	/*
	 * wake up the cpu periodically to check the state
	 * of the AFU using "afu" stored in the guest structure.
	 */
	afu->guest->parent = afu;
	afu->guest->handle_err = true;
	INIT_DELAYED_WORK(&afu->guest->work_err, afu_handle_errstate);
	schedule_delayed_work(&afu->guest->work_err, msecs_to_jiffies(1000));

	if ((rc = cxl_pci_vphb_add(afu)))
		dev_info(&afu->dev, "Can't register vPHB\n");

	return 0;

err_put2:
	cxl_sysfs_afu_remove(afu);
err_put1:
	device_unregister(&afu->dev);
	free = false;
	guest_release_serr_irq(afu);
err2:
	guest_unmap_slice_regs(afu);
err1:
	if (free) {
		kfree(afu->guest);
		kfree(afu);
	}
	return rc;
}

void cxl_guest_remove_afu(struct cxl_afu *afu)
{
	if (!afu)
		return;

	/* flush and stop pending job */
	afu->guest->handle_err = false;
	flush_delayed_work(&afu->guest->work_err);

	cxl_pci_vphb_remove(afu);
	cxl_sysfs_afu_remove(afu);

	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
	guest_release_serr_irq(afu);
	guest_unmap_slice_regs(afu);

	device_unregister(&afu->dev);
}

static void free_adapter(struct cxl *adapter)
{
	struct irq_avail *cur;
	int i;

	if (adapter->guest) {
		if (adapter->guest->irq_avail) {
			for (i = 0; i < adapter->guest->irq_nranges; i++) {
				cur = &adapter->guest->irq_avail[i];
				kfree(cur->bitmap);
			}
			kfree(adapter->guest->irq_avail);
		}
		kfree(adapter->guest->status);
		kfree(adapter->guest);
	}
	cxl_remove_adapter_nr(adapter);
	kfree(adapter);
}

static int properties_look_ok(struct cxl *adapter)
{
	/* The absence of this property means that the operational
	 * status is unknown or okay
	 */
	if (strlen(adapter->guest->status) &&
	    strcmp(adapter->guest->status, "okay")) {
		pr_err("ABORTING:Bad operational status of the device\n");
		return -EINVAL;
	}

	return 0;
}

ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
{
	return guest_collect_vpd(adapter, NULL, buf, len);
}

void cxl_guest_remove_adapter(struct cxl *adapter)
{
	pr_devel("in %s\n", __func__);

	cxl_sysfs_adapter_remove(adapter);

	cxl_guest_remove_chardev(adapter);
	device_unregister(&adapter->dev);
}

static void release_adapter(struct device *dev)
{
	free_adapter(to_cxl_adapter(dev));
}

struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev)
{
	struct cxl *adapter;
	bool free = true;
	int rc;

	if (!(adapter = cxl_alloc_adapter()))
		return ERR_PTR(-ENOMEM);

	if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) {
		free_adapter(adapter);
		return ERR_PTR(-ENOMEM);
	}

	adapter->slices = 0;
	adapter->guest->pdev = pdev;
	adapter->dev.parent = &pdev->dev;
	adapter->dev.release = release_adapter;
	dev_set_drvdata(&pdev->dev, adapter);

	/*
	 * Hypervisor controls PSL timebase initialization (p1 register).
	 * On FW840, PSL is initialized.
	 */
	adapter->psl_timebase_synced = true;

	if ((rc = cxl_of_read_adapter_handle(adapter, np)))
		goto err1;

	if ((rc = cxl_of_read_adapter_properties(adapter, np)))
		goto err1;

	if ((rc = properties_look_ok(adapter)))
		goto err1;

	if ((rc = cxl_guest_add_chardev(adapter)))
		goto err1;

	/*
	 * After we call this function we must not free the adapter directly,
	 * even if it returns an error!
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	/* release the context lock as the adapter is configured */
	cxl_adapter_context_unlock(adapter);

	return adapter;

err_put1:
	device_unregister(&adapter->dev);
	free = false;
	cxl_guest_remove_chardev(adapter);
err1:
	if (free)
		free_adapter(adapter);
	return ERR_PTR(rc);
}

void cxl_guest_reload_module(struct cxl *adapter)
{
	struct platform_device *pdev;

	pdev = adapter->guest->pdev;
	cxl_guest_remove_adapter(adapter);

	cxl_of_probe(pdev);
}

const struct cxl_backend_ops cxl_guest_ops = {
	.module = THIS_MODULE,
	.adapter_reset = guest_reset,
	.alloc_one_irq = guest_alloc_one_irq,
	.release_one_irq = guest_release_one_irq,
	.alloc_irq_ranges = guest_alloc_irq_ranges,
	.release_irq_ranges = guest_release_irq_ranges,
	.setup_irq = NULL,
	.handle_psl_slice_error = guest_handle_psl_slice_error,
	.psl_interrupt = guest_psl_irq,
	.ack_irq = guest_ack_irq,
	.attach_process = guest_attach_process,
	.detach_process = guest_detach_process,
	.update_ivtes = NULL,
	.support_attributes = guest_support_attributes,
	.link_ok = guest_link_ok,
	.release_afu = guest_release_afu,
	.afu_read_err_buffer = guest_afu_read_err_buffer,
	.afu_check_and_enable = guest_afu_check_and_enable,
	.afu_activate_mode = guest_afu_activate_mode,
	.afu_deactivate_mode = guest_afu_deactivate_mode,
	.afu_reset = guest_afu_reset,
	.afu_cr_read8 = guest_afu_cr_read8,
	.afu_cr_read16 = guest_afu_cr_read16,
	.afu_cr_read32 = guest_afu_cr_read32,
	.afu_cr_read64 = guest_afu_cr_read64,
	.afu_cr_write8 = guest_afu_cr_write8,
	.afu_cr_write16 = guest_afu_cr_write16,
	.afu_cr_write32 = guest_afu_cr_write32,
	.read_adapter_vpd = cxl_guest_read_adapter_vpd,
};