1 /* 2 * Copyright 2015 IBM Corp. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public License 6 * as published by the Free Software Foundation; either version 7 * 2 of the License, or (at your option) any later version. 8 */ 9 10 #include <linux/spinlock.h> 11 #include <linux/uaccess.h> 12 #include <linux/delay.h> 13 14 #include "cxl.h" 15 #include "hcalls.h" 16 #include "trace.h" 17 18 #define CXL_ERROR_DETECTED_EVENT 1 19 #define CXL_SLOT_RESET_EVENT 2 20 #define CXL_RESUME_EVENT 3 21 22 static void pci_error_handlers(struct cxl_afu *afu, 23 int bus_error_event, 24 pci_channel_state_t state) 25 { 26 struct pci_dev *afu_dev; 27 28 if (afu->phb == NULL) 29 return; 30 31 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { 32 if (!afu_dev->driver) 33 continue; 34 35 switch (bus_error_event) { 36 case CXL_ERROR_DETECTED_EVENT: 37 afu_dev->error_state = state; 38 39 if (afu_dev->driver->err_handler && 40 afu_dev->driver->err_handler->error_detected) 41 afu_dev->driver->err_handler->error_detected(afu_dev, state); 42 break; 43 case CXL_SLOT_RESET_EVENT: 44 afu_dev->error_state = state; 45 46 if (afu_dev->driver->err_handler && 47 afu_dev->driver->err_handler->slot_reset) 48 afu_dev->driver->err_handler->slot_reset(afu_dev); 49 break; 50 case CXL_RESUME_EVENT: 51 if (afu_dev->driver->err_handler && 52 afu_dev->driver->err_handler->resume) 53 afu_dev->driver->err_handler->resume(afu_dev); 54 break; 55 } 56 } 57 } 58 59 static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, 60 u64 errstat) 61 { 62 pr_devel("in %s\n", __func__); 63 dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat); 64 65 return cxl_ops->ack_irq(ctx, 0, errstat); 66 } 67 68 static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu, 69 void *buf, size_t len) 70 { 71 unsigned int entries, mod; 72 unsigned long **vpd_buf = NULL; 73 struct sg_list *le; 74 int rc = 0, i, tocopy; 75 u64 out = 0; 76 77 if (buf == NULL) 78 return -EINVAL; 79 80 /* number of entries in the list */ 81 entries = len / SG_BUFFER_SIZE; 82 mod = len % SG_BUFFER_SIZE; 83 if (mod) 84 entries++; 85 86 if (entries > SG_MAX_ENTRIES) { 87 entries = SG_MAX_ENTRIES; 88 len = SG_MAX_ENTRIES * SG_BUFFER_SIZE; 89 mod = 0; 90 } 91 92 vpd_buf = kzalloc(entries * sizeof(unsigned long *), GFP_KERNEL); 93 if (!vpd_buf) 94 return -ENOMEM; 95 96 le = (struct sg_list *)get_zeroed_page(GFP_KERNEL); 97 if (!le) { 98 rc = -ENOMEM; 99 goto err1; 100 } 101 102 for (i = 0; i < entries; i++) { 103 vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL); 104 if (!vpd_buf[i]) { 105 rc = -ENOMEM; 106 goto err2; 107 } 108 le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i])); 109 le[i].len = cpu_to_be64(SG_BUFFER_SIZE); 110 if ((i == (entries - 1)) && mod) 111 le[i].len = cpu_to_be64(mod); 112 } 113 114 if (adapter) 115 rc = cxl_h_collect_vpd_adapter(adapter->guest->handle, 116 virt_to_phys(le), entries, &out); 117 else 118 rc = cxl_h_collect_vpd(afu->guest->handle, 0, 119 virt_to_phys(le), entries, &out); 120 pr_devel("length of available (entries: %i), vpd: %#llx\n", 121 entries, out); 122 123 if (!rc) { 124 /* 125 * hcall returns in 'out' the size of available VPDs. 126 * It fills the buffer with as much data as possible. 127 */ 128 if (out < len) 129 len = out; 130 rc = len; 131 if (out) { 132 for (i = 0; i < entries; i++) { 133 if (len < SG_BUFFER_SIZE) 134 tocopy = len; 135 else 136 tocopy = SG_BUFFER_SIZE; 137 memcpy(buf, vpd_buf[i], tocopy); 138 buf += tocopy; 139 len -= tocopy; 140 } 141 } 142 } 143 err2: 144 for (i = 0; i < entries; i++) { 145 if (vpd_buf[i]) 146 free_page((unsigned long) vpd_buf[i]); 147 } 148 free_page((unsigned long) le); 149 err1: 150 kfree(vpd_buf); 151 return rc; 152 } 153 154 static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info) 155 { 156 return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info); 157 } 158 159 static irqreturn_t guest_psl_irq(int irq, void *data) 160 { 161 struct cxl_context *ctx = data; 162 struct cxl_irq_info irq_info; 163 int rc; 164 165 pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq); 166 rc = guest_get_irq_info(ctx, &irq_info); 167 if (rc) { 168 WARN(1, "Unable to get IRQ info: %i\n", rc); 169 return IRQ_HANDLED; 170 } 171 172 rc = cxl_irq_psl8(irq, ctx, &irq_info); 173 return rc; 174 } 175 176 static int afu_read_error_state(struct cxl_afu *afu, int *state_out) 177 { 178 u64 state; 179 int rc = 0; 180 181 if (!afu) 182 return -EIO; 183 184 rc = cxl_h_read_error_state(afu->guest->handle, &state); 185 if (!rc) { 186 WARN_ON(state != H_STATE_NORMAL && 187 state != H_STATE_DISABLE && 188 state != H_STATE_TEMP_UNAVAILABLE && 189 state != H_STATE_PERM_UNAVAILABLE); 190 *state_out = state & 0xffffffff; 191 } 192 return rc; 193 } 194 195 static irqreturn_t guest_slice_irq_err(int irq, void *data) 196 { 197 struct cxl_afu *afu = data; 198 int rc; 199 u64 serr, afu_error, dsisr; 200 201 rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr); 202 if (rc) { 203 dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc); 204 return IRQ_HANDLED; 205 } 206 afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An); 207 dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); 208 cxl_afu_decode_psl_serr(afu, serr); 209 dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error); 210 dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr); 211 212 rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr); 213 if (rc) 214 dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n", 215 rc); 216 217 return IRQ_HANDLED; 218 } 219 220 221 static int irq_alloc_range(struct cxl *adapter, int len, int *irq) 222 { 223 int i, n; 224 struct irq_avail *cur; 225 226 for (i = 0; i < adapter->guest->irq_nranges; i++) { 227 cur = &adapter->guest->irq_avail[i]; 228 n = bitmap_find_next_zero_area(cur->bitmap, cur->range, 229 0, len, 0); 230 if (n < cur->range) { 231 bitmap_set(cur->bitmap, n, len); 232 *irq = cur->offset + n; 233 pr_devel("guest: allocate IRQs %#x->%#x\n", 234 *irq, *irq + len - 1); 235 236 return 0; 237 } 238 } 239 return -ENOSPC; 240 } 241 242 static int irq_free_range(struct cxl *adapter, int irq, int len) 243 { 244 int i, n; 245 struct irq_avail *cur; 246 247 if (len == 0) 248 return -ENOENT; 249 250 for (i = 0; i < adapter->guest->irq_nranges; i++) { 251 cur = &adapter->guest->irq_avail[i]; 252 if (irq >= cur->offset && 253 (irq + len) <= (cur->offset + cur->range)) { 254 n = irq - cur->offset; 255 bitmap_clear(cur->bitmap, n, len); 256 pr_devel("guest: release IRQs %#x->%#x\n", 257 irq, irq + len - 1); 258 return 0; 259 } 260 } 261 return -ENOENT; 262 } 263 264 static int guest_reset(struct cxl *adapter) 265 { 266 struct cxl_afu *afu = NULL; 267 int i, rc; 268 269 pr_devel("Adapter reset request\n"); 270 for (i = 0; i < adapter->slices; i++) { 271 if ((afu = adapter->afu[i])) { 272 pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, 273 pci_channel_io_frozen); 274 cxl_context_detach_all(afu); 275 } 276 } 277 278 rc = cxl_h_reset_adapter(adapter->guest->handle); 279 for (i = 0; i < adapter->slices; i++) { 280 if (!rc && (afu = adapter->afu[i])) { 281 pci_error_handlers(afu, CXL_SLOT_RESET_EVENT, 282 pci_channel_io_normal); 283 pci_error_handlers(afu, CXL_RESUME_EVENT, 0); 284 } 285 } 286 return rc; 287 } 288 289 static int guest_alloc_one_irq(struct cxl *adapter) 290 { 291 int irq; 292 293 spin_lock(&adapter->guest->irq_alloc_lock); 294 if (irq_alloc_range(adapter, 1, &irq)) 295 irq = -ENOSPC; 296 spin_unlock(&adapter->guest->irq_alloc_lock); 297 return irq; 298 } 299 300 static void guest_release_one_irq(struct cxl *adapter, int irq) 301 { 302 spin_lock(&adapter->guest->irq_alloc_lock); 303 irq_free_range(adapter, irq, 1); 304 spin_unlock(&adapter->guest->irq_alloc_lock); 305 } 306 307 static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs, 308 struct cxl *adapter, unsigned int num) 309 { 310 int i, try, irq; 311 312 memset(irqs, 0, sizeof(struct cxl_irq_ranges)); 313 314 spin_lock(&adapter->guest->irq_alloc_lock); 315 for (i = 0; i < CXL_IRQ_RANGES && num; i++) { 316 try = num; 317 while (try) { 318 if (irq_alloc_range(adapter, try, &irq) == 0) 319 break; 320 try /= 2; 321 } 322 if (!try) 323 goto error; 324 irqs->offset[i] = irq; 325 irqs->range[i] = try; 326 num -= try; 327 } 328 if (num) 329 goto error; 330 spin_unlock(&adapter->guest->irq_alloc_lock); 331 return 0; 332 333 error: 334 for (i = 0; i < CXL_IRQ_RANGES; i++) 335 irq_free_range(adapter, irqs->offset[i], irqs->range[i]); 336 spin_unlock(&adapter->guest->irq_alloc_lock); 337 return -ENOSPC; 338 } 339 340 static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs, 341 struct cxl *adapter) 342 { 343 int i; 344 345 spin_lock(&adapter->guest->irq_alloc_lock); 346 for (i = 0; i < CXL_IRQ_RANGES; i++) 347 irq_free_range(adapter, irqs->offset[i], irqs->range[i]); 348 spin_unlock(&adapter->guest->irq_alloc_lock); 349 } 350 351 static int guest_register_serr_irq(struct cxl_afu *afu) 352 { 353 afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", 354 dev_name(&afu->dev)); 355 if (!afu->err_irq_name) 356 return -ENOMEM; 357 358 if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq, 359 guest_slice_irq_err, afu, afu->err_irq_name))) { 360 kfree(afu->err_irq_name); 361 afu->err_irq_name = NULL; 362 return -ENOMEM; 363 } 364 365 return 0; 366 } 367 368 static void guest_release_serr_irq(struct cxl_afu *afu) 369 { 370 cxl_unmap_irq(afu->serr_virq, afu); 371 cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); 372 kfree(afu->err_irq_name); 373 } 374 375 static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask) 376 { 377 return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token, 378 tfc >> 32, (psl_reset_mask != 0)); 379 } 380 381 static void disable_afu_irqs(struct cxl_context *ctx) 382 { 383 irq_hw_number_t hwirq; 384 unsigned int virq; 385 int r, i; 386 387 pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice); 388 for (r = 0; r < CXL_IRQ_RANGES; r++) { 389 hwirq = ctx->irqs.offset[r]; 390 for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { 391 virq = irq_find_mapping(NULL, hwirq); 392 disable_irq(virq); 393 } 394 } 395 } 396 397 static void enable_afu_irqs(struct cxl_context *ctx) 398 { 399 irq_hw_number_t hwirq; 400 unsigned int virq; 401 int r, i; 402 403 pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice); 404 for (r = 0; r < CXL_IRQ_RANGES; r++) { 405 hwirq = ctx->irqs.offset[r]; 406 for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { 407 virq = irq_find_mapping(NULL, hwirq); 408 enable_irq(virq); 409 } 410 } 411 } 412 413 static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx, 414 u64 offset, u64 *val) 415 { 416 unsigned long cr; 417 char c; 418 int rc = 0; 419 420 if (afu->crs_len < sz) 421 return -ENOENT; 422 423 if (unlikely(offset >= afu->crs_len)) 424 return -ERANGE; 425 426 cr = get_zeroed_page(GFP_KERNEL); 427 if (!cr) 428 return -ENOMEM; 429 430 rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset, 431 virt_to_phys((void *)cr), sz); 432 if (rc) 433 goto err; 434 435 switch (sz) { 436 case 1: 437 c = *((char *) cr); 438 *val = c; 439 break; 440 case 2: 441 *val = in_le16((u16 *)cr); 442 break; 443 case 4: 444 *val = in_le32((unsigned *)cr); 445 break; 446 case 8: 447 *val = in_le64((u64 *)cr); 448 break; 449 default: 450 WARN_ON(1); 451 } 452 err: 453 free_page(cr); 454 return rc; 455 } 456 457 static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset, 458 u32 *out) 459 { 460 int rc; 461 u64 val; 462 463 rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val); 464 if (!rc) 465 *out = (u32) val; 466 return rc; 467 } 468 469 static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset, 470 u16 *out) 471 { 472 int rc; 473 u64 val; 474 475 rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val); 476 if (!rc) 477 *out = (u16) val; 478 return rc; 479 } 480 481 static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset, 482 u8 *out) 483 { 484 int rc; 485 u64 val; 486 487 rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val); 488 if (!rc) 489 *out = (u8) val; 490 return rc; 491 } 492 493 static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset, 494 u64 *out) 495 { 496 return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out); 497 } 498 499 static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in) 500 { 501 /* config record is not writable from guest */ 502 return -EPERM; 503 } 504 505 static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in) 506 { 507 /* config record is not writable from guest */ 508 return -EPERM; 509 } 510 511 static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in) 512 { 513 /* config record is not writable from guest */ 514 return -EPERM; 515 } 516 517 static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr) 518 { 519 struct cxl_process_element_hcall *elem; 520 struct cxl *adapter = ctx->afu->adapter; 521 const struct cred *cred; 522 u32 pid, idx; 523 int rc, r, i; 524 u64 mmio_addr, mmio_size; 525 __be64 flags = 0; 526 527 /* Must be 8 byte aligned and cannot cross a 4096 byte boundary */ 528 if (!(elem = (struct cxl_process_element_hcall *) 529 get_zeroed_page(GFP_KERNEL))) 530 return -ENOMEM; 531 532 elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION); 533 if (ctx->kernel) { 534 pid = 0; 535 flags |= CXL_PE_TRANSLATION_ENABLED; 536 flags |= CXL_PE_PRIVILEGED_PROCESS; 537 if (mfmsr() & MSR_SF) 538 flags |= CXL_PE_64_BIT; 539 } else { 540 pid = current->pid; 541 flags |= CXL_PE_PROBLEM_STATE; 542 flags |= CXL_PE_TRANSLATION_ENABLED; 543 if (!test_tsk_thread_flag(current, TIF_32BIT)) 544 flags |= CXL_PE_64_BIT; 545 cred = get_current_cred(); 546 if (uid_eq(cred->euid, GLOBAL_ROOT_UID)) 547 flags |= CXL_PE_PRIVILEGED_PROCESS; 548 put_cred(cred); 549 } 550 elem->flags = cpu_to_be64(flags); 551 elem->common.tid = cpu_to_be32(0); /* Unused */ 552 elem->common.pid = cpu_to_be32(pid); 553 elem->common.csrp = cpu_to_be64(0); /* disable */ 554 elem->common.u.psl8.aurp0 = cpu_to_be64(0); /* disable */ 555 elem->common.u.psl8.aurp1 = cpu_to_be64(0); /* disable */ 556 557 cxl_prefault(ctx, wed); 558 559 elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0); 560 elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1); 561 562 /* 563 * Ensure we have at least one interrupt allocated to take faults for 564 * kernel contexts that may not have allocated any AFU IRQs at all: 565 */ 566 if (ctx->irqs.range[0] == 0) { 567 rc = afu_register_irqs(ctx, 0); 568 if (rc) 569 goto out_free; 570 } 571 572 for (r = 0; r < CXL_IRQ_RANGES; r++) { 573 for (i = 0; i < ctx->irqs.range[r]; i++) { 574 if (r == 0 && i == 0) { 575 elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]); 576 } else { 577 idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset; 578 elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8); 579 } 580 } 581 } 582 elem->common.amr = cpu_to_be64(amr); 583 elem->common.wed = cpu_to_be64(wed); 584 585 disable_afu_irqs(ctx); 586 587 rc = cxl_h_attach_process(ctx->afu->guest->handle, elem, 588 &ctx->process_token, &mmio_addr, &mmio_size); 589 if (rc == H_SUCCESS) { 590 if (ctx->master || !ctx->afu->pp_psa) { 591 ctx->psn_phys = ctx->afu->psn_phys; 592 ctx->psn_size = ctx->afu->adapter->ps_size; 593 } else { 594 ctx->psn_phys = mmio_addr; 595 ctx->psn_size = mmio_size; 596 } 597 if (ctx->afu->pp_psa && mmio_size && 598 ctx->afu->pp_size == 0) { 599 /* 600 * There's no property in the device tree to read the 601 * pp_size. We only find out at the 1st attach. 602 * Compared to bare-metal, it is too late and we 603 * should really lock here. However, on powerVM, 604 * pp_size is really only used to display in /sys. 605 * Being discussed with pHyp for their next release. 606 */ 607 ctx->afu->pp_size = mmio_size; 608 } 609 /* from PAPR: process element is bytes 4-7 of process token */ 610 ctx->external_pe = ctx->process_token & 0xFFFFFFFF; 611 pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx", 612 ctx->pe, ctx->external_pe, ctx->psn_size); 613 ctx->pe_inserted = true; 614 enable_afu_irqs(ctx); 615 } 616 617 out_free: 618 free_page((u64)elem); 619 return rc; 620 } 621 622 static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr) 623 { 624 pr_devel("in %s\n", __func__); 625 626 if (ctx->real_mode) 627 return -EPERM; 628 629 ctx->kernel = kernel; 630 if (ctx->afu->current_mode == CXL_MODE_DIRECTED) 631 return attach_afu_directed(ctx, wed, amr); 632 633 /* dedicated mode not supported on FW840 */ 634 635 return -EINVAL; 636 } 637 638 static int detach_afu_directed(struct cxl_context *ctx) 639 { 640 if (!ctx->pe_inserted) 641 return 0; 642 if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token)) 643 return -1; 644 return 0; 645 } 646 647 static int guest_detach_process(struct cxl_context *ctx) 648 { 649 pr_devel("in %s\n", __func__); 650 trace_cxl_detach(ctx); 651 652 if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) 653 return -EIO; 654 655 if (ctx->afu->current_mode == CXL_MODE_DIRECTED) 656 return detach_afu_directed(ctx); 657 658 return -EINVAL; 659 } 660 661 static void guest_release_afu(struct device *dev) 662 { 663 struct cxl_afu *afu = to_cxl_afu(dev); 664 665 pr_devel("%s\n", __func__); 666 667 idr_destroy(&afu->contexts_idr); 668 669 kfree(afu->guest); 670 kfree(afu); 671 } 672 673 ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len) 674 { 675 return guest_collect_vpd(NULL, afu, buf, len); 676 } 677 678 #define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE 679 static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf, 680 loff_t off, size_t count) 681 { 682 void *tbuf = NULL; 683 int rc = 0; 684 685 tbuf = (void *) get_zeroed_page(GFP_KERNEL); 686 if (!tbuf) 687 return -ENOMEM; 688 689 rc = cxl_h_get_afu_err(afu->guest->handle, 690 off & 0x7, 691 virt_to_phys(tbuf), 692 count); 693 if (rc) 694 goto err; 695 696 if (count > ERR_BUFF_MAX_COPY_SIZE) 697 count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7); 698 memcpy(buf, tbuf, count); 699 err: 700 free_page((u64)tbuf); 701 702 return rc; 703 } 704 705 static int guest_afu_check_and_enable(struct cxl_afu *afu) 706 { 707 return 0; 708 } 709 710 static bool guest_support_attributes(const char *attr_name, 711 enum cxl_attrs type) 712 { 713 switch (type) { 714 case CXL_ADAPTER_ATTRS: 715 if ((strcmp(attr_name, "base_image") == 0) || 716 (strcmp(attr_name, "load_image_on_perst") == 0) || 717 (strcmp(attr_name, "perst_reloads_same_image") == 0) || 718 (strcmp(attr_name, "image_loaded") == 0)) 719 return false; 720 break; 721 case CXL_AFU_MASTER_ATTRS: 722 if ((strcmp(attr_name, "pp_mmio_off") == 0)) 723 return false; 724 break; 725 case CXL_AFU_ATTRS: 726 break; 727 default: 728 break; 729 } 730 731 return true; 732 } 733 734 static int activate_afu_directed(struct cxl_afu *afu) 735 { 736 int rc; 737 738 dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice); 739 740 afu->current_mode = CXL_MODE_DIRECTED; 741 742 afu->num_procs = afu->max_procs_virtualised; 743 744 if ((rc = cxl_chardev_m_afu_add(afu))) 745 return rc; 746 747 if ((rc = cxl_sysfs_afu_m_add(afu))) 748 goto err; 749 750 if ((rc = cxl_chardev_s_afu_add(afu))) 751 goto err1; 752 753 return 0; 754 err1: 755 cxl_sysfs_afu_m_remove(afu); 756 err: 757 cxl_chardev_afu_remove(afu); 758 return rc; 759 } 760 761 static int guest_afu_activate_mode(struct cxl_afu *afu, int mode) 762 { 763 if (!mode) 764 return 0; 765 if (!(mode & afu->modes_supported)) 766 return -EINVAL; 767 768 if (mode == CXL_MODE_DIRECTED) 769 return activate_afu_directed(afu); 770 771 if (mode == CXL_MODE_DEDICATED) 772 dev_err(&afu->dev, "Dedicated mode not supported\n"); 773 774 return -EINVAL; 775 } 776 777 static int deactivate_afu_directed(struct cxl_afu *afu) 778 { 779 dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice); 780 781 afu->current_mode = 0; 782 afu->num_procs = 0; 783 784 cxl_sysfs_afu_m_remove(afu); 785 cxl_chardev_afu_remove(afu); 786 787 cxl_ops->afu_reset(afu); 788 789 return 0; 790 } 791 792 static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode) 793 { 794 if (!mode) 795 return 0; 796 if (!(mode & afu->modes_supported)) 797 return -EINVAL; 798 799 if (mode == CXL_MODE_DIRECTED) 800 return deactivate_afu_directed(afu); 801 return 0; 802 } 803 804 static int guest_afu_reset(struct cxl_afu *afu) 805 { 806 pr_devel("AFU(%d) reset request\n", afu->slice); 807 return cxl_h_reset_afu(afu->guest->handle); 808 } 809 810 static int guest_map_slice_regs(struct cxl_afu *afu) 811 { 812 if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) { 813 dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n", 814 afu->slice); 815 return -ENOMEM; 816 } 817 return 0; 818 } 819 820 static void guest_unmap_slice_regs(struct cxl_afu *afu) 821 { 822 if (afu->p2n_mmio) 823 iounmap(afu->p2n_mmio); 824 } 825 826 static int afu_update_state(struct cxl_afu *afu) 827 { 828 int rc, cur_state; 829 830 rc = afu_read_error_state(afu, &cur_state); 831 if (rc) 832 return rc; 833 834 if (afu->guest->previous_state == cur_state) 835 return 0; 836 837 pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state); 838 839 switch (cur_state) { 840 case H_STATE_NORMAL: 841 afu->guest->previous_state = cur_state; 842 break; 843 844 case H_STATE_DISABLE: 845 pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, 846 pci_channel_io_frozen); 847 848 cxl_context_detach_all(afu); 849 if ((rc = cxl_ops->afu_reset(afu))) 850 pr_devel("reset hcall failed %d\n", rc); 851 852 rc = afu_read_error_state(afu, &cur_state); 853 if (!rc && cur_state == H_STATE_NORMAL) { 854 pci_error_handlers(afu, CXL_SLOT_RESET_EVENT, 855 pci_channel_io_normal); 856 pci_error_handlers(afu, CXL_RESUME_EVENT, 0); 857 } 858 afu->guest->previous_state = 0; 859 break; 860 861 case H_STATE_TEMP_UNAVAILABLE: 862 afu->guest->previous_state = cur_state; 863 break; 864 865 case H_STATE_PERM_UNAVAILABLE: 866 dev_err(&afu->dev, "AFU is in permanent error state\n"); 867 pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, 868 pci_channel_io_perm_failure); 869 afu->guest->previous_state = cur_state; 870 break; 871 872 default: 873 pr_err("Unexpected AFU(%d) error state: %#x\n", 874 afu->slice, cur_state); 875 return -EINVAL; 876 } 877 878 return rc; 879 } 880 881 static void afu_handle_errstate(struct work_struct *work) 882 { 883 struct cxl_afu_guest *afu_guest = 884 container_of(to_delayed_work(work), struct cxl_afu_guest, work_err); 885 886 if (!afu_update_state(afu_guest->parent) && 887 afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE) 888 return; 889 890 if (afu_guest->handle_err) 891 schedule_delayed_work(&afu_guest->work_err, 892 msecs_to_jiffies(3000)); 893 } 894 895 static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu) 896 { 897 int state; 898 899 if (afu && (!afu_read_error_state(afu, &state))) { 900 if (state == H_STATE_NORMAL) 901 return true; 902 } 903 904 return false; 905 } 906 907 static int afu_properties_look_ok(struct cxl_afu *afu) 908 { 909 if (afu->pp_irqs < 0) { 910 dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n"); 911 return -EINVAL; 912 } 913 914 if (afu->max_procs_virtualised < 1) { 915 dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n"); 916 return -EINVAL; 917 } 918 919 if (afu->crs_len < 0) { 920 dev_err(&afu->dev, "Unexpected configuration record size value\n"); 921 return -EINVAL; 922 } 923 924 return 0; 925 } 926 927 int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np) 928 { 929 struct cxl_afu *afu; 930 bool free = true; 931 int rc; 932 933 pr_devel("in %s - AFU(%d)\n", __func__, slice); 934 if (!(afu = cxl_alloc_afu(adapter, slice))) 935 return -ENOMEM; 936 937 if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) { 938 kfree(afu); 939 return -ENOMEM; 940 } 941 942 if ((rc = dev_set_name(&afu->dev, "afu%i.%i", 943 adapter->adapter_num, 944 slice))) 945 goto err1; 946 947 adapter->slices++; 948 949 if ((rc = cxl_of_read_afu_handle(afu, afu_np))) 950 goto err1; 951 952 if ((rc = cxl_ops->afu_reset(afu))) 953 goto err1; 954 955 if ((rc = cxl_of_read_afu_properties(afu, afu_np))) 956 goto err1; 957 958 if ((rc = afu_properties_look_ok(afu))) 959 goto err1; 960 961 if ((rc = guest_map_slice_regs(afu))) 962 goto err1; 963 964 if ((rc = guest_register_serr_irq(afu))) 965 goto err2; 966 967 /* 968 * After we call this function we must not free the afu directly, even 969 * if it returns an error! 970 */ 971 if ((rc = cxl_register_afu(afu))) 972 goto err_put1; 973 974 if ((rc = cxl_sysfs_afu_add(afu))) 975 goto err_put1; 976 977 /* 978 * pHyp doesn't expose the programming models supported by the 979 * AFU. pHyp currently only supports directed mode. If it adds 980 * dedicated mode later, this version of cxl has no way to 981 * detect it. So we'll initialize the driver, but the first 982 * attach will fail. 983 * Being discussed with pHyp to do better (likely new property) 984 */ 985 if (afu->max_procs_virtualised == 1) 986 afu->modes_supported = CXL_MODE_DEDICATED; 987 else 988 afu->modes_supported = CXL_MODE_DIRECTED; 989 990 if ((rc = cxl_afu_select_best_mode(afu))) 991 goto err_put2; 992 993 adapter->afu[afu->slice] = afu; 994 995 afu->enabled = true; 996 997 /* 998 * wake up the cpu periodically to check the state 999 * of the AFU using "afu" stored in the guest structure. 1000 */ 1001 afu->guest->parent = afu; 1002 afu->guest->handle_err = true; 1003 INIT_DELAYED_WORK(&afu->guest->work_err, afu_handle_errstate); 1004 schedule_delayed_work(&afu->guest->work_err, msecs_to_jiffies(1000)); 1005 1006 if ((rc = cxl_pci_vphb_add(afu))) 1007 dev_info(&afu->dev, "Can't register vPHB\n"); 1008 1009 return 0; 1010 1011 err_put2: 1012 cxl_sysfs_afu_remove(afu); 1013 err_put1: 1014 device_unregister(&afu->dev); 1015 free = false; 1016 guest_release_serr_irq(afu); 1017 err2: 1018 guest_unmap_slice_regs(afu); 1019 err1: 1020 if (free) { 1021 kfree(afu->guest); 1022 kfree(afu); 1023 } 1024 return rc; 1025 } 1026 1027 void cxl_guest_remove_afu(struct cxl_afu *afu) 1028 { 1029 pr_devel("in %s - AFU(%d)\n", __func__, afu->slice); 1030 1031 if (!afu) 1032 return; 1033 1034 /* flush and stop pending job */ 1035 afu->guest->handle_err = false; 1036 flush_delayed_work(&afu->guest->work_err); 1037 1038 cxl_pci_vphb_remove(afu); 1039 cxl_sysfs_afu_remove(afu); 1040 1041 spin_lock(&afu->adapter->afu_list_lock); 1042 afu->adapter->afu[afu->slice] = NULL; 1043 spin_unlock(&afu->adapter->afu_list_lock); 1044 1045 cxl_context_detach_all(afu); 1046 cxl_ops->afu_deactivate_mode(afu, afu->current_mode); 1047 guest_release_serr_irq(afu); 1048 guest_unmap_slice_regs(afu); 1049 1050 device_unregister(&afu->dev); 1051 } 1052 1053 static void free_adapter(struct cxl *adapter) 1054 { 1055 struct irq_avail *cur; 1056 int i; 1057 1058 if (adapter->guest) { 1059 if (adapter->guest->irq_avail) { 1060 for (i = 0; i < adapter->guest->irq_nranges; i++) { 1061 cur = &adapter->guest->irq_avail[i]; 1062 kfree(cur->bitmap); 1063 } 1064 kfree(adapter->guest->irq_avail); 1065 } 1066 kfree(adapter->guest->status); 1067 kfree(adapter->guest); 1068 } 1069 cxl_remove_adapter_nr(adapter); 1070 kfree(adapter); 1071 } 1072 1073 static int properties_look_ok(struct cxl *adapter) 1074 { 1075 /* The absence of this property means that the operational 1076 * status is unknown or okay 1077 */ 1078 if (strlen(adapter->guest->status) && 1079 strcmp(adapter->guest->status, "okay")) { 1080 pr_err("ABORTING:Bad operational status of the device\n"); 1081 return -EINVAL; 1082 } 1083 1084 return 0; 1085 } 1086 1087 ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len) 1088 { 1089 return guest_collect_vpd(adapter, NULL, buf, len); 1090 } 1091 1092 void cxl_guest_remove_adapter(struct cxl *adapter) 1093 { 1094 pr_devel("in %s\n", __func__); 1095 1096 cxl_sysfs_adapter_remove(adapter); 1097 1098 cxl_guest_remove_chardev(adapter); 1099 device_unregister(&adapter->dev); 1100 } 1101 1102 static void release_adapter(struct device *dev) 1103 { 1104 free_adapter(to_cxl_adapter(dev)); 1105 } 1106 1107 struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev) 1108 { 1109 struct cxl *adapter; 1110 bool free = true; 1111 int rc; 1112 1113 if (!(adapter = cxl_alloc_adapter())) 1114 return ERR_PTR(-ENOMEM); 1115 1116 if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) { 1117 free_adapter(adapter); 1118 return ERR_PTR(-ENOMEM); 1119 } 1120 1121 adapter->slices = 0; 1122 adapter->guest->pdev = pdev; 1123 adapter->dev.parent = &pdev->dev; 1124 adapter->dev.release = release_adapter; 1125 dev_set_drvdata(&pdev->dev, adapter); 1126 1127 /* 1128 * Hypervisor controls PSL timebase initialization (p1 register). 1129 * On FW840, PSL is initialized. 1130 */ 1131 adapter->psl_timebase_synced = true; 1132 1133 if ((rc = cxl_of_read_adapter_handle(adapter, np))) 1134 goto err1; 1135 1136 if ((rc = cxl_of_read_adapter_properties(adapter, np))) 1137 goto err1; 1138 1139 if ((rc = properties_look_ok(adapter))) 1140 goto err1; 1141 1142 if ((rc = cxl_guest_add_chardev(adapter))) 1143 goto err1; 1144 1145 /* 1146 * After we call this function we must not free the adapter directly, 1147 * even if it returns an error! 1148 */ 1149 if ((rc = cxl_register_adapter(adapter))) 1150 goto err_put1; 1151 1152 if ((rc = cxl_sysfs_adapter_add(adapter))) 1153 goto err_put1; 1154 1155 /* release the context lock as the adapter is configured */ 1156 cxl_adapter_context_unlock(adapter); 1157 1158 return adapter; 1159 1160 err_put1: 1161 device_unregister(&adapter->dev); 1162 free = false; 1163 cxl_guest_remove_chardev(adapter); 1164 err1: 1165 if (free) 1166 free_adapter(adapter); 1167 return ERR_PTR(rc); 1168 } 1169 1170 void cxl_guest_reload_module(struct cxl *adapter) 1171 { 1172 struct platform_device *pdev; 1173 1174 pdev = adapter->guest->pdev; 1175 cxl_guest_remove_adapter(adapter); 1176 1177 cxl_of_probe(pdev); 1178 } 1179 1180 const struct cxl_backend_ops cxl_guest_ops = { 1181 .module = THIS_MODULE, 1182 .adapter_reset = guest_reset, 1183 .alloc_one_irq = guest_alloc_one_irq, 1184 .release_one_irq = guest_release_one_irq, 1185 .alloc_irq_ranges = guest_alloc_irq_ranges, 1186 .release_irq_ranges = guest_release_irq_ranges, 1187 .setup_irq = NULL, 1188 .handle_psl_slice_error = guest_handle_psl_slice_error, 1189 .psl_interrupt = guest_psl_irq, 1190 .ack_irq = guest_ack_irq, 1191 .attach_process = guest_attach_process, 1192 .detach_process = guest_detach_process, 1193 .update_ivtes = NULL, 1194 .support_attributes = guest_support_attributes, 1195 .link_ok = guest_link_ok, 1196 .release_afu = guest_release_afu, 1197 .afu_read_err_buffer = guest_afu_read_err_buffer, 1198 .afu_check_and_enable = guest_afu_check_and_enable, 1199 .afu_activate_mode = guest_afu_activate_mode, 1200 .afu_deactivate_mode = guest_afu_deactivate_mode, 1201 .afu_reset = guest_afu_reset, 1202 .afu_cr_read8 = guest_afu_cr_read8, 1203 .afu_cr_read16 = guest_afu_cr_read16, 1204 .afu_cr_read32 = guest_afu_cr_read32, 1205 .afu_cr_read64 = guest_afu_cr_read64, 1206 .afu_cr_write8 = guest_afu_cr_write8, 1207 .afu_cr_write16 = guest_afu_cr_write16, 1208 .afu_cr_write32 = guest_afu_cr_write32, 1209 .read_adapter_vpd = cxl_guest_read_adapter_vpd, 1210 }; 1211