1 /* 2 * Copyright 2015 IBM Corp. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public License 6 * as published by the Free Software Foundation; either version 7 * 2 of the License, or (at your option) any later version. 8 */ 9 10 #include <linux/spinlock.h> 11 #include <linux/uaccess.h> 12 #include <linux/delay.h> 13 14 #include "cxl.h" 15 #include "hcalls.h" 16 #include "trace.h" 17 18 #define CXL_ERROR_DETECTED_EVENT 1 19 #define CXL_SLOT_RESET_EVENT 2 20 #define CXL_RESUME_EVENT 3 21 22 static void pci_error_handlers(struct cxl_afu *afu, 23 int bus_error_event, 24 pci_channel_state_t state) 25 { 26 struct pci_dev *afu_dev; 27 28 if (afu->phb == NULL) 29 return; 30 31 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { 32 if (!afu_dev->driver) 33 continue; 34 35 switch (bus_error_event) { 36 case CXL_ERROR_DETECTED_EVENT: 37 afu_dev->error_state = state; 38 39 if (afu_dev->driver->err_handler && 40 afu_dev->driver->err_handler->error_detected) 41 afu_dev->driver->err_handler->error_detected(afu_dev, state); 42 break; 43 case CXL_SLOT_RESET_EVENT: 44 afu_dev->error_state = state; 45 46 if (afu_dev->driver->err_handler && 47 afu_dev->driver->err_handler->slot_reset) 48 afu_dev->driver->err_handler->slot_reset(afu_dev); 49 break; 50 case CXL_RESUME_EVENT: 51 if (afu_dev->driver->err_handler && 52 afu_dev->driver->err_handler->resume) 53 afu_dev->driver->err_handler->resume(afu_dev); 54 break; 55 } 56 } 57 } 58 59 static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, 60 u64 errstat) 61 { 62 pr_devel("in %s\n", __func__); 63 dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat); 64 65 return cxl_ops->ack_irq(ctx, 0, errstat); 66 } 67 68 static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu, 69 void *buf, size_t len) 70 { 71 unsigned int entries, mod; 72 unsigned long **vpd_buf = NULL; 73 struct sg_list *le; 74 int rc = 0, i, tocopy; 75 u64 out = 0; 76 77 if (buf == NULL) 78 return -EINVAL; 79 80 /* number of entries in the list */ 81 entries = len / SG_BUFFER_SIZE; 82 mod = len % SG_BUFFER_SIZE; 83 if (mod) 84 entries++; 85 86 if (entries > SG_MAX_ENTRIES) { 87 entries = SG_MAX_ENTRIES; 88 len = SG_MAX_ENTRIES * SG_BUFFER_SIZE; 89 mod = 0; 90 } 91 92 vpd_buf = kcalloc(entries, sizeof(unsigned long *), GFP_KERNEL); 93 if (!vpd_buf) 94 return -ENOMEM; 95 96 le = (struct sg_list *)get_zeroed_page(GFP_KERNEL); 97 if (!le) { 98 rc = -ENOMEM; 99 goto err1; 100 } 101 102 for (i = 0; i < entries; i++) { 103 vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL); 104 if (!vpd_buf[i]) { 105 rc = -ENOMEM; 106 goto err2; 107 } 108 le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i])); 109 le[i].len = cpu_to_be64(SG_BUFFER_SIZE); 110 if ((i == (entries - 1)) && mod) 111 le[i].len = cpu_to_be64(mod); 112 } 113 114 if (adapter) 115 rc = cxl_h_collect_vpd_adapter(adapter->guest->handle, 116 virt_to_phys(le), entries, &out); 117 else 118 rc = cxl_h_collect_vpd(afu->guest->handle, 0, 119 virt_to_phys(le), entries, &out); 120 pr_devel("length of available (entries: %i), vpd: %#llx\n", 121 entries, out); 122 123 if (!rc) { 124 /* 125 * hcall returns in 'out' the size of available VPDs. 126 * It fills the buffer with as much data as possible. 127 */ 128 if (out < len) 129 len = out; 130 rc = len; 131 if (out) { 132 for (i = 0; i < entries; i++) { 133 if (len < SG_BUFFER_SIZE) 134 tocopy = len; 135 else 136 tocopy = SG_BUFFER_SIZE; 137 memcpy(buf, vpd_buf[i], tocopy); 138 buf += tocopy; 139 len -= tocopy; 140 } 141 } 142 } 143 err2: 144 for (i = 0; i < entries; i++) { 145 if (vpd_buf[i]) 146 free_page((unsigned long) vpd_buf[i]); 147 } 148 free_page((unsigned long) le); 149 err1: 150 kfree(vpd_buf); 151 return rc; 152 } 153 154 static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info) 155 { 156 return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info); 157 } 158 159 static irqreturn_t guest_psl_irq(int irq, void *data) 160 { 161 struct cxl_context *ctx = data; 162 struct cxl_irq_info irq_info; 163 int rc; 164 165 pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq); 166 rc = guest_get_irq_info(ctx, &irq_info); 167 if (rc) { 168 WARN(1, "Unable to get IRQ info: %i\n", rc); 169 return IRQ_HANDLED; 170 } 171 172 rc = cxl_irq_psl8(irq, ctx, &irq_info); 173 return rc; 174 } 175 176 static int afu_read_error_state(struct cxl_afu *afu, int *state_out) 177 { 178 u64 state; 179 int rc = 0; 180 181 if (!afu) 182 return -EIO; 183 184 rc = cxl_h_read_error_state(afu->guest->handle, &state); 185 if (!rc) { 186 WARN_ON(state != H_STATE_NORMAL && 187 state != H_STATE_DISABLE && 188 state != H_STATE_TEMP_UNAVAILABLE && 189 state != H_STATE_PERM_UNAVAILABLE); 190 *state_out = state & 0xffffffff; 191 } 192 return rc; 193 } 194 195 static irqreturn_t guest_slice_irq_err(int irq, void *data) 196 { 197 struct cxl_afu *afu = data; 198 int rc; 199 u64 serr, afu_error, dsisr; 200 201 rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr); 202 if (rc) { 203 dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc); 204 return IRQ_HANDLED; 205 } 206 afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An); 207 dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); 208 cxl_afu_decode_psl_serr(afu, serr); 209 dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error); 210 dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr); 211 212 rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr); 213 if (rc) 214 dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n", 215 rc); 216 217 return IRQ_HANDLED; 218 } 219 220 221 static int irq_alloc_range(struct cxl *adapter, int len, int *irq) 222 { 223 int i, n; 224 struct irq_avail *cur; 225 226 for (i = 0; i < adapter->guest->irq_nranges; i++) { 227 cur = &adapter->guest->irq_avail[i]; 228 n = bitmap_find_next_zero_area(cur->bitmap, cur->range, 229 0, len, 0); 230 if (n < cur->range) { 231 bitmap_set(cur->bitmap, n, len); 232 *irq = cur->offset + n; 233 pr_devel("guest: allocate IRQs %#x->%#x\n", 234 *irq, *irq + len - 1); 235 236 return 0; 237 } 238 } 239 return -ENOSPC; 240 } 241 242 static int irq_free_range(struct cxl *adapter, int irq, int len) 243 { 244 int i, n; 245 struct irq_avail *cur; 246 247 if (len == 0) 248 return -ENOENT; 249 250 for (i = 0; i < adapter->guest->irq_nranges; i++) { 251 cur = &adapter->guest->irq_avail[i]; 252 if (irq >= cur->offset && 253 (irq + len) <= (cur->offset + cur->range)) { 254 n = irq - cur->offset; 255 bitmap_clear(cur->bitmap, n, len); 256 pr_devel("guest: release IRQs %#x->%#x\n", 257 irq, irq + len - 1); 258 return 0; 259 } 260 } 261 return -ENOENT; 262 } 263 264 static int guest_reset(struct cxl *adapter) 265 { 266 struct cxl_afu *afu = NULL; 267 int i, rc; 268 269 pr_devel("Adapter reset request\n"); 270 spin_lock(&adapter->afu_list_lock); 271 for (i = 0; i < adapter->slices; i++) { 272 if ((afu = adapter->afu[i])) { 273 pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, 274 pci_channel_io_frozen); 275 cxl_context_detach_all(afu); 276 } 277 } 278 279 rc = cxl_h_reset_adapter(adapter->guest->handle); 280 for (i = 0; i < adapter->slices; i++) { 281 if (!rc && (afu = adapter->afu[i])) { 282 pci_error_handlers(afu, CXL_SLOT_RESET_EVENT, 283 pci_channel_io_normal); 284 pci_error_handlers(afu, CXL_RESUME_EVENT, 0); 285 } 286 } 287 spin_unlock(&adapter->afu_list_lock); 288 return rc; 289 } 290 291 static int guest_alloc_one_irq(struct cxl *adapter) 292 { 293 int irq; 294 295 spin_lock(&adapter->guest->irq_alloc_lock); 296 if (irq_alloc_range(adapter, 1, &irq)) 297 irq = -ENOSPC; 298 spin_unlock(&adapter->guest->irq_alloc_lock); 299 return irq; 300 } 301 302 static void guest_release_one_irq(struct cxl *adapter, int irq) 303 { 304 spin_lock(&adapter->guest->irq_alloc_lock); 305 irq_free_range(adapter, irq, 1); 306 spin_unlock(&adapter->guest->irq_alloc_lock); 307 } 308 309 static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs, 310 struct cxl *adapter, unsigned int num) 311 { 312 int i, try, irq; 313 314 memset(irqs, 0, sizeof(struct cxl_irq_ranges)); 315 316 spin_lock(&adapter->guest->irq_alloc_lock); 317 for (i = 0; i < CXL_IRQ_RANGES && num; i++) { 318 try = num; 319 while (try) { 320 if (irq_alloc_range(adapter, try, &irq) == 0) 321 break; 322 try /= 2; 323 } 324 if (!try) 325 goto error; 326 irqs->offset[i] = irq; 327 irqs->range[i] = try; 328 num -= try; 329 } 330 if (num) 331 goto error; 332 spin_unlock(&adapter->guest->irq_alloc_lock); 333 return 0; 334 335 error: 336 for (i = 0; i < CXL_IRQ_RANGES; i++) 337 irq_free_range(adapter, irqs->offset[i], irqs->range[i]); 338 spin_unlock(&adapter->guest->irq_alloc_lock); 339 return -ENOSPC; 340 } 341 342 static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs, 343 struct cxl *adapter) 344 { 345 int i; 346 347 spin_lock(&adapter->guest->irq_alloc_lock); 348 for (i = 0; i < CXL_IRQ_RANGES; i++) 349 irq_free_range(adapter, irqs->offset[i], irqs->range[i]); 350 spin_unlock(&adapter->guest->irq_alloc_lock); 351 } 352 353 static int guest_register_serr_irq(struct cxl_afu *afu) 354 { 355 afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", 356 dev_name(&afu->dev)); 357 if (!afu->err_irq_name) 358 return -ENOMEM; 359 360 if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq, 361 guest_slice_irq_err, afu, afu->err_irq_name))) { 362 kfree(afu->err_irq_name); 363 afu->err_irq_name = NULL; 364 return -ENOMEM; 365 } 366 367 return 0; 368 } 369 370 static void guest_release_serr_irq(struct cxl_afu *afu) 371 { 372 cxl_unmap_irq(afu->serr_virq, afu); 373 cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); 374 kfree(afu->err_irq_name); 375 } 376 377 static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask) 378 { 379 return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token, 380 tfc >> 32, (psl_reset_mask != 0)); 381 } 382 383 static void disable_afu_irqs(struct cxl_context *ctx) 384 { 385 irq_hw_number_t hwirq; 386 unsigned int virq; 387 int r, i; 388 389 pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice); 390 for (r = 0; r < CXL_IRQ_RANGES; r++) { 391 hwirq = ctx->irqs.offset[r]; 392 for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { 393 virq = irq_find_mapping(NULL, hwirq); 394 disable_irq(virq); 395 } 396 } 397 } 398 399 static void enable_afu_irqs(struct cxl_context *ctx) 400 { 401 irq_hw_number_t hwirq; 402 unsigned int virq; 403 int r, i; 404 405 pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice); 406 for (r = 0; r < CXL_IRQ_RANGES; r++) { 407 hwirq = ctx->irqs.offset[r]; 408 for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { 409 virq = irq_find_mapping(NULL, hwirq); 410 enable_irq(virq); 411 } 412 } 413 } 414 415 static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx, 416 u64 offset, u64 *val) 417 { 418 unsigned long cr; 419 char c; 420 int rc = 0; 421 422 if (afu->crs_len < sz) 423 return -ENOENT; 424 425 if (unlikely(offset >= afu->crs_len)) 426 return -ERANGE; 427 428 cr = get_zeroed_page(GFP_KERNEL); 429 if (!cr) 430 return -ENOMEM; 431 432 rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset, 433 virt_to_phys((void *)cr), sz); 434 if (rc) 435 goto err; 436 437 switch (sz) { 438 case 1: 439 c = *((char *) cr); 440 *val = c; 441 break; 442 case 2: 443 *val = in_le16((u16 *)cr); 444 break; 445 case 4: 446 *val = in_le32((unsigned *)cr); 447 break; 448 case 8: 449 *val = in_le64((u64 *)cr); 450 break; 451 default: 452 WARN_ON(1); 453 } 454 err: 455 free_page(cr); 456 return rc; 457 } 458 459 static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset, 460 u32 *out) 461 { 462 int rc; 463 u64 val; 464 465 rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val); 466 if (!rc) 467 *out = (u32) val; 468 return rc; 469 } 470 471 static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset, 472 u16 *out) 473 { 474 int rc; 475 u64 val; 476 477 rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val); 478 if (!rc) 479 *out = (u16) val; 480 return rc; 481 } 482 483 static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset, 484 u8 *out) 485 { 486 int rc; 487 u64 val; 488 489 rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val); 490 if (!rc) 491 *out = (u8) val; 492 return rc; 493 } 494 495 static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset, 496 u64 *out) 497 { 498 return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out); 499 } 500 501 static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in) 502 { 503 /* config record is not writable from guest */ 504 return -EPERM; 505 } 506 507 static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in) 508 { 509 /* config record is not writable from guest */ 510 return -EPERM; 511 } 512 513 static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in) 514 { 515 /* config record is not writable from guest */ 516 return -EPERM; 517 } 518 519 static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr) 520 { 521 struct cxl_process_element_hcall *elem; 522 struct cxl *adapter = ctx->afu->adapter; 523 const struct cred *cred; 524 u32 pid, idx; 525 int rc, r, i; 526 u64 mmio_addr, mmio_size; 527 __be64 flags = 0; 528 529 /* Must be 8 byte aligned and cannot cross a 4096 byte boundary */ 530 if (!(elem = (struct cxl_process_element_hcall *) 531 get_zeroed_page(GFP_KERNEL))) 532 return -ENOMEM; 533 534 elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION); 535 if (ctx->kernel) { 536 pid = 0; 537 flags |= CXL_PE_TRANSLATION_ENABLED; 538 flags |= CXL_PE_PRIVILEGED_PROCESS; 539 if (mfmsr() & MSR_SF) 540 flags |= CXL_PE_64_BIT; 541 } else { 542 pid = current->pid; 543 flags |= CXL_PE_PROBLEM_STATE; 544 flags |= CXL_PE_TRANSLATION_ENABLED; 545 if (!test_tsk_thread_flag(current, TIF_32BIT)) 546 flags |= CXL_PE_64_BIT; 547 cred = get_current_cred(); 548 if (uid_eq(cred->euid, GLOBAL_ROOT_UID)) 549 flags |= CXL_PE_PRIVILEGED_PROCESS; 550 put_cred(cred); 551 } 552 elem->flags = cpu_to_be64(flags); 553 elem->common.tid = cpu_to_be32(0); /* Unused */ 554 elem->common.pid = cpu_to_be32(pid); 555 elem->common.csrp = cpu_to_be64(0); /* disable */ 556 elem->common.u.psl8.aurp0 = cpu_to_be64(0); /* disable */ 557 elem->common.u.psl8.aurp1 = cpu_to_be64(0); /* disable */ 558 559 cxl_prefault(ctx, wed); 560 561 elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0); 562 elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1); 563 564 /* 565 * Ensure we have at least one interrupt allocated to take faults for 566 * kernel contexts that may not have allocated any AFU IRQs at all: 567 */ 568 if (ctx->irqs.range[0] == 0) { 569 rc = afu_register_irqs(ctx, 0); 570 if (rc) 571 goto out_free; 572 } 573 574 for (r = 0; r < CXL_IRQ_RANGES; r++) { 575 for (i = 0; i < ctx->irqs.range[r]; i++) { 576 if (r == 0 && i == 0) { 577 elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]); 578 } else { 579 idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset; 580 elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8); 581 } 582 } 583 } 584 elem->common.amr = cpu_to_be64(amr); 585 elem->common.wed = cpu_to_be64(wed); 586 587 disable_afu_irqs(ctx); 588 589 rc = cxl_h_attach_process(ctx->afu->guest->handle, elem, 590 &ctx->process_token, &mmio_addr, &mmio_size); 591 if (rc == H_SUCCESS) { 592 if (ctx->master || !ctx->afu->pp_psa) { 593 ctx->psn_phys = ctx->afu->psn_phys; 594 ctx->psn_size = ctx->afu->adapter->ps_size; 595 } else { 596 ctx->psn_phys = mmio_addr; 597 ctx->psn_size = mmio_size; 598 } 599 if (ctx->afu->pp_psa && mmio_size && 600 ctx->afu->pp_size == 0) { 601 /* 602 * There's no property in the device tree to read the 603 * pp_size. We only find out at the 1st attach. 604 * Compared to bare-metal, it is too late and we 605 * should really lock here. However, on powerVM, 606 * pp_size is really only used to display in /sys. 607 * Being discussed with pHyp for their next release. 608 */ 609 ctx->afu->pp_size = mmio_size; 610 } 611 /* from PAPR: process element is bytes 4-7 of process token */ 612 ctx->external_pe = ctx->process_token & 0xFFFFFFFF; 613 pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx", 614 ctx->pe, ctx->external_pe, ctx->psn_size); 615 ctx->pe_inserted = true; 616 enable_afu_irqs(ctx); 617 } 618 619 out_free: 620 free_page((u64)elem); 621 return rc; 622 } 623 624 static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr) 625 { 626 pr_devel("in %s\n", __func__); 627 628 ctx->kernel = kernel; 629 if (ctx->afu->current_mode == CXL_MODE_DIRECTED) 630 return attach_afu_directed(ctx, wed, amr); 631 632 /* dedicated mode not supported on FW840 */ 633 634 return -EINVAL; 635 } 636 637 static int detach_afu_directed(struct cxl_context *ctx) 638 { 639 if (!ctx->pe_inserted) 640 return 0; 641 if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token)) 642 return -1; 643 return 0; 644 } 645 646 static int guest_detach_process(struct cxl_context *ctx) 647 { 648 pr_devel("in %s\n", __func__); 649 trace_cxl_detach(ctx); 650 651 if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) 652 return -EIO; 653 654 if (ctx->afu->current_mode == CXL_MODE_DIRECTED) 655 return detach_afu_directed(ctx); 656 657 return -EINVAL; 658 } 659 660 static void guest_release_afu(struct device *dev) 661 { 662 struct cxl_afu *afu = to_cxl_afu(dev); 663 664 pr_devel("%s\n", __func__); 665 666 idr_destroy(&afu->contexts_idr); 667 668 kfree(afu->guest); 669 kfree(afu); 670 } 671 672 ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len) 673 { 674 return guest_collect_vpd(NULL, afu, buf, len); 675 } 676 677 #define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE 678 static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf, 679 loff_t off, size_t count) 680 { 681 void *tbuf = NULL; 682 int rc = 0; 683 684 tbuf = (void *) get_zeroed_page(GFP_KERNEL); 685 if (!tbuf) 686 return -ENOMEM; 687 688 rc = cxl_h_get_afu_err(afu->guest->handle, 689 off & 0x7, 690 virt_to_phys(tbuf), 691 count); 692 if (rc) 693 goto err; 694 695 if (count > ERR_BUFF_MAX_COPY_SIZE) 696 count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7); 697 memcpy(buf, tbuf, count); 698 err: 699 free_page((u64)tbuf); 700 701 return rc; 702 } 703 704 static int guest_afu_check_and_enable(struct cxl_afu *afu) 705 { 706 return 0; 707 } 708 709 static bool guest_support_attributes(const char *attr_name, 710 enum cxl_attrs type) 711 { 712 switch (type) { 713 case CXL_ADAPTER_ATTRS: 714 if ((strcmp(attr_name, "base_image") == 0) || 715 (strcmp(attr_name, "load_image_on_perst") == 0) || 716 (strcmp(attr_name, "perst_reloads_same_image") == 0) || 717 (strcmp(attr_name, "image_loaded") == 0)) 718 return false; 719 break; 720 case CXL_AFU_MASTER_ATTRS: 721 if ((strcmp(attr_name, "pp_mmio_off") == 0)) 722 return false; 723 break; 724 case CXL_AFU_ATTRS: 725 break; 726 default: 727 break; 728 } 729 730 return true; 731 } 732 733 static int activate_afu_directed(struct cxl_afu *afu) 734 { 735 int rc; 736 737 dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice); 738 739 afu->current_mode = CXL_MODE_DIRECTED; 740 741 afu->num_procs = afu->max_procs_virtualised; 742 743 if ((rc = cxl_chardev_m_afu_add(afu))) 744 return rc; 745 746 if ((rc = cxl_sysfs_afu_m_add(afu))) 747 goto err; 748 749 if ((rc = cxl_chardev_s_afu_add(afu))) 750 goto err1; 751 752 return 0; 753 err1: 754 cxl_sysfs_afu_m_remove(afu); 755 err: 756 cxl_chardev_afu_remove(afu); 757 return rc; 758 } 759 760 static int guest_afu_activate_mode(struct cxl_afu *afu, int mode) 761 { 762 if (!mode) 763 return 0; 764 if (!(mode & afu->modes_supported)) 765 return -EINVAL; 766 767 if (mode == CXL_MODE_DIRECTED) 768 return activate_afu_directed(afu); 769 770 if (mode == CXL_MODE_DEDICATED) 771 dev_err(&afu->dev, "Dedicated mode not supported\n"); 772 773 return -EINVAL; 774 } 775 776 static int deactivate_afu_directed(struct cxl_afu *afu) 777 { 778 dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice); 779 780 afu->current_mode = 0; 781 afu->num_procs = 0; 782 783 cxl_sysfs_afu_m_remove(afu); 784 cxl_chardev_afu_remove(afu); 785 786 cxl_ops->afu_reset(afu); 787 788 return 0; 789 } 790 791 static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode) 792 { 793 if (!mode) 794 return 0; 795 if (!(mode & afu->modes_supported)) 796 return -EINVAL; 797 798 if (mode == CXL_MODE_DIRECTED) 799 return deactivate_afu_directed(afu); 800 return 0; 801 } 802 803 static int guest_afu_reset(struct cxl_afu *afu) 804 { 805 pr_devel("AFU(%d) reset request\n", afu->slice); 806 return cxl_h_reset_afu(afu->guest->handle); 807 } 808 809 static int guest_map_slice_regs(struct cxl_afu *afu) 810 { 811 if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) { 812 dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n", 813 afu->slice); 814 return -ENOMEM; 815 } 816 return 0; 817 } 818 819 static void guest_unmap_slice_regs(struct cxl_afu *afu) 820 { 821 if (afu->p2n_mmio) 822 iounmap(afu->p2n_mmio); 823 } 824 825 static int afu_update_state(struct cxl_afu *afu) 826 { 827 int rc, cur_state; 828 829 rc = afu_read_error_state(afu, &cur_state); 830 if (rc) 831 return rc; 832 833 if (afu->guest->previous_state == cur_state) 834 return 0; 835 836 pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state); 837 838 switch (cur_state) { 839 case H_STATE_NORMAL: 840 afu->guest->previous_state = cur_state; 841 break; 842 843 case H_STATE_DISABLE: 844 pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, 845 pci_channel_io_frozen); 846 847 cxl_context_detach_all(afu); 848 if ((rc = cxl_ops->afu_reset(afu))) 849 pr_devel("reset hcall failed %d\n", rc); 850 851 rc = afu_read_error_state(afu, &cur_state); 852 if (!rc && cur_state == H_STATE_NORMAL) { 853 pci_error_handlers(afu, CXL_SLOT_RESET_EVENT, 854 pci_channel_io_normal); 855 pci_error_handlers(afu, CXL_RESUME_EVENT, 0); 856 } 857 afu->guest->previous_state = 0; 858 break; 859 860 case H_STATE_TEMP_UNAVAILABLE: 861 afu->guest->previous_state = cur_state; 862 break; 863 864 case H_STATE_PERM_UNAVAILABLE: 865 dev_err(&afu->dev, "AFU is in permanent error state\n"); 866 pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, 867 pci_channel_io_perm_failure); 868 afu->guest->previous_state = cur_state; 869 break; 870 871 default: 872 pr_err("Unexpected AFU(%d) error state: %#x\n", 873 afu->slice, cur_state); 874 return -EINVAL; 875 } 876 877 return rc; 878 } 879 880 static void afu_handle_errstate(struct work_struct *work) 881 { 882 struct cxl_afu_guest *afu_guest = 883 container_of(to_delayed_work(work), struct cxl_afu_guest, work_err); 884 885 if (!afu_update_state(afu_guest->parent) && 886 afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE) 887 return; 888 889 if (afu_guest->handle_err) 890 schedule_delayed_work(&afu_guest->work_err, 891 msecs_to_jiffies(3000)); 892 } 893 894 static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu) 895 { 896 int state; 897 898 if (afu && (!afu_read_error_state(afu, &state))) { 899 if (state == H_STATE_NORMAL) 900 return true; 901 } 902 903 return false; 904 } 905 906 static int afu_properties_look_ok(struct cxl_afu *afu) 907 { 908 if (afu->pp_irqs < 0) { 909 dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n"); 910 return -EINVAL; 911 } 912 913 if (afu->max_procs_virtualised < 1) { 914 dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n"); 915 return -EINVAL; 916 } 917 918 return 0; 919 } 920 921 int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np) 922 { 923 struct cxl_afu *afu; 924 bool free = true; 925 int rc; 926 927 pr_devel("in %s - AFU(%d)\n", __func__, slice); 928 if (!(afu = cxl_alloc_afu(adapter, slice))) 929 return -ENOMEM; 930 931 if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) { 932 kfree(afu); 933 return -ENOMEM; 934 } 935 936 if ((rc = dev_set_name(&afu->dev, "afu%i.%i", 937 adapter->adapter_num, 938 slice))) 939 goto err1; 940 941 adapter->slices++; 942 943 if ((rc = cxl_of_read_afu_handle(afu, afu_np))) 944 goto err1; 945 946 if ((rc = cxl_ops->afu_reset(afu))) 947 goto err1; 948 949 if ((rc = cxl_of_read_afu_properties(afu, afu_np))) 950 goto err1; 951 952 if ((rc = afu_properties_look_ok(afu))) 953 goto err1; 954 955 if ((rc = guest_map_slice_regs(afu))) 956 goto err1; 957 958 if ((rc = guest_register_serr_irq(afu))) 959 goto err2; 960 961 /* 962 * After we call this function we must not free the afu directly, even 963 * if it returns an error! 964 */ 965 if ((rc = cxl_register_afu(afu))) 966 goto err_put1; 967 968 if ((rc = cxl_sysfs_afu_add(afu))) 969 goto err_put1; 970 971 /* 972 * pHyp doesn't expose the programming models supported by the 973 * AFU. pHyp currently only supports directed mode. If it adds 974 * dedicated mode later, this version of cxl has no way to 975 * detect it. So we'll initialize the driver, but the first 976 * attach will fail. 977 * Being discussed with pHyp to do better (likely new property) 978 */ 979 if (afu->max_procs_virtualised == 1) 980 afu->modes_supported = CXL_MODE_DEDICATED; 981 else 982 afu->modes_supported = CXL_MODE_DIRECTED; 983 984 if ((rc = cxl_afu_select_best_mode(afu))) 985 goto err_put2; 986 987 adapter->afu[afu->slice] = afu; 988 989 afu->enabled = true; 990 991 /* 992 * wake up the cpu periodically to check the state 993 * of the AFU using "afu" stored in the guest structure. 994 */ 995 afu->guest->parent = afu; 996 afu->guest->handle_err = true; 997 INIT_DELAYED_WORK(&afu->guest->work_err, afu_handle_errstate); 998 schedule_delayed_work(&afu->guest->work_err, msecs_to_jiffies(1000)); 999 1000 if ((rc = cxl_pci_vphb_add(afu))) 1001 dev_info(&afu->dev, "Can't register vPHB\n"); 1002 1003 return 0; 1004 1005 err_put2: 1006 cxl_sysfs_afu_remove(afu); 1007 err_put1: 1008 device_unregister(&afu->dev); 1009 free = false; 1010 guest_release_serr_irq(afu); 1011 err2: 1012 guest_unmap_slice_regs(afu); 1013 err1: 1014 if (free) { 1015 kfree(afu->guest); 1016 kfree(afu); 1017 } 1018 return rc; 1019 } 1020 1021 void cxl_guest_remove_afu(struct cxl_afu *afu) 1022 { 1023 if (!afu) 1024 return; 1025 1026 /* flush and stop pending job */ 1027 afu->guest->handle_err = false; 1028 flush_delayed_work(&afu->guest->work_err); 1029 1030 cxl_pci_vphb_remove(afu); 1031 cxl_sysfs_afu_remove(afu); 1032 1033 spin_lock(&afu->adapter->afu_list_lock); 1034 afu->adapter->afu[afu->slice] = NULL; 1035 spin_unlock(&afu->adapter->afu_list_lock); 1036 1037 cxl_context_detach_all(afu); 1038 cxl_ops->afu_deactivate_mode(afu, afu->current_mode); 1039 guest_release_serr_irq(afu); 1040 guest_unmap_slice_regs(afu); 1041 1042 device_unregister(&afu->dev); 1043 } 1044 1045 static void free_adapter(struct cxl *adapter) 1046 { 1047 struct irq_avail *cur; 1048 int i; 1049 1050 if (adapter->guest) { 1051 if (adapter->guest->irq_avail) { 1052 for (i = 0; i < adapter->guest->irq_nranges; i++) { 1053 cur = &adapter->guest->irq_avail[i]; 1054 kfree(cur->bitmap); 1055 } 1056 kfree(adapter->guest->irq_avail); 1057 } 1058 kfree(adapter->guest->status); 1059 kfree(adapter->guest); 1060 } 1061 cxl_remove_adapter_nr(adapter); 1062 kfree(adapter); 1063 } 1064 1065 static int properties_look_ok(struct cxl *adapter) 1066 { 1067 /* The absence of this property means that the operational 1068 * status is unknown or okay 1069 */ 1070 if (strlen(adapter->guest->status) && 1071 strcmp(adapter->guest->status, "okay")) { 1072 pr_err("ABORTING:Bad operational status of the device\n"); 1073 return -EINVAL; 1074 } 1075 1076 return 0; 1077 } 1078 1079 ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len) 1080 { 1081 return guest_collect_vpd(adapter, NULL, buf, len); 1082 } 1083 1084 void cxl_guest_remove_adapter(struct cxl *adapter) 1085 { 1086 pr_devel("in %s\n", __func__); 1087 1088 cxl_sysfs_adapter_remove(adapter); 1089 1090 cxl_guest_remove_chardev(adapter); 1091 device_unregister(&adapter->dev); 1092 } 1093 1094 static void release_adapter(struct device *dev) 1095 { 1096 free_adapter(to_cxl_adapter(dev)); 1097 } 1098 1099 struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev) 1100 { 1101 struct cxl *adapter; 1102 bool free = true; 1103 int rc; 1104 1105 if (!(adapter = cxl_alloc_adapter())) 1106 return ERR_PTR(-ENOMEM); 1107 1108 if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) { 1109 free_adapter(adapter); 1110 return ERR_PTR(-ENOMEM); 1111 } 1112 1113 adapter->slices = 0; 1114 adapter->guest->pdev = pdev; 1115 adapter->dev.parent = &pdev->dev; 1116 adapter->dev.release = release_adapter; 1117 dev_set_drvdata(&pdev->dev, adapter); 1118 1119 /* 1120 * Hypervisor controls PSL timebase initialization (p1 register). 1121 * On FW840, PSL is initialized. 1122 */ 1123 adapter->psl_timebase_synced = true; 1124 1125 if ((rc = cxl_of_read_adapter_handle(adapter, np))) 1126 goto err1; 1127 1128 if ((rc = cxl_of_read_adapter_properties(adapter, np))) 1129 goto err1; 1130 1131 if ((rc = properties_look_ok(adapter))) 1132 goto err1; 1133 1134 if ((rc = cxl_guest_add_chardev(adapter))) 1135 goto err1; 1136 1137 /* 1138 * After we call this function we must not free the adapter directly, 1139 * even if it returns an error! 1140 */ 1141 if ((rc = cxl_register_adapter(adapter))) 1142 goto err_put1; 1143 1144 if ((rc = cxl_sysfs_adapter_add(adapter))) 1145 goto err_put1; 1146 1147 /* release the context lock as the adapter is configured */ 1148 cxl_adapter_context_unlock(adapter); 1149 1150 return adapter; 1151 1152 err_put1: 1153 device_unregister(&adapter->dev); 1154 free = false; 1155 cxl_guest_remove_chardev(adapter); 1156 err1: 1157 if (free) 1158 free_adapter(adapter); 1159 return ERR_PTR(rc); 1160 } 1161 1162 void cxl_guest_reload_module(struct cxl *adapter) 1163 { 1164 struct platform_device *pdev; 1165 1166 pdev = adapter->guest->pdev; 1167 cxl_guest_remove_adapter(adapter); 1168 1169 cxl_of_probe(pdev); 1170 } 1171 1172 const struct cxl_backend_ops cxl_guest_ops = { 1173 .module = THIS_MODULE, 1174 .adapter_reset = guest_reset, 1175 .alloc_one_irq = guest_alloc_one_irq, 1176 .release_one_irq = guest_release_one_irq, 1177 .alloc_irq_ranges = guest_alloc_irq_ranges, 1178 .release_irq_ranges = guest_release_irq_ranges, 1179 .setup_irq = NULL, 1180 .handle_psl_slice_error = guest_handle_psl_slice_error, 1181 .psl_interrupt = guest_psl_irq, 1182 .ack_irq = guest_ack_irq, 1183 .attach_process = guest_attach_process, 1184 .detach_process = guest_detach_process, 1185 .update_ivtes = NULL, 1186 .support_attributes = guest_support_attributes, 1187 .link_ok = guest_link_ok, 1188 .release_afu = guest_release_afu, 1189 .afu_read_err_buffer = guest_afu_read_err_buffer, 1190 .afu_check_and_enable = guest_afu_check_and_enable, 1191 .afu_activate_mode = guest_afu_activate_mode, 1192 .afu_deactivate_mode = guest_afu_deactivate_mode, 1193 .afu_reset = guest_afu_reset, 1194 .afu_cr_read8 = guest_afu_cr_read8, 1195 .afu_cr_read16 = guest_afu_cr_read16, 1196 .afu_cr_read32 = guest_afu_cr_read32, 1197 .afu_cr_read64 = guest_afu_cr_read64, 1198 .afu_cr_write8 = guest_afu_cr_write8, 1199 .afu_cr_write16 = guest_afu_cr_write16, 1200 .afu_cr_write32 = guest_afu_cr_write32, 1201 .read_adapter_vpd = cxl_guest_read_adapter_vpd, 1202 }; 1203