1 /* 2 * Copyright 2014 IBM Corp. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public License 6 * as published by the Free Software Foundation; either version 7 * 2 of the License, or (at your option) any later version. 8 */ 9 10 #include <linux/spinlock.h> 11 #include <linux/sched.h> 12 #include <linux/slab.h> 13 #include <linux/sched.h> 14 #include <linux/mutex.h> 15 #include <linux/mm.h> 16 #include <linux/uaccess.h> 17 #include <linux/delay.h> 18 #include <asm/synch.h> 19 #include <misc/cxl-base.h> 20 21 #include "cxl.h" 22 #include "trace.h" 23 24 static int afu_control(struct cxl_afu *afu, u64 command, 25 u64 result, u64 mask, bool enabled) 26 { 27 u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); 28 unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); 29 int rc = 0; 30 31 spin_lock(&afu->afu_cntl_lock); 32 pr_devel("AFU command starting: %llx\n", command); 33 34 trace_cxl_afu_ctrl(afu, command); 35 36 cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl | command); 37 38 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); 39 while ((AFU_Cntl & mask) != result) { 40 if (time_after_eq(jiffies, timeout)) { 41 dev_warn(&afu->dev, "WARNING: AFU control timed out!\n"); 42 rc = -EBUSY; 43 goto out; 44 } 45 46 if (!cxl_ops->link_ok(afu->adapter, afu)) { 47 afu->enabled = enabled; 48 rc = -EIO; 49 goto out; 50 } 51 52 pr_devel_ratelimited("AFU control... (0x%016llx)\n", 53 AFU_Cntl | command); 54 cpu_relax(); 55 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); 56 }; 57 pr_devel("AFU command complete: %llx\n", command); 58 afu->enabled = enabled; 59 out: 60 trace_cxl_afu_ctrl_done(afu, command, rc); 61 spin_unlock(&afu->afu_cntl_lock); 62 63 return rc; 64 } 65 66 static int afu_enable(struct cxl_afu *afu) 67 { 68 pr_devel("AFU enable request\n"); 69 70 return afu_control(afu, CXL_AFU_Cntl_An_E, 71 CXL_AFU_Cntl_An_ES_Enabled, 72 CXL_AFU_Cntl_An_ES_MASK, true); 73 } 74 75 int cxl_afu_disable(struct cxl_afu *afu) 76 { 77 pr_devel("AFU disable request\n"); 78 79 return afu_control(afu, 0, CXL_AFU_Cntl_An_ES_Disabled, 80 CXL_AFU_Cntl_An_ES_MASK, false); 81 } 82 83 /* This will disable as well as reset */ 84 static int native_afu_reset(struct cxl_afu *afu) 85 { 86 pr_devel("AFU reset request\n"); 87 88 return afu_control(afu, CXL_AFU_Cntl_An_RA, 89 CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled, 90 CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK, 91 false); 92 } 93 94 static int native_afu_check_and_enable(struct cxl_afu *afu) 95 { 96 if (!cxl_ops->link_ok(afu->adapter, afu)) { 97 WARN(1, "Refusing to enable afu while link down!\n"); 98 return -EIO; 99 } 100 if (afu->enabled) 101 return 0; 102 return afu_enable(afu); 103 } 104 105 int cxl_psl_purge(struct cxl_afu *afu) 106 { 107 u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An); 108 u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An); 109 u64 dsisr, dar; 110 u64 start, end; 111 unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); 112 int rc = 0; 113 114 trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc); 115 116 pr_devel("PSL purge request\n"); 117 118 if (!cxl_ops->link_ok(afu->adapter, afu)) { 119 dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n"); 120 rc = -EIO; 121 goto out; 122 } 123 124 if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) { 125 WARN(1, "psl_purge request while AFU not disabled!\n"); 126 cxl_afu_disable(afu); 127 } 128 129 cxl_p1n_write(afu, CXL_PSL_SCNTL_An, 130 PSL_CNTL | CXL_PSL_SCNTL_An_Pc); 131 start = local_clock(); 132 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An); 133 while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK) 134 == CXL_PSL_SCNTL_An_Ps_Pending) { 135 if (time_after_eq(jiffies, timeout)) { 136 dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n"); 137 rc = -EBUSY; 138 goto out; 139 } 140 if (!cxl_ops->link_ok(afu->adapter, afu)) { 141 rc = -EIO; 142 goto out; 143 } 144 145 dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); 146 pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx PSL_DSISR: 0x%016llx\n", PSL_CNTL, dsisr); 147 if (dsisr & CXL_PSL_DSISR_TRANS) { 148 dar = cxl_p2n_read(afu, CXL_PSL_DAR_An); 149 dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n", dsisr, dar); 150 cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE); 151 } else if (dsisr) { 152 dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n", dsisr); 153 cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); 154 } else { 155 cpu_relax(); 156 } 157 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An); 158 }; 159 end = local_clock(); 160 pr_devel("PSL purged in %lld ns\n", end - start); 161 162 cxl_p1n_write(afu, CXL_PSL_SCNTL_An, 163 PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc); 164 out: 165 trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc); 166 return rc; 167 } 168 169 static int spa_max_procs(int spa_size) 170 { 171 /* 172 * From the CAIA: 173 * end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255 174 * Most of that junk is really just an overly-complicated way of saying 175 * the last 256 bytes are __aligned(128), so it's really: 176 * end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255 177 * and 178 * end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1 179 * so 180 * sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256 181 * Ignore the alignment (which is safe in this case as long as we are 182 * careful with our rounding) and solve for n: 183 */ 184 return ((spa_size / 8) - 96) / 17; 185 } 186 187 int cxl_alloc_spa(struct cxl_afu *afu) 188 { 189 unsigned spa_size; 190 191 /* Work out how many pages to allocate */ 192 afu->native->spa_order = 0; 193 do { 194 afu->native->spa_order++; 195 spa_size = (1 << afu->native->spa_order) * PAGE_SIZE; 196 197 if (spa_size > 0x100000) { 198 dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n", 199 afu->native->spa_max_procs, afu->native->spa_size); 200 afu->num_procs = afu->native->spa_max_procs; 201 break; 202 } 203 204 afu->native->spa_size = spa_size; 205 afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size); 206 } while (afu->native->spa_max_procs < afu->num_procs); 207 208 if (!(afu->native->spa = (struct cxl_process_element *) 209 __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) { 210 pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n"); 211 return -ENOMEM; 212 } 213 pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n", 214 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs); 215 216 return 0; 217 } 218 219 static void attach_spa(struct cxl_afu *afu) 220 { 221 u64 spap; 222 223 afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa + 224 ((afu->native->spa_max_procs + 3) * 128)); 225 226 spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr; 227 spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size; 228 spap |= CXL_PSL_SPAP_V; 229 pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", 230 afu->native->spa, afu->native->spa_max_procs, 231 afu->native->sw_command_status, spap); 232 cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap); 233 } 234 235 static inline void detach_spa(struct cxl_afu *afu) 236 { 237 cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0); 238 } 239 240 void cxl_release_spa(struct cxl_afu *afu) 241 { 242 if (afu->native->spa) { 243 free_pages((unsigned long) afu->native->spa, 244 afu->native->spa_order); 245 afu->native->spa = NULL; 246 } 247 } 248 249 int cxl_tlb_slb_invalidate(struct cxl *adapter) 250 { 251 unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); 252 253 pr_devel("CXL adapter wide TLBIA & SLBIA\n"); 254 255 cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A); 256 257 cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL); 258 while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) { 259 if (time_after_eq(jiffies, timeout)) { 260 dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n"); 261 return -EBUSY; 262 } 263 if (!cxl_ops->link_ok(adapter, NULL)) 264 return -EIO; 265 cpu_relax(); 266 } 267 268 cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL); 269 while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) { 270 if (time_after_eq(jiffies, timeout)) { 271 dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n"); 272 return -EBUSY; 273 } 274 if (!cxl_ops->link_ok(adapter, NULL)) 275 return -EIO; 276 cpu_relax(); 277 } 278 return 0; 279 } 280 281 static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1) 282 { 283 int rc; 284 285 /* 1. Disable SSTP by writing 0 to SSTP1[V] */ 286 cxl_p2n_write(afu, CXL_SSTP1_An, 0); 287 288 /* 2. Invalidate all SLB entries */ 289 if ((rc = cxl_afu_slbia(afu))) 290 return rc; 291 292 /* 3. Set SSTP0_An */ 293 cxl_p2n_write(afu, CXL_SSTP0_An, sstp0); 294 295 /* 4. Set SSTP1_An */ 296 cxl_p2n_write(afu, CXL_SSTP1_An, sstp1); 297 298 return 0; 299 } 300 301 /* Using per slice version may improve performance here. (ie. SLBIA_An) */ 302 static void slb_invalid(struct cxl_context *ctx) 303 { 304 struct cxl *adapter = ctx->afu->adapter; 305 u64 slbia; 306 307 WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex)); 308 309 cxl_p1_write(adapter, CXL_PSL_LBISEL, 310 ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) | 311 be32_to_cpu(ctx->elem->lpid)); 312 cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID); 313 314 while (1) { 315 if (!cxl_ops->link_ok(adapter, NULL)) 316 break; 317 slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA); 318 if (!(slbia & CXL_TLB_SLB_P)) 319 break; 320 cpu_relax(); 321 } 322 } 323 324 static int do_process_element_cmd(struct cxl_context *ctx, 325 u64 cmd, u64 pe_state) 326 { 327 u64 state; 328 unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); 329 int rc = 0; 330 331 trace_cxl_llcmd(ctx, cmd); 332 333 WARN_ON(!ctx->afu->enabled); 334 335 ctx->elem->software_state = cpu_to_be32(pe_state); 336 smp_wmb(); 337 *(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe); 338 smp_mb(); 339 cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe); 340 while (1) { 341 if (time_after_eq(jiffies, timeout)) { 342 dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n"); 343 rc = -EBUSY; 344 goto out; 345 } 346 if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) { 347 dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n"); 348 rc = -EIO; 349 goto out; 350 } 351 state = be64_to_cpup(ctx->afu->native->sw_command_status); 352 if (state == ~0ULL) { 353 pr_err("cxl: Error adding process element to AFU\n"); 354 rc = -1; 355 goto out; 356 } 357 if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) == 358 (cmd | (cmd >> 16) | ctx->pe)) 359 break; 360 /* 361 * The command won't finish in the PSL if there are 362 * outstanding DSIs. Hence we need to yield here in 363 * case there are outstanding DSIs that we need to 364 * service. Tuning possiblity: we could wait for a 365 * while before sched 366 */ 367 schedule(); 368 369 } 370 out: 371 trace_cxl_llcmd_done(ctx, cmd, rc); 372 return rc; 373 } 374 375 static int add_process_element(struct cxl_context *ctx) 376 { 377 int rc = 0; 378 379 mutex_lock(&ctx->afu->native->spa_mutex); 380 pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe); 381 if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V))) 382 ctx->pe_inserted = true; 383 pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe); 384 mutex_unlock(&ctx->afu->native->spa_mutex); 385 return rc; 386 } 387 388 static int terminate_process_element(struct cxl_context *ctx) 389 { 390 int rc = 0; 391 392 /* fast path terminate if it's already invalid */ 393 if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V))) 394 return rc; 395 396 mutex_lock(&ctx->afu->native->spa_mutex); 397 pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe); 398 /* We could be asked to terminate when the hw is down. That 399 * should always succeed: it's not running if the hw has gone 400 * away and is being reset. 401 */ 402 if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) 403 rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE, 404 CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T); 405 ctx->elem->software_state = 0; /* Remove Valid bit */ 406 pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe); 407 mutex_unlock(&ctx->afu->native->spa_mutex); 408 return rc; 409 } 410 411 static int remove_process_element(struct cxl_context *ctx) 412 { 413 int rc = 0; 414 415 mutex_lock(&ctx->afu->native->spa_mutex); 416 pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe); 417 418 /* We could be asked to remove when the hw is down. Again, if 419 * the hw is down, the PE is gone, so we succeed. 420 */ 421 if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) 422 rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0); 423 424 if (!rc) 425 ctx->pe_inserted = false; 426 slb_invalid(ctx); 427 pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe); 428 mutex_unlock(&ctx->afu->native->spa_mutex); 429 430 return rc; 431 } 432 433 434 void cxl_assign_psn_space(struct cxl_context *ctx) 435 { 436 if (!ctx->afu->pp_size || ctx->master) { 437 ctx->psn_phys = ctx->afu->psn_phys; 438 ctx->psn_size = ctx->afu->adapter->ps_size; 439 } else { 440 ctx->psn_phys = ctx->afu->psn_phys + 441 (ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe); 442 ctx->psn_size = ctx->afu->pp_size; 443 } 444 } 445 446 static int activate_afu_directed(struct cxl_afu *afu) 447 { 448 int rc; 449 450 dev_info(&afu->dev, "Activating AFU directed mode\n"); 451 452 afu->num_procs = afu->max_procs_virtualised; 453 if (afu->native->spa == NULL) { 454 if (cxl_alloc_spa(afu)) 455 return -ENOMEM; 456 } 457 attach_spa(afu); 458 459 cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU); 460 cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL); 461 cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L); 462 463 afu->current_mode = CXL_MODE_DIRECTED; 464 465 if ((rc = cxl_chardev_m_afu_add(afu))) 466 return rc; 467 468 if ((rc = cxl_sysfs_afu_m_add(afu))) 469 goto err; 470 471 if ((rc = cxl_chardev_s_afu_add(afu))) 472 goto err1; 473 474 return 0; 475 err1: 476 cxl_sysfs_afu_m_remove(afu); 477 err: 478 cxl_chardev_afu_remove(afu); 479 return rc; 480 } 481 482 #ifdef CONFIG_CPU_LITTLE_ENDIAN 483 #define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE) 484 #else 485 #define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE)) 486 #endif 487 488 static u64 calculate_sr(struct cxl_context *ctx) 489 { 490 u64 sr = 0; 491 492 set_endian(sr); 493 if (ctx->master) 494 sr |= CXL_PSL_SR_An_MP; 495 if (mfspr(SPRN_LPCR) & LPCR_TC) 496 sr |= CXL_PSL_SR_An_TC; 497 if (ctx->kernel) { 498 if (!ctx->real_mode) 499 sr |= CXL_PSL_SR_An_R; 500 sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV; 501 } else { 502 sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R; 503 sr &= ~(CXL_PSL_SR_An_HV); 504 if (!test_tsk_thread_flag(current, TIF_32BIT)) 505 sr |= CXL_PSL_SR_An_SF; 506 } 507 return sr; 508 } 509 510 static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr) 511 { 512 u32 pid; 513 int r, result; 514 515 cxl_assign_psn_space(ctx); 516 517 ctx->elem->ctxtime = 0; /* disable */ 518 ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID)); 519 ctx->elem->haurp = 0; /* disable */ 520 ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1)); 521 522 pid = current->pid; 523 if (ctx->kernel) 524 pid = 0; 525 ctx->elem->common.tid = 0; 526 ctx->elem->common.pid = cpu_to_be32(pid); 527 528 ctx->elem->sr = cpu_to_be64(calculate_sr(ctx)); 529 530 ctx->elem->common.csrp = 0; /* disable */ 531 ctx->elem->common.aurp0 = 0; /* disable */ 532 ctx->elem->common.aurp1 = 0; /* disable */ 533 534 cxl_prefault(ctx, wed); 535 536 ctx->elem->common.sstp0 = cpu_to_be64(ctx->sstp0); 537 ctx->elem->common.sstp1 = cpu_to_be64(ctx->sstp1); 538 539 /* 540 * Ensure we have the multiplexed PSL interrupt set up to take faults 541 * for kernel contexts that may not have allocated any AFU IRQs at all: 542 */ 543 if (ctx->irqs.range[0] == 0) { 544 ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq; 545 ctx->irqs.range[0] = 1; 546 } 547 548 for (r = 0; r < CXL_IRQ_RANGES; r++) { 549 ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]); 550 ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]); 551 } 552 553 ctx->elem->common.amr = cpu_to_be64(amr); 554 ctx->elem->common.wed = cpu_to_be64(wed); 555 556 /* first guy needs to enable */ 557 if ((result = cxl_ops->afu_check_and_enable(ctx->afu))) 558 return result; 559 560 return add_process_element(ctx); 561 } 562 563 static int deactivate_afu_directed(struct cxl_afu *afu) 564 { 565 dev_info(&afu->dev, "Deactivating AFU directed mode\n"); 566 567 afu->current_mode = 0; 568 afu->num_procs = 0; 569 570 cxl_sysfs_afu_m_remove(afu); 571 cxl_chardev_afu_remove(afu); 572 573 cxl_ops->afu_reset(afu); 574 cxl_afu_disable(afu); 575 cxl_psl_purge(afu); 576 577 return 0; 578 } 579 580 static int activate_dedicated_process(struct cxl_afu *afu) 581 { 582 dev_info(&afu->dev, "Activating dedicated process mode\n"); 583 584 cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process); 585 586 cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */ 587 cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0); /* disable */ 588 cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL); 589 cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID)); 590 cxl_p1n_write(afu, CXL_HAURP_An, 0); /* disable */ 591 cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1)); 592 593 cxl_p2n_write(afu, CXL_CSRP_An, 0); /* disable */ 594 cxl_p2n_write(afu, CXL_AURP0_An, 0); /* disable */ 595 cxl_p2n_write(afu, CXL_AURP1_An, 0); /* disable */ 596 597 afu->current_mode = CXL_MODE_DEDICATED; 598 afu->num_procs = 1; 599 600 return cxl_chardev_d_afu_add(afu); 601 } 602 603 static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr) 604 { 605 struct cxl_afu *afu = ctx->afu; 606 u64 pid; 607 int rc; 608 609 pid = (u64)current->pid << 32; 610 if (ctx->kernel) 611 pid = 0; 612 cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid); 613 614 cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx)); 615 616 if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1))) 617 return rc; 618 619 cxl_prefault(ctx, wed); 620 621 cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, 622 (((u64)ctx->irqs.offset[0] & 0xffff) << 48) | 623 (((u64)ctx->irqs.offset[1] & 0xffff) << 32) | 624 (((u64)ctx->irqs.offset[2] & 0xffff) << 16) | 625 ((u64)ctx->irqs.offset[3] & 0xffff)); 626 cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64) 627 (((u64)ctx->irqs.range[0] & 0xffff) << 48) | 628 (((u64)ctx->irqs.range[1] & 0xffff) << 32) | 629 (((u64)ctx->irqs.range[2] & 0xffff) << 16) | 630 ((u64)ctx->irqs.range[3] & 0xffff)); 631 632 cxl_p2n_write(afu, CXL_PSL_AMR_An, amr); 633 634 /* master only context for dedicated */ 635 cxl_assign_psn_space(ctx); 636 637 if ((rc = cxl_ops->afu_reset(afu))) 638 return rc; 639 640 cxl_p2n_write(afu, CXL_PSL_WED_An, wed); 641 642 return afu_enable(afu); 643 } 644 645 static int deactivate_dedicated_process(struct cxl_afu *afu) 646 { 647 dev_info(&afu->dev, "Deactivating dedicated process mode\n"); 648 649 afu->current_mode = 0; 650 afu->num_procs = 0; 651 652 cxl_chardev_afu_remove(afu); 653 654 return 0; 655 } 656 657 static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode) 658 { 659 if (mode == CXL_MODE_DIRECTED) 660 return deactivate_afu_directed(afu); 661 if (mode == CXL_MODE_DEDICATED) 662 return deactivate_dedicated_process(afu); 663 return 0; 664 } 665 666 static int native_afu_activate_mode(struct cxl_afu *afu, int mode) 667 { 668 if (!mode) 669 return 0; 670 if (!(mode & afu->modes_supported)) 671 return -EINVAL; 672 673 if (!cxl_ops->link_ok(afu->adapter, afu)) { 674 WARN(1, "Device link is down, refusing to activate!\n"); 675 return -EIO; 676 } 677 678 if (mode == CXL_MODE_DIRECTED) 679 return activate_afu_directed(afu); 680 if (mode == CXL_MODE_DEDICATED) 681 return activate_dedicated_process(afu); 682 683 return -EINVAL; 684 } 685 686 static int native_attach_process(struct cxl_context *ctx, bool kernel, 687 u64 wed, u64 amr) 688 { 689 if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) { 690 WARN(1, "Device link is down, refusing to attach process!\n"); 691 return -EIO; 692 } 693 694 ctx->kernel = kernel; 695 if (ctx->afu->current_mode == CXL_MODE_DIRECTED) 696 return attach_afu_directed(ctx, wed, amr); 697 698 if (ctx->afu->current_mode == CXL_MODE_DEDICATED) 699 return attach_dedicated(ctx, wed, amr); 700 701 return -EINVAL; 702 } 703 704 static inline int detach_process_native_dedicated(struct cxl_context *ctx) 705 { 706 cxl_ops->afu_reset(ctx->afu); 707 cxl_afu_disable(ctx->afu); 708 cxl_psl_purge(ctx->afu); 709 return 0; 710 } 711 712 static inline int detach_process_native_afu_directed(struct cxl_context *ctx) 713 { 714 if (!ctx->pe_inserted) 715 return 0; 716 if (terminate_process_element(ctx)) 717 return -1; 718 if (remove_process_element(ctx)) 719 return -1; 720 721 return 0; 722 } 723 724 static int native_detach_process(struct cxl_context *ctx) 725 { 726 trace_cxl_detach(ctx); 727 728 if (ctx->afu->current_mode == CXL_MODE_DEDICATED) 729 return detach_process_native_dedicated(ctx); 730 731 return detach_process_native_afu_directed(ctx); 732 } 733 734 static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info) 735 { 736 u64 pidtid; 737 738 /* If the adapter has gone away, we can't get any meaningful 739 * information. 740 */ 741 if (!cxl_ops->link_ok(afu->adapter, afu)) 742 return -EIO; 743 744 info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); 745 info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An); 746 info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An); 747 pidtid = cxl_p2n_read(afu, CXL_PSL_PID_TID_An); 748 info->pid = pidtid >> 32; 749 info->tid = pidtid & 0xffffffff; 750 info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An); 751 info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); 752 info->proc_handle = 0; 753 754 return 0; 755 } 756 757 static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx, 758 u64 dsisr, u64 errstat) 759 { 760 u64 fir1, fir2, fir_slice, serr, afu_debug; 761 762 fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1); 763 fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2); 764 fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An); 765 serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An); 766 afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An); 767 768 dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat); 769 dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1); 770 dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2); 771 dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr); 772 dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice); 773 dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug); 774 775 dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n"); 776 cxl_stop_trace(ctx->afu->adapter); 777 778 return cxl_ops->ack_irq(ctx, 0, errstat); 779 } 780 781 static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info) 782 { 783 if (irq_info->dsisr & CXL_PSL_DSISR_TRANS) 784 cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE); 785 else 786 cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A); 787 788 return IRQ_HANDLED; 789 } 790 791 static irqreturn_t native_irq_multiplexed(int irq, void *data) 792 { 793 struct cxl_afu *afu = data; 794 struct cxl_context *ctx; 795 struct cxl_irq_info irq_info; 796 int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff; 797 int ret; 798 799 if ((ret = native_get_irq_info(afu, &irq_info))) { 800 WARN(1, "Unable to get CXL IRQ Info: %i\n", ret); 801 return fail_psl_irq(afu, &irq_info); 802 } 803 804 rcu_read_lock(); 805 ctx = idr_find(&afu->contexts_idr, ph); 806 if (ctx) { 807 ret = cxl_irq(irq, ctx, &irq_info); 808 rcu_read_unlock(); 809 return ret; 810 } 811 rcu_read_unlock(); 812 813 WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR" 814 " %016llx\n(Possible AFU HW issue - was a term/remove acked" 815 " with outstanding transactions?)\n", ph, irq_info.dsisr, 816 irq_info.dar); 817 return fail_psl_irq(afu, &irq_info); 818 } 819 820 void native_irq_wait(struct cxl_context *ctx) 821 { 822 u64 dsisr; 823 int timeout = 1000; 824 int ph; 825 826 /* 827 * Wait until no further interrupts are presented by the PSL 828 * for this context. 829 */ 830 while (timeout--) { 831 ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff; 832 if (ph != ctx->pe) 833 return; 834 dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An); 835 if ((dsisr & CXL_PSL_DSISR_PENDING) == 0) 836 return; 837 /* 838 * We are waiting for the workqueue to process our 839 * irq, so need to let that run here. 840 */ 841 msleep(1); 842 } 843 844 dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i" 845 " DSISR %016llx!\n", ph, dsisr); 846 return; 847 } 848 849 static irqreturn_t native_slice_irq_err(int irq, void *data) 850 { 851 struct cxl_afu *afu = data; 852 u64 fir_slice, errstat, serr, afu_debug; 853 854 WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq); 855 856 serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); 857 fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An); 858 errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An); 859 afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An); 860 dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr); 861 dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice); 862 dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat); 863 dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug); 864 865 cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); 866 867 return IRQ_HANDLED; 868 } 869 870 static irqreturn_t native_irq_err(int irq, void *data) 871 { 872 struct cxl *adapter = data; 873 u64 fir1, fir2, err_ivte; 874 875 WARN(1, "CXL ERROR interrupt %i\n", irq); 876 877 err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE); 878 dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte); 879 880 dev_crit(&adapter->dev, "STOPPING CXL TRACE\n"); 881 cxl_stop_trace(adapter); 882 883 fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1); 884 fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2); 885 886 dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2); 887 888 return IRQ_HANDLED; 889 } 890 891 int cxl_native_register_psl_err_irq(struct cxl *adapter) 892 { 893 int rc; 894 895 adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", 896 dev_name(&adapter->dev)); 897 if (!adapter->irq_name) 898 return -ENOMEM; 899 900 if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter, 901 &adapter->native->err_hwirq, 902 &adapter->native->err_virq, 903 adapter->irq_name))) { 904 kfree(adapter->irq_name); 905 adapter->irq_name = NULL; 906 return rc; 907 } 908 909 cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff); 910 911 return 0; 912 } 913 914 void cxl_native_release_psl_err_irq(struct cxl *adapter) 915 { 916 if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq)) 917 return; 918 919 cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000); 920 cxl_unmap_irq(adapter->native->err_virq, adapter); 921 cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq); 922 kfree(adapter->irq_name); 923 } 924 925 int cxl_native_register_serr_irq(struct cxl_afu *afu) 926 { 927 u64 serr; 928 int rc; 929 930 afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err", 931 dev_name(&afu->dev)); 932 if (!afu->err_irq_name) 933 return -ENOMEM; 934 935 if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu, 936 &afu->serr_hwirq, 937 &afu->serr_virq, afu->err_irq_name))) { 938 kfree(afu->err_irq_name); 939 afu->err_irq_name = NULL; 940 return rc; 941 } 942 943 serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); 944 serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff); 945 cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); 946 947 return 0; 948 } 949 950 void cxl_native_release_serr_irq(struct cxl_afu *afu) 951 { 952 if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq)) 953 return; 954 955 cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000); 956 cxl_unmap_irq(afu->serr_virq, afu); 957 cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); 958 kfree(afu->err_irq_name); 959 } 960 961 int cxl_native_register_psl_irq(struct cxl_afu *afu) 962 { 963 int rc; 964 965 afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s", 966 dev_name(&afu->dev)); 967 if (!afu->psl_irq_name) 968 return -ENOMEM; 969 970 if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed, 971 afu, &afu->native->psl_hwirq, &afu->native->psl_virq, 972 afu->psl_irq_name))) { 973 kfree(afu->psl_irq_name); 974 afu->psl_irq_name = NULL; 975 } 976 return rc; 977 } 978 979 void cxl_native_release_psl_irq(struct cxl_afu *afu) 980 { 981 if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq)) 982 return; 983 984 cxl_unmap_irq(afu->native->psl_virq, afu); 985 cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq); 986 kfree(afu->psl_irq_name); 987 } 988 989 static void recover_psl_err(struct cxl_afu *afu, u64 errstat) 990 { 991 u64 dsisr; 992 993 pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat); 994 995 /* Clear PSL_DSISR[PE] */ 996 dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); 997 cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE); 998 999 /* Write 1s to clear error status bits */ 1000 cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat); 1001 } 1002 1003 static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask) 1004 { 1005 trace_cxl_psl_irq_ack(ctx, tfc); 1006 if (tfc) 1007 cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc); 1008 if (psl_reset_mask) 1009 recover_psl_err(ctx->afu, psl_reset_mask); 1010 1011 return 0; 1012 } 1013 1014 int cxl_check_error(struct cxl_afu *afu) 1015 { 1016 return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL); 1017 } 1018 1019 static bool native_support_attributes(const char *attr_name, 1020 enum cxl_attrs type) 1021 { 1022 return true; 1023 } 1024 1025 static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out) 1026 { 1027 if (unlikely(!cxl_ops->link_ok(afu->adapter, afu))) 1028 return -EIO; 1029 if (unlikely(off >= afu->crs_len)) 1030 return -ERANGE; 1031 *out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset + 1032 (cr * afu->crs_len) + off); 1033 return 0; 1034 } 1035 1036 static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out) 1037 { 1038 if (unlikely(!cxl_ops->link_ok(afu->adapter, afu))) 1039 return -EIO; 1040 if (unlikely(off >= afu->crs_len)) 1041 return -ERANGE; 1042 *out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset + 1043 (cr * afu->crs_len) + off); 1044 return 0; 1045 } 1046 1047 static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out) 1048 { 1049 u64 aligned_off = off & ~0x3L; 1050 u32 val; 1051 int rc; 1052 1053 rc = native_afu_cr_read32(afu, cr, aligned_off, &val); 1054 if (!rc) 1055 *out = (val >> ((off & 0x3) * 8)) & 0xffff; 1056 return rc; 1057 } 1058 1059 static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out) 1060 { 1061 u64 aligned_off = off & ~0x3L; 1062 u32 val; 1063 int rc; 1064 1065 rc = native_afu_cr_read32(afu, cr, aligned_off, &val); 1066 if (!rc) 1067 *out = (val >> ((off & 0x3) * 8)) & 0xff; 1068 return rc; 1069 } 1070 1071 static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in) 1072 { 1073 if (unlikely(!cxl_ops->link_ok(afu->adapter, afu))) 1074 return -EIO; 1075 if (unlikely(off >= afu->crs_len)) 1076 return -ERANGE; 1077 out_le32(afu->native->afu_desc_mmio + afu->crs_offset + 1078 (cr * afu->crs_len) + off, in); 1079 return 0; 1080 } 1081 1082 static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in) 1083 { 1084 u64 aligned_off = off & ~0x3L; 1085 u32 val32, mask, shift; 1086 int rc; 1087 1088 rc = native_afu_cr_read32(afu, cr, aligned_off, &val32); 1089 if (rc) 1090 return rc; 1091 shift = (off & 0x3) * 8; 1092 WARN_ON(shift == 24); 1093 mask = 0xffff << shift; 1094 val32 = (val32 & ~mask) | (in << shift); 1095 1096 rc = native_afu_cr_write32(afu, cr, aligned_off, val32); 1097 return rc; 1098 } 1099 1100 static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in) 1101 { 1102 u64 aligned_off = off & ~0x3L; 1103 u32 val32, mask, shift; 1104 int rc; 1105 1106 rc = native_afu_cr_read32(afu, cr, aligned_off, &val32); 1107 if (rc) 1108 return rc; 1109 shift = (off & 0x3) * 8; 1110 mask = 0xff << shift; 1111 val32 = (val32 & ~mask) | (in << shift); 1112 1113 rc = native_afu_cr_write32(afu, cr, aligned_off, val32); 1114 return rc; 1115 } 1116 1117 const struct cxl_backend_ops cxl_native_ops = { 1118 .module = THIS_MODULE, 1119 .adapter_reset = cxl_pci_reset, 1120 .alloc_one_irq = cxl_pci_alloc_one_irq, 1121 .release_one_irq = cxl_pci_release_one_irq, 1122 .alloc_irq_ranges = cxl_pci_alloc_irq_ranges, 1123 .release_irq_ranges = cxl_pci_release_irq_ranges, 1124 .setup_irq = cxl_pci_setup_irq, 1125 .handle_psl_slice_error = native_handle_psl_slice_error, 1126 .psl_interrupt = NULL, 1127 .ack_irq = native_ack_irq, 1128 .irq_wait = native_irq_wait, 1129 .attach_process = native_attach_process, 1130 .detach_process = native_detach_process, 1131 .support_attributes = native_support_attributes, 1132 .link_ok = cxl_adapter_link_ok, 1133 .release_afu = cxl_pci_release_afu, 1134 .afu_read_err_buffer = cxl_pci_afu_read_err_buffer, 1135 .afu_check_and_enable = native_afu_check_and_enable, 1136 .afu_activate_mode = native_afu_activate_mode, 1137 .afu_deactivate_mode = native_afu_deactivate_mode, 1138 .afu_reset = native_afu_reset, 1139 .afu_cr_read8 = native_afu_cr_read8, 1140 .afu_cr_read16 = native_afu_cr_read16, 1141 .afu_cr_read32 = native_afu_cr_read32, 1142 .afu_cr_read64 = native_afu_cr_read64, 1143 .afu_cr_write8 = native_afu_cr_write8, 1144 .afu_cr_write16 = native_afu_cr_write16, 1145 .afu_cr_write32 = native_afu_cr_write32, 1146 .read_adapter_vpd = cxl_pci_read_adapter_vpd, 1147 }; 1148