/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <asm/synch.h>
#include <asm/switch_to.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
		       u64 result, u64 mask, bool enabled)
{
	u64 AFU_Cntl;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	spin_lock(&afu->afu_cntl_lock);
	pr_devel("AFU command starting: %llx\n", command);

	trace_cxl_afu_ctrl(afu, command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	while ((AFU_Cntl & mask) != result) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
			rc = -EBUSY;
			goto out;
		}

		if (!cxl_ops->link_ok(afu->adapter, afu)) {
			afu->enabled = enabled;
			rc = -EIO;
			goto out;
		}

		pr_devel_ratelimited("AFU control... (0x%016llx)\n",
				     AFU_Cntl | command);
		cpu_relax();
		AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	}

	if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
		/*
		 * Workaround for a bug in the XSL used in the Mellanox CX4
		 * that fails to clear the RA bit after an AFU reset,
		 * preventing subsequent AFU resets from working.
		 */
		cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
	}

	pr_devel("AFU command complete: %llx\n", command);
	afu->enabled = enabled;
out:
	trace_cxl_afu_ctrl_done(afu, command, rc);
	spin_unlock(&afu->afu_cntl_lock);

	return rc;
}

static int afu_enable(struct cxl_afu *afu)
{
	pr_devel("AFU enable request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
			   CXL_AFU_Cntl_An_ES_Enabled,
			   CXL_AFU_Cntl_An_ES_MASK, true);
}

int cxl_afu_disable(struct cxl_afu *afu)
{
	pr_devel("AFU disable request\n");

	return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
			   CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_ES_MASK, false);
}

/* This will disable as well as reset */
static int native_afu_reset(struct cxl_afu *afu)
{
	int rc;
	u64 serr;

	pr_devel("AFU reset request\n");

	rc = afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
			 CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
			 CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
			 false);

	/*
	 * Re-enable any masked interrupts when the AFU is not
	 * activated to avoid side effects after attaching a process
	 * in dedicated mode.
	 */
	if (afu->current_mode == 0) {
		serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
		serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
		cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
	}

	return rc;
}

static int native_afu_check_and_enable(struct cxl_afu *afu)
{
	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		WARN(1, "Refusing to enable afu while link down!\n");
		return -EIO;
	}
	if (afu->enabled)
		return 0;
	return afu_enable(afu);
}

int cxl_psl_purge(struct cxl_afu *afu)
{
	u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	u64 dsisr, dar;
	u64 start, end;
	u64 trans_fault = 0x0ULL;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);

	pr_devel("PSL purge request\n");

	if (cxl_is_power8())
		trans_fault = CXL_PSL_DSISR_TRANS;
	if (cxl_is_power9())
		trans_fault = CXL_PSL9_DSISR_An_TF;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
		rc = -EIO;
		goto out;
	}

	if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		WARN(1, "psl_purge request while AFU not disabled!\n");
		cxl_afu_disable(afu);
	}

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		      PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
	start = local_clock();
	PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
			== CXL_PSL_SCNTL_An_Ps_Pending) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(afu->adapter, afu)) {
			rc = -EIO;
			goto out;
		}

		dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx PSL_DSISR: 0x%016llx\n",
				     PSL_CNTL, dsisr);

		if (dsisr & trans_fault) {
			dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
			dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n",
				   dsisr, dar);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		} else if (dsisr) {
			dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n",
				   dsisr);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
		} else {
			cpu_relax();
		}
		PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	}
	end = local_clock();
	pr_devel("PSL purged in %lld ns\n", end - start);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		      PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
out:
	trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
	return rc;
}

static int spa_max_procs(int spa_size)
{
	/*
	 * From the CAIA:
	 * end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
	 * Most of that junk is really just an overly-complicated way of saying
	 * the last 256 bytes are __aligned(128), so it's really:
	 * end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
	 * and
	 * end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
	 * so
	 * sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
	 * Ignore the alignment (which is safe in this case as long as we are
	 * careful with our rounding) and solve for n:
	 */
	return ((spa_size / 8) - 96) / 17;
}
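
/*
 * Illustrative example, not from the CAIA text above: assuming a 64K base
 * page, a single page gives spa_size = 65536, so spa_max_procs() returns
 * ((65536 / 8) - 96) / 17 = 8096 / 17 = 476 process elements.
 */
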
static int cxl_alloc_spa(struct cxl_afu *afu, int mode)
{
	unsigned spa_size;

	/* Work out how many pages to allocate */
	afu->native->spa_order = -1;
	do {
		afu->native->spa_order++;
		spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;

		if (spa_size > 0x100000) {
			dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
				 afu->native->spa_max_procs, afu->native->spa_size);
			if (mode != CXL_MODE_DEDICATED)
				afu->num_procs = afu->native->spa_max_procs;
			break;
		}

		afu->native->spa_size = spa_size;
		afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
	} while (afu->native->spa_max_procs < afu->num_procs);

	if (!(afu->native->spa = (struct cxl_process_element *)
	      __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
		pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
		return -ENOMEM;
	}
	pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
		 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);

	return 0;
}

static void attach_spa(struct cxl_afu *afu)
{
	u64 spap;

	afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
					 ((afu->native->spa_max_procs + 3) * 128));

	spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
	spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
	spap |= CXL_PSL_SPAP_V;
	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
		 afu->native->spa, afu->native->spa_max_procs,
		 afu->native->sw_command_status, spap);
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
}

static inline void detach_spa(struct cxl_afu *afu)
{
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
}

void cxl_release_spa(struct cxl_afu *afu)
{
	if (afu->native->spa) {
		free_pages((unsigned long) afu->native->spa,
			   afu->native->spa_order);
		afu->native->spa = NULL;
	}
}

/*
 * Invalidation of all ERAT entries is no longer required by CAIA2. Use
 * only for debug.
 */
int cxl_invalidate_all_psl9(struct cxl *adapter)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	u64 ierat;

	pr_devel("CXL adapter - invalidation of all ERAT entries\n");

	/* Invalidates all ERAT entries for Radix or HPT */
	ierat = CXL_XSL9_IERAT_IALL;
	if (radix_enabled())
		ierat |= CXL_XSL9_IERAT_INVR;
	cxl_p1_write(adapter, CXL_XSL9_IERAT, ierat);

	while (cxl_p1_read(adapter, CXL_XSL9_IERAT) & CXL_XSL9_IERAT_IINPROG) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev,
				 "WARNING: CXL adapter invalidation of all ERAT entries timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}
	return 0;
}

int cxl_invalidate_all_psl8(struct cxl *adapter)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("CXL adapter wide TLBIA & SLBIA\n");

	cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);

	cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}

	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}
	return 0;
}

int cxl_data_cache_flush(struct cxl *adapter)
{
	u64 reg;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	/*
	 * Do a datacache flush only if datacache is available.
	 * In case of PSL9D, the datacache is absent, hence the flush
	 * operation would timeout.
	 */
	if (adapter->native->no_data_cache) {
		pr_devel("No PSL data cache. Ignoring cache flush req.\n");
		return 0;
	}

	pr_devel("Flushing data cache\n");
	reg = cxl_p1_read(adapter, CXL_PSL_Control);
	reg |= CXL_PSL_Control_Fr;
	cxl_p1_write(adapter, CXL_PSL_Control, reg);

	reg = cxl_p1_read(adapter, CXL_PSL_Control);
	while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n");
			return -EBUSY;
		}

		if (!cxl_ops->link_ok(adapter, NULL)) {
			dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n");
			return -EIO;
		}
		cpu_relax();
		reg = cxl_p1_read(adapter, CXL_PSL_Control);
	}

	reg &= ~CXL_PSL_Control_Fr;
	cxl_p1_write(adapter, CXL_PSL_Control, reg);
	return 0;
}

static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
	int rc;

	/* 1. Disable SSTP by writing 0 to SSTP1[V] */
	cxl_p2n_write(afu, CXL_SSTP1_An, 0);

	/* 2. Invalidate all SLB entries */
	if ((rc = cxl_afu_slbia(afu)))
		return rc;

	/* 3. Set SSTP0_An */
	cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);

	/* 4. Set SSTP1_An */
	cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);

	return 0;
}

/* Using per slice version may improve performance here. (ie. SLBIA_An) */
static void slb_invalid(struct cxl_context *ctx)
{
	struct cxl *adapter = ctx->afu->adapter;
	u64 slbia;

	WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));

	cxl_p1_write(adapter, CXL_PSL_LBISEL,
		     ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
		     be32_to_cpu(ctx->elem->lpid));
	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);

	while (1) {
		if (!cxl_ops->link_ok(adapter, NULL))
			break;
		slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
		if (!(slbia & CXL_TLB_SLB_P))
			break;
		cpu_relax();
	}
}

static int do_process_element_cmd(struct cxl_context *ctx,
				  u64 cmd, u64 pe_state)
{
	u64 state;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_llcmd(ctx, cmd);

	WARN_ON(!ctx->afu->enabled);

	ctx->elem->software_state = cpu_to_be32(pe_state);
	smp_wmb();
	*(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
	smp_mb();
	cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
	while (1) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
			dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
			rc = -EIO;
			goto out;
		}
		state = be64_to_cpup(ctx->afu->native->sw_command_status);
		if (state == ~0ULL) {
			pr_err("cxl: Error adding process element to AFU\n");
			rc = -1;
			goto out;
		}
		if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
		    (cmd | (cmd >> 16) | ctx->pe))
			break;
		/*
		 * The command won't finish in the PSL if there are
		 * outstanding DSIs. Hence we need to yield here in
		 * case there are outstanding DSIs that we need to
		 * service.
		 * Tuning possibility: we could wait for a
		 * while before scheduling.
		 */
		schedule();

	}
out:
	trace_cxl_llcmd_done(ctx, cmd, rc);
	return rc;
}
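
/*
 * Illustrative note on the completion test in do_process_element_cmd()
 * above: the PSL is expected to echo the link-list command back through
 * sw_command_status, e.g. after CXL_SPA_SW_CMD_ADD for PE n the status
 * word should hold the command in the command field, the matching state
 * (the command value shifted right by 16) in the state field and the PE
 * handle n in the link field, i.e. CXL_SPA_SW_CMD_ADD |
 * (CXL_SPA_SW_CMD_ADD >> 16) | n.
 */
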
static int add_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
		ctx->pe_inserted = true;
	pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);
	return rc;
}

static int terminate_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	/* fast path terminate if it's already invalid */
	if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
		return rc;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
	/* We could be asked to terminate when the hw is down. That
	 * should always succeed: it's not running if the hw has gone
	 * away and is being reset.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
					    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
	ctx->elem->software_state = 0;	/* Remove Valid bit */
	pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);
	return rc;
}

static int remove_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);

	/* We could be asked to remove when the hw is down. Again, if
	 * the hw is down, the PE is gone, so we succeed.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);

	if (!rc)
		ctx->pe_inserted = false;
	if (cxl_is_power8())
		slb_invalid(ctx);
	pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);

	return rc;
}

void cxl_assign_psn_space(struct cxl_context *ctx)
{
	if (!ctx->afu->pp_size || ctx->master) {
		ctx->psn_phys = ctx->afu->psn_phys;
		ctx->psn_size = ctx->afu->adapter->ps_size;
	} else {
		ctx->psn_phys = ctx->afu->psn_phys +
			(ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
		ctx->psn_size = ctx->afu->pp_size;
	}
}

static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU directed mode\n");

	afu->num_procs = afu->max_procs_virtualised;
	if (afu->native->spa == NULL) {
		if (cxl_alloc_spa(afu, CXL_MODE_DIRECTED))
			return -ENOMEM;
	}
	attach_spa(afu);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
	if (cxl_is_power8())
		cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

	afu->current_mode = CXL_MODE_DIRECTED;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif

u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9)
{
	u64 sr = 0;

	set_endian(sr);
	if (master)
		sr |= CXL_PSL_SR_An_MP;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		sr |= CXL_PSL_SR_An_TC;
	if (kernel) {
		if (!real_mode)
			sr |= CXL_PSL_SR_An_R;
		sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
	} else {
		sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
		if (radix_enabled())
			sr |= CXL_PSL_SR_An_HV;
		else
			sr &= ~(CXL_PSL_SR_An_HV);
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			sr |= CXL_PSL_SR_An_SF;
	}
	if (p9) {
		if (radix_enabled())
			sr |= CXL_PSL_SR_An_XLAT_ror;
		else
			sr |= CXL_PSL_SR_An_XLAT_hpt;
	}
	return sr;
}

static u64 calculate_sr(struct cxl_context *ctx)
{
	return cxl_calculate_sr(ctx->master, ctx->kernel, ctx->real_mode,
				cxl_is_power9());
}
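
/*
 * Illustrative summary of cxl_calculate_sr() above, not an exhaustive
 * statement of the architecture: a 64-bit user-space context on a radix
 * host ends up with CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R | CXL_PSL_SR_An_SF |
 * CXL_PSL_SR_An_HV (plus CXL_PSL_SR_An_XLAT_ror on POWER9 and the
 * endianness/TC bits), while a kernel context gets CXL_PSL_SR_An_HV, the
 * MSR_SF bit copied from the current MSR and CXL_PSL_SR_An_R unless it
 * runs in real mode.
 */
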
static void update_ivtes_directed(struct cxl_context *ctx)
{
	bool need_update = (ctx->status == STARTED);
	int r;

	if (need_update) {
		WARN_ON(terminate_process_element(ctx));
		WARN_ON(remove_process_element(ctx));
	}

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
	}

	/*
	 * Theoretically we could use the update llcmd, instead of a
	 * terminate/remove/add (or if an atomic update was required we could
	 * do a suspend/update/resume), however it seems there might be issues
	 * with the update llcmd on some cards (including those using an XSL on
	 * an ASIC) so for now it's safest to go with the commands that are
	 * known to work. In the future if we come across a situation where the
	 * card may be performing transactions using the same PE while we are
	 * doing this update we might need to revisit this.
	 */
	if (need_update)
		WARN_ON(add_process_element(ctx));
}

static int process_element_entry_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
	u32 pid;
	int rc;

	cxl_assign_psn_space(ctx);

	ctx->elem->ctxtime = 0; /* disable */
	ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	ctx->elem->haurp = 0; /* disable */

	if (ctx->kernel)
		pid = 0;
	else {
		if (ctx->mm == NULL) {
			pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
				 __func__, ctx->pe, pid_nr(ctx->pid));
			return -EINVAL;
		}
		pid = ctx->mm->context.id;
	}

	/* Assign a unique TIDR (thread id) for the current thread */
	if (!(ctx->tidr) && (ctx->assign_tidr)) {
		rc = set_thread_tidr(current);
		if (rc)
			return -ENODEV;
		ctx->tidr = current->thread.tidr;
		pr_devel("%s: current tidr: %d\n", __func__, ctx->tidr);
	}

	ctx->elem->common.tid = cpu_to_be32(ctx->tidr);
	ctx->elem->common.pid = cpu_to_be32(pid);

	ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

	ctx->elem->common.csrp = 0; /* disable */

	cxl_prefault(ctx, wed);

	/*
	 * Ensure we have the multiplexed PSL interrupt set up to take faults
	 * for kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	ctx->elem->common.amr = cpu_to_be64(amr);
	ctx->elem->common.wed = cpu_to_be64(wed);

	return 0;
}

int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
	int result;

	/* fill the process element entry */
	result = process_element_entry_psl9(ctx, wed, amr);
	if (result)
		return result;

	update_ivtes_directed(ctx);

	/* first guy needs to enable */
	result = cxl_ops->afu_check_and_enable(ctx->afu);
	if (result)
		return result;

	return add_process_element(ctx);
}

int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
	u32 pid;
	int result;

	cxl_assign_psn_space(ctx);

	ctx->elem->ctxtime = 0; /* disable */
	ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	ctx->elem->haurp = 0; /* disable */
	ctx->elem->u.sdr = cpu_to_be64(mfspr(SPRN_SDR1));

	pid = current->pid;
	if (ctx->kernel)
		pid = 0;
	ctx->elem->common.tid = 0;
	ctx->elem->common.pid = cpu_to_be32(pid);

	ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

	ctx->elem->common.csrp = 0; /* disable */
	ctx->elem->common.u.psl8.aurp0 = 0; /* disable */
	ctx->elem->common.u.psl8.aurp1 = 0; /* disable */

	cxl_prefault(ctx, wed);

	ctx->elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
	ctx->elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);

	/*
	 * Ensure we have the multiplexed PSL interrupt set up to take faults
	 * for kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	update_ivtes_directed(ctx);

	ctx->elem->common.amr = cpu_to_be64(amr);
	ctx->elem->common.wed = cpu_to_be64(wed);

	/* first guy needs to enable */
	if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
		return result;

	return add_process_element(ctx);
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU directed mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	/*
	 * The CAIA section 2.2.1 indicates that the procedure for starting and
	 * stopping an AFU in AFU directed mode is AFU specific, which is not
	 * ideal since this code is generic and with one exception has no
	 * knowledge of the AFU. This is in contrast to the procedure for
	 * disabling a dedicated process AFU, which is documented to just
	 * require a reset. The architecture does indicate that both an AFU
	 * reset and an AFU disable should result in the AFU being disabled and
	 * we do both followed by a PSL purge for safety.
	 *
	 * Notably we used to have some issues with the disable sequence on PSL
	 * cards, which is why we ended up using this heavy weight procedure in
	 * the first place, however a bug was discovered that had rendered the
	 * disable operation ineffective, so it is conceivable that was the
	 * sole explanation for those difficulties. Careful regression testing
	 * is recommended if anyone attempts to remove or reorder these
	 * operations.
	 *
	 * The XSL on the Mellanox CX4 behaves a little differently from the
	 * PSL based cards and will time out an AFU reset if the AFU is still
	 * enabled. That card is special in that we do have a means to identify
	 * it from this code, so in that case we skip the reset and just use a
	 * disable/purge to avoid the timeout and corresponding noise in the
	 * kernel log.
	 */
	if (afu->adapter->native->sl_ops->needs_reset_before_disable)
		cxl_ops->afu_reset(afu);
	cxl_afu_disable(afu);
	cxl_psl_purge(afu);

	return 0;
}

int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Activating dedicated process mode\n");

	/*
	 * If XSL is set to dedicated mode (Set in PSL_SCNTL reg), the
	 * XSL and AFU are programmed to work with a single context.
	 * The context information should be configured in the SPA area
	 * index 0 (so PSL_SPAP must be configured before enabling the
	 * AFU).
	 */
	afu->num_procs = 1;
	if (afu->native->spa == NULL) {
		if (cxl_alloc_spa(afu, CXL_MODE_DEDICATED))
			return -ENOMEM;
	}
	attach_spa(afu);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);
	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

	afu->current_mode = CXL_MODE_DEDICATED;

	return cxl_chardev_d_afu_add(afu);
}

int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Activating dedicated process mode\n");

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);

	cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);    /* disable */
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
	cxl_p1n_write(afu, CXL_HAURP_An, 0);       /* disable */
	cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));

	cxl_p2n_write(afu, CXL_CSRP_An, 0);        /* disable */
	cxl_p2n_write(afu, CXL_AURP0_An, 0);       /* disable */
	cxl_p2n_write(afu, CXL_AURP1_An, 0);       /* disable */

	afu->current_mode = CXL_MODE_DEDICATED;
	afu->num_procs = 1;

	return cxl_chardev_d_afu_add(afu);
}

void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx)
{
	int r;

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
	}
}

void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx)
{
	struct cxl_afu *afu = ctx->afu;

	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
		      (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
		      (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
		      (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
		      ((u64)ctx->irqs.offset[3] & 0xffff));
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
		      (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
		      (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
		      (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
		      ((u64)ctx->irqs.range[3] & 0xffff));
}

int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_afu *afu = ctx->afu;
	int result;

	/* fill the process element entry */
	result = process_element_entry_psl9(ctx, wed, amr);
	if (result)
		return result;

	if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
		afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);

	ctx->elem->software_state = cpu_to_be32(CXL_PE_SOFTWARE_STATE_V);
	/*
	 * Ideally we should do a wmb() here to make sure the changes to the
	 * PE are visible to the card before we call afu_enable.
	 * On ppc64 though all mmios are preceded by a 'sync' instruction hence
	 * we don't need one here.
	 */

	result = cxl_ops->afu_reset(afu);
	if (result)
		return result;

	return afu_enable(afu);
}

int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_afu *afu = ctx->afu;
	u64 pid;
	int rc;

	pid = (u64)current->pid << 32;
	if (ctx->kernel)
		pid = 0;
	cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);

	cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));

	if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
		return rc;

	cxl_prefault(ctx, wed);

	if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
		afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);

	cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);

	/* master only context for dedicated */
	cxl_assign_psn_space(ctx);

	if ((rc = cxl_ops->afu_reset(afu)))
		return rc;

	cxl_p2n_write(afu, CXL_PSL_WED_An, wed);

	return afu_enable(afu);
}

static int deactivate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating dedicated process mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_chardev_afu_remove(afu);

	return 0;
}

static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return deactivate_dedicated_process(afu);
	return 0;
}

static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		WARN(1, "Device link is down, refusing to activate!\n");
		return -EIO;
	}

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);
	if ((mode == CXL_MODE_DEDICATED) &&
	    (afu->adapter->native->sl_ops->activate_dedicated_process))
		return afu->adapter->native->sl_ops->activate_dedicated_process(afu);

	return -EINVAL;
}

static int native_attach_process(struct cxl_context *ctx, bool kernel,
				 u64 wed, u64 amr)
{
	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
		WARN(1, "Device link is down, refusing to attach process!\n");
		return -EIO;
	}

	ctx->kernel = kernel;
	if ((ctx->afu->current_mode == CXL_MODE_DIRECTED) &&
	    (ctx->afu->adapter->native->sl_ops->attach_afu_directed))
		return ctx->afu->adapter->native->sl_ops->attach_afu_directed(ctx, wed, amr);

	if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
	    (ctx->afu->adapter->native->sl_ops->attach_dedicated_process))
		return ctx->afu->adapter->native->sl_ops->attach_dedicated_process(ctx, wed, amr);

	return -EINVAL;
}

static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
	/*
	 * The CAIA section 2.1.1 indicates that we need to do an AFU reset to
	 * stop the AFU in dedicated mode (we therefore do not make that
	 * optional like we do in the afu directed path). It does not indicate
	 * that we need to do an explicit disable (which should occur
	 * implicitly as part of the reset) or purge, but we do these as well
	 * to be on the safe side.
	 *
	 * Notably we used to have some issues with the disable sequence
	 * (before the sequence was spelled out in the architecture) which is
	 * why we were so heavy weight in the first place, however a bug was
	 * discovered that had rendered the disable operation ineffective, so
	 * it is conceivable that was the sole explanation for those
	 * difficulties. Point is, we should be careful and do some regression
	 * testing if we ever attempt to remove any part of this procedure.
	 */
	cxl_ops->afu_reset(ctx->afu);
	cxl_afu_disable(ctx->afu);
	cxl_psl_purge(ctx->afu);
	return 0;
}

static void native_update_ivtes(struct cxl_context *ctx)
{
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return update_ivtes_directed(ctx);
	if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
	    (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes))
		return ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
	WARN(1, "native_update_ivtes: Bad mode\n");
}

static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (terminate_process_element(ctx))
		return -1;
	if (remove_process_element(ctx))
		return -1;

	return 0;
}

static int native_detach_process(struct cxl_context *ctx)
{
	trace_cxl_detach(ctx);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return detach_process_native_dedicated(ctx);

	return detach_process_native_afu_directed(ctx);
}

static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
{
	/* If the adapter has gone away, we can't get any meaningful
	 * information.
	 */
	if (!cxl_ops->link_ok(afu->adapter, afu))
		return -EIO;

	info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
	if (cxl_is_power8())
		info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
	info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	info->proc_handle = 0;

	return 0;
}

void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx)
{
	u64 fir1, serr;

	fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR1);

	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
	if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
		serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
		cxl_afu_decode_psl_serr(ctx->afu, serr);
	}
}

void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx)
{
	u64 fir1, fir2, fir_slice, serr, afu_debug;

	fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
	fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
	afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
	if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
		serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
		cxl_afu_decode_psl_serr(ctx->afu, serr);
	}
	dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
	dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
}

static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
						 u64 dsisr, u64 errstat)
{

	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);

	if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
		ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);

	if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
		dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
		ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
	}

	return cxl_ops->ack_irq(ctx, 0, errstat);
}

static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr)
{
	if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS))
		return true;

	if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF))
		return true;

	return false;
}

irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
{
	if (cxl_is_translation_fault(afu, irq_info->dsisr))
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
	else
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);

	return IRQ_HANDLED;
}

static irqreturn_t native_irq_multiplexed(int irq, void *data)
{
	struct cxl_afu *afu = data;
	struct cxl_context *ctx;
	struct cxl_irq_info irq_info;
	u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
	int ph, ret = IRQ_HANDLED, res;

	/* check if eeh kicked in while the interrupt was in flight */
	if (unlikely(phreg == ~0ULL)) {
		dev_warn(&afu->dev,
			 "Ignoring slice interrupt(%d) due to fenced card",
			 irq);
		return IRQ_HANDLED;
	}
	/* Mask the pe-handle from register value */
	ph = phreg & 0xffff;
	if ((res = native_get_irq_info(afu, &irq_info))) {
		WARN(1, "Unable to get CXL IRQ Info: %i\n", res);
		if (afu->adapter->native->sl_ops->fail_irq)
			return afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
		return ret;
	}

	rcu_read_lock();
	ctx = idr_find(&afu->contexts_idr, ph);
	if (ctx) {
		if (afu->adapter->native->sl_ops->handle_interrupt)
			ret = afu->adapter->native->sl_ops->handle_interrupt(irq, ctx, &irq_info);
		rcu_read_unlock();
		return ret;
	}
	rcu_read_unlock();

	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
		" %016llx\n(Possible AFU HW issue - was a term/remove acked"
		" with outstanding transactions?)\n", ph, irq_info.dsisr,
		irq_info.dar);
	if (afu->adapter->native->sl_ops->fail_irq)
		ret = afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
	return ret;
}

static void native_irq_wait(struct cxl_context *ctx)
{
	u64 dsisr;
	int timeout = 1000;
	int ph;

	/*
	 * Wait until no further interrupts are presented by the PSL
	 * for this context.
	 */
	while (timeout--) {
		ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
		if (ph != ctx->pe)
			return;
		dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
		if (cxl_is_power8() &&
		    ((dsisr & CXL_PSL_DSISR_PENDING) == 0))
			return;
		if (cxl_is_power9() &&
		    ((dsisr & CXL_PSL9_DSISR_PENDING) == 0))
			return;
		/*
		 * We are waiting for the workqueue to process our
		 * irq, so need to let that run here.
		 */
		msleep(1);
	}

	dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
		 " DSISR %016llx!\n", ph, dsisr);
	return;
}

static irqreturn_t native_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	u64 errstat, serr, afu_error, dsisr;
	u64 fir_slice, afu_debug, irq_mask;

	/*
	 * slice err interrupt is only used with full PSL (no XSL)
	 */
	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_afu_decode_psl_serr(afu, serr);

	if (cxl_is_power8()) {
		fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
		afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
		dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
		dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
	}
	dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
	dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
	dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);

	/* mask off the IRQ so it won't retrigger until the AFU is reset */
	irq_mask = (serr & CXL_PSL_SERR_An_IRQS) >> 32;
	serr |= irq_mask;
	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
	dev_info(&afu->dev, "Further such interrupts will be masked until the AFU is reset\n");

	return IRQ_HANDLED;
}

void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter)
{
	u64 fir1;

	fir1 = cxl_p1_read(adapter, CXL_PSL9_FIR1);
	dev_crit(&adapter->dev, "PSL_FIR: 0x%016llx\n", fir1);
}

void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter)
{
	u64 fir1, fir2;

	fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
	dev_crit(&adapter->dev,
		 "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n",
		 fir1, fir2);
}
static irqreturn_t native_irq_err(int irq, void *data)
{
	struct cxl *adapter = data;
	u64 err_ivte;

	WARN(1, "CXL ERROR interrupt %i\n", irq);

	err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
	dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);

	if (adapter->native->sl_ops->debugfs_stop_trace) {
		dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
		adapter->native->sl_ops->debugfs_stop_trace(adapter);
	}

	if (adapter->native->sl_ops->err_irq_dump_registers)
		adapter->native->sl_ops->err_irq_dump_registers(adapter);

	return IRQ_HANDLED;
}

int cxl_native_register_psl_err_irq(struct cxl *adapter)
{
	int rc;

	adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&adapter->dev));
	if (!adapter->irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
				       &adapter->native->err_hwirq,
				       &adapter->native->err_virq,
				       adapter->irq_name))) {
		kfree(adapter->irq_name);
		adapter->irq_name = NULL;
		return rc;
	}

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);

	return 0;
}

void cxl_native_release_psl_err_irq(struct cxl *adapter)
{
	if (adapter->native->err_virq == 0 ||
	    adapter->native->err_virq !=
	    irq_find_mapping(NULL, adapter->native->err_hwirq))
		return;

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
	cxl_unmap_irq(adapter->native->err_virq, adapter);
	cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
	kfree(adapter->irq_name);
	adapter->native->err_virq = 0;
}

int cxl_native_register_serr_irq(struct cxl_afu *afu)
{
	u64 serr;
	int rc;

	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
				       &afu->serr_hwirq,
				       &afu->serr_virq, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return rc;
	}

	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	if (cxl_is_power8())
		serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
	if (cxl_is_power9()) {
		/*
		 * By default, all errors are masked. So don't set all masks.
		 * Slice errors will be transferred.
1388 */ 1389 serr = (serr & ~0xff0000007fffffffULL) | (afu->serr_hwirq & 0xffff); 1390 } 1391 cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); 1392 1393 return 0; 1394 } 1395 1396 void cxl_native_release_serr_irq(struct cxl_afu *afu) 1397 { 1398 if (afu->serr_virq == 0 || 1399 afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq)) 1400 return; 1401 1402 cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000); 1403 cxl_unmap_irq(afu->serr_virq, afu); 1404 cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); 1405 kfree(afu->err_irq_name); 1406 afu->serr_virq = 0; 1407 } 1408 1409 int cxl_native_register_psl_irq(struct cxl_afu *afu) 1410 { 1411 int rc; 1412 1413 afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s", 1414 dev_name(&afu->dev)); 1415 if (!afu->psl_irq_name) 1416 return -ENOMEM; 1417 1418 if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed, 1419 afu, &afu->native->psl_hwirq, &afu->native->psl_virq, 1420 afu->psl_irq_name))) { 1421 kfree(afu->psl_irq_name); 1422 afu->psl_irq_name = NULL; 1423 } 1424 return rc; 1425 } 1426 1427 void cxl_native_release_psl_irq(struct cxl_afu *afu) 1428 { 1429 if (afu->native->psl_virq == 0 || 1430 afu->native->psl_virq != 1431 irq_find_mapping(NULL, afu->native->psl_hwirq)) 1432 return; 1433 1434 cxl_unmap_irq(afu->native->psl_virq, afu); 1435 cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq); 1436 kfree(afu->psl_irq_name); 1437 afu->native->psl_virq = 0; 1438 } 1439 1440 static void recover_psl_err(struct cxl_afu *afu, u64 errstat) 1441 { 1442 u64 dsisr; 1443 1444 pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat); 1445 1446 /* Clear PSL_DSISR[PE] */ 1447 dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An); 1448 cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE); 1449 1450 /* Write 1s to clear error status bits */ 1451 cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat); 1452 } 1453 1454 static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask) 1455 { 1456 trace_cxl_psl_irq_ack(ctx, tfc); 1457 if (tfc) 1458 cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc); 1459 if (psl_reset_mask) 1460 recover_psl_err(ctx->afu, psl_reset_mask); 1461 1462 return 0; 1463 } 1464 1465 int cxl_check_error(struct cxl_afu *afu) 1466 { 1467 return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL); 1468 } 1469 1470 static bool native_support_attributes(const char *attr_name, 1471 enum cxl_attrs type) 1472 { 1473 return true; 1474 } 1475 1476 static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out) 1477 { 1478 if (unlikely(!cxl_ops->link_ok(afu->adapter, afu))) 1479 return -EIO; 1480 if (unlikely(off >= afu->crs_len)) 1481 return -ERANGE; 1482 *out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset + 1483 (cr * afu->crs_len) + off); 1484 return 0; 1485 } 1486 1487 static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out) 1488 { 1489 if (unlikely(!cxl_ops->link_ok(afu->adapter, afu))) 1490 return -EIO; 1491 if (unlikely(off >= afu->crs_len)) 1492 return -ERANGE; 1493 *out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset + 1494 (cr * afu->crs_len) + off); 1495 return 0; 1496 } 1497 1498 static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out) 1499 { 1500 u64 aligned_off = off & ~0x3L; 1501 u32 val; 1502 int rc; 1503 1504 rc = native_afu_cr_read32(afu, cr, aligned_off, &val); 1505 if (!rc) 1506 *out = (val >> ((off & 0x3) * 8)) & 0xffff; 1507 return rc; 1508 } 1509 1510 static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 
static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
	if (!rc)
		*out = (val >> ((off & 0x3) * 8)) & 0xff;
	return rc;
}

static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
		 (cr * afu->crs_len) + off, in);
	return 0;
}

static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
	u64 aligned_off = off & ~0x3L;
	u32 val32, mask, shift;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
	if (rc)
		return rc;
	shift = (off & 0x3) * 8;
	WARN_ON(shift == 24);
	mask = 0xffff << shift;
	val32 = (val32 & ~mask) | (in << shift);

	rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
	return rc;
}

static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
	u64 aligned_off = off & ~0x3L;
	u32 val32, mask, shift;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
	if (rc)
		return rc;
	shift = (off & 0x3) * 8;
	mask = 0xff << shift;
	val32 = (val32 & ~mask) | (in << shift);

	rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
	return rc;
}

const struct cxl_backend_ops cxl_native_ops = {
	.module = THIS_MODULE,
	.adapter_reset = cxl_pci_reset,
	.alloc_one_irq = cxl_pci_alloc_one_irq,
	.release_one_irq = cxl_pci_release_one_irq,
	.alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
	.release_irq_ranges = cxl_pci_release_irq_ranges,
	.setup_irq = cxl_pci_setup_irq,
	.handle_psl_slice_error = native_handle_psl_slice_error,
	.psl_interrupt = NULL,
	.ack_irq = native_ack_irq,
	.irq_wait = native_irq_wait,
	.attach_process = native_attach_process,
	.detach_process = native_detach_process,
	.update_ivtes = native_update_ivtes,
	.support_attributes = native_support_attributes,
	.link_ok = cxl_adapter_link_ok,
	.release_afu = cxl_pci_release_afu,
	.afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
	.afu_check_and_enable = native_afu_check_and_enable,
	.afu_activate_mode = native_afu_activate_mode,
	.afu_deactivate_mode = native_afu_deactivate_mode,
	.afu_reset = native_afu_reset,
	.afu_cr_read8 = native_afu_cr_read8,
	.afu_cr_read16 = native_afu_cr_read16,
	.afu_cr_read32 = native_afu_cr_read32,
	.afu_cr_read64 = native_afu_cr_read64,
	.afu_cr_write8 = native_afu_cr_write8,
	.afu_cr_write16 = native_afu_cr_write16,
	.afu_cr_write32 = native_afu_cr_write32,
	.read_adapter_vpd = cxl_pci_read_adapter_vpd,
};