/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pid.h>
#include <asm/cputable.h>
#include <misc/cxl.h>

#include "cxl.h"

/* XXX: This is implementation specific */
static irqreturn_t handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, u64 errstat)
{
	u64 fir1, fir2, fir_slice, serr, afu_debug;

	fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
	fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
	serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
	afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);
	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%.16llx\n", fir1);
	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%.16llx\n", fir2);
	dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
	dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice);
	dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug);

	dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
	cxl_stop_trace(ctx->afu->adapter);

	return cxl_ack_irq(ctx, 0, errstat);
}

irqreturn_t cxl_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	u64 fir_slice, errstat, serr, afu_debug;

	WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);

	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
	errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
	dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
	dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice);
	dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%.16llx\n", errstat);
	dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug);

	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return IRQ_HANDLED;
}

static irqreturn_t cxl_irq_err(int irq, void *data)
{
	struct cxl *adapter = data;
	u64 fir1, fir2, err_ivte;

	WARN(1, "CXL ERROR interrupt %i\n", irq);

	err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
	dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%.16llx\n", err_ivte);

	dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
	cxl_stop_trace(adapter);

	fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);

	dev_crit(&adapter->dev, "PSL_FIR1: 0x%.16llx\nPSL_FIR2: 0x%.16llx\n", fir1, fir2);

	return IRQ_HANDLED;
}

static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar)
{
	ctx->dsisr = dsisr;
	ctx->dar = dar;
	schedule_work(&ctx->fault_work);
	return IRQ_HANDLED;
}

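/*
 * Demultiplex a PSL interrupt for a single context based on the DSISR
 * status bits. Anything that needs the task's mm (segment misses, page
 * faults) is punted to ctx->fault_work via schedule_cxl_fault() above and
 * handled in process context, where taking the task_lock is safe. A single
 * dsisr/dar slot in the context should suffice here, since the PSL stalls
 * the context until the fault is acked via CXL_PSL_TFC_An (the work handler
 * is expected to be cxl_handle_fault() in fault.c, wired up when the
 * context is initialised).
 */
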
static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
{
	struct cxl_context *ctx = data;
	u64 dsisr, dar;

	dsisr = irq_info->dsisr;
	dar = irq_info->dar;

	pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);

	if (dsisr & CXL_PSL_DSISR_An_DS) {
		/*
		 * We don't inherently need to sleep to handle this, but we do
		 * need to get a ref to the task's mm, which we can't do from
		 * irq context without the potential for a deadlock since it
		 * takes the task_lock. An alternate option would be to keep a
		 * reference to the task's mm the entire time it has cxl open,
		 * but to do that we need to solve the issue where we hold a
		 * ref to the mm, but the mm can hold a ref to the fd after an
		 * mmap preventing anything from being cleaned up.
		 */
		pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe);
		return schedule_cxl_fault(ctx, dsisr, dar);
	}

	if (dsisr & CXL_PSL_DSISR_An_M)
		pr_devel("CXL interrupt: PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_P)
		pr_devel("CXL interrupt: Storage protection violation\n");
	if (dsisr & CXL_PSL_DSISR_An_A)
		pr_devel("CXL interrupt: AFU lock access to write through or cache inhibited storage\n");
	if (dsisr & CXL_PSL_DSISR_An_S)
		pr_devel("CXL interrupt: Access was afu_wr or afu_zero\n");
	if (dsisr & CXL_PSL_DSISR_An_K)
		pr_devel("CXL interrupt: Access not permitted by virtual page class key protection\n");

	if (dsisr & CXL_PSL_DSISR_An_DM) {
		/*
		 * In some cases we might be able to handle the fault
		 * immediately if hash_page would succeed, but we still need
		 * the task's mm, which as above we can't get without a lock.
		 */
		pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe);
		return schedule_cxl_fault(ctx, dsisr, dar);
	}
	if (dsisr & CXL_PSL_DSISR_An_ST)
		WARN(1, "CXL interrupt: Segment Table PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_UR)
		pr_devel("CXL interrupt: AURP PTE not found\n");
	if (dsisr & CXL_PSL_DSISR_An_PE)
		return handle_psl_slice_error(ctx, dsisr, irq_info->errstat);
	if (dsisr & CXL_PSL_DSISR_An_AE) {
		pr_devel("CXL interrupt: AFU Error %.16llx\n", irq_info->afu_err);

		if (ctx->pending_afu_err) {
			/*
			 * This shouldn't happen - the PSL treats these errors
			 * as fatal and will have reset the AFU, so there's not
			 * much point buffering multiple AFU errors.
			 * OTOH if we DO ever see a storm of these come in it's
			 * probably best that we log them somewhere:
			 */
			dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
					    "undelivered to pe %i: %.16llx\n",
					    ctx->pe, irq_info->afu_err);
		} else {
			spin_lock(&ctx->lock);
			ctx->afu_err = irq_info->afu_err;
			ctx->pending_afu_err = 1;
			spin_unlock(&ctx->lock);

			wake_up_all(&ctx->wq);
		}

		/* Return here so we don't fall through to the "Unhandled" WARN */
		return cxl_ack_irq(ctx, CXL_PSL_TFC_An_A, 0);
	}
	if (dsisr & CXL_PSL_DSISR_An_OC)
		pr_devel("CXL interrupt: OS Context Warning\n");

	WARN(1, "Unhandled CXL PSL IRQ\n");
	return IRQ_HANDLED;
}

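/*
 * If we couldn't read the interrupt details or find the owning context, we
 * still have to ack the PSL so the slice isn't left stalled waiting for a
 * response: translation faults (CXL_PSL_DSISR_TRANS) are acked with
 * TFC_An_AE (restart with address error), anything else with TFC_An_A
 * (acknowledge non-translation fault), per the TFC bit definitions in
 * cxl.h.
 */
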
static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
{
	if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
	else
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);

	return IRQ_HANDLED;
}

static irqreturn_t cxl_irq_multiplexed(int irq, void *data)
{
	struct cxl_afu *afu = data;
	struct cxl_context *ctx;
	struct cxl_irq_info irq_info;
	int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
	int ret;

	if ((ret = cxl_get_irq(afu, &irq_info))) {
		WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
		return fail_psl_irq(afu, &irq_info);
	}

	rcu_read_lock();
	ctx = idr_find(&afu->contexts_idr, ph);
	if (ctx) {
		ret = cxl_irq(irq, ctx, &irq_info);
		rcu_read_unlock();
		return ret;
	}
	rcu_read_unlock();

	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %.16llx DAR"
		" %.16llx\n(Possible AFU HW issue - was a term/remove acked"
		" with outstanding transactions?)\n", ph, irq_info.dsisr,
		irq_info.dar);
	return fail_psl_irq(afu, &irq_info);
}

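/*
 * Per-context AFU interrupts. IRQ range 0 of each context holds the single
 * PSL interrupt that is multiplexed across all contexts on the AFU (demuxed
 * above via the PE handle); ranges 1..CXL_IRQ_RANGES-1 hold the AFU
 * interrupts the context sees, numbered from 1 across the ranges. For
 * example, with range[1] = 4 and range[2] = 2, a hwirq of offset[2] + 1 is
 * reported to the context as AFU interrupt 6.
 */
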
static irqreturn_t cxl_irq_afu(int irq, void *data)
{
	struct cxl_context *ctx = data;
	irq_hw_number_t hwirq = irqd_to_hwirq(irq_get_irq_data(irq));
	int irq_off, afu_irq = 1;
	__u16 range;
	int r;

	for (r = 1; r < CXL_IRQ_RANGES; r++) {
		irq_off = hwirq - ctx->irqs.offset[r];
		range = ctx->irqs.range[r];
		if (irq_off >= 0 && irq_off < range) {
			afu_irq += irq_off;
			break;
		}
		afu_irq += range;
	}
	if (unlikely(r >= CXL_IRQ_RANGES)) {
		WARN(1, "Received AFU IRQ out of range for pe %i (virq %i hwirq %lx)\n",
		     ctx->pe, irq, hwirq);
		return IRQ_HANDLED;
	}

	pr_devel("Received AFU interrupt %i for pe: %i (virq %i hwirq %lx)\n",
		 afu_irq, ctx->pe, irq, hwirq);

	if (unlikely(!ctx->irq_bitmap)) {
		WARN(1, "Received AFU IRQ for context with no IRQ bitmap\n");
		return IRQ_HANDLED;
	}
	spin_lock(&ctx->lock);
	set_bit(afu_irq - 1, ctx->irq_bitmap);
	ctx->pending_irq = true;
	spin_unlock(&ctx->lock);

	wake_up_all(&ctx->wq);

	return IRQ_HANDLED;
}

unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
			 irq_handler_t handler, void *cookie, const char *name)
{
	unsigned int virq;
	int result;

	/* IRQ Domain? */
	virq = irq_create_mapping(NULL, hwirq);
	if (!virq) {
		dev_warn(&adapter->dev, "cxl_map_irq: irq_create_mapping failed\n");
		return 0;
	}

	cxl_setup_irq(adapter, hwirq, virq);

	pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);

	result = request_irq(virq, handler, 0, name, cookie);
	if (result) {
		dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
		return 0;
	}

	return virq;
}

void cxl_unmap_irq(unsigned int virq, void *cookie)
{
	free_irq(virq, cookie);
	irq_dispose_mapping(virq);
}

static int cxl_register_one_irq(struct cxl *adapter,
				irq_handler_t handler,
				void *cookie,
				irq_hw_number_t *dest_hwirq,
				unsigned int *dest_virq,
				const char *name)
{
	int hwirq, virq;

	if ((hwirq = cxl_alloc_one_irq(adapter)) < 0)
		return hwirq;

	if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name)))
		goto err;

	*dest_hwirq = hwirq;
	*dest_virq = virq;

	return 0;

err:
	cxl_release_one_irq(adapter, hwirq);
	return -ENOMEM;
}

int cxl_register_psl_err_irq(struct cxl *adapter)
{
	int rc;

	adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&adapter->dev));
	if (!adapter->irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(adapter, cxl_irq_err, adapter,
				       &adapter->err_hwirq,
				       &adapter->err_virq,
				       adapter->irq_name))) {
		kfree(adapter->irq_name);
		adapter->irq_name = NULL;
		return rc;
	}

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff);

	return 0;
}

void cxl_release_psl_err_irq(struct cxl *adapter)
{
	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
	cxl_unmap_irq(adapter->err_virq, adapter);
	cxl_release_one_irq(adapter, adapter->err_hwirq);
	kfree(adapter->irq_name);
}

int cxl_register_serr_irq(struct cxl_afu *afu)
{
	u64 serr;
	int rc;

	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, cxl_slice_irq_err, afu,
				       &afu->serr_hwirq,
				       &afu->serr_virq, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return rc;
	}

	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return 0;
}

void cxl_release_serr_irq(struct cxl_afu *afu)
{
	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
}

int cxl_register_psl_irq(struct cxl_afu *afu)
{
	int rc;

	afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
				      dev_name(&afu->dev));
	if (!afu->psl_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu,
				       &afu->psl_hwirq, &afu->psl_virq,
				       afu->psl_irq_name))) {
		kfree(afu->psl_irq_name);
		afu->psl_irq_name = NULL;
	}
	return rc;
}

void cxl_release_psl_irq(struct cxl_afu *afu)
{
	cxl_unmap_irq(afu->psl_virq, afu);
	cxl_release_one_irq(afu->adapter, afu->psl_hwirq);
	kfree(afu->psl_irq_name);
}

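/*
 * Interrupt names ("cxl-<afu-dev>-pe<pe>-<n>") are allocated up front and
 * kept on ctx->irq_names, in AFU interrupt order, so that request_irq()
 * has stable name storage for /proc/interrupts for the lifetime of the
 * context. afu_register_irqs() below allocates every name before touching
 * any hardware IRQs, so a failed allocation only ever has names to unwind.
 */
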
void afu_irq_name_free(struct cxl_context *ctx)
{
	struct cxl_irq_name *irq_name, *tmp;

	list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
		kfree(irq_name->name);
		list_del(&irq_name->list);
		kfree(irq_name);
	}
}

int afu_register_irqs(struct cxl_context *ctx, u32 count)
{
	irq_hw_number_t hwirq;
	int rc, r, i, j = 1;
	struct cxl_irq_name *irq_name;

	if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count)))
		return rc;

	/* Multiplexed PSL Interrupt */
	ctx->irqs.offset[0] = ctx->afu->psl_hwirq;
	ctx->irqs.range[0] = 1;

	ctx->irq_count = count;
	ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
				  sizeof(*ctx->irq_bitmap), GFP_KERNEL);
	if (!ctx->irq_bitmap)
		return -ENOMEM;

	/*
	 * Allocate names first. If any fail, bail out before allocating
	 * actual hardware IRQs.
	 */
	INIT_LIST_HEAD(&ctx->irq_names);
	for (r = 1; r < CXL_IRQ_RANGES; r++) {
		for (i = 0; i < ctx->irqs.range[r]; i++) {
			irq_name = kmalloc(sizeof(struct cxl_irq_name),
					   GFP_KERNEL);
			if (!irq_name)
				goto out;
			irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i",
						   dev_name(&ctx->afu->dev),
						   ctx->pe, j);
			if (!irq_name->name) {
				kfree(irq_name);
				goto out;
			}
			/* Add to tail so the next loop gets the correct order */
			list_add_tail(&irq_name->list, &ctx->irq_names);
			j++;
		}
	}

	/* We've allocated all memory now, so let's do the irq allocations */
	irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
	for (r = 1; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			cxl_map_irq(ctx->afu->adapter, hwirq,
				    cxl_irq_afu, ctx, irq_name->name);
			irq_name = list_next_entry(irq_name, list);
		}
	}

	return 0;

out:
	afu_irq_name_free(ctx);
	return -ENOMEM;
}

void afu_release_irqs(struct cxl_context *ctx)
{
	irq_hw_number_t hwirq;
	unsigned int virq;
	int r, i;

	for (r = 1; r < CXL_IRQ_RANGES; r++) {
		hwirq = ctx->irqs.offset[r];
		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
			virq = irq_find_mapping(NULL, hwirq);
			if (virq)
				cxl_unmap_irq(virq, ctx);
		}
	}

	afu_irq_name_free(ctx);
	cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
}