1 /* 2 * Copyright 2014 IBM Corp. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public License 6 * as published by the Free Software Foundation; either version 7 * 2 of the License, or (at your option) any later version. 8 */ 9 10 #include <linux/workqueue.h> 11 #include <linux/sched/signal.h> 12 #include <linux/sched/mm.h> 13 #include <linux/pid.h> 14 #include <linux/mm.h> 15 #include <linux/moduleparam.h> 16 17 #undef MODULE_PARAM_PREFIX 18 #define MODULE_PARAM_PREFIX "cxl" "." 19 #include <asm/current.h> 20 #include <asm/copro.h> 21 #include <asm/mmu.h> 22 23 #include "cxl.h" 24 #include "trace.h" 25 26 static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb) 27 { 28 return ((sste->vsid_data == cpu_to_be64(slb->vsid)) && 29 (sste->esid_data == cpu_to_be64(slb->esid))); 30 } 31 32 /* 33 * This finds a free SSTE for the given SLB, or returns NULL if it's already in 34 * the segment table. 35 */ 36 static struct cxl_sste* find_free_sste(struct cxl_context *ctx, 37 struct copro_slb *slb) 38 { 39 struct cxl_sste *primary, *sste, *ret = NULL; 40 unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */ 41 unsigned int entry; 42 unsigned int hash; 43 44 if (slb->vsid & SLB_VSID_B_1T) 45 hash = (slb->esid >> SID_SHIFT_1T) & mask; 46 else /* 256M */ 47 hash = (slb->esid >> SID_SHIFT) & mask; 48 49 primary = ctx->sstp + (hash << 3); 50 51 for (entry = 0, sste = primary; entry < 8; entry++, sste++) { 52 if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V)) 53 ret = sste; 54 if (sste_matches(sste, slb)) 55 return NULL; 56 } 57 if (ret) 58 return ret; 59 60 /* Nothing free, select an entry to cast out */ 61 ret = primary + ctx->sst_lru; 62 ctx->sst_lru = (ctx->sst_lru + 1) & 0x7; 63 64 return ret; 65 } 66 67 static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb) 68 { 69 /* mask is the group index, we search primary and secondary here. */ 70 struct cxl_sste *sste; 71 unsigned long flags; 72 73 spin_lock_irqsave(&ctx->sste_lock, flags); 74 sste = find_free_sste(ctx, slb); 75 if (!sste) 76 goto out_unlock; 77 78 pr_devel("CXL Populating SST[%li]: %#llx %#llx\n", 79 sste - ctx->sstp, slb->vsid, slb->esid); 80 trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid); 81 82 sste->vsid_data = cpu_to_be64(slb->vsid); 83 sste->esid_data = cpu_to_be64(slb->esid); 84 out_unlock: 85 spin_unlock_irqrestore(&ctx->sste_lock, flags); 86 } 87 88 static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm, 89 u64 ea) 90 { 91 struct copro_slb slb = {0,0}; 92 int rc; 93 94 if (!(rc = copro_calculate_slb(mm, ea, &slb))) { 95 cxl_load_segment(ctx, &slb); 96 } 97 98 return rc; 99 } 100 101 static void cxl_ack_ae(struct cxl_context *ctx) 102 { 103 unsigned long flags; 104 105 cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_AE, 0); 106 107 spin_lock_irqsave(&ctx->lock, flags); 108 ctx->pending_fault = true; 109 ctx->fault_addr = ctx->dar; 110 ctx->fault_dsisr = ctx->dsisr; 111 spin_unlock_irqrestore(&ctx->lock, flags); 112 113 wake_up_all(&ctx->wq); 114 } 115 116 static int cxl_handle_segment_miss(struct cxl_context *ctx, 117 struct mm_struct *mm, u64 ea) 118 { 119 int rc; 120 121 pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea); 122 trace_cxl_ste_miss(ctx, ea); 123 124 if ((rc = cxl_fault_segment(ctx, mm, ea))) 125 cxl_ack_ae(ctx); 126 else { 127 128 mb(); /* Order seg table write to TFC MMIO write */ 129 cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0); 130 } 131 132 return IRQ_HANDLED; 133 } 134 135 static void cxl_handle_page_fault(struct cxl_context *ctx, 136 struct mm_struct *mm, u64 dsisr, u64 dar) 137 { 138 unsigned flt = 0; 139 int result; 140 unsigned long access, flags, inv_flags = 0; 141 142 trace_cxl_pte_miss(ctx, dsisr, dar); 143 144 if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) { 145 pr_devel("copro_handle_mm_fault failed: %#x\n", result); 146 return cxl_ack_ae(ctx); 147 } 148 149 if (!radix_enabled()) { 150 /* 151 * update_mmu_cache() will not have loaded the hash since current->trap 152 * is not a 0x400 or 0x300, so just call hash_page_mm() here. 153 */ 154 access = _PAGE_PRESENT | _PAGE_READ; 155 if (dsisr & CXL_PSL_DSISR_An_S) 156 access |= _PAGE_WRITE; 157 158 access |= _PAGE_PRIVILEGED; 159 if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID)) 160 access &= ~_PAGE_PRIVILEGED; 161 162 if (dsisr & DSISR_NOHPTE) 163 inv_flags |= HPTE_NOHPTE_UPDATE; 164 165 local_irq_save(flags); 166 hash_page_mm(mm, dar, access, 0x300, inv_flags); 167 local_irq_restore(flags); 168 } 169 pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe); 170 cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0); 171 } 172 173 /* 174 * Returns the mm_struct corresponding to the context ctx. 175 * mm_users == 0, the context may be in the process of being closed. 176 */ 177 static struct mm_struct *get_mem_context(struct cxl_context *ctx) 178 { 179 if (ctx->mm == NULL) 180 return NULL; 181 182 if (!atomic_inc_not_zero(&ctx->mm->mm_users)) 183 return NULL; 184 185 return ctx->mm; 186 } 187 188 static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr) 189 { 190 if ((cxl_is_psl8(ctx->afu)) && (dsisr & CXL_PSL_DSISR_An_DS)) 191 return true; 192 193 return false; 194 } 195 196 static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr) 197 { 198 if ((cxl_is_psl8(ctx->afu)) && (dsisr & CXL_PSL_DSISR_An_DM)) 199 return true; 200 201 if ((cxl_is_psl9(ctx->afu)) && 202 ((dsisr & CXL_PSL9_DSISR_An_CO_MASK) & 203 (CXL_PSL9_DSISR_An_PF_SLR | CXL_PSL9_DSISR_An_PF_RGC | 204 CXL_PSL9_DSISR_An_PF_RGP | CXL_PSL9_DSISR_An_PF_HRH | 205 CXL_PSL9_DSISR_An_PF_STEG))) 206 return true; 207 208 return false; 209 } 210 211 void cxl_handle_fault(struct work_struct *fault_work) 212 { 213 struct cxl_context *ctx = 214 container_of(fault_work, struct cxl_context, fault_work); 215 u64 dsisr = ctx->dsisr; 216 u64 dar = ctx->dar; 217 struct mm_struct *mm = NULL; 218 219 if (cpu_has_feature(CPU_FTR_HVMODE)) { 220 if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr || 221 cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar || 222 cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) { 223 /* Most likely explanation is harmless - a dedicated 224 * process has detached and these were cleared by the 225 * PSL purge, but warn about it just in case 226 */ 227 dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n"); 228 return; 229 } 230 } 231 232 /* Early return if the context is being / has been detached */ 233 if (ctx->status == CLOSED) { 234 cxl_ack_ae(ctx); 235 return; 236 } 237 238 pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. " 239 "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar); 240 241 if (!ctx->kernel) { 242 243 mm = get_mem_context(ctx); 244 if (mm == NULL) { 245 pr_devel("%s: unable to get mm for pe=%d pid=%i\n", 246 __func__, ctx->pe, pid_nr(ctx->pid)); 247 cxl_ack_ae(ctx); 248 return; 249 } else { 250 pr_devel("Handling page fault for pe=%d pid=%i\n", 251 ctx->pe, pid_nr(ctx->pid)); 252 } 253 } 254 255 if (cxl_is_segment_miss(ctx, dsisr)) 256 cxl_handle_segment_miss(ctx, mm, dar); 257 else if (cxl_is_page_fault(ctx, dsisr)) 258 cxl_handle_page_fault(ctx, mm, dsisr, dar); 259 else 260 WARN(1, "cxl_handle_fault has nothing to handle\n"); 261 262 if (mm) 263 mmput(mm); 264 } 265 266 static void cxl_prefault_one(struct cxl_context *ctx, u64 ea) 267 { 268 struct mm_struct *mm; 269 270 mm = get_mem_context(ctx); 271 if (mm == NULL) { 272 pr_devel("cxl_prefault_one unable to get mm %i\n", 273 pid_nr(ctx->pid)); 274 return; 275 } 276 277 cxl_fault_segment(ctx, mm, ea); 278 279 mmput(mm); 280 } 281 282 static u64 next_segment(u64 ea, u64 vsid) 283 { 284 if (vsid & SLB_VSID_B_1T) 285 ea |= (1ULL << 40) - 1; 286 else 287 ea |= (1ULL << 28) - 1; 288 289 return ea + 1; 290 } 291 292 static void cxl_prefault_vma(struct cxl_context *ctx) 293 { 294 u64 ea, last_esid = 0; 295 struct copro_slb slb; 296 struct vm_area_struct *vma; 297 int rc; 298 struct mm_struct *mm; 299 300 mm = get_mem_context(ctx); 301 if (mm == NULL) { 302 pr_devel("cxl_prefault_vm unable to get mm %i\n", 303 pid_nr(ctx->pid)); 304 return; 305 } 306 307 down_read(&mm->mmap_sem); 308 for (vma = mm->mmap; vma; vma = vma->vm_next) { 309 for (ea = vma->vm_start; ea < vma->vm_end; 310 ea = next_segment(ea, slb.vsid)) { 311 rc = copro_calculate_slb(mm, ea, &slb); 312 if (rc) 313 continue; 314 315 if (last_esid == slb.esid) 316 continue; 317 318 cxl_load_segment(ctx, &slb); 319 last_esid = slb.esid; 320 } 321 } 322 up_read(&mm->mmap_sem); 323 324 mmput(mm); 325 } 326 327 void cxl_prefault(struct cxl_context *ctx, u64 wed) 328 { 329 switch (ctx->afu->prefault_mode) { 330 case CXL_PREFAULT_WED: 331 cxl_prefault_one(ctx, wed); 332 break; 333 case CXL_PREFAULT_ALL: 334 cxl_prefault_vma(ctx); 335 break; 336 default: 337 break; 338 } 339 } 340