// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

enum irq_work_type {
	IRQ_WORK_NORMAL = 0,
	IRQ_WORK_PROCESS_FAULT,
};

struct idxd_fault {
	struct work_struct work;
	u64 addr;
	struct idxd_device *idxd;
};

static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
				 enum irq_work_type wtype,
				 int *processed, u64 data);
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
				     enum irq_work_type wtype,
				     int *processed, u64 data);

static void idxd_device_reinit(struct work_struct *work)
{
	struct idxd_device *idxd = container_of(work, struct idxd_device, work);
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	idxd_device_reset(idxd);
	rc = idxd_device_config(idxd);
	if (rc < 0)
		goto out;

	rc = idxd_device_enable(idxd);
	if (rc < 0)
		goto out;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			rc = idxd_wq_enable(wq);
			if (rc < 0) {
				dev_warn(dev, "Unable to re-enable wq %s\n",
					 dev_name(&wq->conf_dev));
			}
		}
	}

	return;

out:
	idxd_device_clear_state(idxd);
}

static void idxd_device_fault_work(struct work_struct *work)
{
	struct idxd_fault *fault = container_of(work, struct idxd_fault, work);
	struct idxd_irq_entry *ie;
	int i;
	int processed;
	int irqcnt = fault->idxd->num_wq_irqs + 1;

	for (i = 1; i < irqcnt; i++) {
		ie = &fault->idxd->irq_entries[i];
		irq_process_work_list(ie, IRQ_WORK_PROCESS_FAULT,
				      &processed, fault->addr);
		if (processed)
			break;

		irq_process_pending_llist(ie, IRQ_WORK_PROCESS_FAULT,
					  &processed, fault->addr);
		if (processed)
			break;
	}

	kfree(fault);
}

static int idxd_device_schedule_fault_process(struct idxd_device *idxd,
					      u64 fault_addr)
{
	struct idxd_fault *fault;

	fault = kmalloc(sizeof(*fault), GFP_ATOMIC);
	if (!fault)
		return -ENOMEM;

	fault->addr = fault_addr;
	fault->idxd = idxd;
	INIT_WORK(&fault->work, idxd_device_fault_work);
	queue_work(idxd->wq, &fault->work);
	return 0;
}

static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
{
	struct device *dev = &idxd->pdev->dev;
	union gensts_reg gensts;
	u32 val = 0;
	int i;
	bool err = false;

	if (cause & IDXD_INTC_ERR) {
		spin_lock_bh(&idxd->dev_lock);
		for (i = 0; i < 4; i++)
			idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
					IDXD_SWERR_OFFSET + i * sizeof(u64));

		iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK,
			  idxd->reg_base + IDXD_SWERR_OFFSET);

		if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
			int id = idxd->sw_err.wq_idx;
			struct idxd_wq *wq = idxd->wqs[id];

			if (wq->type == IDXD_WQT_USER)
				wake_up_interruptible(&wq->err_queue);
		} else {
			int i;

			for (i = 0; i < idxd->max_wqs; i++) {
				struct idxd_wq *wq = idxd->wqs[i];

				if (wq->type == IDXD_WQT_USER)
					wake_up_interruptible(&wq->err_queue);
			}
		}

		spin_unlock_bh(&idxd->dev_lock);
		val |= IDXD_INTC_ERR;

		for (i = 0; i < 4; i++)
			dev_warn(dev, "err[%d]: %#16.16llx\n",
				 i, idxd->sw_err.bits[i]);
		err = true;
	}

	if (cause & IDXD_INTC_CMD) {
		val |= IDXD_INTC_CMD;
		complete(idxd->cmd_done);
	}

	if (cause & IDXD_INTC_OCCUPY) {
		/* Driver does not utilize occupancy interrupt */
		val |= IDXD_INTC_OCCUPY;
	}

	if (cause & IDXD_INTC_PERFMON_OVFL) {
		val |= IDXD_INTC_PERFMON_OVFL;
		perfmon_counter_overflow(idxd);
	}

	val ^= cause;
	if (val)
		dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
			      val);

	if (!err)
		return 0;

	/*
	 * This case should rarely happen and typically is due to a software
	 * programming error by the driver.
	 */
	if (idxd->sw_err.valid &&
	    idxd->sw_err.desc_valid &&
	    idxd->sw_err.fault_addr)
		idxd_device_schedule_fault_process(idxd, idxd->sw_err.fault_addr);

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
		idxd->state = IDXD_DEV_HALTED;
		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
			/*
			 * If we need a software reset, we defer the work to a
			 * workqueue so that interrupts for the device command
			 * completions can still be serviced.
			 */
			INIT_WORK(&idxd->work, idxd_device_reinit);
			queue_work(idxd->wq, &idxd->work);
		} else {
			spin_lock_bh(&idxd->dev_lock);
			idxd_wqs_quiesce(idxd);
			idxd_wqs_unmap_portal(idxd);
			idxd_device_clear_state(idxd);
			dev_err(&idxd->pdev->dev,
				"idxd halted, need %s.\n",
				gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
				"FLR" : "system reset");
			spin_unlock_bh(&idxd->dev_lock);
			return -ENXIO;
		}
	}

	return 0;
}

irqreturn_t idxd_misc_thread(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;
	int rc;
	u32 cause;

	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	if (cause)
		iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);

	while (cause) {
		rc = process_misc_interrupts(idxd, cause);
		if (rc < 0)
			break;
		cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
		if (cause)
			iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	}

	return IRQ_HANDLED;
}

static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
{
	/*
	 * The completion address can be bad as well. Check whether the fault
	 * address matches the descriptor or the completion address.
	 */
	if ((u64)desc->hw == fault_addr || (u64)desc->completion == fault_addr) {
		struct idxd_device *idxd = desc->wq->idxd;
		struct device *dev = &idxd->pdev->dev;

		dev_warn(dev, "desc with fault address: %#llx\n", fault_addr);
		return true;
	}

	return false;
}

static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
				     enum irq_work_type wtype,
				     int *processed, u64 data)
{
	struct idxd_desc *desc, *t;
	struct llist_node *head;
	int queued = 0;
	unsigned long flags;
	enum idxd_complete_type reason;

	*processed = 0;
	head = llist_del_all(&irq_entry->pending_llist);
	if (!head)
		goto out;

	if (wtype == IRQ_WORK_NORMAL)
		reason = IDXD_COMPLETE_NORMAL;
	else
		reason = IDXD_COMPLETE_DEV_FAIL;

	llist_for_each_entry_safe(desc, t, head, llnode) {
		u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;

		if (status) {
			if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
				complete_desc(desc, IDXD_COMPLETE_ABORT);
				(*processed)++;
				continue;
			}

			if (unlikely(status != DSA_COMP_SUCCESS))
				match_fault(desc, data);
			complete_desc(desc, reason);
			(*processed)++;
		} else {
			spin_lock_irqsave(&irq_entry->list_lock, flags);
			list_add_tail(&desc->list,
				      &irq_entry->work_list);
			spin_unlock_irqrestore(&irq_entry->list_lock, flags);
			queued++;
		}
	}

out:
	return queued;
}

static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
				 enum irq_work_type wtype,
				 int *processed, u64 data)
{
	int queued = 0;
	unsigned long flags;
	LIST_HEAD(flist);
	struct idxd_desc *desc, *n;
	enum idxd_complete_type reason;

	*processed = 0;
	if (wtype == IRQ_WORK_NORMAL)
		reason = IDXD_COMPLETE_NORMAL;
	else
		reason = IDXD_COMPLETE_DEV_FAIL;

	/*
	 * This lock protects the list from corruption by accesses outside of
	 * the irq handler thread.
	 */
	spin_lock_irqsave(&irq_entry->list_lock, flags);
	if (list_empty(&irq_entry->work_list)) {
		spin_unlock_irqrestore(&irq_entry->list_lock, flags);
		return 0;
	}

	list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
		if (desc->completion->status) {
			list_del(&desc->list);
			(*processed)++;
			list_add_tail(&desc->list, &flist);
		} else {
			queued++;
		}
	}

	spin_unlock_irqrestore(&irq_entry->list_lock, flags);

	list_for_each_entry(desc, &flist, list) {
		u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;

		if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
			complete_desc(desc, IDXD_COMPLETE_ABORT);
			continue;
		}

		if (unlikely(status != DSA_COMP_SUCCESS))
			match_fault(desc, data);
		complete_desc(desc, reason);
	}

	return queued;
}

static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
{
	int rc, processed, total = 0;

	/*
	 * There are two lists we are processing. The pending_llist is where
	 * submitters add all the submitted descriptors after sending them to
	 * the workqueue. It's a lockless singly linked list. The work_list
	 * is the common Linux doubly linked list. We are in a scenario of
	 * multiple producers and a single consumer. The producers are all
	 * the kernel submitters of descriptors, and the consumer is the
	 * kernel irq handler thread for the msix vector when using threaded
	 * irq.
	 * To work with the restrictions of llist to remain lockless,
	 * we are doing the following steps:
	 * 1. Iterate through the work_list and process any completed
	 *    descriptor. Delete the completed entries during iteration.
	 * 2. llist_del_all() from the pending list.
	 * 3. Iterate through the llist that was deleted from the pending list
	 *    and process the completed entries.
	 * 4. If the entry is still waiting on hardware, list_add_tail() to
	 *    the work_list.
	 * 5. Repeat until no more descriptors.
	 */
	do {
		rc = irq_process_work_list(irq_entry, IRQ_WORK_NORMAL,
					   &processed, 0);
		total += processed;
		if (rc != 0)
			continue;

		rc = irq_process_pending_llist(irq_entry, IRQ_WORK_NORMAL,
					       &processed, 0);
		total += processed;
	} while (rc != 0);

	return total;
}

irqreturn_t idxd_wq_thread(int irq, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	int processed;

	processed = idxd_desc_process(irq_entry);
	if (processed == 0)
		return IRQ_NONE;

	return IRQ_HANDLED;
}
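
/*
 * Illustrative sketch only, kept as a comment so it is not compiled: how a
 * threaded handler such as idxd_misc_thread() is typically attached to an
 * MSI-X vector with request_threaded_irq(). The registration does not live
 * in this file; the vector index 0, the helper name idxd_request_misc_irq(),
 * and the "idxd-misc" name below are assumptions for illustration. The code
 * would also need <linux/interrupt.h>.
 *
 *	static int idxd_request_misc_irq(struct idxd_device *idxd)
 *	{
 *		struct idxd_irq_entry *ie = &idxd->irq_entries[0];
 *		int irq, rc;
 *
 *		irq = pci_irq_vector(idxd->pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 *		// NULL primary handler: all work runs in the irq thread
 *		rc = request_threaded_irq(irq, NULL, idxd_misc_thread, 0,
 *					  "idxd-misc", ie);
 *		if (rc < 0)
 *			dev_err(&idxd->pdev->dev,
 *				"Failed to request misc irq: %d\n", rc);
 *		return rc;
 *	}
 */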