// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

enum irq_work_type {
	IRQ_WORK_NORMAL = 0,
	IRQ_WORK_PROCESS_FAULT,
};

struct idxd_fault {
	struct work_struct work;
	u64 addr;
	struct idxd_device *idxd;
};

static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
				 int *processed, u64 data);
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
				     int *processed, u64 data);

static void idxd_device_reinit(struct work_struct *work)
{
	struct idxd_device *idxd = container_of(work, struct idxd_device, work);
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	idxd_device_reset(idxd);
	rc = idxd_device_config(idxd);
	if (rc < 0)
		goto out;

	rc = idxd_device_enable(idxd);
	if (rc < 0)
		goto out;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			rc = idxd_wq_enable(wq);
			if (rc < 0) {
				dev_warn(dev, "Unable to re-enable wq %s\n",
					 dev_name(wq_confdev(wq)));
			}
		}
	}

	return;

 out:
	idxd_device_clear_state(idxd);
}

static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
{
	struct device *dev = &idxd->pdev->dev;
	union gensts_reg gensts;
	u32 val = 0;
	int i;
	bool err = false;

	if (cause & IDXD_INTC_ERR) {
		spin_lock_bh(&idxd->dev_lock);
		for (i = 0; i < 4; i++)
			idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
					IDXD_SWERR_OFFSET + i * sizeof(u64));

		iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK,
			  idxd->reg_base + IDXD_SWERR_OFFSET);

		if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
			int id = idxd->sw_err.wq_idx;
			struct idxd_wq *wq = idxd->wqs[id];

			if (wq->type == IDXD_WQT_USER)
				wake_up_interruptible(&wq->err_queue);
		} else {
			int i;

			for (i = 0; i < idxd->max_wqs; i++) {
				struct idxd_wq *wq = idxd->wqs[i];

				if (wq->type == IDXD_WQT_USER)
					wake_up_interruptible(&wq->err_queue);
			}
		}

		spin_unlock_bh(&idxd->dev_lock);
		val |= IDXD_INTC_ERR;

		for (i = 0; i < 4; i++)
			dev_warn(dev, "err[%d]: %#16.16llx\n",
				 i, idxd->sw_err.bits[i]);
		err = true;
	}

	if (cause & IDXD_INTC_CMD) {
		val |= IDXD_INTC_CMD;
		complete(idxd->cmd_done);
	}

	if (cause & IDXD_INTC_OCCUPY) {
		/* Driver does not utilize occupancy interrupt */
		val |= IDXD_INTC_OCCUPY;
	}

	if (cause & IDXD_INTC_PERFMON_OVFL) {
		val |= IDXD_INTC_PERFMON_OVFL;
		perfmon_counter_overflow(idxd);
	}

	val ^= cause;
	if (val)
		dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
			      val);

	if (!err)
		return 0;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
		idxd->state = IDXD_DEV_HALTED;
		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
			/*
			 * If we need a software reset, we will throw the work
			 * on a system workqueue in order to allow interrupts
			 * for the device command completions.
			 */
			INIT_WORK(&idxd->work, idxd_device_reinit);
			queue_work(idxd->wq, &idxd->work);
		} else {
			spin_lock_bh(&idxd->dev_lock);
			idxd_wqs_quiesce(idxd);
			idxd_wqs_unmap_portal(idxd);
			idxd_device_clear_state(idxd);
			dev_err(&idxd->pdev->dev,
				"idxd halted, need %s.\n",
				gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
				"FLR" : "system reset");
			spin_unlock_bh(&idxd->dev_lock);
			return -ENXIO;
		}
	}

	return 0;
}

irqreturn_t idxd_misc_thread(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;
	int rc;
	u32 cause;

	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	if (cause)
		iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);

	while (cause) {
		rc = process_misc_interrupts(idxd, cause);
		if (rc < 0)
			break;
		cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
		if (cause)
			iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	}

	return IRQ_HANDLED;
}

static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
				     int *processed, u64 data)
{
	struct idxd_desc *desc, *t;
	struct llist_node *head;
	int queued = 0;
	unsigned long flags;

	*processed = 0;
	head = llist_del_all(&irq_entry->pending_llist);
	if (!head)
		goto out;

	llist_for_each_entry_safe(desc, t, head, llnode) {
		u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;

		if (status) {
			if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
				complete_desc(desc, IDXD_COMPLETE_ABORT);
				(*processed)++;
				continue;
			}

			complete_desc(desc, IDXD_COMPLETE_NORMAL);
			(*processed)++;
		} else {
			spin_lock_irqsave(&irq_entry->list_lock, flags);
			list_add_tail(&desc->list,
				      &irq_entry->work_list);
			spin_unlock_irqrestore(&irq_entry->list_lock, flags);
			queued++;
		}
	}

 out:
	return queued;
}

static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
				 int *processed, u64 data)
{
	int queued = 0;
	unsigned long flags;
	LIST_HEAD(flist);
	struct idxd_desc *desc, *n;

	*processed = 0;

	/*
	 * This lock protects the list from corruption by accesses that
	 * happen outside of the irq handler thread.
	 */
	spin_lock_irqsave(&irq_entry->list_lock, flags);
	if (list_empty(&irq_entry->work_list)) {
		spin_unlock_irqrestore(&irq_entry->list_lock, flags);
		return 0;
	}

	list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
		if (desc->completion->status) {
			list_del(&desc->list);
			(*processed)++;
			list_add_tail(&desc->list, &flist);
		} else {
			queued++;
		}
	}

	spin_unlock_irqrestore(&irq_entry->list_lock, flags);

	list_for_each_entry(desc, &flist, list) {
		u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;

		if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
			complete_desc(desc, IDXD_COMPLETE_ABORT);
			continue;
		}

		complete_desc(desc, IDXD_COMPLETE_NORMAL);
	}

	return queued;
}

static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
{
	int rc, processed, total = 0;

	/*
	 * There are two lists we are processing. The pending_llist is where
	 * the submitter adds all the submitted descriptors after sending them
	 * to the workqueue. It's a lockless singly linked list. The work_list
	 * is the common Linux doubly linked list. We are in a scenario of
	 * multiple producers and a single consumer. The producers are all
	 * the kernel submitters of descriptors, and the consumer is the
	 * kernel irq handler thread for the msix vector when using threaded
	 * irq. To work with the restrictions of llist to remain lockless,
	 * we are doing the following steps:
	 * 1. Iterate through the work_list and process any completed
	 *    descriptor. Delete the completed entries during iteration.
	 * 2. llist_del_all() from the pending list.
	 * 3. Iterate through the llist that was deleted from the pending list
	 *    and process the completed entries.
	 * 4. If the entry is still waiting on hardware, list_add_tail() to
	 *    the work_list.
	 * 5. Repeat until no more descriptors.
	 */
	do {
		rc = irq_process_work_list(irq_entry, &processed, 0);
		total += processed;
		if (rc != 0)
			continue;

		rc = irq_process_pending_llist(irq_entry, &processed, 0);
		total += processed;
	} while (rc != 0);

	return total;
}
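
/*
 * Producer-side context (not part of this file): descriptors land on
 * pending_llist from the submission path elsewhere in the driver. A
 * minimal sketch of that step, assuming the submitter has already built
 * a struct idxd_desc and holds the irq_entry for its completion vector
 * (variable names here are illustrative):
 *
 *	llist_add(&desc->llnode, &irq_entry->pending_llist);
 *
 * llist_add() is lock-free and safe for multiple concurrent producers,
 * which is why idxd_desc_process() above can act as the single consumer
 * and drain the pending list without taking a lock.
 */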

irqreturn_t idxd_wq_thread(int irq, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	int processed;

	processed = idxd_desc_process(irq_entry);
	if (processed == 0)
		return IRQ_NONE;

	return IRQ_HANDLED;
}
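
/*
 * Usage note: idxd_misc_thread() and idxd_wq_thread() are written as
 * threaded interrupt handlers. A hypothetical sketch of how they might
 * be requested during device initialization (the real setup lives in
 * the driver's init code; "misc_irq", "wq_irq", the devname strings and
 * the irq_entry variables are illustrative):
 *
 *	rc = request_threaded_irq(misc_irq, NULL, idxd_misc_thread,
 *				  0, "idxd-misc", misc_irq_entry);
 *
 *	rc = request_threaded_irq(wq_irq, NULL, idxd_wq_thread,
 *				  0, "idxd-portal", wq_irq_entry);
 *
 * With a NULL hard handler the interrupt core runs only the thread
 * function, keeping the potentially lengthy completion processing out
 * of hard interrupt context.
 */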