// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

static void idxd_device_reinit(struct work_struct *work)
{
	struct idxd_device *idxd = container_of(work, struct idxd_device, work);
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	idxd_device_reset(idxd);
	rc = idxd_device_config(idxd);
	if (rc < 0)
		goto out;

	rc = idxd_device_enable(idxd);
	if (rc < 0)
		goto out;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			rc = idxd_wq_enable(wq);
			if (rc < 0) {
				dev_warn(dev, "Unable to re-enable wq %s\n",
					 dev_name(&wq->conf_dev));
			}
		}
	}

	return;

 out:
	idxd_device_wqs_clear_state(idxd);
}

irqreturn_t idxd_irq_handler(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;

	idxd_mask_msix_vector(idxd, irq_entry->id);
	return IRQ_WAKE_THREAD;
}

irqreturn_t idxd_misc_thread(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;
	struct device *dev = &idxd->pdev->dev;
	union gensts_reg gensts;
	u32 cause, val = 0;
	int i;
	bool err = false;

	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);

	if (cause & IDXD_INTC_ERR) {
		spin_lock_bh(&idxd->dev_lock);
		for (i = 0; i < 4; i++)
			idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
					IDXD_SWERR_OFFSET + i * sizeof(u64));
		iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);

		if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
			int id = idxd->sw_err.wq_idx;
			struct idxd_wq *wq = &idxd->wqs[id];

			if (wq->type == IDXD_WQT_USER)
				wake_up_interruptible(&wq->idxd_cdev.err_queue);
		} else {
			int i;

			for (i = 0; i < idxd->max_wqs; i++) {
				struct idxd_wq *wq = &idxd->wqs[i];

				if (wq->type == IDXD_WQT_USER)
					wake_up_interruptible(&wq->idxd_cdev.err_queue);
			}
		}

		spin_unlock_bh(&idxd->dev_lock);
		val |= IDXD_INTC_ERR;

		for (i = 0; i < 4; i++)
			dev_warn(dev, "err[%d]: %#16.16llx\n",
				 i, idxd->sw_err.bits[i]);
		err = true;
	}

	if (cause & IDXD_INTC_CMD) {
		val |= IDXD_INTC_CMD;
		complete(idxd->cmd_done);
	}

	if (cause & IDXD_INTC_OCCUPY) {
		/* Driver does not utilize occupancy interrupt */
		val |= IDXD_INTC_OCCUPY;
	}

	if (cause & IDXD_INTC_PERFMON_OVFL) {
		/*
		 * Driver does not utilize perfmon counter overflow interrupt
		 * yet.
		 */
		val |= IDXD_INTC_PERFMON_OVFL;
	}

	val ^= cause;
	if (val)
		dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
			      val);

	iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	if (!err)
		goto out;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
		idxd->state = IDXD_DEV_HALTED;
		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
			/*
			 * If we need a software reset, we will throw the work
			 * on a system workqueue in order to allow interrupts
			 * for the device command completions.
			 */
			INIT_WORK(&idxd->work, idxd_device_reinit);
			queue_work(idxd->wq, &idxd->work);
		} else {
			spin_lock_bh(&idxd->dev_lock);
			idxd_device_wqs_clear_state(idxd);
			dev_err(&idxd->pdev->dev,
				"idxd halted, need %s.\n",
				gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
				"FLR" : "system reset");
			spin_unlock_bh(&idxd->dev_lock);
		}
	}

 out:
	idxd_unmask_msix_vector(idxd, irq_entry->id);
	return IRQ_HANDLED;
}

static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
				     int *processed)
{
	struct idxd_desc *desc, *t;
	struct llist_node *head;
	int queued = 0;

	*processed = 0;
	head = llist_del_all(&irq_entry->pending_llist);
	if (!head)
		return 0;

	llist_for_each_entry_safe(desc, t, head, llnode) {
		if (desc->completion->status) {
			idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
			idxd_free_desc(desc->wq, desc);
			(*processed)++;
		} else {
			list_add_tail(&desc->list, &irq_entry->work_list);
			queued++;
		}
	}

	return queued;
}

static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
				 int *processed)
{
	struct list_head *node, *next;
	int queued = 0;

	*processed = 0;
	if (list_empty(&irq_entry->work_list))
		return 0;

	list_for_each_safe(node, next, &irq_entry->work_list) {
		struct idxd_desc *desc =
			container_of(node, struct idxd_desc, list);

		if (desc->completion->status) {
			list_del(&desc->list);
			/* process and callback */
			idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
			idxd_free_desc(desc->wq, desc);
			(*processed)++;
		} else {
			queued++;
		}
	}

	return queued;
}

static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
{
	int rc, processed, total = 0;

	/*
	 * There are two lists we are processing. The pending_llist is where
	 * the submitter adds all the submitted descriptors after sending them
	 * to the workqueue. It's a lockless singly linked list. The work_list
	 * is the common Linux doubly linked list. We are in a scenario of
	 * multiple producers and a single consumer. The producers are all
	 * the kernel submitters of descriptors, and the consumer is the
	 * kernel irq handler thread for the msix vector when using threaded
	 * irq. To work with the restrictions of llist to remain lockless,
	 * we are doing the following steps:
	 * 1. Iterate through the work_list and process any completed
	 *    descriptor. Delete the completed entries during iteration.
	 * 2. llist_del_all() from the pending list.
	 * 3. Iterate through the llist that was deleted from the pending list
	 *    and process the completed entries.
	 * 4. If the entry is still waiting on hardware, list_add_tail() to
	 *    the work_list.
	 * 5. Repeat until no more descriptors.
	 */
	do {
		rc = irq_process_work_list(irq_entry, &processed);
		total += processed;
		if (rc != 0)
			continue;

		rc = irq_process_pending_llist(irq_entry, &processed);
		total += processed;
	} while (rc != 0);

	return total;
}

irqreturn_t idxd_wq_thread(int irq, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	int processed;

	processed = idxd_desc_process(irq_entry);
	idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);

	if (processed == 0)
		return IRQ_NONE;

	return IRQ_HANDLED;
}