// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->state = IDXD_WQ_DISABLED;
	}
}

static int idxd_restart(struct idxd_device *idxd)
{
	int i, rc;

	lockdep_assert_held(&idxd->dev_lock);

	rc = __idxd_device_reset(idxd);
	if (rc < 0)
		goto out;

	rc = idxd_device_config(idxd);
	if (rc < 0)
		goto out;

	rc = idxd_device_enable(idxd);
	if (rc < 0)
		goto out;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			rc = idxd_wq_enable(wq);
			if (rc < 0) {
				dev_warn(&idxd->pdev->dev,
					 "Unable to re-enable wq %s\n",
					 dev_name(&wq->conf_dev));
			}
		}
	}

	return 0;

 out:
	idxd_device_wqs_clear_state(idxd);
	idxd->state = IDXD_DEV_HALTED;
	return rc;
}

irqreturn_t idxd_irq_handler(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;

	/* Mask the vector; the threaded handler unmasks it when done. */
	idxd_mask_msix_vector(idxd, irq_entry->id);
	return IRQ_WAKE_THREAD;
}

irqreturn_t idxd_misc_thread(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;
	struct device *dev = &idxd->pdev->dev;
	union gensts_reg gensts;
	u32 cause, val = 0;
	int i, rc;
	bool err = false;

	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);

	if (cause & IDXD_INTC_ERR) {
		spin_lock_bh(&idxd->dev_lock);
		for (i = 0; i < 4; i++)
			idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
					IDXD_SWERR_OFFSET + i * sizeof(u64));
		iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);

		if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
			int id = idxd->sw_err.wq_idx;
			struct idxd_wq *wq = &idxd->wqs[id];

			if (wq->type == IDXD_WQT_USER)
				wake_up_interruptible(&wq->idxd_cdev.err_queue);
		} else {
			int i;

			for (i = 0; i < idxd->max_wqs; i++) {
				struct idxd_wq *wq = &idxd->wqs[i];

				if (wq->type == IDXD_WQT_USER)
					wake_up_interruptible(&wq->idxd_cdev.err_queue);
			}
		}

		spin_unlock_bh(&idxd->dev_lock);
		val |= IDXD_INTC_ERR;

		for (i = 0; i < 4; i++)
			dev_warn(dev, "err[%d]: %#16.16llx\n",
				 i, idxd->sw_err.bits[i]);
		err = true;
	}

	if (cause & IDXD_INTC_CMD) {
		/* Driver does not use command interrupts; just acknowledge. */
		val |= IDXD_INTC_CMD;
	}

	if (cause & IDXD_INTC_OCCUPY) {
		/* Driver does not utilize occupancy interrupt */
		val |= IDXD_INTC_OCCUPY;
	}

	if (cause & IDXD_INTC_PERFMON_OVFL) {
		/*
		 * Driver does not utilize perfmon counter overflow interrupt
		 * yet.
		 */
		val |= IDXD_INTC_PERFMON_OVFL;
	}

	val ^= cause;
	if (val)
		dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
			      val);

	iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	if (!err)
		return IRQ_HANDLED;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
		spin_lock_bh(&idxd->dev_lock);
		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
			rc = idxd_restart(idxd);
			if (rc < 0)
				dev_err(&idxd->pdev->dev,
					"idxd restart failed, device halted.\n");
		} else {
			idxd_device_wqs_clear_state(idxd);
			idxd->state = IDXD_DEV_HALTED;
			dev_err(&idxd->pdev->dev,
				"idxd halted, need %s.\n",
				gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
				"FLR" : "system reset");
		}
		spin_unlock_bh(&idxd->dev_lock);
	}

	idxd_unmask_msix_vector(idxd, irq_entry->id);
	return IRQ_HANDLED;
}

static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
				     int *processed)
{
	struct idxd_desc *desc, *t;
	struct llist_node *head;
	int queued = 0;

	*processed = 0;
	head = llist_del_all(&irq_entry->pending_llist);
	if (!head)
		return 0;

	llist_for_each_entry_safe(desc, t, head, llnode) {
		if (desc->completion->status) {
			idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
			idxd_free_desc(desc->wq, desc);
			(*processed)++;
		} else {
			/* Not done yet; park on work_list for a later pass. */
			list_add_tail(&desc->list, &irq_entry->work_list);
			queued++;
		}
	}

	return queued;
}

static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
				 int *processed)
{
	struct list_head *node, *next;
	int queued = 0;

	*processed = 0;
	if (list_empty(&irq_entry->work_list))
		return 0;

	list_for_each_safe(node, next, &irq_entry->work_list) {
		struct idxd_desc *desc =
			container_of(node, struct idxd_desc, list);

		if (desc->completion->status) {
			list_del(&desc->list);
			/* process and callback */
			idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
			idxd_free_desc(desc->wq, desc);
			(*processed)++;
		} else {
			queued++;
		}
	}

	return queued;
}

static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
{
	int rc, processed, total = 0;

	/*
	 * There are two lists we are processing. The pending_llist is where
	 * the submitter adds all submitted descriptors after sending them to
	 * the work queue. It's a lockless singly linked list. The work_list
	 * is the common Linux doubly linked list. We are in a scenario of
	 * multiple producers and a single consumer. The producers are all
	 * the kernel submitters of descriptors, and the consumer is the
	 * kernel irq handler thread for the MSI-X vector when using a
	 * threaded irq. To work within the restrictions of llist and remain
	 * lockless, we take the following steps:
	 * 1. Iterate through the work_list and process any completed
	 *    descriptors. Delete the completed entries during iteration.
	 * 2. llist_del_all() from the pending list.
	 * 3. Iterate through the llist that was deleted from the pending
	 *    list and process the completed entries.
	 * 4. If the entry is still waiting on hardware, list_add_tail() to
	 *    the work_list.
	 * 5. Repeat until no more descriptors.
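	 *
	 * For example, a submitter may llist_add() a descriptor to
	 * pending_llist concurrently with step 2. llist_del_all() detaches
	 * the whole list atomically, so that descriptor is either picked up
	 * in this pass or left for the next interrupt; because this thread
	 * is the sole consumer, no descriptor can be observed twice.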
	 */
	do {
		rc = irq_process_work_list(irq_entry, &processed);
		total += processed;
		if (rc != 0)
			continue;

		rc = irq_process_pending_llist(irq_entry, &processed);
		total += processed;
	} while (rc != 0);

	return total;
}

irqreturn_t idxd_wq_thread(int irq, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	int processed;

	processed = idxd_desc_process(irq_entry);
	idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);
	/* catch anything unprocessed after unmasking */
	processed += idxd_desc_process(irq_entry);

	if (processed == 0)
		return IRQ_NONE;

	return IRQ_HANDLED;
}
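
/*
 * Usage sketch (illustrative only): the handlers above are meant to be
 * paired via request_threaded_irq(), with idxd_irq_handler() as the
 * hard-irq half that masks the vector and wakes either idxd_misc_thread()
 * (device events, vector 0) or idxd_wq_thread() (descriptor completions).
 * The field irq_entry->vector below is an assumption about how probe code
 * would record the MSI-X vector; it is not a field used in this file.
 *
 *	rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler,
 *				  idxd_misc_thread, 0, "idxd-misc",
 *				  irq_entry);
 *	if (rc < 0)
 *		dev_err(dev, "Failed to request misc irq\n");
 */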