/*
 * VFIO platform devices interrupt handling
 *
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/eventfd.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vfio.h>
#include <linux/irq.h>

#include "vfio_platform_private.h"

static void vfio_platform_mask(struct vfio_platform_irq *irq_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_ctx->lock, flags);

	if (!irq_ctx->masked) {
		disable_irq_nosync(irq_ctx->hwirq);
		irq_ctx->masked = true;
	}

	spin_unlock_irqrestore(&irq_ctx->lock, flags);
}

static int vfio_platform_mask_handler(void *opaque, void *unused)
{
	struct vfio_platform_irq *irq_ctx = opaque;

	vfio_platform_mask(irq_ctx);

	return 0;
}

static int vfio_platform_set_irq_mask(struct vfio_platform_device *vdev,
				      unsigned index, unsigned start,
				      unsigned count, uint32_t flags,
				      void *data)
{
	if (start != 0 || count != 1)
		return -EINVAL;

	if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;

		if (fd >= 0)
			return vfio_virqfd_enable((void *) &vdev->irqs[index],
						  vfio_platform_mask_handler,
						  NULL, NULL,
						  &vdev->irqs[index].mask, fd);

		vfio_virqfd_disable(&vdev->irqs[index].mask);
		return 0;
	}

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_platform_mask(&vdev->irqs[index]);

	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t mask = *(uint8_t *)data;

		if (mask)
			vfio_platform_mask(&vdev->irqs[index]);
	}

	return 0;
}

static void vfio_platform_unmask(struct vfio_platform_irq *irq_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_ctx->lock, flags);

	if (irq_ctx->masked) {
		enable_irq(irq_ctx->hwirq);
		irq_ctx->masked = false;
	}

	spin_unlock_irqrestore(&irq_ctx->lock, flags);
}

static int vfio_platform_unmask_handler(void *opaque, void *unused)
{
	struct vfio_platform_irq *irq_ctx = opaque;

	vfio_platform_unmask(irq_ctx);

	return 0;
}

static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev,
					unsigned index, unsigned start,
					unsigned count, uint32_t flags,
					void *data)
{
	if (start != 0 || count != 1)
		return -EINVAL;

	if (!(vdev->irqs[index].flags & VFIO_IRQ_INFO_MASKABLE))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;

		if (fd >= 0)
			return vfio_virqfd_enable((void *) &vdev->irqs[index],
						  vfio_platform_unmask_handler,
						  NULL, NULL,
						  &vdev->irqs[index].unmask,
						  fd);

		vfio_virqfd_disable(&vdev->irqs[index].unmask);
		return 0;
	}

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		vfio_platform_unmask(&vdev->irqs[index]);

	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t unmask = *(uint8_t *)data;

		if (unmask)
			vfio_platform_unmask(&vdev->irqs[index]);
	}

	return 0;
}

static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
{
	struct vfio_platform_irq *irq_ctx = dev_id;
	unsigned long flags;
	int ret = IRQ_NONE;

	spin_lock_irqsave(&irq_ctx->lock, flags);

	if (!irq_ctx->masked) {
		ret = IRQ_HANDLED;

		/* automask maskable interrupts */
		disable_irq_nosync(irq_ctx->hwirq);
		irq_ctx->masked = true;
	}

	spin_unlock_irqrestore(&irq_ctx->lock, flags);

	if (ret == IRQ_HANDLED)
		eventfd_signal(irq_ctx->trigger, 1);

	return ret;
}

static irqreturn_t vfio_irq_handler(int irq, void *dev_id)
{
	struct vfio_platform_irq *irq_ctx = dev_id;

	eventfd_signal(irq_ctx->trigger, 1);

	return IRQ_HANDLED;
}

static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
			    int fd, irq_handler_t handler)
{
	struct vfio_platform_irq *irq = &vdev->irqs[index];
	struct eventfd_ctx *trigger;
	int ret;

	if (irq->trigger) {
		free_irq(irq->hwirq, irq);
		kfree(irq->name);
		eventfd_ctx_put(irq->trigger);
		irq->trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)",
			      irq->hwirq, vdev->name);
	if (!irq->name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(irq->name);
		return PTR_ERR(trigger);
	}

	irq->trigger = trigger;

	irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN);
	ret = request_irq(irq->hwirq, handler, 0, irq->name, irq);
	if (ret) {
		kfree(irq->name);
		eventfd_ctx_put(trigger);
		irq->trigger = NULL;
		return ret;
	}

	if (!irq->masked)
		enable_irq(irq->hwirq);

	return 0;
}

static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
					 unsigned index, unsigned start,
					 unsigned count, uint32_t flags,
					 void *data)
{
	struct vfio_platform_irq *irq = &vdev->irqs[index];
	irq_handler_t handler;

	if (vdev->irqs[index].flags & VFIO_IRQ_INFO_AUTOMASKED)
		handler = vfio_automasked_irq_handler;
	else
		handler = vfio_irq_handler;

	if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
		return vfio_set_trigger(vdev, index, -1, handler);

	if (start != 0 || count != 1)
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;

		return vfio_set_trigger(vdev, index, fd, handler);
	}

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		handler(irq->hwirq, irq);

	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;

		if (trigger)
			handler(irq->hwirq, irq);
	}

	return 0;
}

int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
				 uint32_t flags, unsigned index, unsigned start,
				 unsigned count, void *data)
{
	int (*func)(struct vfio_platform_device *vdev, unsigned index,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
	case VFIO_IRQ_SET_ACTION_MASK:
		func = vfio_platform_set_irq_mask;
		break;
	case VFIO_IRQ_SET_ACTION_UNMASK:
		func = vfio_platform_set_irq_unmask;
		break;
	case VFIO_IRQ_SET_ACTION_TRIGGER:
		func = vfio_platform_set_irq_trigger;
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vdev, index, start, count, flags, data);
}

int vfio_platform_irq_init(struct vfio_platform_device *vdev)
{
	int cnt = 0, i;

	while (vdev->get_irq(vdev, cnt) >= 0)
		cnt++;

	vdev->irqs = kcalloc(cnt, sizeof(struct vfio_platform_irq), GFP_KERNEL);
	if (!vdev->irqs)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		int hwirq = vdev->get_irq(vdev, i);

		if (hwirq < 0)
			goto err;

		spin_lock_init(&vdev->irqs[i].lock);

		vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD;

		if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK)
			vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE
						| VFIO_IRQ_INFO_AUTOMASKED;

		vdev->irqs[i].count = 1;
		vdev->irqs[i].hwirq = hwirq;
		vdev->irqs[i].masked = false;
	}

	vdev->num_irqs = cnt;

	return 0;
err:
	kfree(vdev->irqs);
	return -EINVAL;
}

void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev)
{
	int i;

	for (i = 0; i < vdev->num_irqs; i++)
		vfio_set_trigger(vdev, i, -1, NULL);

	vdev->num_irqs = 0;
	kfree(vdev->irqs);
}
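
/*
 * Usage sketch (illustration only, not part of this driver): how userspace
 * might wire an eventfd to IRQ index 0 of an already-opened VFIO platform
 * device fd, which is routed to vfio_platform_set_irqs_ioctl() and
 * vfio_platform_set_irq_trigger() above. Obtaining the device fd from a
 * VFIO group/container is assumed and omitted; the helper name below is
 * hypothetical.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/eventfd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/vfio.h>
 *
 *	static int wire_irq_eventfd(int device_fd, unsigned int index)
 *	{
 *		// vfio_irq_set ends in a flexible data[] array, so build the
 *		// request in a buffer with room for one eventfd descriptor.
 *		char buf[sizeof(struct vfio_irq_set) + sizeof(int32_t)];
 *		struct vfio_irq_set *set = (struct vfio_irq_set *)buf;
 *		int32_t efd = eventfd(0, EFD_CLOEXEC);
 *
 *		if (efd < 0)
 *			return -1;
 *
 *		set->argsz = sizeof(buf);
 *		set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *			     VFIO_IRQ_SET_ACTION_TRIGGER;
 *		set->index = index;	// from VFIO_DEVICE_GET_IRQ_INFO
 *		set->start = 0;
 *		set->count = 1;
 *		memcpy(set->data, &efd, sizeof(efd));
 *
 *		if (ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set)) {
 *			close(efd);
 *			return -1;
 *		}
 *		return efd;	// read() on this fd blocks until the IRQ fires
 *	}
 */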