// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/pci.h>
#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 24)
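
/**
 * vmw_irqflag_fence_goal - Return the fence goal irq flag for this device
 *
 * @vmw: Pointer to the vmw_private device.
 *
 * Devices exposing the extra register bank (SVGA_CAP2_EXTRA_REGS) report
 * fence goal interrupts with a dedicated flag; older devices use the
 * legacy one.
 */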
static u32 vmw_irqflag_fence_goal(struct vmw_private *vmw)
{
	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
		return SVGA_IRQFLAG_REG_FENCE_GOAL;
	else
		return SVGA_IRQFLAG_FENCE_GOAL;
}

/**
 * vmw_thread_fn - Deferred (process context) irq handler
 *
 * @irq: irq number
 * @arg: Closure argument. Pointer to a struct drm_device cast to void *
 *
 * This function implements the deferred part of irq processing.
 * The function is guaranteed to run at least once after the
 * vmw_irq_handler has returned with IRQ_WAKE_THREAD.
 */
static irqreturn_t vmw_thread_fn(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	irqreturn_t ret = IRQ_NONE;

	if (test_and_clear_bit(VMW_IRQTHREAD_FENCE,
			       dev_priv->irqthread_pending)) {
		vmw_fences_update(dev_priv->fman);
		wake_up_all(&dev_priv->fence_queue);
		ret = IRQ_HANDLED;
	}

	if (test_and_clear_bit(VMW_IRQTHREAD_CMDBUF,
			       dev_priv->irqthread_pending)) {
		vmw_cmdbuf_irqthread(dev_priv->cman);
		ret = IRQ_HANDLED;
	}

	return ret;
}

/**
 * vmw_irq_handler - irq handler
 *
 * @irq: irq number
 * @arg: Closure argument. Pointer to a struct drm_device cast to void *
 *
 * This function implements the quick part of irq processing.
 * The function performs fast actions like clearing the device interrupt
 * flags and also reasonably quick actions like waking processes waiting for
 * FIFO space. Other IRQ actions are deferred to the IRQ thread.
 */
static irqreturn_t vmw_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status, masked_status;
	irqreturn_t ret = IRQ_HANDLED;

	status = vmw_irq_status_read(dev_priv);
	masked_status = status & READ_ONCE(dev_priv->irq_mask);

	if (likely(status))
		vmw_irq_status_write(dev_priv, status);

	if (!status)
		return IRQ_NONE;

	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
		wake_up_all(&dev_priv->fifo_queue);

	if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
			      vmw_irqflag_fence_goal(dev_priv))) &&
	    !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
		ret = IRQ_WAKE_THREAD;

	if ((masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
			      SVGA_IRQFLAG_ERROR)) &&
	    !test_and_set_bit(VMW_IRQTHREAD_CMDBUF,
			      dev_priv->irqthread_pending))
		ret = IRQ_WAKE_THREAD;

	return ret;
}

static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
	return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}

void vmw_update_seqno(struct vmw_private *dev_priv)
{
	uint32_t seqno = vmw_fence_read(dev_priv);

	if (dev_priv->last_read_seqno != seqno) {
		dev_priv->last_read_seqno = seqno;
		vmw_fences_update(dev_priv->fman);
	}
}

bool vmw_seqno_passed(struct vmw_private *dev_priv,
		      uint32_t seqno)
{
	bool ret;

	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	vmw_update_seqno(dev_priv);
	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
		return true;

	/*
	 * Finally, check if the seqno is higher than what we've actually
	 * emitted; in that case the fence is stale and already signaled.
	 */
	ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
	       > VMW_FENCE_WRAP);

	return ret;
}
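
/**
 * vmw_fallback_wait - Polling fallback wait for a fence sequence number
 *
 * @dev_priv: Pointer to the vmw_private device.
 * @lazy: Sleep between polls of the wait condition instead of busy-waiting.
 * @fifo_idle: Wait for the FIFO to go idle rather than for @seqno to pass.
 * @seqno: The sequence number to wait for (when @fifo_idle is false).
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @timeout: Timeout in jiffies before a device lockup is reported.
 *
 * Used when fence completion cannot be signaled through interrupts.
 * Return: Zero on success, -ERESTARTSYS when interrupted by a signal, or
 * the error returned by vmw_cmdbuf_idle() if blocking command submission
 * fails.
 */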
int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t seqno,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	bool fifo_down = false;

	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_seqno_passed;

	/*
	 * Block command submission while waiting for idle.
	 */
	if (fifo_idle) {
		if (dev_priv->cman) {
			ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
					      10*HZ);
			if (ret)
				goto out_err;
		} else if (fifo_state) {
			down_read(&fifo_state->rwsem);
			fifo_down = true;
		}
	}

	signal_seq = atomic_read(&dev_priv->marker_seq);
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, seqno))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/*
			 * FIXME: Use schedule_hrtimeout here for
			 * newer kernels and lower CPU utilization.
			 */
			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle && fifo_state)
		vmw_fence_write(dev_priv, signal_seq);

	wake_up_all(&dev_priv->fence_queue);
out_err:
	if (fifo_down)
		up_read(&fifo_state->rwsem);

	return ret;
}

void vmw_generic_waiter_add(struct vmw_private *dev_priv,
			    u32 flag, int *waiter_count)
{
	spin_lock_bh(&dev_priv->waiter_lock);
	if ((*waiter_count)++ == 0) {
		vmw_irq_status_write(dev_priv, flag);
		dev_priv->irq_mask |= flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
	}
	spin_unlock_bh(&dev_priv->waiter_lock);
}

void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
			       u32 flag, int *waiter_count)
{
	spin_lock_bh(&dev_priv->waiter_lock);
	if (--(*waiter_count) == 0) {
		dev_priv->irq_mask &= ~flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
	}
	spin_unlock_bh(&dev_priv->waiter_lock);
}

void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
			       &dev_priv->fence_queue_waiters);
}

void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
				  &dev_priv->fence_queue_waiters);
}

void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
			       &dev_priv->goal_queue_waiters);
}

void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
				  &dev_priv->goal_queue_waiters);
}

static void vmw_irq_preinstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	status = vmw_irq_status_read(dev_priv);
	vmw_irq_status_write(dev_priv, status);
}
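
/**
 * vmw_irq_uninstall - Uninstall the irq handlers
 *
 * @dev: Pointer to the drm device.
 *
 * Masks and acknowledges all pending device interrupts, frees the
 * installed irq handlers and releases the PCI irq vectors.
 */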
void vmw_irq_uninstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	uint32_t status;
	u32 i;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);

	status = vmw_irq_status_read(dev_priv);
	vmw_irq_status_write(dev_priv, status);

	for (i = 0; i < dev_priv->num_irq_vectors; ++i)
		free_irq(dev_priv->irqs[i], dev);

	pci_free_irq_vectors(pdev);
	dev_priv->num_irq_vectors = 0;
}

/**
 * vmw_irq_install - Install the irq handlers
 *
 * @dev_priv: Pointer to the vmw_private device.
 * Return: Zero if successful. Negative number otherwise.
 */
int vmw_irq_install(struct vmw_private *dev_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	int nvec;
	int i = 0;

	BUILD_BUG_ON((SVGA_IRQFLAG_MAX >> VMWGFX_MAX_NUM_IRQS) != 1);
	BUG_ON(VMWGFX_MAX_NUM_IRQS != get_count_order(SVGA_IRQFLAG_MAX));

	nvec = pci_alloc_irq_vectors(pdev, 1, VMWGFX_MAX_NUM_IRQS,
				     PCI_IRQ_ALL_TYPES);

	if (nvec <= 0) {
		drm_err(&dev_priv->drm,
			"IRQs are unavailable, nvec: %d\n", nvec);
		ret = nvec;
		goto done;
	}

	vmw_irq_preinstall(dev);

	for (i = 0; i < nvec; ++i) {
		ret = pci_irq_vector(pdev, i);
		if (ret < 0) {
			drm_err(&dev_priv->drm,
				"failed getting irq vector: %d\n", ret);
			goto done;
		}
		dev_priv->irqs[i] = ret;

		ret = request_threaded_irq(dev_priv->irqs[i], vmw_irq_handler,
					   vmw_thread_fn, IRQF_SHARED,
					   VMWGFX_DRIVER_NAME, dev);
		if (ret != 0) {
			drm_err(&dev_priv->drm,
				"Failed installing irq(%d): %d\n",
				dev_priv->irqs[i], ret);
			goto done;
		}
	}

done:
	dev_priv->num_irq_vectors = i;
	return ret;
}