1fb1d9738SJakob Bornecrantz /************************************************************************** 2fb1d9738SJakob Bornecrantz * 354fbde8aSSinclair Yeh * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA 4fb1d9738SJakob Bornecrantz * All Rights Reserved. 5fb1d9738SJakob Bornecrantz * 6fb1d9738SJakob Bornecrantz * Permission is hereby granted, free of charge, to any person obtaining a 7fb1d9738SJakob Bornecrantz * copy of this software and associated documentation files (the 8fb1d9738SJakob Bornecrantz * "Software"), to deal in the Software without restriction, including 9fb1d9738SJakob Bornecrantz * without limitation the rights to use, copy, modify, merge, publish, 10fb1d9738SJakob Bornecrantz * distribute, sub license, and/or sell copies of the Software, and to 11fb1d9738SJakob Bornecrantz * permit persons to whom the Software is furnished to do so, subject to 12fb1d9738SJakob Bornecrantz * the following conditions: 13fb1d9738SJakob Bornecrantz * 14fb1d9738SJakob Bornecrantz * The above copyright notice and this permission notice (including the 15fb1d9738SJakob Bornecrantz * next paragraph) shall be included in all copies or substantial portions 16fb1d9738SJakob Bornecrantz * of the Software. 17fb1d9738SJakob Bornecrantz * 18fb1d9738SJakob Bornecrantz * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19fb1d9738SJakob Bornecrantz * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20fb1d9738SJakob Bornecrantz * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21fb1d9738SJakob Bornecrantz * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22fb1d9738SJakob Bornecrantz * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23fb1d9738SJakob Bornecrantz * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24fb1d9738SJakob Bornecrantz * USE OR OTHER DEALINGS IN THE SOFTWARE. 
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

/*
 * Fence seqnos are 32-bit values that wrap. Two seqnos are only compared
 * as ordered when they are less than VMW_FENCE_WRAP apart; a larger
 * unsigned difference is treated as a wrap / stale marker.
 */
#define VMW_FENCE_WRAP (1 << 24)

/**
 * vmw_irq_handler - Primary (hardirq) handler for the SVGA device interrupt.
 *
 * @irq: The irq number.
 * @arg: The struct drm_device pointer passed at request_threaded_irq() time.
 *
 * Reads the interrupt status port, acks all pending bits by writing them
 * back, and then services only the bits that are currently unmasked:
 * fence / fence-goal interrupts update the fence manager and wake fence
 * waiters, FIFO-progress wakes FIFO waiters, and command-buffer / error
 * interrupts kick the command-buffer manager tasklet.
 *
 * Return: IRQ_NONE if no status bit was pending (the line is shared,
 * see IRQF_SHARED in vmw_irq_install()), IRQ_HANDLED otherwise.
 */
static irqreturn_t vmw_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status, masked_status;

	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	/*
	 * irq_mask is updated concurrently under waiter_lock by the
	 * generic_waiter helpers; READ_ONCE() takes a single snapshot.
	 */
	masked_status = status & READ_ONCE(dev_priv->irq_mask);

	/* Ack everything that was pending, including masked bits. */
	if (likely(status))
		outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

	if (!status)
		return IRQ_NONE;

	if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
			     SVGA_IRQFLAG_FENCE_GOAL)) {
		vmw_fences_update(dev_priv->fman);
		wake_up_all(&dev_priv->fence_queue);
	}

	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
		wake_up_all(&dev_priv->fifo_queue);

	if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
			     SVGA_IRQFLAG_ERROR))
		vmw_cmdbuf_tasklet_schedule(dev_priv->cman);

	return IRQ_HANDLED;
}

/*
 * vmw_fifo_idle - Check whether the device has gone idle.
 *
 * The @seqno argument is intentionally unused; it is only present so
 * that this function matches the wait_condition function-pointer
 * signature used by vmw_fallback_wait().
 */
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{

	return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}

/**
 * vmw_update_seqno - Refresh the last seqno read back from the device.
 *
 * @dev_priv: Pointer to the device private structure.
 * @fifo_state: Pointer to the device FIFO state.
 *
 * Reads the current fence seqno from the FIFO MMIO area and, if it has
 * advanced, records it and lets the marker queue and the fence manager
 * retire everything up to and including it.
 */
void vmw_update_seqno(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo_state)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

	if (dev_priv->last_read_seqno != seqno) {
		dev_priv->last_read_seqno = seqno;
		vmw_marker_pull(&fifo_state->marker_queue, seqno);
		vmw_fences_update(dev_priv->fman);
	}
}

/**
 * vmw_seqno_passed - Check whether a fence seqno has signaled.
 *
 * @dev_priv: Pointer to the device private structure.
 * @seqno: The seqno to check.
 *
 * First checks against the cached last_read_seqno, then re-reads the
 * seqno from the device and checks again. If the FIFO lacks fence
 * support, device idleness implies the seqno has passed. All
 * comparisons are wrap-safe modulo VMW_FENCE_WRAP.
 *
 * Return: true if the seqno is considered signaled.
 */
bool vmw_seqno_passed(struct vmw_private *dev_priv,
			 uint32_t seqno)
{
	struct vmw_fifo_state *fifo_state;
	bool ret;

	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	fifo_state = &dev_priv->fifo;
	vmw_update_seqno(dev_priv, fifo_state);
	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
	    vmw_fifo_idle(dev_priv, seqno))
		return true;

	/*
	 * Finally, check whether the seqno is ahead of what has actually
	 * been emitted (marker_seq). If so, the fence is stale and is
	 * considered signaled.
	 */

	ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
	       > VMW_FENCE_WRAP);

	return ret;
}

/**
 * vmw_fallback_wait - Poll for a seqno or for device idle, without irqs.
 *
 * @dev_priv: Pointer to the device private structure.
 * @lazy: Sleep one tick between polls instead of busy-waiting.
 * @fifo_idle: Wait for device idle rather than for a specific seqno.
 * @seqno: The seqno to wait for (ignored when @fifo_idle is set).
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @timeout: Maximum wait, in jiffies.
 *
 * Used when the device cannot deliver fence interrupts (no
 * SVGA_CAP_IRQMASK or no SVGA_FIFO_CAP_FENCE). Polls the chosen
 * condition on the fence queue until it holds, the timeout expires
 * (logged as a device lockup), or a signal arrives.
 *
 * Return: 0 on success, -ERESTARTSYS if interrupted by a signal, or
 * the error from vmw_cmdbuf_idle().
 */
int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t seqno,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_seqno_passed;

	/*
	 * Block command submission while waiting for idle: hold the FIFO
	 * rwsem for read and drain the command-buffer manager first.
	 */

	if (fifo_idle) {
		down_read(&fifo_state->rwsem);
		if (dev_priv->cman) {
			ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
					      10*HZ);
			if (ret)
				goto out_err;
		}
	}

	signal_seq = atomic_read(&dev_priv->marker_seq);
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, seqno))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/*
			 * Busy-wait mode: yield the CPU every 16
			 * iterations.
			 *
			 * FIXME: Use schedule_hr_timeout here for
			 * newer kernels and lower CPU utilization.
			 */

			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle) {
		u32 *fifo_mem = dev_priv->mmio_virt;

		/*
		 * NOTE(review): the snapshot of marker_seq taken above is
		 * written back to the FIFO fence register after a
		 * successful idle wait — presumably to bring the fence
		 * register in sync with what was emitted; confirm against
		 * the SVGA device interface.
		 */
		vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
	}
	wake_up_all(&dev_priv->fence_queue);
out_err:
	if (fifo_idle)
		up_read(&fifo_state->rwsem);

	return ret;
}

/**
 * vmw_generic_waiter_add - Register a waiter for an interrupt flag.
 *
 * @dev_priv: Pointer to the device private structure.
 * @flag: The SVGA_IRQFLAG_* bit to enable.
 * @waiter_count: Per-flag waiter refcount, protected by waiter_lock.
 *
 * On the 0 -> 1 transition of the refcount, writes @flag to the status
 * port (which, per the ack pattern in vmw_irq_handler(), clears any
 * stale pending instance of the flag) and then unmasks it in
 * SVGA_REG_IRQMASK.
 */
void vmw_generic_waiter_add(struct vmw_private *dev_priv,
			    u32 flag, int *waiter_count)
{
	spin_lock_bh(&dev_priv->waiter_lock);
	if ((*waiter_count)++ == 0) {
		outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
	}
	spin_unlock_bh(&dev_priv->waiter_lock);
}

/**
 * vmw_generic_waiter_remove - Unregister a waiter for an interrupt flag.
 *
 * @dev_priv: Pointer to the device private structure.
 * @flag: The SVGA_IRQFLAG_* bit to disable.
 * @waiter_count: Per-flag waiter refcount, protected by waiter_lock.
 *
 * On the 1 -> 0 transition of the refcount, masks @flag again in
 * SVGA_REG_IRQMASK.
 */
void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
			       u32 flag, int *waiter_count)
{
	spin_lock_bh(&dev_priv->waiter_lock);
	if (--(*waiter_count) == 0) {
		dev_priv->irq_mask &= ~flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
	}
	spin_unlock_bh(&dev_priv->waiter_lock);
}

/* Enable fence interrupts while at least one seqno waiter exists. */
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
			       &dev_priv->fence_queue_waiters);
}

void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
				  &dev_priv->fence_queue_waiters);
}

/* Enable fence-goal interrupts while at least one goal waiter exists. */
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
			       &dev_priv->goal_queue_waiters);
}

void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
				  &dev_priv->goal_queue_waiters);
}

/**
 * vmw_wait_seqno - Wait for a fence seqno to signal.
 *
 * @dev_priv: Pointer to the device private structure.
 * @lazy: Passed through to vmw_fallback_wait() (sleepy polling).
 * @seqno: The seqno to wait for.
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @timeout: Maximum wait, in jiffies.
 *
 * Fast-paths the already-signaled cases, pings the host, and then
 * either falls back to polling (when the device lacks fence or irq
 * masking support) or sleeps on the fence queue with fence interrupts
 * enabled for the duration of the wait.
 *
 * Return: 0 if the seqno signaled, -EBUSY on timeout, or a negative
 * error (e.g. -ERESTARTSYS) from the underlying wait.
 */
int vmw_wait_seqno(struct vmw_private *dev_priv,
		      bool lazy, uint32_t seqno,
		      bool interruptible, unsigned long timeout)
{
	long ret;
	struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return 0;

	if (likely(vmw_seqno_passed(dev_priv, seqno)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
		return vmw_fallback_wait(dev_priv, lazy, true, seqno,
					 interruptible, timeout);

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fallback_wait(dev_priv, lazy, false, seqno,
					 interruptible, timeout);

	vmw_seqno_waiter_add(dev_priv);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fence_queue,
		     vmw_seqno_passed(dev_priv, seqno),
		     timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fence_queue,
		     vmw_seqno_passed(dev_priv, seqno),
		     timeout);

	vmw_seqno_waiter_remove(dev_priv);

	/* wait_event*_timeout() returns 0 on timeout, >0 remaining jiffies
	 * on success, or a negative error; normalize to 0 / -errno. */
	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	return ret;
}

/*
 * vmw_irq_preinstall - Clear any interrupt state pending from before the
 * handler is installed, by reading the status port and acking everything.
 */
static void vmw_irq_preinstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

/**
 * vmw_irq_uninstall - Mask all interrupts, ack pending state and free
 * the irq.
 *
 * @dev: Pointer to the drm device.
 *
 * No-op when the device lacks SVGA_CAP_IRQMASK or when no irq was
 * installed.
 */
void vmw_irq_uninstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	if (!dev->irq_enabled)
		return;

	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);

	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

	dev->irq_enabled = false;
	free_irq(dev->irq, dev);
}

/**
 * vmw_irq_install - Install the irq handlers
 *
 * @dev: Pointer to the drm device.
 * @irq: The irq number.
 * Return: Zero if successful. Negative number otherwise.
 */
int vmw_irq_install(struct drm_device *dev, int irq)
{
	int ret;

	if (dev->irq_enabled)
		return -EBUSY;

	vmw_irq_preinstall(dev);

	/* Primary handler only; no threaded half (thread_fn is NULL). */
	ret = request_threaded_irq(irq, vmw_irq_handler, NULL,
				   IRQF_SHARED, VMWGFX_DRIVER_NAME, dev);
	if (ret < 0)
		return ret;

	dev->irq_enabled = true;
	dev->irq = irq;

	return ret;
}