/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * DOC: Interrupt management for the V3D engine
 *
 * We have an interrupt status register (V3D_INTCTL) which reports
 * interrupts, and where writing 1 bits clears those interrupts.
 * There are also a pair of interrupt registers
 * (V3D_INTENA/V3D_INTDIS) where writing a 1 to their bits enables or
 * disables that specific interrupt, and 0s written are ignored
 * (reading either one returns the set of enabled interrupts).
 *
 * When we take a binning flush done interrupt, we need to submit the
 * next frame for binning and move the finished frame to the render
 * thread.
 *
 * When we take a render frame interrupt, we need to wake the
 * processes waiting for some frame to be done, and get the next frame
 * submitted ASAP (so the hardware doesn't sit idle when there's work
 * to do).
 *
 * When we take the binner out of memory interrupt, we need to
 * allocate some new memory and pass it to the binner so that the
 * current job can make progress.
46d5b1a78aSEric Anholt */ 47d5b1a78aSEric Anholt 48d5b1a78aSEric Anholt #include "vc4_drv.h" 49d5b1a78aSEric Anholt #include "vc4_regs.h" 50d5b1a78aSEric Anholt 51d5b1a78aSEric Anholt #define V3D_DRIVER_IRQS (V3D_INT_OUTOMEM | \ 52ca26d28bSVarad Gautam V3D_INT_FLDONE | \ 53d5b1a78aSEric Anholt V3D_INT_FRDONE) 54d5b1a78aSEric Anholt 55d5b1a78aSEric Anholt DECLARE_WAIT_QUEUE_HEAD(render_wait); 56d5b1a78aSEric Anholt 57d5b1a78aSEric Anholt static void 58d5b1a78aSEric Anholt vc4_overflow_mem_work(struct work_struct *work) 59d5b1a78aSEric Anholt { 60d5b1a78aSEric Anholt struct vc4_dev *vc4 = 61d5b1a78aSEric Anholt container_of(work, struct vc4_dev, overflow_mem_work); 62d5b1a78aSEric Anholt struct drm_device *dev = vc4->dev; 63d5b1a78aSEric Anholt struct vc4_bo *bo; 64d5b1a78aSEric Anholt 65d5b1a78aSEric Anholt bo = vc4_bo_create(dev, 256 * 1024, true); 662c68f1fcSEric Anholt if (IS_ERR(bo)) { 67d5b1a78aSEric Anholt DRM_ERROR("Couldn't allocate binner overflow mem\n"); 68d5b1a78aSEric Anholt return; 69d5b1a78aSEric Anholt } 70d5b1a78aSEric Anholt 71d5b1a78aSEric Anholt /* If there's a job executing currently, then our previous 72d5b1a78aSEric Anholt * overflow allocation is getting used in that job and we need 73d5b1a78aSEric Anholt * to queue it to be released when the job is done. But if no 74d5b1a78aSEric Anholt * job is executing at all, then we can free the old overflow 75d5b1a78aSEric Anholt * object direcctly. 76d5b1a78aSEric Anholt * 77d5b1a78aSEric Anholt * No lock necessary for this pointer since we're the only 78d5b1a78aSEric Anholt * ones that update the pointer, and our workqueue won't 79d5b1a78aSEric Anholt * reenter. 
80d5b1a78aSEric Anholt */ 81d5b1a78aSEric Anholt if (vc4->overflow_mem) { 82d5b1a78aSEric Anholt struct vc4_exec_info *current_exec; 83d5b1a78aSEric Anholt unsigned long irqflags; 84d5b1a78aSEric Anholt 85d5b1a78aSEric Anholt spin_lock_irqsave(&vc4->job_lock, irqflags); 86ca26d28bSVarad Gautam current_exec = vc4_first_bin_job(vc4); 879326e6f2SEric Anholt if (!current_exec) 889326e6f2SEric Anholt current_exec = vc4_last_render_job(vc4); 89d5b1a78aSEric Anholt if (current_exec) { 909326e6f2SEric Anholt vc4->overflow_mem->seqno = current_exec->seqno; 91d5b1a78aSEric Anholt list_add_tail(&vc4->overflow_mem->unref_head, 92d5b1a78aSEric Anholt ¤t_exec->unref_list); 93d5b1a78aSEric Anholt vc4->overflow_mem = NULL; 94d5b1a78aSEric Anholt } 95d5b1a78aSEric Anholt spin_unlock_irqrestore(&vc4->job_lock, irqflags); 96d5b1a78aSEric Anholt } 97d5b1a78aSEric Anholt 98d5b1a78aSEric Anholt if (vc4->overflow_mem) 99d5b1a78aSEric Anholt drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base); 100d5b1a78aSEric Anholt vc4->overflow_mem = bo; 101d5b1a78aSEric Anholt 102d5b1a78aSEric Anholt V3D_WRITE(V3D_BPOA, bo->base.paddr); 103d5b1a78aSEric Anholt V3D_WRITE(V3D_BPOS, bo->base.base.size); 104d5b1a78aSEric Anholt V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM); 105d5b1a78aSEric Anholt V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM); 106d5b1a78aSEric Anholt } 107d5b1a78aSEric Anholt 108d5b1a78aSEric Anholt static void 109ca26d28bSVarad Gautam vc4_irq_finish_bin_job(struct drm_device *dev) 110d5b1a78aSEric Anholt { 111d5b1a78aSEric Anholt struct vc4_dev *vc4 = to_vc4_dev(dev); 112ca26d28bSVarad Gautam struct vc4_exec_info *exec = vc4_first_bin_job(vc4); 113ca26d28bSVarad Gautam 114ca26d28bSVarad Gautam if (!exec) 115ca26d28bSVarad Gautam return; 116ca26d28bSVarad Gautam 117ca26d28bSVarad Gautam vc4_move_job_to_render(dev, exec); 118ca26d28bSVarad Gautam vc4_submit_next_bin_job(dev); 119ca26d28bSVarad Gautam } 120ca26d28bSVarad Gautam 121ca26d28bSVarad Gautam static void 122ca26d28bSVarad Gautam 
vc4_cancel_bin_job(struct drm_device *dev) 123ca26d28bSVarad Gautam { 124ca26d28bSVarad Gautam struct vc4_dev *vc4 = to_vc4_dev(dev); 125ca26d28bSVarad Gautam struct vc4_exec_info *exec = vc4_first_bin_job(vc4); 126ca26d28bSVarad Gautam 127ca26d28bSVarad Gautam if (!exec) 128ca26d28bSVarad Gautam return; 129ca26d28bSVarad Gautam 130ca26d28bSVarad Gautam list_move_tail(&exec->head, &vc4->bin_job_list); 131ca26d28bSVarad Gautam vc4_submit_next_bin_job(dev); 132ca26d28bSVarad Gautam } 133ca26d28bSVarad Gautam 134ca26d28bSVarad Gautam static void 135ca26d28bSVarad Gautam vc4_irq_finish_render_job(struct drm_device *dev) 136ca26d28bSVarad Gautam { 137ca26d28bSVarad Gautam struct vc4_dev *vc4 = to_vc4_dev(dev); 138ca26d28bSVarad Gautam struct vc4_exec_info *exec = vc4_first_render_job(vc4); 139d5b1a78aSEric Anholt 140d5b1a78aSEric Anholt if (!exec) 141d5b1a78aSEric Anholt return; 142d5b1a78aSEric Anholt 143d5b1a78aSEric Anholt vc4->finished_seqno++; 144d5b1a78aSEric Anholt list_move_tail(&exec->head, &vc4->job_done_list); 145cdec4d36SEric Anholt if (exec->fence) { 146cdec4d36SEric Anholt dma_fence_signal_locked(exec->fence); 147cdec4d36SEric Anholt exec->fence = NULL; 148cdec4d36SEric Anholt } 149ca26d28bSVarad Gautam vc4_submit_next_render_job(dev); 150d5b1a78aSEric Anholt 151d5b1a78aSEric Anholt wake_up_all(&vc4->job_wait_queue); 152d5b1a78aSEric Anholt schedule_work(&vc4->job_done_work); 153d5b1a78aSEric Anholt } 154d5b1a78aSEric Anholt 155d5b1a78aSEric Anholt irqreturn_t 156d5b1a78aSEric Anholt vc4_irq(int irq, void *arg) 157d5b1a78aSEric Anholt { 158d5b1a78aSEric Anholt struct drm_device *dev = arg; 159d5b1a78aSEric Anholt struct vc4_dev *vc4 = to_vc4_dev(dev); 160d5b1a78aSEric Anholt uint32_t intctl; 161d5b1a78aSEric Anholt irqreturn_t status = IRQ_NONE; 162d5b1a78aSEric Anholt 163d5b1a78aSEric Anholt barrier(); 164d5b1a78aSEric Anholt intctl = V3D_READ(V3D_INTCTL); 165d5b1a78aSEric Anholt 166ca26d28bSVarad Gautam /* Acknowledge the interrupts we're handling here. 
The binner 167ca26d28bSVarad Gautam * last flush / render frame done interrupt will be cleared, 168ca26d28bSVarad Gautam * while OUTOMEM will stay high until the underlying cause is 169ca26d28bSVarad Gautam * cleared. 170d5b1a78aSEric Anholt */ 171d5b1a78aSEric Anholt V3D_WRITE(V3D_INTCTL, intctl); 172d5b1a78aSEric Anholt 173d5b1a78aSEric Anholt if (intctl & V3D_INT_OUTOMEM) { 174d5b1a78aSEric Anholt /* Disable OUTOMEM until the work is done. */ 175d5b1a78aSEric Anholt V3D_WRITE(V3D_INTDIS, V3D_INT_OUTOMEM); 176d5b1a78aSEric Anholt schedule_work(&vc4->overflow_mem_work); 177d5b1a78aSEric Anholt status = IRQ_HANDLED; 178d5b1a78aSEric Anholt } 179d5b1a78aSEric Anholt 180ca26d28bSVarad Gautam if (intctl & V3D_INT_FLDONE) { 181ca26d28bSVarad Gautam spin_lock(&vc4->job_lock); 182ca26d28bSVarad Gautam vc4_irq_finish_bin_job(dev); 183ca26d28bSVarad Gautam spin_unlock(&vc4->job_lock); 184ca26d28bSVarad Gautam status = IRQ_HANDLED; 185ca26d28bSVarad Gautam } 186ca26d28bSVarad Gautam 187d5b1a78aSEric Anholt if (intctl & V3D_INT_FRDONE) { 188d5b1a78aSEric Anholt spin_lock(&vc4->job_lock); 189ca26d28bSVarad Gautam vc4_irq_finish_render_job(dev); 190d5b1a78aSEric Anholt spin_unlock(&vc4->job_lock); 191d5b1a78aSEric Anholt status = IRQ_HANDLED; 192d5b1a78aSEric Anholt } 193d5b1a78aSEric Anholt 194d5b1a78aSEric Anholt return status; 195d5b1a78aSEric Anholt } 196d5b1a78aSEric Anholt 197d5b1a78aSEric Anholt void 198d5b1a78aSEric Anholt vc4_irq_preinstall(struct drm_device *dev) 199d5b1a78aSEric Anholt { 200d5b1a78aSEric Anholt struct vc4_dev *vc4 = to_vc4_dev(dev); 201d5b1a78aSEric Anholt 202d5b1a78aSEric Anholt init_waitqueue_head(&vc4->job_wait_queue); 203d5b1a78aSEric Anholt INIT_WORK(&vc4->overflow_mem_work, vc4_overflow_mem_work); 204d5b1a78aSEric Anholt 205d5b1a78aSEric Anholt /* Clear any pending interrupts someone might have left around 206d5b1a78aSEric Anholt * for us. 
207d5b1a78aSEric Anholt */ 208d5b1a78aSEric Anholt V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS); 209d5b1a78aSEric Anholt } 210d5b1a78aSEric Anholt 211d5b1a78aSEric Anholt int 212d5b1a78aSEric Anholt vc4_irq_postinstall(struct drm_device *dev) 213d5b1a78aSEric Anholt { 214d5b1a78aSEric Anholt struct vc4_dev *vc4 = to_vc4_dev(dev); 215d5b1a78aSEric Anholt 216d5b1a78aSEric Anholt /* Enable both the render done and out of memory interrupts. */ 217d5b1a78aSEric Anholt V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS); 218d5b1a78aSEric Anholt 219d5b1a78aSEric Anholt return 0; 220d5b1a78aSEric Anholt } 221d5b1a78aSEric Anholt 222d5b1a78aSEric Anholt void 223d5b1a78aSEric Anholt vc4_irq_uninstall(struct drm_device *dev) 224d5b1a78aSEric Anholt { 225d5b1a78aSEric Anholt struct vc4_dev *vc4 = to_vc4_dev(dev); 226d5b1a78aSEric Anholt 227d5b1a78aSEric Anholt /* Disable sending interrupts for our driver's IRQs. */ 228d5b1a78aSEric Anholt V3D_WRITE(V3D_INTDIS, V3D_DRIVER_IRQS); 229d5b1a78aSEric Anholt 230d5b1a78aSEric Anholt /* Clear any pending interrupts we might have left. */ 231d5b1a78aSEric Anholt V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS); 232d5b1a78aSEric Anholt 233d5b1a78aSEric Anholt cancel_work_sync(&vc4->overflow_mem_work); 234d5b1a78aSEric Anholt } 235d5b1a78aSEric Anholt 236d5b1a78aSEric Anholt /** Reinitializes interrupt registers when a GPU reset is performed. */ 237d5b1a78aSEric Anholt void vc4_irq_reset(struct drm_device *dev) 238d5b1a78aSEric Anholt { 239d5b1a78aSEric Anholt struct vc4_dev *vc4 = to_vc4_dev(dev); 240d5b1a78aSEric Anholt unsigned long irqflags; 241d5b1a78aSEric Anholt 242d5b1a78aSEric Anholt /* Acknowledge any stale IRQs. */ 243d5b1a78aSEric Anholt V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS); 244d5b1a78aSEric Anholt 245d5b1a78aSEric Anholt /* 246d5b1a78aSEric Anholt * Turn all our interrupts on. 
Binner out of memory is the 247d5b1a78aSEric Anholt * only one we expect to trigger at this point, since we've 248d5b1a78aSEric Anholt * just come from poweron and haven't supplied any overflow 249d5b1a78aSEric Anholt * memory yet. 250d5b1a78aSEric Anholt */ 251d5b1a78aSEric Anholt V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS); 252d5b1a78aSEric Anholt 253d5b1a78aSEric Anholt spin_lock_irqsave(&vc4->job_lock, irqflags); 254ca26d28bSVarad Gautam vc4_cancel_bin_job(dev); 255ca26d28bSVarad Gautam vc4_irq_finish_render_job(dev); 256d5b1a78aSEric Anholt spin_unlock_irqrestore(&vc4->job_lock, irqflags); 257d5b1a78aSEric Anholt } 258