/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * DOC: Interrupt management for the V3D engine
 *
 * We have an interrupt status register (V3D_INTCTL) which reports
 * interrupts, and where writing 1 bits clears those interrupts.
 * There are also a pair of interrupt registers
 * (V3D_INTENA/V3D_INTDIS) where writing a 1 to their bits enables or
 * disables that specific interrupt, and 0s written are ignored
 * (reading either one returns the set of enabled interrupts).
 *
 * When we take a binning flush done interrupt, we need to submit the
 * next frame for binning and move the finished frame to the render
 * thread.
 *
 * When we take a render frame interrupt, we need to wake the
 * processes waiting for some frame to be done, and get the next frame
 * submitted ASAP (so the hardware doesn't sit idle when there's work
 * to do).
 *
 * When we take the binner out of memory interrupt, we need to
 * allocate some new memory and pass it to the binner so that the
 * current job can make progress.
 */

#include <linux/platform_device.h>

#include <drm/drm_drv.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

/* The set of interrupts this driver handles in vc4_irq(). */
#define V3D_DRIVER_IRQS (V3D_INT_OUTOMEM | \
			 V3D_INT_FLDONE | \
			 V3D_INT_FRDONE)

DECLARE_WAIT_QUEUE_HEAD(render_wait);

/*
 * Work item scheduled from vc4_irq() on V3D_INT_OUTOMEM: grabs a fresh
 * slot of the bin BO and points the binner at it (V3D_BPOA/V3D_BPOS) so
 * the current job can make progress, then re-acknowledges and re-enables
 * the out-of-memory interrupt that the handler disabled.
 *
 * Runs in process context: takes bin_bo_lock (mutex) around the whole
 * operation and job_lock (spinlock, IRQ-safe) around the slot bookkeeping
 * and register writes.
 */
static void
vc4_overflow_mem_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, overflow_mem_work);
	struct vc4_bo *bo;
	int bin_bo_slot;
	struct vc4_exec_info *exec;
	unsigned long irqflags;

	mutex_lock(&vc4->bin_bo_lock);

	/* No binner BO allocated (yet) — nothing we can hand to the HW. */
	if (!vc4->bin_bo)
		goto complete;

	bo = vc4->bin_bo;

	bin_bo_slot = vc4_v3d_get_bin_slot(vc4);
	if (bin_bo_slot < 0) {
		DRM_ERROR("Couldn't allocate binner overflow mem\n");
		goto complete;
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	if (vc4->bin_alloc_overflow) {
		/* If we had overflow memory allocated previously,
		 * then that chunk will free when the current bin job
		 * is done.  If we don't have a bin job running, then
		 * the chunk will be done whenever the list of render
		 * jobs has drained.
		 */
		exec = vc4_first_bin_job(vc4);
		if (!exec)
			exec = vc4_last_render_job(vc4);
		if (exec) {
			exec->bin_slots |= vc4->bin_alloc_overflow;
		} else {
			/* There's nothing queued in the hardware, so
			 * the old slot is free immediately.
			 */
			vc4->bin_alloc_used &= ~vc4->bin_alloc_overflow;
		}
	}
	vc4->bin_alloc_overflow = BIT(bin_bo_slot);

	/* Hand the new slot to the binner, then ack and re-enable OUTOMEM
	 * (it was disabled in vc4_irq() when this work was scheduled).
	 */
	V3D_WRITE(V3D_BPOA, bo->base.paddr + bin_bo_slot * vc4->bin_alloc_size);
	V3D_WRITE(V3D_BPOS, bo->base.base.size);
	V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
	V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

complete:
	mutex_unlock(&vc4->bin_bo_lock);
}

/*
 * Handles a binning-flush-done (FLDONE) interrupt: moves the finished
 * bin job over to the render queue and, when possible, kicks the next
 * bin job.  Called with vc4->job_lock held (see vc4_irq()).
 */
static void
vc4_irq_finish_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *next, *exec = vc4_first_bin_job(vc4);

	if (!exec)
		return;

	vc4_move_job_to_render(dev, exec);
	next = vc4_first_bin_job(vc4);

	/* Only submit the next job in the bin list if it matches the perfmon
	 * attached to the one that just finished (or if both jobs don't have
	 * perfmon attached to them).
	 */
	if (next && next->perfmon == exec->perfmon)
		vc4_submit_next_bin_job(dev);
}

/*
 * Aborts the currently-running bin job and requeues it at the tail of
 * the bin list, then restarts binning from the head of the list.  Used
 * from vc4_irq_reset() after a GPU reset.  Called with vc4->job_lock
 * held.
 */
static void
vc4_cancel_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_bin_job(vc4);

	if (!exec)
		return;

	/* Stop the perfmon so that the next bin job can be started. */
	if (exec->perfmon)
		vc4_perfmon_stop(vc4, exec->perfmon, false);

	list_move_tail(&exec->head, &vc4->bin_job_list);
	vc4_submit_next_bin_job(dev);
}

/*
 * Handles a render-frame-done (FRDONE) interrupt: retires the finished
 * render job (bumping finished_seqno, signalling its fence, waking
 * waiters) and submits follow-up work, taking perfmon ownership into
 * account.  Called with vc4->job_lock held (see vc4_irq() and
 * vc4_irq_reset()).
 */
static void
vc4_irq_finish_render_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_render_job(vc4);
	struct vc4_exec_info *nextbin, *nextrender;

	if (!exec)
		return;

	vc4->finished_seqno++;
	list_move_tail(&exec->head, &vc4->job_done_list);

	nextbin = vc4_first_bin_job(vc4);
	nextrender = vc4_first_render_job(vc4);

	/* Only stop the perfmon if following jobs in the queue don't expect it
	 * to be enabled.
	 */
	if (exec->perfmon && !nextrender &&
	    (!nextbin || nextbin->perfmon != exec->perfmon))
		vc4_perfmon_stop(vc4, exec->perfmon, true);

	/* If there's a render job waiting, start it. If this is not the case
	 * we may have to unblock the binner if it's been stalled because of
	 * perfmon (this can be checked by comparing the perfmon attached to
	 * the finished renderjob to the one attached to the next bin job: if
	 * they don't match, this means the binner is stalled and should be
	 * restarted).
	 */
	if (nextrender)
		vc4_submit_next_render_job(dev);
	else if (nextbin && nextbin->perfmon != exec->perfmon)
		vc4_submit_next_bin_job(dev);

	if (exec->fence) {
		dma_fence_signal_locked(exec->fence);
		dma_fence_put(exec->fence);
		exec->fence = NULL;
	}

	wake_up_all(&vc4->job_wait_queue);
	schedule_work(&vc4->job_done_work);
}

/*
 * Top-half interrupt handler registered by vc4_irq_install().  Reads and
 * acknowledges V3D_INTCTL, then dispatches each of the driver's three
 * interrupt sources.  OUTOMEM handling is deferred to
 * vc4_overflow_mem_work() (with the interrupt masked meanwhile, since
 * OUTOMEM stays asserted until the underlying cause is cleared).
 */
static irqreturn_t
vc4_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t intctl;
	irqreturn_t status = IRQ_NONE;

	barrier();
	intctl = V3D_READ(V3D_INTCTL);

	/* Acknowledge the interrupts we're handling here. The binner
	 * last flush / render frame done interrupt will be cleared,
	 * while OUTOMEM will stay high until the underlying cause is
	 * cleared.
	 */
	V3D_WRITE(V3D_INTCTL, intctl);

	if (intctl & V3D_INT_OUTOMEM) {
		/* Disable OUTOMEM until the work is done. */
		V3D_WRITE(V3D_INTDIS, V3D_INT_OUTOMEM);
		schedule_work(&vc4->overflow_mem_work);
		status = IRQ_HANDLED;
	}

	if (intctl & V3D_INT_FLDONE) {
		spin_lock(&vc4->job_lock);
		vc4_irq_finish_bin_job(dev);
		spin_unlock(&vc4->job_lock);
		status = IRQ_HANDLED;
	}

	if (intctl & V3D_INT_FRDONE) {
		spin_lock(&vc4->job_lock);
		vc4_irq_finish_render_job(dev);
		spin_unlock(&vc4->job_lock);
		status = IRQ_HANDLED;
	}

	return status;
}

/*
 * One-time setup before the IRQ line is requested: initializes the job
 * wait queue and the overflow-memory work item, and clears any stale
 * pending interrupts.  No-op when there's no V3D (e.g. firmware KMS).
 */
static void
vc4_irq_prepare(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	if (!vc4->v3d)
		return;

	init_waitqueue_head(&vc4->job_wait_queue);
	INIT_WORK(&vc4->overflow_mem_work, vc4_overflow_mem_work);

	/* Clear any pending interrupts someone might have left around
	 * for us.
	 */
	V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
}

/*
 * Unmasks the frame-done interrupts in the V3D.  No-op when there's no
 * V3D node bound.
 */
void
vc4_irq_enable(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	if (!vc4->v3d)
		return;

	/* Enable the render done interrupts. The out-of-memory interrupt is
	 * enabled as soon as we have a binner BO allocated.
	 */
	V3D_WRITE(V3D_INTENA, V3D_INT_FLDONE | V3D_INT_FRDONE);
}

/*
 * Masks and acknowledges all driver IRQs in the V3D, waits out any
 * handler still running, and cancels the pending overflow-memory work.
 * Counterpart to vc4_irq_enable().  No-op when there's no V3D node
 * bound.
 */
void
vc4_irq_disable(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	if (!vc4->v3d)
		return;

	/* Disable sending interrupts for our driver's IRQs. */
	V3D_WRITE(V3D_INTDIS, V3D_DRIVER_IRQS);

	/* Clear any pending interrupts we might have left. */
	V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);

	/* Finish any interrupt handler still in flight. */
	disable_irq(vc4->irq);

	cancel_work_sync(&vc4->overflow_mem_work);
}

/*
 * Requests the given IRQ line for vc4_irq() and enables the driver's
 * interrupts.  Returns 0 on success, -ENOTCONN when the line is not
 * connected, or the request_irq() error.
 */
int vc4_irq_install(struct drm_device *dev, int irq)
{
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	vc4_irq_prepare(dev);

	ret = request_irq(irq, vc4_irq, 0, dev->driver->name, dev);
	if (ret)
		return ret;

	vc4_irq_enable(dev);

	return 0;
}

/* Tears down what vc4_irq_install() set up and releases the IRQ line. */
void vc4_irq_uninstall(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	vc4_irq_disable(dev);
	free_irq(vc4->irq, dev);
}

/** Reinitializes interrupt registers when a GPU reset is performed. */
void vc4_irq_reset(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;

	/* Acknowledge any stale IRQs. */
	V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);

	/*
	 * Turn all our interrupts on.  Binner out of memory is the
	 * only one we expect to trigger at this point, since we've
	 * just come from poweron and haven't supplied any overflow
	 * memory yet.
	 */
	V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);

	/* Requeue the interrupted bin job and retire the interrupted
	 * render job, so queued work restarts cleanly after the reset.
	 */
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	vc4_cancel_bin_job(dev);
	vc4_irq_finish_render_job(dev);
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}