1d5b1a78aSEric Anholt /* 2d5b1a78aSEric Anholt * Copyright © 2014 Broadcom 3d5b1a78aSEric Anholt * 4d5b1a78aSEric Anholt * Permission is hereby granted, free of charge, to any person obtaining a 5d5b1a78aSEric Anholt * copy of this software and associated documentation files (the "Software"), 6d5b1a78aSEric Anholt * to deal in the Software without restriction, including without limitation 7d5b1a78aSEric Anholt * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8d5b1a78aSEric Anholt * and/or sell copies of the Software, and to permit persons to whom the 9d5b1a78aSEric Anholt * Software is furnished to do so, subject to the following conditions: 10d5b1a78aSEric Anholt * 11d5b1a78aSEric Anholt * The above copyright notice and this permission notice (including the next 12d5b1a78aSEric Anholt * paragraph) shall be included in all copies or substantial portions of the 13d5b1a78aSEric Anholt * Software. 14d5b1a78aSEric Anholt * 15d5b1a78aSEric Anholt * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16d5b1a78aSEric Anholt * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17d5b1a78aSEric Anholt * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18d5b1a78aSEric Anholt * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19d5b1a78aSEric Anholt * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20d5b1a78aSEric Anholt * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21d5b1a78aSEric Anholt * IN THE SOFTWARE. 22d5b1a78aSEric Anholt */ 23d5b1a78aSEric Anholt 2472f793f1SEric Anholt /** 2572f793f1SEric Anholt * DOC: Interrupt management for the V3D engine 26d5b1a78aSEric Anholt * 27d5b1a78aSEric Anholt * We have an interrupt status register (V3D_INTCTL) which reports 28d5b1a78aSEric Anholt * interrupts, and where writing 1 bits clears those interrupts. 
29d5b1a78aSEric Anholt * There are also a pair of interrupt registers 30d5b1a78aSEric Anholt * (V3D_INTENA/V3D_INTDIS) where writing a 1 to their bits enables or 31d5b1a78aSEric Anholt * disables that specific interrupt, and 0s written are ignored 32d5b1a78aSEric Anholt * (reading either one returns the set of enabled interrupts). 33d5b1a78aSEric Anholt * 34ca26d28bSVarad Gautam * When we take a binning flush done interrupt, we need to submit the 35ca26d28bSVarad Gautam * next frame for binning and move the finished frame to the render 36ca26d28bSVarad Gautam * thread. 37ca26d28bSVarad Gautam * 38d5b1a78aSEric Anholt * When we take a render frame interrupt, we need to wake the 39d5b1a78aSEric Anholt * processes waiting for some frame to be done, and get the next frame 40d5b1a78aSEric Anholt * submitted ASAP (so the hardware doesn't sit idle when there's work 41d5b1a78aSEric Anholt * to do). 42d5b1a78aSEric Anholt * 43d5b1a78aSEric Anholt * When we take the binner out of memory interrupt, we need to 44d5b1a78aSEric Anholt * allocate some new memory and pass it to the binner so that the 45d5b1a78aSEric Anholt * current job can make progress. 
 */

#include <linux/platform_device.h>

#include <drm/drm_drv.h>

#include "vc4_drv.h"
#include "vc4_regs.h"
#include "vc4_trace.h"

/* The set of V3D interrupts this driver handles: binner out-of-memory,
 * binner flush done, and render frame done.
 */
#define V3D_DRIVER_IRQS (V3D_INT_OUTOMEM | \
			 V3D_INT_FLDONE | \
			 V3D_INT_FRDONE)

DECLARE_WAIT_QUEUE_HEAD(render_wait);

/*
 * Workqueue handler scheduled from the IRQ handler when the binner runs
 * out of memory (V3D_INT_OUTOMEM).  Hands the binner a fresh slot of the
 * shared bin BO, records which job owns the previous overflow slot so it
 * can be reclaimed when that job drains, then re-arms the OUTOMEM
 * interrupt (which the IRQ handler disabled before scheduling us).
 *
 * Runs in process context: takes bin_bo_lock (mutex) and then job_lock
 * (spinlock, IRQ-safe) — in that order.
 */
static void
vc4_overflow_mem_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, overflow_mem_work);
	struct vc4_bo *bo;
	int bin_bo_slot;
	struct vc4_exec_info *exec;
	unsigned long irqflags;

	mutex_lock(&vc4->bin_bo_lock);

	/* No binner BO allocated (yet, or anymore): nothing we can hand
	 * to the hardware, so just bail.  OUTOMEM stays disabled until
	 * re-enabled elsewhere.
	 */
	if (!vc4->bin_bo)
		goto complete;

	bo = vc4->bin_bo;

	bin_bo_slot = vc4_v3d_get_bin_slot(vc4);
	if (bin_bo_slot < 0) {
		DRM_ERROR("Couldn't allocate binner overflow mem\n");
		goto complete;
	}

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	if (vc4->bin_alloc_overflow) {
		/* If we had overflow memory allocated previously,
		 * then that chunk will free when the current bin job
		 * is done. If we don't have a bin job running, then
		 * the chunk will be done whenever the list of render
		 * jobs has drained.
		 */
		exec = vc4_first_bin_job(vc4);
		if (!exec)
			exec = vc4_last_render_job(vc4);
		if (exec) {
			/* Tag the old overflow slot onto the job that
			 * may still reference it; it's released when
			 * that job completes.
			 */
			exec->bin_slots |= vc4->bin_alloc_overflow;
		} else {
			/* There's nothing queued in the hardware, so
			 * the old slot is free immediately.
			 */
			vc4->bin_alloc_used &= ~vc4->bin_alloc_overflow;
		}
	}
	vc4->bin_alloc_overflow = BIT(bin_bo_slot);

	/* Point the binner at the new overflow chunk (address + size),
	 * then ack the latched OUTOMEM status and re-enable the
	 * interrupt so the next overflow can be reported.
	 */
	V3D_WRITE(V3D_BPOA, bo->base.paddr + bin_bo_slot * vc4->bin_alloc_size);
	V3D_WRITE(V3D_BPOS, bo->base.base.size);
	V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
	V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

complete:
	mutex_unlock(&vc4->bin_bo_lock);
}

/*
 * Handles a binner-flush-done interrupt: moves the finished bin job over
 * to the render queue and, if the next queued bin job uses the same
 * perfmon (or neither uses one), kicks it off immediately.
 *
 * Called from the IRQ handler with vc4->job_lock held.
 */
static void
vc4_irq_finish_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *next, *exec = vc4_first_bin_job(vc4);

	if (!exec)
		return;

	trace_vc4_bcl_end_irq(dev, exec->seqno);

	vc4_move_job_to_render(dev, exec);
	next = vc4_first_bin_job(vc4);

	/* Only submit the next job in the bin list if it matches the perfmon
	 * attached to the one that just finished (or if both jobs don't have
	 * perfmon attached to them).
	 */
	if (next && next->perfmon == exec->perfmon)
		vc4_submit_next_bin_job(dev);
}

/*
 * Requeues the currently-executing bin job at the tail of the bin list
 * and resubmits from the head.  Used on GPU reset, when the in-flight
 * bin job was lost and must be restarted.
 *
 * Called with vc4->job_lock held (see vc4_irq_reset()).
 */
static void
vc4_cancel_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_bin_job(vc4);

	if (!exec)
		return;

	/* Stop the perfmon so that the next bin job can be started. */
	if (exec->perfmon)
		vc4_perfmon_stop(vc4, exec->perfmon, false);

	list_move_tail(&exec->head, &vc4->bin_job_list);
	vc4_submit_next_bin_job(dev);
}

/*
 * Handles a render-frame-done interrupt: retires the finished render
 * job (bumping finished_seqno and queueing job_done_work), manages the
 * perfmon across the queue boundary, starts the next render job (or
 * unblocks a perfmon-stalled binner), signals the job's fence, and
 * wakes seqno waiters.
 *
 * Called from the IRQ handler — and from vc4_irq_reset() — with
 * vc4->job_lock held (hence dma_fence_signal_locked()).
 */
static void
vc4_irq_finish_render_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_render_job(vc4);
	struct vc4_exec_info *nextbin, *nextrender;

	if (!exec)
		return;

	trace_vc4_rcl_end_irq(dev, exec->seqno);

	vc4->finished_seqno++;
	list_move_tail(&exec->head, &vc4->job_done_list);

	nextbin = vc4_first_bin_job(vc4);
	nextrender = vc4_first_render_job(vc4);

	/* Only stop the perfmon if following jobs in the queue don't expect it
	 * to be enabled.
	 */
	if (exec->perfmon && !nextrender &&
	    (!nextbin || nextbin->perfmon != exec->perfmon))
		vc4_perfmon_stop(vc4, exec->perfmon, true);

	/* If there's a render job waiting, start it. If this is not the case
	 * we may have to unblock the binner if it's been stalled because of
	 * perfmon (this can be checked by comparing the perfmon attached to
	 * the finished renderjob to the one attached to the next bin job: if
	 * they don't match, this means the binner is stalled and should be
	 * restarted).
	 */
	if (nextrender)
		vc4_submit_next_render_job(dev);
	else if (nextbin && nextbin->perfmon != exec->perfmon)
		vc4_submit_next_bin_job(dev);

	if (exec->fence) {
		dma_fence_signal_locked(exec->fence);
		dma_fence_put(exec->fence);
		exec->fence = NULL;
	}

	wake_up_all(&vc4->job_wait_queue);
	schedule_work(&vc4->job_done_work);
}

/*
 * Top-level V3D interrupt handler.  Reads and acknowledges INTCTL, then
 * dispatches: OUTOMEM is deferred to vc4_overflow_mem_work (with the
 * interrupt masked until the work supplies memory), FLDONE and FRDONE
 * are handled inline under job_lock.  FLDONE is processed before FRDONE
 * so a just-finished bin job reaches the render queue first.
 */
static irqreturn_t
vc4_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t intctl;
	irqreturn_t status = IRQ_NONE;

	barrier();
	intctl = V3D_READ(V3D_INTCTL);

	/* Acknowledge the interrupts we're handling here. The binner
	 * last flush / render frame done interrupt will be cleared,
	 * while OUTOMEM will stay high until the underlying cause is
	 * cleared.
	 */
	V3D_WRITE(V3D_INTCTL, intctl);

	if (intctl & V3D_INT_OUTOMEM) {
		/* Disable OUTOMEM until the work is done. */
		V3D_WRITE(V3D_INTDIS, V3D_INT_OUTOMEM);
		schedule_work(&vc4->overflow_mem_work);
		status = IRQ_HANDLED;
	}

	if (intctl & V3D_INT_FLDONE) {
		spin_lock(&vc4->job_lock);
		vc4_irq_finish_bin_job(dev);
		spin_unlock(&vc4->job_lock);
		status = IRQ_HANDLED;
	}

	if (intctl & V3D_INT_FRDONE) {
		spin_lock(&vc4->job_lock);
		vc4_irq_finish_render_job(dev);
		spin_unlock(&vc4->job_lock);
		status = IRQ_HANDLED;
	}

	return status;
}

/*
 * One-time setup before the IRQ line is requested: initializes the job
 * wait queue and the overflow-memory work item, and clears any stale
 * interrupt status left over from the bootloader/firmware.
 */
static void
vc4_irq_prepare(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* No V3D core bound (e.g. firmware KMS) — nothing to prepare. */
	if (!vc4->v3d)
		return;

	init_waitqueue_head(&vc4->job_wait_queue);
	INIT_WORK(&vc4->overflow_mem_work, vc4_overflow_mem_work);

	/* Clear any pending interrupts someone might have left around
	 * for us.
	 */
	V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
}

/*
 * Enables the job-done interrupts.  Not valid on VC5-class hardware,
 * which does not use this V3D interrupt path.
 */
void
vc4_irq_enable(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	if (!vc4->v3d)
		return;

	/* Enable the render done interrupts. The out-of-memory interrupt is
	 * enabled as soon as we have a binner BO allocated.
	 */
	V3D_WRITE(V3D_INTENA, V3D_INT_FLDONE | V3D_INT_FRDONE);
}

/*
 * Masks and acknowledges all driver IRQs, waits out any handler still
 * running, and cancels pending overflow-memory work.  Counterpart to
 * vc4_irq_enable().
 */
void
vc4_irq_disable(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	if (!vc4->v3d)
		return;

	/* Disable sending interrupts for our driver's IRQs. */
	V3D_WRITE(V3D_INTDIS, V3D_DRIVER_IRQS);

	/* Clear any pending interrupts we might have left. */
	V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);

	/* Finish any interrupt handler still in flight. */
	disable_irq(vc4->irq);

	cancel_work_sync(&vc4->overflow_mem_work);
}

/*
 * Requests the IRQ line and brings up interrupt handling.
 *
 * Returns 0 on success, -ENODEV on VC5 hardware, -ENOTCONN when the
 * IRQ line is not wired up, or the error from request_irq().
 */
int vc4_irq_install(struct drm_device *dev, int irq)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return -ENODEV;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	vc4_irq_prepare(dev);

	ret = request_irq(irq, vc4_irq, 0, dev->driver->name, dev);
	if (ret)
		return ret;

	vc4_irq_enable(dev);

	return 0;
}

/*
 * Tears down interrupt handling and releases the IRQ line.
 * Counterpart to vc4_irq_install().
 */
void vc4_irq_uninstall(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	vc4_irq_disable(dev);
	free_irq(vc4->irq, dev);
}

/** Reinitializes interrupt registers when a GPU reset is performed. */
void vc4_irq_reset(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;

	if (WARN_ON_ONCE(vc4->is_vc5))
		return;

	/* Acknowledge any stale IRQs. */
	V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);

	/*
	 * Turn all our interrupts on. Binner out of memory is the
	 * only one we expect to trigger at this point, since we've
	 * just come from poweron and haven't supplied any overflow
	 * memory yet.
	 */
	V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);

	/* Restart the lost bin job (if any) and retire the render job
	 * that was in flight, so waiters aren't stuck forever.
	 */
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	vc4_cancel_bin_job(dev);
	vc4_irq_finish_render_job(dev);
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}