/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** DOC: Interrupt management for the V3D engine.
 *
 * We have an interrupt status register (V3D_INTCTL) which reports
 * interrupts, and where writing 1 bits clears those interrupts.
 * There are also a pair of interrupt registers
 * (V3D_INTENA/V3D_INTDIS) where writing a 1 to their bits enables or
 * disables that specific interrupt, and 0s written are ignored
 * (reading either one returns the set of enabled interrupts).
 *
 * When we take a binning flush done interrupt, we need to submit the
 * next frame for binning and move the finished frame to the render
 * thread.
 *
 * When we take a render frame interrupt, we need to wake the
 * processes waiting for some frame to be done, and get the next frame
 * submitted ASAP (so the hardware doesn't sit idle when there's work
 * to do).
 *
 * When we take the binner out of memory interrupt, we need to
 * allocate some new memory and pass it to the binner so that the
 * current job can make progress.
 */

#include "vc4_drv.h"
#include "vc4_regs.h"

/* The set of interrupts this driver handles: binner out-of-memory,
 * binner flush done, and render frame done.
 */
#define V3D_DRIVER_IRQS (V3D_INT_OUTOMEM | \
			 V3D_INT_FLDONE | \
			 V3D_INT_FRDONE)

DECLARE_WAIT_QUEUE_HEAD(render_wait);

/* Work handler for V3D_INT_OUTOMEM: allocates a fresh chunk of binner
 * overflow memory, retires the previous chunk (tying its lifetime to
 * the job that may still be using it), and hands the new chunk to the
 * hardware before re-enabling the out-of-memory interrupt.
 */
static void
vc4_overflow_mem_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, overflow_mem_work);
	struct drm_device *dev = vc4->dev;
	struct vc4_bo *bo;

	bo = vc4_bo_create(dev, 256 * 1024, true);
	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate binner overflow mem\n");
		return;
	}

	/* If there's a job executing currently, then our previous
	 * overflow allocation is getting used in that job and we need
	 * to queue it to be released when the job is done.  But if no
	 * job is executing at all, then we can free the old overflow
	 * object directly.
	 *
	 * No lock necessary for this pointer since we're the only
	 * ones that update the pointer, and our workqueue won't
	 * reenter.
	 */
	if (vc4->overflow_mem) {
		struct vc4_exec_info *current_exec;
		unsigned long irqflags;

		spin_lock_irqsave(&vc4->job_lock, irqflags);
		/* Prefer the job currently binning; otherwise the most
		 * recently queued render job may still reference the
		 * old overflow memory.
		 */
		current_exec = vc4_first_bin_job(vc4);
		if (!current_exec)
			current_exec = vc4_last_render_job(vc4);
		if (current_exec) {
			/* Defer the free until that job's seqno retires. */
			vc4->overflow_mem->seqno = current_exec->seqno;
			list_add_tail(&vc4->overflow_mem->unref_head,
				      &current_exec->unref_list);
			vc4->overflow_mem = NULL;
		}
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
	}

	/* Still non-NULL here means no job claimed the old chunk, so
	 * it can be dropped immediately.
	 */
	if (vc4->overflow_mem)
		drm_gem_object_unreference_unlocked(&vc4->overflow_mem->base.base);
	vc4->overflow_mem = bo;

	/* Point the binner at the new overflow memory, then ack and
	 * re-enable the out-of-memory interrupt (it was disabled in
	 * vc4_irq() when this work was scheduled).
	 */
	V3D_WRITE(V3D_BPOA, bo->base.paddr);
	V3D_WRITE(V3D_BPOS, bo->base.base.size);
	V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
	V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
}

/* Called with the job_lock held on binner flush done: moves the
 * finished bin job to the render queue and kicks off the next bin job.
 */
static void
vc4_irq_finish_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_bin_job(vc4);

	if (!exec)
		return;

	vc4_move_job_to_render(dev, exec);
	vc4_submit_next_bin_job(dev);
}

/* Called with the job_lock held after a GPU reset: requeues the
 * interrupted bin job at the tail of the bin list and resubmits, so
 * the job is retried rather than lost.
 */
static void
vc4_cancel_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_bin_job(vc4);

	if (!exec)
		return;

	list_move_tail(&exec->head, &vc4->bin_job_list);
	vc4_submit_next_bin_job(dev);
}

/* Called with the job_lock held on render frame done: retires the
 * finished render job, advances finished_seqno, submits the next
 * render job, and wakes anyone waiting on job completion.
 */
static void
vc4_irq_finish_render_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_render_job(vc4);

	if (!exec)
		return;

	vc4->finished_seqno++;
	list_move_tail(&exec->head, &vc4->job_done_list);
	vc4_submit_next_render_job(dev);

	wake_up_all(&vc4->job_wait_queue);
	schedule_work(&vc4->job_done_work);
}

/* Top-level V3D interrupt handler. */
irqreturn_t
vc4_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t intctl;
	irqreturn_t status = IRQ_NONE;

	barrier();
	intctl = V3D_READ(V3D_INTCTL);

	/* Acknowledge the interrupts we're handling here.  The binner
	 * last flush / render frame done interrupt will be cleared,
	 * while OUTOMEM will stay high until the underlying cause is
	 * cleared.
	 */
	V3D_WRITE(V3D_INTCTL, intctl);

	if (intctl & V3D_INT_OUTOMEM) {
		/* Disable OUTOMEM until the work is done, so it
		 * doesn't keep firing while the workqueue allocates
		 * replacement memory; vc4_overflow_mem_work()
		 * re-enables it.
		 */
		V3D_WRITE(V3D_INTDIS, V3D_INT_OUTOMEM);
		schedule_work(&vc4->overflow_mem_work);
		status = IRQ_HANDLED;
	}

	if (intctl & V3D_INT_FLDONE) {
		spin_lock(&vc4->job_lock);
		vc4_irq_finish_bin_job(dev);
		spin_unlock(&vc4->job_lock);
		status = IRQ_HANDLED;
	}

	if (intctl & V3D_INT_FRDONE) {
		spin_lock(&vc4->job_lock);
		vc4_irq_finish_render_job(dev);
		spin_unlock(&vc4->job_lock);
		status = IRQ_HANDLED;
	}

	return status;
}

/* Initializes driver-side IRQ state and clears any interrupts left
 * pending (e.g. by the firmware) before the handler is installed.
 */
void
vc4_irq_preinstall(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	init_waitqueue_head(&vc4->job_wait_queue);
	INIT_WORK(&vc4->overflow_mem_work, vc4_overflow_mem_work);

	/* Clear any pending interrupts someone might have left around
	 * for us.
	 */
	V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
}

int
vc4_irq_postinstall(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Enable the render done, binner flush done, and out of
	 * memory interrupts.
	 */
	V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);

	return 0;
}

/* Tears down IRQ delivery: masks and acks our interrupts, then waits
 * for any in-flight overflow-memory work to finish.
 */
void
vc4_irq_uninstall(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Disable sending interrupts for our driver's IRQs. */
	V3D_WRITE(V3D_INTDIS, V3D_DRIVER_IRQS);

	/* Clear any pending interrupts we might have left. */
	V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);

	cancel_work_sync(&vc4->overflow_mem_work);
}

/** Reinitializes interrupt registers when a GPU reset is performed. */
void vc4_irq_reset(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	unsigned long irqflags;

	/* Acknowledge any stale IRQs. */
	V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);

	/*
	 * Turn all our interrupts on.  Binner out of memory is the
	 * only one we expect to trigger at this point, since we've
	 * just come from poweron and haven't supplied any overflow
	 * memory yet.
	 */
	V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);

	/* Requeue the interrupted bin job and retire any render job
	 * that had already finished before the reset.
	 */
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	vc4_cancel_bin_job(dev);
	vc4_irq_finish_render_job(dev);
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}