// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

/**
 * DOC: Interrupt management for the V3D engine
 *
 * When we take a bin, render, or TFU done interrupt, we need to
 * signal the fence for that job so that the scheduler can queue up
 * the next one and unblock any waiters.
 *
 * When we take the binner out-of-memory interrupt, we need to
 * allocate some new memory and pass it to the binner so that the
 * current job can make progress.
 */

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM |	\
			     V3D_INT_FLDONE |	\
			     V3D_INT_FRDONE |	\
			     V3D_INT_GMPV))

#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV |	\
			    V3D_HUB_INT_MMU_PTI |	\
			    V3D_HUB_INT_MMU_CAP |	\
			    V3D_HUB_INT_TFUC))

static void
v3d_overflow_mem_work(struct work_struct *work)
{
	struct v3d_dev *v3d =
		container_of(work, struct v3d_dev, overflow_mem_work);
	struct drm_device *dev = &v3d->drm;
	struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
	unsigned long irqflags;

	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate binner overflow mem\n");
		return;
	}

	/* We lost a race, and our work task came in after the bin job
	 * completed and exited.  This can happen because the HW
	 * signals OOM before it's fully OOM, so the binner might just
	 * barely complete.
	 *
	 * If we lose the race and our work task comes in after a new
	 * bin job got scheduled, that's fine.  We'll just give them
	 * some binner pool anyway.
	 */
	spin_lock_irqsave(&v3d->job_lock, irqflags);
	if (!v3d->bin_job) {
		spin_unlock_irqrestore(&v3d->job_lock, irqflags);
		goto out;
	}

	drm_gem_object_get(&bo->base);
	list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list);
	spin_unlock_irqrestore(&v3d->job_lock, irqflags);

	V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
	V3D_CORE_WRITE(0, V3D_PTB_BPOS, bo->base.size);

out:
	drm_gem_object_put_unlocked(&bo->base);
}

static irqreturn_t
v3d_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts);

	if (intsts & V3D_INT_OUTOMEM) {
		/* Note that the OOM status is edge signaled, so the
		 * interrupt won't happen again until we actually
		 * add more memory.
		 */
		schedule_work(&v3d->overflow_mem_work);
		status = IRQ_HANDLED;
	}

	if (intsts & V3D_INT_FLDONE) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->bin_job->bin.done_fence);

		trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}

	if (intsts & V3D_INT_FRDONE) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->render_job->render.done_fence);

		trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}

	/* We shouldn't be triggering these if we have GMP in
	 * always-allowed mode.
	 */
	if (intsts & V3D_INT_GMPV)
		dev_err(v3d->dev, "GMP violation\n");

	return status;
}
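
/*
 * A minimal sketch of how the done-fence signaling above is typically
 * consumed on the other side, assuming a waiter that already holds a
 * reference to the job's bin done fence.  The helper name
 * wait_for_bin_done() is hypothetical and not part of this driver;
 * dma_fence_wait_timeout() is the standard kernel API:
 *
 *	static int wait_for_bin_done(struct v3d_exec_info *exec)
 *	{
 *		// Blocks until v3d_irq() above calls dma_fence_signal()
 *		// on the bin done fence, or until the timeout expires.
 *		long ret = dma_fence_wait_timeout(exec->bin.done_fence,
 *						  true, // interruptible
 *						  msecs_to_jiffies(1000));
 *
 *		if (ret == 0)
 *			return -ETIMEDOUT;
 *		return ret < 0 ? ret : 0;
 *	}
 */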
static irqreturn_t
v3d_hub_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_READ(V3D_HUB_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_WRITE(V3D_HUB_INT_CLR, intsts);

	if (intsts & V3D_HUB_INT_TFUC) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->tfu_job->done_fence);

		trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}

	if (intsts & (V3D_HUB_INT_MMU_WRV |
		      V3D_HUB_INT_MMU_PTI |
		      V3D_HUB_INT_MMU_CAP)) {
		u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
		u64 vio_addr = (u64)V3D_READ(V3D_MMU_VIO_ADDR) << 8;

		dev_err(v3d->dev, "MMU error from client %d at 0x%08llx%s%s%s\n",
			axi_id, (long long)vio_addr,
			((intsts & V3D_HUB_INT_MMU_WRV) ?
			 ", write violation" : ""),
			((intsts & V3D_HUB_INT_MMU_PTI) ?
			 ", pte invalid" : ""),
			((intsts & V3D_HUB_INT_MMU_CAP) ?
			 ", cap exceeded" : ""));
		status = IRQ_HANDLED;
	}

	return status;
}

void
v3d_irq_init(struct v3d_dev *v3d)
{
	int ret, core;

	INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);

	/* Clear any pending interrupts someone might have left around
	 * for us.
	 */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

	/* Check each request separately: the original code overwrote
	 * the hub IRQ's return value with the core IRQ's, so a hub
	 * setup failure went unreported.
	 */
	ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
			       v3d_hub_irq, IRQF_SHARED,
			       "v3d_hub", v3d);
	if (ret)
		dev_err(v3d->dev, "hub IRQ setup failed: %d\n", ret);

	ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1),
			       v3d_irq, IRQF_SHARED,
			       "v3d_core0", v3d);
	if (ret)
		dev_err(v3d->dev, "core IRQ setup failed: %d\n", ret);

	v3d_irq_enable(v3d);
}

void
v3d_irq_enable(struct v3d_dev *v3d)
{
	int core;

	/* Enable our set of interrupts, masking out any others. */
	for (core = 0; core < v3d->cores; core++) {
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS);
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS);
	}

	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS);
	V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS);
}

void
v3d_irq_disable(struct v3d_dev *v3d)
{
	int core;

	/* Disable all interrupts. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);

	/* Clear any pending interrupts we might have left. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

	cancel_work_sync(&v3d->overflow_mem_work);
}

/** Reinitializes interrupt registers when a GPU reset is performed. */
void v3d_irq_reset(struct v3d_dev *v3d)
{
	v3d_irq_enable(v3d);
}
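
/*
 * A minimal sketch of how these helpers would bracket a GPU reset,
 * assuming a caller such as a job-timeout path.  The v3d_reset_hw()
 * name is a hypothetical stand-in for the actual hardware reset
 * sequence, which lives elsewhere in the driver:
 *
 *	v3d_irq_disable(v3d);	// mask everything, ack leftovers,
 *				// and flush the overflow-mem work
 *	v3d_reset_hw(v3d);	// hypothetical: bring the V3D block
 *				// back to a clean state
 *	v3d_irq_reset(v3d);	// re-enable our interrupt set
 */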