// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

/**
 * DOC: Interrupt management for the V3D engine
 *
 * When we take a bin, render, or TFU done interrupt, we need to
 * signal the fence for that job so that the scheduler can queue up
 * the next one and unblock any waiters.
 *
 * When we take the binner out-of-memory interrupt, we need to
 * allocate some new memory and pass it to the binner so that the
 * current job can make progress.
 */

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM |	\
			     V3D_INT_FLDONE |	\
			     V3D_INT_FRDONE |	\
			     V3D_INT_GMPV))

#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV |	\
			    V3D_HUB_INT_MMU_PTI |	\
			    V3D_HUB_INT_MMU_CAP |	\
			    V3D_HUB_INT_TFUC))

static irqreturn_t
v3d_hub_irq(int irq, void *arg);

static void
v3d_overflow_mem_work(struct work_struct *work)
{
	struct v3d_dev *v3d =
		container_of(work, struct v3d_dev, overflow_mem_work);
	struct drm_device *dev = &v3d->drm;
	struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
	struct drm_gem_object *obj;
	unsigned long irqflags;

	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate binner overflow mem\n");
		return;
	}
	obj = &bo->base.base;

	/* We lost a race, and our work task came in after the bin job
	 * completed and exited.  This can happen because the HW
	 * signals OOM before it's fully OOM, so the binner might just
	 * barely complete.
	 *
	 * If we lose the race and our work task comes in after a new
	 * bin job got scheduled, that's fine.  We'll just give them
	 * some binner pool anyway.
	 */
	spin_lock_irqsave(&v3d->job_lock, irqflags);
	if (!v3d->bin_job) {
		spin_unlock_irqrestore(&v3d->job_lock, irqflags);
		goto out;
	}

	/* Take a reference for the job's unref_list; it's dropped when
	 * the job is freed.
	 */
	drm_gem_object_get(obj);
	list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list);
	spin_unlock_irqrestore(&v3d->job_lock, irqflags);

	/* Hand the new pool to the binner: base address and size. */
	V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
	V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size);

out:
	drm_gem_object_put_unlocked(obj);
}
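/* Note on the unref_list handoff above (a sketch, not necessarily the
 * exact cleanup code): the overflow BO has to stay resident for as
 * long as the binner may still be writing tile lists into it, so
 * instead of freeing it here we park it on the bin job and assume the
 * job-free path drops the references, roughly:
 *
 *	while (!list_empty(&job->unref_list)) {
 *		struct v3d_bo *bo = list_first_entry(&job->unref_list,
 *						     struct v3d_bo, unref_head);
 *
 *		list_del(&bo->unref_head);
 *		drm_gem_object_put_unlocked(&bo->base.base);
 *	}
 */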
static irqreturn_t
v3d_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts);

	if (intsts & V3D_INT_OUTOMEM) {
		/* Note that the OOM status is edge signaled, so the
		 * interrupt won't happen again until we actually
		 * add more memory.  Also, as of V3D 4.1, FLDONE won't
		 * be reported until any OOM state has been cleared.
		 */
		schedule_work(&v3d->overflow_mem_work);
		status = IRQ_HANDLED;
	}

	if (intsts & V3D_INT_FLDONE) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->bin_job->bin.irq_fence);

		trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}

	if (intsts & V3D_INT_FRDONE) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->render_job->render.irq_fence);

		trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}

	/* We shouldn't be triggering these if we have GMP in
	 * always-allowed mode.
	 */
	if (intsts & V3D_INT_GMPV)
		dev_err(v3d->dev, "GMP violation\n");

	/* V3D 4.2 wires the hub and core IRQs together, so if we
	 * didn't see one of the core interrupts then check the hub
	 * for MMU IRQs.
	 */
	if (v3d->single_irq_line && status == IRQ_NONE)
		return v3d_hub_irq(irq, arg);

	return status;
}

static irqreturn_t
v3d_hub_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_READ(V3D_HUB_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_WRITE(V3D_HUB_INT_CLR, intsts);

	if (intsts & V3D_HUB_INT_TFUC) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->tfu_job->irq_fence);

		trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}

	if (intsts & (V3D_HUB_INT_MMU_WRV |
		      V3D_HUB_INT_MMU_PTI |
		      V3D_HUB_INT_MMU_CAP)) {
		u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
		/* VIO_ADDR holds the faulting virtual address shifted
		 * right by 8 bits.
		 */
		u64 vio_addr = (u64)V3D_READ(V3D_MMU_VIO_ADDR) << 8;

		dev_err(v3d->dev, "MMU error from client %d at 0x%08llx%s%s%s\n",
			axi_id, (long long)vio_addr,
			((intsts & V3D_HUB_INT_MMU_WRV) ?
			 ", write violation" : ""),
			((intsts & V3D_HUB_INT_MMU_PTI) ?
			 ", pte invalid" : ""),
			((intsts & V3D_HUB_INT_MMU_CAP) ?
			 ", cap exceeded" : ""));
		status = IRQ_HANDLED;
	}

	return status;
}

int
v3d_irq_init(struct v3d_dev *v3d)
{
	int irq1, ret, core;

	INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);

	/* Clear any pending interrupts someone might have left around
	 * for us.
	 */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

	irq1 = platform_get_irq(v3d->pdev, 1);
	if (irq1 == -EPROBE_DEFER)
		return irq1;
	if (irq1 > 0) {
		/* Two IRQ lines: one for the core, one for the hub. */
		ret = devm_request_irq(v3d->dev, irq1,
				       v3d_irq, IRQF_SHARED,
				       "v3d_core0", v3d);
		if (ret)
			goto fail;
		ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
				       v3d_hub_irq, IRQF_SHARED,
				       "v3d_hub", v3d);
		if (ret)
			goto fail;
	} else {
		/* Single combined IRQ line (V3D 4.2): v3d_irq() chains
		 * to v3d_hub_irq() as needed.
		 */
		v3d->single_irq_line = true;

		ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
				       v3d_irq, IRQF_SHARED,
				       "v3d", v3d);
		if (ret)
			goto fail;
	}

	v3d_irq_enable(v3d);
	return 0;

fail:
	if (ret != -EPROBE_DEFER)
		dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
	return ret;
}

void
v3d_irq_enable(struct v3d_dev *v3d)
{
	int core;

	/* Enable our set of interrupts, masking out any others. */
	for (core = 0; core < v3d->cores; core++) {
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS);
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS);
	}

	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS);
	V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS);
}
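/* Note on the mask registers (a sketch of the register semantics as
 * used above): V3D exposes set/clear pairs instead of a read-modify-
 * write mask.  Writing a 1 to a bit in *_MSK_SET masks (disables) that
 * interrupt, and writing a 1 to the same bit in *_MSK_CLR unmasks
 * (enables) it, which is why v3d_irq_enable() can program the whole
 * mask with two writes and no read back.  Unmasking one additional
 * core interrupt later would be a single write of its bit (new_irq_bit
 * here is hypothetical):
 *
 *	V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, new_irq_bit);
 */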
void
v3d_irq_disable(struct v3d_dev *v3d)
{
	int core;

	/* Disable all interrupts. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);

	/* Clear any pending interrupts we might have left. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

	cancel_work_sync(&v3d->overflow_mem_work);
}

/** Reinitializes interrupt registers when a GPU reset is performed. */
void v3d_irq_reset(struct v3d_dev *v3d)
{
	v3d_irq_enable(v3d);
}
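/* Usage note (an assumption about the caller, not code in this file):
 * a GPU reset is expected to return the interrupt mask registers to
 * their power-on state, so the reset path should call v3d_irq_reset()
 * after the core has been reset and the MMU page tables restored,
 * shaped roughly like:
 *
 *	v3d_reset_hw(v3d);	(hypothetical hardware-reset helper)
 *	v3d_mmu_set_page_table(v3d);
 *	v3d_irq_reset(v3d);
 */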