// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

/**
 * DOC: Interrupt management for the V3D engine
 *
 * When we take a binning or rendering flush done interrupt, we need
 * to signal the fence for that job so that the scheduler can queue up
 * the next one and unblock any waiters.
 *
 * When we take the binner out of memory interrupt, we need to
 * allocate some new memory and pass it to the binner so that the
 * current job can make progress.
 */

#include "v3d_drv.h"
#include "v3d_regs.h"

#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM |	\
			     V3D_INT_FLDONE |	\
			     V3D_INT_FRDONE |	\
			     V3D_INT_GMPV))

#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV |	\
			    V3D_HUB_INT_MMU_PTI |	\
			    V3D_HUB_INT_MMU_CAP))

static void
v3d_overflow_mem_work(struct work_struct *work)
{
	struct v3d_dev *v3d =
		container_of(work, struct v3d_dev, overflow_mem_work);
	struct drm_device *dev = &v3d->drm;
	struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
	unsigned long irqflags;

	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate binner overflow mem\n");
		return;
	}

	/* We lost a race, and our work task came in after the bin job
	 * completed and exited.  This can happen because the HW
	 * signals OOM before it's fully OOM, so the binner might just
	 * barely complete.
	 *
	 * If we lose the race and our work task comes in after a new
	 * bin job got scheduled, that's fine.  We'll just give them
	 * some binner pool anyway.
	 */
	spin_lock_irqsave(&v3d->job_lock, irqflags);
	if (!v3d->bin_job) {
		spin_unlock_irqrestore(&v3d->job_lock, irqflags);
		goto out;
	}

	drm_gem_object_get(&bo->base);
	list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list);
	spin_unlock_irqrestore(&v3d->job_lock, irqflags);

	/* Point the binner at the new overflow pool (address and size). */
	V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
	V3D_CORE_WRITE(0, V3D_PTB_BPOS, bo->base.size);

out:
	drm_gem_object_put_unlocked(&bo->base);
}

static irqreturn_t
v3d_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts);

	if (intsts & V3D_INT_OUTOMEM) {
		/* Note that the OOM status is edge signaled, so the
		 * interrupt won't happen again until we actually
		 * add more memory.
		 */
		schedule_work(&v3d->overflow_mem_work);
		status = IRQ_HANDLED;
	}

	if (intsts & V3D_INT_FLDONE) {
		dma_fence_signal(v3d->bin_job->bin.done_fence);
		status = IRQ_HANDLED;
	}

	if (intsts & V3D_INT_FRDONE) {
		dma_fence_signal(v3d->render_job->render.done_fence);
		status = IRQ_HANDLED;
	}

	/* We shouldn't be triggering these if we have GMP in
	 * always-allowed mode.
	 */
	if (intsts & V3D_INT_GMPV)
		dev_err(v3d->dev, "GMP violation\n");

	return status;
}

static irqreturn_t
v3d_hub_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_READ(V3D_HUB_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_WRITE(V3D_HUB_INT_CLR, intsts);

	if (intsts & (V3D_HUB_INT_MMU_WRV |
		      V3D_HUB_INT_MMU_PTI |
		      V3D_HUB_INT_MMU_CAP)) {
		u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
		u64 vio_addr = (u64)V3D_READ(V3D_MMU_VIO_ADDR) << 8;

		dev_err(v3d->dev, "MMU error from client %d at 0x%08llx%s%s%s\n",
			axi_id, (long long)vio_addr,
			((intsts & V3D_HUB_INT_MMU_WRV) ?
			 ", write violation" : ""),
			((intsts & V3D_HUB_INT_MMU_PTI) ?
			 ", pte invalid" : ""),
			((intsts & V3D_HUB_INT_MMU_CAP) ?
			 ", cap exceeded" : ""));
		status = IRQ_HANDLED;
	}

	return status;
}

void
v3d_irq_init(struct v3d_dev *v3d)
{
	int ret, core;

	INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);

	/* Clear any pending interrupts someone might have left around
	 * for us.
	 */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

	ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
			       v3d_hub_irq, IRQF_SHARED,
			       "v3d_hub", v3d);
	if (ret)
		dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);

	ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1),
			       v3d_irq, IRQF_SHARED,
			       "v3d_core0", v3d);
	if (ret)
		dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);

	v3d_irq_enable(v3d);
}

void
v3d_irq_enable(struct v3d_dev *v3d)
{
	int core;

	/* Enable our set of interrupts, masking out any others. */
	for (core = 0; core < v3d->cores; core++) {
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS);
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS);
	}

	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS);
	V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS);
}

void
v3d_irq_disable(struct v3d_dev *v3d)
{
	int core;

	/* Disable all interrupts. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);

	/* Clear any pending interrupts we might have left. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

	cancel_work_sync(&v3d->overflow_mem_work);
}

/** Reinitializes interrupt registers when a GPU reset is performed. */
void v3d_irq_reset(struct v3d_dev *v3d)
{
	v3d_irq_enable(v3d);
}