// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/device.h>
#include <linux/slab.h>

#include <drm/lima_drm.h>

#include "lima_device.h"
#include "lima_gp.h"
#include "lima_regs.h"
#include "lima_gem.h"
#include "lima_vm.h"

#define gp_write(reg, data) writel(data, ip->iomem + reg)
#define gp_read(reg) readl(ip->iomem + reg)

static irqreturn_t lima_gp_irq_handler(int irq, void *data)
{
	struct lima_ip *ip = data;
	struct lima_device *dev = ip->dev;
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
	struct lima_sched_task *task = pipe->current_task;
	u32 state = gp_read(LIMA_GP_INT_STAT);
	u32 status = gp_read(LIMA_GP_STATUS);
	bool done = false;

	/* for shared irq case */
	if (!state)
		return IRQ_NONE;

	if (state & LIMA_GP_IRQ_MASK_ERROR) {
		if ((state & LIMA_GP_IRQ_MASK_ERROR) ==
		    LIMA_GP_IRQ_PLBU_OUT_OF_MEM) {
			dev_dbg(dev->dev, "gp out of heap irq status=%x\n",
				status);
		} else {
			dev_err(dev->dev, "gp error irq state=%x status=%x\n",
				state, status);
			if (task)
				task->recoverable = false;
		}

		/* mask all interrupts before hard reset */
		gp_write(LIMA_GP_INT_MASK, 0);

		pipe->error = true;
		done = true;
	} else {
		bool valid = state & (LIMA_GP_IRQ_VS_END_CMD_LST |
				      LIMA_GP_IRQ_PLBU_END_CMD_LST);
		bool active = status & (LIMA_GP_STATUS_VS_ACTIVE |
					LIMA_GP_STATUS_PLBU_ACTIVE);
		done = valid && !active;
		pipe->error = false;
	}

	gp_write(LIMA_GP_INT_CLEAR, state);

	if (done)
		lima_sched_pipe_task_done(pipe);

	return IRQ_HANDLED;
}

static void lima_gp_soft_reset_async(struct lima_ip *ip)
{
	if (ip->data.async_reset)
		return;

	gp_write(LIMA_GP_INT_MASK, 0);
	gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_RESET_COMPLETED);
	gp_write(LIMA_GP_CMD, LIMA_GP_CMD_SOFT_RESET);
	ip->data.async_reset = true;
}

static int lima_gp_soft_reset_async_wait(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int err;
	u32 v;

	if (!ip->data.async_reset)
		return 0;

	err = readl_poll_timeout(ip->iomem + LIMA_GP_INT_RAWSTAT, v,
				 v & LIMA_GP_IRQ_RESET_COMPLETED,
				 0, 100);
	if (err) {
		dev_err(dev->dev, "gp soft reset time out\n");
		return err;
	}

	gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
	gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);

	ip->data.async_reset = false;
	return 0;
}

static int lima_gp_task_validate(struct lima_sched_pipe *pipe,
				 struct lima_sched_task *task)
{
	struct drm_lima_gp_frame *frame = task->frame;
	u32 *f = frame->frame;
	(void)pipe;

	if (f[LIMA_GP_VSCL_START_ADDR >> 2] >
	    f[LIMA_GP_VSCL_END_ADDR >> 2] ||
	    f[LIMA_GP_PLBUCL_START_ADDR >> 2] >
	    f[LIMA_GP_PLBUCL_END_ADDR >> 2] ||
	    f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] >
	    f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2])
		return -EINVAL;

	if (f[LIMA_GP_VSCL_START_ADDR >> 2] ==
	    f[LIMA_GP_VSCL_END_ADDR >> 2] &&
	    f[LIMA_GP_PLBUCL_START_ADDR >> 2] ==
	    f[LIMA_GP_PLBUCL_END_ADDR >> 2])
		return -EINVAL;

	return 0;
}

static void lima_gp_task_run(struct lima_sched_pipe *pipe,
			     struct lima_sched_task *task)
{
	struct lima_ip *ip = pipe->processor[0];
	struct drm_lima_gp_frame *frame = task->frame;
	u32 *f = frame->frame;
	u32 cmd = 0;
	int i;

	/* update real heap buffer size for GP */
	for (i = 0; i < task->num_bos; i++) {
		struct lima_bo *bo = task->bos[i];

		if (bo->heap_size &&
		    lima_vm_get_va(task->vm, bo) ==
		    f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2]) {
			f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] =
				f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] +
				bo->heap_size;
			task->recoverable = true;
			task->heap = bo;
			break;
		}
	}

	if (f[LIMA_GP_VSCL_START_ADDR >> 2] !=
	    f[LIMA_GP_VSCL_END_ADDR >> 2])
		cmd |= LIMA_GP_CMD_START_VS;
	if (f[LIMA_GP_PLBUCL_START_ADDR >> 2] !=
	    f[LIMA_GP_PLBUCL_END_ADDR >> 2])
		cmd |= LIMA_GP_CMD_START_PLBU;

	/* before any hw ops, wait last success task async soft reset */
	lima_gp_soft_reset_async_wait(ip);

	for (i = 0; i < LIMA_GP_FRAME_REG_NUM; i++)
		writel(f[i], ip->iomem + LIMA_GP_VSCL_START_ADDR + i * 4);

	gp_write(LIMA_GP_CMD, LIMA_GP_CMD_UPDATE_PLBU_ALLOC);
	gp_write(LIMA_GP_CMD, cmd);
}

static int lima_gp_bus_stop_poll(struct lima_ip *ip)
{
	return !!(gp_read(LIMA_GP_STATUS) & LIMA_GP_STATUS_BUS_STOPPED);
}

static int lima_gp_hard_reset_poll(struct lima_ip *ip)
{
	gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC01A0000);
	return gp_read(LIMA_GP_PERF_CNT_0_LIMIT) == 0xC01A0000;
}

static int lima_gp_hard_reset(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int ret;

	gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC0FFE000);
	gp_write(LIMA_GP_INT_MASK, 0);

	gp_write(LIMA_GP_CMD, LIMA_GP_CMD_STOP_BUS);
	ret = lima_poll_timeout(ip, lima_gp_bus_stop_poll, 10, 100);
	if (ret) {
		dev_err(dev->dev, "%s bus stop timeout\n", lima_ip_name(ip));
		return ret;
	}
	gp_write(LIMA_GP_CMD, LIMA_GP_CMD_RESET);
	ret = lima_poll_timeout(ip, lima_gp_hard_reset_poll, 10, 100);
	if (ret) {
		dev_err(dev->dev, "gp hard reset timeout\n");
		return ret;
	}

	gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0);
	gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
	gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
	return 0;
}

static void lima_gp_task_fini(struct lima_sched_pipe *pipe)
{
	lima_gp_soft_reset_async(pipe->processor[0]);
}

static void lima_gp_task_error(struct lima_sched_pipe *pipe)
{
	struct lima_ip *ip = pipe->processor[0];

	dev_err(ip->dev->dev, "gp task error int_state=%x status=%x\n",
		gp_read(LIMA_GP_INT_STAT), gp_read(LIMA_GP_STATUS));

	lima_gp_hard_reset(ip);
}

static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe)
{
	lima_sched_pipe_task_done(pipe);
}

static void lima_gp_task_mask_irq(struct lima_sched_pipe *pipe)
{
	struct lima_ip *ip = pipe->processor[0];

	gp_write(LIMA_GP_INT_MASK, 0);
}

static int lima_gp_task_recover(struct lima_sched_pipe *pipe)
{
	struct lima_ip *ip = pipe->processor[0];
	struct lima_sched_task *task = pipe->current_task;
	struct drm_lima_gp_frame *frame = task->frame;
	u32 *f = frame->frame;
	size_t fail_size =
		f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] -
		f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2];

	if (fail_size == task->heap->heap_size) {
		int ret;

		ret = lima_heap_alloc(task->heap, task->vm);
		if (ret < 0)
			return ret;
	}

	gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
	/* Resume from where we stopped, i.e. new start is old end */
	gp_write(LIMA_GP_PLBU_ALLOC_START_ADDR,
		 f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2]);
	f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] =
		f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] + task->heap->heap_size;
	gp_write(LIMA_GP_PLBU_ALLOC_END_ADDR,
		 f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2]);
	gp_write(LIMA_GP_CMD, LIMA_GP_CMD_UPDATE_PLBU_ALLOC);
	return 0;
}

static void lima_gp_print_version(struct lima_ip *ip)
{
	u32 version, major, minor;
	char *name;

	version = gp_read(LIMA_GP_VERSION);
	major = (version >> 8) & 0xFF;
	minor = version & 0xFF;
	switch (version >> 16) {
	case 0xA07:
		name = "mali200";
		break;
	case 0xC07:
		name = "mali300";
		break;
	case 0xB07:
		name = "mali400";
		break;
	case 0xD07:
		name = "mali450";
		break;
	default:
		name = "unknown";
		break;
	}
	dev_info(ip->dev->dev, "%s - %s version major %d minor %d\n",
		 lima_ip_name(ip), name, major, minor);
}

static struct kmem_cache *lima_gp_task_slab;
static int lima_gp_task_slab_refcnt;

static int lima_gp_hw_init(struct lima_ip *ip)
{
	ip->data.async_reset = false;
	lima_gp_soft_reset_async(ip);
	return lima_gp_soft_reset_async_wait(ip);
}

int lima_gp_resume(struct lima_ip *ip)
{
	return lima_gp_hw_init(ip);
}

void lima_gp_suspend(struct lima_ip *ip)
{

}

int lima_gp_init(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;
	int err;

	lima_gp_print_version(ip);

	err = lima_gp_hw_init(ip);
	if (err)
		return err;

	err = devm_request_irq(dev->dev, ip->irq, lima_gp_irq_handler,
			       IRQF_SHARED, lima_ip_name(ip), ip);
	if (err) {
		dev_err(dev->dev, "gp %s fail to request irq\n",
			lima_ip_name(ip));
		return err;
	}

	dev->gp_version = gp_read(LIMA_GP_VERSION);

	return 0;
}

void lima_gp_fini(struct lima_ip *ip)
{
	struct lima_device *dev = ip->dev;

	devm_free_irq(dev->dev, ip->irq, ip);
}

int lima_gp_pipe_init(struct lima_device *dev)
{
	int frame_size = sizeof(struct drm_lima_gp_frame);
	struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;

	if (!lima_gp_task_slab) {
		lima_gp_task_slab = kmem_cache_create_usercopy(
			"lima_gp_task", sizeof(struct lima_sched_task) + frame_size,
			0, SLAB_HWCACHE_ALIGN, sizeof(struct lima_sched_task),
			frame_size, NULL);
		if (!lima_gp_task_slab)
			return -ENOMEM;
	}
	lima_gp_task_slab_refcnt++;

	pipe->frame_size = frame_size;
	pipe->task_slab = lima_gp_task_slab;

	pipe->task_validate = lima_gp_task_validate;
	pipe->task_run = lima_gp_task_run;
	pipe->task_fini = lima_gp_task_fini;
	pipe->task_error = lima_gp_task_error;
	pipe->task_mmu_error = lima_gp_task_mmu_error;
	pipe->task_recover = lima_gp_task_recover;
	pipe->task_mask_irq = lima_gp_task_mask_irq;

	return 0;
}

void lima_gp_pipe_fini(struct lima_device *dev)
{
	if (!--lima_gp_task_slab_refcnt) {
		kmem_cache_destroy(lima_gp_task_slab);
		lima_gp_task_slab = NULL;
	}
}