// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "panfrost_device.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"
#include "panfrost_regs.h"

static irqreturn_t panfrost_gpu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 state = gpu_read(pfdev, GPU_INT_STAT);
	u32 fault_status = gpu_read(pfdev, GPU_FAULT_STATUS);

	if (!state)
		return IRQ_NONE;

	if (state & GPU_IRQ_MASK_ERROR) {
		u64 address = (u64) gpu_read(pfdev, GPU_FAULT_ADDRESS_HI) << 32;
		address |= gpu_read(pfdev, GPU_FAULT_ADDRESS_LO);

		dev_warn(pfdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
			 fault_status, panfrost_exception_name(fault_status & 0xFF),
			 address);

		if (state & GPU_IRQ_MULTIPLE_FAULT)
			dev_warn(pfdev->dev, "There were multiple GPU faults - some have not been reported\n");

		gpu_write(pfdev, GPU_INT_MASK, 0);
	}

	if (state & GPU_IRQ_PERFCNT_SAMPLE_COMPLETED)
		panfrost_perfcnt_sample_done(pfdev);

	if (state & GPU_IRQ_CLEAN_CACHES_COMPLETED)
		panfrost_perfcnt_clean_cache_done(pfdev);

	gpu_write(pfdev, GPU_INT_CLEAR, state);

	return IRQ_HANDLED;
}

int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
{
	int ret;
	u32 val;

	gpu_write(pfdev, GPU_INT_MASK, 0);
	gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
	gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET);

	ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT,
		val, val & GPU_IRQ_RESET_COMPLETED, 100, 10000);

	if (ret) {
		dev_err(pfdev->dev, "gpu soft reset timed out\n");
		return ret;
	}

	gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL);
	gpu_write(pfdev, GPU_INT_MASK, GPU_IRQ_MASK_ALL);

	return 0;
}

void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev)
{
	/*
	 * The Amlogic integrated Mali-T820, Mali-G31 & Mali-G52 need
	 * these undocumented bits in GPU_PWR_OVERRIDE1 to be set in order
	 * to operate correctly.
	 */
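	/*
	 * Presumably GPU_PWR_KEY_UNLOCK lifts the write protection on the
	 * GPU_PWR_* override registers, which is why it is written first.
	 * This is an assumption based on the register names; the bits
	 * themselves are undocumented, as noted above.
	 */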
	gpu_write(pfdev, GPU_PWR_KEY, GPU_PWR_KEY_UNLOCK);
	gpu_write(pfdev, GPU_PWR_OVERRIDE1, 0xfff | (0x20 << 16));
}

static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
{
	u32 quirks = 0;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8443) ||
	    panfrost_has_hw_issue(pfdev, HW_ISSUE_11035))
		quirks |= SC_LS_PAUSEBUFFER_DISABLE;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10327))
		quirks |= SC_SDC_DISABLE_OQ_DISCARD;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10797))
		quirks |= SC_ENABLE_TEXGRD_FLAGS;

	if (!panfrost_has_hw_issue(pfdev, GPUCORE_1619)) {
		if (panfrost_model_cmp(pfdev, 0x750) < 0) /* T60x, T62x, T72x */
			quirks |= SC_LS_ATTR_CHECK_DISABLE;
		else if (panfrost_model_cmp(pfdev, 0x880) <= 0) /* T76x, T8xx */
			quirks |= SC_LS_ALLOW_ATTR_TYPES;
	}

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_TLS_HASHING))
		quirks |= SC_TLS_HASH_ENABLE;

	if (quirks)
		gpu_write(pfdev, GPU_SHADER_CONFIG, quirks);

	quirks = gpu_read(pfdev, GPU_TILER_CONFIG);

	/* Set tiler clock gate override if required */
	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_T76X_3953))
		quirks |= TC_CLOCK_GATE_OVERRIDE;

	gpu_write(pfdev, GPU_TILER_CONFIG, quirks);

	quirks = gpu_read(pfdev, GPU_L2_MMU_CONFIG);

	/* Limit read & write ID width for AXI */
	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG))
		quirks &= ~(L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS |
			    L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES);
	else
		quirks &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_READS |
			    L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES);

	gpu_write(pfdev, GPU_L2_MMU_CONFIG, quirks);

	quirks = 0;
	if ((panfrost_model_eq(pfdev, 0x860) || panfrost_model_eq(pfdev, 0x880)) &&
	    pfdev->features.revision >= 0x2000)
		quirks |= JM_MAX_JOB_THROTTLE_LIMIT << JM_JOB_THROTTLE_LIMIT_SHIFT;
	else if (panfrost_model_eq(pfdev, 0x6000) &&
		 pfdev->features.coherency_features == COHERENCY_ACE)
		quirks |= (COHERENCY_ACE_LITE | COHERENCY_ACE) <<
			  JM_FORCE_COHERENCY_FEATURES_SHIFT;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_IDVS_GROUP_SIZE))
		quirks |= JM_DEFAULT_IDVS_GROUP_SIZE << JM_IDVS_GROUP_SIZE_SHIFT;

	if (quirks)
		gpu_write(pfdev, GPU_JM_CONFIG, quirks);

	/* Platform-specific quirks go here */
	if (pfdev->comp->vendor_quirk)
		pfdev->comp->vendor_quirk(pfdev);
}

#define MAX_HW_REVS 6

struct panfrost_model {
	const char *name;
	u32 id;
	u32 id_mask;
	u64 features;
	u64 issues;
	struct {
		u32 revision;
		u64 issues;
	} revs[MAX_HW_REVS];
};
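/*
 * GPU_MODEL() stringifies <_name> and picks up the per-model
 * hw_features_<name> and hw_issues_<name> masks defined in
 * panfrost_features.h and panfrost_issues.h. GPU_REV_EXT() packs a
 * revision as (major << 12) | (minor << 4) | status, the same layout
 * panfrost_gpu_init_features() decodes from the GPU_ID register.
 */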
#define GPU_MODEL(_name, _id, ...) \
{\
	.name = __stringify(_name), \
	.id = _id, \
	.features = hw_features_##_name, \
	.issues = hw_issues_##_name, \
	.revs = { __VA_ARGS__ }, \
}

#define GPU_REV_EXT(name, _rev, _p, _s, stat) \
{\
	.revision = (_rev) << 12 | (_p) << 4 | (_s), \
	.issues = hw_issues_##name##_r##_rev##p##_p##stat, \
}
#define GPU_REV(name, r, p) GPU_REV_EXT(name, r, p, 0, )

static const struct panfrost_model gpu_models[] = {
	/* T60x has an oddball version */
	GPU_MODEL(t600, 0x600,
		GPU_REV_EXT(t600, 0, 0, 1, _15dev0)),
	GPU_MODEL(t620, 0x620,
		GPU_REV(t620, 0, 1), GPU_REV(t620, 1, 0)),
	GPU_MODEL(t720, 0x720),
	GPU_MODEL(t760, 0x750,
		GPU_REV(t760, 0, 0), GPU_REV(t760, 0, 1),
		GPU_REV_EXT(t760, 0, 1, 0, _50rel0),
		GPU_REV(t760, 0, 2), GPU_REV(t760, 0, 3)),
	GPU_MODEL(t820, 0x820),
	GPU_MODEL(t830, 0x830),
	GPU_MODEL(t860, 0x860),
	GPU_MODEL(t880, 0x880),

	GPU_MODEL(g71, 0x6000,
		GPU_REV_EXT(g71, 0, 0, 1, _05dev0)),
	GPU_MODEL(g72, 0x6001),
	GPU_MODEL(g51, 0x7000),
	GPU_MODEL(g76, 0x7001),
	GPU_MODEL(g52, 0x7002),
	GPU_MODEL(g31, 0x7003,
		GPU_REV(g31, 1, 0)),

	/* Sentinel: the lookup loop in panfrost_gpu_init_features()
	 * terminates on a NULL name, so an unknown GPU ID must not run
	 * off the end of the table.
	 */
	{},
};

static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
{
	u32 gpu_id, num_js, major, minor, status, rev;
	const char *name = "unknown";
	u64 hw_feat = 0;
	u64 hw_issues = hw_issues_all;
	const struct panfrost_model *model;
	int i;

	pfdev->features.l2_features = gpu_read(pfdev, GPU_L2_FEATURES);
	pfdev->features.core_features = gpu_read(pfdev, GPU_CORE_FEATURES);
	pfdev->features.tiler_features = gpu_read(pfdev, GPU_TILER_FEATURES);
	pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES);
	pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES);
	pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES);
	pfdev->features.max_threads = gpu_read(pfdev, GPU_THREAD_MAX_THREADS);
	pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
	pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE);
	pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
	pfdev->features.afbc_features = gpu_read(pfdev, GPU_AFBC_FEATURES);
	for (i = 0; i < 4; i++)
		pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));

	pfdev->features.as_present = gpu_read(pfdev, GPU_AS_PRESENT);

	pfdev->features.js_present = gpu_read(pfdev, GPU_JS_PRESENT);
	num_js = hweight32(pfdev->features.js_present);
	for (i = 0; i < num_js; i++)
		pfdev->features.js_features[i] = gpu_read(pfdev, GPU_JS_FEATURES(i));

	pfdev->features.shader_present = gpu_read(pfdev, GPU_SHADER_PRESENT_LO);
	pfdev->features.shader_present |= (u64)gpu_read(pfdev, GPU_SHADER_PRESENT_HI) << 32;

	pfdev->features.tiler_present = gpu_read(pfdev, GPU_TILER_PRESENT_LO);
	pfdev->features.tiler_present |= (u64)gpu_read(pfdev, GPU_TILER_PRESENT_HI) << 32;

	pfdev->features.l2_present = gpu_read(pfdev, GPU_L2_PRESENT_LO);
	pfdev->features.l2_present |= (u64)gpu_read(pfdev, GPU_L2_PRESENT_HI) << 32;
	pfdev->features.nr_core_groups = hweight64(pfdev->features.l2_present);

	pfdev->features.stack_present = gpu_read(pfdev, GPU_STACK_PRESENT_LO);
	pfdev->features.stack_present |= (u64)gpu_read(pfdev, GPU_STACK_PRESENT_HI) << 32;

	pfdev->features.thread_tls_alloc = gpu_read(pfdev, GPU_THREAD_TLS_ALLOC);
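	/*
	 * GPU_ID holds the model ID in bits [31:16] and the revision in
	 * bits [15:0]; the revision further splits into major [15:12],
	 * minor [11:4] and status [3:0], mirroring GPU_REV_EXT() above.
	 */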
	gpu_id = gpu_read(pfdev, GPU_ID);
	pfdev->features.revision = gpu_id & 0xffff;
	pfdev->features.id = gpu_id >> 16;

	/* The T60x has an oddball ID value. Fix it up to the standard Midgard
	 * format so we (and userspace) don't have to special-case it.
	 */
	if (pfdev->features.id == 0x6956)
		pfdev->features.id = 0x0600;

	major = (pfdev->features.revision >> 12) & 0xf;
	minor = (pfdev->features.revision >> 4) & 0xff;
	status = pfdev->features.revision & 0xf;
	rev = pfdev->features.revision;

	gpu_id = pfdev->features.id;

	for (model = gpu_models; model->name; model++) {
		int best = -1;

		if (!panfrost_model_eq(pfdev, model->id))
			continue;

		name = model->name;
		hw_feat = model->features;
		hw_issues |= model->issues;
		for (i = 0; i < MAX_HW_REVS; i++) {
			if (model->revs[i].revision == rev) {
				best = i;
				break;
			} else if (model->revs[i].revision == (rev & ~0xf))
				best = i;
		}

		if (best >= 0)
			hw_issues |= model->revs[best].issues;

		break;
	}

	bitmap_from_u64(pfdev->features.hw_features, hw_feat);
	bitmap_from_u64(pfdev->features.hw_issues, hw_issues);

	dev_info(pfdev->dev, "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
		 name, gpu_id, major, minor, status);
	dev_info(pfdev->dev, "features: %64pb, issues: %64pb",
		 pfdev->features.hw_features,
		 pfdev->features.hw_issues);

	dev_info(pfdev->dev, "Features: L2:0x%08x Shader:0x%08x Tiler:0x%08x Mem:0x%08x MMU:0x%08x AS:0x%x JS:0x%x",
		 pfdev->features.l2_features,
		 pfdev->features.core_features,
		 pfdev->features.tiler_features,
		 pfdev->features.mem_features,
		 pfdev->features.mmu_features,
		 pfdev->features.as_present,
		 pfdev->features.js_present);

	dev_info(pfdev->dev, "shader_present=0x%llx l2_present=0x%llx",
		 pfdev->features.shader_present, pfdev->features.l2_present);
}

void panfrost_gpu_power_on(struct panfrost_device *pfdev)
{
	int ret;
	u32 val;
	u64 core_mask = U64_MAX;

	panfrost_gpu_init_quirks(pfdev);

	if (pfdev->features.l2_present != 1) {
		/*
		 * Only support one core group now.
		 * ~(l2_present - 1) unsets all bits in l2_present except
		 * the bottom bit. (l2_present - 2) has all the bits in
		 * the first core group set. AND them together to generate
		 * a mask of cores in the first core group.
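		 *
		 * For example, l2_present == 0b0101 (two core groups) gives
		 * ~0b0100 & 0b0011 == 0b0011, i.e. only the shader cores
		 * behind the first L2 slice.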
		 */
		core_mask = ~(pfdev->features.l2_present - 1) &
			     (pfdev->features.l2_present - 2);
		dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n",
			      hweight64(core_mask),
			      hweight64(pfdev->features.shader_present));
	}
	gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present & core_mask);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
		val, val == (pfdev->features.l2_present & core_mask),
		100, 20000);
	if (ret)
		dev_err(pfdev->dev, "error powering up gpu L2");

	gpu_write(pfdev, SHADER_PWRON_LO,
		  pfdev->features.shader_present & core_mask);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
		val, val == (pfdev->features.shader_present & core_mask),
		100, 20000);
	if (ret)
		dev_err(pfdev->dev, "error powering up gpu shader");

	gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
		val, val == pfdev->features.tiler_present, 100, 1000);
	if (ret)
		dev_err(pfdev->dev, "error powering up gpu tiler");
}

void panfrost_gpu_power_off(struct panfrost_device *pfdev)
{
	gpu_write(pfdev, TILER_PWROFF_LO, 0);
	gpu_write(pfdev, SHADER_PWROFF_LO, 0);
	gpu_write(pfdev, L2_PWROFF_LO, 0);
}

int panfrost_gpu_init(struct panfrost_device *pfdev)
{
	int err, irq;

	err = panfrost_gpu_soft_reset(pfdev);
	if (err)
		return err;

	panfrost_gpu_init_features(pfdev);

	err = dma_set_mask_and_coherent(pfdev->dev,
		DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features)));
	if (err)
		return err;

	dma_set_max_seg_size(pfdev->dev, UINT_MAX);

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
	if (irq <= 0)
		return -ENODEV;

	err = devm_request_irq(pfdev->dev, irq, panfrost_gpu_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev);
	if (err) {
		dev_err(pfdev->dev, "failed to request gpu irq");
		return err;
	}

	panfrost_gpu_power_on(pfdev);

	return 0;
}

void panfrost_gpu_fini(struct panfrost_device *pfdev)
{
	panfrost_gpu_power_off(pfdev);
}

u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev)
{
	u32 flush_id;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION)) {
		/* Flush reduction only makes sense when the GPU is kept powered on between jobs */
		if (pm_runtime_get_if_in_use(pfdev->dev)) {
			flush_id = gpu_read(pfdev, GPU_LATEST_FLUSH_ID);
			pm_runtime_put(pfdev->dev);
			return flush_id;
		}
	}

	return 0;
}