// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_mocs.h"
#include "intel_rc6.h"
#include "intel_rps.h"
#include "intel_uncore.h"
#include "intel_pm.h"

void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
	gt->i915 = i915;
	gt->uncore = &i915->uncore;

	spin_lock_init(&gt->irq_lock);

	INIT_LIST_HEAD(&gt->closed_vma);
	spin_lock_init(&gt->closed_lock);

	intel_gt_init_reset(gt);
	intel_gt_init_requests(gt);
	intel_gt_pm_init_early(gt);

	intel_rps_init_early(&gt->rps);
	intel_uc_init_early(&gt->uc);
}

void intel_gt_init_hw_early(struct drm_i915_private *i915)
{
	i915->gt.ggtt = &i915->ggtt;
}

static void init_unused_ring(struct intel_gt *gt, u32 base)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_write(uncore, RING_CTL(base), 0);
	intel_uncore_write(uncore, RING_HEAD(base), 0);
	intel_uncore_write(uncore, RING_TAIL(base), 0);
	intel_uncore_write(uncore, RING_START(base), 0);
}

static void init_unused_rings(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (IS_I830(i915)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
		init_unused_ring(gt, SRB2_BASE);
		init_unused_ring(gt, SRB3_BASE);
	} else if (IS_GEN(i915, 2)) {
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
	} else if (IS_GEN(i915, 3)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, PRB2_BASE);
	}
}
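/*
 * intel_gt_init_hw: bring the GT hardware to a known state. After checking
 * that the GT is not terminally wedged, this applies and verifies the GT
 * workarounds, programs swizzling, quiesces any unused rings, enables PPGTT,
 * loads the uC firmware and sets up the MOCS tables, all under an explicit
 * forcewake reference.
 */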
int intel_gt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	BUG_ON(!i915->kernel_context);
	ret = intel_gt_terminally_wedged(gt);
	if (ret)
		return ret;

	gt->last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
		intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));

	if (IS_HASWELL(i915))
		intel_uncore_write(uncore,
				   MI_PREDICATE_RESULT_2,
				   IS_HSW_GT3(i915) ?
				   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	/* Apply the GT workarounds... */
	intel_gt_apply_workarounds(gt);
	/* ...and determine whether they are sticking. */
	intel_gt_verify_workarounds(gt, "init");

	intel_gt_init_swizzling(gt);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Makes sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(gt);

	ret = i915_ppgtt_init_hw(gt);
	if (ret) {
		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(&gt->uc);
	if (ret) {
		i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init(gt);

out:
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
	return ret;
}

static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw(uncore, reg, 0, set);
}

static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw(uncore, reg, clr, 0);
}

static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
	intel_uncore_rmw(uncore, reg, 0, 0);
}

static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
{
	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
	GEN6_RING_FAULT_REG_POSTING_READ(engine);
}

void
intel_gt_clear_error_registers(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 eir;

	if (!IS_GEN(i915, 2))
		clear_register(uncore, PGTBL_ER);

	if (INTEL_GEN(i915) < 4)
		clear_register(uncore, IPEIR(RENDER_RING_BASE));
	else
		clear_register(uncore, IPEIR_I965);

	clear_register(uncore, EIR);
	eir = intel_uncore_read(uncore, EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
		rmw_set(uncore, EMR, eir);
		intel_uncore_write(uncore, GEN2_IIR,
				   I915_MASTER_ERROR_INTERRUPT);
	}

	if (INTEL_GEN(i915) >= 12) {
		rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
	} else if (INTEL_GEN(i915) >= 8) {
		rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
	} else if (INTEL_GEN(i915) >= 6) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine_masked(engine, gt, engine_mask, id)
			gen8_clear_engine_error_register(engine);
	}
}
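/*
 * The *_check_faults() helpers below read back the ring fault registers and
 * log any fault left pending by a previous context: gen6_check_faults() walks
 * the per-engine fault registers, while gen8_check_faults() decodes the
 * single "all engine" fault register (plus its TLB fault data) used on gen8
 * and later.
 */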
209 "GGTT" : "PPGTT", 210 RING_FAULT_SRCID(fault), 211 RING_FAULT_FAULT_TYPE(fault)); 212 } 213 } 214 } 215 216 static void gen8_check_faults(struct intel_gt *gt) 217 { 218 struct intel_uncore *uncore = gt->uncore; 219 i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg; 220 u32 fault; 221 222 if (INTEL_GEN(gt->i915) >= 12) { 223 fault_reg = GEN12_RING_FAULT_REG; 224 fault_data0_reg = GEN12_FAULT_TLB_DATA0; 225 fault_data1_reg = GEN12_FAULT_TLB_DATA1; 226 } else { 227 fault_reg = GEN8_RING_FAULT_REG; 228 fault_data0_reg = GEN8_FAULT_TLB_DATA0; 229 fault_data1_reg = GEN8_FAULT_TLB_DATA1; 230 } 231 232 fault = intel_uncore_read(uncore, fault_reg); 233 if (fault & RING_FAULT_VALID) { 234 u32 fault_data0, fault_data1; 235 u64 fault_addr; 236 237 fault_data0 = intel_uncore_read(uncore, fault_data0_reg); 238 fault_data1 = intel_uncore_read(uncore, fault_data1_reg); 239 240 fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) | 241 ((u64)fault_data0 << 12); 242 243 DRM_DEBUG_DRIVER("Unexpected fault\n" 244 "\tAddr: 0x%08x_%08x\n" 245 "\tAddress space: %s\n" 246 "\tEngine ID: %d\n" 247 "\tSource ID: %d\n" 248 "\tType: %d\n", 249 upper_32_bits(fault_addr), 250 lower_32_bits(fault_addr), 251 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT", 252 GEN8_RING_FAULT_ENGINE_ID(fault), 253 RING_FAULT_SRCID(fault), 254 RING_FAULT_FAULT_TYPE(fault)); 255 } 256 } 257 258 void intel_gt_check_and_clear_faults(struct intel_gt *gt) 259 { 260 struct drm_i915_private *i915 = gt->i915; 261 262 /* From GEN8 onwards we only have one 'All Engine Fault Register' */ 263 if (INTEL_GEN(i915) >= 8) 264 gen8_check_faults(gt); 265 else if (INTEL_GEN(i915) >= 6) 266 gen6_check_faults(gt); 267 else 268 return; 269 270 intel_gt_clear_error_registers(gt, ALL_ENGINES); 271 } 272 273 void intel_gt_flush_ggtt_writes(struct intel_gt *gt) 274 { 275 struct intel_uncore *uncore = gt->uncore; 276 intel_wakeref_t wakeref; 277 278 /* 279 * No actual flushing is required for the GTT write domain for reads 280 * from the GTT domain. Writes to it "immediately" go to main memory 281 * as far as we know, so there's no chipset flush. It also doesn't 282 * land in the GPU render cache. 283 * 284 * However, we do have to enforce the order so that all writes through 285 * the GTT land before any writes to the device, such as updates to 286 * the GATT itself. 287 * 288 * We also have to wait a bit for the writes to land from the GTT. 289 * An uncached read (i.e. mmio) seems to be ideal for the round-trip 290 * timing. This issue has only been observed when switching quickly 291 * between GTT writes and CPU reads from inside the kernel on recent hw, 292 * and it appears to only affect discrete GTT blocks (i.e. on LLC 293 * system agents we cannot reproduce this behaviour, until Cannonlake 294 * that was!). 
295 */ 296 297 wmb(); 298 299 if (INTEL_INFO(gt->i915)->has_coherent_ggtt) 300 return; 301 302 intel_gt_chipset_flush(gt); 303 304 with_intel_runtime_pm(uncore->rpm, wakeref) { 305 unsigned long flags; 306 307 spin_lock_irqsave(&uncore->lock, flags); 308 intel_uncore_posting_read_fw(uncore, 309 RING_HEAD(RENDER_RING_BASE)); 310 spin_unlock_irqrestore(&uncore->lock, flags); 311 } 312 } 313 314 void intel_gt_chipset_flush(struct intel_gt *gt) 315 { 316 wmb(); 317 if (INTEL_GEN(gt->i915) < 6) 318 intel_gtt_chipset_flush(); 319 } 320 321 void intel_gt_driver_register(struct intel_gt *gt) 322 { 323 intel_rps_driver_register(>->rps); 324 } 325 326 static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size) 327 { 328 struct drm_i915_private *i915 = gt->i915; 329 struct drm_i915_gem_object *obj; 330 struct i915_vma *vma; 331 int ret; 332 333 obj = i915_gem_object_create_stolen(i915, size); 334 if (IS_ERR(obj)) 335 obj = i915_gem_object_create_internal(i915, size); 336 if (IS_ERR(obj)) { 337 DRM_ERROR("Failed to allocate scratch page\n"); 338 return PTR_ERR(obj); 339 } 340 341 vma = i915_vma_instance(obj, >->ggtt->vm, NULL); 342 if (IS_ERR(vma)) { 343 ret = PTR_ERR(vma); 344 goto err_unref; 345 } 346 347 ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); 348 if (ret) 349 goto err_unref; 350 351 gt->scratch = i915_vma_make_unshrinkable(vma); 352 353 return 0; 354 355 err_unref: 356 i915_gem_object_put(obj); 357 return ret; 358 } 359 360 static void intel_gt_fini_scratch(struct intel_gt *gt) 361 { 362 i915_vma_unpin_and_release(>->scratch, 0); 363 } 364 365 int intel_gt_init(struct intel_gt *gt) 366 { 367 int err; 368 369 err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K); 370 if (err) 371 return err; 372 373 intel_gt_pm_init(gt); 374 375 return 0; 376 } 377 378 void intel_gt_driver_remove(struct intel_gt *gt) 379 { 380 GEM_BUG_ON(gt->awake); 381 } 382 383 void intel_gt_driver_unregister(struct intel_gt *gt) 384 { 385 intel_rps_driver_unregister(>->rps); 386 } 387 388 void intel_gt_driver_release(struct intel_gt *gt) 389 { 390 intel_gt_pm_fini(gt); 391 intel_gt_fini_scratch(gt); 392 } 393 394 void intel_gt_driver_late_release(struct intel_gt *gt) 395 { 396 intel_uc_driver_late_release(>->uc); 397 intel_gt_fini_reset(gt); 398 } 399