/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_GT__
#define __INTEL_GT__

#include "i915_drv.h"
#include "intel_engine_types.h"
#include "intel_gt_types.h"
#include "intel_reset.h"

struct drm_i915_private;
struct drm_printer;

/*
 * Check that the GT is a graphics GT and has an IP version within the
 * specified range (inclusive).
 */
#define IS_GFX_GT_IP_RANGE(gt, from, until) ( \
	BUILD_BUG_ON_ZERO((from) < IP_VER(2, 0)) + \
	BUILD_BUG_ON_ZERO((until) < (from)) + \
	((gt)->type != GT_MEDIA && \
	 GRAPHICS_VER_FULL((gt)->i915) >= (from) && \
	 GRAPHICS_VER_FULL((gt)->i915) <= (until)))
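/*
 * Example usage (illustrative only): gate code on a graphics IP range.
 * The IP versions below are the same ones used in the stepping examples
 * further down; the surrounding condition is hypothetical. Note that a
 * "from" below IP_VER(2, 0), or an "until" smaller than "from", fails
 * the build via the BUILD_BUG_ON_ZERO() terms above, so inverted ranges
 * are caught at compile time:
 *
 *	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)))
 *		...;
 */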
/*
 * Check that the GT is a graphics GT with a specific IP version and has
 * a stepping in the range [from, until). The lower stepping bound is
 * inclusive, the upper bound is exclusive. The most common use-case of this
 * macro is for checking bounds for workarounds, which usually have a stepping
 * ("from") at which the hardware issue is first present and another stepping
 * ("until") at which a hardware fix is present and the software workaround is
 * no longer necessary. E.g.,
 *
 *    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0)
 *    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B1, STEP_FOREVER)
 *
 * "STEP_FOREVER" can be passed as "until" for workarounds that have no upper
 * stepping bound for the specified IP version.
 */
#define IS_GFX_GT_IP_STEP(gt, ipver, from, until) ( \
	BUILD_BUG_ON_ZERO((until) <= (from)) + \
	(IS_GFX_GT_IP_RANGE((gt), (ipver), (ipver)) && \
	 IS_GRAPHICS_STEP((gt)->i915, (from), (until))))

#define GT_TRACE(gt, fmt, ...) do {					\
	const struct intel_gt *gt__ __maybe_unused = (gt);		\
	GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev),		\
		  ##__VA_ARGS__);					\
} while (0)

static inline bool gt_is_root(struct intel_gt *gt)
{
	return !gt->info.id;
}

static inline bool intel_gt_needs_wa_22016122933(struct intel_gt *gt)
{
	return MEDIA_VER_FULL(gt->i915) == IP_VER(13, 0) && gt->type == GT_MEDIA;
}

static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
	return container_of(uc, struct intel_gt, uc);
}

static inline struct intel_gt *guc_to_gt(struct intel_guc *guc)
{
	return container_of(guc, struct intel_gt, uc.guc);
}

static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
{
	return container_of(huc, struct intel_gt, uc.huc);
}

static inline struct intel_gt *gsc_uc_to_gt(struct intel_gsc_uc *gsc_uc)
{
	return container_of(gsc_uc, struct intel_gt, uc.gsc);
}

static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
{
	return container_of(gsc, struct intel_gt, gsc);
}

void intel_gt_common_init_early(struct intel_gt *gt);
int intel_root_gt_init_early(struct drm_i915_private *i915);
int intel_gt_assign_ggtt(struct intel_gt *gt);
int intel_gt_init_mmio(struct intel_gt *gt);
int __must_check intel_gt_init_hw(struct intel_gt *gt);
int intel_gt_init(struct intel_gt *gt);
void intel_gt_driver_register(struct intel_gt *gt);

void intel_gt_driver_unregister(struct intel_gt *gt);
void intel_gt_driver_remove(struct intel_gt *gt);
void intel_gt_driver_release(struct intel_gt *gt);
void intel_gt_driver_late_release_all(struct drm_i915_private *i915);

int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);

void intel_gt_check_and_clear_faults(struct intel_gt *gt);
i915_reg_t intel_gt_perf_limit_reasons_reg(struct intel_gt *gt);
void intel_gt_clear_error_registers(struct intel_gt *gt,
				    intel_engine_mask_t engine_mask);

void intel_gt_flush_ggtt_writes(struct intel_gt *gt);
void intel_gt_chipset_flush(struct intel_gt *gt);

static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
					  enum intel_gt_scratch_field field)
{
	return i915_ggtt_offset(gt->scratch) + field;
}

static inline bool intel_gt_has_unrecoverable_error(const struct intel_gt *gt)
{
	return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags) ||
	       test_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
}

static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
{
	GEM_BUG_ON(intel_gt_has_unrecoverable_error(gt) &&
		   !test_bit(I915_WEDGED, &gt->reset.flags));

	return unlikely(test_bit(I915_WEDGED, &gt->reset.flags));
}

int intel_gt_probe_all(struct drm_i915_private *i915);
int intel_gt_tiles_init(struct drm_i915_private *i915);
void intel_gt_release_all(struct drm_i915_private *i915);

#define for_each_gt(gt__, i915__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_MAX_GT; \
	     (id__)++) \
		for_each_if(((gt__) = (i915__)->gt[(id__)]))

void intel_gt_info_print(const struct intel_gt_info *info,
			 struct drm_printer *p);

void intel_gt_watchdog_work(struct work_struct *work);

enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
					      struct drm_i915_gem_object *obj,
					      bool always_coherent);

#endif /* __INTEL_GT__ */