/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#ifndef _INTEL_GUC_H_
#define _INTEL_GUC_H_

#include "intel_uncore.h"
#include "intel_guc_fw.h"
#include "intel_guc_fwif.h"
#include "intel_guc_ct.h"
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_uc_fw.h"
#include "i915_utils.h"
#include "i915_vma.h"

struct __guc_ads_blob;

/*
 * Top level structure of GuC. It handles firmware loading and manages the
 * client pool. intel_guc owns an intel_guc_client to replace the legacy
 * ExecList submission.
 */
struct intel_guc {
	struct intel_uc_fw fw;
	struct intel_guc_log log;
	struct intel_guc_ct ct;

	/* intel_guc_recv interrupt related state */
	spinlock_t irq_lock;
	unsigned int msg_enabled_mask;

	struct {
		void (*reset)(struct intel_guc *guc);
		void (*enable)(struct intel_guc *guc);
		void (*disable)(struct intel_guc *guc);
	} interrupts;

	bool submission_selected;

	struct i915_vma *ads_vma;
	struct __guc_ads_blob *ads_blob;

	struct i915_vma *stage_desc_pool;
	void *stage_desc_pool_vaddr;

	/* Control params for fw initialization */
	u32 params[GUC_CTL_MAX_DWORDS];

	/* GuC's FW specific registers used in MMIO send */
	struct {
		u32 base;
		unsigned int count;
		enum forcewake_domains fw_domains;
	} send_regs;

	/* register used to send interrupts to the GuC FW */
	i915_reg_t notify_reg;

	/* Store msg (e.g. log flush) that we see while CTBs are disabled */
	u32 mmio_msg;

	/* To serialize the intel_guc_send actions */
	struct mutex send_mutex;
};

static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
	return container_of(log, struct intel_guc, log);
}

static inline int intel_guc_send(struct intel_guc *guc, const u32 *action,
				 u32 len)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0);
}

static inline int
intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
			   u32 *response_buf, u32 response_buf_size)
{
	return intel_guc_ct_send(&guc->ct, action, len,
				 response_buf, response_buf_size);
}

static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
{
	intel_guc_ct_event_handler(&guc->ct);
}
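
/*
 * Illustrative usage of the send helpers above (a sketch, not part of this
 * file): a host-to-GuC request is a plain dword array whose first element
 * is an INTEL_GUC_ACTION_* opcode from intel_guc_fwif.h, followed by the
 * action's parameters, e.g.:
 *
 *	u32 action[] = {
 *		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
 *		rsa_offset,
 *	};
 *
 *	err = intel_guc_send(guc, action, ARRAY_SIZE(action));
 */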

/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
#define GUC_GGTT_TOP	0xFEE00000

/**
 * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
 * @guc: intel_guc structure.
 * @vma: i915 graphics virtual memory area.
 *
 * GuC does not allow any gfx GGTT address that falls into range
 * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
 * Currently, in order to exclude [0, ggtt.pin_bias) address space from
 * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma()
 * and pinned with PIN_OFFSET_BIAS along with the value of ggtt.pin_bias.
 *
 * Return: GGTT offset of the @vma.
 */
static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
					struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);

	GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));

	return offset;
}

void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_write_params(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
void intel_guc_notify(struct intel_guc *guc);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr);

static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
	return intel_uc_fw_is_supported(&guc->fw);
}

static inline bool intel_guc_is_wanted(struct intel_guc *guc)
{
	return intel_uc_fw_is_enabled(&guc->fw);
}

static inline bool intel_guc_is_used(struct intel_guc *guc)
{
	GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED);
	return intel_uc_fw_is_available(&guc->fw);
}

static inline bool intel_guc_is_fw_running(struct intel_guc *guc)
{
	return intel_uc_fw_is_running(&guc->fw);
}

static inline bool intel_guc_is_ready(struct intel_guc *guc)
{
	return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
}

static inline int intel_guc_sanitize(struct intel_guc *guc)
{
	intel_uc_fw_sanitize(&guc->fw);
	intel_guc_ct_sanitize(&guc->ct);
	guc->mmio_msg = 0;

	return 0;
}

static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask |= mask;
	spin_unlock_irq(&guc->irq_lock);
}

static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask &= ~mask;
	spin_unlock_irq(&guc->irq_lock);
}

int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine);

void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);

#endif