/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#ifndef _INTEL_GUC_H_
#define _INTEL_GUC_H_

#include <linux/delay.h>
#include <linux/iosys-map.h>
#include <linux/xarray.h>

#include "intel_guc_ct.h"
#include "intel_guc_fw.h"
#include "intel_guc_fwif.h"
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_guc_slpc_types.h"
#include "intel_uc_fw.h"
#include "intel_uncore.h"
#include "i915_utils.h"
#include "i915_vma.h"

struct __guc_ads_blob;

/**
 * struct intel_guc - Top level structure of GuC.
 *
 * It handles firmware loading and manages the client pool. intel_guc owns an
 * i915_sched_engine for submission.
 */
struct intel_guc {
	/** @fw: the GuC firmware */
	struct intel_uc_fw fw;
	/** @log: sub-structure containing GuC log related data and objects */
	struct intel_guc_log log;
	/** @ct: the command transport communication channel */
	struct intel_guc_ct ct;
	/** @slpc: sub-structure containing SLPC related data and objects */
	struct intel_guc_slpc slpc;

	/** @sched_engine: Global engine used to submit requests to GuC */
	struct i915_sched_engine *sched_engine;
	/**
	 * @stalled_request: if GuC can't process a request for any reason, we
	 * save it until GuC restarts processing. No other request can be
	 * submitted until the stalled request is processed.
	 */
	struct i915_request *stalled_request;
	/**
	 * @submission_stall_reason: reason why submission is stalled
	 */
	enum {
		STALL_NONE,
		STALL_REGISTER_CONTEXT,
		STALL_MOVE_LRC_TAIL,
		STALL_ADD_REQUEST,
	} submission_stall_reason;

	/* intel_guc_recv interrupt related state */
	/** @irq_lock: protects GuC irq state */
	spinlock_t irq_lock;
	/**
	 * @msg_enabled_mask: mask of events that are processed when receiving
	 * an INTEL_GUC_ACTION_DEFAULT G2H message.
	 */
	unsigned int msg_enabled_mask;

	/**
	 * @outstanding_submission_g2h: number of outstanding GuC to Host
	 * responses related to GuC submission, used to determine if the GT is
	 * idle
	 */
	atomic_t outstanding_submission_g2h;

	/** @interrupts: pointers to GuC interrupt-managing functions. */
	struct {
		void (*reset)(struct intel_guc *guc);
		void (*enable)(struct intel_guc *guc);
		void (*disable)(struct intel_guc *guc);
	} interrupts;

	/**
	 * @submission_state: sub-structure for submission state protected by
	 * a single lock
	 */
	struct {
		/**
		 * @lock: protects everything in submission_state,
		 * ce->guc_id.id, and ce->guc_id.ref when transitioning in and
		 * out of zero
		 */
		spinlock_t lock;
		/**
		 * @guc_ids: used to allocate new guc_ids, single-lrc
		 */
		struct ida guc_ids;
		/**
		 * @num_guc_ids: Number of guc_ids; a selftest feature allows
		 * reducing this number while testing.
		 */
		int num_guc_ids;
		/**
		 * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
		 */
		unsigned long *guc_ids_bitmap;
		/**
		 * @guc_id_list: list of intel_context with valid guc_ids but
		 * no refs
		 */
		struct list_head guc_id_list;
		/**
		 * @destroyed_contexts: list of contexts waiting to be
		 * destroyed (deregistered with the GuC)
		 */
		struct list_head destroyed_contexts;
		/**
		 * @destroyed_worker: worker to deregister contexts; needed
		 * because we must take a GT PM reference and can't do that
		 * from the destroy function, as it might run in an atomic
		 * context (no sleeping)
		 */
		struct work_struct destroyed_worker;
		/**
		 * @reset_fail_worker: worker to trigger a GT reset after an
		 * engine reset fails
		 */
		struct work_struct reset_fail_worker;
		/**
		 * @reset_fail_mask: mask of engines that failed to reset
		 */
		intel_engine_mask_t reset_fail_mask;
	} submission_state;

	/**
	 * @submission_supported: tracks whether we support GuC submission on
	 * the current platform
	 */
	bool submission_supported;
	/** @submission_selected: tracks whether the user enabled GuC submission */
	bool submission_selected;
	/**
	 * @rc_supported: tracks whether we support GuC rc on the current
	 * platform
	 */
	bool rc_supported;
	/** @rc_selected: tracks whether the user enabled GuC rc */
	bool rc_selected;

	/** @ads_vma: object allocated to hold the GuC ADS */
	struct i915_vma *ads_vma;
	/** @ads_map: contents of the GuC ADS */
	struct iosys_map ads_map;
	/** @ads_regset_size: size of the save/restore regsets in the ADS */
	u32 ads_regset_size;
	/**
	 * @ads_regset_count: number of save/restore registers in the ADS for
	 * each engine
	 */
	u32 ads_regset_count[I915_NUM_ENGINES];
	/** @ads_regset: save/restore regsets in the ADS */
	struct guc_mmio_reg *ads_regset;
	/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
	u32 ads_golden_ctxt_size;
	/** @ads_engine_usage_size: size of engine usage in the ADS */
	u32 ads_engine_usage_size;

	/** @lrc_desc_pool: object allocated to hold the GuC LRC descriptor pool */
	struct i915_vma *lrc_desc_pool;
	/** @lrc_desc_pool_vaddr: contents of the GuC LRC descriptor pool */
	void *lrc_desc_pool_vaddr;

	/**
	 * @context_lookup: used to resolve intel_context from guc_id; if a
	 * context is present in this structure, it is registered with the GuC
	 */
	struct xarray context_lookup;

	/** @params: Control params for fw initialization */
	u32 params[GUC_CTL_MAX_DWORDS];

	/** @send_regs: GuC's FW specific registers used for sending MMIO H2G */
	struct {
		u32 base;
		unsigned int count;
		enum forcewake_domains fw_domains;
	} send_regs;

	/** @notify_reg: register used to send interrupts to the GuC FW */
	i915_reg_t notify_reg;

	/**
	 * @mmio_msg: notification bitmask that the GuC writes in one of its
	 * registers when the CT channel is disabled, to be processed when the
	 * channel is back up.
	 */
	u32 mmio_msg;

	/** @send_mutex: used to serialize the intel_guc_send actions */
	struct mutex send_mutex;

	/**
	 * @timestamp: GT timestamp object that stores a copy of the timestamp
	 * and adjusts it for overflow using a worker.
	 */
	struct {
		/**
		 * @lock: Lock protecting the below fields and the engine stats.
		 */
		spinlock_t lock;

		/**
		 * @gt_stamp: 64-bit extended value of the GT timestamp.
		 */
		u64 gt_stamp;

		/**
		 * @ping_delay: Period for polling the GT timestamp for
		 * overflow.
		 */
		unsigned long ping_delay;

		/**
		 * @work: Periodic work to adjust GT timestamp, engine and
		 * context usage for overflows.
		 */
		struct delayed_work work;

		/**
		 * @shift: Right shift value for the gpm timestamp
		 */
		u32 shift;
	} timestamp;

#ifdef CONFIG_DRM_I915_SELFTEST
	/**
	 * @number_guc_id_stolen: The number of guc_ids that have been stolen
	 */
	int number_guc_id_stolen;
#endif
};

static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
	return container_of(log, struct intel_guc, log);
}

static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0, 0);
}

static
inline int intel_guc_send_nb(struct intel_guc *guc, const u32 *action, u32 len,
			     u32 g2h_len_dw)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0,
				 MAKE_SEND_FLAGS(g2h_len_dw));
}

static inline int
intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
			   u32 *response_buf, u32 response_buf_size)
{
	return intel_guc_ct_send(&guc->ct, action, len,
				 response_buf, response_buf_size, 0);
}
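/*
 * Example (illustrative sketch, not part of the driver API): a caller that
 * needs the GuC's reply builds an action array and passes a response buffer
 * to intel_guc_send_and_receive(). The action name and payload layout below
 * are hypothetical; real actions use the INTEL_GUC_ACTION_* encodings from
 * intel_guc_fwif.h.
 *
 *	u32 action[] = { SOME_INTEL_GUC_ACTION, param };
 *	u32 response[8];
 *	int ret;
 *
 *	ret = intel_guc_send_and_receive(guc, action, ARRAY_SIZE(action),
 *					 response, ARRAY_SIZE(response));
 *	if (ret < 0)
 *		return ret;
 */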
static inline int intel_guc_send_busy_loop(struct intel_guc *guc,
					   const u32 *action,
					   u32 len,
					   u32 g2h_len_dw,
					   bool loop)
{
	int err;
	unsigned int sleep_period_ms = 1;
	bool not_atomic = !in_atomic() && !irqs_disabled();

	/*
	 * FIXME: Have caller pass in if we are in an atomic context to avoid
	 * using in_atomic(). It is likely safe here as we check for irqs
	 * disabled, which basically all the spin locks in i915 do, but
	 * regardless this should be cleaned up.
	 */

	/* No sleeping with spin locks, just busy loop */
	might_sleep_if(loop && not_atomic);

retry:
	err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
	if (unlikely(err == -EBUSY && loop)) {
		if (likely(not_atomic)) {
			if (msleep_interruptible(sleep_period_ms))
				return -EINTR;
			sleep_period_ms = sleep_period_ms << 1;
		} else {
			cpu_relax();
		}
		goto retry;
	}

	return err;
}
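/*
 * Example (sketch): from non-atomic context a caller can ask
 * intel_guc_send_busy_loop() to retry on -EBUSY, sleeping for an
 * exponentially growing period between attempts; with loop == false a
 * single attempt is made. The action array is assumed to be built as in
 * the earlier example.
 *
 *	err = intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
 *	if (err == -EINTR)
 *		return err;
 */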
static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
{
	intel_guc_ct_event_handler(&guc->ct);
}

/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
#define GUC_GGTT_TOP	0xFEE00000

/**
 * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
 * @guc: intel_guc structure.
 * @vma: i915 graphics virtual memory area.
 *
 * GuC does not allow any gfx GGTT address that falls into range
 * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
 * Currently, in order to exclude [0, ggtt.pin_bias) address space from
 * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma()
 * and pinned with PIN_OFFSET_BIAS along with the value of ggtt.pin_bias.
 *
 * Return: GGTT offset of the @vma.
 */
static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
					struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);

	GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));

	return offset;
}
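/*
 * Example (sketch): objects shared with the GuC are typically allocated
 * with intel_guc_allocate_vma() (declared below), which pins them above
 * ggtt.pin_bias and below GUC_GGTT_TOP, so intel_guc_ggtt_offset() yields
 * an address the firmware can use.
 *
 *	struct i915_vma *vma = intel_guc_allocate_vma(guc, SZ_4K);
 *	u32 ggtt_addr;
 *
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	ggtt_addr = intel_guc_ggtt_offset(guc, vma);
 */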
void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_late(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_write_params(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
void intel_guc_notify(struct intel_guc *guc);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr);
int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value);
int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value);

static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
	return intel_uc_fw_is_supported(&guc->fw);
}

static inline bool intel_guc_is_wanted(struct intel_guc *guc)
{
	return intel_uc_fw_is_enabled(&guc->fw);
}

static inline bool intel_guc_is_used(struct intel_guc *guc)
{
	GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED);
	return intel_uc_fw_is_available(&guc->fw);
}

static inline bool intel_guc_is_fw_running(struct intel_guc *guc)
{
	return intel_uc_fw_is_running(&guc->fw);
}

static inline bool intel_guc_is_ready(struct intel_guc *guc)
{
	return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
}

static inline void intel_guc_reset_interrupts(struct intel_guc *guc)
{
	guc->interrupts.reset(guc);
}

static inline void intel_guc_enable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.enable(guc);
}

static inline void intel_guc_disable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.disable(guc);
}

static inline int intel_guc_sanitize(struct intel_guc *guc)
{
	intel_uc_fw_sanitize(&guc->fw);
	intel_guc_disable_interrupts(guc);
	intel_guc_ct_sanitize(&guc->ct);
	guc->mmio_msg = 0;

	return 0;
}

static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask |= mask;
	spin_unlock_irq(&guc->irq_lock);
}

static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask &= ~mask;
	spin_unlock_irq(&guc->irq_lock);
}
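/*
 * Example (sketch): code interested in a particular default G2H
 * notification enables the corresponding bit in @msg_enabled_mask for the
 * window of interest and disables it afterwards. INTEL_GUC_RECV_MSG_EXCEPTION
 * is used here purely as an illustration of one such bit.
 *
 *	intel_guc_enable_msg(guc, INTEL_GUC_RECV_MSG_EXCEPTION);
 *	... receive and process the notification ...
 *	intel_guc_disable_msg(guc, INTEL_GUC_RECV_MSG_EXCEPTION);
 */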
int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout);

int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
					  const u32 *msg, u32 len);
int intel_guc_sched_done_process_msg(struct intel_guc *guc,
				     const u32 *msg, u32 len);
int intel_guc_context_reset_process_msg(struct intel_guc *guc,
					const u32 *msg, u32 len);
int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
					 const u32 *msg, u32 len);
int intel_guc_error_capture_process_msg(struct intel_guc *guc,
					const u32 *msg, u32 len);

void intel_guc_find_hung_context(struct intel_engine_cs *engine);

int intel_guc_global_policies_update(struct intel_guc *guc);

void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);

void intel_guc_submission_reset_prepare(struct intel_guc *guc);
void intel_guc_submission_reset(struct intel_guc *guc, bool stalled);
void intel_guc_submission_reset_finish(struct intel_guc *guc);
void intel_guc_submission_cancel_requests(struct intel_guc *guc);

void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);

void intel_guc_write_barrier(struct intel_guc *guc);

#endif