/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#ifndef _INTEL_GUC_H_
#define _INTEL_GUC_H_

#include <linux/delay.h>
#include <linux/iosys-map.h>
#include <linux/xarray.h>

#include "intel_guc_ct.h"
#include "intel_guc_fw.h"
#include "intel_guc_fwif.h"
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_guc_slpc_types.h"
#include "intel_uc_fw.h"
#include "intel_uncore.h"
#include "i915_utils.h"
#include "i915_vma.h"

struct __guc_ads_blob;
struct intel_guc_state_capture;

/**
 * struct intel_guc - Top level structure of GuC.
 *
 * It handles firmware loading and manages the client pool. intel_guc owns an
 * i915_sched_engine for submission.
 */
struct intel_guc {
	/** @fw: the GuC firmware */
	struct intel_uc_fw fw;
	/** @log: sub-structure containing GuC log related data and objects */
	struct intel_guc_log log;
	/** @ct: the command transport communication channel */
	struct intel_guc_ct ct;
	/** @slpc: sub-structure containing SLPC related data and objects */
	struct intel_guc_slpc slpc;
	/** @capture: the error-state-capture module's data and objects */
	struct intel_guc_state_capture *capture;

	/** @sched_engine: Global engine used to submit requests to GuC */
	struct i915_sched_engine *sched_engine;
	/**
	 * @stalled_request: if GuC can't process a request for any reason, we
	 * save it until GuC restarts processing. No other request can be
	 * submitted until the stalled request is processed.
	 */
	struct i915_request *stalled_request;
	/**
	 * @submission_stall_reason: reason why submission is stalled
	 */
	enum {
		STALL_NONE,
		STALL_REGISTER_CONTEXT,
		STALL_MOVE_LRC_TAIL,
		STALL_ADD_REQUEST,
	} submission_stall_reason;

	/* intel_guc_recv interrupt related state */
	/** @irq_lock: protects GuC irq state */
	spinlock_t irq_lock;
	/**
	 * @msg_enabled_mask: mask of events that are processed when receiving
	 * an INTEL_GUC_ACTION_DEFAULT G2H message.
	 */
	unsigned int msg_enabled_mask;

	/**
	 * @outstanding_submission_g2h: number of outstanding GuC to Host
	 * responses related to GuC submission, used to determine if the GT is
	 * idle
	 */
	atomic_t outstanding_submission_g2h;

	/** @interrupts: pointers to GuC interrupt-managing functions. */
	struct {
		void (*reset)(struct intel_guc *guc);
		void (*enable)(struct intel_guc *guc);
		void (*disable)(struct intel_guc *guc);
	} interrupts;

	/**
	 * @submission_state: sub-structure for submission state protected by
	 * a single lock
	 */
	struct {
		/**
		 * @lock: protects everything in submission_state,
		 * ce->guc_id.id, and ce->guc_id.ref when transitioning in and
		 * out of zero
		 */
		spinlock_t lock;
		/**
		 * @guc_ids: used to allocate new guc_ids, single-lrc
		 */
		struct ida guc_ids;
		/**
		 * @num_guc_ids: number of guc_ids; a selftest feature allows
		 * reducing this number while testing.
		 */
		int num_guc_ids;
		/**
		 * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
		 */
		unsigned long *guc_ids_bitmap;
		/**
		 * @guc_id_list: list of intel_context with valid guc_ids but
		 * no refs
		 */
		struct list_head guc_id_list;
		/**
		 * @destroyed_contexts: list of contexts waiting to be
		 * destroyed (deregistered with the GuC)
		 */
		struct list_head destroyed_contexts;
		/**
		 * @destroyed_worker: worker to deregister contexts; needed
		 * because we must take a GT PM reference, which can't be done
		 * from the destroy function as it might run in an atomic
		 * context (no sleeping)
		 */
		struct work_struct destroyed_worker;
		/**
		 * @reset_fail_worker: worker to trigger a GT reset after an
		 * engine reset fails
		 */
		struct work_struct reset_fail_worker;
		/**
		 * @reset_fail_mask: mask of engines that failed to reset
		 */
		intel_engine_mask_t reset_fail_mask;
	} submission_state;

	/**
	 * @submission_supported: tracks whether we support GuC submission on
	 * the current platform
	 */
	bool submission_supported;
	/** @submission_selected: tracks whether the user enabled GuC submission */
	bool submission_selected;
	/** @submission_initialized: tracks whether GuC submission has been initialised */
	bool submission_initialized;
	/**
	 * @rc_supported: tracks whether we support GuC rc on the current
	 * platform
	 */
	bool rc_supported;
	/** @rc_selected: tracks whether the user enabled GuC rc */
	bool rc_selected;

	/** @ads_vma: object allocated to hold the GuC ADS */
	struct i915_vma *ads_vma;
	/** @ads_map: contents of the GuC ADS */
	struct iosys_map ads_map;
	/** @ads_regset_size: size of the save/restore regsets in the ADS */
	u32 ads_regset_size;
	/**
	 * @ads_regset_count: number of save/restore registers in the ADS for
	 * each engine
	 */
	u32 ads_regset_count[I915_NUM_ENGINES];
	/** @ads_regset: save/restore regsets in the ADS */
	struct guc_mmio_reg *ads_regset;
	/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
	u32 ads_golden_ctxt_size;
	/** @ads_capture_size: size of register lists in the ADS used for error capture */
	u32 ads_capture_size;
	/** @ads_engine_usage_size: size of engine usage in the ADS */
	u32 ads_engine_usage_size;

	/**
	 * @context_lookup: used to resolve intel_context from guc_id; if a
	 * context is present in this structure, it is registered with the GuC
	 */
	struct xarray context_lookup;

	/** @params: Control params for fw initialization */
	u32 params[GUC_CTL_MAX_DWORDS];

	/** @send_regs: GuC's FW specific registers used for sending MMIO H2G */
	struct {
		u32 base;
		unsigned int count;
		enum forcewake_domains fw_domains;
	} send_regs;

	/** @notify_reg: register used to send interrupts to the GuC FW */
	i915_reg_t notify_reg;

	/**
	 * @mmio_msg: notification bitmask that the GuC writes in one of its
	 * registers when the CT channel is disabled, to be processed when the
	 * channel is back up.
	 */
	u32 mmio_msg;

	/** @send_mutex: used to serialize the intel_guc_send actions */
	struct mutex send_mutex;

	/**
	 * @timestamp: GT timestamp object that stores a copy of the timestamp
	 * and adjusts it for overflow using a worker.
	 */
	struct {
		/**
		 * @lock: Lock protecting the below fields and the engine stats.
		 */
		spinlock_t lock;

		/**
		 * @gt_stamp: 64-bit extended value of the GT timestamp.
		 */
		u64 gt_stamp;

		/**
		 * @ping_delay: Period for polling the GT timestamp for
		 * overflow.
		 */
		unsigned long ping_delay;

		/**
		 * @work: Periodic work to adjust GT timestamp, engine and
		 * context usage for overflows.
		 */
		struct delayed_work work;

		/**
		 * @shift: Right shift value for the gpm timestamp
		 */
		u32 shift;

		/**
		 * @last_stat_jiffies: jiffies at last actual stats collection
		 * time. We use this timestamp to ensure we don't oversample
		 * the stats because runtime power management events can
		 * trigger stats collection at much higher rates than required.
		 */
		unsigned long last_stat_jiffies;
	} timestamp;

#ifdef CONFIG_DRM_I915_SELFTEST
	/**
	 * @number_guc_id_stolen: The number of guc_ids that have been stolen
	 */
	int number_guc_id_stolen;
#endif
};

static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
	return container_of(log, struct intel_guc, log);
}
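
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * how @context_lookup resolves an intel_context from a guc_id. The real
 * lookup code lives in intel_guc_submission.c. xa_load() returns NULL when
 * the guc_id is not currently registered with the GuC.
 */
struct intel_context;

static inline struct intel_context *
example_context_from_guc_id(struct intel_guc *guc, u32 guc_id)
{
	/* Entries are inserted/removed as contexts (de)register with the GuC */
	return xa_load(&guc->context_lookup, guc_id);
}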

static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0, 0);
}

static
inline int intel_guc_send_nb(struct intel_guc *guc, const u32 *action, u32 len,
			     u32 g2h_len_dw)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0,
				 MAKE_SEND_FLAGS(g2h_len_dw));
}

static inline int
intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
			   u32 *response_buf, u32 response_buf_size)
{
	return intel_guc_ct_send(&guc->ct, action, len,
				 response_buf, response_buf_size, 0);
}
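
/*
 * Illustrative sketch only: the calling convention for the send helpers
 * above. An H2G request is a u32 array whose first dword is the action
 * opcode followed by the action-specific payload; intel_guc_auth_huc()
 * sends INTEL_GUC_ACTION_AUTHENTICATE_HUC in this shape. The helper name
 * below is hypothetical.
 */
static inline int example_guc_action_auth_huc(struct intel_guc *guc,
					      u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset,
	};

	/* Fire-and-forget send; use intel_guc_send_and_receive() for replies */
	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}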

static inline int intel_guc_send_busy_loop(struct intel_guc *guc,
					   const u32 *action,
					   u32 len,
					   u32 g2h_len_dw,
					   bool loop)
{
	int err;
	unsigned int sleep_period_ms = 1;
	bool not_atomic = !in_atomic() && !irqs_disabled();

	/*
	 * FIXME: Have the caller pass in whether we are in an atomic context,
	 * to avoid using in_atomic(). It is likely safe here, as we check for
	 * disabled irqs, which basically all the spin locks in i915 imply,
	 * but regardless this should be cleaned up.
	 */

	/* No sleeping with spin locks, just busy loop */
	might_sleep_if(loop && not_atomic);

retry:
	err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
	if (unlikely(err == -EBUSY && loop)) {
		if (likely(not_atomic)) {
			if (msleep_interruptible(sleep_period_ms))
				return -EINTR;
			sleep_period_ms = sleep_period_ms << 1;
		} else {
			cpu_relax();
		}
		goto retry;
	}

	return err;
}

static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
{
	intel_guc_ct_event_handler(&guc->ct);
}

/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
#define GUC_GGTT_TOP	0xFEE00000

/**
 * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
 * @guc: intel_guc structure.
 * @vma: i915 graphics virtual memory area.
 *
 * GuC does not allow any gfx GGTT address that falls into range
 * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
 * Currently, in order to exclude [0, ggtt.pin_bias) address space from
 * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma()
 * and pinned with PIN_OFFSET_BIAS along with the value of ggtt.pin_bias.
 *
 * Return: GGTT offset of the @vma.
 */
static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
					struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);

	GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));

	return offset;
}

void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_late(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_write_params(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
void intel_guc_notify(struct intel_guc *guc);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr);
int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value);
int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value);
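
/*
 * Illustrative sketch only: allocating a GuC-accessible buffer and deriving
 * the GGTT offset handed to the firmware. intel_guc_allocate_vma() pins
 * above ggtt.pin_bias, so intel_guc_ggtt_offset() on the result is valid by
 * construction. The helper name and PAGE_SIZE-sized buffer are hypothetical
 * choices for the example.
 */
static inline int example_alloc_guc_buffer(struct intel_guc *guc,
					   struct i915_vma **out_vma,
					   u32 *out_offset)
{
	struct i915_vma *vma = intel_guc_allocate_vma(guc, PAGE_SIZE);

	if (IS_ERR(vma))
		return PTR_ERR(vma);

	*out_vma = vma;
	/* This offset can be passed to the GuC FW, e.g. in an H2G action */
	*out_offset = intel_guc_ggtt_offset(guc, vma);
	return 0;
}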

static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
	return intel_uc_fw_is_supported(&guc->fw);
}

static inline bool intel_guc_is_wanted(struct intel_guc *guc)
{
	return intel_uc_fw_is_enabled(&guc->fw);
}

static inline bool intel_guc_is_used(struct intel_guc *guc)
{
	GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED);
	return intel_uc_fw_is_available(&guc->fw);
}

static inline bool intel_guc_is_fw_running(struct intel_guc *guc)
{
	return intel_uc_fw_is_running(&guc->fw);
}

static inline bool intel_guc_is_ready(struct intel_guc *guc)
{
	return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
}

static inline void intel_guc_reset_interrupts(struct intel_guc *guc)
{
	guc->interrupts.reset(guc);
}

static inline void intel_guc_enable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.enable(guc);
}

static inline void intel_guc_disable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.disable(guc);
}

static inline int intel_guc_sanitize(struct intel_guc *guc)
{
	intel_uc_fw_sanitize(&guc->fw);
	intel_guc_disable_interrupts(guc);
	intel_guc_ct_sanitize(&guc->ct);
	guc->mmio_msg = 0;

	return 0;
}

static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask |= mask;
	spin_unlock_irq(&guc->irq_lock);
}

static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask &= ~mask;
	spin_unlock_irq(&guc->irq_lock);
}
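
/*
 * Illustrative sketch only: @msg_enabled_mask gates which notification bits
 * intel_guc_to_host_process_recv_msg() acts on, so a hypothetical caller can
 * pair the helpers above to ignore an event across a window. The mask bit
 * parameter is a placeholder; the real bits are the INTEL_GUC_RECV_MSG_*
 * flags (an assumption based on the intel_guc_fwif.h naming).
 */
static inline void example_guc_msg_window(struct intel_guc *guc, u32 msg_bit)
{
	intel_guc_disable_msg(guc, msg_bit);

	/* ... the event is not processed while the bit is cleared ... */

	intel_guc_enable_msg(guc, msg_bit);
}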

int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout);

int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
					  const u32 *msg, u32 len);
int intel_guc_sched_done_process_msg(struct intel_guc *guc,
				     const u32 *msg, u32 len);
int intel_guc_context_reset_process_msg(struct intel_guc *guc,
					const u32 *msg, u32 len);
int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
					 const u32 *msg, u32 len);
int intel_guc_error_capture_process_msg(struct intel_guc *guc,
					const u32 *msg, u32 len);

struct intel_engine_cs *
intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance);

void intel_guc_find_hung_context(struct intel_engine_cs *engine);

int intel_guc_global_policies_update(struct intel_guc *guc);

void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);

void intel_guc_submission_reset_prepare(struct intel_guc *guc);
void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled);
void intel_guc_submission_reset_finish(struct intel_guc *guc);
void intel_guc_submission_cancel_requests(struct intel_guc *guc);

void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);

void intel_guc_write_barrier(struct intel_guc *guc);

#endif