/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#ifndef _INTEL_GUC_H_
#define _INTEL_GUC_H_

#include <linux/delay.h>
#include <linux/iosys-map.h>
#include <linux/xarray.h>

#include "intel_guc_ct.h"
#include "intel_guc_fw.h"
#include "intel_guc_fwif.h"
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_guc_slpc_types.h"
#include "intel_uc_fw.h"
#include "intel_uncore.h"
#include "i915_utils.h"
#include "i915_vma.h"

struct __guc_ads_blob;
struct intel_guc_state_capture;

/**
 * struct intel_guc - Top level structure of GuC.
 *
 * It handles firmware loading and manages the client pool. intel_guc owns an
 * i915_sched_engine for submission.
 */
struct intel_guc {
	/** @fw: the GuC firmware */
	struct intel_uc_fw fw;
	/** @log: sub-structure containing GuC log related data and objects */
	struct intel_guc_log log;
	/** @ct: the command transport communication channel */
	struct intel_guc_ct ct;
	/** @slpc: sub-structure containing SLPC related data and objects */
	struct intel_guc_slpc slpc;
	/** @capture: the error-state-capture module's data and objects */
	struct intel_guc_state_capture *capture;

	/** @sched_engine: Global engine used to submit requests to GuC */
	struct i915_sched_engine *sched_engine;
	/**
	 * @stalled_request: if GuC can't process a request for any reason, we
	 * save it until GuC restarts processing. No other request can be
	 * submitted until the stalled request is processed.
	 */
	struct i915_request *stalled_request;
	/**
	 * @submission_stall_reason: reason why submission is stalled
	 */
	enum {
		STALL_NONE,
		STALL_REGISTER_CONTEXT,
		STALL_MOVE_LRC_TAIL,
		STALL_ADD_REQUEST,
	} submission_stall_reason;

	/* intel_guc_recv interrupt related state */
	/** @irq_lock: protects GuC irq state */
	spinlock_t irq_lock;
	/**
	 * @msg_enabled_mask: mask of events that are processed when receiving
	 * an INTEL_GUC_ACTION_DEFAULT G2H message.
	 */
	unsigned int msg_enabled_mask;

	/**
	 * @outstanding_submission_g2h: number of outstanding GuC to Host
	 * responses related to GuC submission, used to determine if the GT is
	 * idle
	 */
	atomic_t outstanding_submission_g2h;

	/** @interrupts: pointers to GuC interrupt-managing functions. */
	struct {
		void (*reset)(struct intel_guc *guc);
		void (*enable)(struct intel_guc *guc);
		void (*disable)(struct intel_guc *guc);
	} interrupts;

	/**
	 * @submission_state: sub-structure for submission state protected by
	 * a single lock
	 */
	struct {
		/**
		 * @lock: protects everything in submission_state,
		 * ce->guc_id.id, and ce->guc_id.ref when transitioning in and
		 * out of zero
		 */
		spinlock_t lock;
		/**
		 * @guc_ids: used to allocate new guc_ids, single-lrc
		 */
		struct ida guc_ids;
		/**
		 * @num_guc_ids: Number of guc_ids; a selftest feature allows
		 * this number to be reduced while testing.
		 */
		int num_guc_ids;
		/**
		 * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
		 */
		unsigned long *guc_ids_bitmap;
		/**
		 * @guc_id_list: list of intel_context with valid guc_ids but
		 * no refs
		 */
		struct list_head guc_id_list;
		/**
		 * @guc_ids_in_use: Number of single-lrc guc_ids in use
		 */
		unsigned int guc_ids_in_use;
		/**
		 * @destroyed_contexts: list of contexts waiting to be destroyed
		 * (deregistered with the GuC)
		 */
		struct list_head destroyed_contexts;
		/**
		 * @destroyed_worker: worker to deregister contexts, needed
		 * because we must take a GT PM reference, which cannot be done
		 * from the destroy function as it might run in an atomic
		 * context (no sleeping)
		 */
		struct work_struct destroyed_worker;
		/**
		 * @reset_fail_worker: worker to trigger a GT reset after an
		 * engine reset fails
		 */
		struct work_struct reset_fail_worker;
		/**
		 * @reset_fail_mask: mask of engines that failed to reset
		 */
		intel_engine_mask_t reset_fail_mask;
		/**
		 * @sched_disable_delay_ms: schedule disable delay, in ms, for
		 * contexts
		 */
		unsigned int sched_disable_delay_ms;
		/**
		 * @sched_disable_gucid_threshold: minimum number of remaining
		 * available guc_ids below which we start bypassing the
		 * schedule disable delay
		 */
		unsigned int sched_disable_gucid_threshold;
	} submission_state;

	/**
	 * @submission_supported: tracks whether we support GuC submission on
	 * the current platform
	 */
	bool submission_supported;
	/** @submission_selected: tracks whether the user enabled GuC submission */
	bool submission_selected;
	/** @submission_initialized: tracks whether GuC submission has been initialized */
	bool submission_initialized;
	/**
	 * @rc_supported: tracks whether we support GuC rc on the current
	 * platform
	 */
	bool rc_supported;
	/** @rc_selected: tracks whether the user enabled GuC rc */
	bool rc_selected;

	/** @ads_vma: object allocated to hold the GuC ADS */
	struct i915_vma *ads_vma;
	/** @ads_map: contents of the GuC ADS */
	struct iosys_map ads_map;
	/** @ads_regset_size: size of the save/restore regsets in the ADS */
	u32 ads_regset_size;
	/**
	 * @ads_regset_count: number of save/restore registers in the ADS for
	 * each engine
	 */
	u32 ads_regset_count[I915_NUM_ENGINES];
	/** @ads_regset: save/restore regsets in the ADS */
	struct guc_mmio_reg *ads_regset;
	/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
	u32 ads_golden_ctxt_size;
	/** @ads_capture_size: size of register lists in the ADS used for error capture */
	u32 ads_capture_size;
	/** @ads_engine_usage_size: size of engine usage in the ADS */
	u32 ads_engine_usage_size;

	/** @lrc_desc_pool_v69: object allocated to hold the GuC LRC descriptor pool */
	struct i915_vma *lrc_desc_pool_v69;
	/** @lrc_desc_pool_vaddr_v69: contents of the GuC LRC descriptor pool */
	void *lrc_desc_pool_vaddr_v69;

	/**
	 * @context_lookup: used to resolve intel_context from guc_id; if a
	 * context is present in this structure it is registered with the GuC
	 */
	struct xarray context_lookup;

	/** @params: Control params for fw initialization */
	u32 params[GUC_CTL_MAX_DWORDS];

	/** @send_regs: GuC's FW specific registers used for sending MMIO H2G */
	struct {
		u32 base;
		unsigned int count;
		enum forcewake_domains fw_domains;
	} send_regs;

	/** @notify_reg: register used to send interrupts to the GuC FW */
	i915_reg_t notify_reg;

	/**
	 * @mmio_msg: notification bitmask that the GuC writes in one of its
	 * registers when the CT channel is disabled, to be processed when the
	 * channel is back up.
	 */
	u32 mmio_msg;

	/** @send_mutex: used to serialize the intel_guc_send actions */
	struct mutex send_mutex;

	/**
	 * @timestamp: GT timestamp object that stores a copy of the timestamp
	 * and adjusts it for overflow using a worker.
	 */
	struct {
		/**
		 * @lock: Lock protecting the below fields and the engine stats.
		 */
		spinlock_t lock;

		/**
		 * @gt_stamp: 64-bit extended value of the GT timestamp.
		 */
		u64 gt_stamp;

		/**
		 * @ping_delay: Period for polling the GT timestamp for
		 * overflow.
		 */
		unsigned long ping_delay;

		/**
		 * @work: Periodic work to adjust GT timestamp, engine and
		 * context usage for overflows.
		 */
		struct delayed_work work;

		/**
		 * @shift: Right shift value for the gpm timestamp
		 */
		u32 shift;

		/**
		 * @last_stat_jiffies: jiffies at last actual stats collection
		 * time. We use this timestamp to ensure we don't oversample
		 * the stats because runtime power management events can
		 * trigger stats collection at much higher rates than required.
		 */
		unsigned long last_stat_jiffies;
	} timestamp;

#ifdef CONFIG_DRM_I915_SELFTEST
	/**
	 * @number_guc_id_stolen: The number of guc_ids that have been stolen
	 */
	int number_guc_id_stolen;
#endif
};
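/*
 * Illustrative sketch, not part of the driver API: @context_lookup above is a
 * plain xarray keyed by guc_id, so resolving a registered context is a
 * lockless xa_load(). The helper name here is hypothetical.
 */
static inline struct intel_context *
guc_example_context_lookup(struct intel_guc *guc, u32 guc_id)
{
	/* Returns NULL if no context is registered under this guc_id */
	return xa_load(&guc->context_lookup, guc_id);
}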
static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
	return container_of(log, struct intel_guc, log);
}

static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0, 0);
}

static
inline int intel_guc_send_nb(struct intel_guc *guc, const u32 *action, u32 len,
			     u32 g2h_len_dw)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0,
				 MAKE_SEND_FLAGS(g2h_len_dw));
}

static inline int
intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
			   u32 *response_buf, u32 response_buf_size)
{
	return intel_guc_ct_send(&guc->ct, action, len,
				 response_buf, response_buf_size, 0);
}
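/*
 * Usage sketch, illustrative only: an H2G request is an array of u32s whose
 * first element is the action opcode, optionally followed by parameters. The
 * opcode and helper below are hypothetical; real opcodes live in
 * intel_guc_fwif.h and the GuC ABI headers.
 */
static inline int guc_example_query(struct intel_guc *guc, u32 param, u32 *out)
{
	u32 action[] = {
		0x0,	/* hypothetical action opcode, for illustration */
		param,
	};

	/* Blocking send; a single-dword response is written to *out */
	return intel_guc_send_and_receive(guc, action, ARRAY_SIZE(action),
					  out, 1);
}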
static inline int intel_guc_send_busy_loop(struct intel_guc *guc,
					   const u32 *action,
					   u32 len,
					   u32 g2h_len_dw,
					   bool loop)
{
	int err;
	unsigned int sleep_period_ms = 1;
	bool not_atomic = !in_atomic() && !irqs_disabled();

	/*
	 * FIXME: Have the caller pass in whether we are in an atomic context,
	 * to avoid using in_atomic(). It is likely safe here, as we also check
	 * for irqs being disabled, which is what basically all the spin locks
	 * in i915 do, but regardless this should be cleaned up.
	 */

	/* No sleeping with spin locks, just busy loop */
	might_sleep_if(loop && not_atomic);

retry:
	err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
	if (unlikely(err == -EBUSY && loop)) {
		if (likely(not_atomic)) {
			if (msleep_interruptible(sleep_period_ms))
				return -EINTR;
			sleep_period_ms = sleep_period_ms << 1;
		} else {
			cpu_relax();
		}
		goto retry;
	}

	return err;
}

static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
{
	intel_guc_ct_event_handler(&guc->ct);
}

/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
#define GUC_GGTT_TOP	0xFEE00000

/**
 * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
 * @guc: intel_guc structure.
 * @vma: i915 graphics virtual memory area.
 *
 * GuC does not allow any gfx GGTT address that falls into range
 * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
 * Currently, in order to exclude the [0, ggtt.pin_bias) address space from
 * the GGTT, all gfx objects used by GuC are allocated with
 * intel_guc_allocate_vma() and pinned with PIN_OFFSET_BIAS along with the
 * value of ggtt.pin_bias.
 *
 * Return: GGTT offset of the @vma.
 */
static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
					struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);

	GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));

	return offset;
}

void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_late(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_write_params(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
void intel_guc_notify(struct intel_guc *guc);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr);
int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value);
int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value);
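/*
 * Allocation sketch, illustrative only (the helper name is hypothetical, and
 * SZ_4K is assumed available via the usual kernel headers): objects shared
 * with the GuC are allocated through intel_guc_allocate_vma() or
 * intel_guc_allocate_and_map_vma(), which pin above ggtt.pin_bias so that
 * intel_guc_ggtt_offset() yields an address the GuC is allowed to use.
 */
static inline int guc_example_alloc(struct intel_guc *guc, u32 *offset_out)
{
	struct i915_vma *vma;
	void *vaddr;
	int err;

	err = intel_guc_allocate_and_map_vma(guc, SZ_4K, &vma, &vaddr);
	if (err)
		return err;

	/* Guaranteed to be >= ggtt.pin_bias and below GUC_GGTT_TOP */
	*offset_out = intel_guc_ggtt_offset(guc, vma);
	return 0;
}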
static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
	return intel_uc_fw_is_supported(&guc->fw);
}

static inline bool intel_guc_is_wanted(struct intel_guc *guc)
{
	return intel_uc_fw_is_enabled(&guc->fw);
}

static inline bool intel_guc_is_used(struct intel_guc *guc)
{
	GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED);
	return intel_uc_fw_is_available(&guc->fw);
}

static inline bool intel_guc_is_fw_running(struct intel_guc *guc)
{
	return intel_uc_fw_is_running(&guc->fw);
}

static inline bool intel_guc_is_ready(struct intel_guc *guc)
{
	return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
}

static inline void intel_guc_reset_interrupts(struct intel_guc *guc)
{
	guc->interrupts.reset(guc);
}

static inline void intel_guc_enable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.enable(guc);
}

static inline void intel_guc_disable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.disable(guc);
}

static inline int intel_guc_sanitize(struct intel_guc *guc)
{
	intel_uc_fw_sanitize(&guc->fw);
	intel_guc_disable_interrupts(guc);
	intel_guc_ct_sanitize(&guc->ct);
	guc->mmio_msg = 0;

	return 0;
}

static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask |= mask;
	spin_unlock_irq(&guc->irq_lock);
}

static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask &= ~mask;
	spin_unlock_irq(&guc->irq_lock);
}
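/*
 * Sketch of how a G2H handler might consult the mask set above (illustrative
 * only; the helper and mask bit are hypothetical). Readers take @irq_lock so
 * they observe a mask consistent with intel_guc_enable_msg() and
 * intel_guc_disable_msg().
 */
static inline bool guc_example_msg_wanted(struct intel_guc *guc, u32 msg_bit)
{
	bool wanted;

	spin_lock_irq(&guc->irq_lock);
	wanted = guc->msg_enabled_mask & msg_bit;
	spin_unlock_irq(&guc->irq_lock);

	return wanted;
}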
int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout);

int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
					  const u32 *msg, u32 len);
int intel_guc_sched_done_process_msg(struct intel_guc *guc,
				     const u32 *msg, u32 len);
int intel_guc_context_reset_process_msg(struct intel_guc *guc,
					const u32 *msg, u32 len);
int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
					 const u32 *msg, u32 len);
int intel_guc_error_capture_process_msg(struct intel_guc *guc,
					const u32 *msg, u32 len);

struct intel_engine_cs *
intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance);

void intel_guc_find_hung_context(struct intel_engine_cs *engine);

int intel_guc_global_policies_update(struct intel_guc *guc);

void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);

void intel_guc_submission_reset_prepare(struct intel_guc *guc);
void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled);
void intel_guc_submission_reset_finish(struct intel_guc *guc);
void intel_guc_submission_cancel_requests(struct intel_guc *guc);

void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);

void intel_guc_write_barrier(struct intel_guc *guc);

void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p);

int intel_guc_sched_disable_gucid_threshold_max(struct intel_guc *guc);

#endif