/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_TYPES__
#define __INTEL_CONTEXT_TYPES__

#include <linux/average.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

#include "i915_active_types.h"
#include "i915_sw_fence.h"
#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"

#include "uc/intel_guc_fwif.h"

#define CONTEXT_REDZONE POISON_INUSE
DECLARE_EWMA(runtime, 3, 8);

struct i915_gem_context;
struct i915_gem_ww_ctx;
struct i915_vma;
struct intel_breadcrumbs;
struct intel_context;
struct intel_ring;

struct intel_context_ops {
	unsigned long flags;
#define COPS_HAS_INFLIGHT_BIT 0
#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)

	int (*alloc)(struct intel_context *ce);

	void (*ban)(struct intel_context *ce, struct i915_request *rq);

	int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
	int (*pin)(struct intel_context *ce, void *vaddr);
	void (*unpin)(struct intel_context *ce);
	void (*post_unpin)(struct intel_context *ce);

	void (*cancel_request)(struct intel_context *ce,
			       struct i915_request *rq);

	void (*enter)(struct intel_context *ce);
	void (*exit)(struct intel_context *ce);

	void (*sched_disable)(struct intel_context *ce);

	void (*reset)(struct intel_context *ce);
	void (*destroy)(struct kref *kref);

	/* virtual/parallel engine/context interface */
	struct intel_context *(*create_virtual)(struct intel_engine_cs **engine,
						unsigned int count,
						unsigned long flags);
	struct intel_context *(*create_parallel)(struct intel_engine_cs **engines,
						 unsigned int num_siblings,
						 unsigned int width);
	struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine,
					       unsigned int sibling);
};
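/*
 * Illustrative sketch only, not called code: roughly the order in which
 * a submission backend (execlists or GuC) is expected to drive the ops
 * above over a context's lifetime. The real call sites live in
 * intel_context.c and the backend code; the sequence is inferred from
 * the op names and their pre/post pairing, so treat it as a reading
 * aid, not a contract.
 *
 *	ce->ops->alloc(ce);               // once, guarded by CONTEXT_ALLOC_BIT
 *	ce->ops->pre_pin(ce, ww, &vaddr); // prepare backing state
 *	ce->ops->pin(ce, vaddr);          // make state visible to HW
 *	ce->ops->enter(ce);               // first request becomes active
 *	...                               // requests execute and retire
 *	ce->ops->exit(ce);                // last active request done
 *	ce->ops->unpin(ce);
 *	ce->ops->post_unpin(ce);          // undo pre_pin
 */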
struct intel_context {
	/*
	 * Note: Some fields may be accessed under RCU.
	 *
	 * Unless otherwise noted a field can safely be assumed to be protected
	 * by strong reference counting.
	 */
	union {
		struct kref ref; /* no kref_get_unless_zero()! */
		struct rcu_head rcu;
	};

	struct intel_engine_cs *engine;
	struct intel_engine_cs *inflight;
#define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
#define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
#define intel_context_inflight(ce) \
	__intel_context_inflight(READ_ONCE((ce)->inflight))
#define intel_context_inflight_count(ce) \
	__intel_context_inflight_count(READ_ONCE((ce)->inflight))
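	/*
	 * Illustrative note on the macros above: struct intel_engine_cs
	 * pointers are at least 8-byte aligned, so the low 3 bits of
	 * @inflight are free to carry a small submission count.
	 * ptr_mask_bits() (i915_utils.h) recovers the clean pointer and
	 * ptr_unmask_bits() the count, e.g. (sketch):
	 *
	 *	engine = intel_context_inflight(ce);      // pointer only
	 *	count = intel_context_inflight_count(ce); // low 3 bits, 0-7
	 */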
	struct i915_address_space *vm;
	struct i915_gem_context __rcu *gem_context;

	/*
	 * @signal_lock protects the list of requests that need signaling,
	 * @signals. While there are any requests that need signaling,
	 * we add the context to the breadcrumbs worker, and remove it
	 * upon completion/cancellation of the last request.
	 */
	struct list_head signal_link; /* Accessed under RCU */
	struct list_head signals; /* Guarded by signal_lock */
	spinlock_t signal_lock; /* protects signals, the list of requests */

	struct i915_vma *state;
	u32 ring_size;
	struct intel_ring *ring;
	struct intel_timeline *timeline;

	unsigned long flags;
#define CONTEXT_BARRIER_BIT		0
#define CONTEXT_ALLOC_BIT		1
#define CONTEXT_INIT_BIT		2
#define CONTEXT_VALID_BIT		3
#define CONTEXT_CLOSED_BIT		4
#define CONTEXT_USE_SEMAPHORES		5
#define CONTEXT_BANNED			6
#define CONTEXT_FORCE_SINGLE_SUBMISSION	7
#define CONTEXT_NOPREEMPT		8
#define CONTEXT_LRCA_DIRTY		9
#define CONTEXT_GUC_INIT		10
#define CONTEXT_PERMA_PIN		11
#define CONTEXT_IS_PARKING		12

	struct {
		u64 timeout_us;
	} watchdog;

	u32 *lrc_reg_state;
	union {
		struct {
			u32 lrca;
			u32 ccid;
		};
		u64 desc;
	} lrc;
	u32 tag; /* cookie passed to HW to track this context on submission */

	/* Time on GPU as tracked by the hw. */
	struct {
		struct ewma_runtime avg;
		u64 total;
		u32 last;
		I915_SELFTEST_DECLARE(u32 num_underflow);
		I915_SELFTEST_DECLARE(u32 max_underflow);
	} runtime;

	unsigned int active_count; /* protected by timeline->mutex */

	atomic_t pin_count;
	struct mutex pin_mutex; /* guards pinning and associated on-gpuing */

	/**
	 * active: Active tracker for the rq activity (inc. external) on this
	 * intel_context object.
	 */
	struct i915_active active;

	const struct intel_context_ops *ops;

	/** sseu: Control eu/slice partitioning */
	struct intel_sseu sseu;

	/**
	 * pinned_contexts_link: List link for the engine's pinned contexts.
	 * This is only used if this is a perma-pinned kernel context; the
	 * list is assumed to only be manipulated during driver load or
	 * unload, so there is currently no mutex protection.
	 */
	struct list_head pinned_contexts_link;

	u8 wa_bb_page; /* if set, page num reserved for context workarounds */

	struct {
		/** @lock: protects everything in guc_state */
		spinlock_t lock;
		/**
		 * @sched_state: scheduling state of this context using GuC
		 * submission
		 */
		u32 sched_state;
		/**
		 * @fences: maintains a list of requests that are currently
		 * being fenced until a GuC operation completes
		 */
		struct list_head fences;
		/**
		 * @blocked: fence used to signal when the blocking of a
		 * context's submissions is complete.
		 */
		struct i915_sw_fence blocked;
		/** @number_committed_requests: number of committed requests */
		int number_committed_requests;
		/** @requests: list of active requests on this context */
		struct list_head requests;
		/** @prio: the context's current GuC priority */
		u8 prio;
		/**
		 * @prio_count: a counter of the number of requests in flight
		 * in each priority bucket
		 */
		u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
	} guc_state;

	struct {
		/**
		 * @id: handle which is used to uniquely identify this context
		 * with the GuC, protected by guc->submission_state.lock
		 */
		u16 id;
		/**
		 * @ref: the number of references to the guc_id, when
		 * transitioning in and out of zero protected by
		 * guc->submission_state.lock
		 */
		atomic_t ref;
		/**
		 * @link: in guc->guc_id_list when the guc_id has no refs but is
		 * still valid, protected by guc->submission_state.lock
		 */
		struct list_head link;
	} guc_id;

	/**
	 * @destroyed_link: link in guc->submission_state.destroyed_contexts, in
	 * list when context is pending to be destroyed (deregistered with the
	 * GuC), protected by guc->submission_state.lock
	 */
	struct list_head destroyed_link;

	/** @parallel: sub-structure for parallel submission members */
	struct {
		union {
			/**
			 * @child_list: parent's list of children
			 * contexts, no protection as immutable after context
			 * creation
			 */
			struct list_head child_list;
			/**
			 * @child_link: child's link into parent's list of
			 * children
			 */
			struct list_head child_link;
		};
		/** @parent: pointer to parent if child */
		struct intel_context *parent;
		/**
		 * @last_rq: last request submitted on a parallel context, used
		 * to insert submit fences between requests in the parallel
		 * context
		 */
		struct i915_request *last_rq;
		/**
		 * @fence_context: fence context of the composite fence when
		 * doing parallel submission
		 */
		u64 fence_context;
		/**
		 * @seqno: seqno for composite fence when doing parallel
		 * submission
		 */
		u32 seqno;
		/** @number_children: number of children if parent */
		u8 number_children;
		/** @child_index: index into child_list if child */
		u8 child_index;
		/** @guc: GuC specific members for parallel submission */
		struct {
			/** @wqi_head: head pointer in work queue */
			u16 wqi_head;
			/** @wqi_tail: tail pointer in work queue */
			u16 wqi_tail;
			/**
			 * @parent_page: page in context state (ce->state) used
			 * by parent for work queue, process descriptor
			 */
			u8 parent_page;
		} guc;
	} parallel;

#ifdef CONFIG_DRM_I915_SELFTEST
	/**
	 * @drop_schedule_enable: Force drop of schedule enable G2H for selftest
	 */
	bool drop_schedule_enable;

	/**
	 * @drop_schedule_disable: Force drop of schedule disable G2H for
	 * selftest
	 */
	bool drop_schedule_disable;

	/**
	 * @drop_deregister: Force drop of deregister G2H for selftest
	 */
	bool drop_deregister;
#endif
};

#endif /* __INTEL_CONTEXT_TYPES__ */
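/*
 * Illustrative sketch only, derived from the @parallel kernel-doc above
 * (not a normative diagram): a width-3 parallel context is one parent
 * plus two children,
 *
 *	parent->parallel.number_children == 2
 *	parent->parallel.child_list heads the list of children
 *	child->parallel.child_link sits on that list
 *	child->parallel.parent == parent, child->parallel.child_index = 0, 1
 *
 * where fence_context/seqno name the composite fence covering each
 * width-wide submission and last_rq remembers the final request so the
 * next submission can insert its submit fences behind it.
 */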