/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_TYPES__
#define __INTEL_CONTEXT_TYPES__

#include <linux/average.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

#include "i915_active_types.h"
#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"

/* Poison pattern used to red-zone context state (see CONTEXT_REDZONE users). */
#define CONTEXT_REDZONE POISON_INUSE

/* EWMA type (struct ewma_runtime) used below to average GPU runtime samples. */
DECLARE_EWMA(runtime, 3, 8);

struct i915_gem_context;
struct i915_gem_ww_ctx;
struct i915_vma;
struct intel_breadcrumbs;
struct intel_context;
struct intel_ring;

/*
 * Per-backend virtual operations for an intel_context.
 *
 * NOTE(review): pairing is implied by the names — pre_pin/post_unpin
 * bracket pin/unpin, and enter/exit bracket active use; confirm the exact
 * call ordering against the submission backends that install these ops.
 */
struct intel_context_ops {
	unsigned long flags; /* COPS_* capability bits, see below */
#define COPS_HAS_INFLIGHT_BIT 0
#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)

	/* One-time allocation of backend state for @ce. */
	int (*alloc)(struct intel_context *ce);

	int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
	int (*pin)(struct intel_context *ce, void *vaddr);
	void (*unpin)(struct intel_context *ce);
	void (*post_unpin)(struct intel_context *ce);

	void (*enter)(struct intel_context *ce);
	void (*exit)(struct intel_context *ce);

	void (*reset)(struct intel_context *ce);
	/* Final release; invoked via kref_put() on intel_context.ref. */
	void (*destroy)(struct kref *kref);
};

struct intel_context {
	/*
	 * Note: Some fields may be accessed under RCU.
	 *
	 * Unless otherwise noted a field can safely be assumed to be protected
	 * by strong reference counting.
	 */
	union {
		struct kref ref; /* no kref_get_unless_zero()! */
		struct rcu_head rcu; /* reused for deferred free once ref hits 0 */
	};

	struct intel_engine_cs *engine;
	/*
	 * Engine currently executing this context, with a submission count
	 * packed into the pointer's low 2 bits (hence the mask value 3).
	 * Read with READ_ONCE() via the accessors below.
	 */
	struct intel_engine_cs *inflight;
#define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
#define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
#define intel_context_inflight(ce) \
	__intel_context_inflight(READ_ONCE((ce)->inflight))
#define intel_context_inflight_count(ce) \
	__intel_context_inflight_count(READ_ONCE((ce)->inflight))

	struct i915_address_space *vm;
	struct i915_gem_context __rcu *gem_context;

	/*
	 * @signal_lock protects the list of requests that need signaling,
	 * @signals. While there are any requests that need signaling,
	 * we add the context to the breadcrumbs worker, and remove it
	 * upon completion/cancellation of the last request.
	 */
	struct list_head signal_link; /* Accessed under RCU */
	struct list_head signals; /* Guarded by signal_lock */
	spinlock_t signal_lock; /* protects signals, the list of requests */

	struct i915_vma *state; /* backing vma for the HW context image */
	struct intel_ring *ring;
	struct intel_timeline *timeline;

	unsigned long flags; /* CONTEXT_* bits below; test/set atomically */
#define CONTEXT_BARRIER_BIT		0
#define CONTEXT_ALLOC_BIT		1
#define CONTEXT_INIT_BIT		2
#define CONTEXT_VALID_BIT		3
#define CONTEXT_CLOSED_BIT		4
#define CONTEXT_USE_SEMAPHORES		5
#define CONTEXT_BANNED			6
#define CONTEXT_FORCE_SINGLE_SUBMISSION	7
#define CONTEXT_NOPREEMPT		8

	struct {
		u64 timeout_us; /* request watchdog timeout, microseconds */
	} watchdog;

	u32 *lrc_reg_state; /* kernel mapping of the context register state */
	union {
		struct {
			u32 lrca; /* logical ring context address */
			u32 ccid; /* context submission identifier */
		};
		u64 desc; /* combined 64bit context descriptor */
	} lrc;
	u32 tag; /* cookie passed to HW to track this context on submission */

	/* Time on GPU as tracked by the hw. */
	struct {
		struct ewma_runtime avg; /* smoothed runtime (DECLARE_EWMA above) */
		u64 total; /* accumulated runtime */
		u32 last; /* most recent raw HW sample */
		I915_SELFTEST_DECLARE(u32 num_underflow);
		I915_SELFTEST_DECLARE(u32 max_underflow);
	} runtime;

	unsigned int active_count; /* protected by timeline->mutex */

	atomic_t pin_count;
	struct mutex pin_mutex; /* guards pinning and associated on-gpuing */

	/**
	 * active: Active tracker for the rq activity (inc. external) on this
	 * intel_context object.
	 */
	struct i915_active active;

	const struct intel_context_ops *ops; /* backend vfuncs, see above */

	/** sseu: Control eu/slice partitioning */
	struct intel_sseu sseu;

	u8 wa_bb_page; /* if set, page num reserved for context workarounds */
};

#endif /* __INTEL_CONTEXT_TYPES__ */