// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include "gt/intel_gt.h"
#include "intel_guc_ads.h"
#include "intel_uc.h"
#include "i915_drv.h"

/*
 * The Additional Data Struct (ADS) has pointers for different buffers used by
 * the GuC. A single gem object contains the ADS struct itself (guc_ads), the
 * scheduling policies (guc_policies), a structure describing a collection of
 * register sets (guc_mmio_reg_state) and some extra pages for the GuC to save
 * its internal state for sleep.
 */

static void guc_policy_init(struct guc_policy *policy)
{
	policy->execution_quantum = POLICY_DEFAULT_EXECUTION_QUANTUM_US;
	policy->preemption_time = POLICY_DEFAULT_PREEMPTION_TIME_US;
	policy->fault_time = POLICY_DEFAULT_FAULT_TIME_US;
	policy->policy_flags = 0;
}

static void guc_policies_init(struct guc_policies *policies)
{
	struct guc_policy *policy;
	u32 p, i;

	policies->dpc_promote_time = POLICY_DEFAULT_DPC_PROMOTE_TIME_US;
	policies->max_num_work_items = POLICY_MAX_NUM_WI;

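	/* Start every priority / engine-class slot from the same defaults */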
	for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) {
		for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++) {
			policy = &policies->policy[p][i];

			guc_policy_init(policy);
		}
	}

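	/* Tell the GuC that the policies struct has been populated */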
	policies->is_valid = 1;
}

static void guc_ct_pool_entries_init(struct guc_ct_pool_entry *pool, u32 num)
{
	memset(pool, 0, num * sizeof(*pool));
}

/*
 * The first 80 dwords of the register state context, containing the
 * execlists and ppgtt registers.
 */
#define LR_HW_CONTEXT_SIZE	(80 * sizeof(u32))

/* The ads obj includes the struct itself and buffers passed to GuC */
struct __guc_ads_blob {
	struct guc_ads ads;
	struct guc_policies policies;
	struct guc_mmio_reg_state reg_state;
	struct guc_gt_system_info system_info;
	struct guc_clients_info clients_info;
	struct guc_ct_pool_entry ct_pool[GUC_CT_POOL_SIZE];
	u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
} __packed;

static void __guc_ads_init(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	struct __guc_ads_blob *blob = guc->ads_blob;
	const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
	u32 base;
	u8 engine_class;

	/* GuC scheduling policies */
	guc_policies_init(&blob->policies);

	/*
	 * GuC expects a per-engine-class context image and size
	 * (minus hwsp and ring context). The context image will be
	 * used to reinitialize engines after a reset. It must exist
	 * and be pinned in the GGTT, so that the address won't change after
	 * we have told GuC where to find it. The context size will be used
	 * to validate that the LRC base + size fall within the allowed GGTT.
	 */
	for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
		if (engine_class == OTHER_CLASS)
			continue;
		/*
		 * TODO: Set context pointer to default state to allow
		 * GuC to re-init guilty contexts after internal reset.
		 */
		blob->ads.golden_context_lrca[engine_class] = 0;
		blob->ads.eng_state_size[engine_class] =
			intel_engine_context_size(dev_priv, engine_class) -
			skipped_size;
	}

	/* System info */
	blob->system_info.slice_enabled = hweight8(RUNTIME_INFO(dev_priv)->sseu.slice_mask);
	blob->system_info.rcs_enabled = 1;
	blob->system_info.bcs_enabled = 1;

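	/* Media engine (vdbox/vebox) availability and per-vdbox SFC access */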
	blob->system_info.vdbox_enable_mask = VDBOX_MASK(dev_priv);
	blob->system_info.vebox_enable_mask = VEBOX_MASK(dev_priv);
	blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;

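	/*
	 * Every pointer handed to the GuC below is a GGTT address: the blob's
	 * GGTT offset plus the offset of the field within the blob.
	 */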
	base = intel_guc_ggtt_offset(guc, guc->ads_vma);

	/* Clients info */
	guc_ct_pool_entries_init(blob->ct_pool, ARRAY_SIZE(blob->ct_pool));

	blob->clients_info.clients_num = 1;
	blob->clients_info.ct_pool_addr = base + ptr_offset(blob, ct_pool);
	blob->clients_info.ct_pool_count = ARRAY_SIZE(blob->ct_pool);

	/* ADS */
	blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
	blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
	blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
	blob->ads.gt_system_info = base + ptr_offset(blob, system_info);
	blob->ads.clients_info = base + ptr_offset(blob, clients_info);

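	/* Flush the CPU mapping so the GuC sees an up-to-date blob */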
	i915_gem_object_flush_map(guc->ads_vma->obj);
}

/**
 * intel_guc_ads_create() - allocates and initializes GuC ADS.
 * @guc: intel_guc struct
 *
 * The GuC needs a memory block (the Additional Data Struct), where it will
 * store some data. Allocate and initialize such a memory block for GuC use.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_guc_ads_create(struct intel_guc *guc)
{
	const u32 size = PAGE_ALIGN(sizeof(struct __guc_ads_blob));
	struct i915_vma *vma;
	void *blob;
	int ret;

	GEM_BUG_ON(guc->ads_vma);

	vma = intel_guc_allocate_vma(guc, size);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

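	/*
	 * Keep a permanent write-back CPU mapping of the blob; it is reused by
	 * intel_guc_ads_reset() and released along with the vma on destroy.
	 */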
	blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(blob)) {
		ret = PTR_ERR(blob);
		goto err_vma;
	}

	guc->ads_vma = vma;
	guc->ads_blob = blob;

	__guc_ads_init(guc);

	return 0;

err_vma:
	i915_vma_unpin_and_release(&guc->ads_vma, 0);
	return ret;
}

void intel_guc_ads_destroy(struct intel_guc *guc)
{
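	/* Drops both the permanent CPU mapping and the blob's GGTT pin */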
	i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP);
}

/**
 * intel_guc_ads_reset() - prepares GuC Additional Data Struct for reuse
 * @guc: intel_guc struct
 *
 * GuC stores some data in the ADS, which might be stale after a reset.
 * Reinitialize the whole ADS in case any part of it was corrupted during
 * the previous GuC run.
 */
void intel_guc_ads_reset(struct intel_guc *guc)
{
	if (!guc->ads_vma)
		return;
	__guc_ads_init(guc);
}