/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"

#include "gem/selftests/igt_gem_utils.h"

#include "igt_spinner.h"
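/*
 * igt_spinner provides a batch that spins on the GPU until explicitly
 * terminated, used by selftests to keep an engine or context busy.
 * Typical usage is roughly:
 *
 *	igt_spinner_init(&spin, gt);
 *	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
 *	i915_request_get(rq);
 *	i915_request_add(rq);
 *	if (!igt_wait_for_spinner(&spin, rq))
 *		// spinner never started, handle the error
 *	// ... exercise the code under test while the engine is busy ...
 *	igt_spinner_end(&spin);
 *	i915_request_put(rq);
 *	igt_spinner_fini(&spin);
 */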
/*
 * Create and map the two backing pages used by the spinner: one page for
 * the heartbeat seqno writes (hws) and one page for the spinning batch
 * itself. Both stay pinned and kernel-mapped until igt_spinner_fini().
 */
int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{
	unsigned int mode;
	void *vaddr;
	int err;

	memset(spin, 0, sizeof(*spin));
	spin->gt = gt;

	spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	/* Poison the seqno page so every slot reads as not-yet-written. */
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	mode = i915_coherent_map_type(gt->i915);
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}
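/* Each fence context gets its own seqno slot within the HWS page. */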
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + seqno_offset(rq->fence.context);
}
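/*
 * Order the request against previous users of the object and keep the vma
 * tracked as busy until the request is retired.
 */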
static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj,
					flags & EXEC_OBJECT_WRITE);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}
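/*
 * Build a request whose batch writes the request's seqno to the HWS page
 * and then branches back to itself, spinning indefinitely until the first
 * instruction of the batch is replaced by MI_BATCH_BUFFER_END in
 * igt_spinner_end().
 */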
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct intel_context *ce,
			   u32 arbitration_command)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	unsigned int flags;
	u32 *batch;
	int err;

	GEM_BUG_ON(spin->gt != ce->vm->gt);

	if (!intel_engine_can_store_dword(ce->engine))
		return ERR_PTR(-ENODEV);

	vma = i915_vma_instance(spin->obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return ERR_CAST(vma);

	hws = i915_vma_instance(spin->hws, ce->vm, NULL);
	if (IS_ERR(hws))
		return ERR_CAST(hws);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return ERR_PTR(err);

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin_hws;
	}

	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = move_to_active(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = spin->batch;

	/* Store the request's seqno into this context's slot in the HWS page. */
	if (INTEL_GEN(rq->engine->i915) >= 8) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = upper_32_bits(hws_address(hws, rq));
	} else if (INTEL_GEN(rq->engine->i915) >= 6) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else if (INTEL_GEN(rq->engine->i915) >= 4) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else {
		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*batch++ = hws_address(hws, rq);
	}
	*batch++ = rq->fence.seqno;

	/* The caller decides whether arbitration (preemption) is allowed here. */
	*batch++ = arbitration_command;

	/* Branch back to the start of the batch, spinning until terminated. */
	if (INTEL_GEN(rq->engine->i915) >= 8)
		*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
	else if (IS_HASWELL(rq->engine->i915))
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
	else if (INTEL_GEN(rq->engine->i915) >= 6)
		*batch++ = MI_BATCH_BUFFER_START;
	else
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);

	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

	/* Flush our CPU writes to the batch before the GPU executes it. */
	intel_gt_chipset_flush(engine->gt);

	if (engine->emit_init_breadcrumb) {
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

	/* Older gens require the store above to run from a privileged batch. */
	flags = 0;
	if (INTEL_GEN(rq->engine->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;
	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);

cancel_rq:
	if (err) {
		i915_request_set_error_once(rq, err);
		i915_request_add(rq);
	}
unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	return err ? ERR_PTR(err) : rq;
}
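/* Read back the seqno the spinner wrote for this request's fence context. */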
static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}
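/* Terminate the spin loop by overwriting the first instruction of the batch. */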
void igt_spinner_end(struct igt_spinner *spin)
{
	*spin->batch = MI_BATCH_BUFFER_END;
	intel_gt_chipset_flush(spin->gt);
}
void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);
}
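/*
 * Wait for the spinner to start executing on the GPU: first a short
 * busy-wait for the common case, then a longer sleeping wait before
 * reporting failure.
 */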
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
	if (i915_request_is_ready(rq))
		intel_engine_flush_submission(rq->engine);

	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     100) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  50));
}