/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "igt_spinner.h"

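/*
 * igt_spinner: a batch buffer that occupies an engine indefinitely.
 *
 * The batch writes its request's breadcrumb to a private status page
 * (the HWS) and then branches back to its own start, looping until
 * igt_spinner_end() rewrites its first instruction to
 * MI_BATCH_BUFFER_END.
 *
 * A rough usage sketch (error handling elided; MI_NOOP stands in for
 * the caller's choice of arbitration command):
 *
 *	struct igt_spinner spin;
 *	struct i915_request *rq;
 *
 *	igt_spinner_init(&spin, i915);
 *	rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
 *	i915_request_add(rq);
 *	if (igt_wait_for_spinner(&spin, rq)) {
 *		... the engine is now busywaiting inside the spinner ...
 *	}
 *	igt_spinner_end(&spin);
 *	igt_spinner_fini(&spin);
 */
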
int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
{
	unsigned int mode;
	void *vaddr;
	int err;

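	/* The recursive batch is emitted in its gen8 form (64b addresses). */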
	GEM_BUG_ON(INTEL_GEN(i915) < 8);

	memset(spin, 0, sizeof(*spin));
	spin->i915 = i915;

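	/* A page for the spinner to report its progress; a private HWS. */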
	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

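	/*
	 * The CPU polls the breadcrumbs the GPU writes into the HWS, so
	 * keep the page LLC-cached and mapped write-back for coherent
	 * reads.
	 */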
	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

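	/*
	 * The batch is rewritten by the CPU while the GPU may be executing
	 * it (see igt_spinner_end()), so pick a mapping type that keeps
	 * those writes coherent with the GPU.
	 */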
	mode = i915_coherent_map_type(i915);
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}

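/* Each fence context gets its own breadcrumb slot within the HWS page. */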
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

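/* GPU address of this request's breadcrumb slot within the HWS. */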
static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + seqno_offset(rq->fence.context);
}

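/*
 * Build and submit the spinning batch: report the request's seqno to the
 * HWS, optionally yield at the caller's arbitration point, then branch
 * back to the start of the batch and loop forever.
 */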
static int emit_recurse_batch(struct igt_spinner *spin,
			      struct i915_request *rq,
			      u32 arbitration_command)
{
	struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
	struct i915_vma *hws, *vma;
	u32 *batch;
	int err;

	vma = i915_vma_instance(spin->obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	hws = i915_vma_instance(spin->hws, vm, NULL);
	if (IS_ERR(hws))
		return PTR_ERR(hws);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	err = i915_vma_move_to_active(vma, rq, 0);
	if (err)
		goto unpin_hws;

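	/*
	 * Take an active reference so each object stays alive until the
	 * request is retired, even after our local references are dropped.
	 */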
	if (!i915_gem_object_has_active_reference(vma->obj)) {
		i915_gem_object_get(vma->obj);
		i915_gem_object_set_active_reference(vma->obj);
	}

	err = i915_vma_move_to_active(hws, rq, 0);
	if (err)
		goto unpin_hws;

	if (!i915_gem_object_has_active_reference(hws->obj)) {
		i915_gem_object_get(hws->obj);
		i915_gem_object_set_active_reference(hws->obj);
	}

	batch = spin->batch;

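	/* Publish the request's seqno to its slot in the HWS. */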
	*batch++ = MI_STORE_DWORD_IMM_GEN4;
	*batch++ = lower_32_bits(hws_address(hws, rq));
	*batch++ = upper_32_bits(hws_address(hws, rq));
	*batch++ = rq->fence.seqno;

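	/*
	 * Caller-chosen arbitration point, e.g. MI_ARB_CHECK to allow
	 * preemption while spinning.
	 */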
	*batch++ = arbitration_command;

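	/* Branch back to the start of this batch; spin until rewritten. */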
	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);
	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

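	/* Make the batch contents visible to the GPU before it runs. */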
	i915_gem_chipset_flush(spin->i915);

	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);

unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	return err;
}

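/*
 * Allocate a request on the given engine and context and attach the
 * spinning batch to it. On success, the caller is responsible for
 * submitting the request with i915_request_add().
 */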
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine,
			   u32 arbitration_command)
{
	struct i915_request *rq;
	int err;

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq))
		return rq;

	err = emit_recurse_batch(spin, rq, arbitration_command);
	if (err) {
		i915_request_add(rq);
		return ERR_PTR(err);
	}

	return rq;
}

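/* Read back the breadcrumb the spinner wrote for this request's context. */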
static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}

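/*
 * Terminate the spinner: overwrite the first instruction of the batch
 * with MI_BATCH_BUFFER_END so the loop exits on its next pass.
 */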
void igt_spinner_end(struct igt_spinner *spin)
{
	*spin->batch = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(spin->i915);
}

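/* Stop the spinner and release its pages. */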
void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);
}

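/*
 * Wait until the spinner is executing on the GPU, i.e. the request has
 * been submitted to hardware and its breadcrumb has shown up in the HWS.
 * Returns false if the spinner did not start within a short timeout.
 */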
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
	if (!wait_event_timeout(rq->execute,
				READ_ONCE(rq->global_seqno),
				msecs_to_jiffies(10)))
		return false;

	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  1000));
}