/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "gem/selftests/igt_gem_utils.h"

#include "igt_spinner.h"

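/*
 * igt_spinner keeps an engine busy on demand: igt_spinner_create_request()
 * builds a batch that reports its seqno to a scratch page (the "HWS") and
 * then loops on itself until igt_spinner_end() rewrites the loop into a
 * batch terminator.
 */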
int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
{
	unsigned int mode;
	void *vaddr;
	int err;

	GEM_BUG_ON(INTEL_GEN(i915) < 8);

	memset(spin, 0, sizeof(*spin));
	spin->i915 = i915;

	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

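	/*
	 * Keep the HWS page CPU-cacheable so we can cheaply poll for the
	 * seqno the spinner writes; poison it with 0xff so an unwritten
	 * slot is never mistaken for a completed seqno.
	 */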
	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

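	/*
	 * Map the batch with whatever mapping is coherent for this
	 * platform, as the CPU rewrites it (see igt_spinner_end) while
	 * the GPU may still be spinning on it.
	 */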
	mode = i915_coherent_map_type(i915);
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}

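/* Each fence context gets its own u32 slot in the HWS page. */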
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

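/* GPU address of the request's seqno slot within the HWS page. */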
static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + seqno_offset(rq->fence.context);
}

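/* Mark the vma as being used by the request, under the vma lock. */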
static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}

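/*
 * Build a request whose batch publishes its seqno to the HWS and then
 * spins by branching back to its own start. The arbitration_command
 * (e.g. MI_ARB_CHECK) controls whether the loop may be preempted.
 */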
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine,
			   u32 arbitration_command)
{
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	u32 *batch;
	int err;

	vma = i915_vma_instance(spin->obj, ctx->vm, NULL);
	if (IS_ERR(vma))
		return ERR_CAST(vma);

	hws = i915_vma_instance(spin->hws, ctx->vm, NULL);
	if (IS_ERR(hws))
		return ERR_CAST(hws);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return ERR_PTR(err);

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin_hws;
	}

	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = move_to_active(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = spin->batch;

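	/* Publish this request's seqno into its slot in the HWS page. */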
	*batch++ = MI_STORE_DWORD_IMM_GEN4;
	*batch++ = lower_32_bits(hws_address(hws, rq));
	*batch++ = upper_32_bits(hws_address(hws, rq));
	*batch++ = rq->fence.seqno;

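	/*
	 * Caller-chosen arbitration point, e.g. MI_ARB_CHECK to allow
	 * the loop below to be preempted.
	 */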
	*batch++ = arbitration_command;

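	/* Branch back to the start of this batch, spinning until rewritten. */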
	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);
	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

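	/* Make sure the GPU sees the CPU's writes to the batch. */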
	i915_gem_chipset_flush(spin->i915);

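	/*
	 * Emit the engine's initial breadcrumb, if any, so the request
	 * is tracked as started before the spinning payload begins.
	 */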
	if (engine->emit_init_breadcrumb &&
	    rq->timeline->has_initial_breadcrumb) {
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);

cancel_rq:
	if (err) {
		i915_request_skip(rq, err);
		i915_request_add(rq);
	}
unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	return err ? ERR_PTR(err) : rq;
}

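/* Read the seqno the spinner wrote into this request's HWS slot. */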
static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}

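/*
 * Stop the spinner: overwrite the first instruction of the batch (the
 * target of its own branch) with MI_BATCH_BUFFER_END and flush.
 */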
void igt_spinner_end(struct igt_spinner *spin)
{
	*spin->batch = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(spin->i915);
}

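/* Stop the spinner and release its objects and mappings. */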
void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);
}

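/*
 * Returns true once the spinner is executing, i.e. its seqno write has
 * landed in the HWS: first a quick 10us busy-wait, then a sleeping wait
 * of up to 1s (the wait_for*() macros return 0 on success).
 */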
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  1000));
}