/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */
#include "gt/intel_gt.h"

#include "gem/selftests/igt_gem_utils.h"

#include "igt_spinner.h"

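/*
 * Prepare a spinner: two single-page internal objects, one holding the
 * spinning batch and one acting as a status page ("hws") into which the
 * batch reports its seqno. A test typically brackets its use as follows
 * (sketch, error handling elided):
 *
 *	if (igt_spinner_init(&spin, i915))
 *		return -ENOMEM;
 *	...
 *	igt_spinner_fini(&spin);
 */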
int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
{
	unsigned int mode;
	void *vaddr;
	int err;

	/* The spinning batch below is only valid for gen8+ command streamers. */
	GEM_BUG_ON(INTEL_GEN(i915) < 8);

	memset(spin, 0, sizeof(*spin));
	spin->i915 = i915;
	spin->gt = &i915->gt;

	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	/*
	 * Keep a CPU map of the status page for polling, poisoned so that
	 * no slot reads back as an already-passed seqno.
	 */
	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	/* Map the batch coherently so igt_spinner_end() can rewrite it. */
	mode = i915_coherent_map_type(i915);
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}

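/*
 * Each fence context claims its own u32 slot in the status page,
 * wrapped so the offset always stays within the page.
 */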
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

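/* GPU address of this request's seqno slot in the status page. */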
static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + seqno_offset(rq->fence.context);
}

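/* Record the vma as busy so it is held resident until the request completes. */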
static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, flags);
	i915_vma_unlock(vma);

	return err;
}

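/*
 * Build and submit a request whose batch spins forever: it reports its
 * seqno to the status page, executes the caller's arbitration command
 * (e.g. MI_ARB_CHECK to allow preemption, MI_NOOP to forbid it) and then
 * branches back to its own start. The loop runs until igt_spinner_end()
 * rewrites the first instruction to MI_BATCH_BUFFER_END.
 *
 * On success the request is returned unadded; the caller must submit it.
 * Typical caller pattern (sketch, error handling elided):
 *
 *	rq = igt_spinner_create_request(&spin, ctx, engine, MI_ARB_CHECK);
 *	i915_request_add(rq);
 *	if (!igt_wait_for_spinner(&spin, rq))
 *		pr_err("spinner failed to start\n");
 *	igt_spinner_end(&spin);
 */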
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine,
			   u32 arbitration_command)
{
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	u32 *batch;
	int err;

	spin->gt = engine->gt;

	/* Bind the batch and status page into the context's address space. */
	vma = i915_vma_instance(spin->obj, ctx->vm, NULL);
	if (IS_ERR(vma))
		return ERR_CAST(vma);

	hws = i915_vma_instance(spin->hws, ctx->vm, NULL);
	if (IS_ERR(hws))
		return ERR_CAST(hws);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return ERR_PTR(err);

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin_hws;
	}

	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = move_to_active(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = spin->batch;

	/* Report our seqno so igt_wait_for_spinner() can see us running. */
	*batch++ = MI_STORE_DWORD_IMM_GEN4;
	*batch++ = lower_32_bits(hws_address(hws, rq));
	*batch++ = upper_32_bits(hws_address(hws, rq));
	*batch++ = rq->fence.seqno;

	/* The caller decides if, and how, the loop may be interrupted. */
	*batch++ = arbitration_command;

	/* Spin: branch back to the start of this batch. */
	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);
	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

	intel_gt_chipset_flush(engine->gt);

	if (engine->emit_init_breadcrumb &&
	    rq->timeline->has_initial_breadcrumb) {
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);

cancel_rq:
	if (err) {
		/* Mark the request as failed, but still submit it to retire. */
		i915_request_skip(rq, err);
		i915_request_add(rq);
	}
unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	return err ? ERR_PTR(err) : rq;
}

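/* Read back the seqno the spinner last wrote to its status page slot. */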
static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}

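/*
 * Terminate the spinner by overwriting its first instruction with
 * MI_BATCH_BUFFER_END and flushing, so the GPU sees the update.
 */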
void igt_spinner_end(struct igt_spinner *spin)
{
	*spin->batch = MI_BATCH_BUFFER_END;
	intel_gt_chipset_flush(spin->gt);
}

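/* Release the spinner's objects, ending any batch still spinning. */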
void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);
}

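/*
 * Poll (briefly busy-waiting, then sleeping for up to a second) until
 * the spinner reports its seqno, i.e. until its batch is executing on
 * the GPU. Returns true if the spinner started running.
 */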
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  1000));
}