/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "igt_gem_utils.h"
#include "igt_spinner.h"

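/*
 * igt_spinner_init - create a "spinner" for use by selftests
 *
 * Allocates two single-page internal objects: a status page (spin->hws)
 * into which each spinning batch reports its seqno, and the page holding
 * the batch itself (spin->obj). Both are pinned and mapped for CPU access
 * so the spinner can be started, observed and terminated from the CPU.
 */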
int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
{
	unsigned int mode;
	void *vaddr;
	int err;

	/* The spinning batch relies on gen8+ command encodings. */
	GEM_BUG_ON(INTEL_GEN(i915) < 8);

	memset(spin, 0, sizeof(*spin));
	spin->i915 = i915;

	/* A page of seqno slots, one per fence context, written by the batch. */
	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	/* A page to hold the spinning batch itself. */
	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	/* Poison the slots so an unwritten slot never reads as a valid seqno. */
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	/* WB on LLC platforms, WC otherwise, keeping CPU writes coherent. */
	mode = i915_coherent_map_type(i915);
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}

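/*
 * Each fence context is assigned its own u32 slot in the HWS page; the
 * offset simply wraps within the page, so distinct contexts may alias
 * after PAGE_SIZE / sizeof(u32) ids, which is harmless for these tests.
 */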
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + seqno_offset(rq->fence.context);
}

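/*
 * Mark the vma as in use by the request, taking an active reference on
 * the object (dropped automatically on retirement) so that it cannot be
 * freed while the GPU may still be accessing it.
 */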
static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	err = i915_vma_move_to_active(vma, rq, flags);
	if (err)
		return err;

	if (!i915_gem_object_has_active_reference(vma->obj)) {
		i915_gem_object_get(vma->obj);
		i915_gem_object_set_active_reference(vma->obj);
	}

	return 0;
}

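/*
 * igt_spinner_create_request - build a request whose batch spins forever
 *
 * The batch writes the request's seqno into the per-context HWS slot (so
 * igt_wait_for_spinner() can tell that the batch has started executing),
 * emits the caller-supplied @arbitration_command (e.g. MI_ARB_CHECK to
 * permit preemption, or MI_NOOP to forbid it), then jumps back to its own
 * start with MI_BATCH_BUFFER_START. The loop runs until igt_spinner_end()
 * rewrites the batch.
 *
 * Typical usage (a sketch; reference handling and error paths elided):
 *
 *	igt_spinner_init(&spin, i915);
 *	rq = igt_spinner_create_request(&spin, ctx, engine, MI_ARB_CHECK);
 *	i915_request_add(rq);
 *	if (!igt_wait_for_spinner(&spin, rq))
 *		goto err;
 *	igt_spinner_end(&spin);
 *	igt_spinner_fini(&spin);
 */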
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine,
			   u32 arbitration_command)
{
	struct i915_address_space *vm = &ctx->ppgtt->vm;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	u32 *batch;
	int err;

	vma = i915_vma_instance(spin->obj, vm, NULL);
	if (IS_ERR(vma))
		return ERR_CAST(vma);

	hws = i915_vma_instance(spin->hws, vm, NULL);
	if (IS_ERR(hws))
		return ERR_CAST(hws);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return ERR_PTR(err);

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin_hws;
	}

	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = move_to_active(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = spin->batch;

	/* Report that the batch has started by writing our seqno to the HWS. */
	*batch++ = MI_STORE_DWORD_IMM_GEN4;
	*batch++ = lower_32_bits(hws_address(hws, rq));
	*batch++ = upper_32_bits(hws_address(hws, rq));
	*batch++ = rq->fence.seqno;

	*batch++ = arbitration_command;

	/* Loop back to the start of the batch: gen8+ form, PPGTT addressing. */
	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);
	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

	/* Make sure the GPU sees our writes before it executes the batch. */
	i915_gem_chipset_flush(spin->i915);

	if (engine->emit_init_breadcrumb &&
	    rq->timeline->has_initial_breadcrumb) {
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);

cancel_rq:
	if (err) {
		i915_request_skip(rq, err);
		i915_request_add(rq);
	}
unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	return err ? ERR_PTR(err) : rq;
}

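/*
 * Read back the seqno last written to this request's HWS slot by the
 * batch; READ_ONCE() as the GPU updates the slot asynchronously.
 */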
static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}

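/*
 * Terminate the spinner: overwrite the first batch instruction with
 * MI_BATCH_BUFFER_END so the command streamer exits the loop on its next
 * pass, and flush so the write is visible to the GPU.
 */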
void igt_spinner_end(struct igt_spinner *spin)
{
	*spin->batch = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(spin->i915);
}

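/*
 * Release everything created by igt_spinner_init(), first ending the
 * spinner so no batch is left running on the pages being released.
 */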
void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);
}

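/*
 * igt_wait_for_spinner - wait for the spinning batch to start executing
 *
 * Returns true once the batch has written its seqno to the HWS: first a
 * short 10us busy-wait, then a sleeping wait of up to 1s. (The wait_for*()
 * macros return 0 on success, so the expression below is true as soon as
 * either wait observes the seqno.)
 */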
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  1000));
}