/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "igt_gem_utils.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "i915_vma.h"
#include "i915_drv.h"

#include "i915_request.h"

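/**
 * igt_request_alloc - allocate a request on the given context/engine pair
 * @ctx: the GEM context to execute under
 * @engine: the engine whose legacy ring index selects the intel_context
 *
 * Look up the intel_context backing @engine within @ctx and create a new
 * request on it. The context reference is dropped again before returning,
 * so the caller is left managing only the request.
 *
 * Returns the new request, or an ERR_PTR on failure.
 */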
struct i915_request *
igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	struct i915_request *rq;

	/*
	 * Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
	if (IS_ERR(ce))
		return ERR_CAST(ce);

	rq = intel_context_create_request(ce);
	intel_context_put(ce);

	return rq;
}

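/**
 * igt_emit_store_dw - build a batch storing a dword into each page of a vma
 * @vma: the target address range; it must already be bound, as its node
 *	offset is baked into the batch
 * @offset: byte offset of the first store, relative to the start of @vma
 * @count: number of stores to emit, one per page
 * @val: the dword written by every store
 *
 * Emits @count MI_STORE_DWORD_IMM commands, writing @val at @offset and
 * then at PAGE_SIZE strides, followed by MI_BATCH_BUFFER_END.
 *
 * Returns a new batch vma, pinned into the same address space as @vma,
 * or an ERR_PTR on failure.
 */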
struct i915_vma *
igt_emit_store_dw(struct i915_vma *vma,
		  u64 offset,
		  unsigned long count,
		  u32 val)
{
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(vma->vm->i915);
	unsigned long n, size;
	u32 *cmd;
	int err;

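	/*
	 * Worst-case batch size: gen8+ needs 4 dwords per store (the
	 * address is 64b), plus a final dword for MI_BATCH_BUFFER_END.
	 */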
	size = (4 * count + 1) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(vma->vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
	offset += vma->node.start;

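	/*
	 * Per-gen MI_STORE_DWORD_IMM encodings (see intel_gpu_commands.h):
	 * gen8+ takes a 64b virtual address; gen4-7 take a single 32b
	 * address, which prior to gen6 is a GGTT offset (MI_USE_GGTT);
	 * gen2/3 use the short form with MI_MEM_VIRTUAL to address
	 * through the GTT rather than physical memory.
	 */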
	for (n = 0; n < count; n++) {
		if (gen >= 8) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
			*cmd++ = val;
		} else if (gen >= 4) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
				(gen < 6 ? MI_USE_GGTT : 0);
			*cmd++ = 0;
			*cmd++ = offset;
			*cmd++ = val;
		} else {
			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*cmd++ = offset;
			*cmd++ = val;
		}
		offset += PAGE_SIZE;
	}
	*cmd = MI_BATCH_BUFFER_END;

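	/*
	 * Flush the WC map and the chipset caches so that the commands
	 * written by the CPU are visible to the GPU before it samples
	 * the batch.
	 */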
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	intel_gt_chipset_flush(vma->vm->gt);

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

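/**
 * igt_gpu_fill_dw - write a dword into each page of a vma from the GPU
 * @ce: the context (and thus engine and address space) to execute on
 * @vma: the pinned target vma
 * @offset: byte offset of the first store within @vma
 * @count: number of pages to write, one dword each
 * @val: the dword to write
 *
 * Submits the store batch built by igt_emit_store_dw() on @ce, tracking
 * both the batch (read) and @vma (write) in the request so the writes are
 * correctly fenced against other users of the objects.
 *
 * Returns 0 on success or a negative error code.
 */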
int igt_gpu_fill_dw(struct intel_context *ce,
		    struct i915_vma *vma, u64 offset,
		    unsigned long count, u32 val)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	unsigned int flags;
	int err;

	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
	GEM_BUG_ON(!i915_vma_is_pinned(vma));

	batch = igt_emit_store_dw(vma, offset, count, val);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

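	/*
	 * Record the request against both objects: the batch is read,
	 * the target is written, so later waiters see the stores.
	 */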
	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto skip_request;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

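	/*
	 * On gen5 and earlier the stores go through the GGTT (see
	 * MI_USE_GGTT/MI_MEM_VIRTUAL above), which is assumed here to
	 * require a privileged batch, hence secure dispatch.
	 */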
	flags = 0;
	if (INTEL_GEN(ce->vm->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = rq->engine->emit_bb_start(rq,
					batch->node.start, batch->node.size,
					flags);

skip_request:
	if (err)
		i915_request_set_error_once(rq, err);
	i915_request_add(rq);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}