/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "igt_gem_utils.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "i915_vma.h"
#include "i915_drv.h"

#include "i915_request.h"

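/*
 * Look up the intel_context backing @engine within @ctx (via the engine's
 * legacy ring index) and allocate a fresh request on it, dropping the
 * temporary context reference before returning.
 */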
struct i915_request *
igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	struct i915_request *rq;

	/*
	 * Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
	if (IS_ERR(ce))
		return ERR_CAST(ce);

	rq = intel_context_create_request(ce);
	intel_context_put(ce);

	return rq;
}

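/*
 * Build a one-shot batch of MI_STORE_DWORD_IMM commands that writes @val
 * into @vma at @offset and then at every PAGE_SIZE stride after it, @count
 * times, terminated by MI_BATCH_BUFFER_END. On success the batch is
 * returned as a vma pinned into the same address space, ready to be
 * executed; callers release it with i915_vma_unpin_and_release().
 */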
struct i915_vma *
igt_emit_store_dw(struct i915_vma *vma,
		  u64 offset,
		  unsigned long count,
		  u32 val)
{
	struct drm_i915_gem_object *obj;
	const int ver = GRAPHICS_VER(vma->vm->i915);
	unsigned long n, size;
	u32 *cmd;
	int err;

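	/*
	 * Worst case is four dwords per store (the gen8+ encoding) plus
	 * one dword for the MI_BATCH_BUFFER_END terminator, rounded up
	 * to whole pages for the internal object allocation.
	 */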
	size = (4 * count + 1) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(vma->vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

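	/*
	 * The stores land one per page, so the final write must still
	 * fall within the vma. The batch encodes absolute GPU addresses,
	 * hence the bias by the vma's node start.
	 */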
	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
	offset += vma->node.start;

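	/*
	 * The MI_STORE_DWORD_IMM encoding varies across generations:
	 * gen8+ takes a 64-bit address split over two dwords; gen4-gen7
	 * use a 32-bit address (with MI_USE_GGTT selecting the global
	 * GTT on gen4/5); gen2/3 use the short form with MI_MEM_VIRTUAL.
	 */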
	for (n = 0; n < count; n++) {
		if (ver >= 8) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
			*cmd++ = val;
		} else if (ver >= 4) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
				(ver < 6 ? MI_USE_GGTT : 0);
			*cmd++ = 0;
			*cmd++ = offset;
			*cmd++ = val;
		} else {
			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*cmd++ = offset;
			*cmd++ = val;
		}
		offset += PAGE_SIZE;
	}
	*cmd = MI_BATCH_BUFFER_END;

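	/*
	 * Flush the CPU writes out of the WC mapping and then the chipset
	 * write buffers, so the GPU sees the completed batch before it is
	 * executed.
	 */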
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	intel_gt_chipset_flush(vma->vm->gt);

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

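/*
 * Submit a request on @ce that executes the store-dword batch built by
 * igt_emit_store_dw(), writing @val once per page across @count pages of
 * the already pinned @vma, starting at @offset. Both vmas are tracked as
 * active on the request; the function returns without waiting, so the
 * fill completes asynchronously once the request runs.
 */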
int igt_gpu_fill_dw(struct intel_context *ce,
		    struct i915_vma *vma, u64 offset,
		    unsigned long count, u32 val)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	unsigned int flags;
	int err;

	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
	GEM_BUG_ON(!i915_vma_is_pinned(vma));

	batch = igt_emit_store_dw(vma, offset, count, val);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	i915_vma_lock(batch);
	err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto skip_request;

	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

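	/*
	 * Gen5 and earlier need the batch dispatched from the secure
	 * (privileged) path, seemingly because MI_STORE_DWORD_IMM is not
	 * available to unprivileged batches on those generations.
	 */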
	flags = 0;
	if (GRAPHICS_VER(ce->vm->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = rq->engine->emit_bb_start(rq,
					batch->node.start, batch->node.size,
					flags);

skip_request:
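	/*
	 * If setup failed after the request was created, record the error
	 * on the request but still add it, so that it is submitted,
	 * skipped and retired, releasing its resources.
	 */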
	if (err)
		i915_request_set_error_once(rq, err);
	i915_request_add(rq);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}