/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "igt_gem_utils.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
#include "gt/intel_gt.h"
#include "i915_vma.h"
#include "i915_drv.h"

#include "i915_request.h"

struct i915_request *
igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	struct i915_request *rq;

	/*
	 * Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
	if (IS_ERR(ce))
		return ERR_CAST(ce);

	rq = intel_context_create_request(ce);
	intel_context_put(ce);

	return rq;
}
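/*
 * Usage sketch (illustrative, not part of the original helpers): submit
 * an otherwise empty request on @engine via igt_request_alloc() and wait
 * for it to complete. The function name, the HZ timeout and the -ETIME
 * fallback are assumptions for the example, following the wait pattern
 * commonly used by the live selftests.
 */
static int __maybe_unused
__igt_example_nop_request(struct i915_gem_context *ctx,
			  struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	int err = 0;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Keep a reference so we can wait after submission */
	i915_request_get(rq);
	i915_request_add(rq);

	if (i915_request_wait(rq, 0, HZ) < 0)
		err = -ETIME;

	i915_request_put(rq);
	return err;
}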
struct i915_vma *
igt_emit_store_dw(struct i915_vma *vma,
		  u64 offset,
		  unsigned long count,
		  u32 val)
{
	struct drm_i915_gem_object *obj;
	const int gen = INTEL_GEN(vma->vm->i915);
	unsigned long n, size;
	u32 *cmd;
	int err;

	size = (4 * count + 1) * sizeof(u32);
	size = round_up(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(vma->vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
	offset += vma->node.start;

	for (n = 0; n < count; n++) {
		if (gen >= 8) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
			*cmd++ = lower_32_bits(offset);
			*cmd++ = upper_32_bits(offset);
			*cmd++ = val;
		} else if (gen >= 4) {
			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
				(gen < 6 ? MI_USE_GGTT : 0);
			*cmd++ = 0;
			*cmd++ = offset;
			*cmd++ = val;
		} else {
			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*cmd++ = offset;
			*cmd++ = val;
		}
		offset += PAGE_SIZE;
	}
	*cmd = MI_BATCH_BUFFER_END;
	i915_gem_object_unpin_map(obj);

	intel_gt_chipset_flush(vma->vm->gt);

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

int igt_gpu_fill_dw(struct intel_context *ce,
		    struct i915_vma *vma, u64 offset,
		    unsigned long count, u32 val)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	unsigned int flags;
	int err;

	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
	GEM_BUG_ON(!i915_vma_is_pinned(vma));

	batch = igt_emit_store_dw(vma, offset, count, val);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	flags = 0;
	if (INTEL_GEN(ce->vm->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;

	err = rq->engine->emit_bb_start(rq,
					batch->node.start, batch->node.size,
					flags);
	if (err)
		goto err_request;

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto skip_request;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	i915_request_add(rq);

	i915_vma_unpin_and_release(&batch, 0);

	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_batch:
	i915_vma_unpin_and_release(&batch, 0);
	return err;
}
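/*
 * Usage sketch (illustrative, not part of the original helpers): fill
 * every page of @obj with @val via igt_gpu_fill_dw(). The function name
 * and the PIN_USER / offset-0 choices are assumptions for the example;
 * they mirror the pin-then-fill pattern used by the gem context
 * selftests.
 */
static int __maybe_unused
__igt_example_gpu_fill(struct intel_context *ce,
		       struct drm_i915_gem_object *obj,
		       u32 val)
{
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* igt_gpu_fill_dw() expects the target vma to already be pinned */
	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	/* One MI_STORE_DWORD_IMM per page, each writing @val */
	err = igt_gpu_fill_dw(ce, vma, 0, obj->base.size >> PAGE_SHIFT, val);

	i915_vma_unpin(vma);
	return err;
}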