/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "igt_spinner.h"

/*
 * Initialise a spinner: allocate one page used as a pseudo-HWSP seqno
 * buffer (spin->hws) and one page for the spinning batch (spin->obj),
 * and pin CPU maps of both so the batch can be written here and later
 * terminated from the CPU by igt_spinner_end().
 *
 * Returns 0 on success or a negative errno; on failure nothing is
 * leaked (see the goto unwind chain at the bottom).
 */
int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
{
	unsigned int mode;
	void *vaddr;
	int err;

	/*
	 * The command forms emitted in igt_spinner_create_request()
	 * (MI_STORE_DWORD_IMM_GEN4 with a 64b address, gen8-style
	 * MI_BATCH_BUFFER_START) require gen8+.
	 */
	GEM_BUG_ON(INTEL_GEN(i915) < 8);

	memset(spin, 0, sizeof(*spin));
	spin->i915 = i915;

	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}

	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_obj; /* NOTE(review): label name, not obj - jumps below */
	}

	/* LLC caching + WB map: the CPU will poll this page in hws_seqno(). */
	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_obj;
	}
	/*
	 * Poison the seqno page with 0xff so a slot that has not yet been
	 * written by the GPU is never mistaken for a completed seqno.
	 */
	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

	/*
	 * Map the batch with the platform-coherent mapping type so the
	 * CPU write in igt_spinner_end() is reliably seen by the GPU.
	 */
	mode = i915_coherent_map_type(i915);
	vaddr = i915_gem_object_pin_map(spin->obj, mode);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto err_unpin_hws;
	}
	spin->batch = vaddr;

	return 0;

err_unpin_hws:
	i915_gem_object_unpin_map(spin->hws);
err_obj:
	i915_gem_object_put(spin->obj);
err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}

/*
 * Each request stores its seqno at a page offset derived from its fence
 * context, so multiple contexts can share the single HWS page without
 * clashing (offset_in_page wraps once contexts exceed one page of u32s).
 */
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

/* GPU virtual address of this request's seqno slot inside the HWS vma. */
static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return hws->node.start + seqno_offset(rq->fence.context);
}

/*
 * Mark @vma as used by @rq and take an "active" reference on the backing
 * object (once) so it is kept alive until the request is retired.
 */
static int move_to_active(struct i915_vma *vma,
			  struct i915_request *rq,
			  unsigned int flags)
{
	int err;

	err = i915_vma_move_to_active(vma, rq, flags);
	if (err)
		return err;

	if (!i915_gem_object_has_active_reference(vma->obj)) {
		i915_gem_object_get(vma->obj);
		i915_gem_object_set_active_reference(vma->obj);
	}

	return 0;
}

/*
 * Build a request whose batch spins forever on @engine: it stores its
 * seqno into the HWS page (so igt_wait_for_spinner() can observe that it
 * actually started), emits @arbitration_command (e.g. to enable/disable
 * arbitration and hence preemption) and then branches back to the start
 * of the batch. The loop is only broken by igt_spinner_end() rewriting
 * the first dword to MI_BATCH_BUFFER_END.
 *
 * Returns the request or an ERR_PTR. NOTE(review): on success the
 * request does not appear to be added here -- presumably the caller
 * performs i915_request_add(); confirm against callers.
 */
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct i915_gem_context *ctx,
			   struct intel_engine_cs *engine,
			   u32 arbitration_command)
{
	struct i915_address_space *vm = &ctx->ppgtt->vm;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	u32 *batch;
	int err;

	vma = i915_vma_instance(spin->obj, vm, NULL);
	if (IS_ERR(vma))
		return ERR_CAST(vma);

	hws = i915_vma_instance(spin->hws, vm, NULL);
	if (IS_ERR(hws))
		return ERR_CAST(hws);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return ERR_PTR(err);

	err = i915_vma_pin(hws, 0, 0, PIN_USER);
	if (err)
		goto unpin_vma;

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin_hws;
	}

	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = move_to_active(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = spin->batch;

	/* Store the request's seqno into its per-context HWS slot. */
	*batch++ = MI_STORE_DWORD_IMM_GEN4;
	*batch++ = lower_32_bits(hws_address(hws, rq));
	*batch++ = upper_32_bits(hws_address(hws, rq));
	*batch++ = rq->fence.seqno;

	*batch++ = arbitration_command;

	/*
	 * Unconditional branch back to the start of this batch: the spin.
	 * NOTE(review): the 1 << 8 | 1 flags presumably select gen8+
	 * 48b/PPGTT addressing -- confirm against the MI_BATCH_BUFFER_START
	 * definition for this platform.
	 */
	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
	*batch++ = lower_32_bits(vma->node.start);
	*batch++ = upper_32_bits(vma->node.start);
	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

	/* Make the CPU writes to the batch visible before submission. */
	i915_gem_chipset_flush(spin->i915);

	err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);

cancel_rq:
	if (err) {
		/* Mark the request as failed and flush it out. */
		i915_request_skip(rq, err);
		i915_request_add(rq);
	}
unpin_hws:
	i915_vma_unpin(hws);
unpin_vma:
	i915_vma_unpin(vma);
	return err ? ERR_PTR(err) : rq;
}

/* Read back the seqno the spinner wrote for @rq's context slot (if any). */
static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}

/*
 * Break the spin loop: overwrite the first batch dword with
 * MI_BATCH_BUFFER_END and flush so the GPU observes the new command.
 */
void igt_spinner_end(struct igt_spinner *spin)
{
	*spin->batch = MI_BATCH_BUFFER_END;
	i915_gem_chipset_flush(spin->i915);
}

/*
 * Stop the spinner and release the maps and object references taken in
 * igt_spinner_init().
 */
void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	i915_gem_object_unpin_map(spin->obj);
	i915_gem_object_put(spin->obj);

	i915_gem_object_unpin_map(spin->hws);
	i915_gem_object_put(spin->hws);
}

/*
 * Wait for the spinner batch to actually start executing: first a short
 * 10us busy-wait, then a sleeping wait of up to 1000ms for the seqno
 * write to appear in the HWS page. Returns true if the spinner is
 * observed running.
 */
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  1000));
}