// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2021 Intel Corporation
 */

#include "gt/intel_migrate.h"
#include "gem/i915_gem_ttm_move.h"

static int igt_fill_check_buffer(struct drm_i915_gem_object *obj,
				 bool fill)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int i, count = obj->base.size / sizeof(u32);
	enum i915_map_type map_type =
		i915_coherent_map_type(i915, obj, false);
	u32 *cur;
	int err = 0;

	assert_object_held(obj);
	cur = i915_gem_object_pin_map(obj, map_type);
	if (IS_ERR(cur))
		return PTR_ERR(cur);

	if (fill)
		for (i = 0; i < count; ++i)
			*cur++ = i;
	else
		for (i = 0; i < count; ++i)
			if (*cur++ != i) {
				pr_err("Object content mismatch at location %d of %d\n",
				       i, count);
				err = -EINVAL;
				break;
			}

	i915_gem_object_unpin_map(obj);

	return err;
}

static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
			      enum intel_region_id dst)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_memory_region *src_mr = i915->mm.regions[src];
	struct drm_i915_gem_object *obj;
	struct i915_gem_ww_ctx ww;
	int err = 0;

	GEM_BUG_ON(!src_mr);

	/* Switch object backing-store on create */
	obj = i915_gem_object_create_region(src_mr, PAGE_SIZE, 0, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		err = igt_fill_check_buffer(obj, true);
		if (err)
			continue;

		err = i915_gem_object_migrate(obj, &ww, dst);
		if (err)
			continue;

		err = i915_gem_object_pin_pages(obj);
		if (err)
			continue;

		/* While the pages are pinned, migrating back must fail. */
		if (i915_gem_object_can_migrate(obj, src))
			err = -EINVAL;

		i915_gem_object_unpin_pages(obj);
		if (err)
			continue;

		err = i915_gem_object_wait_migration(obj, true);
		if (err)
			continue;

		err = igt_fill_check_buffer(obj, false);
	}
	i915_gem_object_put(obj);

	return err;
}

static int igt_smem_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_LMEM, INTEL_REGION_SMEM);
}

static int igt_lmem_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_SMEM, INTEL_REGION_LMEM);
}

static int igt_same_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_LMEM, INTEL_REGION_LMEM);
}

static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
				  struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_lock(obj, ww);
	if (err)
		return err;

	if (i915_gem_object_is_lmem(obj)) {
		err = i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM);
		if (err) {
			pr_err("Object failed migration to smem\n");
			return err;
		}

		if (i915_gem_object_is_lmem(obj)) {
			pr_err("object still backed by lmem\n");
			err = -EINVAL;
		}

		if (!i915_gem_object_has_struct_page(obj)) {
			pr_err("object not backed by struct page\n");
			err = -EINVAL;
		}

	} else {
		err = i915_gem_object_migrate(obj, ww, INTEL_REGION_LMEM);
		if (err) {
			pr_err("Object failed migration to lmem\n");
			return err;
		}

		if (i915_gem_object_has_struct_page(obj)) {
			pr_err("object still backed by struct page\n");
			err = -EINVAL;
		}

		if (!i915_gem_object_is_lmem(obj)) {
			pr_err("object not backed by lmem\n");
			err = -EINVAL;
		}
	}

	return err;
}
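/*
 * Bounce an object between lmem and smem a number of times without an
 * intermediate sync: each lmem_pages_migrate_one() call above migrates
 * towards whichever region the object does not currently occupy, and
 * only the final readback below waits for the outstanding migration
 * fences before checking the content.
 */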
static int igt_lmem_pages_migrate(void *arg)
{
	struct intel_gt *gt = arg;
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_gem_ww_ctx ww;
	struct i915_request *rq;
	int err;
	int i;

	/* From LMEM to shmem and back again */

	obj = i915_gem_object_create_lmem(i915, SZ_2M, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* Initial GPU fill, sync, CPU initialization. */
	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			continue;

		err = intel_migrate_clear(&gt->migrate, &ww, NULL,
					  obj->mm.pages->sgl, obj->cache_level,
					  i915_gem_object_is_lmem(obj),
					  0xdeadbeaf, &rq);
		if (rq) {
			dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
			i915_request_put(rq);
		}
		if (err)
			continue;

		err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE,
					   5 * HZ);
		if (err)
			continue;

		err = igt_fill_check_buffer(obj, true);
		if (err)
			continue;
	}
	if (err)
		goto out_put;

	/*
	 * Migrate to and from smem without explicitly syncing.
	 * Finalize with data in smem for fast readout.
	 */
	for (i = 1; i <= 5; ++i) {
		for_i915_gem_ww(&ww, err, true)
			err = lmem_pages_migrate_one(&ww, obj);
		if (err)
			goto out_put;
	}

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out_put;

	/* Finally sync migration and check content. */
	err = i915_gem_object_wait_migration(obj, true);
	if (err)
		goto out_unlock;

	err = igt_fill_check_buffer(obj, false);

out_unlock:
	i915_gem_object_unlock(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static int igt_lmem_pages_failsafe_migrate(void *arg)
{
	int fail_gpu, fail_alloc, ret;

	for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) {
		for (fail_alloc = 0; fail_alloc < 2; ++fail_alloc) {
			pr_info("Simulated failure modes: gpu: %d, alloc: %d\n",
				fail_gpu, fail_alloc);
			i915_ttm_migrate_set_failure_modes(fail_gpu,
							   fail_alloc);
			ret = igt_lmem_pages_migrate(arg);
			if (ret)
				goto out_err;
		}
	}

out_err:
	i915_ttm_migrate_set_failure_modes(false, false);
	return ret;
}

int i915_gem_migrate_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_smem_create_migrate),
		SUBTEST(igt_lmem_create_migrate),
		SUBTEST(igt_same_create_migrate),
		SUBTEST(igt_lmem_pages_failsafe_migrate),
	};

	if (!HAS_LMEM(i915))
		return 0;

	return intel_gt_live_subtests(tests, to_gt(i915));
}