// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_pm.h"

/**
 * i915_ttm_backup_free - Free any backup attached to this object
 * @obj: The object whose backup is to be freed.
 */
void i915_ttm_backup_free(struct drm_i915_gem_object *obj)
{
	if (obj->ttm.backup) {
		i915_gem_object_put(obj->ttm.backup);
		obj->ttm.backup = NULL;
	}
}

/**
 * struct i915_gem_ttm_pm_apply - Apply-to-region subclass for backup and restore
 * @base: The i915_gem_apply_to_region we derive from.
 * @allow_gpu: Whether using the gpu blitter is allowed.
 * @backup_pinned: On backup, also back up pinned objects.
 */
struct i915_gem_ttm_pm_apply {
	struct i915_gem_apply_to_region base;
	bool allow_gpu : 1;
	bool backup_pinned : 1;
};

static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
			   struct drm_i915_gem_object *obj)
{
	struct i915_gem_ttm_pm_apply *pm_apply =
		container_of(apply, typeof(*pm_apply), base);
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_buffer_object *backup_bo;
	struct drm_i915_private *i915 =
		container_of(bo->bdev, typeof(*i915), bdev);
	struct drm_i915_gem_object *backup;
	struct ttm_operation_ctx ctx = {};
	int err = 0;

	/* Nothing to do if already in system memory or already backed up. */
	if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
		return 0;

	/* Evictable objects are simply migrated to the system placement. */
	if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
		return ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);

	/*
	 * Skip if we're not backing up pinned objects. PM_EARLY objects
	 * are deliberately deferred to a later pass without the gpu.
	 */
	if (!pm_apply->backup_pinned ||
	    (pm_apply->allow_gpu && (obj->flags & I915_BO_ALLOC_PM_EARLY)))
		return 0;

	/* Volatile objects don't need their content preserved across suspend. */
	if (obj->flags & I915_BO_ALLOC_PM_VOLATILE)
		return 0;

	backup = i915_gem_object_create_shmem(i915, obj->base.size);
	if (IS_ERR(backup))
		return PTR_ERR(backup);

	err = i915_gem_object_lock(backup, apply->ww);
	if (err)
		goto out_no_lock;

	backup_bo = i915_gem_to_ttm(backup);
	err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
	if (err)
		goto out_no_populate;

	err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false);
	GEM_WARN_ON(err);

	obj->ttm.backup = backup;
	return 0;

out_no_populate:
	i915_gem_ww_unlock_single(backup);
out_no_lock:
	i915_gem_object_put(backup);

	return err;
}

static int i915_ttm_recover(struct i915_gem_apply_to_region *apply,
			    struct drm_i915_gem_object *obj)
{
	i915_ttm_backup_free(obj);
	return 0;
}

/**
 * i915_ttm_recover_region - Free the backup of all objects of a region
 * @mr: The memory region
 *
 * Checks all objects of a region for an attached backup and, if one is
 * present, frees it. Typically this is called to recover after a partially
 * performed backup.
 */
void i915_ttm_recover_region(struct intel_memory_region *mr)
{
	static const struct i915_gem_apply_to_region_ops recover_ops = {
		.process_obj = i915_ttm_recover,
	};
	struct i915_gem_apply_to_region apply = {.ops = &recover_ops};
	int ret;

	ret = i915_gem_process_region(mr, &apply);
	GEM_WARN_ON(ret);
}

/**
 * i915_ttm_backup_region - Back up all objects of a region to smem.
 * @mr: The memory region
 * @flags: TTM backup flags: I915_TTM_BACKUP_ALLOW_GPU allows using the gpu
 * blitter for the backup, I915_TTM_BACKUP_PINNED backs up also pinned
 * objects.
 *
 * Loops over all objects of a region and either evicts them if they are
 * evictable or backs them up using a backup object if they are pinned.
 *
 * Return: Zero on success. Negative error code on error.
 */
int i915_ttm_backup_region(struct intel_memory_region *mr, u32 flags)
{
	static const struct i915_gem_apply_to_region_ops backup_ops = {
		.process_obj = i915_ttm_backup,
	};
	struct i915_gem_ttm_pm_apply pm_apply = {
		.base = {.ops = &backup_ops},
		.allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
		.backup_pinned = flags & I915_TTM_BACKUP_PINNED,
	};

	return i915_gem_process_region(mr, &pm_apply.base);
}
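
/*
 * Usage sketch, not part of this file: the suspend path in
 * gem/i915_gem_pm.c drives the backup in several passes per local memory
 * region, roughly (helper names from that file):
 *
 *	ret = lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU);
 *	...
 *	ret = lmem_suspend(i915, I915_TTM_BACKUP_PINNED);
 *
 * where lmem_suspend() calls i915_ttm_backup_region() for each local
 * memory region: evictable objects are first moved to smem with the
 * blitter, remaining pinned objects are then backed up, and objects
 * marked I915_BO_ALLOC_PM_EARLY are deliberately left for a final
 * memcpy-only pass.
 */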
static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
			    struct drm_i915_gem_object *obj)
{
	struct i915_gem_ttm_pm_apply *pm_apply =
		container_of(apply, typeof(*pm_apply), base);
	struct drm_i915_gem_object *backup = obj->ttm.backup;
	struct ttm_buffer_object *backup_bo = i915_gem_to_ttm(backup);
	struct ttm_operation_ctx ctx = {};
	int err;

	if (!backup)
		return 0;

	/* Without the gpu, restore only objects marked for early recovery. */
	if (!pm_apply->allow_gpu && !(obj->flags & I915_BO_ALLOC_PM_EARLY))
		return 0;

	err = i915_gem_object_lock(backup, apply->ww);
	if (err)
		return err;

	/* Content may have been swapped. */
	err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
	if (!err) {
		err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
					    false);
		GEM_WARN_ON(err);

		obj->ttm.backup = NULL;
		err = 0;
	}

	i915_gem_ww_unlock_single(backup);

	if (!err)
		i915_gem_object_put(backup);

	return err;
}

/**
 * i915_ttm_restore_region - Restore backed-up objects of a region from smem.
 * @mr: The memory region
 * @flags: TTM backup flags: I915_TTM_BACKUP_ALLOW_GPU allows using the gpu
 * blitter to recover.
 *
 * Loops over all objects of a region and, if they are backed up, restores
 * them from smem.
 *
 * Return: Zero on success. Negative error code on error.
 */
int i915_ttm_restore_region(struct intel_memory_region *mr, u32 flags)
{
	static const struct i915_gem_apply_to_region_ops restore_ops = {
		.process_obj = i915_ttm_restore,
	};
	struct i915_gem_ttm_pm_apply pm_apply = {
		.base = {.ops = &restore_ops},
		.allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
	};

	return i915_gem_process_region(mr, &pm_apply.base);
}
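
/*
 * Usage sketch, not part of this file: the resume path in
 * gem/i915_gem_pm.c restores in two passes, roughly (helper names from
 * that file):
 *
 *	ret = lmem_restore(i915, 0);
 *	... bring up the GT ...
 *	ret = lmem_restore(i915, I915_TTM_BACKUP_ALLOW_GPU);
 *
 * where lmem_restore() calls i915_ttm_restore_region() for each local
 * memory region: objects marked I915_BO_ALLOC_PM_EARLY are restored by
 * memcpy before the gpu is available, everything else afterwards using
 * the blitter.
 */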