// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_move.h"
#include "gem/i915_gem_ttm_pm.h"

/**
 * i915_ttm_backup_free - Free any backup attached to this object
 * @obj: The object whose backup is to be freed.
 */
void i915_ttm_backup_free(struct drm_i915_gem_object *obj)
{
	if (obj->ttm.backup) {
		i915_gem_object_put(obj->ttm.backup);
		obj->ttm.backup = NULL;
	}
}

/**
 * struct i915_gem_ttm_pm_apply - Apply-to-region subclass for backup and restore
 * @base: The i915_gem_apply_to_region we derive from.
 * @allow_gpu: Whether using the gpu blitter is allowed.
 * @backup_pinned: On backup, also back up pinned objects.
 */
struct i915_gem_ttm_pm_apply {
	struct i915_gem_apply_to_region base;
	bool allow_gpu : 1;
	bool backup_pinned : 1;
};

static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
			   struct drm_i915_gem_object *obj)
{
	struct i915_gem_ttm_pm_apply *pm_apply =
		container_of(apply, typeof(*pm_apply), base);
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_buffer_object *backup_bo;
	struct drm_i915_private *i915 =
		container_of(bo->bdev, typeof(*i915), bdev);
	struct drm_i915_gem_object *backup;
	struct ttm_operation_ctx ctx = {};
	unsigned int flags;
	int err = 0;

	if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
		return 0;

	if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
		return ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);

	if (!pm_apply->backup_pinned ||
	    (pm_apply->allow_gpu && (obj->flags & I915_BO_ALLOC_PM_EARLY)))
		return 0;

	if (obj->flags & I915_BO_ALLOC_PM_VOLATILE)
		return 0;

	/*
	 * It seems that we might have some framebuffers still pinned at this
	 * stage, but for such objects we might also need to deal with the CCS
	 * aux state. Make sure we force the save/restore of the CCS state,
	 * otherwise we might observe display corruption when returning from
	 * suspend.
	 */
	flags = 0;
	if (i915_gem_object_needs_ccs_pages(obj)) {
		WARN_ON_ONCE(!i915_gem_object_is_framebuffer(obj));
		WARN_ON_ONCE(!pm_apply->allow_gpu);

		flags = I915_BO_ALLOC_CCS_AUX;
	}
	backup = i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					       obj->base.size, 0, flags);
	if (IS_ERR(backup))
		return PTR_ERR(backup);

	err = i915_gem_object_lock(backup, apply->ww);
	if (err)
		goto out_no_lock;

	backup_bo = i915_gem_to_ttm(backup);
	err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
	if (err)
		goto out_no_populate;

	err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false);
	if (err) {
		drm_err(&i915->drm,
			"Unable to copy from device to system memory, err:%pe\n",
			ERR_PTR(err));
		goto out_no_populate;
	}
	ttm_bo_wait_ctx(backup_bo, &ctx);

	obj->ttm.backup = backup;
	return 0;

out_no_populate:
	i915_gem_ww_unlock_single(backup);
out_no_lock:
	i915_gem_object_put(backup);

	return err;
}

static int i915_ttm_recover(struct i915_gem_apply_to_region *apply,
			    struct drm_i915_gem_object *obj)
{
	i915_ttm_backup_free(obj);
	return 0;
}

/**
 * i915_ttm_recover_region - Free the backup of all objects of a region
 * @mr: The memory region
 *
 * Checks all objects of a region if there is backup attached and if so
 * frees that backup. Typically this is called to recover after a partially
 * performed backup.
 */
void i915_ttm_recover_region(struct intel_memory_region *mr)
{
	static const struct i915_gem_apply_to_region_ops recover_ops = {
		.process_obj = i915_ttm_recover,
	};
	struct i915_gem_apply_to_region apply = {.ops = &recover_ops};
	int ret;

	ret = i915_gem_process_region(mr, &apply);
	GEM_WARN_ON(ret);
}

/**
 * i915_ttm_backup_region - Back up all objects of a region to smem.
 * @mr: The memory region
 * @flags: Backup flags: I915_TTM_BACKUP_ALLOW_GPU allows using the gpu
 * blitter for the backup, I915_TTM_BACKUP_PINNED backs up also pinned
 * objects.
 *
 * Loops over all objects of a region and either evicts them if they are
 * evictable or backs them up using a backup object if they are pinned.
 *
 * Return: Zero on success. Negative error code on error.
 */
int i915_ttm_backup_region(struct intel_memory_region *mr, u32 flags)
{
	static const struct i915_gem_apply_to_region_ops backup_ops = {
		.process_obj = i915_ttm_backup,
	};
	struct i915_gem_ttm_pm_apply pm_apply = {
		.base = {.ops = &backup_ops},
		.allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
		.backup_pinned = flags & I915_TTM_BACKUP_PINNED,
	};

	return i915_gem_process_region(mr, &pm_apply.base);
}

static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
			    struct drm_i915_gem_object *obj)
{
	struct i915_gem_ttm_pm_apply *pm_apply =
		container_of(apply, typeof(*pm_apply), base);
	struct drm_i915_gem_object *backup = obj->ttm.backup;
	struct ttm_buffer_object *backup_bo = i915_gem_to_ttm(backup);
	struct ttm_operation_ctx ctx = {};
	int err;

	if (!backup)
		return 0;

	if (!pm_apply->allow_gpu && !(obj->flags & I915_BO_ALLOC_PM_EARLY))
		return 0;

	err = i915_gem_object_lock(backup, apply->ww);
	if (err)
		return err;

	/* Content may have been swapped. */
	err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
	if (!err) {
		err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
					    false);
		GEM_WARN_ON(err);
		ttm_bo_wait_ctx(backup_bo, &ctx);

		obj->ttm.backup = NULL;
		err = 0;
	}

	i915_gem_ww_unlock_single(backup);

	if (!err)
		i915_gem_object_put(backup);

	return err;
}

/**
 * i915_ttm_restore_region - Restore backed-up objects of a region from smem.
 * @mr: The memory region
 * @flags: Restore flags: I915_TTM_BACKUP_ALLOW_GPU allows using the gpu
 * blitter for the restore.
 *
 * Loops over all objects of a region and if they are backed-up, restores
 * them from smem.
 *
 * Return: Zero on success. Negative error code on error.
 */
int i915_ttm_restore_region(struct intel_memory_region *mr, u32 flags)
{
	static const struct i915_gem_apply_to_region_ops restore_ops = {
		.process_obj = i915_ttm_restore,
	};
	struct i915_gem_ttm_pm_apply pm_apply = {
		.base = {.ops = &restore_ops},
		.allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
	};

	return i915_gem_process_region(mr, &pm_apply.base);
}
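
/*
 * Illustrative usage sketch (not compiled): a suspend path is expected to
 * walk the device's local-memory regions and back each one up, undoing any
 * partially performed backup with i915_ttm_recover_region() on failure,
 * while the resume path restores the regions again. The helper names
 * lmem_backup_all() and lmem_restore_all() below are hypothetical and exist
 * only to show how the three exported functions above fit together; the real
 * call sites live in the GEM suspend/resume code.
 */
#if 0
static int lmem_backup_all(struct drm_i915_private *i915, u32 flags)
{
	struct intel_memory_region *mr;
	int id, ret = 0;

	for_each_memory_region(mr, i915, id) {
		if (mr->type != INTEL_MEMORY_LOCAL)
			continue;

		ret = i915_ttm_backup_region(mr, flags);
		if (ret)
			break;
	}

	if (ret) {
		/*
		 * Undo any backups already created, including a partial
		 * backup within the region that failed.
		 */
		for_each_memory_region(mr, i915, id)
			if (mr->type == INTEL_MEMORY_LOCAL)
				i915_ttm_recover_region(mr);
	}

	return ret;
}

static int lmem_restore_all(struct drm_i915_private *i915, u32 flags)
{
	struct intel_memory_region *mr;
	int id, ret = 0;

	for_each_memory_region(mr, i915, id) {
		if (mr->type != INTEL_MEMORY_LOCAL)
			continue;

		ret = i915_ttm_restore_region(mr, flags);
		if (ret)
			break;
	}

	return ret;
}

/*
 * The suspend path would typically call lmem_backup_all() with
 * I915_TTM_BACKUP_ALLOW_GPU (and, in a later pass, also
 * I915_TTM_BACKUP_PINNED), and the resume path would call
 * lmem_restore_all() with I915_TTM_BACKUP_ALLOW_GPU.
 */
#endif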