// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "intel_memory_region.h"
#include "i915_drv.h"

static const struct {
	u16 class;
	u16 instance;
} intel_region_map[] = {
	[INTEL_REGION_SMEM] = {
		.class = INTEL_MEMORY_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_LMEM] = {
		.class = INTEL_MEMORY_LOCAL,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_SMEM] = {
		.class = INTEL_MEMORY_STOLEN_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_LMEM] = {
		.class = INTEL_MEMORY_STOLEN_LOCAL,
		.instance = 0,
	},
};

struct intel_region_reserve {
	struct list_head link;
	struct ttm_resource *res;
};

struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
			   u16 class, u16 instance)
{
	struct intel_memory_region *mr;
	int id;

	/* XXX: consider converting to an rb tree at some point */
	for_each_memory_region(mr, i915, id) {
		if (mr->type == class && mr->instance == instance)
			return mr;
	}

	return NULL;
}

struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
			    enum intel_memory_type mem_type)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == mem_type)
			return mr;

	return NULL;
}
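
/*
 * Usage sketch (illustrative only, not part of this file): a caller
 * that prefers device-local memory might try an exact (class, instance)
 * lookup first and fall back to a type-based search. The i915 pointer
 * is assumed to come from the caller's context.
 *
 *	struct intel_memory_region *mr;
 *
 *	mr = intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL, 0);
 *	if (!mr)
 *		mr = intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
 *	if (!mr)
 *		return -ENODEV;
 */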

/**
 * intel_memory_region_unreserve - Unreserve all previously reserved
 * ranges
 * @mem: The region containing the reserved ranges.
 */
void intel_memory_region_unreserve(struct intel_memory_region *mem)
{
	struct intel_region_reserve *reserve, *next;

	if (!mem->priv_ops || !mem->priv_ops->free)
		return;

	mutex_lock(&mem->mm_lock);
	list_for_each_entry_safe(reserve, next, &mem->reserved, link) {
		list_del(&reserve->link);
		mem->priv_ops->free(mem, reserve->res);
		kfree(reserve);
	}
	mutex_unlock(&mem->mm_lock);
}

/**
 * intel_memory_region_reserve - Reserve a memory range
 * @mem: The region for which we want to reserve a range.
 * @offset: Start of the range to reserve.
 * @size: The size of the range to reserve.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_memory_region_reserve(struct intel_memory_region *mem,
				resource_size_t offset,
				resource_size_t size)
{
	int ret;
	struct intel_region_reserve *reserve;

	if (!mem->priv_ops || !mem->priv_ops->reserve)
		return -EINVAL;

	reserve = kzalloc(sizeof(*reserve), GFP_KERNEL);
	if (!reserve)
		return -ENOMEM;

	reserve->res = mem->priv_ops->reserve(mem, offset, size);
	if (IS_ERR(reserve->res)) {
		ret = PTR_ERR(reserve->res);
		kfree(reserve);
		return ret;
	}

	mutex_lock(&mem->mm_lock);
	list_add_tail(&reserve->link, &mem->reserved);
	mutex_unlock(&mem->mm_lock);

	return 0;
}

struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   u16 type,
			   u16 instance,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = (struct resource)DEFINE_RES_MEM(start, size);
	mem->io_start = io_start;
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->avail = mem->total;
	mem->type = type;
	mem->instance = instance;

	mutex_init(&mem->objects.lock);
	INIT_LIST_HEAD(&mem->objects.list);
	INIT_LIST_HEAD(&mem->objects.purgeable);
	INIT_LIST_HEAD(&mem->reserved);

	mutex_init(&mem->mm_lock);

	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	kref_init(&mem->kref);
	return mem;

err_free:
	kfree(mem);
	return ERR_PTR(err);
}

void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}

static void __intel_memory_region_destroy(struct kref *kref)
{
	struct intel_memory_region *mem =
		container_of(kref, typeof(*mem), kref);

	intel_memory_region_unreserve(mem);
	if (mem->ops->release)
		mem->ops->release(mem);

	mutex_destroy(&mem->mm_lock);
	mutex_destroy(&mem->objects.lock);
	kfree(mem);
}

struct intel_memory_region *
intel_memory_region_get(struct intel_memory_region *mem)
{
	kref_get(&mem->kref);
	return mem;
}

void intel_memory_region_put(struct intel_memory_region *mem)
{
	kref_put(&mem->kref, __intel_memory_region_destroy);
}
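
/*
 * Lifecycle sketch (illustrative only): a minimal region setup path,
 * assuming a hypothetical my_region_ops with an init callback and a
 * caller-provided io_start. Regions are refcounted, so teardown is a
 * put; destruction also unreserves any ranges still held against the
 * region.
 *
 *	struct intel_memory_region *mem;
 *
 *	mem = intel_memory_region_create(i915, 0, SZ_1G, PAGE_SIZE,
 *					 io_start, INTEL_MEMORY_LOCAL, 0,
 *					 &my_region_ops);
 *	if (IS_ERR(mem))
 *		return PTR_ERR(mem);
 *
 *	intel_memory_region_set_name(mem, "local%u", 0);
 *
 *	// carve out a range the allocator must never hand out
 *	intel_memory_region_reserve(mem, 0, SZ_64K);
 *	...
 *	intel_memory_region_put(mem);
 */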

/* Global memory region registration -- only slight layer inversions! */

int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u16 type, instance;

		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = intel_region_map[i].class;
		instance = intel_region_map[i].instance;
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			mem = i915_gem_shmem_setup(i915, type, instance);
			break;
		case INTEL_MEMORY_STOLEN_LOCAL:
			mem = i915_gem_stolen_lmem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		case INTEL_MEMORY_STOLEN_SYSTEM:
			mem = i915_gem_stolen_smem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		default:
			continue;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		mem->id = i;
		i915->mm.regions[i] = mem;
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}

void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *region =
			fetch_and_zero(&i915->mm.regions[i]);

		if (region)
			intel_memory_region_put(region);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif