// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"

/*
 * Return the largest min_page_size over all of the object's possible
 * placements. Rounding an allocation up to this value keeps the backing
 * store valid in whichever region the object finally lands in.
 */
static u32 object_max_page_size(struct drm_i915_gem_object *obj)
{
        u32 max_page_size = 0;
        int i;

        for (i = 0; i < obj->mm.n_placements; i++) {
                struct intel_memory_region *mr = obj->mm.placements[i];

                GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
                max_page_size = max_t(u32, max_page_size, mr->min_page_size);
        }

        /* The placement list must be installed before we are called */
        GEM_BUG_ON(!max_page_size);
        return max_page_size;
}

/*
 * Install the placement list on @obj. With a single placement we point
 * at the region slot inside i915->mm.regions instead of retaining the
 * caller's array; otherwise @obj takes ownership of @placements and the
 * caller must not free it.
 */
static void object_set_placements(struct drm_i915_gem_object *obj,
                                  struct intel_memory_region **placements,
                                  unsigned int n_placements)
{
        GEM_BUG_ON(!n_placements);

        /*
         * For the common case of one memory region, skip storing an
         * allocated array and just point at the region directly.
         */
        if (n_placements == 1) {
                struct intel_memory_region *mr = placements[0];
                struct drm_i915_private *i915 = mr->i915;

                obj->mm.placements = &i915->mm.regions[mr->id];
                obj->mm.n_placements = 1;
        } else {
                obj->mm.placements = placements;
                obj->mm.n_placements = n_placements;
        }
}

/*
 * Expose @obj to userspace: create a handle and report the final size.
 * Consumes the allocation reference in all cases -- on handle-creation
 * failure the object is released here, so the caller must not touch
 * @obj after this returns.
 */
static int i915_gem_publish(struct drm_i915_gem_object *obj,
                            struct drm_file *file,
                            u64 *size_p,
                            u32 *handle_p)
{
        /* Sample the size before dropping our reference below */
        u64 size = obj->base.size;
        int ret;

        ret = drm_gem_handle_create(file, &obj->base, handle_p);
        /* drop reference from allocate - handle holds it now */
        i915_gem_object_put(obj);
        if (ret)
                return ret;

        *size_p = size;
        return 0;
}

/*
 * Size and initialise the backing store for @obj in its first placement
 * region. @size is rounded up to the largest min_page_size across all
 * placements (see object_max_page_size()) so the object remains valid
 * wherever it ends up.
 */
static int
i915_gem_setup(struct drm_i915_gem_object *obj, u64 size)
{
        struct intel_memory_region *mr = obj->mm.placements[0];
        unsigned int flags;
        int ret;

        size = round_up(size, object_max_page_size(obj));
        /* Rejects a zero-sized request (and a round_up() that wrapped to 0) */
        if (size == 0)
                return -EINVAL;

        /* For most of the ABI (e.g. mmap) we think in system pages */
        GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

        if (i915_gem_object_size_2big(size))
                return -E2BIG;

        /*
         * For now resort to CPU based clearing for device local-memory, in the
         * near future this will use the blitter engine for accelerated, GPU
         * based clearing.
         */
        flags = 0;
        if (mr->type == INTEL_MEMORY_LOCAL)
                flags = I915_BO_ALLOC_CPU_CLEAR;

        ret = mr->ops->init_object(mr, obj, size, flags);
        if (ret)
                return ret;

        GEM_BUG_ON(size != obj->base.size);

        trace_i915_gem_object_create(obj);
        return 0;
}

/*
 * Dumb-buffer (scanout) allocation: derive pitch/size from the requested
 * geometry and create a single-placement object -- local memory when the
 * device has it, system memory otherwise. Results are reported back
 * through @args.
 */
int
i915_gem_dumb_create(struct drm_file *file,
                     struct drm_device *dev,
                     struct drm_mode_create_dumb *args)
{
        struct drm_i915_gem_object *obj;
        struct intel_memory_region *mr;
        enum intel_memory_type mem_type;
        int cpp = DIV_ROUND_UP(args->bpp, 8);
        u32 format;
        int ret;

        switch (cpp) {
        case 1:
                format = DRM_FORMAT_C8;
                break;
        case 2:
                format = DRM_FORMAT_RGB565;
                break;
        case 4:
                format = DRM_FORMAT_XRGB8888;
                break;
        default:
                return -EINVAL;
        }

        /* have to work out size/pitch and return them */
        args->pitch = ALIGN(args->width * cpp, 64);

        /* align stride to page size so that we can remap */
        if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
                                                    DRM_FORMAT_MOD_LINEAR))
                args->pitch = ALIGN(args->pitch, 4096);

        /* A pitch smaller than the width implies the u32 math above wrapped */
        if (args->pitch < args->width)
                return -EINVAL;

        /* u32 x u32 widened to u64, so this product cannot overflow */
        args->size = mul_u32_u32(args->pitch, args->height);

        mem_type = INTEL_MEMORY_SYSTEM;
        if (HAS_LMEM(to_i915(dev)))
                mem_type = INTEL_MEMORY_LOCAL;

        obj = i915_gem_object_alloc();
        if (!obj)
                return -ENOMEM;

        mr = intel_memory_region_by_type(to_i915(dev), mem_type);
        object_set_placements(obj, &mr, 1);

        ret = i915_gem_setup(obj, args->size);
        if (ret)
                goto object_free;

        /* i915_gem_publish() consumes our reference, success or failure */
        return i915_gem_publish(obj, file, &args->size, &args->handle);

object_free:
        i915_gem_object_free(obj);
        return ret;
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_create *args = data;
        struct drm_i915_gem_object *obj;
        struct intel_memory_region *mr;
        int ret;

        i915_gem_flush_free_objects(i915);

        obj = i915_gem_object_alloc();
        if (!obj)
                return -ENOMEM;

        /* The legacy create ioctl only ever allocates from system memory */
        mr = intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
        object_set_placements(obj, &mr, 1);

        ret = i915_gem_setup(obj, args->size);
        if (ret)
                goto object_free;

        /* i915_gem_publish() consumes our reference, success or failure */
        return i915_gem_publish(obj, file, &args->size, &args->handle);

object_free:
        i915_gem_object_free(obj);
        return ret;
}

/* Per-call state threaded through the create_ext user-extension chain */
struct create_ext {
        struct drm_i915_private *i915;
        /* the object being configured by the extensions */
        struct drm_i915_gem_object *vanilla_object;
};

/*
 * Render a placement list into @buf for debug output; stops silently
 * once @size is exhausted.
 */
static void repr_placements(char *buf, size_t size,
                            struct intel_memory_region **placements,
                            int n_placements)
{
        int i;

        buf[0] = '\0';

        for (i = 0; i < n_placements; i++) {
                struct intel_memory_region *mr = placements[i];
                int r;

                r = snprintf(buf, size, "\n %s -> { class: %d, inst: %d }",
                             mr->name, mr->type, mr->instance);
                /*
                 * NOTE(review): r (int) is promoted for the unsigned
                 * compare, so a negative snprintf() error also bails out
                 * here -- confirm that is the intent.
                 */
                if (r >= size)
                        return;

                buf += r;
                size -= r;
        }
}

/*
 * I915_GEM_CREATE_EXT_MEMORY_REGIONS handler: validate the userspace
 * placement list and install it on the object under construction.
 */
static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
                          struct create_ext *ext_data)
{
        struct drm_i915_private *i915 = ext_data->i915;
        struct drm_i915_gem_memory_class_instance __user *uregions =
                u64_to_user_ptr(args->regions);
        struct drm_i915_gem_object *obj = ext_data->vanilla_object;
        struct intel_memory_region **placements;
        u32 mask;
        int i, ret = 0;

        /* Report every header problem before bailing out */
        if (args->pad) {
                drm_dbg(&i915->drm, "pad should be zero\n");
                ret = -EINVAL;
        }

        if (!args->num_regions) {
                drm_dbg(&i915->drm, "num_regions is zero\n");
                ret = -EINVAL;
        }

        if (args->num_regions > ARRAY_SIZE(i915->mm.regions)) {
                drm_dbg(&i915->drm, "num_regions is too large\n");
                ret = -EINVAL;
        }

        if (ret)
                return ret;

        placements = kmalloc_array(args->num_regions,
                                   sizeof(struct intel_memory_region *),
                                   GFP_KERNEL);
        if (!placements)
                return -ENOMEM;

        /* mask of region ids seen so far, used to reject duplicates */
        mask = 0;
        for (i = 0; i < args->num_regions; i++) {
                struct drm_i915_gem_memory_class_instance region;
                struct intel_memory_region *mr;

                if (copy_from_user(&region, uregions, sizeof(region))) {
                        ret = -EFAULT;
                        goto out_free;
                }

                mr = intel_memory_region_lookup(i915,
                                                region.memory_class,
                                                region.memory_instance);
                if (!mr || mr->private) {
                        drm_dbg(&i915->drm, "Device is missing region { class: %d, inst: %d } at index = %d\n",
                                region.memory_class, region.memory_instance, i);
                        ret = -EINVAL;
                        goto out_dump;
                }

                if (mask & BIT(mr->id)) {
                        drm_dbg(&i915->drm, "Found duplicate placement %s -> { class: %d, inst: %d } at index = %d\n",
                                mr->name, region.memory_class,
                                region.memory_instance, i);
                        ret = -EINVAL;
                        goto out_dump;
                }

                placements[i] = mr;
                mask |= BIT(mr->id);

                ++uregions;
        }

        /* The extension may only be supplied once per object */
        if (obj->mm.placements) {
                ret = -EINVAL;
                goto out_dump;
        }

        object_set_placements(obj, placements, args->num_regions);
        /*
         * For a single region object_set_placements() points at i915's
         * own region table instead of keeping our array, so free it;
         * otherwise ownership has transferred to the object.
         */
        if (args->num_regions == 1)
                kfree(placements);

        return 0;

out_dump:
        if (1) {
                char buf[256];

                if (obj->mm.placements) {
                        repr_placements(buf,
                                        sizeof(buf),
                                        obj->mm.placements,
                                        obj->mm.n_placements);
                        drm_dbg(&i915->drm,
                                "Placements were already set in previous EXT. Existing placements: %s\n",
                                buf);
                }

                /* Only the first i entries were validated before failing */
                repr_placements(buf, sizeof(buf), placements, i);
                drm_dbg(&i915->drm, "New placements(so far validated): %s\n", buf);
        }

out_free:
        kfree(placements);
        return ret;
}

/*
 * User-extension entry point: copy in the extension block and hand it to
 * set_placements(). Only available when the unstable fake-LMEM Kconfig
 * option is enabled.
 */
static int ext_set_placements(struct i915_user_extension __user *base,
                              void *data)
{
        struct drm_i915_gem_create_ext_memory_regions ext;

        if (!IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM))
                return -ENODEV;

        if (copy_from_user(&ext, base, sizeof(ext)))
                return -EFAULT;

        return set_placements(&ext, data);
}

/* Dispatch table for i915_user_extensions(), indexed by extension id */
static const i915_user_extension_fn create_extensions[] = {
        [I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements,
};

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_create_ext *args = data;
        struct create_ext ext_data = { .i915 = i915 };
        struct intel_memory_region **placements_ext;
        struct drm_i915_gem_object *obj;
        int ret;

        if (args->flags)
                return -EINVAL;

        i915_gem_flush_free_objects(i915);

        obj = i915_gem_object_alloc();
        if (!obj)
                return -ENOMEM;

        ext_data.vanilla_object = obj;
        ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
                                   create_extensions,
                                   ARRAY_SIZE(create_extensions),
                                   &ext_data);
        /*
         * Capture the extension-installed array before the error check so
         * the failure path below can release it even when the extension
         * chain only partially succeeded.
         */
        placements_ext = obj->mm.placements;
        if (ret)
                goto object_free;

        /* No placement extension supplied: default to system memory */
        if (!placements_ext) {
                struct intel_memory_region *mr =
                        intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);

                object_set_placements(obj, &mr, 1);
        }

        ret = i915_gem_setup(obj, args->size);
        if (ret)
                goto object_free;

        /* i915_gem_publish() consumes our reference, success or failure */
        return i915_gem_publish(obj, file, &args->size, &args->handle);

object_free:
        /*
         * A single placement points at i915's own region table, not an
         * allocation of ours (see object_set_placements()); only a
         * multi-placement array is freed here.
         */
        if (obj->mm.n_placements > 1)
                kfree(placements_ext);
        i915_gem_object_free(obj);
        return ret;
}