// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "pxp/intel_pxp.h"

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"

static u32 object_max_page_size(struct intel_memory_region **placements,
				unsigned int n_placements)
{
	u32 max_page_size = 0;
	int i;

	for (i = 0; i < n_placements; i++) {
		struct intel_memory_region *mr = placements[i];

		GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
		max_page_size = max_t(u32, max_page_size, mr->min_page_size);
	}

	GEM_BUG_ON(!max_page_size);
	return max_page_size;
}

static int object_set_placements(struct drm_i915_gem_object *obj,
				 struct intel_memory_region **placements,
				 unsigned int n_placements)
{
	struct intel_memory_region **arr;
	unsigned int i;

	GEM_BUG_ON(!n_placements);

	/*
	 * For the common case of one memory region, skip storing an
	 * allocated array and just point at the region directly.
	 */
	if (n_placements == 1) {
		struct intel_memory_region *mr = placements[0];
		struct drm_i915_private *i915 = mr->i915;

		obj->mm.placements = &i915->mm.regions[mr->id];
		obj->mm.n_placements = 1;
	} else {
		arr = kmalloc_array(n_placements,
				    sizeof(struct intel_memory_region *),
				    GFP_KERNEL);
		if (!arr)
			return -ENOMEM;

		for (i = 0; i < n_placements; i++)
			arr[i] = placements[i];

		obj->mm.placements = arr;
		obj->mm.n_placements = n_placements;
	}

	return 0;
}

static int i915_gem_publish(struct drm_i915_gem_object *obj,
			    struct drm_file *file,
			    u64 *size_p,
			    u32 *handle_p)
{
	u64 size = obj->base.size;
	int ret;

	ret = drm_gem_handle_create(file, &obj->base, handle_p);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*size_p = size;
	return 0;
}

static struct drm_i915_gem_object *
__i915_gem_object_create_user_ext(struct drm_i915_private *i915, u64 size,
				  struct intel_memory_region **placements,
				  unsigned int n_placements,
				  unsigned int ext_flags)
{
	struct intel_memory_region *mr = placements[0];
	struct drm_i915_gem_object *obj;
	unsigned int flags;
	int ret;

	i915_gem_flush_free_objects(i915);

	size = round_up(size, object_max_page_size(placements, n_placements));
	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* For most of the ABI (e.g. mmap) we think in system pages */
	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	if (i915_gem_object_size_2big(size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = object_set_placements(obj, placements, n_placements);
	if (ret)
		goto object_free;

	/*
	 * I915_BO_ALLOC_USER will make sure the object is cleared before
	 * any user access.
	 */
	flags = I915_BO_ALLOC_USER;

	ret = mr->ops->init_object(mr, obj, size, 0, flags);
	if (ret)
		goto object_free;

	GEM_BUG_ON(size != obj->base.size);

	/* Add any flag set by create_ext options */
	obj->flags |= ext_flags;

	trace_i915_gem_object_create(obj);
	return obj;

object_free:
	if (obj->mm.n_placements > 1)
		kfree(obj->mm.placements);
	i915_gem_object_free(obj);
	return ERR_PTR(ret);
}

/**
 * __i915_gem_object_create_user - Creates a new object using the same path as
 *                                 DRM_I915_GEM_CREATE_EXT
 * @i915: i915 private
 * @size: size of the buffer, in bytes
 * @placements: possible placement regions, in priority order
 * @n_placements: number of possible placement regions
 *
 * This function is exposed primarily for selftests and does very little
 * error checking. It is assumed that the set of placement regions has
 * already been verified to be valid.
 */
struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
			      struct intel_memory_region **placements,
			      unsigned int n_placements)
{
	return __i915_gem_object_create_user_ext(i915, size, placements,
						 n_placements, 0);
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	struct drm_i915_gem_object *obj;
	struct intel_memory_region *mr;
	enum intel_memory_type mem_type;
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	u32 format;

	switch (cpp) {
	case 1:
		format = DRM_FORMAT_C8;
		break;
	case 2:
		format = DRM_FORMAT_RGB565;
		break;
	case 4:
		format = DRM_FORMAT_XRGB8888;
		break;
	default:
		return -EINVAL;
	}

	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * cpp, 64);

	/* align stride to page size so that we can remap */
	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
						    DRM_FORMAT_MOD_LINEAR))
		args->pitch = ALIGN(args->pitch, 4096);

	if (args->pitch < args->width)
		return -EINVAL;

	args->size = mul_u32_u32(args->pitch, args->height);

	mem_type = INTEL_MEMORY_SYSTEM;
	if (HAS_LMEM(to_i915(dev)))
		mem_type = INTEL_MEMORY_LOCAL;

	mr = intel_memory_region_by_type(to_i915(dev), mem_type);

	obj = __i915_gem_object_create_user(to_i915(dev), args->size, &mr, 1);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return i915_gem_publish(obj, file, &args->size, &args->handle);
}
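
/*
 * Worked example of the dumb buffer sizing above (illustrative numbers only):
 * a 1920x1080 request at 32 bpp gives cpp = 4, pitch = ALIGN(1920 * 4, 64) =
 * 7680 bytes and size = 7680 * 1080 = 8294400 bytes. The size is then rounded
 * up to the placement's minimum page size in __i915_gem_object_create_user()
 * and the final object size is reported back to userspace through args->size
 * in i915_gem_publish().
 */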

/**
 * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_create *args = data;
	struct drm_i915_gem_object *obj;
	struct intel_memory_region *mr;

	mr = intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);

	obj = __i915_gem_object_create_user(i915, args->size, &mr, 1);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return i915_gem_publish(obj, file, &args->size, &args->handle);
}

struct create_ext {
	struct drm_i915_private *i915;
	struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
	unsigned int n_placements;
	unsigned long flags;
};

static void repr_placements(char *buf, size_t size,
			    struct intel_memory_region **placements,
			    int n_placements)
{
	int i;

	buf[0] = '\0';

	for (i = 0; i < n_placements; i++) {
		struct intel_memory_region *mr = placements[i];
		int r;

		r = snprintf(buf, size, "\n %s -> { class: %d, inst: %d }",
			     mr->name, mr->type, mr->instance);
		if (r >= size)
			return;

		buf += r;
		size -= r;
	}
}

static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
			  struct create_ext *ext_data)
{
	struct drm_i915_private *i915 = ext_data->i915;
	struct drm_i915_gem_memory_class_instance __user *uregions =
		u64_to_user_ptr(args->regions);
	struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
	u32 mask;
	int i, ret = 0;

	if (args->pad) {
		drm_dbg(&i915->drm, "pad should be zero\n");
		ret = -EINVAL;
	}

	if (!args->num_regions) {
		drm_dbg(&i915->drm, "num_regions is zero\n");
		ret = -EINVAL;
	}

	BUILD_BUG_ON(ARRAY_SIZE(i915->mm.regions) != ARRAY_SIZE(placements));
	BUILD_BUG_ON(ARRAY_SIZE(ext_data->placements) != ARRAY_SIZE(placements));
	if (args->num_regions > ARRAY_SIZE(i915->mm.regions)) {
		drm_dbg(&i915->drm, "num_regions is too large\n");
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	mask = 0;
	for (i = 0; i < args->num_regions; i++) {
		struct drm_i915_gem_memory_class_instance region;
		struct intel_memory_region *mr;

		if (copy_from_user(&region, uregions, sizeof(region)))
			return -EFAULT;

		mr = intel_memory_region_lookup(i915,
						region.memory_class,
						region.memory_instance);
		if (!mr || mr->private) {
			drm_dbg(&i915->drm, "Device is missing region { class: %d, inst: %d } at index = %d\n",
				region.memory_class, region.memory_instance, i);
			ret = -EINVAL;
			goto out_dump;
		}

		if (mask & BIT(mr->id)) {
			drm_dbg(&i915->drm, "Found duplicate placement %s -> { class: %d, inst: %d } at index = %d\n",
				mr->name, region.memory_class,
				region.memory_instance, i);
			ret = -EINVAL;
			goto out_dump;
		}

		placements[i] = mr;
		mask |= BIT(mr->id);

		++uregions;
	}

	if (ext_data->n_placements) {
		ret = -EINVAL;
		goto out_dump;
	}

	ext_data->n_placements = args->num_regions;
	for (i = 0; i < args->num_regions; i++)
		ext_data->placements[i] = placements[i];

	return 0;

out_dump:
	if (1) {
		char buf[256];

		if (ext_data->n_placements) {
			repr_placements(buf,
					sizeof(buf),
					ext_data->placements,
					ext_data->n_placements);
			drm_dbg(&i915->drm,
				"Placements were already set in previous EXT. Existing placements: %s\n",
				buf);
		}

		repr_placements(buf, sizeof(buf), placements, i);
		drm_dbg(&i915->drm, "New placements(so far validated): %s\n", buf);
	}

	return ret;
}

static int ext_set_placements(struct i915_user_extension __user *base,
			      void *data)
{
	struct drm_i915_gem_create_ext_memory_regions ext;

	if (copy_from_user(&ext, base, sizeof(ext)))
		return -EFAULT;

	return set_placements(&ext, data);
}

static int ext_set_protected(struct i915_user_extension __user *base, void *data)
{
	struct drm_i915_gem_create_ext_protected_content ext;
	struct create_ext *ext_data = data;

	if (copy_from_user(&ext, base, sizeof(ext)))
		return -EFAULT;

	if (ext.flags)
		return -EINVAL;

	if (!intel_pxp_is_enabled(&ext_data->i915->gt.pxp))
		return -ENODEV;

	ext_data->flags |= I915_BO_PROTECTED;

	return 0;
}

static const i915_user_extension_fn create_extensions[] = {
	[I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements,
	[I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected,
};

/**
 * i915_gem_create_ext_ioctl - Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_create_ext *args = data;
	struct create_ext ext_data = { .i915 = i915 };
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->flags)
		return -EINVAL;

	ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   create_extensions,
				   ARRAY_SIZE(create_extensions),
				   &ext_data);
	if (ret)
		return ret;

	if (!ext_data.n_placements) {
		ext_data.placements[0] =
			intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
		ext_data.n_placements = 1;
	}

	obj = __i915_gem_object_create_user_ext(i915, args->size,
						ext_data.placements,
						ext_data.n_placements,
						ext_data.flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	return i915_gem_publish(obj, file, &args->size, &args->handle);
}
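
/*
 * Illustrative userspace sketch (not part of this file's build) of creating
 * an object through DRM_IOCTL_I915_GEM_CREATE_EXT with the memory regions
 * extension, asking for device local memory with a system memory fallback.
 * The uapi structures come from include/uapi/drm/i915_drm.h; error handling
 * is omitted and the 16 MiB size is an arbitrary example value:
 *
 *	struct drm_i915_gem_memory_class_instance regions[] = {
 *		{ .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
 *		{ .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
 *	};
 *	struct drm_i915_gem_create_ext_memory_regions ext_regions = {
 *		.base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
 *		.num_regions = sizeof(regions) / sizeof(regions[0]),
 *		.regions = (uintptr_t)regions,
 *	};
 *	struct drm_i915_gem_create_ext create_ext = {
 *		.size = 16 * 1024 * 1024,
 *		.extensions = (uintptr_t)&ext_regions,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
 *
 * On success create_ext.handle names the new object and create_ext.size
 * holds the final, page-rounded size. Additional extensions (e.g. protected
 * content) can be chained through ext_regions.base.next_extension.
 */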