// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <uapi/drm/i915_drm.h>

#include "intel_memory_region.h"
#include "i915_gem_region.h"
#include "i915_drv.h"
#include "i915_trace.h"

void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
					struct intel_memory_region *mem)
{
	obj->mm.region = mem;

	mutex_lock(&mem->objects.lock);
	list_add(&obj->mm.region_link, &mem->objects.list);
	mutex_unlock(&mem->objects.lock);
}

void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mem = obj->mm.region;

	mutex_lock(&mem->objects.lock);
	list_del(&obj->mm.region_link);
	mutex_unlock(&mem->objects.lock);
}

struct drm_i915_gem_object *
i915_gem_object_create_region(struct intel_memory_region *mem,
			      resource_size_t size,
			      resource_size_t page_size,
			      unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	resource_size_t default_page_size;
	int err;

	/*
	 * NB: Our use of resource_size_t for the size stems from using struct
	 * resource for the mem->region. We might need to revisit this in the
	 * future.
	 */

	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);

	if (WARN_ON_ONCE(flags & I915_BO_ALLOC_GPU_ONLY &&
			 (flags & I915_BO_ALLOC_CPU_CLEAR ||
			  flags & I915_BO_ALLOC_PM_EARLY)))
		return ERR_PTR(-EINVAL);

	if (!mem)
		return ERR_PTR(-ENODEV);

	default_page_size = mem->min_page_size;
	if (page_size)
		default_page_size = page_size;

	GEM_BUG_ON(!is_power_of_2_u64(default_page_size));
	GEM_BUG_ON(default_page_size < PAGE_SIZE);

	size = round_up(size, default_page_size);

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));

	if (i915_gem_object_size_2big(size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	/*
	 * Anything smaller than the min_page_size can't be freely inserted into
	 * the GTT, due to alignment restrictions. For such special objects,
	 * make sure we force memcpy-based suspend-resume. In the future we can
	 * revisit this, either by allowing special mis-aligned objects in the
	 * migration path, or by mapping all of LMEM upfront using cheap 1G
	 * GTT entries.
	 */
	if (default_page_size < mem->min_page_size)
		flags |= I915_BO_ALLOC_PM_EARLY;

	err = mem->ops->init_object(mem, obj, size, page_size, flags);
	if (err)
		goto err_object_free;

	trace_i915_gem_object_create(obj);
	return obj;

err_object_free:
	i915_gem_object_free(obj);
	return ERR_PTR(err);
}
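
/*
 * Editor's usage sketch (illustrative, not part of the driver): a minimal
 * caller of i915_gem_object_create_region(), modelled on thin wrappers such
 * as i915_gem_object_create_lmem(). The helper name create_lmem_object() is
 * invented, and the use of intel_memory_region_by_type()/INTEL_MEMORY_LOCAL
 * is an assumption about the surrounding i915 headers. Passing page_size == 0
 * means the region's min_page_size is used, and flags must stay within the
 * I915_BO_ALLOC_FLAGS mask checked above.
 *
 *	static struct drm_i915_gem_object *
 *	create_lmem_object(struct drm_i915_private *i915, resource_size_t size)
 *	{
 *		struct intel_memory_region *mem =
 *			intel_memory_region_by_type(i915, INTEL_MEMORY_LOCAL);
 *
 *		return i915_gem_object_create_region(mem, size, 0, 0);
 *	}
 */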

/**
 * i915_gem_process_region - Iterate over all objects of a region using ops
 * to process and optionally skip objects
 * @mr: The memory region
 * @apply: ops and private data
 *
 * This function can be used to iterate over the region's object list,
 * checking whether to skip objects, and, if not, lock the objects and
 * process them using the supplied ops. Note that this function temporarily
 * removes objects from the region list while iterating, so if it is run
 * concurrently with itself it may not iterate over all objects.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int i915_gem_process_region(struct intel_memory_region *mr,
			    struct i915_gem_apply_to_region *apply)
{
	const struct i915_gem_apply_to_region_ops *ops = apply->ops;
	struct drm_i915_gem_object *obj;
	struct list_head still_in_list;
	int ret = 0;

	/*
	 * In the future, a non-NULL apply->ww could mean the caller is
	 * already in a locking transaction and provides its own context.
	 */
	GEM_WARN_ON(apply->ww);

	INIT_LIST_HEAD(&still_in_list);
	mutex_lock(&mr->objects.lock);
	for (;;) {
		struct i915_gem_ww_ctx ww;

		obj = list_first_entry_or_null(&mr->objects.list, typeof(*obj),
					       mm.region_link);
		if (!obj)
			break;

		list_move_tail(&obj->mm.region_link, &still_in_list);
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		/*
		 * Note: Someone else might be migrating the object at this
		 * point. The object's region is not stable until we lock
		 * the object.
		 */
		mutex_unlock(&mr->objects.lock);
		apply->ww = &ww;
		for_i915_gem_ww(&ww, ret, apply->interruptible) {
			ret = i915_gem_object_lock(obj, apply->ww);
			if (ret)
				continue;

			if (obj->mm.region == mr)
				ret = ops->process_obj(apply, obj);
			/* Implicit object unlock */
		}

		i915_gem_object_put(obj);
		mutex_lock(&mr->objects.lock);
		if (ret)
			break;
	}
	list_splice_tail(&still_in_list, &mr->objects.list);
	mutex_unlock(&mr->objects.lock);

	return ret;
}
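
/*
 * Editor's usage sketch (illustrative, not part of the driver): one way a
 * caller might drive i915_gem_process_region(), loosely following the
 * embed-the-apply-struct pattern used by the suspend/resume backup helpers.
 * The names region_counter, count_obj(), count_ops and
 * count_region_objects() are invented for illustration. process_obj() runs
 * with the object locked, and a non-zero return stops the walk.
 *
 *	struct region_counter {
 *		struct i915_gem_apply_to_region base;
 *		unsigned long count;
 *	};
 *
 *	static int count_obj(struct i915_gem_apply_to_region *apply,
 *			     struct drm_i915_gem_object *obj)
 *	{
 *		struct region_counter *rc = container_of(apply, typeof(*rc), base);
 *
 *		rc->count++;
 *		return 0;
 *	}
 *
 *	static const struct i915_gem_apply_to_region_ops count_ops = {
 *		.process_obj = count_obj,
 *	};
 *
 *	static int count_region_objects(struct intel_memory_region *mr,
 *					unsigned long *count)
 *	{
 *		struct region_counter rc = {
 *			.base = { .ops = &count_ops, .interruptible = true },
 *		};
 *		int ret;
 *
 *		ret = i915_gem_process_region(mr, &rc.base);
 *		*count = rc.count;
 *		return ret;
 *	}
 */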