// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "intel_memory_region.h"
#include "i915_gem_region.h"
#include "i915_drv.h"
#include "i915_trace.h"

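/**
 * i915_gem_object_init_memory_region - Link an object to its backing region
 * @obj: The GEM object
 * @mem: The memory region backing the object
 *
 * Sets @obj's backing region and adds the object to the region's object
 * list.
 */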
void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
					struct intel_memory_region *mem)
{
	obj->mm.region = mem;

	mutex_lock(&mem->objects.lock);
	list_add(&obj->mm.region_link, &mem->objects.list);
	mutex_unlock(&mem->objects.lock);
}

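/**
 * i915_gem_object_release_memory_region - Unlink an object from its region
 * @obj: The GEM object
 *
 * Removes @obj from its backing region's object list.
 */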
void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mem = obj->mm.region;

	mutex_lock(&mem->objects.lock);
	list_del(&obj->mm.region_link);
	mutex_unlock(&mem->objects.lock);
}

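/**
 * i915_gem_object_create_region - Create a GEM object backed by a region
 * @mem: The memory region to allocate from
 * @size: Requested size in bytes; rounded up to the page size in use
 * @page_size: Page size override, or 0 to use the region's minimum page size
 * @flags: I915_BO_ALLOC_* flags
 *
 * Return: The new object on success, or an ERR_PTR() encoded error on
 * failure.
 */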
struct drm_i915_gem_object *
i915_gem_object_create_region(struct intel_memory_region *mem,
			      resource_size_t size,
			      resource_size_t page_size,
			      unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	resource_size_t default_page_size;
	int err;

	/*
	 * NB: Our use of resource_size_t for the size stems from using struct
	 * resource for the mem->region. We might need to revisit this in the
	 * future.
	 */

	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);

	if (WARN_ON_ONCE(flags & I915_BO_ALLOC_GPU_ONLY &&
			 (flags & I915_BO_ALLOC_CPU_CLEAR ||
			  flags & I915_BO_ALLOC_PM_EARLY)))
		return ERR_PTR(-EINVAL);

	if (!mem)
		return ERR_PTR(-ENODEV);

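	/* Use the region's minimum page size unless the caller overrides it. */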
	default_page_size = mem->min_page_size;
	if (page_size)
		default_page_size = page_size;

	GEM_BUG_ON(!is_power_of_2_u64(default_page_size));
	GEM_BUG_ON(default_page_size < PAGE_SIZE);

	size = round_up(size, default_page_size);

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));

	if (i915_gem_object_size_2big(size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	/*
	 * Anything smaller than the min_page_size can't be freely inserted
	 * into the GTT, due to alignment restrictions. For such special
	 * objects, make sure we force memcpy-based suspend-resume. In the
	 * future we can revisit this, either by allowing special mis-aligned
	 * objects in the migration path, or by mapping all of LMEM upfront
	 * using cheap 1G GTT entries.
	 */
	if (default_page_size < mem->min_page_size)
		flags |= I915_BO_ALLOC_PM_EARLY;

	err = mem->ops->init_object(mem, obj, size, page_size, flags);
	if (err)
		goto err_object_free;

	trace_i915_gem_object_create(obj);
	return obj;

err_object_free:
	i915_gem_object_free(obj);
	return ERR_PTR(err);
}
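
/*
 * Illustrative sketch (not part of the driver): a caller might allocate a
 * region-backed object as below, passing page_size == 0 to accept the
 * region's minimum page size. The SZ_2M size is an arbitrary example.
 */
#if 0
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_region(mem, SZ_2M, 0,
					    I915_BO_ALLOC_CPU_CLEAR);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
#endif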

/**
 * i915_gem_process_region - Iterate over all objects of a region using ops
 * to process and optionally skip objects
 * @mr: The memory region
 * @apply: ops and private data
 *
 * This function can be used to iterate over the region's object list,
 * checking whether to skip objects, and, if not, lock the objects and
 * process them using the supplied ops. Note that this function temporarily
 * removes objects from the region list while iterating, so if run
 * concurrently with itself, it may not iterate over all objects.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int i915_gem_process_region(struct intel_memory_region *mr,
			    struct i915_gem_apply_to_region *apply)
{
	const struct i915_gem_apply_to_region_ops *ops = apply->ops;
	struct drm_i915_gem_object *obj;
	struct list_head still_in_list;
	int ret = 0;

	/*
	 * In the future, a non-NULL apply->ww could mean the caller is
	 * already in a locking transaction and provides its own context.
	 */
	GEM_WARN_ON(apply->ww);

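	/*
	 * Visited objects are parked on still_in_list while the region lock
	 * is dropped, and spliced back once the walk completes, so the loop
	 * below always makes forward progress.
	 */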
	INIT_LIST_HEAD(&still_in_list);
	mutex_lock(&mr->objects.lock);
	for (;;) {
		struct i915_gem_ww_ctx ww;

		obj = list_first_entry_or_null(&mr->objects.list, typeof(*obj),
					       mm.region_link);
		if (!obj)
			break;

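		/*
		 * Park the object on still_in_list before taking a
		 * reference. A zero refcount means the object is already
		 * being destroyed, so skip it; the release path will unlink
		 * it from still_in_list.
		 */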
		list_move_tail(&obj->mm.region_link, &still_in_list);
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		/*
		 * Note: Someone else might be migrating the object at this
		 * point. The object's region is not stable until we lock
		 * the object.
		 */
		mutex_unlock(&mr->objects.lock);
		apply->ww = &ww;
		for_i915_gem_ww(&ww, ret, apply->interruptible) {
			ret = i915_gem_object_lock(obj, apply->ww);
			if (ret)
				continue;

			if (obj->mm.region == mr)
				ret = ops->process_obj(apply, obj);
			/* Implicit object unlock */
		}

		i915_gem_object_put(obj);
		mutex_lock(&mr->objects.lock);
		if (ret)
			break;
	}
	list_splice_tail(&still_in_list, &mr->objects.list);
	mutex_unlock(&mr->objects.lock);

	return ret;
}
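
/*
 * Illustrative sketch (not part of the driver): callers typically embed
 * struct i915_gem_apply_to_region in a larger context and supply a
 * process_obj callback. All names below are hypothetical.
 */
#if 0
struct count_region {
	struct i915_gem_apply_to_region base;
	unsigned long count;
};

static int count_process_obj(struct i915_gem_apply_to_region *apply,
			     struct drm_i915_gem_object *obj)
{
	struct count_region *cr =
		container_of(apply, struct count_region, base);

	/* The object is locked here and known to belong to the region. */
	cr->count++;
	return 0;
}

static const struct i915_gem_apply_to_region_ops count_ops = {
	.process_obj = count_process_obj,
};

static int count_region_objects(struct intel_memory_region *mr,
				unsigned long *count)
{
	struct count_region cr = {
		.base.ops = &count_ops,
		.base.interruptible = true,
	};
	int err;

	err = i915_gem_process_region(mr, &cr.base);
	*count = cr.count;
	return err;
}
#endif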