1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include "intel_memory_region.h"
7 #include "i915_gem_region.h"
8 #include "i915_drv.h"
9 #include "i915_trace.h"
10 
/**
 * i915_gem_object_init_memory_region - Associate an object with a memory region
 * @obj: The GEM object.
 * @mem: The region backing the object.
 *
 * Records @mem as the object's backing region and links the object onto the
 * region's object list under the region's objects lock, making it visible to
 * region iterators such as i915_gem_process_region().
 */
void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
					struct intel_memory_region *mem)
{
	/* Published before taking the lock; the region is only considered
	 * stable under the object lock — see i915_gem_process_region(). */
	obj->mm.region = mem;

	mutex_lock(&mem->objects.lock);
	list_add(&obj->mm.region_link, &mem->objects.list);
	mutex_unlock(&mem->objects.lock);
}
20 
/**
 * i915_gem_object_release_memory_region - Detach an object from its region
 * @obj: The GEM object.
 *
 * Unlinks the object from its backing region's object list under the
 * region's objects lock. Counterpart of
 * i915_gem_object_init_memory_region(). Note that obj->mm.region itself is
 * left untouched here.
 */
void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mem = obj->mm.region;

	mutex_lock(&mem->objects.lock);
	list_del(&obj->mm.region_link);
	mutex_unlock(&mem->objects.lock);
}
29 
30 struct drm_i915_gem_object *
31 i915_gem_object_create_region(struct intel_memory_region *mem,
32 			      resource_size_t size,
33 			      resource_size_t page_size,
34 			      unsigned int flags)
35 {
36 	struct drm_i915_gem_object *obj;
37 	resource_size_t default_page_size;
38 	int err;
39 
40 	/*
41 	 * NB: Our use of resource_size_t for the size stems from using struct
42 	 * resource for the mem->region. We might need to revisit this in the
43 	 * future.
44 	 */
45 
46 	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
47 
48 	if (!mem)
49 		return ERR_PTR(-ENODEV);
50 
51 	default_page_size = mem->min_page_size;
52 	if (page_size)
53 		default_page_size = page_size;
54 
55 	GEM_BUG_ON(!is_power_of_2_u64(default_page_size));
56 	GEM_BUG_ON(default_page_size < PAGE_SIZE);
57 
58 	size = round_up(size, default_page_size);
59 
60 	GEM_BUG_ON(!size);
61 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
62 
63 	if (i915_gem_object_size_2big(size))
64 		return ERR_PTR(-E2BIG);
65 
66 	obj = i915_gem_object_alloc();
67 	if (!obj)
68 		return ERR_PTR(-ENOMEM);
69 
70 	err = mem->ops->init_object(mem, obj, size, page_size, flags);
71 	if (err)
72 		goto err_object_free;
73 
74 	trace_i915_gem_object_create(obj);
75 	return obj;
76 
77 err_object_free:
78 	i915_gem_object_free(obj);
79 	return ERR_PTR(err);
80 }
81 
82 /**
83  * i915_gem_process_region - Iterate over all objects of a region using ops
84  * to process and optionally skip objects
85  * @mr: The memory region
86  * @apply: ops and private data
87  *
88  * This function can be used to iterate over the regions object list,
89  * checking whether to skip objects, and, if not, lock the objects and
90  * process them using the supplied ops. Note that this function temporarily
91  * removes objects from the region list while iterating, so that if run
92  * concurrently with itself may not iterate over all objects.
93  *
94  * Return: 0 if successful, negative error code on failure.
95  */
96 int i915_gem_process_region(struct intel_memory_region *mr,
97 			    struct i915_gem_apply_to_region *apply)
98 {
99 	const struct i915_gem_apply_to_region_ops *ops = apply->ops;
100 	struct drm_i915_gem_object *obj;
101 	struct list_head still_in_list;
102 	int ret = 0;
103 
104 	/*
105 	 * In the future, a non-NULL apply->ww could mean the caller is
106 	 * already in a locking transaction and provides its own context.
107 	 */
108 	GEM_WARN_ON(apply->ww);
109 
110 	INIT_LIST_HEAD(&still_in_list);
111 	mutex_lock(&mr->objects.lock);
112 	for (;;) {
113 		struct i915_gem_ww_ctx ww;
114 
115 		obj = list_first_entry_or_null(&mr->objects.list, typeof(*obj),
116 					       mm.region_link);
117 		if (!obj)
118 			break;
119 
120 		list_move_tail(&obj->mm.region_link, &still_in_list);
121 		if (!kref_get_unless_zero(&obj->base.refcount))
122 			continue;
123 
124 		/*
125 		 * Note: Someone else might be migrating the object at this
126 		 * point. The object's region is not stable until we lock
127 		 * the object.
128 		 */
129 		mutex_unlock(&mr->objects.lock);
130 		apply->ww = &ww;
131 		for_i915_gem_ww(&ww, ret, apply->interruptible) {
132 			ret = i915_gem_object_lock(obj, apply->ww);
133 			if (ret)
134 				continue;
135 
136 			if (obj->mm.region == mr)
137 				ret = ops->process_obj(apply, obj);
138 			/* Implicit object unlock */
139 		}
140 
141 		i915_gem_object_put(obj);
142 		mutex_lock(&mr->objects.lock);
143 		if (ret)
144 			break;
145 	}
146 	list_splice_tail(&still_in_list, &mr->objects.list);
147 	mutex_unlock(&mr->objects.lock);
148 
149 	return ret;
150 }
151