// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "intel_memory_region.h"
#include "i915_gem_region.h"
#include "i915_drv.h"
#include "i915_trace.h"

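/*
 * i915_gem_object_put_pages_buddy - return an object's buddy blocks to its
 * memory region and free the scatterlist that described them.
 */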
void
i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
				struct sg_table *pages)
{
	__intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);

	obj->mm.dirty = false;
	sg_free_table(pages);
	kfree(pages);
}

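/*
 * i915_gem_object_get_pages_buddy - allocate backing store for the object
 * from its region's buddy allocator and build an sg_table describing the
 * resulting blocks.
 */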
int
i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
{
	const u64 max_segment = i915_sg_segment_size();
	struct intel_memory_region *mem = obj->mm.region;
	struct list_head *blocks = &obj->mm.blocks;
	resource_size_t size = obj->base.size;
	resource_size_t prev_end;
	struct i915_buddy_block *block;
	unsigned int flags;
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, size >> PAGE_SHIFT, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

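	/*
	 * Ask the buddy allocator to honour the region's minimum page size,
	 * and for a single physically contiguous allocation if the object
	 * was created with I915_BO_ALLOC_CONTIGUOUS.
	 */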
	flags = I915_ALLOC_MIN_PAGE_SIZE;
	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
		flags |= I915_ALLOC_CONTIGUOUS;

	ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
	if (ret)
		goto err_free_sg;

	GEM_BUG_ON(list_empty(blocks));

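	/*
	 * Initialise the sg cursor; prev_end starts out of range so that the
	 * first block always opens a new segment.
	 */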
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	prev_end = (resource_size_t)-1;

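	/*
	 * Walk the allocated blocks, coalescing physically contiguous blocks
	 * into a single sg entry where possible, while capping each entry at
	 * the maximum scatterlist segment size.
	 */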
	list_for_each_entry(block, blocks, link) {
		u64 block_size, offset;

		block_size = min_t(u64, size,
				   i915_buddy_block_size(&mem->mm, block));
		offset = i915_buddy_block_offset(block);

		while (block_size) {
			u64 len;

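			/*
			 * Start a new segment if this block is not contiguous
			 * with the previous one, or if the current segment is
			 * already full.
			 */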
			if (offset != prev_end || sg->length >= max_segment) {
				if (st->nents) {
					sg_page_sizes |= sg->length;
					sg = __sg_next(sg);
				}

				sg_dma_address(sg) = mem->region.start + offset;
				sg_dma_len(sg) = 0;
				sg->length = 0;
				st->nents++;
			}

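			/* Fill the current segment, up to the segment limit. */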
			len = min(block_size, max_segment - sg->length);
			sg->length += len;
			sg_dma_len(sg) += len;

			offset += len;
			block_size -= len;

			prev_end = offset;
		}
	}

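	/* Close out the final segment and drop any unused sg entries. */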
	sg_page_sizes |= sg->length;
	sg_mark_end(sg);
	i915_sg_trim(st);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err_free_sg:
	sg_free_table(st);
	kfree(st);
	return ret;
}

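/*
 * i915_gem_object_init_memory_region - tie a freshly created object to its
 * backing memory region and place it on that region's object list.
 */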
void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
					struct intel_memory_region *mem,
					unsigned long flags)
{
	INIT_LIST_HEAD(&obj->mm.blocks);
	obj->mm.region = intel_memory_region_get(mem);

	obj->flags |= flags;
	if (obj->base.size <= mem->min_page_size)
		obj->flags |= I915_BO_ALLOC_CONTIGUOUS;

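	/* Volatile objects are tracked separately so that they can be purged. */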
	mutex_lock(&mem->objects.lock);

	if (obj->flags & I915_BO_ALLOC_VOLATILE)
		list_add(&obj->mm.region_link, &mem->objects.purgeable);
	else
		list_add(&obj->mm.region_link, &mem->objects.list);

	mutex_unlock(&mem->objects.lock);
}

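/*
 * i915_gem_object_release_memory_region - undo
 * i915_gem_object_init_memory_region(); remove the object from its region's
 * list and drop the region reference.
 */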
void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mem = obj->mm.region;

	mutex_lock(&mem->objects.lock);
	list_del(&obj->mm.region_link);
	mutex_unlock(&mem->objects.lock);

	intel_memory_region_put(mem);
}

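/*
 * i915_gem_object_create_region - create a GEM object backed by the given
 * memory region. As a rough, illustrative sketch (the 2MiB size here is an
 * arbitrary example), a caller might do:
 *
 *	obj = i915_gem_object_create_region(mem, SZ_2M,
 *					    I915_BO_ALLOC_CONTIGUOUS);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */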
struct drm_i915_gem_object *
i915_gem_object_create_region(struct intel_memory_region *mem,
			      resource_size_t size,
			      unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	int err;

	/*
	 * NB: Our use of resource_size_t for the size stems from using struct
	 * resource for the mem->region. We might need to revisit this in the
	 * future.
	 */

	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);

	if (!mem)
		return ERR_PTR(-ENODEV);

	size = round_up(size, mem->min_page_size);

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));

	/*
	 * XXX: Much of the driver assumes that an object's page count fits in
	 * a 32-bit _signed_ variable. Document that assumption here and catch
	 * any future need to fix it. In the meantime, if you spot such a
	 * local variable, please consider fixing it!
	 */

	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	err = mem->ops->init_object(mem, obj, size, flags);
	if (err)
		goto err_object_free;

	trace_i915_gem_object_create(obj);
	return obj;

err_object_free:
	i915_gem_object_free(obj);
	return ERR_PTR(err);
}