// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "intel_memory_region.h"
#include "i915_gem_region.h"
#include "i915_drv.h"
#include "i915_trace.h"

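/**
 * i915_gem_object_put_pages_buddy - release buddy-allocated backing store
 * @obj: the GEM object whose pages are being released
 * @pages: the sg_table previously built by i915_gem_object_get_pages_buddy()
 *
 * Return the object's buddy blocks to their memory region and free the
 * scatterlist that described them.
 */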
void
i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
				struct sg_table *pages)
{
	__intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);

	obj->mm.dirty = false;
	sg_free_table(pages);
	kfree(pages);
}

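/**
 * i915_gem_object_get_pages_buddy - allocate backing store from a region
 * @obj: the GEM object to populate
 *
 * Allocate buddy blocks from the object's memory region and build a
 * scatterlist describing them, coalescing physically contiguous blocks
 * into as few entries as possible.
 *
 * Returns 0 on success, or a negative error code on failure.
 */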
int
i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
{
	const u64 max_segment = i915_sg_segment_size();
	struct intel_memory_region *mem = obj->mm.region;
	struct list_head *blocks = &obj->mm.blocks;
	resource_size_t size = obj->base.size;
	resource_size_t prev_end;
	struct i915_buddy_block *block;
	unsigned int flags;
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	if (sg_alloc_table(st, size >> PAGE_SHIFT, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	flags = I915_ALLOC_MIN_PAGE_SIZE;
	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
		flags |= I915_ALLOC_CONTIGUOUS;

	ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
	if (ret)
		goto err_free_sg;

	GEM_BUG_ON(list_empty(blocks));

	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	prev_end = (resource_size_t)-1;

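	/*
	 * Walk the buddy blocks and fill in the scatterlist. prev_end
	 * starts at an impossible offset so the first block always opens
	 * a fresh sg entry.
	 */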
	list_for_each_entry(block, blocks, link) {
		u64 block_size, offset;

		block_size = min_t(u64, size,
				   i915_buddy_block_size(&mem->mm, block));
		offset = i915_buddy_block_offset(block);

		while (block_size) {
			u64 len;

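			/*
			 * Open a new sg entry whenever this block is not
			 * contiguous with the previous one, or the current
			 * entry has already reached max_segment.
			 */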
			if (offset != prev_end || sg->length >= max_segment) {
				if (st->nents) {
					sg_page_sizes |= sg->length;
					sg = __sg_next(sg);
				}

				sg_dma_address(sg) = mem->region.start + offset;
				sg_dma_len(sg) = 0;
				sg->length = 0;
				st->nents++;
			}

			len = min(block_size, max_segment - sg->length);
			sg->length += len;
			sg_dma_len(sg) += len;

			offset += len;
			block_size -= len;

			prev_end = offset;
		}
	}

	sg_page_sizes |= sg->length;
	sg_mark_end(sg);
	i915_sg_trim(st);

	/* I915_BO_ALLOC_CPU_CLEAR is intended for kernel internal use only */
	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) {
		struct scatterlist *sg;
		unsigned long i;

		for_each_sg(st->sgl, sg, st->nents, i) {
			unsigned int length;
			void __iomem *vaddr;
			dma_addr_t daddr;

			daddr = sg_dma_address(sg);
			daddr -= mem->region.start;
			length = sg_dma_len(sg);

			vaddr = io_mapping_map_wc(&mem->iomap, daddr, length);
			memset64((void __force *)vaddr, 0, length / sizeof(u64));
			io_mapping_unmap(vaddr);
		}

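		/* Ensure the zeroing writes land before the pages are used */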
		wmb();
	}

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err_free_sg:
	sg_free_table(st);
	kfree(st);
	return ret;
}

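/**
 * i915_gem_object_init_memory_region - associate an object with a region
 * @obj: the GEM object
 * @mem: the memory region that will provide the backing store
 *
 * Take a reference on @mem and add the object to the region's object
 * list (or its purgeable list, for volatile objects).
 */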
void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
					struct intel_memory_region *mem)
{
	INIT_LIST_HEAD(&obj->mm.blocks);
	obj->mm.region = intel_memory_region_get(mem);

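	/*
	 * An object that fits in a single minimum-sized page is trivially
	 * contiguous, so mark it as such.
	 */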
	if (obj->base.size <= mem->min_page_size)
		obj->flags |= I915_BO_ALLOC_CONTIGUOUS;

	mutex_lock(&mem->objects.lock);

	if (obj->flags & I915_BO_ALLOC_VOLATILE)
		list_add(&obj->mm.region_link, &mem->objects.purgeable);
	else
		list_add(&obj->mm.region_link, &mem->objects.list);

	mutex_unlock(&mem->objects.lock);
}

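/**
 * i915_gem_object_release_memory_region - sever the object's region link
 * @obj: the GEM object
 *
 * Remove the object from its region's object list and drop the reference
 * taken in i915_gem_object_init_memory_region().
 */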
void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mem = obj->mm.region;

	mutex_lock(&mem->objects.lock);
	list_del(&obj->mm.region_link);
	mutex_unlock(&mem->objects.lock);

	intel_memory_region_put(mem);
}

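/**
 * i915_gem_object_create_region - create an object backed by a memory region
 * @mem: the memory region to allocate from
 * @size: the requested size, rounded up to the region's minimum page size
 * @flags: I915_BO_ALLOC_* flags
 *
 * Returns the new object on success, or an ERR_PTR() on failure.
 */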
struct drm_i915_gem_object *
i915_gem_object_create_region(struct intel_memory_region *mem,
			      resource_size_t size,
			      unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	int err;

	/*
	 * NB: Our use of resource_size_t for the size stems from using struct
	 * resource for the mem->region. We might need to revisit this in the
	 * future.
	 */

	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);

	if (!mem)
		return ERR_PTR(-ENODEV);

	size = round_up(size, mem->min_page_size);

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));

	if (i915_gem_object_size_2big(size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	err = mem->ops->init_object(mem, obj, size, flags);
	if (err)
		goto err_object_free;

	trace_i915_gem_object_create(obj);
	return obj;

err_object_free:
	i915_gem_object_free(obj);
	return ERR_PTR(err);
}