1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2016 Intel Corporation
5  */
6 
7 #include "i915_scatterlist.h"
8 #include "i915_ttm_buddy_manager.h"
9 
10 #include <drm/drm_buddy.h>
11 #include <drm/drm_mm.h>
12 
13 #include <linux/slab.h>
14 
15 bool i915_sg_trim(struct sg_table *orig_st)
16 {
17 	struct sg_table new_st;
18 	struct scatterlist *sg, *new_sg;
19 	unsigned int i;
20 
21 	if (orig_st->nents == orig_st->orig_nents)
22 		return false;
23 
24 	if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
25 		return false;
26 
27 	new_sg = new_st.sgl;
28 	for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
29 		sg_set_page(new_sg, sg_page(sg), sg->length, 0);
30 		sg_dma_address(new_sg) = sg_dma_address(sg);
31 		sg_dma_len(new_sg) = sg_dma_len(sg);
32 
33 		new_sg = sg_next(new_sg);
34 	}
35 	GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
36 
37 	sg_free_table(orig_st);
38 
39 	*orig_st = new_st;
40 	return true;
41 }
42 
43 static void i915_refct_sgt_release(struct kref *ref)
44 {
45 	struct i915_refct_sgt *rsgt =
46 		container_of(ref, typeof(*rsgt), kref);
47 
48 	sg_free_table(&rsgt->table);
49 	kfree(rsgt);
50 }
51 
/* Default ops for kmalloc()'ed rsgts: release via i915_refct_sgt_release(). */
static const struct i915_refct_sgt_ops rsgt_ops = {
	.release = i915_refct_sgt_release
};
55 
/**
 * i915_refct_sgt_init - Initialize a struct i915_refct_sgt with default ops
 * @rsgt: The struct i915_refct_sgt to initialize.
 * @size: The size of the underlying memory buffer.
 */
void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
{
	__i915_refct_sgt_init(rsgt, size, &rsgt_ops);
}
65 
66 /**
67  * i915_rsgt_from_mm_node - Create a refcounted sg_table from a struct
68  * drm_mm_node
69  * @node: The drm_mm_node.
70  * @region_start: An offset to add to the dma addresses of the sg list.
71  * @page_alignment: Required page alignment for each sg entry. Power of two.
72  *
73  * Create a struct sg_table, initializing it from a struct drm_mm_node,
74  * taking a maximum segment length into account, splitting into segments
75  * if necessary.
76  *
77  * Return: A pointer to a kmalloced struct i915_refct_sgt on success, negative
78  * error code cast to an error pointer on failure.
79  */
80 struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
81 					      u64 region_start,
82 					      u32 page_alignment)
83 {
84 	const u32 max_segment = round_down(UINT_MAX, page_alignment);
85 	const u32 segment_pages = max_segment >> PAGE_SHIFT;
86 	u64 block_size, offset, prev_end;
87 	struct i915_refct_sgt *rsgt;
88 	struct sg_table *st;
89 	struct scatterlist *sg;
90 
91 	GEM_BUG_ON(!max_segment);
92 
93 	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
94 	if (!rsgt)
95 		return ERR_PTR(-ENOMEM);
96 
97 	i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);
98 	st = &rsgt->table;
99 	/* restricted by sg_alloc_table */
100 	if (WARN_ON(overflows_type(DIV_ROUND_UP_ULL(node->size, segment_pages),
101 				   unsigned int)))
102 		return ERR_PTR(-E2BIG);
103 
104 	if (sg_alloc_table(st, DIV_ROUND_UP_ULL(node->size, segment_pages),
105 			   GFP_KERNEL)) {
106 		i915_refct_sgt_put(rsgt);
107 		return ERR_PTR(-ENOMEM);
108 	}
109 
110 	sg = st->sgl;
111 	st->nents = 0;
112 	prev_end = (resource_size_t)-1;
113 	block_size = node->size << PAGE_SHIFT;
114 	offset = node->start << PAGE_SHIFT;
115 
116 	while (block_size) {
117 		u64 len;
118 
119 		if (offset != prev_end || sg->length >= max_segment) {
120 			if (st->nents)
121 				sg = __sg_next(sg);
122 
123 			sg_dma_address(sg) = region_start + offset;
124 			GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg),
125 					       page_alignment));
126 			sg_dma_len(sg) = 0;
127 			sg->length = 0;
128 			st->nents++;
129 		}
130 
131 		len = min_t(u64, block_size, max_segment - sg->length);
132 		sg->length += len;
133 		sg_dma_len(sg) += len;
134 
135 		offset += len;
136 		block_size -= len;
137 
138 		prev_end = offset;
139 	}
140 
141 	sg_mark_end(sg);
142 	i915_sg_trim(st);
143 
144 	return rsgt;
145 }
146 
147 /**
148  * i915_rsgt_from_buddy_resource - Create a refcounted sg_table from a struct
149  * i915_buddy_block list
150  * @res: The struct i915_ttm_buddy_resource.
151  * @region_start: An offset to add to the dma addresses of the sg list.
152  * @page_alignment: Required page alignment for each sg entry. Power of two.
153  *
154  * Create a struct sg_table, initializing it from struct i915_buddy_block list,
155  * taking a maximum segment length into account, splitting into segments
156  * if necessary.
157  *
158  * Return: A pointer to a kmalloced struct i915_refct_sgts on success, negative
159  * error code cast to an error pointer on failure.
160  */
161 struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
162 						     u64 region_start,
163 						     u32 page_alignment)
164 {
165 	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
166 	const u64 size = res->size;
167 	const u32 max_segment = round_down(UINT_MAX, page_alignment);
168 	struct drm_buddy *mm = bman_res->mm;
169 	struct list_head *blocks = &bman_res->blocks;
170 	struct drm_buddy_block *block;
171 	struct i915_refct_sgt *rsgt;
172 	struct scatterlist *sg;
173 	struct sg_table *st;
174 	resource_size_t prev_end;
175 
176 	GEM_BUG_ON(list_empty(blocks));
177 	GEM_BUG_ON(!max_segment);
178 
179 	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
180 	if (!rsgt)
181 		return ERR_PTR(-ENOMEM);
182 
183 	i915_refct_sgt_init(rsgt, size);
184 	st = &rsgt->table;
185 	/* restricted by sg_alloc_table */
186 	if (WARN_ON(overflows_type(PFN_UP(res->size), unsigned int)))
187 		return ERR_PTR(-E2BIG);
188 
189 	if (sg_alloc_table(st, PFN_UP(res->size), GFP_KERNEL)) {
190 		i915_refct_sgt_put(rsgt);
191 		return ERR_PTR(-ENOMEM);
192 	}
193 
194 	sg = st->sgl;
195 	st->nents = 0;
196 	prev_end = (resource_size_t)-1;
197 
198 	list_for_each_entry(block, blocks, link) {
199 		u64 block_size, offset;
200 
201 		block_size = min_t(u64, size, drm_buddy_block_size(mm, block));
202 		offset = drm_buddy_block_offset(block);
203 
204 		while (block_size) {
205 			u64 len;
206 
207 			if (offset != prev_end || sg->length >= max_segment) {
208 				if (st->nents)
209 					sg = __sg_next(sg);
210 
211 				sg_dma_address(sg) = region_start + offset;
212 				GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg),
213 						       page_alignment));
214 				sg_dma_len(sg) = 0;
215 				sg->length = 0;
216 				st->nents++;
217 			}
218 
219 			len = min_t(u64, block_size, max_segment - sg->length);
220 			sg->length += len;
221 			sg_dma_len(sg) += len;
222 
223 			offset += len;
224 			block_size -= len;
225 
226 			prev_end = offset;
227 		}
228 	}
229 
230 	sg_mark_end(sg);
231 	i915_sg_trim(st);
232 
233 	return rsgt;
234 }
235 
236 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
237 #include "selftests/scatterlist.c"
238 #endif
239