/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include "i915_scatterlist.h"

#include "i915_buddy.h"
#include "i915_ttm_buddy_manager.h"

#include <drm/drm_mm.h>

#include <linux/slab.h>

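/**
 * i915_sg_trim - Replace an sg_table with a copy trimmed to its used entries
 * @orig_st: The sg_table to trim.
 *
 * If fewer entries are in use (nents) than were originally allocated
 * (orig_nents), allocate a tightly sized table, copy over the used pages
 * and DMA addresses, and swap it in place of the original.
 *
 * Return: true if @orig_st was replaced by a trimmed copy, false if no
 * trimming was needed or the new table could not be allocated.
 */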
bool i915_sg_trim(struct sg_table *orig_st)
{
	struct sg_table new_st;
	struct scatterlist *sg, *new_sg;
	unsigned int i;

	if (orig_st->nents == orig_st->orig_nents)
		return false;

	if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
		return false;

	new_sg = new_st.sgl;
	for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, 0);
		sg_dma_address(new_sg) = sg_dma_address(sg);
		sg_dma_len(new_sg) = sg_dma_len(sg);

		new_sg = sg_next(new_sg);
	}
	GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */

	sg_free_table(orig_st);

	*orig_st = new_st;
	return true;
}

static void i915_refct_sgt_release(struct kref *ref)
{
	struct i915_refct_sgt *rsgt =
		container_of(ref, typeof(*rsgt), kref);

	sg_free_table(&rsgt->table);
	kfree(rsgt);
}

static const struct i915_refct_sgt_ops rsgt_ops = {
	.release = i915_refct_sgt_release
};

/**
 * i915_refct_sgt_init - Initialize a struct i915_refct_sgt with default ops
 * @rsgt: The struct i915_refct_sgt to initialize.
 * @size: The size of the underlying memory buffer.
 */
void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
{
	__i915_refct_sgt_init(rsgt, size, &rsgt_ops);
}

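/*
 * A minimal lifecycle sketch for a default-ops refcounted sg_table,
 * following the pattern used by the constructors below ("size" and
 * "nents" are illustrative placeholders supplied by the caller):
 *
 *	struct i915_refct_sgt *rsgt;
 *
 *	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
 *	if (!rsgt)
 *		return ERR_PTR(-ENOMEM);
 *
 *	i915_refct_sgt_init(rsgt, size);
 *	if (sg_alloc_table(&rsgt->table, nents, GFP_KERNEL)) {
 *		i915_refct_sgt_put(rsgt);
 *		return ERR_PTR(-ENOMEM);
 *	}
 *
 * Additional references are taken and dropped through the embedded kref
 * (see the i915_refct_sgt_get()/i915_refct_sgt_put() helpers declared in
 * i915_scatterlist.h); the table is freed with the last reference.
 */
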
/**
 * i915_rsgt_from_mm_node - Create a refcounted sg_table from a struct
 * drm_mm_node
 * @node: The drm_mm_node.
 * @region_start: An offset to add to the dma addresses of the sg list.
 *
 * Create a struct sg_table, initializing it from a struct drm_mm_node,
 * taking a maximum segment length into account, splitting into segments
 * if necessary.
 *
 * Return: A pointer to a kmalloced struct i915_refct_sgt on success, negative
 * error code cast to an error pointer on failure.
 */
struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
					      u64 region_start)
{
	const u64 max_segment = SZ_1G; /* Do we have a limit on this? */
	u64 segment_pages = max_segment >> PAGE_SHIFT;
	u64 block_size, offset, prev_end;
	struct i915_refct_sgt *rsgt;
	struct sg_table *st;
	struct scatterlist *sg;

	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
	if (!rsgt)
		return ERR_PTR(-ENOMEM);

	i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);
	st = &rsgt->table;
	if (sg_alloc_table(st, DIV_ROUND_UP(node->size, segment_pages),
			   GFP_KERNEL)) {
		i915_refct_sgt_put(rsgt);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	st->nents = 0;
	prev_end = (resource_size_t)-1;
	block_size = node->size << PAGE_SHIFT;
	offset = node->start << PAGE_SHIFT;

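	/*
	 * The node itself is contiguous, so simply carve it into chunks of
	 * at most max_segment bytes: a new sg entry is started on the first
	 * pass (prev_end is deliberately out of range) and whenever the
	 * current entry has reached max_segment; otherwise keep extending it.
	 */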
	while (block_size) {
		u64 len;

		if (offset != prev_end || sg->length >= max_segment) {
			if (st->nents)
				sg = __sg_next(sg);

			sg_dma_address(sg) = region_start + offset;
			sg_dma_len(sg) = 0;
			sg->length = 0;
			st->nents++;
		}

		len = min(block_size, max_segment - sg->length);
		sg->length += len;
		sg_dma_len(sg) += len;

		offset += len;
		block_size -= len;

		prev_end = offset;
	}

	sg_mark_end(sg);
	i915_sg_trim(st);

	return rsgt;
}
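
/*
 * Illustrative usage only ("node" and "region_start" are placeholders
 * supplied by the caller, typically a memory-region backend):
 *
 *	struct i915_refct_sgt *rsgt;
 *
 *	rsgt = i915_rsgt_from_mm_node(node, region_start);
 *	if (IS_ERR(rsgt))
 *		return PTR_ERR(rsgt);
 *
 *	... use rsgt->table while holding the reference ...
 *
 *	i915_refct_sgt_put(rsgt);
 */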

/**
 * i915_rsgt_from_buddy_resource - Create a refcounted sg_table from a struct
 * i915_buddy_block list
 * @res: The struct ttm_resource, backed by a struct i915_ttm_buddy_resource.
 * @region_start: An offset to add to the dma addresses of the sg list.
 *
 * Create a struct sg_table, initializing it from a struct i915_buddy_block
 * list, taking a maximum segment length into account, splitting into segments
 * if necessary.
 *
 * Return: A pointer to a kmalloced struct i915_refct_sgt on success, negative
 * error code cast to an error pointer on failure.
 */
struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
						     u64 region_start)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
	const u64 size = res->num_pages << PAGE_SHIFT;
	const u64 max_segment = rounddown(UINT_MAX, PAGE_SIZE);
	struct i915_buddy_mm *mm = bman_res->mm;
	struct list_head *blocks = &bman_res->blocks;
	struct i915_buddy_block *block;
	struct i915_refct_sgt *rsgt;
	struct scatterlist *sg;
	struct sg_table *st;
	resource_size_t prev_end;

	GEM_BUG_ON(list_empty(blocks));

	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
	if (!rsgt)
		return ERR_PTR(-ENOMEM);

	i915_refct_sgt_init(rsgt, size);
	st = &rsgt->table;
	if (sg_alloc_table(st, res->num_pages, GFP_KERNEL)) {
		i915_refct_sgt_put(rsgt);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	st->nents = 0;
	prev_end = (resource_size_t)-1;

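	/*
	 * Walk the buddy blocks, merging physically contiguous blocks into a
	 * single sg entry: a new entry is started only at a discontinuity or
	 * once the current one reaches max_segment, which is kept just below
	 * UINT_MAX because sg->length and sg_dma_len() are unsigned int.
	 */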
	list_for_each_entry(block, blocks, link) {
		u64 block_size, offset;

		block_size = min_t(u64, size, i915_buddy_block_size(mm, block));
		offset = i915_buddy_block_offset(block);

		while (block_size) {
			u64 len;

			if (offset != prev_end || sg->length >= max_segment) {
				if (st->nents)
					sg = __sg_next(sg);

				sg_dma_address(sg) = region_start + offset;
				sg_dma_len(sg) = 0;
				sg->length = 0;
				st->nents++;
			}

			len = min(block_size, max_segment - sg->length);
			sg->length += len;
			sg_dma_len(sg) += len;

			offset += len;
			block_size -= len;

			prev_end = offset;
		}
	}

	sg_mark_end(sg);
	i915_sg_trim(st);

	return rsgt;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/scatterlist.c"
#endif