// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
 * Copyright © 2023 Intel Corporation
 */
#include <linux/mm.h>

#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_pool.h>

#include "ttm_kunit_helpers.h"

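/*
 * KUnit tests for the TTM page pool allocator. Assuming the in-tree
 * .kunitconfig that ships alongside these tests, they can be run with e.g.:
 *   ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/ttm/tests
 */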
struct ttm_pool_test_case {
	const char *description;
	unsigned int order;
	bool use_dma_alloc;
};

struct ttm_pool_test_priv {
	struct ttm_test_devices *devs;

	/* Used to create mock ttm_tts */
	struct ttm_buffer_object *mock_bo;
};

static struct ttm_operation_ctx simple_ctx = {
	.interruptible = true,
	.no_wait_gpu = false,
};

static int ttm_pool_test_init(struct kunit *test)
{
	struct ttm_pool_test_priv *priv;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, priv);

	priv->devs = ttm_test_devices_basic(test);
	test->priv = priv;

	return 0;
}

static void ttm_pool_test_fini(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;

	ttm_test_devices_put(test, priv->devs);
}

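/* Create a mock ttm_tt, backed by a KUnit-managed buffer object. */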
static struct ttm_tt *ttm_tt_kunit_init(struct kunit *test,
					uint32_t page_flags,
					enum ttm_caching caching,
					size_t size)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_buffer_object *bo;
	struct ttm_tt *tt;
	int err;

	bo = ttm_bo_kunit_init(test, priv->devs, size);
	KUNIT_ASSERT_NOT_NULL(test, bo);
	priv->mock_bo = bo;

	tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	err = ttm_tt_init(tt, priv->mock_bo, page_flags, caching, 0);
	KUNIT_ASSERT_EQ(test, err, 0);

	return tt;
}

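/*
 * Allocate from a fresh pool and free straight back into it, leaving
 * the pool's caches populated for the test that follows.
 */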
static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
					       size_t size,
					       enum ttm_caching caching)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	struct ttm_pool *pool;
	struct ttm_tt *tt;
	unsigned long order = __fls(size / PAGE_SIZE);
	int err;

	tt = ttm_tt_kunit_init(test, order, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);

	return pool;
}

static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
	{
		.description = "One page",
		.order = 0,
	},
	{
		.description = "More than one page",
		.order = 2,
	},
	{
		.description = "Above the allocation limit",
		.order = MAX_ORDER + 1,
	},
	{
		.description = "One page, with coherent DMA mappings enabled",
		.order = 0,
		.use_dma_alloc = true,
	},
	{
		.description = "Above the allocation limit, with coherent DMA mappings enabled",
		.order = MAX_ORDER + 1,
		.use_dma_alloc = true,
	},
};

static void ttm_pool_alloc_case_desc(const struct ttm_pool_test_case *t,
				     char *desc)
{
	strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
}

KUNIT_ARRAY_PARAM(ttm_pool_alloc_basic, ttm_pool_basic_cases,
		  ttm_pool_alloc_case_desc);

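/*
 * Allocate a ttm_tt for each parametrized order, with and without
 * coherent DMA mappings, and check how the pool filled the page array.
 */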
static void ttm_pool_alloc_basic(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	const struct ttm_pool_test_case *params = test->param_value;
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct page *fst_page, *last_page;
	enum ttm_caching caching = ttm_uncached;
	unsigned int expected_num_pages = 1 << params->order;
	size_t size = expected_num_pages * PAGE_SIZE;
	int err;

	tt = ttm_tt_kunit_init(test, 0, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->use_dma_alloc,
		      false);

	KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev);
	KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE);
	KUNIT_ASSERT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);
	KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);

	fst_page = tt->pages[0];
	last_page = tt->pages[tt->num_pages - 1];

	if (params->order <= MAX_ORDER) {
		if (params->use_dma_alloc) {
			KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
			KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
		} else {
			KUNIT_ASSERT_EQ(test, fst_page->private, params->order);
		}
	} else {
		if (params->use_dma_alloc) {
			KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
			KUNIT_ASSERT_NULL(test, (void *)last_page->private);
		} else {
			/*
			 * We expect to alloc one big block, followed by
			 * order 0 blocks
			 */
			KUNIT_ASSERT_EQ(test, fst_page->private,
					min_t(unsigned int, MAX_ORDER,
					      params->order));
			KUNIT_ASSERT_EQ(test, last_page->private, 0);
		}
	}

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);
	ttm_pool_fini(pool);
}

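/*
 * As above, but via ttm_sg_tt_init() so the ttm_tt carries DMA
 * addresses; the first and last entries must be set after the alloc.
 */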
static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	const struct ttm_pool_test_case *params = test->param_value;
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_buffer_object *bo;
	dma_addr_t dma1, dma2;
	enum ttm_caching caching = ttm_uncached;
	unsigned int expected_num_pages = 1 << params->order;
	size_t size = expected_num_pages * PAGE_SIZE;
	int err;

	tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	bo = ttm_bo_kunit_init(test, devs, size);
	KUNIT_ASSERT_NOT_NULL(test, bo);

	err = ttm_sg_tt_init(tt, bo, 0, caching);
	KUNIT_ASSERT_EQ(test, err, 0);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);
	KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);

	dma1 = tt->dma_address[0];
	dma2 = tt->dma_address[tt->num_pages - 1];

	KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma1);
	KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma2);

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);
	ttm_pool_fini(pool);
}

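/*
 * A pre-populated page of matching order and caching should be taken
 * from the pool, leaving its list empty.
 */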
static void ttm_pool_alloc_order_caching_match(struct kunit *test)
{
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_pool_type *pt;
	enum ttm_caching caching = ttm_uncached;
	unsigned int order = 0;
	size_t size = PAGE_SIZE;
	int err;

	pool = ttm_pool_pre_populated(test, size, caching);

	pt = &pool->caching[caching].orders[order];
	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));

	tt = ttm_tt_kunit_init(test, 0, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);

	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);
	ttm_pool_fini(pool);
}

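/*
 * A pre-populated page with a different caching type must not be
 * reused; after the free, both pool types hold pages.
 */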
static void ttm_pool_alloc_caching_mismatch(struct kunit *test)
{
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_pool_type *pt_pool, *pt_tt;
	enum ttm_caching tt_caching = ttm_uncached;
	enum ttm_caching pool_caching = ttm_cached;
	size_t size = PAGE_SIZE;
	unsigned int order = 0;
	int err;

	pool = ttm_pool_pre_populated(test, size, pool_caching);

	pt_pool = &pool->caching[pool_caching].orders[order];
	pt_tt = &pool->caching[tt_caching].orders[order];

	tt = ttm_tt_kunit_init(test, 0, tt_caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
	KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);

	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
	KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));

	ttm_pool_fini(pool);
}

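/*
 * Same for a mismatched order: the pre-populated higher-order page
 * stays put while the order-0 allocation is served and freed into its
 * own pool type.
 */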
static void ttm_pool_alloc_order_mismatch(struct kunit *test)
{
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_pool_type *pt_pool, *pt_tt;
	enum ttm_caching caching = ttm_uncached;
	unsigned int order = 2;
	size_t fst_size = (1 << order) * PAGE_SIZE;
	size_t snd_size = PAGE_SIZE;
	int err;

	pool = ttm_pool_pre_populated(test, fst_size, caching);

	pt_pool = &pool->caching[caching].orders[order];
	pt_tt = &pool->caching[caching].orders[0];

	tt = ttm_tt_kunit_init(test, 0, caching, snd_size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
	KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);

	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
	KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));

	ttm_pool_fini(pool);
}

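/*
 * Freeing a DMA-allocated ttm_tt must return its pages to the
 * matching pool type instead of handing them back to the system.
 */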
static void ttm_pool_free_dma_alloc(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_pool_type *pt;
	enum ttm_caching caching = ttm_uncached;
	unsigned int order = 2;
	size_t size = (1 << order) * PAGE_SIZE;

	tt = ttm_tt_kunit_init(test, 0, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
	ttm_pool_alloc(pool, tt, &simple_ctx);

	pt = &pool->caching[caching].orders[order];
	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);

	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));

	ttm_pool_fini(pool);
}

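/*
 * With coherent DMA mappings disabled, the free must leave the pool's
 * own type list exactly as it was after the allocation.
 */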
static void ttm_pool_free_no_dma_alloc(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct ttm_pool_type *pt;
	enum ttm_caching caching = ttm_uncached;
	unsigned int order = 2;
	size_t size = (1 << order) * PAGE_SIZE;

	tt = ttm_tt_kunit_init(test, 0, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, false, false);
	ttm_pool_alloc(pool, tt, &simple_ctx);

	pt = &pool->caching[caching].orders[order];
	KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);

	KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));

	ttm_pool_fini(pool);
}

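/* ttm_pool_fini() must drain every page still cached in the pool. */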
static void ttm_pool_fini_basic(struct kunit *test)
{
	struct ttm_pool *pool;
	struct ttm_pool_type *pt;
	enum ttm_caching caching = ttm_uncached;
	unsigned int order = 0;
	size_t size = PAGE_SIZE;

	pool = ttm_pool_pre_populated(test, size, caching);
	pt = &pool->caching[caching].orders[order];

	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));

	ttm_pool_fini(pool);

	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
}

static struct kunit_case ttm_pool_test_cases[] = {
	KUNIT_CASE_PARAM(ttm_pool_alloc_basic, ttm_pool_alloc_basic_gen_params),
	KUNIT_CASE_PARAM(ttm_pool_alloc_basic_dma_addr,
			 ttm_pool_alloc_basic_gen_params),
	KUNIT_CASE(ttm_pool_alloc_order_caching_match),
	KUNIT_CASE(ttm_pool_alloc_caching_mismatch),
	KUNIT_CASE(ttm_pool_alloc_order_mismatch),
	KUNIT_CASE(ttm_pool_free_dma_alloc),
	KUNIT_CASE(ttm_pool_free_no_dma_alloc),
	KUNIT_CASE(ttm_pool_fini_basic),
	{}
};

static struct kunit_suite ttm_pool_test_suite = {
	.name = "ttm_pool",
	.init = ttm_pool_test_init,
	.exit = ttm_pool_test_fini,
	.test_cases = ttm_pool_test_cases,
};

kunit_test_suites(&ttm_pool_test_suite);

MODULE_LICENSE("GPL");