// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_range_manager.h>

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_ttm_buddy_manager.h"

#include "intel_region_ttm.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */
/**
 * DOC: TTM support structure
 *
 * The code in this file deals with setting up memory managers for TTM
 * LMEM and MOCK regions and converting the output from the managers to
 * struct sg_table, basically providing the mapping from i915 GEM regions
 * to TTM memory types and resource managers.
 */

/**
 * intel_region_ttm_device_init - Initialize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
{
        struct drm_device *drm = &dev_priv->drm;

        return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(),
                               drm->dev, drm->anon_inode->i_mapping,
                               drm->vma_offset_manager, false, false);
}

/**
 * intel_region_ttm_device_fini - Finalize a TTM device
 * @dev_priv: Pointer to an i915 device private structure.
 */
void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
{
        ttm_device_fini(&dev_priv->bdev);
}
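
/*
 * Illustrative sketch (hypothetical callers, not part of this file): the
 * device-wide TTM state is expected to be set up once per i915 instance
 * and torn down on removal, roughly like the pairing below.
 *
 *	int example_probe(struct drm_i915_private *i915)
 *	{
 *		int err = intel_region_ttm_device_init(i915);
 *
 *		if (err)
 *			return err;
 *		...
 *		return 0;
 *	}
 *
 *	void example_remove(struct drm_i915_private *i915)
 *	{
 *		...
 *		intel_region_ttm_device_fini(i915);
 *	}
 */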

/*
 * Map the i915 memory regions to TTM memory types. We use the
 * driver-private types for now, reserving TTM_PL_VRAM for stolen
 * memory and TTM_PL_TT for GGTT use should we decide to implement this.
 */
int intel_region_to_ttm_type(const struct intel_memory_region *mem)
{
        int type;

        GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
                   mem->type != INTEL_MEMORY_MOCK &&
                   mem->type != INTEL_MEMORY_SYSTEM);

        if (mem->type == INTEL_MEMORY_SYSTEM)
                return TTM_PL_SYSTEM;

        type = mem->instance + TTM_PL_PRIV;
        GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);

        return type;
}
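
/*
 * Illustrative example of the mapping above (instance numbers assumed for
 * clarity only):
 *
 *	INTEL_MEMORY_SYSTEM               -> TTM_PL_SYSTEM
 *	INTEL_MEMORY_LOCAL, instance 0    -> TTM_PL_PRIV
 *	INTEL_MEMORY_LOCAL, instance 1    -> TTM_PL_PRIV + 1
 */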

/**
 * intel_region_ttm_init - Initialize a memory region for TTM.
 * @mem: The region to initialize.
 *
 * This function initializes a suitable TTM resource manager for the
 * region, and if it's an LMEM region type, attaches it to the TTM
 * device. MOCK regions are NOT attached to the TTM device, since we don't
 * have one for the mock selftests.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_region_ttm_init(struct intel_memory_region *mem)
{
        struct ttm_device *bdev = &mem->i915->bdev;
        int mem_type = intel_region_to_ttm_type(mem);
        int ret;

        ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
                                      resource_size(&mem->region),
                                      mem->io_size,
                                      mem->min_page_size, PAGE_SIZE);
        if (ret)
                return ret;

        mem->region_private = ttm_manager_type(bdev, mem_type);

        return 0;
}
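
/*
 * Illustrative sketch (hypothetical caller; only fields already used in
 * this file are shown): the caller is expected to describe the region
 * geometry before handing it to intel_region_ttm_init(), roughly:
 *
 *	mem->io_size = io_size;         (CPU-visible portion, may be < total)
 *	mem->min_page_size = PAGE_SIZE;
 *	err = intel_region_ttm_init(mem);
 *	if (err)
 *		return err;
 */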

/**
 * intel_region_ttm_fini - Finalize a TTM region.
 * @mem: The memory region
 *
 * This function takes down the TTM resource manager associated with the
 * memory region, and if it was registered with the TTM device,
 * removes that registration.
 */
int intel_region_ttm_fini(struct intel_memory_region *mem)
{
        struct ttm_resource_manager *man = mem->region_private;
        int ret = -EBUSY;
        int count;

        /*
         * Put the region's move fences. This releases requests that
         * may hold on to contexts and vms that may hold on to buffer
         * objects placed in this region.
         */
        if (man)
                ttm_resource_manager_cleanup(man);

        /* Flush objects from region. */
        for (count = 0; count < 10; ++count) {
                i915_gem_flush_free_objects(mem->i915);

                mutex_lock(&mem->objects.lock);
                if (list_empty(&mem->objects.list))
                        ret = 0;
                mutex_unlock(&mem->objects.lock);
                if (!ret)
                        break;

                msleep(20);
                drain_workqueue(mem->i915->bdev.wq);
        }

        /* If we leaked objects, don't free the region; that would cause a use-after-free. */
        if (ret || !man)
                return ret;

        ret = i915_ttm_buddy_man_fini(&mem->i915->bdev,
                                      intel_region_to_ttm_type(mem));
        GEM_WARN_ON(ret);
        mem->region_private = NULL;

        return ret;
}
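
/*
 * Illustrative sketch (hypothetical caller): intel_region_ttm_fini()
 * returns -EBUSY when objects are still present after the retries above,
 * in which case the manager is intentionally leaked rather than freed:
 *
 *	err = intel_region_ttm_fini(mem);
 *	if (err == -EBUSY)
 *		drm_warn(&mem->i915->drm,
 *			 "Leaking memory region, objects still present\n");
 */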

/**
 * intel_region_ttm_resource_to_rsgt -
 * Convert an opaque TTM resource manager resource to a refcounted sg_table.
 * @mem: The memory region.
 * @res: The resource manager resource obtained from the TTM resource manager.
 * @page_alignment: Required page alignment for each sg entry. Power of two.
 *
 * The gem backends typically use sg-tables for operations on the underlying
 * io_memory. So provide a way for the backends to translate the
 * nodes they are handed from TTM to sg-tables.
 *
 * Return: A refcounted sg_table on success, an error pointer on failure.
 */
struct i915_refct_sgt *
intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
                                  struct ttm_resource *res,
                                  u32 page_alignment)
{
        if (mem->is_range_manager) {
                struct ttm_range_mgr_node *range_node =
                        to_ttm_range_mgr_node(res);

                return i915_rsgt_from_mm_node(&range_node->mm_nodes[0],
                                              mem->region.start,
                                              page_alignment);
        } else {
                return i915_rsgt_from_buddy_resource(res, mem->region.start,
                                                     page_alignment);
        }
}
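
/*
 * Illustrative sketch (hypothetical caller, error handling trimmed),
 * assuming the refcounted sg_table helpers from i915_scatterlist.h: a gem
 * backend converts the TTM resource and then works on the sg_table
 * embedded in the returned i915_refct_sgt:
 *
 *	struct i915_refct_sgt *rsgt;
 *
 *	rsgt = intel_region_ttm_resource_to_rsgt(mem, res, mem->min_page_size);
 *	if (IS_ERR(rsgt))
 *		return PTR_ERR(rsgt);
 *	... use rsgt->table ...
 *	i915_refct_sgt_put(rsgt);
 */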

#ifdef CONFIG_DRM_I915_SELFTEST
/**
 * intel_region_ttm_resource_alloc - Allocate memory resources from a region
 * @mem: The memory region.
 * @offset: BO offset
 * @size: The requested size in bytes
 * @flags: Allocation flags
 *
 * This functionality is provided only for callers that need to allocate
 * memory from standalone TTM range managers, without the TTM eviction
 * functionality. Don't use if you are not completely sure that's the
 * case. The returned opaque resource can be converted to a refcounted
 * sg_table using intel_region_ttm_resource_to_rsgt(), and can be freed
 * using intel_region_ttm_resource_free().
 *
 * Return: A valid pointer on success, an error pointer on failure.
 */
struct ttm_resource *
intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
                                resource_size_t offset,
                                resource_size_t size,
                                unsigned int flags)
{
        struct ttm_resource_manager *man = mem->region_private;
        struct ttm_place place = {};
        struct ttm_buffer_object mock_bo = {};
        struct ttm_resource *res;
        int ret;

        if (flags & I915_BO_ALLOC_CONTIGUOUS)
                place.flags |= TTM_PL_FLAG_CONTIGUOUS;
        if (offset != I915_BO_INVALID_OFFSET) {
                if (WARN_ON(overflows_type(offset >> PAGE_SHIFT, place.fpfn))) {
                        ret = -E2BIG;
                        goto out;
                }
                place.fpfn = offset >> PAGE_SHIFT;
                if (WARN_ON(overflows_type(place.fpfn + (size >> PAGE_SHIFT), place.lpfn))) {
                        ret = -E2BIG;
                        goto out;
                }
                place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
        } else if (mem->io_size && mem->io_size < mem->total) {
                if (flags & I915_BO_ALLOC_GPU_ONLY) {
                        place.flags |= TTM_PL_FLAG_TOPDOWN;
                } else {
                        place.fpfn = 0;
                        if (WARN_ON(overflows_type(mem->io_size >> PAGE_SHIFT, place.lpfn))) {
                                ret = -E2BIG;
                                goto out;
                        }
                        place.lpfn = mem->io_size >> PAGE_SHIFT;
                }
        }

        mock_bo.base.size = size;
        mock_bo.bdev = &mem->i915->bdev;

        ret = man->func->alloc(man, &mock_bo, &place, &res);

out:
        if (ret == -ENOSPC)
                ret = -ENXIO;
        if (!ret)
                res->bo = NULL; /* Rather blow up than some uaf */
        return ret ? ERR_PTR(ret) : res;
}
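
/*
 * Illustrative selftest-style sketch (hypothetical; size chosen arbitrarily):
 * allocations made through this helper bypass TTM eviction and must be
 * paired with intel_region_ttm_resource_free():
 *
 *	struct ttm_resource *res;
 *
 *	res = intel_region_ttm_resource_alloc(mem, I915_BO_INVALID_OFFSET,
 *					      SZ_4M, 0);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	... exercise the allocation ...
 *	intel_region_ttm_resource_free(mem, res);
 */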

#endif

/**
 * intel_region_ttm_resource_free - Free a resource allocated from a resource manager
 * @mem: The region the resource was allocated from.
 * @res: The opaque resource representing an allocation.
 */
void intel_region_ttm_resource_free(struct intel_memory_region *mem,
                                    struct ttm_resource *res)
{
        struct ttm_resource_manager *man = mem->region_private;
        struct ttm_buffer_object mock_bo = {};

        mock_bo.base.size = res->size;
        mock_bo.bdev = &mem->i915->bdev;
        res->bo = &mock_bo;

        man->func->free(man, res);
}