1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include "i915_drv.h"
7 #include "intel_memory_region.h"
8 #include "intel_region_lmem.h"
9 #include "intel_region_ttm.h"
10 #include "gem/i915_gem_lmem.h"
11 #include "gem/i915_gem_region.h"
12 #include "gem/i915_gem_ttm.h"
13 #include "gt/intel_gt.h"
14 
/*
 * Set up a "fake" LMEM BAR by stealing the GGTT mappable aperture.
 *
 * Reserves a 1:1-sized node at GGTT offset 0, DMA-maps the reserved
 * system-memory range backing the fake LMEM as one contiguous block, and
 * points every GGTT page in that range at the mapping. On success
 * mem->region is rewritten to describe the remapped (DMA) address range.
 *
 * Returns 0 on success or a negative error code (node reservation or DMA
 * mapping failure). On failure no state is left behind.
 */
static int init_fake_lmem_bar(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;
	struct i915_ggtt *ggtt = &i915->ggtt;
	unsigned long n;
	int ret;

	/* We want to 1:1 map the mappable aperture to our reserved region */

	/* Claim [0, region_size) in the GGTT; UNEVICTABLE keeps eviction away. */
	mem->fake_mappable.start = 0;
	mem->fake_mappable.size = resource_size(&mem->region);
	mem->fake_mappable.color = I915_COLOR_UNEVICTABLE;

	ret = drm_mm_reserve_node(&ggtt->vm.mm, &mem->fake_mappable);
	if (ret)
		return ret;

	/*
	 * Map the whole backing range in one go; FORCE_CONTIGUOUS guarantees
	 * a single IOVA block so pages can be addressed by simple offset.
	 */
	mem->remap_addr = dma_map_resource(i915->drm.dev,
					   mem->region.start,
					   mem->fake_mappable.size,
					   DMA_BIDIRECTIONAL,
					   DMA_ATTR_FORCE_CONTIGUOUS);
	if (dma_mapping_error(i915->drm.dev, mem->remap_addr)) {
		/* Undo the node reservation; dma_map_resource has no mapping to undo. */
		drm_mm_remove_node(&mem->fake_mappable);
		return -EINVAL;
	}

	/* Identity-map each page: GGTT offset n*PAGE_SIZE -> remap_addr + n*PAGE_SIZE. */
	for (n = 0; n < mem->fake_mappable.size >> PAGE_SHIFT; ++n) {
		ggtt->vm.insert_page(&ggtt->vm,
				     mem->remap_addr + (n << PAGE_SHIFT),
				     n << PAGE_SHIFT,
				     I915_CACHE_NONE, 0);
	}

	/*
	 * From here on the region is addressed through the DMA mapping, so
	 * replace the original resource with the remapped range.
	 */
	mem->region = (struct resource)DEFINE_RES_MEM(mem->remap_addr,
						      mem->fake_mappable.size);

	return 0;
}
54 
/*
 * Undo init_fake_lmem_bar(): drop the GGTT reservation and the contiguous
 * DMA mapping. Safe to call when the fake BAR was never set up — the
 * allocated-node check makes it a no-op then.
 */
static void release_fake_lmem_bar(struct intel_memory_region *mem)
{
	if (!drm_mm_node_allocated(&mem->fake_mappable))
		return;

	drm_mm_remove_node(&mem->fake_mappable);

	/* fake_mappable.size is still valid after node removal; reuse it here. */
	dma_unmap_resource(mem->i915->drm.dev,
			   mem->remap_addr,
			   mem->fake_mappable.size,
			   DMA_BIDIRECTIONAL,
			   DMA_ATTR_FORCE_CONTIGUOUS);
}
68 
/*
 * Region ops .release hook: tear down in the reverse order of
 * region_lmem_init() — TTM manager first, then the WC io mapping, then the
 * fake-BAR state (a no-op when no fake BAR was created).
 *
 * Returns the result of intel_region_ttm_fini(); the remaining teardown
 * steps run unconditionally.
 */
static int
region_lmem_release(struct intel_memory_region *mem)
{
	int ret;

	ret = intel_region_ttm_fini(mem);
	io_mapping_fini(&mem->iomap);
	release_fake_lmem_bar(mem);

	return ret;
}
80 
81 static int
82 region_lmem_init(struct intel_memory_region *mem)
83 {
84 	int ret;
85 
86 	if (mem->i915->params.fake_lmem_start) {
87 		ret = init_fake_lmem_bar(mem);
88 		GEM_BUG_ON(ret);
89 	}
90 
91 	if (!io_mapping_init_wc(&mem->iomap,
92 				mem->io_start,
93 				resource_size(&mem->region))) {
94 		ret = -EIO;
95 		goto out_no_io;
96 	}
97 
98 	ret = intel_region_ttm_init(mem);
99 	if (ret)
100 		goto out_no_buddy;
101 
102 	return 0;
103 
104 out_no_buddy:
105 	io_mapping_fini(&mem->iomap);
106 out_no_io:
107 	release_fake_lmem_bar(mem);
108 
109 	return ret;
110 }
111 
/* Memory-region ops shared by both the real and fake LMEM regions. */
static const struct intel_memory_region_ops intel_region_lmem_ops = {
	.init = region_lmem_init,
	.release = region_lmem_release,
	.init_object = __i915_gem_ttm_object_init,
};
117 
118 struct intel_memory_region *
119 intel_gt_setup_fake_lmem(struct intel_gt *gt)
120 {
121 	struct drm_i915_private *i915 = gt->i915;
122 	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
123 	struct intel_memory_region *mem;
124 	resource_size_t mappable_end;
125 	resource_size_t io_start;
126 	resource_size_t start;
127 
128 	if (!HAS_LMEM(i915))
129 		return ERR_PTR(-ENODEV);
130 
131 	if (!i915->params.fake_lmem_start)
132 		return ERR_PTR(-ENODEV);
133 
134 	GEM_BUG_ON(i915_ggtt_has_aperture(&i915->ggtt));
135 
136 	/* Your mappable aperture belongs to me now! */
137 	mappable_end = pci_resource_len(pdev, 2);
138 	io_start = pci_resource_start(pdev, 2);
139 	start = i915->params.fake_lmem_start;
140 
141 	mem = intel_memory_region_create(i915,
142 					 start,
143 					 mappable_end,
144 					 PAGE_SIZE,
145 					 io_start,
146 					 INTEL_MEMORY_LOCAL,
147 					 0,
148 					 &intel_region_lmem_ops);
149 	if (!IS_ERR(mem)) {
150 		drm_info(&i915->drm, "Intel graphics fake LMEM: %pR\n",
151 			 &mem->region);
152 		drm_info(&i915->drm,
153 			 "Intel graphics fake LMEM IO start: %llx\n",
154 			(u64)mem->io_start);
155 		drm_info(&i915->drm, "Intel graphics fake LMEM size: %llx\n",
156 			 (u64)resource_size(&mem->region));
157 	}
158 
159 	return mem;
160 }
161 
162 static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
163 				     u64 *start, u32 *size)
164 {
165 	if (!IS_DG1_GRAPHICS_STEP(uncore->i915, STEP_A0, STEP_C0))
166 		return false;
167 
168 	*start = 0;
169 	*size = SZ_1M;
170 
171 	drm_dbg(&uncore->i915->drm, "LMEM: reserved legacy low-memory [0x%llx-0x%llx]\n",
172 		*start, *start + *size);
173 
174 	return true;
175 }
176 
177 static int reserve_lowmem_region(struct intel_uncore *uncore,
178 				 struct intel_memory_region *mem)
179 {
180 	u64 reserve_start;
181 	u32 reserve_size;
182 	int ret;
183 
184 	if (!get_legacy_lowmem_region(uncore, &reserve_start, &reserve_size))
185 		return 0;
186 
187 	ret = intel_memory_region_reserve(mem, reserve_start, reserve_size);
188 	if (ret)
189 		drm_err(&uncore->i915->drm, "LMEM: reserving low memory region failed\n");
190 
191 	return ret;
192 }
193 
194 static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
195 {
196 	struct drm_i915_private *i915 = gt->i915;
197 	struct intel_uncore *uncore = gt->uncore;
198 	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
199 	struct intel_memory_region *mem;
200 	resource_size_t min_page_size;
201 	resource_size_t io_start;
202 	resource_size_t lmem_size;
203 	int err;
204 
205 	if (!IS_DGFX(i915))
206 		return ERR_PTR(-ENODEV);
207 
208 	/* Stolen starts from GSMBASE on DG1 */
209 	lmem_size = intel_uncore_read64(uncore, GEN12_GSMBASE);
210 
211 	io_start = pci_resource_start(pdev, 2);
212 	if (GEM_WARN_ON(lmem_size > pci_resource_len(pdev, 2)))
213 		return ERR_PTR(-ENODEV);
214 
215 	min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
216 						I915_GTT_PAGE_SIZE_4K;
217 	mem = intel_memory_region_create(i915,
218 					 0,
219 					 lmem_size,
220 					 min_page_size,
221 					 io_start,
222 					 INTEL_MEMORY_LOCAL,
223 					 0,
224 					 &intel_region_lmem_ops);
225 	if (IS_ERR(mem))
226 		return mem;
227 
228 	err = reserve_lowmem_region(uncore, mem);
229 	if (err)
230 		goto err_region_put;
231 
232 	drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
233 	drm_dbg(&i915->drm, "Local memory IO start: %pa\n",
234 		&mem->io_start);
235 	drm_info(&i915->drm, "Local memory available: %pa\n",
236 		 &lmem_size);
237 
238 	return mem;
239 
240 err_region_put:
241 	intel_memory_region_destroy(mem);
242 	return ERR_PTR(err);
243 }
244 
/* Public entry point: create the device-local memory region for @gt. */
struct intel_memory_region *intel_gt_setup_lmem(struct intel_gt *gt)
{
	return setup_lmem(gt);
}
249