1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include "i915_drv.h"
7 #include "i915_reg.h"
8 #include "intel_memory_region.h"
9 #include "intel_region_lmem.h"
10 #include "intel_region_ttm.h"
11 #include "gem/i915_gem_lmem.h"
12 #include "gem/i915_gem_region.h"
13 #include "gem/i915_gem_ttm.h"
14 #include "gt/intel_gt.h"
15 
/*
 * Set up a fake LMEM BAR by aliasing the mappable aperture 1:1 onto the
 * reserved host-memory region given by i915.fake_lmem_start.
 *
 * Reserves the whole region in the GGTT (colored unevictable so nothing
 * else lands there), DMA-maps the backing memory as one contiguous range,
 * and points every GGTT page at it.  On success, mem->region is rewritten
 * to describe the remapped (DMA) address range.
 *
 * Returns 0 on success or a negative errno.
 */
static int init_fake_lmem_bar(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;
	struct i915_ggtt *ggtt = &i915->ggtt;
	unsigned long n;
	int ret;

	/* We want to 1:1 map the mappable aperture to our reserved region */

	mem->fake_mappable.start = 0;
	mem->fake_mappable.size = resource_size(&mem->region);
	mem->fake_mappable.color = I915_COLOR_UNEVICTABLE;

	ret = drm_mm_reserve_node(&ggtt->vm.mm, &mem->fake_mappable);
	if (ret)
		return ret;

	/*
	 * One contiguous DMA mapping for the whole region; torn down by
	 * release_fake_lmem_bar() with matching size/attrs.
	 */
	mem->remap_addr = dma_map_resource(i915->drm.dev,
					   mem->region.start,
					   mem->fake_mappable.size,
					   DMA_BIDIRECTIONAL,
					   DMA_ATTR_FORCE_CONTIGUOUS);
	if (dma_mapping_error(i915->drm.dev, mem->remap_addr)) {
		drm_mm_remove_node(&mem->fake_mappable);
		return -EINVAL;
	}

	/* Point each GGTT page at the corresponding remapped page. */
	for (n = 0; n < mem->fake_mappable.size >> PAGE_SHIFT; ++n) {
		ggtt->vm.insert_page(&ggtt->vm,
				     mem->remap_addr + (n << PAGE_SHIFT),
				     n << PAGE_SHIFT,
				     I915_CACHE_NONE, 0);
	}

	/* From here on the region is addressed through the DMA alias. */
	mem->region = (struct resource)DEFINE_RES_MEM(mem->remap_addr,
						      mem->fake_mappable.size);

	return 0;
}
55 
56 static void release_fake_lmem_bar(struct intel_memory_region *mem)
57 {
58 	if (!drm_mm_node_allocated(&mem->fake_mappable))
59 		return;
60 
61 	drm_mm_remove_node(&mem->fake_mappable);
62 
63 	dma_unmap_resource(mem->i915->drm.dev,
64 			   mem->remap_addr,
65 			   mem->fake_mappable.size,
66 			   DMA_BIDIRECTIONAL,
67 			   DMA_ATTR_FORCE_CONTIGUOUS);
68 }
69 
70 static int
71 region_lmem_release(struct intel_memory_region *mem)
72 {
73 	int ret;
74 
75 	ret = intel_region_ttm_fini(mem);
76 	io_mapping_fini(&mem->iomap);
77 	release_fake_lmem_bar(mem);
78 
79 	return ret;
80 }
81 
82 static int
83 region_lmem_init(struct intel_memory_region *mem)
84 {
85 	int ret;
86 
87 	if (mem->i915->params.fake_lmem_start) {
88 		ret = init_fake_lmem_bar(mem);
89 		GEM_BUG_ON(ret);
90 	}
91 
92 	if (!io_mapping_init_wc(&mem->iomap,
93 				mem->io_start,
94 				resource_size(&mem->region))) {
95 		ret = -EIO;
96 		goto out_no_io;
97 	}
98 
99 	ret = intel_region_ttm_init(mem);
100 	if (ret)
101 		goto out_no_buddy;
102 
103 	return 0;
104 
105 out_no_buddy:
106 	io_mapping_fini(&mem->iomap);
107 out_no_io:
108 	release_fake_lmem_bar(mem);
109 
110 	return ret;
111 }
112 
/* Region ops wiring LMEM regions to the TTM-backed GEM object backend. */
static const struct intel_memory_region_ops intel_region_lmem_ops = {
	.init = region_lmem_init,
	.release = region_lmem_release,
	.init_object = __i915_gem_ttm_object_init,
};
118 
119 struct intel_memory_region *
120 intel_gt_setup_fake_lmem(struct intel_gt *gt)
121 {
122 	struct drm_i915_private *i915 = gt->i915;
123 	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
124 	struct intel_memory_region *mem;
125 	resource_size_t mappable_end;
126 	resource_size_t io_start;
127 	resource_size_t start;
128 
129 	if (!HAS_LMEM(i915))
130 		return ERR_PTR(-ENODEV);
131 
132 	if (!i915->params.fake_lmem_start)
133 		return ERR_PTR(-ENODEV);
134 
135 	GEM_BUG_ON(i915_ggtt_has_aperture(&i915->ggtt));
136 
137 	/* Your mappable aperture belongs to me now! */
138 	mappable_end = pci_resource_len(pdev, 2);
139 	io_start = pci_resource_start(pdev, 2);
140 	start = i915->params.fake_lmem_start;
141 
142 	mem = intel_memory_region_create(i915,
143 					 start,
144 					 mappable_end,
145 					 PAGE_SIZE,
146 					 io_start,
147 					 INTEL_MEMORY_LOCAL,
148 					 0,
149 					 &intel_region_lmem_ops);
150 	if (!IS_ERR(mem)) {
151 		drm_info(&i915->drm, "Intel graphics fake LMEM: %pR\n",
152 			 &mem->region);
153 		drm_info(&i915->drm,
154 			 "Intel graphics fake LMEM IO start: %llx\n",
155 			(u64)mem->io_start);
156 		drm_info(&i915->drm, "Intel graphics fake LMEM size: %llx\n",
157 			 (u64)resource_size(&mem->region));
158 	}
159 
160 	return mem;
161 }
162 
163 static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
164 				     u64 *start, u32 *size)
165 {
166 	if (!IS_DG1_GRAPHICS_STEP(uncore->i915, STEP_A0, STEP_C0))
167 		return false;
168 
169 	*start = 0;
170 	*size = SZ_1M;
171 
172 	drm_dbg(&uncore->i915->drm, "LMEM: reserved legacy low-memory [0x%llx-0x%llx]\n",
173 		*start, *start + *size);
174 
175 	return true;
176 }
177 
178 static int reserve_lowmem_region(struct intel_uncore *uncore,
179 				 struct intel_memory_region *mem)
180 {
181 	u64 reserve_start;
182 	u32 reserve_size;
183 	int ret;
184 
185 	if (!get_legacy_lowmem_region(uncore, &reserve_start, &reserve_size))
186 		return 0;
187 
188 	ret = intel_memory_region_reserve(mem, reserve_start, reserve_size);
189 	if (ret)
190 		drm_err(&uncore->i915->drm, "LMEM: reserving low memory region failed\n");
191 
192 	return ret;
193 }
194 
/*
 * Probe and create the device-local memory (LMEM) region for a dGPU.
 *
 * LMEM size is derived from GSMBASE: stolen memory starts there, so
 * everything below it is available as local memory.  The size is sanity
 * checked against the PCI BAR, any legacy low-memory range is reserved,
 * and the region is registered with the TTM-backed region ops.
 *
 * Returns the new region, or an ERR_PTR() on failure.
 */
static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct intel_memory_region *mem;
	resource_size_t min_page_size;
	resource_size_t io_start;
	resource_size_t lmem_size;
	int err;

	if (!IS_DGFX(i915))
		return ERR_PTR(-ENODEV);

	/* Stolen starts from GSMBASE on DG1 */
	lmem_size = intel_uncore_read64(uncore, GEN12_GSMBASE);

	/* CPU access to LMEM goes through PCI BAR 2. */
	io_start = pci_resource_start(pdev, 2);
	if (GEM_WARN_ON(lmem_size > pci_resource_len(pdev, 2)))
		return ERR_PTR(-ENODEV);

	/* Platforms with 64K GTT pages need 64K-aligned allocations. */
	min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
						I915_GTT_PAGE_SIZE_4K;
	mem = intel_memory_region_create(i915,
					 0,
					 lmem_size,
					 min_page_size,
					 io_start,
					 INTEL_MEMORY_LOCAL,
					 0,
					 &intel_region_lmem_ops);
	if (IS_ERR(mem))
		return mem;

	err = reserve_lowmem_region(uncore, mem);
	if (err)
		goto err_region_put;

	drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
	drm_dbg(&i915->drm, "Local memory IO start: %pa\n",
		&mem->io_start);
	drm_info(&i915->drm, "Local memory available: %pa\n",
		 &lmem_size);

	return mem;

err_region_put:
	intel_memory_region_destroy(mem);
	return ERR_PTR(err);
}
245 
/*
 * intel_gt_setup_lmem - create the LMEM region for a GT
 * @gt: the GT to probe
 *
 * Public entry point; see setup_lmem().  Returns the region or an
 * ERR_PTR() on failure.
 */
struct intel_memory_region *intel_gt_setup_lmem(struct intel_gt *gt)
{
	return setup_lmem(gt);
}
250