1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include "i915_drv.h"
7 #include "i915_reg.h"
8 #include "intel_memory_region.h"
9 #include "intel_region_lmem.h"
10 #include "intel_region_ttm.h"
11 #include "gem/i915_gem_lmem.h"
12 #include "gem/i915_gem_region.h"
13 #include "gem/i915_gem_ttm.h"
14 #include "gt/intel_gt.h"
15 #include "gt/intel_gt_regs.h"
16 
/*
 * Set up a "fake" LMEM BAR by aliasing a reserved range of system memory
 * through the GGTT, for platforms/testing where no real LMEM BAR exists.
 *
 * Returns 0 on success, or a negative errno (drm_mm_reserve_node() failure
 * or -EINVAL if the DMA mapping fails).
 */
static int init_fake_lmem_bar(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	unsigned long n;
	int ret;

	/* We want to 1:1 map the mappable aperture to our reserved region */

	/*
	 * Reserve GGTT address space [0, region size) for the alias;
	 * I915_COLOR_UNEVICTABLE keeps the node from ever being evicted.
	 */
	mem->fake_mappable.start = 0;
	mem->fake_mappable.size = resource_size(&mem->region);
	mem->fake_mappable.color = I915_COLOR_UNEVICTABLE;

	ret = drm_mm_reserve_node(&ggtt->vm.mm, &mem->fake_mappable);
	if (ret)
		return ret;

	/* DMA-map the whole backing region as one contiguous range. */
	mem->remap_addr = dma_map_resource(i915->drm.dev,
					   mem->region.start,
					   mem->fake_mappable.size,
					   DMA_BIDIRECTIONAL,
					   DMA_ATTR_FORCE_CONTIGUOUS);
	if (dma_mapping_error(i915->drm.dev, mem->remap_addr)) {
		/* Unwind the GGTT reservation on mapping failure. */
		drm_mm_remove_node(&mem->fake_mappable);
		return -EINVAL;
	}

	/* Populate a 1:1 GGTT mapping, one page at a time, uncached. */
	for (n = 0; n < mem->fake_mappable.size >> PAGE_SHIFT; ++n) {
		ggtt->vm.insert_page(&ggtt->vm,
				     mem->remap_addr + (n << PAGE_SHIFT),
				     n << PAGE_SHIFT,
				     I915_CACHE_NONE, 0);
	}

	/*
	 * Rewrite mem->region so later users (e.g. io mapping setup) see
	 * the remapped DMA address rather than the original backing range.
	 */
	mem->region = (struct resource)DEFINE_RES_MEM(mem->remap_addr,
						      mem->fake_mappable.size);

	return 0;
}
56 
/*
 * Undo init_fake_lmem_bar(): drop the GGTT reservation and the DMA mapping.
 * Safe to call even if the fake BAR was never set up — the allocated-node
 * check makes it a no-op in that case.
 */
static void release_fake_lmem_bar(struct intel_memory_region *mem)
{
	if (!drm_mm_node_allocated(&mem->fake_mappable))
		return;

	/* Remove the GGTT node first, then tear down the DMA mapping. */
	drm_mm_remove_node(&mem->fake_mappable);

	dma_unmap_resource(mem->i915->drm.dev,
			   mem->remap_addr,
			   mem->fake_mappable.size,
			   DMA_BIDIRECTIONAL,
			   DMA_ATTR_FORCE_CONTIGUOUS);
}
70 
71 static int
72 region_lmem_release(struct intel_memory_region *mem)
73 {
74 	int ret;
75 
76 	ret = intel_region_ttm_fini(mem);
77 	io_mapping_fini(&mem->iomap);
78 	release_fake_lmem_bar(mem);
79 
80 	return ret;
81 }
82 
83 static int
84 region_lmem_init(struct intel_memory_region *mem)
85 {
86 	int ret;
87 
88 	if (mem->i915->params.fake_lmem_start) {
89 		ret = init_fake_lmem_bar(mem);
90 		GEM_BUG_ON(ret);
91 	}
92 
93 	if (!io_mapping_init_wc(&mem->iomap,
94 				mem->io_start,
95 				resource_size(&mem->region))) {
96 		ret = -EIO;
97 		goto out_no_io;
98 	}
99 
100 	ret = intel_region_ttm_init(mem);
101 	if (ret)
102 		goto out_no_buddy;
103 
104 	return 0;
105 
106 out_no_buddy:
107 	io_mapping_fini(&mem->iomap);
108 out_no_io:
109 	release_fake_lmem_bar(mem);
110 
111 	return ret;
112 }
113 
/* Memory-region ops for LMEM: lifecycle hooks plus TTM-backed object init. */
static const struct intel_memory_region_ops intel_region_lmem_ops = {
	.init = region_lmem_init,
	.release = region_lmem_release,
	.init_object = __i915_gem_ttm_object_init,
};
119 
120 struct intel_memory_region *
121 intel_gt_setup_fake_lmem(struct intel_gt *gt)
122 {
123 	struct drm_i915_private *i915 = gt->i915;
124 	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
125 	struct intel_memory_region *mem;
126 	resource_size_t mappable_end;
127 	resource_size_t io_start;
128 	resource_size_t start;
129 
130 	if (!HAS_LMEM(i915))
131 		return ERR_PTR(-ENODEV);
132 
133 	if (!i915->params.fake_lmem_start)
134 		return ERR_PTR(-ENODEV);
135 
136 	GEM_BUG_ON(i915_ggtt_has_aperture(to_gt(i915)->ggtt));
137 
138 	/* Your mappable aperture belongs to me now! */
139 	mappable_end = pci_resource_len(pdev, 2);
140 	io_start = pci_resource_start(pdev, 2);
141 	start = i915->params.fake_lmem_start;
142 
143 	mem = intel_memory_region_create(i915,
144 					 start,
145 					 mappable_end,
146 					 PAGE_SIZE,
147 					 io_start,
148 					 INTEL_MEMORY_LOCAL,
149 					 0,
150 					 &intel_region_lmem_ops);
151 	if (!IS_ERR(mem)) {
152 		drm_info(&i915->drm, "Intel graphics fake LMEM: %pR\n",
153 			 &mem->region);
154 		drm_info(&i915->drm,
155 			 "Intel graphics fake LMEM IO start: %llx\n",
156 			(u64)mem->io_start);
157 		drm_info(&i915->drm, "Intel graphics fake LMEM size: %llx\n",
158 			 (u64)resource_size(&mem->region));
159 	}
160 
161 	return mem;
162 }
163 
164 static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
165 				     u64 *start, u32 *size)
166 {
167 	if (!IS_DG1_GRAPHICS_STEP(uncore->i915, STEP_A0, STEP_C0))
168 		return false;
169 
170 	*start = 0;
171 	*size = SZ_1M;
172 
173 	drm_dbg(&uncore->i915->drm, "LMEM: reserved legacy low-memory [0x%llx-0x%llx]\n",
174 		*start, *start + *size);
175 
176 	return true;
177 }
178 
179 static int reserve_lowmem_region(struct intel_uncore *uncore,
180 				 struct intel_memory_region *mem)
181 {
182 	u64 reserve_start;
183 	u32 reserve_size;
184 	int ret;
185 
186 	if (!get_legacy_lowmem_region(uncore, &reserve_start, &reserve_size))
187 		return 0;
188 
189 	ret = intel_memory_region_reserve(mem, reserve_start, reserve_size);
190 	if (ret)
191 		drm_err(&uncore->i915->drm, "LMEM: reserving low memory region failed\n");
192 
193 	return ret;
194 }
195 
196 static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
197 {
198 	struct drm_i915_private *i915 = gt->i915;
199 	struct intel_uncore *uncore = gt->uncore;
200 	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
201 	struct intel_memory_region *mem;
202 	resource_size_t min_page_size;
203 	resource_size_t io_start;
204 	resource_size_t lmem_size;
205 	int err;
206 
207 	if (!IS_DGFX(i915))
208 		return ERR_PTR(-ENODEV);
209 
210 	/* Stolen starts from GSMBASE on DG1 */
211 	lmem_size = intel_uncore_read64(uncore, GEN12_GSMBASE);
212 
213 	io_start = pci_resource_start(pdev, 2);
214 	if (GEM_WARN_ON(lmem_size > pci_resource_len(pdev, 2)))
215 		return ERR_PTR(-ENODEV);
216 
217 	min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
218 						I915_GTT_PAGE_SIZE_4K;
219 	mem = intel_memory_region_create(i915,
220 					 0,
221 					 lmem_size,
222 					 min_page_size,
223 					 io_start,
224 					 INTEL_MEMORY_LOCAL,
225 					 0,
226 					 &intel_region_lmem_ops);
227 	if (IS_ERR(mem))
228 		return mem;
229 
230 	err = reserve_lowmem_region(uncore, mem);
231 	if (err)
232 		goto err_region_put;
233 
234 	drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
235 	drm_dbg(&i915->drm, "Local memory IO start: %pa\n",
236 		&mem->io_start);
237 	drm_info(&i915->drm, "Local memory available: %pa\n",
238 		 &lmem_size);
239 
240 	return mem;
241 
242 err_region_put:
243 	intel_memory_region_destroy(mem);
244 	return ERR_PTR(err);
245 }
246 
/*
 * Public entry point: set up the LMEM region for @gt.
 * Thin wrapper around setup_lmem(); see it for return semantics.
 */
struct intel_memory_region *intel_gt_setup_lmem(struct intel_gt *gt)
{
	return setup_lmem(gt);
}
251