// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h>

#include "gem/i915_gem_lmem.h"

#include "i915_trace.h"
#include "intel_gtt.h"
#include "gen6_ppgtt.h"
#include "gen8_ppgtt.h"

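/*
 * Allocate a single page table together with the 4K DMA page that
 * backs it. Returns an ERR_PTR on allocation failure.
 */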
struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
	struct i915_page_table *pt;

	pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pt))
		return ERR_PTR(-ENOMEM);

	pt->base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(pt->base)) {
		kfree(pt);
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&pt->used, 0);
	return pt;
}

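/*
 * Allocate the CPU-side directory structure with room for @count child
 * pointers, but without any backing DMA page.
 */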
struct i915_page_directory *__alloc_pd(int count)
{
	struct i915_page_directory *pd;

	pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pd))
		return NULL;

	pd->entry = kcalloc(count, sizeof(*pd->entry), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pd->entry)) {
		kfree(pd);
		return NULL;
	}

	spin_lock_init(&pd->lock);
	return pd;
}

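/*
 * Allocate a full page directory: __alloc_pd() for the I915_PDES child
 * pointers plus the 4K DMA page holding the hardware entries.
 */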
struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
	struct i915_page_directory *pd;

	pd = __alloc_pd(I915_PDES);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(pd->pt.base)) {
		kfree(pd->entry);
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	return pd;
}

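/*
 * Free a paging structure of any level; a directory (lvl > 0) also
 * owns an entry array. The BUILD_BUG_ON() backs the container_of(),
 * which requires the page table to be the first member of the
 * directory.
 */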
void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)
{
	BUILD_BUG_ON(offsetof(struct i915_page_directory, pt));

	if (lvl) {
		struct i915_page_directory *pd =
			container_of(pt, typeof(*pd), pt);
		kfree(pd->entry);
	}

	if (pt->base)
		i915_gem_object_put(pt->base);

	kfree(pt);
}

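/*
 * Write a single 64b descriptor into the DMA page backing a paging
 * structure, flushing the cacheline so the GPU sees the update.
 */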
static void
write_dma_entry(struct drm_i915_gem_object * const pdma,
		const unsigned short idx,
		const u64 encoded_entry)
{
	u64 * const vaddr = kmap_atomic(__px_page(pdma));

	vaddr[idx] = encoded_entry;
	clflush_cache_range(&vaddr[idx], sizeof(u64));
	kunmap_atomic(vaddr);
}

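/*
 * Install @to at @idx of @pd: take a use-count on the directory, track
 * the child on the CPU side and write the encoded entry for the GPU.
 */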
void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table * const to,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
	/* Each thread pre-pins the pd, and we may have a thread per pde. */
	GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * I915_PDES);

	atomic_inc(px_used(pd));
	pd->entry[idx] = to;
	write_dma_entry(px_base(pd), idx, encode(px_dma(to), I915_CACHE_LLC));
}

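/*
 * Reset @idx of @pd to the scratch entry and drop the use-count taken
 * by __set_pd_entry().
 */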
void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch)
{
	GEM_BUG_ON(atomic_read(px_used(pd)) == 0);

	write_dma_entry(px_base(pd), idx, scratch->encode);
	pd->entry[idx] = NULL;
	atomic_dec(px_used(pd));
}

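/*
 * Drop a reference to @pt; the last reference scrubs its slot in @pd
 * under the directory lock. Returns true if the caller may now free
 * the page table.
 */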
bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch)
{
	bool free = false;

	if (atomic_add_unless(&pt->used, -1, 1))
		return false;

	spin_lock(&pd->lock);
	if (atomic_dec_and_test(&pt->used)) {
		clear_pd_entry(pd, idx, scratch);
		free = true;
	}
	spin_unlock(&pd->lock);

	return free;
}

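/*
 * Write the GTT workarounds and, on gen6/gen7 only, enable ppgtt
 * support in hardware; later generations need no extra setup here.
 */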
int i915_ppgtt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	gtt_write_workarounds(gt);

	if (IS_GEN(i915, 6))
		gen6_ppgtt_enable(gt);
	else if (IS_GEN(i915, 7))
		gen7_ppgtt_enable(gt);

	return 0;
}

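/* Pick the ppgtt backend matching the hardware generation. */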
static struct i915_ppgtt *
__ppgtt_create(struct intel_gt *gt)
{
	if (INTEL_GEN(gt->i915) < 8)
		return gen6_ppgtt_create(gt);
	else
		return gen8_ppgtt_create(gt);
}

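/*
 * Create a ppgtt for @gt, emitting the creation tracepoint on success.
 */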
struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ppgtt *ppgtt;

	ppgtt = __ppgtt_create(gt);
	if (IS_ERR(ppgtt))
		return ppgtt;

	trace_i915_ppgtt_create(&ppgtt->vm);

	return ppgtt;
}

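/*
 * Bind a vma into the ppgtt: allocate the va range on first bind, then
 * write out the PTEs, applying read-only and local-memory bits where
 * the object requires them.
 */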
void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma *vma,
		    enum i915_cache_level cache_level,
		    u32 flags)
{
	u32 pte_flags;

	if (!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
		vm->allocate_va_range(vm, stash, vma->node.start, vma->size);
		set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
	}

	/* Applicable to VLV, and gen8+ */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(vma->obj))
		pte_flags |= PTE_READ_ONLY;
	if (i915_gem_object_is_lmem(vma->obj))
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma, cache_level, pte_flags);
	wmb();
}

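/* Scrub the PTEs of a vma that was previously bound. */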
void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
	if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
		vm->clear_range(vm, vma->node.start, vma->size);
}

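/*
 * Number of paging structures needed to cover @size at a level of
 * @shift bits per entry; the doubled slop allows for a range that is
 * misaligned at both ends.
 */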
static unsigned long pd_count(u64 size, int shift)
{
	/* Beware later misalignment */
	return (size + 2 * (BIT_ULL(shift) - 1)) >> shift;
}

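/*
 * Preallocate (unpinned) all the page tables and page directories that
 * could be needed to map @size bytes, chaining them onto the stash for
 * the va-range allocator to consume later.
 */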
int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size)
{
	unsigned long count;
	int shift, n;

	shift = vm->pd_shift;
	if (!shift)
		return 0;

	count = pd_count(size, shift);
	while (count--) {
		struct i915_page_table *pt;

		pt = alloc_pt(vm);
		if (IS_ERR(pt)) {
			i915_vm_free_pt_stash(vm, stash);
			return PTR_ERR(pt);
		}

		pt->stash = stash->pt[0];
		stash->pt[0] = pt;
	}

	for (n = 1; n < vm->top; n++) {
		shift += ilog2(I915_PDES); /* Each PD holds 512 entries */
		count = pd_count(size, shift);
		while (count--) {
			struct i915_page_directory *pd;

			pd = alloc_pd(vm);
			if (IS_ERR(pd)) {
				i915_vm_free_pt_stash(vm, stash);
				return PTR_ERR(pd);
			}

			pd->pt.stash = stash->pt[1];
			stash->pt[1] = &pd->pt;
		}
	}

	return 0;
}

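/*
 * Pin the backing store of everything previously stashed. On error the
 * stash is left intact for the caller to free.
 */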
int i915_vm_pin_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash)
{
	struct i915_page_table *pt;
	int n, err;

	for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
		for (pt = stash->pt[n]; pt; pt = pt->stash) {
			err = pin_pt_dma_locked(vm, pt->base);
			if (err)
				return err;
		}
	}

	return 0;
}

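/* Free anything still left in the stash, level by level. */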
void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash)
{
	struct i915_page_table *pt;
	int n;

	for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
		while ((pt = stash->pt[n])) {
			stash->pt[n] = pt->stash;
			free_px(vm, pt, n);
		}
	}
}

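/* A ppgtt vma maps the backing object's pages directly. */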
int ppgtt_set_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->pages);

	vma->pages = vma->obj->mm.pages;
	vma->page_sizes = vma->obj->mm.page_sizes;

	return 0;
}

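/*
 * Common ppgtt construction shared by the gen6 and gen8 backends:
 * size the vm from the platform limit, initialise the address space
 * and hook up the vma operations.
 */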
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	ppgtt->vm.gt = gt;
	ppgtt->vm.i915 = i915;
	ppgtt->vm.dma = i915->drm.dev;
	ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);

	dma_resv_init(&ppgtt->vm.resv);
	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);

	ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
	ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
	ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
	ppgtt->vm.vma_ops.clear_pages = clear_pages;
}
319