xref: /openbmc/linux/drivers/gpu/drm/i915/gvt/mmio.c (revision 9dae47aba0a055f761176d9297371d5bb24289ec)
/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Kevin Tian <kevin.tian@intel.com>
 *    Dexuan Cui
 *
 * Contributors:
 *    Tina Zhang <tina.zhang@intel.com>
 *    Min He <min.he@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

/**
 * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
 * @vgpu: a vGPU
 * @gpa: guest physical address
 *
 * Returns:
 * The MMIO offset within BAR0 that corresponds to @gpa.
 */
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
	u64 gttmmio_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);

	return gpa - gttmmio_gpa;
}
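
/*
 * A worked example of the translation above, using purely illustrative
 * (hypothetical) addresses: if the guest has programmed BAR0
 * (PCI_BASE_ADDRESS_0, the GTTMMIO BAR) to GPA 0xa0000000 and then
 * touches GPA 0xa0002000, the helper returns offset 0x2000. The callers
 * below classify that offset as an MMIO register, a GTT entry or
 * neither, using the range checks that follow.
 */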

#define reg_is_mmio(gvt, reg)  \
	(reg >= 0 && reg < gvt->device_info.mmio_size)

#define reg_is_gtt(gvt, reg)   \
	(reg >= gvt->device_info.gtt_start_offset \
	 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
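
/*
 * Sketch of the BAR0 layout implied by the two range checks above. The
 * actual sizes come from gvt->device_info and are device specific; the
 * picture is only illustrative:
 *
 *   [0, mmio_size)                                   -> reg_is_mmio()
 *   [gtt_start_offset, gtt_start_offset + ggtt size) -> reg_is_gtt()
 *
 * Offsets that fall in neither range are passed through to the
 * hypervisor as ordinary guest physical memory accesses.
 */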

static bool vgpu_gpa_is_aperture(struct intel_vgpu *vgpu, uint64_t gpa)
{
	u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
	u64 aperture_sz = vgpu_aperture_sz(vgpu);

	return gpa >= aperture_gpa && gpa < aperture_gpa + aperture_sz;
}

static int vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t gpa,
			    void *pdata, unsigned int size, bool is_read)
{
	u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
	u64 offset = gpa - aperture_gpa;

	if (!vgpu_gpa_is_aperture(vgpu, gpa + size - 1)) {
		gvt_vgpu_err("Aperture rw out of range, offset %llx, size %d\n",
			     offset, size);
		return -EINVAL;
	}

	if (!vgpu->gm.aperture_va) {
		gvt_vgpu_err("BAR is not enabled\n");
		return -ENXIO;
	}

	if (is_read)
		memcpy(pdata, vgpu->gm.aperture_va + offset, size);
	else
		memcpy(vgpu->gm.aperture_va + offset, pdata, size);
	return 0;
}

static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes, bool read)
{
	struct intel_gvt *gvt = NULL;
	void *pt = NULL;
	unsigned int offset = 0;

	if (!vgpu || !p_data)
		return;

	gvt = vgpu->gvt;
	mutex_lock(&gvt->lock);
	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
	if (reg_is_mmio(gvt, offset)) {
		if (read)
			intel_vgpu_default_mmio_read(vgpu, offset, p_data,
					bytes);
		else
			intel_vgpu_default_mmio_write(vgpu, offset, p_data,
					bytes);
	} else if (reg_is_gtt(gvt, offset) &&
			vgpu->gtt.ggtt_mm->virtual_page_table) {
		offset -= gvt->device_info.gtt_start_offset;
		pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
		if (read)
			memcpy(p_data, pt, bytes);
		else
			memcpy(pt, p_data, bytes);

	} else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		/* Since we enter failsafe mode early during guest boot,
		 * the guest may not have had a chance to set up its ppgtt
		 * table yet, so there should not be any write-protected
		 * pages for the guest. Keep the write-protection related
		 * code here in case we need to handle it in the future.
		 */
		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			/* remove write protection to prevent future traps */
			intel_vgpu_clean_guest_page(vgpu, gp);
			if (read)
				intel_gvt_hypervisor_read_gpa(vgpu, pa,
						p_data, bytes);
			else
				intel_gvt_hypervisor_write_gpa(vgpu, pa,
						p_data, bytes);
		}
	}
	mutex_unlock(&gvt->lock);
}

/**
 * intel_vgpu_emulate_mmio_read - emulate MMIO read
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	unsigned int offset = 0;
	int ret = -EINVAL;

	if (vgpu->failsafe) {
		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
		return 0;
	}
	mutex_lock(&gvt->lock);

	if (vgpu_gpa_is_aperture(vgpu, pa)) {
		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
					p_data, bytes);
			if (ret) {
				gvt_vgpu_err("guest page read error %d, "
					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
					ret, gp->gfn, pa, *(u32 *)p_data,
					bytes);
			}
			mutex_unlock(&gvt->lock);
			return ret;
		}
	}

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
		goto err;

	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
			goto err;
	}

	ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, true);
	if (ret < 0)
		goto err;

	intel_gvt_mmio_set_accessed(gvt, offset);
	mutex_unlock(&gvt->lock);
	return 0;
err:
	gvt_vgpu_err("failed to emulate MMIO read %08x len %d\n",
			offset, bytes);
	mutex_unlock(&gvt->lock);
	return ret;
}

/**
 * intel_vgpu_emulate_mmio_write - emulate MMIO write
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	unsigned int offset = 0;
	int ret = -EINVAL;

	if (vgpu->failsafe) {
		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false);
		return 0;
	}

	mutex_lock(&gvt->lock);

	if (vgpu_gpa_is_aperture(vgpu, pa)) {
		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			ret = gp->handler(gp, pa, p_data, bytes);
			if (ret) {
				gvt_err("guest page write error %d, "
					"gfn 0x%lx, pa 0x%llx, "
					"var 0x%x, len %d\n",
					ret, gp->gfn, pa,
					*(u32 *)p_data, bytes);
			}
			mutex_unlock(&gvt->lock);
			return ret;
		}
	}

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
		mutex_unlock(&gvt->lock);
		return ret;
	}

	ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, false);
	if (ret < 0)
		goto err;

	intel_gvt_mmio_set_accessed(gvt, offset);
	mutex_unlock(&gvt->lock);
	return 0;
err:
	gvt_vgpu_err("failed to emulate MMIO write %08x len %d\n", offset,
		     bytes);
	mutex_unlock(&gvt->lock);
	return ret;
}
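
/*
 * A minimal usage sketch, not part of this file: a hypervisor backend
 * (e.g. the KVMGT module) is expected to forward trapped guest accesses
 * to the two entry points above. The wrapper below is hypothetical and
 * only illustrates the calling convention:
 *
 *	static int sample_forward_mmio_access(struct intel_vgpu *vgpu,
 *					      u64 gpa, void *buf,
 *					      unsigned int len, bool is_write)
 *	{
 *		if (is_write)
 *			return intel_vgpu_emulate_mmio_write(vgpu, gpa,
 *							     buf, len);
 *		return intel_vgpu_emulate_mmio_read(vgpu, gpa, buf, len);
 *	}
 *
 * Note that both entry points take the guest physical address of the
 * access, not a BAR-relative offset; the GPA-to-offset translation and
 * the aperture/GTT/MMIO dispatch happen internally under gvt->lock.
 */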

/**
 * intel_vgpu_reset_mmio - reset virtual MMIO space
 * @vgpu: a vGPU
 * @dmlr: whether this is a device model level (full) reset
 *
 */
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	void *mmio = gvt->firmware.mmio;

	if (dmlr) {
		memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
		memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);

		vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;

		/* set bits 0:2 (Core C-State) to C0 */
		vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;

		vgpu->mmio.disable_warn_untrack = false;
	} else {
#define GVT_GEN8_MMIO_RESET_OFFSET		(0x44200)
		/* Only reset the engine related MMIO. Registers starting
		 * from 0x44200 (interrupt, DE and other display related
		 * MMIO) will not be touched.
		 */
		memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
		memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
	}

}

/**
 * intel_vgpu_init_mmio - init MMIO space
 * @vgpu: a vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;

	vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
	if (!vgpu->mmio.vreg)
		return -ENOMEM;

	vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;

	intel_vgpu_reset_mmio(vgpu, true);

	return 0;
}
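
/*
 * Layout of the single allocation made in intel_vgpu_init_mmio() above,
 * shown only as an illustration (the guest-view/shadow naming is an
 * assumption based on common GVT-g terminology):
 *
 *	vgpu->mmio.vreg                      vgpu->mmio.sreg
 *	|<----------- mmio_size ----------->|<----------- mmio_size ----------->|
 *	  virtual registers (guest view)       shadow registers (GVT-g internal)
 *
 * Since vreg and sreg live in one vzalloc() buffer with sreg starting at
 * vreg + mmio_size, freeing vreg in intel_vgpu_clean_mmio() below releases
 * both halves.
 */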

/**
 * intel_vgpu_clean_mmio - clean MMIO space
 * @vgpu: a vGPU
 *
 */
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
{
	vfree(vgpu->mmio.vreg);
	vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}
395