xref: /openbmc/linux/drivers/gpu/drm/i915/gvt/mmio.c (revision 5d0e4d78)
/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Kevin Tian <kevin.tian@intel.com>
 *    Dexuan Cui
 *
 * Contributors:
 *    Tina Zhang <tina.zhang@intel.com>
 *    Min He <min.he@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

/**
 * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
 * @vgpu: a vGPU
 * @gpa: guest physical address
 *
 * Returns:
 * The MMIO offset of the given GPA
 */
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
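	/*
	 * The low four bits of a PCI memory BAR hold flag bits (memory
	 * space indicator, type, prefetchable), not address bits, so mask
	 * them off before computing the offset of the access within the
	 * GTTMMIO BAR.
	 */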
	u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
			  ~GENMASK(3, 0);
	return gpa - gttmmio_gpa;
}

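/*
 * Range checks against the device layout: reg_is_mmio() matches offsets
 * inside the MMIO register block at the start of the GTTMMIO BAR, while
 * reg_is_gtt() matches offsets inside the virtual GGTT that follows it.
 */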
#define reg_is_mmio(gvt, reg)  \
	(reg >= 0 && reg < gvt->device_info.mmio_size)

#define reg_is_gtt(gvt, reg)   \
	(reg >= gvt->device_info.gtt_start_offset \
	 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))

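/*
 * Failsafe-mode emulation: once a vGPU has been put into failsafe mode,
 * MMIO register accesses are served by the default read/write handlers and
 * GGTT accesses operate directly on the vGPU's virtual page table,
 * bypassing the per-register handlers used on the normal paths below.
 */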
static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes, bool read)
{
	struct intel_gvt *gvt = NULL;
	void *pt = NULL;
	unsigned int offset = 0;

	if (!vgpu || !p_data)
		return;

	gvt = vgpu->gvt;
	mutex_lock(&gvt->lock);
	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
	if (reg_is_mmio(gvt, offset)) {
		if (read)
			intel_vgpu_default_mmio_read(vgpu, offset, p_data,
					bytes);
		else
			intel_vgpu_default_mmio_write(vgpu, offset, p_data,
					bytes);
	} else if (reg_is_gtt(gvt, offset) &&
			vgpu->gtt.ggtt_mm->virtual_page_table) {
		offset -= gvt->device_info.gtt_start_offset;
		pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
		if (read)
			memcpy(p_data, pt, bytes);
		else
			memcpy(pt, p_data, bytes);

	} else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		/* Since we enter failsafe mode early during guest boot, the
		 * guest may not have had a chance to set up its PPGTT tables
		 * yet, so there should not be any write-protected pages at
		 * this point. Keep the wp-related code here in case we need
		 * to handle it in the future.
		 */
		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			/* remove write protection to prevent future traps */
			intel_vgpu_clean_guest_page(vgpu, gp);
			if (read)
				intel_gvt_hypervisor_read_gpa(vgpu, pa,
						p_data, bytes);
			else
				intel_gvt_hypervisor_write_gpa(vgpu, pa,
						p_data, bytes);
		}
	}
	mutex_unlock(&gvt->lock);
}

/**
 * intel_vgpu_emulate_mmio_read - emulate MMIO read
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	unsigned int offset = 0;
	int ret = -EINVAL;

	if (vgpu->failsafe) {
		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
		return 0;
	}
	mutex_lock(&gvt->lock);

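	/*
	 * A GPA that falls inside a write-protected guest page (e.g. a
	 * shadowed PPGTT page table) is not MMIO at all; read the guest
	 * memory directly through the hypervisor instead.
	 */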
	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
					p_data, bytes);
			if (ret) {
				gvt_vgpu_err("guest page read error %d, "
					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
					ret, gp->gfn, pa, *(u32 *)p_data,
					bytes);
			}
			mutex_unlock(&gvt->lock);
			return ret;
		}
	}

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
		goto err;

	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
			goto err;
	}

	ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, true);
	if (ret < 0)
		goto err;

	intel_gvt_mmio_set_accessed(gvt, offset);
	mutex_unlock(&gvt->lock);
	return 0;
err:
	gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
			offset, bytes);
	mutex_unlock(&gvt->lock);
	return ret;
}
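/*
 * Illustrative only (a sketch, not part of this file): a hypervisor
 * backend that traps a guest MMIO read is expected to forward it to
 * intel_vgpu_emulate_mmio_read() roughly as follows, with "gpa" and
 * "bytes" taken from the trapped access:
 *
 *	u64 val = 0;
 *	if (intel_vgpu_emulate_mmio_read(vgpu, gpa, &val, bytes))
 *		... fail the access (hypothetical error handling) ...
 *	... otherwise copy "val" back to the vCPU that issued the read ...
 *
 * Trapped writes are forwarded to intel_vgpu_emulate_mmio_write() in the
 * same way.
 */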

/**
 * intel_vgpu_emulate_mmio_write - emulate MMIO write
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	unsigned int offset = 0;
	int ret = -EINVAL;

	if (vgpu->failsafe) {
		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false);
		return 0;
	}

	mutex_lock(&gvt->lock);

	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
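			/*
			 * The write hit a write-protected guest page (for
			 * example a guest PPGTT page table that is being
			 * shadowed); dispatch to the handler registered for
			 * that page so the shadow state stays in sync.
			 */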
			ret = gp->handler(gp, pa, p_data, bytes);
			if (ret) {
				gvt_err("guest page write error %d, "
					"gfn 0x%lx, pa 0x%llx, "
					"var 0x%x, len %d\n",
					ret, gp->gfn, pa,
					*(u32 *)p_data, bytes);
			}
			mutex_unlock(&gvt->lock);
			return ret;
		}
	}

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
		mutex_unlock(&gvt->lock);
		return ret;
	}

	ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, false);
	if (ret < 0)
		goto err;

	intel_gvt_mmio_set_accessed(gvt, offset);
	mutex_unlock(&gvt->lock);
	return 0;
err:
	gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
		     bytes);
	mutex_unlock(&gvt->lock);
	return ret;
}


/**
 * intel_vgpu_reset_mmio - reset virtual MMIO space
 * @vgpu: a vGPU
 * @dmlr: whether this is a device-model-level (full) reset
 *
 */
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	void *mmio = gvt->firmware.mmio;

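	/*
	 * A device-model-level reset restores the complete vreg/sreg space
	 * from the MMIO snapshot saved at GVT firmware init
	 * (gvt->firmware.mmio); otherwise only the engine-related registers
	 * below GVT_GEN8_MMIO_RESET_OFFSET are restored.
	 */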
	if (dmlr) {
		memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
		memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);

		vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;

		/* set bits 0:2 (Core C-State) to C0 */
		vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;

		vgpu->mmio.disable_warn_untrack = false;
	} else {
#define GVT_GEN8_MMIO_RESET_OFFSET		(0x44200)
		/* Only reset the engine-related MMIO, i.e. everything below
		 * 0x44200; interrupt registers (including DE) and display
		 * MMIO above that offset are left untouched.
		 */
		memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
		memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
	}

}

/**
 * intel_vgpu_init_mmio - init MMIO space
 * @vgpu: a vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;

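	/*
	 * vreg and sreg are carved out of a single vzalloc'd block of
	 * 2 * mmio_size bytes; sreg starts at the second half, and both are
	 * released together by intel_vgpu_clean_mmio() via vfree(vreg).
	 */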
	vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
	if (!vgpu->mmio.vreg)
		return -ENOMEM;

	vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;

	intel_vgpu_reset_mmio(vgpu, true);

	return 0;
}

/**
 * intel_vgpu_clean_mmio - clean MMIO space
 * @vgpu: a vGPU
 *
 */
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
{
	vfree(vgpu->mmio.vreg);
	vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}