/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Kevin Tian <kevin.tian@intel.com>
 *    Dexuan Cui
 *
 * Contributors:
 *    Tina Zhang <tina.zhang@intel.com>
 *    Min He <min.he@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

/**
 * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
 * @vgpu: a vGPU
 * @gpa: guest physical address
 *
 * Returns:
 * The offset of the given GPA from the base of the vGPU's MMIO/GTT BAR
 * (BAR0), as programmed in its virtual PCI configuration space.
 */
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
	/* BAR0 base from the virtual cfg space, low flag bits masked off. */
	u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
			  ~GENMASK(3, 0);

	return gpa - gttmmio_gpa;
}

#define reg_is_mmio(gvt, reg)  \
	(reg >= 0 && reg < gvt->device_info.mmio_size)

#define reg_is_gtt(gvt, reg)   \
	(reg >= gvt->device_info.gtt_start_offset \
	 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
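
/*
 * Overview of the dispatch flow below (descriptive note only; the code is
 * authoritative):
 *
 * Both emulation entry points take a trapped guest physical address (GPA).
 * They first check whether the access hits a write-protected guest page
 * (typically a guest page-table page tracked by GVT), then translate the GPA
 * into an offset inside the vGPU's MMIO/GTT BAR. Offsets in the GTT range go
 * to the GTT emulation routines, tracked registers go to their per-register
 * handlers, and everything else falls through to the default MMIO read/write
 * paths.
 *
 * As a purely illustrative example of the offset arithmetic (addresses are
 * made up): if the vGPU's virtual BAR0 is programmed to 0xf0000000 and the
 * guest touches GPA 0xf0002030, intel_vgpu_gpa_to_mmio_offset() yields
 * register offset 0x2030.
 */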
/**
 * intel_vgpu_emulate_mmio_read - emulate MMIO read
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_mmio_info *mmio;
	unsigned int offset = 0;
	int ret = -EINVAL;

	mutex_lock(&gvt->lock);

	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
					p_data, bytes);
			if (ret) {
				gvt_err("vgpu%d: guest page read error %d, "
					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
					vgpu->id, ret,
					gp->gfn, pa, *(u32 *)p_data, bytes);
			}
			mutex_unlock(&gvt->lock);
			return ret;
		}
	}

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
		goto err;

	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
	if (!mmio && !vgpu->mmio.disable_warn_untrack) {
		gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
			vgpu->id, offset, bytes, *(u32 *)p_data);

		if (offset == 0x206c) {
			gvt_err("------------------------------------------\n");
			gvt_err("vgpu%d: likely triggers a gfx reset\n",
				vgpu->id);
			gvt_err("------------------------------------------\n");
			vgpu->mmio.disable_warn_untrack = true;
		}
	}

	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
			goto err;
	}

	if (mmio) {
		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
				goto err;
			if (WARN_ON(mmio->offset != offset))
				goto err;
		}
		ret = mmio->read(vgpu, offset, p_data, bytes);
	} else
		ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);

	if (ret)
		goto err;

	intel_gvt_mmio_set_accessed(gvt, offset);
	mutex_unlock(&gvt->lock);
	return 0;
err:
	gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
			vgpu->id, offset, bytes);
	mutex_unlock(&gvt->lock);
	return ret;
}
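
/*
 * The write path below mirrors the read path, with two extra wrinkles
 * (descriptive note; the code is authoritative):
 *
 * - Registers with a non-zero ro_mask have their read-only bits protected:
 *   before the handler is called, the bits covered by ro_mask are taken from
 *   the current virtual register value rather than the guest-supplied data.
 *
 * - "Mode" control registers use their upper 16 bits as a write mask for the
 *   lower 16 bits. As a purely illustrative example (made-up value): writing
 *   0x00080008 changes only bit 3 of the register and leaves the other low
 *   bits untouched; the code re-merges the old and new values with that mask
 *   after the handler runs.
 */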
/**
 * intel_vgpu_emulate_mmio_write - emulate MMIO write
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_mmio_info *mmio;
	unsigned int offset = 0;
	u32 old_vreg = 0, old_sreg = 0;
	int ret = -EINVAL;

	mutex_lock(&gvt->lock);

	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			ret = gp->handler(gp, pa, p_data, bytes);
			if (ret) {
				gvt_err("vgpu%d: guest page write error %d, "
					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
					vgpu->id, ret,
					gp->gfn, pa, *(u32 *)p_data, bytes);
			}
			mutex_unlock(&gvt->lock);
			return ret;
		}
	}

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
		mutex_unlock(&gvt->lock);
		return ret;
	}

	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
	if (!mmio && !vgpu->mmio.disable_warn_untrack)
		gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
			vgpu->id, offset, bytes, *(u32 *)p_data);

	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
			goto err;
	}

	if (mmio) {
		u64 ro_mask = mmio->ro_mask;

		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
				goto err;
			if (WARN_ON(mmio->offset != offset))
				goto err;
		}

		if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
			old_vreg = vgpu_vreg(vgpu, offset);
			old_sreg = vgpu_sreg(vgpu, offset);
		}

		if (!ro_mask) {
			ret = mmio->write(vgpu, offset, p_data, bytes);
		} else {
			/* Protect RO bits like HW */
			u64 data = 0;

			/* all register bits are RO. */
			if (ro_mask == ~(u64)0) {
				gvt_err("vgpu%d: try to write RO reg %x\n",
						vgpu->id, offset);
				ret = 0;
				goto out;
			}
			/* keep the RO bits in the virtual register */
			memcpy(&data, p_data, bytes);
			data &= ~mmio->ro_mask;
			data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
			ret = mmio->write(vgpu, offset, &data, bytes);
		}

		/* higher 16bits of mode ctl regs are mask bits for change */
		if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
			u32 mask = vgpu_vreg(vgpu, offset) >> 16;

			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
				| (vgpu_vreg(vgpu, offset) & mask);
			vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
				| (vgpu_sreg(vgpu, offset) & mask);
		}
	} else
		ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
				bytes);
	if (ret)
		goto err;
out:
	intel_gvt_mmio_set_accessed(gvt, offset);
	mutex_unlock(&gvt->lock);
	return 0;
err:
	gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
			vgpu->id, offset, bytes);
	mutex_unlock(&gvt->lock);
	return ret;
}
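
/*
 * Usage note (descriptive, not part of this file's logic): the two entry
 * points above are expected to be invoked by the hypervisor-specific glue
 * (e.g. the KVMGT backend) whenever a guest access to the vGPU's BAR0 traps,
 * passing the faulting GPA, the data buffer and the access width straight
 * through.
 */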