/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Kevin Tian <kevin.tian@intel.com>
 *    Dexuan Cui
 *
 * Contributors:
 *    Tina Zhang <tina.zhang@intel.com>
 *    Min He <min.he@intel.com>
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

/**
 * intel_vgpu_gpa_to_mmio_offset - translate a GPA to MMIO offset
 * @vgpu: a vGPU
 * @gpa: guest physical address
 *
 * Returns:
 * The MMIO offset of the given GPA, relative to the base of the vGPU's
 * GTTMMIO BAR.
 */
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
	u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
			  ~GENMASK(3, 0);
	return gpa - gttmmio_gpa;
}

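/*
 * Range helpers: reg_is_mmio() checks that an offset lies inside the
 * emulated MMIO register space, reg_is_gtt() that it lies inside the GGTT
 * range exposed through the same BAR.
 */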
#define reg_is_mmio(gvt, reg)  \
	(reg >= 0 && reg < gvt->device_info.mmio_size)

#define reg_is_gtt(gvt, reg)   \
	(reg >= gvt->device_info.gtt_start_offset \
	 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))

/**
 * intel_vgpu_emulate_mmio_read - emulate MMIO read
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: data return buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_mmio_info *mmio;
	unsigned int offset = 0;
	int ret = -EINVAL;

	mutex_lock(&gvt->lock);

	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
					p_data, bytes);
			if (ret) {
				gvt_err("vgpu%d: guest page read error %d, "
					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
					vgpu->id, ret,
					gp->gfn, pa, *(u32 *)p_data, bytes);
			}
			mutex_unlock(&gvt->lock);
			return ret;
		}
	}

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
		goto err;

	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
			goto err;
	}

	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
	if (mmio) {
		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
				goto err;
			if (WARN_ON(mmio->offset != offset))
				goto err;
		}
		ret = mmio->read(vgpu, offset, p_data, bytes);
	} else {
		ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);

		if (!vgpu->mmio.disable_warn_untrack) {
			gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
				vgpu->id, offset, bytes, *(u32 *)p_data);

			if (offset == 0x206c) {
				gvt_err("------------------------------------------\n");
				gvt_err("vgpu%d: likely triggers a gfx reset\n",
					vgpu->id);
				gvt_err("------------------------------------------\n");
				vgpu->mmio.disable_warn_untrack = true;
			}
		}
	}

	if (ret)
		goto err;

	intel_gvt_mmio_set_accessed(gvt, offset);
	mutex_unlock(&gvt->lock);
	return 0;
err:
	gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n",
			vgpu->id, offset, bytes);
	mutex_unlock(&gvt->lock);
	return ret;
}

/**
 * intel_vgpu_emulate_mmio_write - emulate MMIO write
 * @vgpu: a vGPU
 * @pa: guest physical address
 * @p_data: write data buffer
 * @bytes: access data length
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
		void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_mmio_info *mmio;
	unsigned int offset = 0;
	u32 old_vreg = 0, old_sreg = 0;
	int ret = -EINVAL;

	mutex_lock(&gvt->lock);

	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
		struct intel_vgpu_guest_page *gp;

		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
		if (gp) {
			ret = gp->handler(gp, pa, p_data, bytes);
			if (ret) {
				gvt_err("vgpu%d: guest page write error %d, "
					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
					vgpu->id, ret,
					gp->gfn, pa, *(u32 *)p_data, bytes);
			}
			mutex_unlock(&gvt->lock);
			return ret;
		}
	}

	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);

	if (WARN_ON(bytes > 8))
		goto err;

	if (reg_is_gtt(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
			goto err;
		if (WARN_ON(bytes != 4 && bytes != 8))
			goto err;
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

		ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
		mutex_unlock(&gvt->lock);
		return ret;
	}

	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
		mutex_unlock(&gvt->lock);
		return ret;
	}

	mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
	if (!mmio && !vgpu->mmio.disable_warn_untrack)
		gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
			vgpu->id, offset, bytes, *(u32 *)p_data);

	if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
		if (WARN_ON(!IS_ALIGNED(offset, bytes)))
			goto err;
	}

	if (mmio) {
		u64 ro_mask = mmio->ro_mask;

		if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
			if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
				goto err;
			if (WARN_ON(mmio->offset != offset))
				goto err;
		}

		if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
			old_vreg = vgpu_vreg(vgpu, offset);
			old_sreg = vgpu_sreg(vgpu, offset);
		}

		if (!ro_mask) {
			ret = mmio->write(vgpu, offset, p_data, bytes);
		} else {
			/* Protect RO bits like HW */
			u64 data = 0;

			/* all register bits are RO. */
			if (ro_mask == ~(u64)0) {
				gvt_err("vgpu%d: try to write RO reg %x\n",
						vgpu->id, offset);
				ret = 0;
				goto out;
			}
			/* keep the RO bits in the virtual register */
			memcpy(&data, p_data, bytes);
			data &= ~mmio->ro_mask;
			data |= vgpu_vreg(vgpu, offset) & mmio->ro_mask;
			ret = mmio->write(vgpu, offset, &data, bytes);
		}

		/* higher 16bits of mode ctl regs are mask bits for change */
		if (intel_gvt_mmio_has_mode_mask(gvt, mmio->offset)) {
			u32 mask = vgpu_vreg(vgpu, offset) >> 16;

			vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
				| (vgpu_vreg(vgpu, offset) & mask);
			vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
				| (vgpu_sreg(vgpu, offset) & mask);
		}
	} else
		ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
				bytes);
	if (ret)
		goto err;
out:
	intel_gvt_mmio_set_accessed(gvt, offset);
	mutex_unlock(&gvt->lock);
	return 0;
err:
	gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n",
			vgpu->id, offset, bytes);
	mutex_unlock(&gvt->lock);
	return ret;
}

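/*
 * A hypervisor backend that traps a guest access to the GTTMMIO BAR is
 * expected to forward it to the two emulation entry points above, roughly
 * as sketched below (handle_mmio_trap() and its is_write flag are
 * hypothetical names, not GVT symbols):
 *
 *	static int handle_mmio_trap(struct intel_vgpu *vgpu, u64 gpa,
 *				    void *data, unsigned int len, bool is_write)
 *	{
 *		if (is_write)
 *			return intel_vgpu_emulate_mmio_write(vgpu, gpa, data, len);
 *		return intel_vgpu_emulate_mmio_read(vgpu, gpa, data, len);
 *	}
 */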

/**
 * intel_vgpu_reset_mmio - reset virtual MMIO space
 * @vgpu: a vGPU
 *
 */
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;

	memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
	memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);

	vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;

	/* set bits 0:2 (Core C-State) to C0 */
	vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
}

/**
 * intel_vgpu_init_mmio - init MMIO space
 * @vgpu: a vGPU
 *
 * Returns:
 * Zero on success, negative error code if failed
 */
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;

	vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
	if (!vgpu->mmio.vreg)
		return -ENOMEM;

	vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;

	intel_vgpu_reset_mmio(vgpu);

	return 0;
}

/**
 * intel_vgpu_clean_mmio - clean MMIO space
 * @vgpu: a vGPU
 *
 */
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
{
	vfree(vgpu->mmio.vreg);
	vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}
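
/*
 * Layout note: intel_vgpu_init_mmio() backs both register copies with a
 * single vzalloc'd area of 2 * mmio_size bytes, with vreg in the first half
 * and sreg placed right after it, which is why intel_vgpu_clean_mmio() only
 * has to free vreg.
 */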