/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Jike Song <jike.song@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

enum {
	INTEL_GVT_PCI_BAR_GTTMMIO = 0,
	INTEL_GVT_PCI_BAR_APERTURE,
	INTEL_GVT_PCI_BAR_PIO,
	INTEL_GVT_PCI_BAR_MAX,
};

/* bitmap of writable bits (RW or RW1C bits, which cannot co-exist in one
 * byte), byte by byte, for the standard PCI configuration space header
 * (not the full 256 bytes).
 */
static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
	[PCI_COMMAND]		= 0xff, 0x07,
	[PCI_STATUS]		= 0x00, 0xf9, /* the only RW1C byte */
	[PCI_CACHE_LINE_SIZE]	= 0xff,
	[PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff,
	[PCI_ROM_ADDRESS]	= 0x01, 0xf8, 0xff, 0xff,
	[PCI_INTERRUPT_LINE]	= 0xff,
};

/**
 * vgpu_pci_cfg_mem_write - write virtual cfg space memory
 * @vgpu: target vGPU
 * @off: offset into the virtual cfg space
 * @src: source data buffer
 * @bytes: access length in bytes
 *
 * Use this function to write virtual cfg space memory.
 * For the standard cfg space header, only RW bits can be changed,
 * and we emulate the RW1C behavior of the PCI_STATUS register.
 */
static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
				   u8 *src, unsigned int bytes)
{
	u8 *cfg_base = vgpu_cfg_space(vgpu);
	u8 mask, new, old;
	int i = 0;

	for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
		mask = pci_cfg_space_rw_bmp[off + i];
		old = cfg_base[off + i];
		new = src[i] & mask;

		/*
		 * The PCI_STATUS high byte has RW1C bits; here we
		 * emulate the clear-by-writing-1 behavior for those bits.
		 * Writing a 0 to RW1C bits has no effect.
		 */
		if (off + i == PCI_STATUS + 1)
			new = (~new & old) & mask;

		cfg_base[off + i] = (old & ~mask) | new;
	}

	/* For the rest of the configuration space, copy as-is. */
	if (i < bytes)
		memcpy(cfg_base + off + i, src + i, bytes - i);
}
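
/*
 * Worked example of the RW1C emulation above (hypothetical values): suppose
 * the PCI_STATUS high byte currently reads 0xf9 and the guest writes 0x08.
 * With mask = 0xf9 from the bitmap: new = 0x08 & 0xf9 = 0x08, then
 * new = (~0x08 & 0xf9) & 0xf9 = 0xf1, and the stored byte becomes
 * (0xf9 & ~0xf9) | 0xf1 = 0xf1. Only the bit written as 1 is cleared;
 * the remaining set bits are preserved.
 */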

/**
 * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
 * @vgpu: target vGPU
 * @offset: offset into the virtual cfg space
 * @p_data: destination data buffer
 * @bytes: access length in bytes
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	if (WARN_ON(bytes > 4))
		return -EINVAL;

	if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
		return -EINVAL;

	memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
	return 0;
}

static int map_aperture(struct intel_vgpu *vgpu, bool map)
{
	u64 first_gfn, first_mfn;
	u64 val;
	int ret;

	if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
		return 0;

	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
		val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
	else
		val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);

	first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
	first_mfn = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;

	ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
						  first_mfn,
						  vgpu_aperture_sz(vgpu) >>
						  PAGE_SHIFT, map);
	if (ret)
		return ret;

	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
	return 0;
}

static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
{
	u64 start, end;
	u64 val;
	int ret;

	if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
		return 0;

	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
		start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
	else
		start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);

	start &= ~GENMASK(3, 0);
	end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;

	ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
	if (ret)
		return ret;

	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
	return 0;
}

static int emulate_pci_command_write(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes)
{
	u8 old = vgpu_cfg_space(vgpu)[offset];
	u8 new = *(u8 *)p_data;
	u8 changed = old ^ new;
	int ret;

	vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
	if (!(changed & PCI_COMMAND_MEMORY))
		return 0;

	if (old & PCI_COMMAND_MEMORY) {
		/* Memory decode was just disabled: tear down trap and map. */
		ret = trap_gttmmio(vgpu, false);
		if (ret)
			return ret;
		ret = map_aperture(vgpu, false);
		if (ret)
			return ret;
	} else {
		/* Memory decode was just enabled: install trap and map. */
		ret = trap_gttmmio(vgpu, true);
		if (ret)
			return ret;
		ret = map_aperture(vgpu, true);
		if (ret)
			return ret;
	}

	return 0;
}
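
/*
 * Illustrative guest sequence handled below (a sketch of the usual flow, not
 * a spec quote): to move a BAR, a guest typically clears PCI_COMMAND_MEMORY
 * (handled by emulate_pci_command_write above), writes all 1's to the BAR
 * and reads it back to size it, writes the new base address, and finally
 * re-enables memory decode, at which point the new range gets trapped or
 * mapped.
 */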
static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	unsigned int bar_index =
		(rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8;
	u32 new = *(u32 *)(p_data);
	bool lo = IS_ALIGNED(offset, 8);
	u64 size;
	int ret = 0;
	bool mmio_enabled =
		vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;

	if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX))
		return -EINVAL;

	if (new == 0xffffffff) {
		/*
		 * Power-up software can determine how much address
		 * space the device requires by writing a value of
		 * all 1's to the register and then reading the value
		 * back. The device will return 0's in all don't-care
		 * address bits.
		 */
		size = vgpu->cfg_space.bar[bar_index].size;
		if (lo) {
			new = rounddown(new, size);
		} else {
			u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)];
			/* for a 32-bit BAR, return all 0's in the upper
			 * 32 bits; for a 64-bit BAR, compute the size from
			 * the lower 32 bits and return the corresponding
			 * upper-half value
			 */
			if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
				new &= (~(size-1)) >> 32;
			else
				new = 0;
		}
		/*
		 * Unmap & untrap the BAR, since the guest hasn't configured
		 * a valid GPA
		 */
		switch (bar_index) {
		case INTEL_GVT_PCI_BAR_GTTMMIO:
			ret = trap_gttmmio(vgpu, false);
			break;
		case INTEL_GVT_PCI_BAR_APERTURE:
			ret = map_aperture(vgpu, false);
			break;
		}
		intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
	} else {
		/*
		 * Unmap & untrap the old BAR first, since the guest has
		 * re-configured the BAR
		 */
		switch (bar_index) {
		case INTEL_GVT_PCI_BAR_GTTMMIO:
			ret = trap_gttmmio(vgpu, false);
			break;
		case INTEL_GVT_PCI_BAR_APERTURE:
			ret = map_aperture(vgpu, false);
			break;
		}
		intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
		/* Track the new BAR */
		if (mmio_enabled) {
			switch (bar_index) {
			case INTEL_GVT_PCI_BAR_GTTMMIO:
				ret = trap_gttmmio(vgpu, true);
				break;
			case INTEL_GVT_PCI_BAR_APERTURE:
				ret = map_aperture(vgpu, true);
				break;
			}
		}
	}
	return ret;
}
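
/*
 * Worked sizing example for the all-1's path above (hypothetical 16 MB
 * 32-bit BAR, i.e. size = 0x1000000): the guest writes 0xffffffff to the
 * lower dword, rounddown(0xffffffff, 0x1000000) stores 0xff000000, and the
 * guest reads that value back, ignores the low flag bits, and computes
 * ~0xff000000 + 1 = 0x1000000 = 16 MB.
 */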

/**
 * intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write
 * @vgpu: target vGPU
 * @offset: offset into the virtual cfg space
 * @p_data: source data buffer
 * @bytes: access length in bytes
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	int ret;

	if (vgpu->failsafe)
		return 0;

	if (WARN_ON(bytes > 4))
		return -EINVAL;

	if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
		return -EINVAL;

	/* First check if it's PCI_COMMAND */
	if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
		if (WARN_ON(bytes > 2))
			return -EINVAL;
		return emulate_pci_command_write(vgpu, offset, p_data, bytes);
	}

	switch (rounddown(offset, 4)) {
	case PCI_BASE_ADDRESS_0:
	case PCI_BASE_ADDRESS_1:
	case PCI_BASE_ADDRESS_2:
	case PCI_BASE_ADDRESS_3:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
		return emulate_pci_bar_write(vgpu, offset, p_data, bytes);

	case INTEL_GVT_PCI_SWSCI:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
		ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
		if (ret)
			return ret;
		break;

	case INTEL_GVT_PCI_OPREGION:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
		ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
		if (ret)
			return ret;

		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
		break;
	default:
		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
		break;
	}
	return 0;
}

/**
 * intel_vgpu_init_cfg_space - init vGPU configuration space when creating a vGPU
 *
 * @vgpu: a vGPU
 * @primary: is the vGPU presented as primary
 *
 */
void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
			       bool primary)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	u16 *gmch_ctl;
	int i;

	memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
	       info->cfg_space_size);

	if (!primary) {
		vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
			INTEL_GVT_PCI_CLASS_VGA_OTHER;
		vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
			INTEL_GVT_PCI_CLASS_VGA_OTHER;
	}

	/* Show the guest that there isn't any stolen memory. */
	gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
	*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);

	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
				 gvt_aperture_pa_base(gvt), true);

	vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
					       | PCI_COMMAND_MEMORY
					       | PCI_COMMAND_MASTER);
	/*
	 * Clear the upper 32 bits of the BARs and let the guest assign
	 * new values
	 */
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
	memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);

	for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
		vgpu->cfg_space.bar[i].size = pci_resource_len(
			gvt->dev_priv->drm.pdev, i * 2);
		vgpu->cfg_space.bar[i].tracked = false;
	}
}

/**
 * intel_vgpu_reset_cfg_space - reset vGPU configuration space
 *
 * @vgpu: a vGPU
 *
 */
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
{
	u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
	bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
		INTEL_GVT_PCI_CLASS_VGA_OTHER;

	if (cmd & PCI_COMMAND_MEMORY) {
		trap_gttmmio(vgpu, false);
		map_aperture(vgpu, false);
	}

	/*
	 * Currently we only do such a reset when the vGPU is not
	 * owned by any VM, so we simply restore the entire cfg
	 * space to its default values.
	 */
	intel_vgpu_init_cfg_space(vgpu, primary);
}
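
/*
 * Illustrative call flow (a sketch; the dispatching side lives in the
 * hypervisor backend, not in this file, and is_write/buf are hypothetical
 * names): a backend that traps a guest's PCI config space access would
 * route it to the entry points above roughly as follows:
 *
 *	if (is_write)
 *		ret = intel_vgpu_emulate_cfg_write(vgpu, offset, buf, bytes);
 *	else
 *		ret = intel_vgpu_emulate_cfg_read(vgpu, offset, buf, bytes);
 */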