/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Jike Song <jike.song@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

enum {
	INTEL_GVT_PCI_BAR_GTTMMIO = 0,
	INTEL_GVT_PCI_BAR_APERTURE,
	INTEL_GVT_PCI_BAR_PIO,
	INTEL_GVT_PCI_BAR_MAX,
};

/* Bitmap of writable bits (RW or RW1C bits, which cannot co-exist in one
 * byte), byte by byte, for the standard PCI configuration space (not the
 * full 256 bytes).
 */
static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
	[PCI_COMMAND]		= 0xff, 0x07,
	[PCI_STATUS]		= 0x00, 0xf9, /* the only RW1C byte */
	[PCI_CACHE_LINE_SIZE]	= 0xff,
	[PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff,
	[PCI_ROM_ADDRESS]	= 0x01, 0xf8, 0xff, 0xff,
	[PCI_INTERRUPT_LINE]	= 0xff,
};

/**
 * vgpu_pci_cfg_mem_write - write virtual cfg space memory
 * @vgpu: target vgpu
 * @off: offset into the virtual configuration space
 * @src: source data to write
 * @bytes: number of bytes to write
 *
 * Use this function to write virtual cfg space memory.
 * For standard cfg space, only RW bits can be changed,
 * and we emulate the RW1C behavior of the PCI_STATUS register.
 */
static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
				   u8 *src, unsigned int bytes)
{
	u8 *cfg_base = vgpu_cfg_space(vgpu);
	u8 mask, new, old;
	int i = 0;

	for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
		mask = pci_cfg_space_rw_bmp[off + i];
		old = cfg_base[off + i];
		new = src[i] & mask;

		/*
		 * The PCI_STATUS high byte contains RW1C bits; emulate
		 * write-1-to-clear: writing a 1 clears the bit, writing
		 * a 0 has no effect.
		 */
		if (off + i == PCI_STATUS + 1)
			new = (~new & old) & mask;

		cfg_base[off + i] = (old & ~mask) | new;
	}

	/* Copy the rest of the configuration space as it is. */
	if (i < bytes)
		memcpy(cfg_base + off + i, src + i, bytes - i);
}

/**
 * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
 * @vgpu: target vgpu
 * @offset: offset into the configuration space
 * @p_data: buffer to hold the read data
 * @bytes: number of bytes to read
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
				void *p_data, unsigned int bytes)
{
	if (WARN_ON(bytes > 4))
		return -EINVAL;

	if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
		return -EINVAL;

	memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
	return 0;
}
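
/*
 * Map or unmap the guest aperture: translate the guest-programmed BAR2 base
 * into the first GFN, pair it with the first MFN of the host aperture region
 * assigned to this vGPU, and ask the hypervisor to (un)map the whole
 * aperture range. The "tracked" flag avoids redundant hypervisor calls.
 */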
static int map_aperture(struct intel_vgpu *vgpu, bool map)
{
	u64 first_gfn, first_mfn;
	u64 val;
	int ret;

	if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
		return 0;

	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
		val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
	else
		val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);

	first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
	first_mfn = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;

	ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
						  first_mfn,
						  vgpu_aperture_sz(vgpu) >>
						  PAGE_SHIFT, map);
	if (ret)
		return ret;

	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
	return 0;
}
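
/*
 * Enable or disable trapping of the GTTMMIO BAR: compute the guest physical
 * range covered by BAR0 and ask the hypervisor to forward accesses in that
 * range to GVT-g for MMIO emulation.
 */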
static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
{
	u64 start, end;
	u64 val;
	int ret;

	if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
		return 0;

	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
		start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
	else
		start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);

	start &= ~GENMASK(3, 0);
	end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;

	ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
	if (ret)
		return ret;

	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
	return 0;
}

static int emulate_pci_command_write(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes)
{
	u8 old = vgpu_cfg_space(vgpu)[offset];
	u8 new = *(u8 *)p_data;
	u8 changed = old ^ new;
	int ret;

	vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
	if (!(changed & PCI_COMMAND_MEMORY))
		return 0;

	if (old & PCI_COMMAND_MEMORY) {
		ret = trap_gttmmio(vgpu, false);
		if (ret)
			return ret;
		ret = map_aperture(vgpu, false);
		if (ret)
			return ret;
	} else {
		ret = trap_gttmmio(vgpu, true);
		if (ret)
			return ret;
		ret = map_aperture(vgpu, true);
		if (ret)
			return ret;
	}

	return 0;
}
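
/*
 * Emulate guest writes to the GTTMMIO and aperture BARs. A write of all 1's
 * is the standard PCI size-probing handshake and returns the BAR size mask;
 * any other value re-programs the BAR base, so the old trap/mapping is torn
 * down first and re-established at the new address when memory decoding is
 * enabled.
 */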
static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	u32 new = *(u32 *)(p_data);
	bool lo = IS_ALIGNED(offset, 8);
	u64 size;
	int ret = 0;
	bool mmio_enabled =
		vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
	struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar;

	/*
	 * Power-up software can determine how much address
	 * space the device requires by writing a value of
	 * all 1's to the register and then reading the value
	 * back. The device will return 0's in all don't-care
	 * address bits.
	 */
	if (new == 0xffffffff) {
		switch (offset) {
		case PCI_BASE_ADDRESS_0:
		case PCI_BASE_ADDRESS_1:
			size = ~(bars[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1);
			intel_vgpu_write_pci_bar(vgpu, offset,
						 size >> (lo ? 0 : 32), lo);
			/*
			 * Untrap the BAR, since guest hasn't configured a
			 * valid GPA
			 */
			ret = trap_gttmmio(vgpu, false);
			break;
		case PCI_BASE_ADDRESS_2:
		case PCI_BASE_ADDRESS_3:
			size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size - 1);
			intel_vgpu_write_pci_bar(vgpu, offset,
						 size >> (lo ? 0 : 32), lo);
			ret = map_aperture(vgpu, false);
			break;
		default:
			/* Unimplemented BARs */
			intel_vgpu_write_pci_bar(vgpu, offset, 0x0, false);
		}
	} else {
		switch (offset) {
		case PCI_BASE_ADDRESS_0:
		case PCI_BASE_ADDRESS_1:
			/*
			 * Untrap the old BAR first, since guest has
			 * re-configured the BAR
			 */
			trap_gttmmio(vgpu, false);
			intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
			ret = trap_gttmmio(vgpu, mmio_enabled);
			break;
		case PCI_BASE_ADDRESS_2:
		case PCI_BASE_ADDRESS_3:
			map_aperture(vgpu, false);
			intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
			ret = map_aperture(vgpu, mmio_enabled);
			break;
		default:
			intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
		}
	}
	return ret;
}

/**
 * intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write
 * @vgpu: target vgpu
 * @offset: offset into the configuration space
 * @p_data: data to be written
 * @bytes: number of bytes to write
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	int ret;

	if (WARN_ON(bytes > 4))
		return -EINVAL;

	if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
		return -EINVAL;

	/* First check if it's PCI_COMMAND */
	if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
		if (WARN_ON(bytes > 2))
			return -EINVAL;
		return emulate_pci_command_write(vgpu, offset, p_data, bytes);
	}

	switch (rounddown(offset, 4)) {
	case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
		return emulate_pci_bar_write(vgpu, offset, p_data, bytes);

	case INTEL_GVT_PCI_SWSCI:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
		ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
		if (ret)
			return ret;
		break;

	case INTEL_GVT_PCI_OPREGION:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
		ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
		if (ret)
			return ret;

		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
		break;
	default:
		vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
		break;
	}
	return 0;
}

/**
 * intel_vgpu_init_cfg_space - init vGPU configuration space when creating a vGPU
 *
 * @vgpu: a vGPU
 * @primary: is the vGPU presented as primary
 *
 */
void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
			       bool primary)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	u16 *gmch_ctl;

	memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
	       info->cfg_space_size);

	if (!primary) {
		vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
			INTEL_GVT_PCI_CLASS_VGA_OTHER;
		vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
			INTEL_GVT_PCI_CLASS_VGA_OTHER;
	}

	/* Show the guest that there isn't any stolen memory. */
	gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
	*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);

	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
				 gvt_aperture_pa_base(gvt), true);

	vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
					     | PCI_COMMAND_MEMORY
					     | PCI_COMMAND_MASTER);
	/*
	 * Clear the upper 32 bits of the BARs and let the guest assign
	 * new values.
	 */
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_4, 0, 8);
	memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);

	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
		pci_resource_len(gvt->dev_priv->drm.pdev, 0);
	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
		pci_resource_len(gvt->dev_priv->drm.pdev, 2);
}

/**
 * intel_vgpu_reset_cfg_space - reset vGPU configuration space
 *
 * @vgpu: a vGPU
 *
 */
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
{
	u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
	bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
			INTEL_GVT_PCI_CLASS_VGA_OTHER;

	if (cmd & PCI_COMMAND_MEMORY) {
		trap_gttmmio(vgpu, false);
		map_aperture(vgpu, false);
	}

	/*
	 * Currently we only do such reset when vGPU is not
	 * owned by any VM, so we simply restore entire cfg
	 * space to default value.
	 */
	intel_vgpu_init_cfg_space(vgpu, primary);
}