/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Jike Song <jike.song@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

enum {
	INTEL_GVT_PCI_BAR_GTTMMIO = 0,
	INTEL_GVT_PCI_BAR_APERTURE,
	INTEL_GVT_PCI_BAR_PIO,
	INTEL_GVT_PCI_BAR_MAX,
};

/**
 * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
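 * @vgpu: target vgpu
 * @offset: offset into the vGPU configuration space
 * @p_data: buffer that receives the data read
 * @bytes: number of bytes to read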
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	if (WARN_ON(bytes > 4))
		return -EINVAL;

	if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
		return -EINVAL;

	memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
	return 0;
}

static int map_aperture(struct intel_vgpu *vgpu, bool map)
{
	u64 first_gfn, first_mfn;
	u64 val;
	int ret;

	if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
		return 0;

	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
		val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
	else
		val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);

	first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
	first_mfn = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;

	ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
						  first_mfn,
						  vgpu_aperture_sz(vgpu) >>
						  PAGE_SHIFT, map);
	if (ret)
		return ret;

	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
	return 0;
}

static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
{
	u64 start, end;
	u64 val;
	int ret;

	if (trap == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked)
		return 0;

	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0];
	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
		start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
	else
		start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);

	start &= ~GENMASK(3, 0);
	end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1;

	ret = intel_gvt_hypervisor_set_trap_area(vgpu, start, end, trap);
	if (ret)
		return ret;

	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].tracked = trap;
	return 0;
}

static int emulate_pci_command_write(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes)
{
	u8 old = vgpu_cfg_space(vgpu)[offset];
	u8 new = *(u8 *)p_data;
	u8 changed = old ^ new;
	int ret;

	memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
	if (!(changed & PCI_COMMAND_MEMORY))
		return 0;

	if (old & PCI_COMMAND_MEMORY) {
		ret = trap_gttmmio(vgpu, false);
		if (ret)
			return ret;
		ret = map_aperture(vgpu, false);
		if (ret)
			return ret;
	} else {
		ret = trap_gttmmio(vgpu, true);
		if (ret)
			return ret;
		ret = map_aperture(vgpu, true);
		if (ret)
			return ret;
	}

	return 0;
}

static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
	void *p_data, unsigned int bytes)
{
	unsigned int bar_index =
		(rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8;
	u32 new = *(u32 *)(p_data);
	bool lo = IS_ALIGNED(offset, 8);
	u64 size;
	int ret = 0;
	bool mmio_enabled =
		vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;

	if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX))
		return -EINVAL;

	if (new == 0xffffffff) {
		/*
		 * Power-up software can determine how much address
		 * space the device requires by writing a value of
		 * all 1's to the register and then reading the value
		 * back. The device will return 0's in all don't-care
		 * address bits.
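		 *
		 * For example, with a 16MB BAR (size = 0x1000000), a guest
		 * write of 0xffffffff to the low dword is stored as
		 * rounddown(new, size) = 0xff000000, so the guest reads back
		 * 0's in the low 24 address bits and derives the BAR size
		 * from them.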
		 */
		size = vgpu->cfg_space.bar[bar_index].size;
		if (lo) {
			new = rounddown(new, size);
		} else {
			u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)];
			/*
			 * For a 32-bit BAR the upper dword reads back as all
			 * 0's; for a 64-bit BAR it returns the upper 32 bits
			 * of the size mask derived from the BAR size.
			 */
			if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
				new &= (~(size-1)) >> 32;
			else
				new = 0;
		}
		/*
		 * Unmap & untrap the BAR, since the guest hasn't configured a
		 * valid GPA.
		 */
		switch (bar_index) {
		case INTEL_GVT_PCI_BAR_GTTMMIO:
			ret = trap_gttmmio(vgpu, false);
			break;
		case INTEL_GVT_PCI_BAR_APERTURE:
			ret = map_aperture(vgpu, false);
			break;
		}
		intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
	} else {
		/*
		 * Unmap & untrap the old BAR first, since the guest has
		 * re-configured the BAR.
		 */
		switch (bar_index) {
		case INTEL_GVT_PCI_BAR_GTTMMIO:
			ret = trap_gttmmio(vgpu, false);
			break;
		case INTEL_GVT_PCI_BAR_APERTURE:
			ret = map_aperture(vgpu, false);
			break;
		}
		intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
		/* Track the new BAR */
		if (mmio_enabled) {
			switch (bar_index) {
			case INTEL_GVT_PCI_BAR_GTTMMIO:
				ret = trap_gttmmio(vgpu, true);
				break;
			case INTEL_GVT_PCI_BAR_APERTURE:
				ret = map_aperture(vgpu, true);
				break;
			}
		}
	}
	return ret;
}

/**
 * intel_vgpu_emulate_cfg_write - emulate vGPU configuration space write
 * @vgpu: target vgpu
 * @offset: offset into the vGPU configuration space
 * @p_data: data to be written
 * @bytes: number of bytes to write
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
		void *p_data, unsigned int bytes)
{
	int ret;

	if (WARN_ON(bytes > 4))
		return -EINVAL;

	if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
		return -EINVAL;

	/* First check if it's PCI_COMMAND */
	if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
		if (WARN_ON(bytes > 2))
			return -EINVAL;
		return emulate_pci_command_write(vgpu, offset, p_data, bytes);
	}

	switch (rounddown(offset, 4)) {
	case PCI_BASE_ADDRESS_0:
	case PCI_BASE_ADDRESS_1:
	case PCI_BASE_ADDRESS_2:
	case PCI_BASE_ADDRESS_3:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
		return emulate_pci_bar_write(vgpu, offset, p_data, bytes);

	case INTEL_GVT_PCI_SWSCI:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
		ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
		if (ret)
			return ret;
		break;

	case INTEL_GVT_PCI_OPREGION:
		if (WARN_ON(!IS_ALIGNED(offset, 4)))
			return -EINVAL;
		ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
		if (ret)
			return ret;

		memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
		break;
	default:
		memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
		break;
	}
	return 0;
}

/**
 * intel_vgpu_init_cfg_space - init vGPU configuration space when creating a vGPU
 *
 * @vgpu: a vGPU
 * @primary: whether the vGPU is presented as the primary display adapter
 *
 */
void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
			       bool primary)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	u16 *gmch_ctl;
	int i;

	memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
	       info->cfg_space_size);
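
	/*
	 * A vGPU that is not the primary display adapter advertises the
	 * INTEL_GVT_PCI_CLASS_VGA_OTHER class and prog-if values, so guest
	 * software does not pick it up as its boot/primary VGA device.
	 */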
	if (!primary) {
		vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
				INTEL_GVT_PCI_CLASS_VGA_OTHER;
		vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
				INTEL_GVT_PCI_CLASS_VGA_OTHER;
	}

	/* Show guest that there isn't any stolen memory. */
	gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
	*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);

	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
				 gvt_aperture_pa_base(gvt), true);

	vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
					     | PCI_COMMAND_MEMORY
					     | PCI_COMMAND_MASTER);
	/*
	 * Clear the upper 32 bits of the BARs and let the guest assign
	 * the new values.
	 */
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
	memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);

	for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
		vgpu->cfg_space.bar[i].size = pci_resource_len(
					gvt->dev_priv->drm.pdev, i * 2);
		vgpu->cfg_space.bar[i].tracked = false;
	}
}

/**
 * intel_vgpu_reset_cfg_space - reset vGPU configuration space
 *
 * @vgpu: a vGPU
 *
 */
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
{
	u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
	bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
				INTEL_GVT_PCI_CLASS_VGA_OTHER;

	if (cmd & PCI_COMMAND_MEMORY) {
		trap_gttmmio(vgpu, false);
		map_aperture(vgpu, false);
	}

	/*
	 * Currently we only do such a reset when the vGPU is not owned by
	 * any VM, so we simply restore the entire cfg space to its default
	 * values.
	 */
	intel_vgpu_init_cfg_space(vgpu, primary);
}