/*
 * Copyright(c) 2011-2015 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "i915_drv.h"
#include "i915_pvinfo.h"
#include "i915_vgpu.h"

/**
 * DOC: Intel GVT-g guest support
 *
 * Intel GVT-g is a graphics virtualization technology which shares the
 * GPU among multiple virtual machines on a time-sharing basis. Each
 * virtual machine is presented with a virtual GPU (vGPU) that has features
 * equivalent to those of the underlying physical GPU (pGPU), so the i915
 * driver can run seamlessly in a virtual machine. This file provides vGPU
 * specific optimizations when running in a virtual machine, to reduce the
 * complexity of vGPU emulation and to improve the overall performance.
 *
 * A primary facility introduced here is the so-called "address space
 * ballooning" technique. Intel GVT-g partitions global graphics memory among
 * multiple VMs, so each VM can directly access a portion of the memory
 * without the hypervisor's intervention, e.g. filling textures or queuing
 * commands. However, with such partitioning an unmodified i915 driver would
 * assume a smaller graphics memory space starting from address ZERO, which
 * would require the vGPU emulation module to translate graphics addresses
 * between 'guest view' and 'host view' for all registers and command opcodes
 * that contain a graphics memory address. To reduce this complexity, Intel
 * GVT-g introduces "address space ballooning": it exposes the exact
 * partitioning to each guest i915 driver, which then reserves the portions
 * not assigned to it and keeps them out of allocation. Thus the vGPU
 * emulation module only needs to scan and validate graphics addresses,
 * without the complexity of address translation.
 *
 */
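
/*
 * As a concrete (purely illustrative) example with made-up numbers: if GVT-g
 * assigns this VM the range [256 MiB, 384 MiB) of the mappable aperture, the
 * guest i915 reserves [0, 256 MiB) and [384 MiB, end of aperture) as
 * ballooned regions and only allocates from its assigned window, so the
 * graphics addresses it programs are identical in the guest view and the
 * host view.
 */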

/**
 * intel_vgpu_detect - detect virtual GPU
 * @dev_priv: i915 device private
 *
 * This function is called at the initialization stage, to detect whether
 * we are running on a vGPU.
 */
void intel_vgpu_detect(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u64 magic;
	u16 version_major;
	void __iomem *shared_area;

	BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);

	/*
	 * This is called before we setup the main MMIO BAR mappings used via
	 * the uncore structure, so we need to access the BAR directly. Since
	 * we do not support VGT on older gens, return early so we don't have
	 * to consider differently numbered or sized MMIO bars.
	 */
	if (INTEL_GEN(dev_priv) < 6)
		return;

	shared_area = pci_iomap_range(pdev, 0, VGT_PVINFO_PAGE, VGT_PVINFO_SIZE);
	if (!shared_area) {
		drm_err(&dev_priv->drm,
			"failed to map MMIO bar to check for VGT\n");
		return;
	}

	magic = readq(shared_area + vgtif_offset(magic));
	if (magic != VGT_MAGIC)
		goto out;

	version_major = readw(shared_area + vgtif_offset(version_major));
	if (version_major < VGT_VERSION_MAJOR) {
		drm_info(&dev_priv->drm, "VGT interface version mismatch!\n");
		goto out;
	}

	dev_priv->vgpu.caps = readl(shared_area + vgtif_offset(vgt_caps));

	dev_priv->vgpu.active = true;
	mutex_init(&dev_priv->vgpu.lock);
	drm_info(&dev_priv->drm, "Virtual GPU for Intel GVT-g detected.\n");

out:
	pci_iounmap(pdev, shared_area);
}

void intel_vgpu_register(struct drm_i915_private *i915)
{
	/*
	 * Notify a valid surface after modesetting, when running inside a VM.
	 */
	if (intel_vgpu_active(i915))
		intel_uncore_write(&i915->uncore, vgtif_reg(display_ready),
				   VGT_DRV_DISPLAY_READY);
}

bool intel_vgpu_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->vgpu.active;
}

bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv)
{
	return dev_priv->vgpu.caps & VGT_CAPS_FULL_PPGTT;
}

bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
{
	return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
}

bool intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
{
	return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
}
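
/*
 * The helpers above are how the rest of the driver queries vGPU state:
 * callers typically check intel_vgpu_active() first and then the specific
 * capability bit. A purely illustrative (hypothetical) call site gating a
 * feature on a vGPU capability might look like:
 *
 *	if (intel_vgpu_active(i915) && !intel_vgpu_has_full_ppgtt(i915))
 *		return -ENODEV;
 */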

struct _balloon_info_ {
	/*
	 * There are up to 2 regions per mappable/unmappable graphic
	 * memory that might be ballooned. Here, index 0/1 is for mappable
	 * graphic memory, 2/3 for unmappable graphic memory.
	 */
	struct drm_mm_node space[4];
};

static struct _balloon_info_ bl_info;

static void vgt_deballoon_space(struct i915_ggtt *ggtt,
				struct drm_mm_node *node)
{
	struct drm_i915_private *dev_priv = ggtt->vm.i915;

	if (!drm_mm_node_allocated(node))
		return;

	drm_dbg(&dev_priv->drm,
		"deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
		node->start,
		node->start + node->size,
		node->size / 1024);

	ggtt->vm.reserved -= node->size;
	drm_mm_remove_node(node);
}

/**
 * intel_vgt_deballoon - deballoon reserved graphics address chunks
 * @ggtt: the global GGTT from which we reserved earlier
 *
 * This function is called to deallocate the ballooned-out graphic memory when
 * the driver is unloaded or when ballooning fails.
 */
void intel_vgt_deballoon(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *dev_priv = ggtt->vm.i915;
	int i;

	if (!intel_vgpu_active(ggtt->vm.i915))
		return;

	drm_dbg(&dev_priv->drm, "VGT deballoon.\n");

	for (i = 0; i < 4; i++)
		vgt_deballoon_space(ggtt, &bl_info.space[i]);
}

static int vgt_balloon_space(struct i915_ggtt *ggtt,
			     struct drm_mm_node *node,
			     unsigned long start, unsigned long end)
{
	struct drm_i915_private *dev_priv = ggtt->vm.i915;
	unsigned long size = end - start;
	int ret;

	if (start >= end)
		return -EINVAL;

	drm_info(&dev_priv->drm,
		 "balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
		 start, end, size / 1024);
	ret = i915_gem_gtt_reserve(&ggtt->vm, node,
				   size, start, I915_COLOR_UNEVICTABLE,
				   0);
	if (!ret)
		ggtt->vm.reserved += size;

	return ret;
}

/**
 * intel_vgt_balloon - balloon out reserved graphics address chunks
 * @ggtt: the global GGTT from which to reserve
 *
 * This function is called at the initialization stage, to balloon out the
 * graphic address space allocated to other vGPUs, by marking these spaces as
 * reserved. The ballooning-related knowledge (starting address and size of
 * the mappable/unmappable graphic memory) is described in the vgt_if
 * structure in a reserved MMIO range.
 *
 * To give an example, the drawing below depicts one typical scenario after
 * ballooning. Here vGPU1 has two pieces of graphic address space ballooned
 * out, one each for the mappable and the non-mappable part. From the vGPU1
 * point of view, the total size is the same as the physical one, with the
 * start address of its graphic space being zero. Yet there are some portions
 * ballooned out (the shaded parts, which are marked as reserved by the drm
 * allocator). From the host point of view, the graphic address space is
 * partitioned by multiple vGPUs in different VMs. ::
 *
 *                        vGPU1 view         Host view
 *             0 ------> +-----------+     +-----------+
 *               ^       |###########|     |   vGPU3   |
 *               |       |###########|     +-----------+
 *               |       |###########|     |   vGPU2   |
 *               |       +-----------+     +-----------+
 *        mappable GM    | available | ==> |   vGPU1   |
 *               |       +-----------+     +-----------+
 *               |       |###########|     |           |
 *               v       |###########|     |   Host    |
 *               +=======+===========+     +===========+
 *               ^       |###########|     |   vGPU3   |
 *               |       |###########|     +-----------+
 *               |       |###########|     |   vGPU2   |
 *               |       +-----------+     +-----------+
 *      unmappable GM    | available | ==> |   vGPU1   |
 *               |       +-----------+     +-----------+
 *               |       |###########|     |           |
 *               |       |###########|     |   Host    |
 *               v       |###########|     |           |
 * total GM size ------> +-----------+     +-----------+
 *
 * Returns:
 * zero on success, non-zero if configuration invalid or ballooning failed
 */
int intel_vgt_balloon(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *dev_priv = ggtt->vm.i915;
	struct intel_uncore *uncore = &dev_priv->uncore;
	unsigned long ggtt_end = ggtt->vm.total;

	unsigned long mappable_base, mappable_size, mappable_end;
	unsigned long unmappable_base, unmappable_size, unmappable_end;
	int ret;

	if (!intel_vgpu_active(ggtt->vm.i915))
		return 0;

	mappable_base =
	  intel_uncore_read(uncore, vgtif_reg(avail_rs.mappable_gmadr.base));
	mappable_size =
	  intel_uncore_read(uncore, vgtif_reg(avail_rs.mappable_gmadr.size));
	unmappable_base =
	  intel_uncore_read(uncore, vgtif_reg(avail_rs.nonmappable_gmadr.base));
	unmappable_size =
	  intel_uncore_read(uncore, vgtif_reg(avail_rs.nonmappable_gmadr.size));

	mappable_end = mappable_base + mappable_size;
	unmappable_end = unmappable_base + unmappable_size;

	drm_info(&dev_priv->drm, "VGT ballooning configuration:\n");
	drm_info(&dev_priv->drm,
		 "Mappable graphic memory: base 0x%lx size %ldKiB\n",
		 mappable_base, mappable_size / 1024);
	drm_info(&dev_priv->drm,
		 "Unmappable graphic memory: base 0x%lx size %ldKiB\n",
		 unmappable_base, unmappable_size / 1024);

	if (mappable_end > ggtt->mappable_end ||
	    unmappable_base < ggtt->mappable_end ||
	    unmappable_end > ggtt_end) {
		drm_err(&dev_priv->drm, "Invalid ballooning configuration!\n");
		return -EINVAL;
	}
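
	/*
	 * Up to four regions are reserved: space[0]/space[1] cover the holes
	 * below and above our mappable window, space[2]/space[3] the holes
	 * below and above our unmappable window. Regions of zero size are
	 * skipped; if any reservation fails, the ones already made are
	 * unwound in reverse order via the error labels below.
	 */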

	/* Unmappable graphic memory ballooning */
	if (unmappable_base > ggtt->mappable_end) {
		ret = vgt_balloon_space(ggtt, &bl_info.space[2],
					ggtt->mappable_end, unmappable_base);

		if (ret)
			goto err;
	}

	if (unmappable_end < ggtt_end) {
		ret = vgt_balloon_space(ggtt, &bl_info.space[3],
					unmappable_end, ggtt_end);
		if (ret)
			goto err_upon_mappable;
	}

	/* Mappable graphic memory ballooning */
	if (mappable_base) {
		ret = vgt_balloon_space(ggtt, &bl_info.space[0],
					0, mappable_base);

		if (ret)
			goto err_upon_unmappable;
	}

	if (mappable_end < ggtt->mappable_end) {
		ret = vgt_balloon_space(ggtt, &bl_info.space[1],
					mappable_end, ggtt->mappable_end);

		if (ret)
			goto err_below_mappable;
	}

	drm_info(&dev_priv->drm, "VGT ballooning successful\n");
	return 0;

err_below_mappable:
	vgt_deballoon_space(ggtt, &bl_info.space[0]);
err_upon_unmappable:
	vgt_deballoon_space(ggtt, &bl_info.space[3]);
err_upon_mappable:
	vgt_deballoon_space(ggtt, &bl_info.space[2]);
err:
	drm_err(&dev_priv->drm, "VGT ballooning failed\n");
	return ret;
}