/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"

/* Free the virtual MMIO register space of a vGPU. */
static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
{
	vfree(vgpu->mmio.vreg);
	vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}

/*
 * Allocate the virtual (vreg) and shadow (sreg) MMIO register spaces as one
 * contiguous block and initialize both from the firmware MMIO snapshot.
 */
static int setup_vgpu_mmio(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;

	vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
	if (!vgpu->mmio.vreg)
		return -ENOMEM;

	vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;

	memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
	memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);

	vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;

	/* set bits 0:2 (Core C-State) to C0 */
	vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
	return 0;
}

/* Initialize the virtual PCI configuration space from the firmware snapshot. */
static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
	struct intel_vgpu_creation_params *param)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	u16 *gmch_ctl;
	int i;

	memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
	       info->cfg_space_size);

	if (!param->primary) {
		vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
				INTEL_GVT_PCI_CLASS_VGA_OTHER;
		vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
				INTEL_GVT_PCI_CLASS_VGA_OTHER;
	}

	/* Show the guest that there isn't any stolen memory. */
	gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
	*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);

	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
				 gvt_aperture_pa_base(gvt), true);

	vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
					     | PCI_COMMAND_MEMORY
					     | PCI_COMMAND_MASTER);
	/*
	 * Clear the upper 32 bits of the BARs and let the guest assign
	 * new values.
	 */
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);

	for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
		vgpu->cfg_space.bar[i].size = pci_resource_len(
					gvt->dev_priv->drm.pdev, i * 2);
		vgpu->cfg_space.bar[i].tracked = false;
	}
}

/* Fill the PVINFO page so the guest can discover its ballooned resources. */
static void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	/* setup the ballooning information */
	vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
	vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

	vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size=0x%llx\n",
		vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}

/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to destroy a virtual GPU.
 *
 */
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	mutex_lock(&gvt->lock);

	vgpu->active = false;
	idr_remove(&gvt->vgpu_idr, vgpu->id);

	intel_vgpu_clean_display(vgpu);
	intel_vgpu_clean_opregion(vgpu);
	intel_vgpu_clean_gtt(vgpu);
	intel_gvt_hypervisor_detach_vgpu(vgpu);
	intel_vgpu_free_resource(vgpu);
	clean_vgpu_mmio(vgpu);
	vfree(vgpu);

	mutex_unlock(&gvt->lock);
}

/**
 * intel_gvt_create_vgpu - create a virtual GPU
 * @gvt: GVT device
 * @param: vGPU creation parameters
 *
 * This function is called when a user wants to create a virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer if failed.
 */
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
{
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
			param->handle, param->low_gm_sz, param->high_gm_sz,
			param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&gvt->lock);

	ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
	if (ret < 0)
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->handle = param->handle;
	vgpu->gvt = gvt;

	setup_vgpu_cfg_space(vgpu, param);

	ret = setup_vgpu_mmio(vgpu);
	if (ret)
		goto out_free_vgpu;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
		goto out_clean_vgpu_mmio;

	populate_pvinfo_page(vgpu);

	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_detach_hypervisor_vgpu;

	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
		ret = intel_vgpu_init_opregion(vgpu, 0);
		if (ret)
			goto out_clean_gtt;
	}

	ret = intel_vgpu_init_display(vgpu);
	if (ret)
		goto out_clean_opregion;

	vgpu->active = true;
	mutex_unlock(&gvt->lock);

	return vgpu;

out_clean_opregion:
	intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
	intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	clean_vgpu_mmio(vgpu);
out_free_vgpu:
	vfree(vgpu);
	mutex_unlock(&gvt->lock);
	return ERR_PTR(ret);
}
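
/*
 * Usage sketch (not part of the original file): a minimal illustration of how
 * a caller might drive intel_gvt_create_vgpu() and intel_gvt_destroy_vgpu().
 * The function name and the parameter values below are assumptions chosen for
 * illustration only; real callers derive them from a vGPU type configuration.
 * Kept under "#if 0" so it is never compiled.
 */
#if 0
static int example_vgpu_lifecycle(struct intel_gvt *gvt)
{
	struct intel_vgpu_creation_params param = {
		.handle = 1,		/* hypervisor-specific handle (assumed) */
		.low_gm_sz = 128,	/* mappable (aperture) GM size in MB (assumed) */
		.high_gm_sz = 512,	/* non-mappable (hidden) GM size in MB (assumed) */
		.fence_sz = 4,		/* number of fence registers (assumed) */
		.primary = 1,		/* expose the vGPU as a primary VGA device */
	};
	struct intel_vgpu *vgpu;

	vgpu = intel_gvt_create_vgpu(gvt, &param);
	if (IS_ERR(vgpu))
		return PTR_ERR(vgpu);

	/* ... guest runs against the vGPU ... */

	intel_gvt_destroy_vgpu(vgpu);
	return 0;
}
#endif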