/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"

static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
{
	vfree(vgpu->mmio.vreg);
	vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}

int setup_vgpu_mmio(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;

	vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
	if (!vgpu->mmio.vreg)
		return -ENOMEM;

	vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;

	memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
	memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);

	vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;

	/* set bits 2:0 (Core C-State) to C0 */
	vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
	return 0;
}

static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
	struct intel_vgpu_creation_params *param)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	u16 *gmch_ctl;
	int i;

	memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
	       info->cfg_space_size);

	if (!param->primary) {
		vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
				INTEL_GVT_PCI_CLASS_VGA_OTHER;
		vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
				INTEL_GVT_PCI_CLASS_VGA_OTHER;
	}

	/* Show the guest that there isn't any stolen memory. */
	gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
	*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);

	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
				 gvt_aperture_pa_base(gvt), true);

	vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
					     | PCI_COMMAND_MEMORY
					     | PCI_COMMAND_MASTER);
	/*
	 * Clear the upper 32 bits of the BARs and let the guest assign
	 * new values.
	 */
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);

	for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
		vgpu->cfg_space.bar[i].size = pci_resource_len(
			gvt->dev_priv->drm.pdev, i * 2);
		vgpu->cfg_space.bar[i].tracked = false;
	}
}

void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	/* setup the ballooning information */
	vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
	vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
	vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
	vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
	vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
		vgpu_aperture_gmadr_base(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
		vgpu_aperture_sz(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
		vgpu_hidden_gmadr_base(vgpu);
	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
		vgpu_hidden_sz(vgpu);

	vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);

	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
		vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu));
	gvt_dbg_core("hidden base [GMADR] 0x%llx size 0x%llx\n",
		vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
	gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));

	WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}

/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU
 *
 * This function is called when a user wants to destroy a virtual GPU.
 *
 */
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;

	mutex_lock(&gvt->lock);

	vgpu->active = false;
	idr_remove(&gvt->vgpu_idr, vgpu->id);

	/* Wait for in-flight workloads to retire before tearing down. */
	if (atomic_read(&vgpu->running_workload_num)) {
		mutex_unlock(&gvt->lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&gvt->lock);
	}

	intel_vgpu_stop_schedule(vgpu);
	intel_vgpu_clean_sched_policy(vgpu);
	intel_vgpu_clean_gvt_context(vgpu);
	intel_vgpu_clean_execlist(vgpu);
	intel_vgpu_clean_display(vgpu);
	intel_vgpu_clean_opregion(vgpu);
	intel_vgpu_clean_gtt(vgpu);
	intel_gvt_hypervisor_detach_vgpu(vgpu);
	intel_vgpu_free_resource(vgpu);
	clean_vgpu_mmio(vgpu);
	vfree(vgpu);

	mutex_unlock(&gvt->lock);
}

/**
 * intel_gvt_create_vgpu - create a virtual GPU
 * @gvt: GVT device
 * @param: vGPU creation parameters
 *
 * This function is called when a user wants to create a virtual GPU.
 *
 * Returns:
 * pointer to intel_vgpu, error pointer on failure.
 */
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
		struct intel_vgpu_creation_params *param)
{
	struct intel_vgpu *vgpu;
	int ret;

	gvt_dbg_core("handle %llu low %llu MB high %llu MB fence %llu\n",
			param->handle, param->low_gm_sz, param->high_gm_sz,
			param->fence_sz);

	vgpu = vzalloc(sizeof(*vgpu));
	if (!vgpu)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&gvt->lock);

	ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
	if (ret < 0)
		goto out_free_vgpu;

	vgpu->id = ret;
	vgpu->handle = param->handle;
	vgpu->gvt = gvt;
	bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);

	setup_vgpu_cfg_space(vgpu, param);

	ret = setup_vgpu_mmio(vgpu);
	if (ret)
		goto out_free_vgpu;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
		goto out_clean_vgpu_mmio;

	populate_pvinfo_page(vgpu);

	ret = intel_gvt_hypervisor_attach_vgpu(vgpu);
	if (ret)
		goto out_clean_vgpu_resource;

	ret = intel_vgpu_init_gtt(vgpu);
	if (ret)
		goto out_detach_hypervisor_vgpu;

	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
		ret = intel_vgpu_init_opregion(vgpu, 0);
		if (ret)
			goto out_clean_gtt;
	}

	ret = intel_vgpu_init_display(vgpu);
	if (ret)
		goto out_clean_opregion;

	ret = intel_vgpu_init_execlist(vgpu);
	if (ret)
		goto out_clean_display;

	ret = intel_vgpu_init_gvt_context(vgpu);
	if (ret)
		goto out_clean_execlist;

	ret = intel_vgpu_init_sched_policy(vgpu);
	if (ret)
		goto out_clean_shadow_ctx;

	vgpu->active = true;
	mutex_unlock(&gvt->lock);

	return vgpu;

out_clean_shadow_ctx:
	intel_vgpu_clean_gvt_context(vgpu);
out_clean_execlist:
	intel_vgpu_clean_execlist(vgpu);
out_clean_display:
	intel_vgpu_clean_display(vgpu);
out_clean_opregion:
	intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
	intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
	intel_gvt_hypervisor_detach_vgpu(vgpu);
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	clean_vgpu_mmio(vgpu);
out_free_vgpu:
	vfree(vgpu);
	mutex_unlock(&gvt->lock);
	return ERR_PTR(ret);
}
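
/*
 * Illustrative usage sketch (an assumption for documentation purposes, not
 * code from this driver): a hypervisor backend such as KVMGT is expected to
 * drive the two exported entry points above roughly as follows. The concrete
 * values for intel_vgpu_creation_params (handle, primary, low_gm_sz,
 * high_gm_sz, fence_sz) depend on the vGPU type the backend wants to expose
 * and are made up here for illustration.
 *
 *	struct intel_vgpu_creation_params param = {
 *		.handle = handle,	// backend-specific vGPU handle
 *		.primary = 1,		// expose as primary VGA device
 *		.low_gm_sz = 64,	// mappable (aperture) GM size, in MB
 *		.high_gm_sz = 448,	// hidden GM size, in MB
 *		.fence_sz = 4,		// number of fence registers
 *	};
 *	struct intel_vgpu *vgpu;
 *
 *	vgpu = intel_gvt_create_vgpu(gvt, &param);
 *	if (IS_ERR(vgpu))
 *		return PTR_ERR(vgpu);
 *
 *	// ... guest runs; on teardown:
 *	intel_gvt_destroy_vgpu(vgpu);
 */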