1 /* 2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 * SOFTWARE. 22 * 23 * Authors: 24 * Kevin Tian <kevin.tian@intel.com> 25 * Dexuan Cui 26 * 27 * Contributors: 28 * Pei Zhang <pei.zhang@intel.com> 29 * Min He <min.he@intel.com> 30 * Niu Bing <bing.niu@intel.com> 31 * Yulei Zhang <yulei.zhang@intel.com> 32 * Zhenyu Wang <zhenyuw@linux.intel.com> 33 * Zhi Wang <zhi.a.wang@intel.com> 34 * 35 */ 36 37 #include "i915_drv.h" 38 #include "gvt.h" 39 40 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) 41 { 42 struct intel_gvt *gvt = vgpu->gvt; 43 struct drm_i915_private *dev_priv = gvt->dev_priv; 44 unsigned int flags; 45 u64 start, end, size; 46 struct drm_mm_node *node; 47 int ret; 48 49 if (high_gm) { 50 node = &vgpu->gm.high_gm_node; 51 size = vgpu_hidden_sz(vgpu); 52 start = ALIGN(gvt_hidden_gmadr_base(gvt), I915_GTT_PAGE_SIZE); 53 end = ALIGN(gvt_hidden_gmadr_end(gvt), I915_GTT_PAGE_SIZE); 54 flags = PIN_HIGH; 55 } else { 56 node = &vgpu->gm.low_gm_node; 57 size = vgpu_aperture_sz(vgpu); 58 start = ALIGN(gvt_aperture_gmadr_base(gvt), I915_GTT_PAGE_SIZE); 59 end = ALIGN(gvt_aperture_gmadr_end(gvt), I915_GTT_PAGE_SIZE); 60 flags = PIN_MAPPABLE; 61 } 62 63 mutex_lock(&dev_priv->drm.struct_mutex); 64 mmio_hw_access_pre(dev_priv); 65 ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node, 66 size, I915_GTT_PAGE_SIZE, 67 I915_COLOR_UNEVICTABLE, 68 start, end, flags); 69 mmio_hw_access_post(dev_priv); 70 mutex_unlock(&dev_priv->drm.struct_mutex); 71 if (ret) 72 gvt_err("fail to alloc %s gm space from host\n", 73 high_gm ? "high" : "low"); 74 75 return ret; 76 } 77 78 static int alloc_vgpu_gm(struct intel_vgpu *vgpu) 79 { 80 struct intel_gvt *gvt = vgpu->gvt; 81 struct drm_i915_private *dev_priv = gvt->dev_priv; 82 int ret; 83 84 ret = alloc_gm(vgpu, false); 85 if (ret) 86 return ret; 87 88 ret = alloc_gm(vgpu, true); 89 if (ret) 90 goto out_free_aperture; 91 92 gvt_dbg_core("vgpu%d: alloc low GM start %llx size %llx\n", vgpu->id, 93 vgpu_aperture_offset(vgpu), vgpu_aperture_sz(vgpu)); 94 95 gvt_dbg_core("vgpu%d: alloc high GM start %llx size %llx\n", vgpu->id, 96 vgpu_hidden_offset(vgpu), vgpu_hidden_sz(vgpu)); 97 98 return 0; 99 out_free_aperture: 100 mutex_lock(&dev_priv->drm.struct_mutex); 101 drm_mm_remove_node(&vgpu->gm.low_gm_node); 102 mutex_unlock(&dev_priv->drm.struct_mutex); 103 return ret; 104 } 105 106 static void free_vgpu_gm(struct intel_vgpu *vgpu) 107 { 108 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 109 110 mutex_lock(&dev_priv->drm.struct_mutex); 111 drm_mm_remove_node(&vgpu->gm.low_gm_node); 112 drm_mm_remove_node(&vgpu->gm.high_gm_node); 113 mutex_unlock(&dev_priv->drm.struct_mutex); 114 } 115 116 /** 117 * intel_vgpu_write_fence - write fence registers owned by a vGPU 118 * @vgpu: vGPU instance 119 * @fence: vGPU fence register number 120 * @value: Fence register value to be written 121 * 122 * This function is used to write fence registers owned by a vGPU. The vGPU 123 * fence register number will be translated into HW fence register number. 124 * 125 */ 126 void intel_vgpu_write_fence(struct intel_vgpu *vgpu, 127 u32 fence, u64 value) 128 { 129 struct intel_gvt *gvt = vgpu->gvt; 130 struct drm_i915_private *dev_priv = gvt->dev_priv; 131 struct drm_i915_fence_reg *reg; 132 i915_reg_t fence_reg_lo, fence_reg_hi; 133 134 assert_rpm_wakelock_held(dev_priv); 135 136 if (WARN_ON(fence >= vgpu_fence_sz(vgpu))) 137 return; 138 139 reg = vgpu->fence.regs[fence]; 140 if (WARN_ON(!reg)) 141 return; 142 143 fence_reg_lo = FENCE_REG_GEN6_LO(reg->id); 144 fence_reg_hi = FENCE_REG_GEN6_HI(reg->id); 145 146 I915_WRITE(fence_reg_lo, 0); 147 POSTING_READ(fence_reg_lo); 148 149 I915_WRITE(fence_reg_hi, upper_32_bits(value)); 150 I915_WRITE(fence_reg_lo, lower_32_bits(value)); 151 POSTING_READ(fence_reg_lo); 152 } 153 154 static void _clear_vgpu_fence(struct intel_vgpu *vgpu) 155 { 156 int i; 157 158 for (i = 0; i < vgpu_fence_sz(vgpu); i++) 159 intel_vgpu_write_fence(vgpu, i, 0); 160 } 161 162 static void free_vgpu_fence(struct intel_vgpu *vgpu) 163 { 164 struct intel_gvt *gvt = vgpu->gvt; 165 struct drm_i915_private *dev_priv = gvt->dev_priv; 166 struct drm_i915_fence_reg *reg; 167 u32 i; 168 169 if (WARN_ON(!vgpu_fence_sz(vgpu))) 170 return; 171 172 intel_runtime_pm_get(dev_priv); 173 174 mutex_lock(&dev_priv->drm.struct_mutex); 175 _clear_vgpu_fence(vgpu); 176 for (i = 0; i < vgpu_fence_sz(vgpu); i++) { 177 reg = vgpu->fence.regs[i]; 178 i915_unreserve_fence(reg); 179 vgpu->fence.regs[i] = NULL; 180 } 181 mutex_unlock(&dev_priv->drm.struct_mutex); 182 183 intel_runtime_pm_put_unchecked(dev_priv); 184 } 185 186 static int alloc_vgpu_fence(struct intel_vgpu *vgpu) 187 { 188 struct intel_gvt *gvt = vgpu->gvt; 189 struct drm_i915_private *dev_priv = gvt->dev_priv; 190 struct drm_i915_fence_reg *reg; 191 int i; 192 193 intel_runtime_pm_get(dev_priv); 194 195 /* Request fences from host */ 196 mutex_lock(&dev_priv->drm.struct_mutex); 197 198 for (i = 0; i < vgpu_fence_sz(vgpu); i++) { 199 reg = i915_reserve_fence(dev_priv); 200 if (IS_ERR(reg)) 201 goto out_free_fence; 202 203 vgpu->fence.regs[i] = reg; 204 } 205 206 _clear_vgpu_fence(vgpu); 207 208 mutex_unlock(&dev_priv->drm.struct_mutex); 209 intel_runtime_pm_put_unchecked(dev_priv); 210 return 0; 211 out_free_fence: 212 gvt_vgpu_err("Failed to alloc fences\n"); 213 /* Return fences to host, if fail */ 214 for (i = 0; i < vgpu_fence_sz(vgpu); i++) { 215 reg = vgpu->fence.regs[i]; 216 if (!reg) 217 continue; 218 i915_unreserve_fence(reg); 219 vgpu->fence.regs[i] = NULL; 220 } 221 mutex_unlock(&dev_priv->drm.struct_mutex); 222 intel_runtime_pm_put_unchecked(dev_priv); 223 return -ENOSPC; 224 } 225 226 static void free_resource(struct intel_vgpu *vgpu) 227 { 228 struct intel_gvt *gvt = vgpu->gvt; 229 230 gvt->gm.vgpu_allocated_low_gm_size -= vgpu_aperture_sz(vgpu); 231 gvt->gm.vgpu_allocated_high_gm_size -= vgpu_hidden_sz(vgpu); 232 gvt->fence.vgpu_allocated_fence_num -= vgpu_fence_sz(vgpu); 233 } 234 235 static int alloc_resource(struct intel_vgpu *vgpu, 236 struct intel_vgpu_creation_params *param) 237 { 238 struct intel_gvt *gvt = vgpu->gvt; 239 unsigned long request, avail, max, taken; 240 const char *item; 241 242 if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) { 243 gvt_vgpu_err("Invalid vGPU creation params\n"); 244 return -EINVAL; 245 } 246 247 item = "low GM space"; 248 max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE; 249 taken = gvt->gm.vgpu_allocated_low_gm_size; 250 avail = max - taken; 251 request = MB_TO_BYTES(param->low_gm_sz); 252 253 if (request > avail) 254 goto no_enough_resource; 255 256 vgpu_aperture_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE); 257 258 item = "high GM space"; 259 max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; 260 taken = gvt->gm.vgpu_allocated_high_gm_size; 261 avail = max - taken; 262 request = MB_TO_BYTES(param->high_gm_sz); 263 264 if (request > avail) 265 goto no_enough_resource; 266 267 vgpu_hidden_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE); 268 269 item = "fence"; 270 max = gvt_fence_sz(gvt) - HOST_FENCE; 271 taken = gvt->fence.vgpu_allocated_fence_num; 272 avail = max - taken; 273 request = param->fence_sz; 274 275 if (request > avail) 276 goto no_enough_resource; 277 278 vgpu_fence_sz(vgpu) = request; 279 280 gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz); 281 gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz); 282 gvt->fence.vgpu_allocated_fence_num += param->fence_sz; 283 return 0; 284 285 no_enough_resource: 286 gvt_err("fail to allocate resource %s\n", item); 287 gvt_err("request %luMB avail %luMB max %luMB taken %luMB\n", 288 BYTES_TO_MB(request), BYTES_TO_MB(avail), 289 BYTES_TO_MB(max), BYTES_TO_MB(taken)); 290 return -ENOSPC; 291 } 292 293 /** 294 * inte_gvt_free_vgpu_resource - free HW resource owned by a vGPU 295 * @vgpu: a vGPU 296 * 297 * This function is used to free the HW resource owned by a vGPU. 298 * 299 */ 300 void intel_vgpu_free_resource(struct intel_vgpu *vgpu) 301 { 302 free_vgpu_gm(vgpu); 303 free_vgpu_fence(vgpu); 304 free_resource(vgpu); 305 } 306 307 /** 308 * intel_vgpu_reset_resource - reset resource state owned by a vGPU 309 * @vgpu: a vGPU 310 * 311 * This function is used to reset resource state owned by a vGPU. 312 * 313 */ 314 void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) 315 { 316 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 317 318 intel_runtime_pm_get(dev_priv); 319 _clear_vgpu_fence(vgpu); 320 intel_runtime_pm_put_unchecked(dev_priv); 321 } 322 323 /** 324 * intel_alloc_vgpu_resource - allocate HW resource for a vGPU 325 * @vgpu: vGPU 326 * @param: vGPU creation params 327 * 328 * This function is used to allocate HW resource for a vGPU. User specifies 329 * the resource configuration through the creation params. 330 * 331 * Returns: 332 * zero on success, negative error code if failed. 333 * 334 */ 335 int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, 336 struct intel_vgpu_creation_params *param) 337 { 338 int ret; 339 340 ret = alloc_resource(vgpu, param); 341 if (ret) 342 return ret; 343 344 ret = alloc_vgpu_gm(vgpu); 345 if (ret) 346 goto out_free_resource; 347 348 ret = alloc_vgpu_fence(vgpu); 349 if (ret) 350 goto out_free_vgpu_gm; 351 352 return 0; 353 354 out_free_vgpu_gm: 355 free_vgpu_gm(vgpu); 356 out_free_resource: 357 free_resource(vgpu); 358 return ret; 359 } 360