/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#ifndef _GVT_H_
#define _GVT_H_

#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#include "display.h"
#include "edid.h"
#include "execlist.h"

#define GVT_MAX_VGPU 8

enum {
	INTEL_GVT_HYPERVISOR_XEN = 0,
	INTEL_GVT_HYPERVISOR_KVM,
};

struct intel_gvt_host {
	bool initialized;
	int hypervisor_type;
	struct intel_gvt_mpt *mpt;
};

extern struct intel_gvt_host intel_gvt_host;
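/*
 * Illustrative sketch, not part of the original header: hypervisor-specific
 * code is expected to branch on intel_gvt_host.hypervisor_type. The helper
 * below is hypothetical and only demonstrates the intended dispatch pattern.
 */
static inline bool intel_gvt_host_is_kvm(void)
{
	/* Only meaningful once the host state has been set up. */
	return intel_gvt_host.initialized &&
	       intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM;
}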
/* Describe per-platform limitations. */
struct intel_gvt_device_info {
	u32 max_support_vgpus;
	u32 cfg_space_size;
	u32 mmio_size;
	u32 mmio_bar;
	unsigned long msi_cap_offset;
	u32 gtt_start_offset;
	u32 gtt_entry_size;
	u32 gtt_entry_size_shift;
};

/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
	u64 aperture_sz;
	u64 hidden_sz;
	struct drm_mm_node low_gm_node;
	struct drm_mm_node high_gm_node;
};

#define INTEL_GVT_MAX_NUM_FENCES 32

/* Fences owned by a vGPU */
struct intel_vgpu_fence {
	struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
	u32 base;
	u32 size;
};

struct intel_vgpu_mmio {
	void *vreg;
	void *sreg;
	bool disable_warn_untrack;
};

#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {
	u64 size;
	bool tracked;
};

struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)

#define INTEL_GVT_MAX_PIPE 4

struct intel_vgpu_irq {
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
		       INTEL_GVT_EVENT_MAX);
};

struct intel_vgpu_opregion {
	void *va;
	u32 gfn[INTEL_GVT_OPREGION_PAGES];
	struct page *pages[INTEL_GVT_OPREGION_PAGES];
};

#define vgpu_opregion(vgpu) (&(vgpu->opregion))

#define INTEL_GVT_MAX_PORT 5

struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
	struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
	struct intel_vgpu_sbi sbi;
};
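/*
 * Usage sketch (illustrative, not in the original source): the emulated
 * config space is a plain byte array, so a field the guest sees can be
 * read directly through the vgpu_cfg_space() accessor, e.g.:
 *
 *	u16 vendor = *(u16 *)(vgpu_cfg_space(vgpu) + PCI_VENDOR_ID);
 *
 * where PCI_VENDOR_ID is the standard offset from <linux/pci_regs.h>.
 */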
struct intel_vgpu {
	struct intel_gvt *gvt;
	int id;
	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
	bool active;
	bool resetting;

	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	/* TODO: move the declaration of intel_gvt.h to a proper place. */
	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
};

struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;
};

struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};

#define INTEL_GVT_MMIO_HASH_BITS 9

struct intel_gvt_mmio {
	u32 *mmio_attribute;
	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
};

struct intel_gvt_firmware {
	void *cfg_space;
	void *mmio;
	bool firmware_loaded;
};

struct intel_gvt_opregion {
	void *opregion_va;
	u32 opregion_pa;
};

struct intel_gvt {
	struct mutex lock;
	bool initialized;

	struct drm_i915_private *dev_priv;
	struct idr vgpu_idr;	/* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_opregion opregion;

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;
	unsigned long service_request;
};

enum {
	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
};

static inline void intel_gvt_request_service(struct intel_gvt *gvt,
					     int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}

void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);

/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt)		(gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt)	(gvt->dev_priv->ggtt.mappable_base)

#define gvt_ggtt_gm_sz(gvt)	(gvt->dev_priv->ggtt.base.total)
#define gvt_ggtt_sz(gvt) \
	((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt)	(gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)

#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)

/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu)	((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu)		((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu)		((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)
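/*
 * Illustrative sketch: a hypothetical helper (not in the original header)
 * showing how the macros above compose. A guest aperture address maps to
 * host physical memory at a fixed offset, assuming the caller has already
 * validated gmadr against the vGPU's aperture range.
 */
static inline u64 vgpu_aperture_gmadr_to_pa(struct intel_vgpu *vgpu, u64 gmadr)
{
	/* pa_base already accounts for this vGPU's offset in the aperture. */
	return vgpu_aperture_pa_base(vgpu) +
		(gmadr - vgpu_aperture_gmadr_base(vgpu));
}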
struct intel_vgpu_creation_params {
	__u64 handle;
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
	__u64 fence_sz;
	__s32 primary;
	__u64 vgpu_id;
};

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
			    u32 fence, u64 value);

/* Macros for easily accessing vGPU virtual/shadow registers */
#define vgpu_vreg(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg8(vgpu, reg) \
	(*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg16(vgpu, reg) \
	(*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg64(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg8(vgpu, reg) \
	(*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg16(vgpu, reg) \
	(*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg64(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))

#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if(vgpu->active)
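/*
 * Usage sketch: a hypothetical helper (not in the original header) built on
 * for_each_active_vgpu(). It assumes the caller serializes against vGPU
 * creation/destruction, e.g. by holding gvt->lock.
 */
static inline int intel_gvt_num_active_vgpus(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	int id, num = 0;

	for_each_active_vgpu(gvt, vgpu, id)
		num++;

	return num;
}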
311 */ 312 *pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0)); 313 } 314 } 315 316 struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, 317 struct intel_vgpu_creation_params * 318 param); 319 320 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); 321 322 /* validating GM functions */ 323 #define vgpu_gmadr_is_aperture(vgpu, gmadr) \ 324 ((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \ 325 (gmadr <= vgpu_aperture_gmadr_end(vgpu))) 326 327 #define vgpu_gmadr_is_hidden(vgpu, gmadr) \ 328 ((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \ 329 (gmadr <= vgpu_hidden_gmadr_end(vgpu))) 330 331 #define vgpu_gmadr_is_valid(vgpu, gmadr) \ 332 ((vgpu_gmadr_is_aperture(vgpu, gmadr) || \ 333 (vgpu_gmadr_is_hidden(vgpu, gmadr)))) 334 335 #define gvt_gmadr_is_aperture(gvt, gmadr) \ 336 ((gmadr >= gvt_aperture_gmadr_base(gvt)) && \ 337 (gmadr <= gvt_aperture_gmadr_end(gvt))) 338 339 #define gvt_gmadr_is_hidden(gvt, gmadr) \ 340 ((gmadr >= gvt_hidden_gmadr_base(gvt)) && \ 341 (gmadr <= gvt_hidden_gmadr_end(gvt))) 342 343 #define gvt_gmadr_is_valid(gvt, gmadr) \ 344 (gvt_gmadr_is_aperture(gvt, gmadr) || \ 345 gvt_gmadr_is_hidden(gvt, gmadr)) 346 347 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size); 348 int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr); 349 int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr); 350 int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index, 351 unsigned long *h_index); 352 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, 353 unsigned long *g_index); 354 355 int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset, 356 void *p_data, unsigned int bytes); 357 358 int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset, 359 void *p_data, unsigned int bytes); 360 361 void intel_gvt_clean_opregion(struct intel_gvt *gvt); 362 int intel_gvt_init_opregion(struct intel_gvt *gvt); 363 364 void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu); 365 int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa); 366 367 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); 368 369 #include "mpt.h" 370 371 #endif 372