/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#ifndef _GVT_GTT_H_
#define _GVT_GTT_H_

/* GTT page granularity: 4KiB pages. */
#define I915_GTT_PAGE_SHIFT 12
/*
 * NOTE(review): I915_GTT_PAGE_MASK (and the mem[] array below) use
 * I915_GTT_PAGE_SIZE, which is not defined in this header — presumably
 * provided by an including i915 header; confirm before building this
 * header standalone.
 */
#define I915_GTT_PAGE_MASK (~(I915_GTT_PAGE_SIZE - 1))

struct intel_vgpu_mm;

/* Size (in bits) of the shadow/tracked-page hash tables declared below. */
#define INTEL_GVT_GTT_HASH_BITS 8
/* Sentinel returned for an unmappable/invalid graphics memory address. */
#define INTEL_GVT_INVALID_ADDR (~0UL)

/*
 * A single GTT entry: the raw 64-bit value as written in the page table,
 * tagged with its GTT_TYPE_* so helpers know how to interpret it.
 */
struct intel_gvt_gtt_entry {
	u64 val64;
	int type;
};

/*
 * Hardware-abstraction callbacks for reading/writing and decoding PTEs.
 * get_entry/set_entry access entry @index of page table @pt; when
 * @hypervisor_access is set, @gpa identifies the guest physical location
 * to access on behalf of @vgpu instead of a host-mapped table.
 */
struct intel_gvt_gtt_pte_ops {
	int (*get_entry)(void *pt,
			 struct intel_gvt_gtt_entry *e,
			 unsigned long index,
			 bool hypervisor_access,
			 unsigned long gpa,
			 struct intel_vgpu *vgpu);
	int (*set_entry)(void *pt,
			 struct intel_gvt_gtt_entry *e,
			 unsigned long index,
			 bool hypervisor_access,
			 unsigned long gpa,
			 struct intel_vgpu *vgpu);
	/* Predicate/mutator helpers for the present bit of an entry. */
	bool (*test_present)(struct intel_gvt_gtt_entry *e);
	void (*clear_present)(struct intel_gvt_gtt_entry *e);
	void (*set_present)(struct intel_gvt_gtt_entry *e);
	/* Test the PSE (page-size/large-page) flag of an entry. */
	bool (*test_pse)(struct intel_gvt_gtt_entry *e);
	/* Get/set the page frame number encoded in an entry. */
	void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
	unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
};

/*
 * Callbacks translating a graphics memory address (gma) into the index of
 * the corresponding entry at each page-table level.
 */
struct intel_gvt_gtt_gma_ops {
	unsigned long (*gma_to_ggtt_pte_index)(unsigned long gma);
	unsigned long (*gma_to_pte_index)(unsigned long gma);
	unsigned long (*gma_to_pde_index)(unsigned long gma);
	unsigned long (*gma_to_l3_pdp_index)(unsigned long gma);
	unsigned long (*gma_to_l4_pdp_index)(unsigned long gma);
	unsigned long (*gma_to_pml4_index)(unsigned long gma);
};

/* Per-device (GVT-wide) GTT state: ops tables, OOS page pools, LRU list. */
struct intel_gvt_gtt {
	struct intel_gvt_gtt_pte_ops *pte_ops;
	struct intel_gvt_gtt_gma_ops *gma_ops;
	int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm);
	void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
	/* Pools of out-of-sync (OOS) pages: currently in use vs. free. */
	struct list_head oos_page_use_list_head;
	struct list_head oos_page_free_list_head;
	/* LRU ordering of shadowed PPGTT mm objects across all vGPUs. */
	struct list_head ppgtt_mm_lru_list_head;

	/* Device-level scratch page and its machine frame number. */
	struct page *scratch_page;
	unsigned long scratch_mfn;
};

/*
 * Classification of every kind of GTT entry and page table handled by the
 * shadowing code; the *_ENTRY values name entry formats, the *_PT values
 * name whole page tables, and the ROOT/aggregate values group them.
 */
typedef enum {
	GTT_TYPE_INVALID = -1,

	GTT_TYPE_GGTT_PTE,

	GTT_TYPE_PPGTT_PTE_4K_ENTRY,
	GTT_TYPE_PPGTT_PTE_2M_ENTRY,
	GTT_TYPE_PPGTT_PTE_1G_ENTRY,

	GTT_TYPE_PPGTT_PTE_ENTRY,

	GTT_TYPE_PPGTT_PDE_ENTRY,
	GTT_TYPE_PPGTT_PDP_ENTRY,
	GTT_TYPE_PPGTT_PML4_ENTRY,

	GTT_TYPE_PPGTT_ROOT_ENTRY,

	GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
	GTT_TYPE_PPGTT_ROOT_L4_ENTRY,

	GTT_TYPE_PPGTT_ENTRY,

	GTT_TYPE_PPGTT_PTE_PT,
	GTT_TYPE_PPGTT_PDE_PT,
	GTT_TYPE_PPGTT_PDP_PT,
	GTT_TYPE_PPGTT_PML4_PT,

	GTT_TYPE_MAX,
} intel_gvt_gtt_type_t;

/* Discriminator for the union inside struct intel_vgpu_mm. */
enum intel_gvt_mm_type {
	INTEL_GVT_MM_GGTT,
	INTEL_GVT_MM_PPGTT,
};

#define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES

/*
 * A guest memory-translation object (mm): either the vGPU's single GGTT or
 * one shadowed PPGTT. Refcounted via @ref; @pincount tracks active pinning.
 */
struct intel_vgpu_mm {
	enum intel_gvt_mm_type type;
	struct intel_vgpu *vgpu;

	struct kref ref;
	atomic_t pincount;

	union {
		struct {
			/* L3 or L4 root entry format for this PPGTT. */
			intel_gvt_gtt_type_t root_entry_type;
			/*
			 * The 4 PDPs in ring context. For 48bit addressing,
			 * only PDP0 is valid and points to PML4. For 32bit
			 * addressing, all 4 are used as true PDPs.
			 */
			u64 guest_pdps[GVT_RING_CTX_NR_PDPS];
			u64 shadow_pdps[GVT_RING_CTX_NR_PDPS];
			/* True once shadow page tables have been built. */
			bool shadowed;

			/* Links into the vGPU mm list and the GVT LRU list. */
			struct list_head list;
			struct list_head lru_list;
		} ppgtt_mm;
		struct {
			/* Host copy of the guest's GGTT entries. */
			void *virtual_ggtt;
		} ggtt_mm;
	};
};

/* Read entry @index of @page_table into @e using the device pte_ops. */
extern int intel_vgpu_mm_get_entry(
		struct intel_vgpu_mm *mm,
		void *page_table, struct intel_gvt_gtt_entry *e,
		unsigned long index);

/* Write @e to entry @index of @page_table using the device pte_ops. */
extern int intel_vgpu_mm_set_entry(
		struct intel_vgpu_mm *mm,
		void *page_table, struct intel_gvt_gtt_entry *e,
		unsigned long index);

/*
 * GGTT accessors. Note that the guest and shadow variants both operate on
 * ggtt_mm.virtual_ggtt — the GGTT keeps a single combined table here.
 */
#define ggtt_get_guest_entry(mm, e, index) \
	intel_vgpu_mm_get_entry(mm, mm->ggtt_mm.virtual_ggtt, e, index)

#define ggtt_set_guest_entry(mm, e, index) \
	intel_vgpu_mm_set_entry(mm, mm->ggtt_mm.virtual_ggtt, e, index)

#define ggtt_get_shadow_entry(mm, e, index) \
	intel_vgpu_mm_get_entry(mm, mm->ggtt_mm.virtual_ggtt, e, index)

#define ggtt_set_shadow_entry(mm, e, index) \
	intel_vgpu_mm_set_entry(mm, mm->ggtt_mm.virtual_ggtt, e, index)

/* PPGTT root (PDP) accessors for the guest and shadow root tables. */
#define ppgtt_get_guest_root_entry(mm, e, index) \
	intel_vgpu_mm_get_entry(mm, mm->ppgtt_mm.guest_pdps, e, index)

#define ppgtt_set_guest_root_entry(mm, e, index) \
	intel_vgpu_mm_set_entry(mm, mm->ppgtt_mm.guest_pdps, e, index)

#define ppgtt_get_shadow_root_entry(mm, e, index) \
	intel_vgpu_mm_get_entry(mm, mm->ppgtt_mm.shadow_pdps, e, index)

#define ppgtt_set_shadow_root_entry(mm, e, index) \
	intel_vgpu_mm_set_entry(mm, mm->ppgtt_mm.shadow_pdps, e, index)

/* Create a shadowed PPGTT mm for @vgpu rooted at guest @pdps. */
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
/* kref release callback; do not call directly — use the unreference helper. */
extern void intel_vgpu_destroy_mm(struct kref *mm_ref);

struct intel_vgpu_guest_page;

/* A per-type scratch page table and its machine frame number. */
struct intel_vgpu_scratch_pt {
	struct page *page;
	unsigned long page_mfn;
};

/* Per-vGPU GTT state: the GGTT mm, tracking tables, and scratch PTs. */
struct intel_vgpu_gtt {
	struct intel_vgpu_mm *ggtt_mm;
	unsigned long active_ppgtt_mm_bitmap;

	struct list_head ppgtt_mm_list_head;
	DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
	DECLARE_HASHTABLE(tracked_guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
	atomic_t n_tracked_guest_page;
	/* Out-of-sync pages and pending post-shadow work for this vGPU. */
	struct list_head oos_page_list_head;
	struct list_head post_shadow_list_head;
	/* One scratch page table per GTT_TYPE_* level. */
	struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
};

extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);

extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);

/*
 * NOTE(review): distinct from intel_vgpu_find_ppgtt_mm() below — this
 * variant matches by page-table level and raw root entry.
 */
extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level, void *root_entry);

struct intel_vgpu_oos_page;

/* A shadow page table page: kernel mapping, backing page, type, and mfn. */
struct intel_vgpu_shadow_page {
	void *vaddr;
	struct page *page;
	int type;
	/* Entry in the per-vGPU shadow_page_hash_table. */
	struct hlist_node node;
	unsigned long mfn;
};

/* Write-protection tracking of one guest page (keyed by gfn). */
struct intel_vgpu_page_track {
	/* Entry in the per-vGPU tracked_guest_page_hash_table. */
	struct hlist_node node;
	bool tracked;
	unsigned long gfn;
	/* Called on a guest write to the tracked page. */
	int (*handler)(void *, u64, void *, int);
	void *data;
};

/* A tracked guest page table page, with optional out-of-sync state. */
struct intel_vgpu_guest_page {
	struct intel_vgpu_page_track track;
	/* Number of guest writes observed; presumably drives OOS policy. */
	unsigned long write_cnt;
	struct intel_vgpu_oos_page *oos_page;
};

/*
 * An out-of-sync page: a snapshot buffer of a guest page whose shadow is
 * allowed to lag and be re-synced later.
 */
struct intel_vgpu_oos_page {
	struct intel_vgpu_guest_page *guest_page;
	struct list_head list;
	struct list_head vm_list;
	int id;
	unsigned char mem[I915_GTT_PAGE_SIZE];
};

/* 4KiB page / 8-byte entries = 512 entries per page table page. */
#define GTT_ENTRY_NUM_IN_ONE_PAGE 512

/* Shadow page table (SPT): pairs a guest PT page with its shadow copy. */
struct intel_vgpu_ppgtt_spt {
	struct intel_vgpu_shadow_page shadow_page;
	struct intel_vgpu_guest_page guest_page;
	int guest_page_type;
	atomic_t refcount;
	struct intel_vgpu *vgpu;
	/* Marks entries with pending (deferred) shadow updates. */
	DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
	struct list_head post_shadow_list;
};

/* Start write-protection tracking of guest page @gfn with @handler/@data. */
int
intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
		struct intel_vgpu_page_track *t,
		unsigned long gfn,
		int (*handler)(void *gp, u64, void *, int),
		void *data);

void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
		struct intel_vgpu_page_track *t);

struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
		struct intel_vgpu *vgpu, unsigned long gfn);

/* Re-synchronize all out-of-sync pages of @vgpu. */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);

/* Apply all deferred (post-shadow) page table updates of @vgpu. */
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);

/* Take a reference on @mm. */
static inline void intel_gvt_mm_reference(struct intel_vgpu_mm *mm)
{
	kref_get(&mm->ref);
}

/* Drop a reference on @mm; frees it via intel_vgpu_destroy_mm at zero. */
static inline void intel_gvt_mm_unreference(struct intel_vgpu_mm *mm)
{
	kref_put(&mm->ref, intel_vgpu_destroy_mm);
}

int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);

void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);

/* Translate graphics memory address @gma to a guest physical address. */
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
		unsigned long gma);

struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
		u64 pdps[]);

/* Guest-to-host (g2v) notifications for PPGTT creation/destruction. */
int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);

int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]);

/* MMIO emulation entry points for guest GTT register accesses. */
int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes);

int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes);

/* Dispatch a guest write that hit a write-protected (tracked) page. */
int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
		void *p_data, unsigned int bytes);

#endif /* _GVT_GTT_H_ */