/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#ifndef _GVT_GTT_H_
#define _GVT_GTT_H_

#define GTT_PAGE_SHIFT 12
#define GTT_PAGE_SIZE (1UL << GTT_PAGE_SHIFT)
#define GTT_PAGE_MASK (~(GTT_PAGE_SIZE-1))

struct intel_vgpu_mm;

#define INTEL_GVT_GTT_HASH_BITS 8
#define INTEL_GVT_INVALID_ADDR (~0UL)

struct intel_gvt_gtt_entry {
	u64 val64;
	int type;
};

struct intel_gvt_gtt_pte_ops {
	int (*get_entry)(void *pt,
			struct intel_gvt_gtt_entry *e,
			unsigned long index,
			bool hypervisor_access,
			unsigned long gpa,
			struct intel_vgpu *vgpu);
	int (*set_entry)(void *pt,
			struct intel_gvt_gtt_entry *e,
			unsigned long index,
			bool hypervisor_access,
			unsigned long gpa,
			struct intel_vgpu *vgpu);
	bool (*test_present)(struct intel_gvt_gtt_entry *e);
	void (*clear_present)(struct intel_gvt_gtt_entry *e);
	bool (*test_pse)(struct intel_gvt_gtt_entry *e);
	void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
	unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
};

struct intel_gvt_gtt_gma_ops {
	unsigned long (*gma_to_ggtt_pte_index)(unsigned long gma);
	unsigned long (*gma_to_pte_index)(unsigned long gma);
	unsigned long (*gma_to_pde_index)(unsigned long gma);
	unsigned long (*gma_to_l3_pdp_index)(unsigned long gma);
	unsigned long (*gma_to_l4_pdp_index)(unsigned long gma);
	unsigned long (*gma_to_pml4_index)(unsigned long gma);
};

struct intel_gvt_gtt {
	struct intel_gvt_gtt_pte_ops *pte_ops;
	struct intel_gvt_gtt_gma_ops *gma_ops;
	int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm);
	void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
	struct list_head oos_page_use_list_head;
	struct list_head oos_page_free_list_head;
	struct list_head mm_lru_list_head;

	struct page *scratch_ggtt_page;
	unsigned long scratch_ggtt_mfn;
};

enum {
	INTEL_GVT_MM_GGTT = 0,
	INTEL_GVT_MM_PPGTT,
};

typedef enum {
	GTT_TYPE_INVALID = -1,

	GTT_TYPE_GGTT_PTE,

	GTT_TYPE_PPGTT_PTE_4K_ENTRY,
	GTT_TYPE_PPGTT_PTE_2M_ENTRY,
	GTT_TYPE_PPGTT_PTE_1G_ENTRY,
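	/*
	 * The remaining *_ENTRY types name a single entry at a given
	 * page-table level, the ROOT_L3/L4 variants distinguish the root
	 * entries of 3-level and 4-level PPGTTs, and the *_PT types name
	 * a whole page table of the corresponding level.
	 */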
	GTT_TYPE_PPGTT_PTE_ENTRY,

	GTT_TYPE_PPGTT_PDE_ENTRY,
	GTT_TYPE_PPGTT_PDP_ENTRY,
	GTT_TYPE_PPGTT_PML4_ENTRY,

	GTT_TYPE_PPGTT_ROOT_ENTRY,

	GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
	GTT_TYPE_PPGTT_ROOT_L4_ENTRY,

	GTT_TYPE_PPGTT_ENTRY,

	GTT_TYPE_PPGTT_PTE_PT,
	GTT_TYPE_PPGTT_PDE_PT,
	GTT_TYPE_PPGTT_PDP_PT,
	GTT_TYPE_PPGTT_PML4_PT,

	GTT_TYPE_MAX,
} intel_gvt_gtt_type_t;

struct intel_vgpu_mm {
	int type;
	bool initialized;
	bool shadowed;

	int page_table_entry_type;
	u32 page_table_entry_size;
	u32 page_table_entry_cnt;
	void *virtual_page_table;
	void *shadow_page_table;

	int page_table_level;
	bool has_shadow_page_table;
	u32 pde_base_index;

	struct list_head list;
	struct kref ref;
	atomic_t pincount;
	struct list_head lru_list;
	struct intel_vgpu *vgpu;
};

extern int intel_vgpu_mm_get_entry(
		struct intel_vgpu_mm *mm,
		void *page_table, struct intel_gvt_gtt_entry *e,
		unsigned long index);

extern int intel_vgpu_mm_set_entry(
		struct intel_vgpu_mm *mm,
		void *page_table, struct intel_gvt_gtt_entry *e,
		unsigned long index);

#define ggtt_get_guest_entry(mm, e, index) \
	intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)

#define ggtt_set_guest_entry(mm, e, index) \
	intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)

#define ggtt_get_shadow_entry(mm, e, index) \
	intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)

#define ggtt_set_shadow_entry(mm, e, index) \
	intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)

#define ppgtt_get_guest_root_entry(mm, e, index) \
	intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)

#define ppgtt_set_guest_root_entry(mm, e, index) \
	intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)

#define ppgtt_get_shadow_root_entry(mm, e, index) \
	intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)

#define ppgtt_set_shadow_root_entry(mm, e, index) \
	intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)

extern struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
		int mm_type, void *virtual_page_table, int page_table_level,
		u32 pde_base_index);
extern void intel_vgpu_destroy_mm(struct kref *mm_ref);

struct intel_vgpu_guest_page;

struct intel_vgpu_scratch_pt {
	struct page *page;
	unsigned long page_mfn;
};

struct intel_vgpu_gtt {
	struct intel_vgpu_mm *ggtt_mm;
	unsigned long active_ppgtt_mm_bitmap;
	struct list_head mm_list_head;
	DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
	DECLARE_HASHTABLE(guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
	atomic_t n_write_protected_guest_page;
	struct list_head oos_page_list_head;
	struct list_head post_shadow_list_head;
	struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
};

extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);

extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);

extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level, void *root_entry);

struct intel_vgpu_oos_page;
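/*
 * Shadow page table tracking: intel_vgpu_shadow_page describes a host-side
 * shadow page table page (kernel mapping, struct page and machine frame
 * number), intel_vgpu_guest_page describes the write-protected guest page
 * it shadows (guest frame number, write handler and write count), and
 * intel_vgpu_oos_page caches the content of a guest page table page whose
 * shadow has been allowed to go out of sync with the guest.
 */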

struct intel_vgpu_shadow_page {
	void *vaddr;
	struct page *page;
	int type;
	struct hlist_node node;
	unsigned long mfn;
};

struct intel_vgpu_guest_page {
	struct hlist_node node;
	bool writeprotection;
	unsigned long gfn;
	int (*handler)(void *, u64, void *, int);
	void *data;
	unsigned long write_cnt;
	struct intel_vgpu_oos_page *oos_page;
};

struct intel_vgpu_oos_page {
	struct intel_vgpu_guest_page *guest_page;
	struct list_head list;
	struct list_head vm_list;
	int id;
	unsigned char mem[GTT_PAGE_SIZE];
};

#define GTT_ENTRY_NUM_IN_ONE_PAGE 512

struct intel_vgpu_ppgtt_spt {
	struct intel_vgpu_shadow_page shadow_page;
	struct intel_vgpu_guest_page guest_page;
	int guest_page_type;
	atomic_t refcount;
	struct intel_vgpu *vgpu;
	DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
	struct list_head post_shadow_list;
};

int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *guest_page,
		unsigned long gfn,
		int (*handler)(void *gp, u64, void *, int),
		void *data);

void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *guest_page);

int intel_vgpu_set_guest_page_writeprotection(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *guest_page);

void intel_vgpu_clear_guest_page_writeprotection(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *guest_page);

struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
		struct intel_vgpu *vgpu, unsigned long gfn);

int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);

int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);

static inline void intel_gvt_mm_reference(struct intel_vgpu_mm *mm)
{
	kref_get(&mm->ref);
}

static inline void intel_gvt_mm_unreference(struct intel_vgpu_mm *mm)
{
	kref_put(&mm->ref, intel_vgpu_destroy_mm);
}

int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);

void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);

unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
		unsigned long gma);

struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level, void *root_entry);

int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level);

int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level);

int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
		unsigned int off, void *p_data, unsigned int bytes);

int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
		unsigned int off, void *p_data, unsigned int bytes);

#endif /* _GVT_GTT_H_ */