#ifndef __KVM_X86_LAPIC_H
#define __KVM_X86_LAPIC_H

#include "iodev.h"

#include <linux/kvm_host.h>

struct kvm_timer {
	struct hrtimer timer;
	s64 period;			/* unit: ns */
	u32 timer_mode_mask;
	u64 tscdeadline;
	atomic_t pending;		/* accumulated triggered timers */
};

struct kvm_lapic {
	unsigned long base_address;
	struct kvm_io_device dev;
	struct kvm_timer lapic_timer;
	u32 divide_count;
	struct kvm_vcpu *vcpu;
	bool irr_pending;
	/* Number of bits set in ISR. */
	s16 isr_count;
	/* The highest vector set in ISR; if -1 - invalid, must scan ISR. */
	int highest_isr_cache;
	/**
	 * APIC register page.  The layout matches the register layout seen by
	 * the guest 1:1, because it is accessed by the vmx microcode.
	 * Note: Only one register, the TPR, is used by the microcode.
	 */
	void *regs;
	gpa_t vapic_addr;
	struct page *vapic_page;
};

int kvm_create_lapic(struct kvm_vcpu *vcpu);
void kvm_free_lapic(struct kvm_vcpu *vcpu);

int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
void kvm_lapic_reset(struct kvm_vcpu *vcpu);
u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu);
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
void kvm_apic_set_version(struct kvm_vcpu *vcpu);

int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type);

bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, int *r);

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
		struct kvm_lapic_state *s);
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);

u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);

void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset);
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector);

void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);

int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data);

int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data);

static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hv_vapic & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE;
}

int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
void kvm_lapic_init(void);

static inline u32 kvm_apic_get_reg(struct kvm_lapic *apic, int reg_off)
{
	return *((u32 *) (apic->regs + reg_off));
}

extern struct static_key kvm_no_apic_vcpu;

/*
 * If the static key is off (no vCPU lacks an in-kernel LAPIC), skip the
 * pointer check and report true.
 */
static inline bool kvm_vcpu_has_lapic(struct kvm_vcpu *vcpu)
{
	if (static_key_false(&kvm_no_apic_vcpu))
		return vcpu->arch.apic;
	return true;
}

extern struct static_key_deferred apic_hw_disabled;

/*
 * Read MSR_IA32_APICBASE_ENABLE from the APIC base only while some APIC is
 * hardware-disabled; otherwise return the enabled value directly.
 */
static inline int kvm_apic_hw_enabled(struct kvm_lapic *apic)
{
	if (static_key_false(&apic_hw_disabled.key))
		return apic->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE;
	return MSR_IA32_APICBASE_ENABLE;
}

extern struct static_key_deferred apic_sw_disabled;

/*
 * Read APIC_SPIV_APIC_ENABLED from the spurious-interrupt vector register
 * only while some APIC is software-disabled.
 */
static inline int kvm_apic_sw_enabled(struct kvm_lapic *apic)
{
	if (static_key_false(&apic_sw_disabled.key))
		return kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED;
	return APIC_SPIV_APIC_ENABLED;
}

static inline bool kvm_apic_present(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_has_lapic(vcpu) && kvm_apic_hw_enabled(vcpu->arch.apic);
}

static inline int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
{
	return kvm_apic_present(vcpu) && kvm_apic_sw_enabled(vcpu->arch.apic);
}

static inline int apic_x2apic_mode(struct kvm_lapic *apic)
{
	return apic->vcpu->arch.apic_base & X2APIC_ENABLE;
}

static inline bool kvm_apic_vid_enabled(struct kvm *kvm)
{
	return kvm_x86_ops->vm_has_apicv(kvm);
}

/*
 * Right-align the logical destination register for the map's LDR format,
 * then extract the cluster number used to index logical_map.
 */
static inline u16 apic_cluster_id(struct kvm_apic_map *map, u32 ldr)
{
	u16 cid;
	ldr >>= 32 - map->ldr_bits;
	cid = (ldr >> map->cid_shift) & map->cid_mask;

	BUG_ON(cid >= ARRAY_SIZE(map->logical_map));

	return cid;
}

/* Extract the per-cluster logical ID bits from the LDR. */
static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
{
	ldr >>= (32 - map->ldr_bits);
	return ldr & map->lid_mask;
}

void kvm_calculate_eoi_exitmap(struct kvm_vcpu *vcpu,
				struct kvm_lapic_irq *irq,
				u64 *eoi_bitmap);

#endif