References to t (struct kvm_vcpu_timer *) in the RISC-V KVM vCPU timer, arch/riscv/kvm/vcpu_timer.c. Each entry lists the source line number, the matched code, the enclosing function, and whether t is an argument or a local variable at that site:

25     struct kvm_vcpu_timer *t)  in kvm_riscv_delta_cycles2ns()  argument
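
Only the closing parameter line of kvm_riscv_delta_cycles2ns() matches here (t is tagged "argument" because it is a parameter, not a local). A minimal sketch of the whole helper for orientation, assuming kvm_guest_timer carries a precomputed nsec_mult/nsec_shift scaling pair and that kvm_riscv_current_cycles() returns the guest view of the time counter:

static u64 kvm_riscv_delta_cycles2ns(u64 cycles,
				     struct kvm_guest_timer *gt,
				     struct kvm_vcpu_timer *t)
{
	unsigned long flags;
	u64 cycles_now, cycles_delta, delta_ns;

	/* Snapshot guest time with interrupts off so the delta is stable. */
	local_irq_save(flags);
	cycles_now = kvm_riscv_current_cycles(gt);
	cycles_delta = (cycles_now < cycles) ? cycles - cycles_now : 0;
	/* Assumed scaling: mult/shift pair derived from riscv_timebase. */
	delta_ns = (cycles_delta * gt->nsec_mult) >> gt->nsec_shift;
	local_irq_restore(flags);

	return delta_ns;
}
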
45 struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt); in kvm_riscv_vcpu_hrtimer_expired() local
46 struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer); in kvm_riscv_vcpu_hrtimer_expired()
49 if (kvm_riscv_current_cycles(gt) < t->next_cycles) { in kvm_riscv_vcpu_hrtimer_expired()
50 delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t); in kvm_riscv_vcpu_hrtimer_expired()
51 hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns)); in kvm_riscv_vcpu_hrtimer_expired()
55 t->next_set = false; in kvm_riscv_vcpu_hrtimer_expired()
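
Lines 45-55 belong to the hrtimer expiry callback used when Sstc is not available. A sketch of how the fragments fit together; the gt lookup and the interrupt injection at the end are reconstructed from context, not shown in the matches above:

static enum hrtimer_restart kvm_riscv_vcpu_hrtimer_expired(struct hrtimer *h)
{
	u64 delta_ns;
	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;	/* assumed */

	/* Fired early in guest terms? Re-arm for the remaining delta. */
	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
		return HRTIMER_RESTART;
	}

	t->next_set = false;
	/* Assumed: deadline reached, inject the VS-level timer interrupt. */
	kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_TIMER);

	return HRTIMER_NORESTART;
}
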
61 static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t) in kvm_riscv_vcpu_timer_cancel() argument
63 if (!t->init_done || !t->next_set) in kvm_riscv_vcpu_timer_cancel()
66 hrtimer_cancel(&t->hrt); in kvm_riscv_vcpu_timer_cancel()
67 t->next_set = false; in kvm_riscv_vcpu_timer_cancel()
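
Lines 63-67 form the cancel path, almost fully visible above. Sketch; only the error value on the early return is an assumption:

static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
{
	/* Nothing to cancel unless the timer is initialized and armed. */
	if (!t->init_done || !t->next_set)
		return -EINVAL;	/* assumed */

	hrtimer_cancel(&t->hrt);
	t->next_set = false;

	return 0;
}
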
85 struct kvm_vcpu_timer *t = &vcpu->arch.timer; in kvm_riscv_vcpu_update_hrtimer() local
89 if (!t->init_done) in kvm_riscv_vcpu_update_hrtimer()
94 delta_ns = kvm_riscv_delta_cycles2ns(ncycles, gt, t); in kvm_riscv_vcpu_update_hrtimer()
95 t->next_cycles = ncycles; in kvm_riscv_vcpu_update_hrtimer()
96 hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL); in kvm_riscv_vcpu_update_hrtimer()
97 t->next_set = true; in kvm_riscv_vcpu_update_hrtimer()
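
Lines 85-97 program the next guest deadline through the hrtimer backend. Sketch; the gt lookup, the error values, and the clearing of a previously injected VS timer interrupt before re-arming are assumptions:

static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;	/* assumed */
	u64 delta_ns;

	if (!t->init_done)
		return -EINVAL;	/* assumed */

	/* Assumed: retract any stale injected interrupt before re-arming. */
	kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_TIMER);

	delta_ns = kvm_riscv_delta_cycles2ns(ncycles, gt, t);
	t->next_cycles = ncycles;
	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
	t->next_set = true;

	return 0;
}
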
104 struct kvm_vcpu_timer *t = &vcpu->arch.timer; in kvm_riscv_vcpu_timer_next_event() local
106 return t->timer_next_event(vcpu, ncycles); in kvm_riscv_vcpu_timer_next_event()
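
Lines 104-106 are just an indirection: t->timer_next_event points at whichever backend kvm_riscv_vcpu_timer_init() installed (lines 259 and 263 below). Sketch:

int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	/* Dispatch to the Sstc or plain-hrtimer backend chosen at init. */
	return t->timer_next_event(vcpu, ncycles);
}
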
112 struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt); in kvm_riscv_vcpu_vstimer_expired() local
113 struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer); in kvm_riscv_vcpu_vstimer_expired()
116 if (kvm_riscv_current_cycles(gt) < t->next_cycles) { in kvm_riscv_vcpu_vstimer_expired()
117 delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t); in kvm_riscv_vcpu_vstimer_expired()
118 hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns)); in kvm_riscv_vcpu_vstimer_expired()
122 t->next_set = false; in kvm_riscv_vcpu_vstimer_expired()
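
Lines 112-122 mirror the handler at lines 45-55, but for the Sstc case. The tail differs: with Sstc the hardware raises the timer interrupt itself once vstimecmp is in range, so the callback only needs to wake a blocked vCPU. The gt lookup and the kick at the end are reconstructed:

static enum hrtimer_restart kvm_riscv_vcpu_vstimer_expired(struct hrtimer *h)
{
	u64 delta_ns;
	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;	/* assumed */

	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
		return HRTIMER_RESTART;
	}

	t->next_set = false;
	kvm_vcpu_kick(vcpu);	/* assumed: no software injection needed */

	return HRTIMER_NORESTART;
}
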
130 struct kvm_vcpu_timer *t = &vcpu->arch.timer; in kvm_riscv_vcpu_timer_pending() local
133 if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) || in kvm_riscv_vcpu_timer_pending()
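
Line 133 is half of a two-part condition in kvm_riscv_vcpu_timer_pending(); the other half (a check for an already-queued VS timer interrupt) and the gt lookup are assumptions here:

bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;	/* assumed */

	/* Zero delta means the programmed deadline has already passed. */
	if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) ||
	    kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER))
		return true;

	return false;
}
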
142 struct kvm_vcpu_timer *t = &vcpu->arch.timer; in kvm_riscv_vcpu_timer_blocking() local
146 if (!t->init_done) in kvm_riscv_vcpu_timer_blocking()
149 delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t); in kvm_riscv_vcpu_timer_blocking()
150 hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL); in kvm_riscv_vcpu_timer_blocking()
151 t->next_set = true; in kvm_riscv_vcpu_timer_blocking()
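
Lines 142-151 arm a host-side wakeup when the vCPU blocks (e.g. on a guest WFI): a descheduled vCPU cannot react to the hardware Sstc interrupt, so an hrtimer stands in for it. Sketch with an assumed gt lookup:

static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;	/* assumed */
	u64 delta_ns;

	if (!t->init_done)
		return;

	delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
	hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
	t->next_set = true;
}
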
162 struct kvm_vcpu_timer *t = &vcpu->arch.timer; in kvm_riscv_vcpu_get_reg_timer() local
183 reg_val = t->next_cycles; in kvm_riscv_vcpu_get_reg_timer()
186 reg_val = (t->next_set) ? KVM_RISCV_TIMER_STATE_ON : in kvm_riscv_vcpu_get_reg_timer()
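
Lines 183 and 186 sit inside the reg_num switch of the KVM_GET_ONE_REG handler; line 186's ternary is cut off here because its continuation line does not mention t. A sketch of the switch (reg_num, reg_val, gt, and t are locals of the enclosing function); the frequency/time cases and the default are assumptions:

	switch (reg_num) {
	case KVM_REG_RISCV_TIMER_REG(frequency):
		reg_val = riscv_timebase;			/* assumed */
		break;
	case KVM_REG_RISCV_TIMER_REG(time):
		reg_val = kvm_riscv_current_cycles(gt);		/* assumed */
		break;
	case KVM_REG_RISCV_TIMER_REG(compare):
		reg_val = t->next_cycles;
		break;
	case KVM_REG_RISCV_TIMER_REG(state):
		reg_val = (t->next_set) ? KVM_RISCV_TIMER_STATE_ON :
					  KVM_RISCV_TIMER_STATE_OFF;
		break;
	default:
		return -ENOENT;					/* assumed */
	}
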
202 struct kvm_vcpu_timer *t = &vcpu->arch.timer; in kvm_riscv_vcpu_set_reg_timer() local
228 t->next_cycles = reg_val; in kvm_riscv_vcpu_set_reg_timer()
234 ret = kvm_riscv_vcpu_timer_cancel(t); in kvm_riscv_vcpu_set_reg_timer()
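
Lines 228 and 234 are the corresponding KVM_SET_ONE_REG cases: userspace either reprograms the compare value or flips the timer state, with state-off mapping to a cancel. Sketch; the state-on branch and the default are reconstructed, and the remaining cases are elided:

	switch (reg_num) {
	/* frequency/time cases elided */
	case KVM_REG_RISCV_TIMER_REG(compare):
		t->next_cycles = reg_val;
		break;
	case KVM_REG_RISCV_TIMER_REG(state):
		if (reg_val == KVM_RISCV_TIMER_STATE_ON)
			/* assumed: re-arm at the programmed compare value */
			ret = kvm_riscv_vcpu_timer_next_event(vcpu,
							      t->next_cycles);
		else
			ret = kvm_riscv_vcpu_timer_cancel(t);
		break;
	default:
		ret = -ENOENT;					/* assumed */
		break;
	}
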
246 struct kvm_vcpu_timer *t = &vcpu->arch.timer; in kvm_riscv_vcpu_timer_init() local
248 if (t->init_done) in kvm_riscv_vcpu_timer_init()
251 hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in kvm_riscv_vcpu_timer_init()
252 t->init_done = true; in kvm_riscv_vcpu_timer_init()
253 t->next_set = false; in kvm_riscv_vcpu_timer_init()
257 t->sstc_enabled = true; in kvm_riscv_vcpu_timer_init()
258 t->hrt.function = kvm_riscv_vcpu_vstimer_expired; in kvm_riscv_vcpu_timer_init()
259 t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp; in kvm_riscv_vcpu_timer_init()
261 t->sstc_enabled = false; in kvm_riscv_vcpu_timer_init()
262 t->hrt.function = kvm_riscv_vcpu_hrtimer_expired; in kvm_riscv_vcpu_timer_init()
263 t->timer_next_event = kvm_riscv_vcpu_update_hrtimer; in kvm_riscv_vcpu_timer_init()
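
Lines 246-263 show init picking between the two backends, but the branch condition itself is not in the matches. A sketch assuming it tests for the Sstc extension via riscv_isa_extension_available():

int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (t->init_done)
		return -EINVAL;	/* assumed */

	hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t->init_done = true;
	t->next_set = false;

	if (riscv_isa_extension_available(vcpu->arch.isa, SSTC)) { /* assumed */
		/* Hardware compares vstimecmp; hrtimer only handles wakeups. */
		t->sstc_enabled = true;
		t->hrt.function = kvm_riscv_vcpu_vstimer_expired;
		t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
	} else {
		/* Pure software emulation: hrtimer plus interrupt injection. */
		t->sstc_enabled = false;
		t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
		t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
	}

	return 0;
}
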
281 struct kvm_vcpu_timer *t = &vcpu->arch.timer; in kvm_riscv_vcpu_timer_reset() local
283 t->next_cycles = -1ULL; in kvm_riscv_vcpu_timer_reset()
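
Line 283 resets the deadline to "never": -1ULL is the all-ones compare value, i.e. the farthest possible deadline. Sketch; the trailing cancel is an assumption:

int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	t->next_cycles = -1ULL;
	return kvm_riscv_vcpu_timer_cancel(t);	/* assumed */
}
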
301 struct kvm_vcpu_timer *t = &vcpu->arch.timer; in kvm_riscv_vcpu_timer_restore() local
305 if (!t->sstc_enabled) in kvm_riscv_vcpu_timer_restore()
309 csr_write(CSR_VSTIMECMP, (u32)t->next_cycles); in kvm_riscv_vcpu_timer_restore()
310 csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32)); in kvm_riscv_vcpu_timer_restore()
312 csr_write(CSR_VSTIMECMP, t->next_cycles); in kvm_riscv_vcpu_timer_restore()
316 if (unlikely(!t->init_done)) in kvm_riscv_vcpu_timer_restore()
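
Lines 301-316 run on vCPU load: for Sstc guests the saved compare value is written back into vstimecmp, split across two CSRs on rv32. Sketch; the time-delta update at the top and the unblocking call at the bottom are reconstructed from context:

void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	kvm_riscv_vcpu_update_timedelta(vcpu);	/* assumed */

	if (!t->sstc_enabled)
		return;

#if defined(CONFIG_32BIT)
	/* rv32: the 64-bit vstimecmp spans VSTIMECMP and VSTIMECMPH. */
	csr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
	csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
#else
	csr_write(CSR_VSTIMECMP, t->next_cycles);
#endif

	if (unlikely(!t->init_done))
		return;

	kvm_riscv_vcpu_timer_unblocking(vcpu);	/* assumed */
}
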
324 struct kvm_vcpu_timer *t = &vcpu->arch.timer; in kvm_riscv_vcpu_timer_sync() local
326 if (!t->sstc_enabled) in kvm_riscv_vcpu_timer_sync()
330 t->next_cycles = csr_read(CSR_VSTIMECMP); in kvm_riscv_vcpu_timer_sync()
331 t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32; in kvm_riscv_vcpu_timer_sync()
333 t->next_cycles = csr_read(CSR_VSTIMECMP); in kvm_riscv_vcpu_timer_sync()
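
Lines 324-333 are almost fully visible: on VM-exit the guest may have rewritten vstimecmp, so the in-memory copy is refreshed from the CSRs. Only the #ifdef structure around the two read paths is inferred:

void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (!t->sstc_enabled)
		return;

#if defined(CONFIG_32BIT)
	t->next_cycles = csr_read(CSR_VSTIMECMP);
	t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
#else
	t->next_cycles = csr_read(CSR_VSTIMECMP);
#endif
}
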
339 struct kvm_vcpu_timer *t = &vcpu->arch.timer; in kvm_riscv_vcpu_timer_save() local
341 if (!t->sstc_enabled) in kvm_riscv_vcpu_timer_save()
350 if (unlikely(!t->init_done)) in kvm_riscv_vcpu_timer_save()
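
Lines 339-350 are the vCPU-put path. The body between the two guards is not in the matches; the sketch assumes it hands off to the blocking hrtimer so a sleeping guest still gets its wakeup while descheduled:

void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_timer *t = &vcpu->arch.timer;

	if (!t->sstc_enabled)
		return;

	/* vstimecmp itself was already captured by _sync() on VM-exit. */
	if (unlikely(!t->init_done))
		return;

	kvm_riscv_vcpu_timer_blocking(vcpu);	/* assumed */
}
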