/*
 * Xen stolen ticks accounting.
 */
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/math64.h>
#include <linux/gfp.h>

#include <asm/paravirt.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
#include <xen/features.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/xen-ops.h>

/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);

/* return a consistent snapshot of 64-bit time/counter value */
static u64 get64(const u64 *p)
{
	u64 ret;

	if (BITS_PER_LONG < 64) {
		u32 *p32 = (u32 *)p;
		u32 h, l, h2;

		/*
		 * Read high then low, and then make sure high is
		 * still the same; this will only loop if low wraps
		 * and carries into high.
		 * XXX some clean way to make this endian-proof?
		 */
		do {
			h = READ_ONCE(p32[1]);
			l = READ_ONCE(p32[0]);
			h2 = READ_ONCE(p32[1]);
		} while (h2 != h);

		ret = (((u64)h) << 32) | l;
	} else
		ret = READ_ONCE(*p);

	return ret;
}

/*
 * Runstate accounting
 */
void xen_get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

	BUG_ON(preemptible());

	state = this_cpu_ptr(&xen_runstate);

	/*
	 * The runstate info is always updated by the hypervisor on
	 * the current CPU, so there's no need to use anything
	 * stronger than a compiler barrier when fetching it.
	 */
	do {
		state_time = get64(&state->state_entry_time);
		*res = READ_ONCE(*state);
	} while (get64(&state->state_entry_time) != state_time);
}

/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
	return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
}

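/*
 * Return the time (in ns) the calling vcpu has spent runnable or
 * offline, i.e. the time stolen from it by the hypervisor. Only
 * valid for the current cpu, as the local runstate area is read.
 */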
static u64 xen_steal_clock(int cpu)
{
	struct vcpu_runstate_info state;

	BUG_ON(cpu != smp_processor_id());
	xen_get_runstate_snapshot(&state);
	return state.time[RUNSTATE_runnable] + state.time[RUNSTATE_offline];
}

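/*
 * Register the per-cpu runstate area with the hypervisor so that
 * Xen keeps the runstate info for the given cpu up to date in
 * guest memory.
 */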
void xen_setup_runstate_info(int cpu)
{
	struct vcpu_register_runstate_memory_area area;

	area.addr.v = &per_cpu(xen_runstate, cpu);

	if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
			       cpu, &area))
		BUG();
}

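/*
 * Hook the Xen steal clock into the paravirt time interface and
 * enable steal time accounting.
 */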
void __init xen_time_setup_guest(void)
{
	pv_time_ops.steal_clock = xen_steal_clock;

	static_key_slow_inc(&paravirt_steal_enabled);
	/*
	 * We can't set paravirt_steal_rq_enabled as this would require the
	 * capability to read another cpu's runstate info.
	 */
}