xref: /openbmc/linux/arch/x86/xen/xen-asm.S (revision a1e58bbd)
/*
	Asm versions of Xen pv-ops, suitable for either direct use or
	inlining.  The inline versions are the same as the direct-use
	versions, with the pre- and post-amble chopped off.

	This code is tuned for size rather than absolute efficiency,
	with a view to being able to inline as much as possible.

	We only bother with the direct forms (i.e., vcpu info in the
	per-cpu area) of these operations here; the indirect forms are
	better handled in C, since they're generally too large to
	inline anyway.
 */

#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>

#include <xen/interface/xen.h>

#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
#define ENDPATCH(x)	.globl x##_end; x##_end=.
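
/*
	How these are used (a rough sketch of the patching contract; the
	paravirt patching code is the authority here): each *_direct op
	below, from its ENTRY up to the x##_end symbol emitted by
	ENDPATCH, can be copied inline into the call site.  The x##_reloc
	symbol emitted by RELOC marks where a relocatable operand lives in
	that body ("2b+1" points at the displacement of the
	"2: call check_events" instruction, so it can be re-targeted after
	the copy), and 0 means nothing needs relocating.
 */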

/* Pseudo-flag used for virtual NMI, which we don't implement yet */
#define XEN_EFLAGS_NMI	0x80000000

/*
	Enable events.  This clears the event mask and then tests the
	pending event status; if there are pending events, enter the
	hypervisor to get them handled.
 */
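/*
	Roughly the following C logic, open-coded against this CPU's
	vcpu_info (an illustrative sketch, not the authoritative
	implementation):

		vcpu->evtchn_upcall_mask = 0;
		barrier();
		if (vcpu->evtchn_upcall_pending)
			force_evtchn_callback();
 */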
ENTRY(xen_irq_enable_direct)
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask

	/* Preempt here doesn't matter because that will deal with
	   any pending interrupts.  The pending check may end up being
	   run on the wrong CPU, but that doesn't hurt. */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
	jz 1f

2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)


/*
	Disabling events is simply a matter of making the event mask
	non-zero.
 */
ENTRY(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)

/*
	(xen_)save_fl is used to get the current interrupt enable status.
	Callers expect the status to be reported in the X86_EFLAGS_IF bit,
	and they tolerate other bits being set in the return value.  We
	take advantage of this by making sure X86_EFLAGS_IF has the right
	value (and the other bits in that byte are 0), while leaving the
	rest of the return value undefined.  We need to invert the sense
	of the bit, because Xen and x86 use opposite conventions (event
	mask vs. interrupt enable).
 */
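/*
	How the flag is built below (the return value is in %eax):
	X86_EFLAGS_IF is bit 9 (0x200), i.e. bit 1 of %ah.  testb sets ZF
	when the mask byte is 0 (events enabled), setz turns that into
	%ah = 1, and addb %ah,%ah doubles it to %ah = 2, so %eax has
	X86_EFLAGS_IF set exactly when events were enabled, with the rest
	of that byte clear.
 */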
ENTRY(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
	setz %ah
	addb %ah,%ah
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)


/*
	In principle the caller should be passing us a value returned
	from xen_save_fl_direct, but for robustness' sake we test only
	the X86_EFLAGS_IF flag rather than the whole byte.  After
	setting the interrupt mask state, we check for unmasked
	pending events and enter the hypervisor to get them delivered
	if there are any.
 */
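/*
	Roughly the following C logic (an illustrative sketch only; the
	flags value is the one produced by xen_save_fl_direct):

		vcpu->evtchn_upcall_mask = !(flags & X86_EFLAGS_IF);
		barrier();
		if (!vcpu->evtchn_upcall_mask && vcpu->evtchn_upcall_pending)
			force_evtchn_callback();

	The "unmasked and pending" test below is a single 16-bit compare:
	evtchn_upcall_pending and evtchn_upcall_mask are adjacent bytes in
	struct vcpu_info, so the word at the pending offset equals 0x0001
	exactly when an event is pending and the mask is clear.
 */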
ENTRY(xen_restore_fl_direct)
	testb $X86_EFLAGS_IF>>8, %ah
	setz PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
	/* Preempt here doesn't matter because that will deal with
	   any pending interrupts.  The pending check may end up being
	   run on the wrong CPU, but that doesn't hurt. */

	/* check for unmasked and pending */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
	jnz 1f
2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)

/*
	This is run where a normal iret would be run, with the same stack setup:
	      8: eflags
	      4: cs
	esp-> 0: eip

	This attempts to make sure that any pending events are dealt
	with on return to usermode, but there is a small window in
	which an event can happen just before entering usermode.  If
	the nested interrupt ends up setting one of the TIF_WORK_MASK
	pending work flags, it will not be tested again before
	returning to usermode.  This means that a process can end up
	with pending work which goes unprocessed until the process
	enters and leaves the kernel again, which could be an
	unbounded amount of time; a pending signal or reschedule
	event could therefore be delayed indefinitely.

	The fix is to notice a nested interrupt in the critical
	window and, if one occurs, fold it into the current
	interrupt's stack frame so it is re-processed iteratively
	rather than recursively.  It will then exit via the normal
	path, and all pending work will be dealt with appropriately.

	Because the nested interrupt handler needs to deal with the
	current stack state in whatever form it's in, we keep things
	simple by only using a single register which is pushed/popped
	on the stack.

	Non-direct iret could be done in the same way, but it would
	require an annoying amount of code duplication.  We'll assume
	that direct mode will be the common case once the hypervisor
	support becomes commonplace.
 */
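/*
	In outline, the code below:
	  1. bails out to a real hypervisor iret for the VM86 and
	     virtual-NMI cases;
	  2. locates this CPU's vcpu_info and sets the event mask from
	     the IF bit of the eflags image being restored;
	  3. inside the critical region, checks for unmasked pending
	     events: if there are any, it re-masks events and jumps to
	     xen_hypervisor_callback so they are folded into this frame,
	     otherwise it simply executes iret.
 */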
ENTRY(xen_iret_direct)
	/* test eflags for special cases */
	testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
	jnz hyper_iret

	push %eax
	ESP_OFFSET=4	# bytes pushed onto stack

	/* Store vcpu_info pointer for easy access.  Do it this
	   way to avoid having to reload %fs */
#ifdef CONFIG_SMP
	GET_THREAD_INFO(%eax)
	movl TI_cpu(%eax),%eax
	movl __per_cpu_offset(,%eax,4),%eax
	lea per_cpu__xen_vcpu_info(%eax),%eax
#else
	movl $per_cpu__xen_vcpu_info, %eax
#endif
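	/* Either way, %eax now holds the address of this CPU's vcpu_info. */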

	/* check IF state we're restoring */
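	/* In the iret frame eflags sits at offset 8 (above eip and cs);
	   the extra +1 addresses its second byte, which holds
	   X86_EFLAGS_IF (bit 9), and ESP_OFFSET accounts for the %eax
	   we pushed above. */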
	testb $X86_EFLAGS_IF>>8, 8+1+ESP_OFFSET(%esp)

	/* Maybe enable events.  Once this happens we could get a
	   recursive event, so the critical region starts immediately
	   afterwards.  However, if that happens we don't end up
	   resuming the code, so we don't have to be worried about
	   being preempted to another CPU. */
	setz XEN_vcpu_info_mask(%eax)
xen_iret_start_crit:

	/* check for unmasked and pending */
	cmpw $0x0001, XEN_vcpu_info_pending(%eax)

	/* If there's something pending, mask events again so we
	   can jump back into xen_hypervisor_callback */
	sete XEN_vcpu_info_mask(%eax)

	popl %eax

	/* From this point on the registers are restored and the stack
	   updated, so we don't need to worry about it if we're preempted */
iret_restore_end:

	/* Jump to hypervisor_callback after fixing up the stack.
	   Events are masked, so jumping out of the critical
	   region is OK. */
	je xen_hypervisor_callback

	iret
xen_iret_end_crit:

hyper_iret:
	/* put this out of line since it's very rarely used */
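	/* The shared hypercall page contains one 32-byte stub per
	   hypercall number, so this lands on the __HYPERVISOR_iret stub. */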
	jmp hypercall_page + __HYPERVISOR_iret * 32

	.globl xen_iret_start_crit, xen_iret_end_crit

/*
   This is called by xen_hypervisor_callback in entry.S when it sees
   that the EIP at the time of the interrupt was between xen_iret_start_crit
   and xen_iret_end_crit.  We're passed the EIP in %eax so we can do
   a more refined determination of what to do.

   The stack format at this point is:
	----------------
	 ss		: (ss/esp may be present if we came from usermode)
	 esp		:
	 eflags		}  outer exception info
	 cs		}
	 eip		}
	---------------- <- edi (copy dest)
	 eax		:  outer eax if it hasn't been restored
	----------------
	 eflags		}  nested exception info
	 cs		}   (no ss/esp because we're nested
	 eip		}    from the same ring)
	 orig_eax	}<- esi (copy src)
	 - - - - - - - -
	 fs		}
	 es		}
	 ds		}  SAVE_ALL state
	 eax		}
	  :		:
	 ebx		}
	----------------
	 return addr	 <- esp
	----------------

   In order to deliver the nested exception properly, we need to shift
   everything from the return addr up to the error code so it
   sits just under the outer exception info.  This means that when we
   handle the exception, we do it in the context of the outer exception
   rather than starting a new one.

   The only caveat is that if the outer eax hasn't been
   restored yet (i.e., it's still on the stack), we need to insert
   its value into the SAVE_ALL state before going on, since
   it's usermode state which we eventually need to restore.
 */
ENTRY(xen_iret_crit_fixup)
	/* offsets +4 for return address */

	/*
	   Paranoia: Make sure we're really coming from kernel space.
	   One could imagine a case where userspace jumps to an address
	   in the critical range, but just before the CPU delivers a GP,
	   it decides to deliver an interrupt instead.  Unlikely?
	   Definitely.  Easy to avoid?  Yes.  The Intel documents
	   explicitly say that the reported EIP for a bad jump is the
	   jump instruction itself, not the destination, but some virtual
	   environments get this wrong.
	 */
	movl PT_CS+4(%esp), %ecx
	andl $SEGMENT_RPL_MASK, %ecx
	cmpl $USER_RPL, %ecx
	je 2f

	lea PT_ORIG_EAX+4(%esp), %esi
	lea PT_EFLAGS+4(%esp), %edi

	/* If eip is before iret_restore_end then the stack
	   hasn't been restored yet. */
	cmp $iret_restore_end, %eax
	jae 1f

	movl 0+4(%edi),%eax		/* copy EAX */
	movl %eax, PT_EAX+4(%esp)

	lea ESP_OFFSET(%edi),%edi	/* move dest up over saved regs */

	/* set up the copy */
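	/* The copy runs backwards: %esi points at the nested frame's
	   orig_eax (the highest long to be moved) and %edi at the slot
	   that long should land in (the nested eflags slot, or the freed
	   outer-%eax slot once its value has been captured above).  With
	   the direction flag set, rep movsl steps both pointers
	   downwards, so the overlapping move is safe and the nested
	   eip/cs/eflags end up overwritten by the shifted data. */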
1:	std
	mov $(PT_EIP+4) / 4, %ecx	/* copy ret+saved regs up to orig_eax */
	rep movsl
	cld

	lea 4(%edi),%esp		/* point esp to new frame */
2:	ret


/*
	Force an event check by making a hypercall,
	but preserve regs before making the call.
 */
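/*
	check_events is reached from the patchable sequences above, which
	run with a much stricter register contract than a normal C call,
	while force_evtchn_callback is ordinary C and may clobber the
	caller-saved registers; so %eax, %ecx and %edx are preserved by
	hand around the call.
 */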
check_events:
	push %eax
	push %ecx
	push %edx
	call force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
	ret