	.file	"shr_Xsig.S"
/*---------------------------------------------------------------------------+
 |  shr_Xsig.S                                                               |
 |                                                                           |
 | 12 byte right shift function                                              |
 |                                                                           |
 | Copyright (C) 1992,1994,1995                                              |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 | Call from C as:                                                           |
 |   void shr_Xsig(Xsig *arg, unsigned nr)                                   |
 |                                                                           |
 |   Extended shift right function.                                          |
 |   Fastest for small shifts.                                               |
 |   Shifts the 12 byte quantity pointed to by the first arg (arg)           |
 |   right by the number of bits specified by the second arg (nr).           |
 |                                                                           |
 +---------------------------------------------------------------------------*/
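
/*
 * Reference only: a minimal C sketch of the same 96-bit right shift,
 * kept out of the build with #if 0 (this file is run through the C
 * preprocessor).  The xsig_ref type and shr_Xsig_ref() helper are
 * illustrative names, not part of the emulator; the layout is assumed
 * to match the three 32-bit word Xsig declared in fpu_emu.h, least
 * significant word first.
 */
#if 0
#include <stdint.h>

typedef struct { uint32_t lsw, midw, msw; } xsig_ref;	/* assumed Xsig layout */

static void shr_Xsig_ref(xsig_ref *x, unsigned int nr)
{
	uint32_t w[3] = { x->lsw, x->midw, x->msw };
	uint32_t r[3] = { 0, 0, 0 };
	unsigned int word = nr / 32, bit = nr % 32;
	unsigned int i;

	/* Each result word takes its bits from w[i + word], topped up with
	   the low bits of the next higher word (what shrd does below). */
	for (i = 0; i + word < 3; i++) {
		uint64_t v = w[i + word];

		if (i + word + 1 < 3)
			v |= (uint64_t)w[i + word + 1] << 32;
		r[i] = (uint32_t)(v >> bit);
	}
	x->lsw = r[0];
	x->midw = r[1];
	x->msw = r[2];
}
#endif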

#include "fpu_emu.h"

.text
ENTRY(shr_Xsig)
	push	%ebp
	movl	%esp,%ebp
	pushl	%esi
	movl	PARAM2,%ecx
	movl	PARAM1,%esi
	cmpl	$32,%ecx	/* shrd only works for 0..31 bits */
	jnc	L_more_than_31

/* less than 32 bits */
	pushl	%ebx
	movl	(%esi),%eax	/* lsl */
	movl	4(%esi),%ebx	/* midl */
	movl	8(%esi),%edx	/* msl */
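/* Cascade the shift across the three words: each shrd refills the bits
   vacated at the top of a word with the low bits of the next higher word. */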
	shrd	%cl,%ebx,%eax
	shrd	%cl,%edx,%ebx
	shr	%cl,%edx
	movl	%eax,(%esi)
	movl	%ebx,4(%esi)
	movl	%edx,8(%esi)
	popl	%ebx
	popl	%esi
	leave
	ret

L_more_than_31:
	cmpl	$64,%ecx
	jnc	L_more_than_63

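/* 32 <= nr <= 63 bits: the low word is shifted out entirely */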
	subb	$32,%cl
	movl	4(%esi),%eax	/* midl */
	movl	8(%esi),%edx	/* msl */
	shrd	%cl,%edx,%eax
	shr	%cl,%edx
	movl	%eax,(%esi)
	movl	%edx,4(%esi)
	movl	$0,8(%esi)
	popl	%esi
	leave
	ret

L_more_than_63:
	cmpl	$96,%ecx
	jnc	L_more_than_95

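/* 64 <= nr <= 95 bits: only bits from the most significant word survive */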
	subb	$64,%cl
	movl	8(%esi),%eax	/* msl */
	shr	%cl,%eax
	xorl	%edx,%edx
	movl	%eax,(%esi)
	movl	%edx,4(%esi)
	movl	%edx,8(%esi)
	popl	%esi
	leave
	ret

L_more_than_95:
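/* 96 or more bits: every bit is shifted out, so the result is zero */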
	xorl	%eax,%eax
	movl	%eax,(%esi)
	movl	%eax,4(%esi)
	movl	%eax,8(%esi)
	popl	%esi
	leave
	ret
ENDPROC(shr_Xsig)