/*---------------------------------------------------------------------------+
 |  mul_Xsig.S                                                               |
 |                                                                           |
 | Multiply a 12 byte fixed point number by a 32 bit, a 64 bit, or another   |
 | 12 byte fixed point number.                                               |
 |                                                                           |
 | Copyright (C) 1992,1994,1995                                              |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 | Call from C as:                                                           |
 |   void mul32_Xsig(Xsig *x, unsigned b)                                    |
 |                                                                           |
 |   void mul64_Xsig(Xsig *x, unsigned long long *b)                         |
 |                                                                           |
 |   void mul_Xsig_Xsig(Xsig *x, Xsig *b)                                    |
 |                                                                           |
 | The result is neither rounded nor normalized, and the ls bit or so may    |
 | be wrong.                                                                 |
 |                                                                           |
 +---------------------------------------------------------------------------*/
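/*
 * Note on the layout assumed below (illustrative summary, not part of the
 * original interface text): an Xsig is three 32 bit words stored least
 * significant first (see fpu_emu.h), so (%esi), 4(%esi) and 8(%esi) are the
 * ls, middle and ms words of *x.  Each routine overwrites *x with the most
 * significant 96 bits of the product; lower bits are truncated, never
 * rounded, and carries from the discarded partial products are lost.
 */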
	.file	"mul_Xsig.S"


#include "fpu_emu.h"

.text
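/*
 * mul32_Xsig: keep the most significant 96 bits of the 128 bit product
 * x * b.  A minimal C sketch of the same arithmetic (illustrative only; the
 * field names lsw/midw/msw are assumed from the Xsig layout noted above,
 * least significant word first):
 *
 *	void mul32_Xsig_ref(Xsig *x, unsigned int b)
 *	{
 *		unsigned long long p0 = (unsigned long long)x->lsw  * b;
 *		unsigned long long p1 = (unsigned long long)x->midw * b;
 *		unsigned long long p2 = (unsigned long long)x->msw  * b;
 *		unsigned long long t;
 *
 *		t = (p0 >> 32) + p1;	/* low 32 bits of p0 are dropped */
 *		x->lsw  = (unsigned int)t;
 *		t = (t >> 32) + p2;
 *		x->midw = (unsigned int)t;
 *		x->msw  = (unsigned int)(t >> 32);
 *	}
 *
 * The assembly below does the same with three mull instructions, building
 * the result in -12(%ebp) (ls word), -8(%ebp) and -4(%ebp) (ms word).
 */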
ENTRY(mul32_Xsig)
	pushl %ebp
	movl %esp,%ebp
	subl $16,%esp		/* scratch for the 96 bit accumulator */
	pushl %esi

	movl PARAM1,%esi	/* x */
	movl PARAM2,%ecx	/* b, the 32 bit multiplier itself */

	xor %eax,%eax
	movl %eax,-4(%ebp)	/* clear the two ms words of the accumulator */
	movl %eax,-8(%ebp)

	movl (%esi),%eax        /* lsl of Xsig */
	mull %ecx		/* b */
	movl %edx,-12(%ebp)	/* only the ms half lands in the result */

	movl 4(%esi),%eax	/* midl of Xsig */
	mull %ecx		/* b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull %ecx		/* b */
	addl %eax,-8(%ebp)
	adcl %edx,-4(%ebp)

	movl -12(%ebp),%eax	/* copy the 96 bit result back to *x */
	movl %eax,(%esi)
	movl -8(%ebp),%eax
	movl %eax,4(%esi)
	movl -4(%ebp),%eax
	movl %eax,8(%esi)

	popl %esi
	leave
	ret
ENDPROC(mul32_Xsig)


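/*
 * mul64_Xsig: keep the most significant 96 bits of the 160 bit product
 * x * (*b).  The x.lsw * b.low partial product lies entirely below the kept
 * window and is skipped, and only the ms halves of the two products that
 * straddle the window boundary are added; the lost carries are where the
 * "ls bit or so may be wrong" note above comes from.  Illustrative C sketch
 * of what is accumulated (same assumed field names as above):
 *
 *	void mul64_Xsig_ref(Xsig *x, const unsigned long long *b)
 *	{
 *		unsigned int blo = (unsigned int)*b;
 *		unsigned int bhi = (unsigned int)(*b >> 32);
 *		unsigned long long mh = (unsigned long long)x->midw * bhi;
 *		unsigned long long hl = (unsigned long long)x->msw  * blo;
 *		unsigned long long hh = (unsigned long long)x->msw  * bhi;
 *		unsigned long long t;
 *
 *		t = ((unsigned long long)x->lsw  * bhi >> 32)	/* ms halves only */
 *		  + ((unsigned long long)x->midw * blo >> 32)
 *		  + (unsigned int)mh + (unsigned int)hl;
 *		x->lsw  = (unsigned int)t;
 *		t = (t >> 32) + (mh >> 32) + (hl >> 32) + (unsigned int)hh;
 *		x->midw = (unsigned int)t;
 *		x->msw  = (unsigned int)((t >> 32) + (hh >> 32));
 *	}
 */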
ENTRY(mul64_Xsig)
	pushl %ebp
	movl %esp,%ebp
	subl $16,%esp		/* scratch for the 96 bit accumulator */
	pushl %esi

	movl PARAM1,%esi	/* x */
	movl PARAM2,%ecx	/* pointer to the 64 bit multiplier b */

	xor %eax,%eax
	movl %eax,-4(%ebp)	/* clear the two ms words of the accumulator */
	movl %eax,-8(%ebp)

	movl (%esi),%eax        /* lsl of Xsig */
	mull 4(%ecx)		/* msl of b */
	movl %edx,-12(%ebp)	/* only the ms half lands in the result */

	movl 4(%esi),%eax	/* midl of Xsig */
	mull (%ecx)		/* lsl of b */
	addl %edx,-12(%ebp)	/* again only the ms half contributes */
	adcl $0,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 4(%esi),%eax	/* midl of Xsig */
	mull 4(%ecx)		/* msl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull (%ecx)		/* lsl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull 4(%ecx)		/* msl of b */
	addl %eax,-8(%ebp)
	adcl %edx,-4(%ebp)

	movl -12(%ebp),%eax	/* copy the 96 bit result back to *x */
	movl %eax,(%esi)
	movl -8(%ebp),%eax
	movl %eax,4(%esi)
	movl -4(%ebp),%eax
	movl %eax,8(%esi)

	popl %esi
	leave
	ret
ENDPROC(mul64_Xsig)



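/*
 * mul_Xsig_Xsig: keep the most significant 96 bits of the 192 bit product
 * x * (*b).  The three lowest 32x32 partial products (x.lsw*b.lsw,
 * x.lsw*b.midw, x.midw*b.lsw) lie entirely below the kept window and are
 * skipped; of the next three only the ms halves are added, so the carries
 * they could generate are lost.  Illustrative C sketch (same assumed field
 * names as above):
 *
 *	void mul_Xsig_Xsig_ref(Xsig *x, const Xsig *b)
 *	{
 *		unsigned long long mh = (unsigned long long)x->midw * b->msw;
 *		unsigned long long hm = (unsigned long long)x->msw  * b->midw;
 *		unsigned long long hh = (unsigned long long)x->msw  * b->msw;
 *		unsigned long long t;
 *
 *		t = ((unsigned long long)x->lsw  * b->msw  >> 32)
 *		  + ((unsigned long long)x->midw * b->midw >> 32)
 *		  + ((unsigned long long)x->msw  * b->lsw  >> 32)
 *		  + (unsigned int)mh + (unsigned int)hm;
 *		x->lsw  = (unsigned int)t;
 *		t = (t >> 32) + (mh >> 32) + (hm >> 32) + (unsigned int)hh;
 *		x->midw = (unsigned int)t;
 *		x->msw  = (unsigned int)((t >> 32) + (hh >> 32));
 *	}
 */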
ENTRY(mul_Xsig_Xsig)
	pushl %ebp
	movl %esp,%ebp
	subl $16,%esp		/* scratch for the 96 bit accumulator */
	pushl %esi

	movl PARAM1,%esi	/* x */
	movl PARAM2,%ecx	/* pointer to the 12 byte multiplier b */

	xor %eax,%eax
	movl %eax,-4(%ebp)	/* clear the two ms words of the accumulator */
	movl %eax,-8(%ebp)

	movl (%esi),%eax        /* lsl of Xsig */
	mull 8(%ecx)		/* msl of b */
	movl %edx,-12(%ebp)	/* only the ms half lands in the result */

	movl 4(%esi),%eax	/* midl of Xsig */
	mull 4(%ecx)		/* midl of b */
	addl %edx,-12(%ebp)	/* again only the ms half contributes */
	adcl $0,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull (%ecx)		/* lsl of b */
	addl %edx,-12(%ebp)	/* again only the ms half contributes */
	adcl $0,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 4(%esi),%eax	/* midl of Xsig */
	mull 8(%ecx)		/* msl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull 4(%ecx)		/* midl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull 8(%ecx)		/* msl of b */
	addl %eax,-8(%ebp)
	adcl %edx,-4(%ebp)

	movl -12(%ebp),%edx	/* copy the 96 bit result back to *x */
	movl %edx,(%esi)
	movl -8(%ebp),%edx
	movl %edx,4(%esi)
	movl -4(%ebp),%edx
	movl %edx,8(%esi)

	popl %esi
	leave
	ret
ENDPROC(mul_Xsig_Xsig)