Gentoo Websites Logo
Go to: Gentoo Home Documentation Forums Lists Bugs Planet Store Wiki Get Gentoo!
View | Details | Raw Unified | Return to bug 75585 | Differences between
and this patch

Collapse All | Expand All

(-)gcc-3.3.3-orig/gcc/config/arm/coff.h (-2 / +6 lines)
Lines 32-42 Link Here
32
#define TARGET_VERSION fputs (" (ARM/coff)", stderr)
32
#define TARGET_VERSION fputs (" (ARM/coff)", stderr)
33
33
34
#undef  TARGET_DEFAULT
34
#undef  TARGET_DEFAULT
35
#define TARGET_DEFAULT (ARM_FLAG_SOFT_FLOAT | ARM_FLAG_APCS_32 | ARM_FLAG_APCS_FRAME)
35
#define TARGET_DEFAULT		\
36
	( ARM_FLAG_SOFT_FLOAT	\
37
	| ARM_FLAG_VFP		\
38
	| ARM_FLAG_APCS_32	\
39
	| ARM_FLAG_APCS_FRAME )
36
40
37
#ifndef MULTILIB_DEFAULTS
41
#ifndef MULTILIB_DEFAULTS
38
#define MULTILIB_DEFAULTS \
42
#define MULTILIB_DEFAULTS \
39
  { "marm", "mlittle-endian", "msoft-float", "mapcs-32", "mno-thumb-interwork" }
43
  { "marm", "mlittle-endian", "mapcs-32", "mno-thumb-interwork" }
40
#endif
44
#endif
41
45
42
/* This is COFF, but prefer stabs.  */
46
/* This is COFF, but prefer stabs.  */
(-)gcc-3.3.3-orig/gcc/config/arm/conix-elf.h (-1 / +4 lines)
Lines 29-35 Link Here
29
29
30
/* Default to using APCS-32 and software floating point.  */
30
/* Default to using APCS-32 and software floating point.  */
31
#undef  TARGET_DEFAULT
31
#undef  TARGET_DEFAULT
32
#define TARGET_DEFAULT	(ARM_FLAG_SOFT_FLOAT | ARM_FLAG_APCS_32)
32
#define TARGET_DEFAULT		\
33
	( ARM_FLAG_SOFT_FLOAT	\
34
	| ARM_FLAG_VFP		\
35
	| ARM_FLAG_APCS_32 )
33
36
34
#ifndef CPP_APCS_PC_DEFAULT_SPEC
37
#ifndef CPP_APCS_PC_DEFAULT_SPEC
35
#define CPP_APCS_PC_DEFAULT_SPEC	"-D__APCS_32__"
38
#define CPP_APCS_PC_DEFAULT_SPEC	"-D__APCS_32__"
(-)gcc-3.3.3-orig/gcc/config/arm/elf.h (-3 / +9 lines)
Lines 46-52 Link Here
46
46
47
#ifndef SUBTARGET_ASM_FLOAT_SPEC
47
#ifndef SUBTARGET_ASM_FLOAT_SPEC
48
#define SUBTARGET_ASM_FLOAT_SPEC "\
48
#define SUBTARGET_ASM_FLOAT_SPEC "\
49
%{mapcs-float:-mfloat} %{msoft-float:-mno-fpu}"
49
%{mapcs-float:-mfloat} \
50
%{mhard-float:-mfpu=fpa} \
51
%{!mhard-float: %{msoft-float:-mfpu=softvfp} %{!msoft-float:-mfpu=softvfp}}"
50
#endif
52
#endif
51
53
52
#ifndef ASM_SPEC
54
#ifndef ASM_SPEC
Lines 106-117 Link Here
106
#endif
108
#endif
107
109
108
#ifndef TARGET_DEFAULT
110
#ifndef TARGET_DEFAULT
109
#define TARGET_DEFAULT (ARM_FLAG_SOFT_FLOAT | ARM_FLAG_APCS_32 | ARM_FLAG_APCS_FRAME)
111
#define TARGET_DEFAULT		\
112
	( ARM_FLAG_SOFT_FLOAT	\
113
	| ARM_FLAG_VFP		\
114
	| ARM_FLAG_APCS_32	\
115
	| ARM_FLAG_APCS_FRAME )
110
#endif
116
#endif
111
117
112
#ifndef MULTILIB_DEFAULTS
118
#ifndef MULTILIB_DEFAULTS
113
#define MULTILIB_DEFAULTS \
119
#define MULTILIB_DEFAULTS \
114
  { "marm", "mlittle-endian", "msoft-float", "mapcs-32", "mno-thumb-interwork", "fno-leading-underscore" }
120
  { "marm", "mlittle-endian", "mapcs-32", "mno-thumb-interwork", "fno-leading-underscore" }
115
#endif
121
#endif
116
122
117
123
(-)gcc-3.3.3-orig/gcc/config/arm/ieee754-df.S (+1224 lines)
Line 0 Link Here
1
/* ieee754-df.S double-precision floating point support for ARM
2
3
   Copyright (C) 2003  Free Software Foundation, Inc.
4
   Contributed by Nicolas Pitre (nico@cam.org)
5
6
   This file is free software; you can redistribute it and/or modify it
7
   under the terms of the GNU General Public License as published by the
8
   Free Software Foundation; either version 2, or (at your option) any
9
   later version.
10
11
   In addition to the permissions in the GNU General Public License, the
12
   Free Software Foundation gives you unlimited permission to link the
13
   compiled version of this file into combinations with other programs,
14
   and to distribute those combinations without any restriction coming
15
   from the use of this file.  (The General Public License restrictions
16
   do apply in other respects; for example, they cover modification of
17
   the file, and distribution when not linked into a combine
18
   executable.)
19
20
   This file is distributed in the hope that it will be useful, but
21
   WITHOUT ANY WARRANTY; without even the implied warranty of
22
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
23
   General Public License for more details.
24
25
   You should have received a copy of the GNU General Public License
26
   along with this program; see the file COPYING.  If not, write to
27
   the Free Software Foundation, 59 Temple Place - Suite 330,
28
   Boston, MA 02111-1307, USA.  */
29
30
/*
31
 * Notes: 
32
 * 
33
 * The goal of this code is to be as fast as possible.  This is
34
 * not meant to be easy to understand for the casual reader.
35
 * For slightly simpler code please see the single precision version
36
 * of this file.
37
 * 
38
 * Only the default rounding mode is intended for best performances.
39
 * Exceptions aren't supported yet, but that can be added quite easily
40
 * if necessary without impacting performances.
41
 */
42
43
44
@ For FPA, float words are always big-endian.
45
@ For VFP, floats words follow the memory system mode.
46
#if defined(__VFP_FP__) && !defined(__ARMEB__)
47
#define xl r0
48
#define xh r1
49
#define yl r2
50
#define yh r3
51
#else
52
#define xh r0
53
#define xl r1
54
#define yh r2
55
#define yl r3
56
#endif
57
58
59
#ifdef L_negdf2
60
61
ARM_FUNC_START negdf2
62
	@ flip sign bit
63
	eor	xh, xh, #0x80000000
64
	RET
65
66
	FUNC_END negdf2
67
68
#endif
69
70
#ifdef L_addsubdf3
71
72
ARM_FUNC_START subdf3
73
	@ flip sign bit of second arg
74
	eor	yh, yh, #0x80000000
75
#if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
76
	b	1f			@ Skip Thumb-code prologue
77
#endif
78
79
ARM_FUNC_START adddf3
80
81
1:	@ Compare both args, return zero if equal but the sign.
82
	teq	xl, yl
83
	eoreq	ip, xh, yh
84
	teqeq	ip, #0x80000000
85
	beq	LSYM(Lad_z)
86
87
	@ If first arg is 0 or -0, return second arg.
88
	@ If second arg is 0 or -0, return first arg.
89
	orrs	ip, xl, xh, lsl #1
90
	moveq	xl, yl
91
	moveq	xh, yh
92
	orrnes	ip, yl, yh, lsl #1
93
	RETc(eq)
94
95
	stmfd	sp!, {r4, r5, lr}
96
97
	@ Mask out exponents.
98
	mov	ip, #0x7f000000
99
	orr	ip, ip, #0x00f00000
100
	and	r4, xh, ip
101
	and	r5, yh, ip
102
103
	@ If either of them is 0x7ff, result will be INF or NAN
104
	teq	r4, ip
105
	teqne	r5, ip
106
	beq	LSYM(Lad_i)
107
108
	@ Compute exponent difference.  Make largest exponent in r4,
109
	@ corresponding arg in xh-xl, and positive exponent difference in r5.
110
	subs	r5, r5, r4
111
	rsblt	r5, r5, #0
112
	ble	1f
113
	add	r4, r4, r5
114
	eor	yl, xl, yl
115
	eor	yh, xh, yh
116
	eor	xl, yl, xl
117
	eor	xh, yh, xh
118
	eor	yl, xl, yl
119
	eor	yh, xh, yh
120
1:
121
122
	@ If exponent difference is too large, return largest argument
123
	@ already in xh-xl.  We need up to 54 bit to handle proper rounding
124
	@ of 0x1p54 - 1.1.
125
	cmp	r5, #(54 << 20)
126
	RETLDM	"r4, r5" hi
127
128
	@ Convert mantissa to signed integer.
129
	tst	xh, #0x80000000
130
	bic	xh, xh, ip, lsl #1
131
	orr	xh, xh, #0x00100000
132
	beq	1f
133
	rsbs	xl, xl, #0
134
	rsc	xh, xh, #0
135
1:
136
	tst	yh, #0x80000000
137
	bic	yh, yh, ip, lsl #1
138
	orr	yh, yh, #0x00100000
139
	beq	1f
140
	rsbs	yl, yl, #0
141
	rsc	yh, yh, #0
142
1:
143
	@ If exponent == difference, one or both args were denormalized.
144
	@ Since this is not common case, rescale them off line.
145
	teq	r4, r5
146
	beq	LSYM(Lad_d)
147
LSYM(Lad_x):
148
	@ Scale down second arg with exponent difference.
149
	@ Apply shift one bit left to first arg and the rest to second arg
150
	@ to simplify things later, but only if exponent does not become 0.
151
	mov	ip, #0
152
	movs	r5, r5, lsr #20
153
	beq	3f
154
	teq	r4, #(1 << 20)
155
	beq	1f
156
	movs	xl, xl, lsl #1
157
	adc	xh, ip, xh, lsl #1
158
	sub	r4, r4, #(1 << 20)
159
	subs	r5, r5, #1
160
	beq	3f
161
162
	@ Shift yh-yl right per r5, keep leftover bits into ip.
163
1:	rsbs	lr, r5, #32
164
	blt	2f
165
	mov	ip, yl, lsl lr
166
	mov	yl, yl, lsr r5
167
	orr	yl, yl, yh, lsl lr
168
	mov	yh, yh, asr r5
169
	b	3f
170
2:	sub	r5, r5, #32
171
	add	lr, lr, #32
172
	cmp	yl, #1
173
	adc	ip, ip, yh, lsl lr
174
	mov	yl, yh, asr r5
175
	mov	yh, yh, asr #32
176
3:
177
	@ the actual addition
178
	adds	xl, xl, yl
179
	adc	xh, xh, yh
180
181
	@ We now have a result in xh-xl-ip.
182
	@ Keep absolute value in xh-xl-ip, sign in r5.
183
	ands	r5, xh, #0x80000000
184
	bpl	LSYM(Lad_p)
185
	rsbs	ip, ip, #0
186
	rscs	xl, xl, #0
187
	rsc	xh, xh, #0
188
189
	@ Determine how to normalize the result.
190
LSYM(Lad_p):
191
	cmp	xh, #0x00100000
192
	bcc	LSYM(Lad_l)
193
	cmp	xh, #0x00200000
194
	bcc	LSYM(Lad_r0)
195
	cmp	xh, #0x00400000
196
	bcc	LSYM(Lad_r1)
197
198
	@ Result needs to be shifted right.
199
	movs	xh, xh, lsr #1
200
	movs	xl, xl, rrx
201
	movs	ip, ip, rrx
202
	orrcs	ip, ip, #1
203
	add	r4, r4, #(1 << 20)
204
LSYM(Lad_r1):
205
	movs	xh, xh, lsr #1
206
	movs	xl, xl, rrx
207
	movs	ip, ip, rrx
208
	orrcs	ip, ip, #1
209
	add	r4, r4, #(1 << 20)
210
211
	@ Our result is now properly aligned into xh-xl, remaining bits in ip.
212
	@ Round with MSB of ip. If halfway between two numbers, round towards
213
	@ LSB of xl = 0.
214
LSYM(Lad_r0):
215
	adds	xl, xl, ip, lsr #31
216
	adc	xh, xh, #0
217
	teq	ip, #0x80000000
218
	biceq	xl, xl, #1
219
220
	@ One extreme rounding case may add a new MSB.  Adjust exponent.
221
	@ That MSB will be cleared when exponent is merged below. 
222
	tst	xh, #0x00200000
223
	addne	r4, r4, #(1 << 20)
224
225
	@ Make sure we did not bust our exponent.
226
	adds	ip, r4, #(1 << 20)
227
	bmi	LSYM(Lad_o)
228
229
	@ Pack final result together.
230
LSYM(Lad_e):
231
	bic	xh, xh, #0x00300000
232
	orr	xh, xh, r4
233
	orr	xh, xh, r5
234
	RETLDM	"r4, r5"
235
236
LSYM(Lad_l):
237
	@ Result must be shifted left and exponent adjusted.
238
	@ No rounding necessary since ip will always be 0.
239
#if __ARM_ARCH__ < 5
240
241
	teq	xh, #0
242
	movne	r3, #-11
243
	moveq	r3, #21
244
	moveq	xh, xl
245
	moveq	xl, #0
246
	mov	r2, xh
247
	movs	ip, xh, lsr #16
248
	moveq	r2, r2, lsl #16
249
	addeq	r3, r3, #16
250
	tst	r2, #0xff000000
251
	moveq	r2, r2, lsl #8
252
	addeq	r3, r3, #8
253
	tst	r2, #0xf0000000
254
	moveq	r2, r2, lsl #4
255
	addeq	r3, r3, #4
256
	tst	r2, #0xc0000000
257
	moveq	r2, r2, lsl #2
258
	addeq	r3, r3, #2
259
	tst	r2, #0x80000000
260
	addeq	r3, r3, #1
261
262
#else
263
264
	teq	xh, #0
265
	moveq	xh, xl
266
	moveq	xl, #0
267
	clz	r3, xh
268
	addeq	r3, r3, #32
269
	sub	r3, r3, #11
270
271
#endif
272
273
	@ determine how to shift the value.
274
	subs	r2, r3, #32
275
	bge	2f
276
	adds	r2, r2, #12
277
	ble	1f
278
279
	@ shift value left 21 to 31 bits, or actually right 11 to 1 bits
280
	@ since a register switch happened above.
281
	add	ip, r2, #20
282
	rsb	r2, r2, #12
283
	mov	xl, xh, lsl ip
284
	mov	xh, xh, lsr r2
285
	b	3f
286
287
	@ actually shift value left 1 to 20 bits, which might also represent
288
	@ 32 to 52 bits if counting the register switch that happened earlier.
289
1:	add	r2, r2, #20
290
2:	rsble	ip, r2, #32
291
	mov	xh, xh, lsl r2
292
	orrle	xh, xh, xl, lsr ip
293
	movle	xl, xl, lsl r2
294
295
	@ adjust exponent accordingly.
296
3:	subs	r4, r4, r3, lsl #20
297
	bgt	LSYM(Lad_e)
298
299
	@ Exponent too small, denormalize result.
300
	@ Find out proper shift value.
301
	mvn	r4, r4, asr #20
302
	subs	r4, r4, #30
303
	bge	2f
304
	adds	r4, r4, #12
305
	bgt	1f
306
307
	@ shift result right of 1 to 20 bits, sign is in r5.
308
	add	r4, r4, #20
309
	rsb	r2, r4, #32
310
	mov	xl, xl, lsr r4
311
	orr	xl, xl, xh, lsl r2
312
	orr	xh, r5, xh, lsr r4
313
	RETLDM	"r4, r5"
314
315
	@ shift result right of 21 to 31 bits, or left 11 to 1 bits after
316
	@ a register switch from xh to xl.
317
1:	rsb	r4, r4, #12
318
	rsb	r2, r4, #32
319
	mov	xl, xl, lsr r2
320
	orr	xl, xl, xh, lsl r4
321
	mov	xh, r5
322
	RETLDM	"r4, r5"
323
324
	@ Shift value right of 32 to 64 bits, or 0 to 32 bits after a switch
325
	@ from xh to xl.
326
2:	mov	xl, xh, lsr r4
327
	mov	xh, r5
328
	RETLDM	"r4, r5"
329
330
	@ Adjust exponents for denormalized arguments.
331
LSYM(Lad_d):
332
	teq	r4, #0
333
	eoreq	xh, xh, #0x00100000
334
	addeq	r4, r4, #(1 << 20)
335
	eor	yh, yh, #0x00100000
336
	subne	r5, r5, #(1 << 20)
337
	b	LSYM(Lad_x)
338
339
	@ Result is x - x = 0, unless x = INF or NAN.
340
LSYM(Lad_z):
341
	sub	ip, ip, #0x00100000	@ ip becomes 0x7ff00000
342
	and	r2, xh, ip
343
	teq	r2, ip
344
	orreq	xh, ip, #0x00080000
345
	movne	xh, #0
346
	mov	xl, #0
347
	RET
348
349
	@ Overflow: return INF.
350
LSYM(Lad_o):
351
	orr	xh, r5, #0x7f000000
352
	orr	xh, xh, #0x00f00000
353
	mov	xl, #0
354
	RETLDM	"r4, r5"
355
356
	@ At least one of x or y is INF/NAN.
357
	@   if xh-xl != INF/NAN: return yh-yl (which is INF/NAN)
358
	@   if yh-yl != INF/NAN: return xh-xl (which is INF/NAN)
359
	@   if either is NAN: return NAN
360
	@   if opposite sign: return NAN
361
	@   return xh-xl (which is INF or -INF)
362
LSYM(Lad_i):
363
	teq	r4, ip
364
	movne	xh, yh
365
	movne	xl, yl
366
	teqeq	r5, ip
367
	RETLDM	"r4, r5" ne
368
369
	orrs	r4, xl, xh, lsl #12
370
	orreqs	r4, yl, yh, lsl #12
371
	teqeq	xh, yh
372
	orrne	xh, r5, #0x00080000
373
	movne	xl, #0
374
	RETLDM	"r4, r5"
375
376
	FUNC_END subdf3
377
	FUNC_END adddf3
378
379
ARM_FUNC_START floatunsidf
380
	teq	r0, #0
381
	moveq	r1, #0
382
	RETc(eq)
383
	stmfd	sp!, {r4, r5, lr}
384
	mov	r4, #(0x400 << 20)	@ initial exponent
385
	add	r4, r4, #((52-1) << 20)
386
	mov	r5, #0			@ sign bit is 0
387
	mov	xl, r0
388
	mov	xh, #0
389
	b	LSYM(Lad_l)
390
391
	FUNC_END floatunsidf
392
393
ARM_FUNC_START floatsidf
394
	teq	r0, #0
395
	moveq	r1, #0
396
	RETc(eq)
397
	stmfd	sp!, {r4, r5, lr}
398
	mov	r4, #(0x400 << 20)	@ initial exponent
399
	add	r4, r4, #((52-1) << 20)
400
	ands	r5, r0, #0x80000000	@ sign bit in r5
401
	rsbmi	r0, r0, #0		@ absolute value
402
	mov	xl, r0
403
	mov	xh, #0
404
	b	LSYM(Lad_l)
405
406
	FUNC_END floatsidf
407
408
ARM_FUNC_START extendsfdf2
409
	movs	r2, r0, lsl #1
410
	beq	1f			@ value is 0.0 or -0.0
411
	mov	xh, r2, asr #3		@ stretch exponent
412
	mov	xh, xh, rrx		@ retrieve sign bit
413
	mov	xl, r2, lsl #28		@ retrieve remaining bits
414
	ands	r2, r2, #0xff000000	@ isolate exponent
415
	beq	2f			@ exponent was 0 but not mantissa
416
	teq	r2, #0xff000000		@ check if INF or NAN
417
	eorne	xh, xh, #0x38000000	@ fixup exponent otherwise.
418
	RET
419
420
1:	mov	xh, r0
421
	mov	xl, #0
422
	RET
423
424
2:	@ value was denormalized.  We can normalize it now.
425
	stmfd	sp!, {r4, r5, lr}
426
	mov	r4, #(0x380 << 20)	@ setup corresponding exponent
427
	add	r4, r4, #(1 << 20)
428
	and	r5, xh, #0x80000000	@ move sign bit in r5
429
	bic	xh, xh, #0x80000000
430
	b	LSYM(Lad_l)
431
432
	FUNC_END extendsfdf2
433
434
#endif /* L_addsubdf3 */
435
436
#ifdef L_muldivdf3
437
438
ARM_FUNC_START muldf3
439
440
	stmfd	sp!, {r4, r5, r6, lr}
441
442
	@ Mask out exponents.
443
	mov	ip, #0x7f000000
444
	orr	ip, ip, #0x00f00000
445
	and	r4, xh, ip
446
	and	r5, yh, ip
447
448
	@ Trap any INF/NAN.
449
	teq	r4, ip
450
	teqne	r5, ip
451
	beq	LSYM(Lml_s)
452
453
	@ Trap any multiplication by 0.
454
	orrs	r6, xl, xh, lsl #1
455
	orrnes	r6, yl, yh, lsl #1
456
	beq	LSYM(Lml_z)
457
458
	@ Shift exponents right one bit to make room for overflow bit.
459
	@ If either of them is 0, scale denormalized arguments off line.
460
	@ Then add both exponents together.
461
	movs	r4, r4, lsr #1
462
	teqne	r5, #0
463
	beq	LSYM(Lml_d)
464
LSYM(Lml_x):
465
	add	r4, r4, r5, asr #1
466
467
	@ Preserve final sign in r4 along with exponent for now.
468
	teq	xh, yh
469
	orrmi	r4, r4, #0x8000
470
471
	@ Convert mantissa to unsigned integer.
472
	bic	xh, xh, ip, lsl #1
473
	bic	yh, yh, ip, lsl #1
474
	orr	xh, xh, #0x00100000
475
	orr	yh, yh, #0x00100000
476
477
#if __ARM_ARCH__ < 4
478
479
	@ Well, no way to make it shorter without the umull instruction.
480
	@ We must perform that 53 x 53 bit multiplication by hand.
481
	stmfd	sp!, {r7, r8, r9, sl, fp}
482
	mov	r7, xl, lsr #16
483
	mov	r8, yl, lsr #16
484
	mov	r9, xh, lsr #16
485
	mov	sl, yh, lsr #16
486
	bic	xl, xl, r7, lsl #16
487
	bic	yl, yl, r8, lsl #16
488
	bic	xh, xh, r9, lsl #16
489
	bic	yh, yh, sl, lsl #16
490
	mul	ip, xl, yl
491
	mul	fp, xl, r8
492
	mov	lr, #0
493
	adds	ip, ip, fp, lsl #16
494
	adc	lr, lr, fp, lsr #16
495
	mul	fp, r7, yl
496
	adds	ip, ip, fp, lsl #16
497
	adc	lr, lr, fp, lsr #16
498
	mul	fp, xl, sl
499
	mov	r5, #0
500
	adds	lr, lr, fp, lsl #16
501
	adc	r5, r5, fp, lsr #16
502
	mul	fp, r7, yh
503
	adds	lr, lr, fp, lsl #16
504
	adc	r5, r5, fp, lsr #16
505
	mul	fp, xh, r8
506
	adds	lr, lr, fp, lsl #16
507
	adc	r5, r5, fp, lsr #16
508
	mul	fp, r9, yl
509
	adds	lr, lr, fp, lsl #16
510
	adc	r5, r5, fp, lsr #16
511
	mul	fp, xh, sl
512
	mul	r6, r9, sl
513
	adds	r5, r5, fp, lsl #16
514
	adc	r6, r6, fp, lsr #16
515
	mul	fp, r9, yh
516
	adds	r5, r5, fp, lsl #16
517
	adc	r6, r6, fp, lsr #16
518
	mul	fp, xl, yh
519
	adds	lr, lr, fp
520
	mul	fp, r7, sl
521
	adcs	r5, r5, fp
522
	mul	fp, xh, yl
523
	adc	r6, r6, #0
524
	adds	lr, lr, fp
525
	mul	fp, r9, r8
526
	adcs	r5, r5, fp
527
	mul	fp, r7, r8
528
	adc	r6, r6, #0
529
	adds	lr, lr, fp
530
	mul	fp, xh, yh
531
	adcs	r5, r5, fp
532
	adc	r6, r6, #0
533
	ldmfd	sp!, {r7, r8, r9, sl, fp}
534
535
#else
536
537
	@ Here is the actual multiplication: 53 bits * 53 bits -> 106 bits.
538
	umull	ip, lr, xl, yl
539
	mov	r5, #0
540
	umlal	lr, r5, xl, yh
541
	umlal	lr, r5, xh, yl
542
	mov	r6, #0
543
	umlal	r5, r6, xh, yh
544
545
#endif
546
547
	@ The LSBs in ip are only significant for the final rounding.
548
	@ Fold them into one bit of lr.
549
	teq	ip, #0
550
	orrne	lr, lr, #1
551
552
	@ Put final sign in xh.
553
	mov	xh, r4, lsl #16
554
	bic	r4, r4, #0x8000
555
556
	@ Adjust result if one extra MSB appeared (one of four times).
557
	tst	r6, #(1 << 9)
558
	beq	1f
559
	add	r4, r4, #(1 << 19)
560
	movs	r6, r6, lsr #1
561
	movs	r5, r5, rrx
562
	movs	lr, lr, rrx
563
	orrcs	lr, lr, #1
564
1:
565
	@ Scale back to 53 bits.
566
	@ xh contains sign bit already.
567
	orr	xh, xh, r6, lsl #12
568
	orr	xh, xh, r5, lsr #20
569
	mov	xl, r5, lsl #12
570
	orr	xl, xl, lr, lsr #20
571
572
	@ Apply exponent bias, check range for underflow.
573
	sub	r4, r4, #0x00f80000
574
	subs	r4, r4, #0x1f000000
575
	ble	LSYM(Lml_u)
576
577
	@ Round the result.
578
	movs	lr, lr, lsl #12
579
	bpl	1f
580
	adds	xl, xl, #1
581
	adc	xh, xh, #0
582
	teq	lr, #0x80000000
583
	biceq	xl, xl, #1
584
585
	@ Rounding may have produced an extra MSB here.
586
	@ The extra bit is cleared before merging the exponent below.
587
	tst	xh, #0x00200000
588
	addne	r4, r4, #(1 << 19)
589
1:
590
	@ Check exponent for overflow.
591
	adds	ip, r4, #(1 << 19)
592
	tst	ip, #(1 << 30)
593
	bne	LSYM(Lml_o)
594
595
	@ Add final exponent.
596
	bic	xh, xh, #0x00300000
597
	orr	xh, xh, r4, lsl #1
598
	RETLDM	"r4, r5, r6"
599
600
	@ Result is 0, but determine sign anyway.
601
LSYM(Lml_z):
602
	eor	xh, xh, yh
603
LSYM(Ldv_z):
604
	bic	xh, xh, #0x7fffffff
605
	mov	xl, #0
606
	RETLDM	"r4, r5, r6"
607
608
	@ Check if denormalized result is possible, otherwise return signed 0.
609
LSYM(Lml_u):
610
	cmn	r4, #(53 << 19)
611
	movle	xl, #0
612
	bicle	xh, xh, #0x7fffffff
613
	RETLDM	"r4, r5, r6" le
614
615
	@ Find out proper shift value.
616
LSYM(Lml_r):
617
	mvn	r4, r4, asr #19
618
	subs	r4, r4, #30
619
	bge	2f
620
	adds	r4, r4, #12
621
	bgt	1f
622
623
	@ shift result right of 1 to 20 bits, preserve sign bit, round, etc.
624
	add	r4, r4, #20
625
	rsb	r5, r4, #32
626
	mov	r3, xl, lsl r5
627
	mov	xl, xl, lsr r4
628
	orr	xl, xl, xh, lsl r5
629
	movs	xh, xh, lsl #1
630
	mov	xh, xh, lsr r4
631
	mov	xh, xh, rrx
632
	adds	xl, xl, r3, lsr #31
633
	adc	xh, xh, #0
634
	teq	lr, #0
635
	teqeq	r3, #0x80000000
636
	biceq	xl, xl, #1
637
	RETLDM	"r4, r5, r6"
638
639
	@ shift result right of 21 to 31 bits, or left 11 to 1 bits after
640
	@ a register switch from xh to xl. Then round.
641
1:	rsb	r4, r4, #12
642
	rsb	r5, r4, #32
643
	mov	r3, xl, lsl r4
644
	mov	xl, xl, lsr r5
645
	orr	xl, xl, xh, lsl r4
646
	bic	xh, xh, #0x7fffffff
647
	adds	xl, xl, r3, lsr #31
648
	adc	xh, xh, #0
649
	teq	lr, #0
650
	teqeq	r3, #0x80000000
651
	biceq	xl, xl, #1
652
	RETLDM	"r4, r5, r6"
653
654
	@ Shift value right of 32 to 64 bits, or 0 to 32 bits after a switch
655
	@ from xh to xl.  Leftover bits are in r3-r6-lr for rounding.
656
2:	rsb	r5, r4, #32
657
	mov	r6, xl, lsl r5
658
	mov	r3, xl, lsr r4
659
	orr	r3, r3, xh, lsl r5
660
	mov	xl, xh, lsr r4
661
	bic	xh, xh, #0x7fffffff
662
	adds	xl, xl, r3, lsr #31
663
	adc	xh, xh, #0
664
	orrs	r6, r6, lr
665
	teqeq	r3, #0x80000000
666
	biceq	xl, xl, #1
667
	RETLDM	"r4, r5, r6"
668
669
	@ One or both arguments are denormalized.
670
	@ Scale them leftwards and preserve sign bit.
671
LSYM(Lml_d):
672
	mov	lr, #0
673
	teq	r4, #0
674
	bne	2f
675
	and	r6, xh, #0x80000000
676
1:	movs	xl, xl, lsl #1
677
	adc	xh, lr, xh, lsl #1
678
	tst	xh, #0x00100000
679
	subeq	r4, r4, #(1 << 19)
680
	beq	1b
681
	orr	xh, xh, r6
682
	teq	r5, #0
683
	bne	LSYM(Lml_x)
684
2:	and	r6, yh, #0x80000000
685
3:	movs	yl, yl, lsl #1
686
	adc	yh, lr, yh, lsl #1
687
	tst	yh, #0x00100000
688
	subeq	r5, r5, #(1 << 20)
689
	beq	3b
690
	orr	yh, yh, r6
691
	b	LSYM(Lml_x)
692
693
	@ One or both args are INF or NAN.
694
LSYM(Lml_s):
695
	orrs	r6, xl, xh, lsl #1
696
	orrnes	r6, yl, yh, lsl #1
697
	beq	LSYM(Lml_n)		@ 0 * INF or INF * 0 -> NAN
698
	teq	r4, ip
699
	bne	1f
700
	orrs	r6, xl, xh, lsl #12
701
	bne	LSYM(Lml_n)		@ NAN * <anything> -> NAN
702
1:	teq	r5, ip
703
	bne	LSYM(Lml_i)
704
	orrs	r6, yl, yh, lsl #12
705
	bne	LSYM(Lml_n)		@ <anything> * NAN -> NAN
706
707
	@ Result is INF, but we need to determine its sign.
708
LSYM(Lml_i):
709
	eor	xh, xh, yh
710
711
	@ Overflow: return INF (sign already in xh).
712
LSYM(Lml_o):
713
	and	xh, xh, #0x80000000
714
	orr	xh, xh, #0x7f000000
715
	orr	xh, xh, #0x00f00000
716
	mov	xl, #0
717
	RETLDM	"r4, r5, r6"
718
719
	@ Return NAN.
720
LSYM(Lml_n):
721
	mov	xh, #0x7f000000
722
	orr	xh, xh, #0x00f80000
723
	RETLDM	"r4, r5, r6"
724
725
	FUNC_END muldf3
726
727
ARM_FUNC_START divdf3
728
729
	stmfd	sp!, {r4, r5, r6, lr}
730
731
	@ Mask out exponents.
732
	mov	ip, #0x7f000000
733
	orr	ip, ip, #0x00f00000
734
	and	r4, xh, ip
735
	and	r5, yh, ip
736
737
	@ Trap any INF/NAN or zeroes.
738
	teq	r4, ip
739
	teqne	r5, ip
740
	orrnes	r6, xl, xh, lsl #1
741
	orrnes	r6, yl, yh, lsl #1
742
	beq	LSYM(Ldv_s)
743
744
	@ Shift exponents right one bit to make room for overflow bit.
745
	@ If either of them is 0, scale denormalized arguments off line.
746
	@ Then subtract divisor exponent from dividend's.
747
	movs	r4, r4, lsr #1
748
	teqne	r5, #0
749
	beq	LSYM(Ldv_d)
750
LSYM(Ldv_x):
751
	sub	r4, r4, r5, asr #1
752
753
	@ Preserve final sign into lr.
754
	eor	lr, xh, yh
755
756
	@ Convert mantissa to unsigned integer.
757
	@ Dividend -> r5-r6, divisor -> yh-yl.
758
	mov	r5, #0x10000000
759
	mov	yh, yh, lsl #12
760
	orr	yh, r5, yh, lsr #4
761
	orr	yh, yh, yl, lsr #24
762
	movs	yl, yl, lsl #8
763
	mov	xh, xh, lsl #12
764
	teqeq	yh, r5
765
	beq	LSYM(Ldv_1)
766
	orr	r5, r5, xh, lsr #4
767
	orr	r5, r5, xl, lsr #24
768
	mov	r6, xl, lsl #8
769
770
	@ Initialize xh with final sign bit.
771
	and	xh, lr, #0x80000000
772
773
	@ Ensure result will land to known bit position.
774
	cmp	r5, yh
775
	cmpeq	r6, yl
776
	bcs	1f
777
	sub	r4, r4, #(1 << 19)
778
	movs	yh, yh, lsr #1
779
	mov	yl, yl, rrx
780
1:
781
	@ Apply exponent bias, check range for over/underflow.
782
	add	r4, r4, #0x1f000000
783
	add	r4, r4, #0x00f80000
784
	cmn	r4, #(53 << 19)
785
	ble	LSYM(Ldv_z)
786
	cmp	r4, ip, lsr #1
787
	bge	LSYM(Lml_o)
788
789
	@ Perform first subtraction to align result to a nibble.
790
	subs	r6, r6, yl
791
	sbc	r5, r5, yh
792
	movs	yh, yh, lsr #1
793
	mov	yl, yl, rrx
794
	mov	xl, #0x00100000
795
	mov	ip, #0x00080000
796
797
	@ The actual division loop.
798
1:	subs	lr, r6, yl
799
	sbcs	lr, r5, yh
800
	subcs	r6, r6, yl
801
	movcs	r5, lr
802
	orrcs	xl, xl, ip
803
	movs	yh, yh, lsr #1
804
	mov	yl, yl, rrx
805
	subs	lr, r6, yl
806
	sbcs	lr, r5, yh
807
	subcs	r6, r6, yl
808
	movcs	r5, lr
809
	orrcs	xl, xl, ip, lsr #1
810
	movs	yh, yh, lsr #1
811
	mov	yl, yl, rrx
812
	subs	lr, r6, yl
813
	sbcs	lr, r5, yh
814
	subcs	r6, r6, yl
815
	movcs	r5, lr
816
	orrcs	xl, xl, ip, lsr #2
817
	movs	yh, yh, lsr #1
818
	mov	yl, yl, rrx
819
	subs	lr, r6, yl
820
	sbcs	lr, r5, yh
821
	subcs	r6, r6, yl
822
	movcs	r5, lr
823
	orrcs	xl, xl, ip, lsr #3
824
825
	orrs	lr, r5, r6
826
	beq	2f
827
	mov	r5, r5, lsl #4
828
	orr	r5, r5, r6, lsr #28
829
	mov	r6, r6, lsl #4
830
	mov	yh, yh, lsl #3
831
	orr	yh, yh, yl, lsr #29
832
	mov	yl, yl, lsl #3
833
	movs	ip, ip, lsr #4
834
	bne	1b
835
836
	@ We are done with a word of the result.
837
	@ Loop again for the low word if this pass was for the high word.
838
	tst	xh, #0x00100000
839
	bne	3f
840
	orr	xh, xh, xl
841
	mov	xl, #0
842
	mov	ip, #0x80000000
843
	b	1b
844
2:
845
	@ Be sure result starts in the high word.
846
	tst	xh, #0x00100000
847
	orreq	xh, xh, xl
848
	moveq	xl, #0
849
3:
850
	@ Check if denormalized result is needed.
851
	cmp	r4, #0
852
	ble	LSYM(Ldv_u)
853
854
	@ Apply proper rounding.
855
	subs	ip, r5, yh
856
	subeqs	ip, r6, yl
857
	adcs	xl, xl, #0
858
	adc	xh, xh, #0
859
	teq	ip, #0
860
	biceq	xl, xl, #1
861
862
	@ Add exponent to result.
863
	bic	xh, xh, #0x00100000
864
	orr	xh, xh, r4, lsl #1
865
	RETLDM	"r4, r5, r6"
866
867
	@ Division by 0x1p*: shortcut a lot of code.
868
LSYM(Ldv_1):
869
	and	lr, lr, #0x80000000
870
	orr	xh, lr, xh, lsr #12
871
	add	r4, r4, #0x1f000000
872
	add	r4, r4, #0x00f80000
873
	cmp	r4, ip, lsr #1
874
	bge	LSYM(Lml_o)
875
	cmp	r4, #0
876
	orrgt	xh, xh, r4, lsl #1
877
	RETLDM	"r4, r5, r6" gt
878
879
	cmn	r4, #(53 << 19)
880
	ble	LSYM(Ldv_z)
881
	orr	xh, xh, #0x00100000
882
	mov	lr, #0
883
	b	LSYM(Lml_r)
884
885
	@ Result must be denormalized: put remainder in lr for
886
	@ rounding considerations.
887
LSYM(Ldv_u):
888
	orr	lr, r5, r6
889
	b	LSYM(Lml_r)
890
891
	@ One or both arguments are denormalized.
892
	@ Scale them leftwards and preserve sign bit.
893
LSYM(Ldv_d):
894
	mov	lr, #0
895
	teq	r4, #0
896
	bne	2f
897
	and	r6, xh, #0x80000000
898
1:	movs	xl, xl, lsl #1
899
	adc	xh, lr, xh, lsl #1
900
	tst	xh, #0x00100000
901
	subeq	r4, r4, #(1 << 19)
902
	beq	1b
903
	orr	xh, xh, r6
904
	teq	r5, #0
905
	bne	LSYM(Ldv_x)
906
2:	and	r6, yh, #0x80000000
907
3:	movs	yl, yl, lsl #1
908
	adc	yh, lr, yh, lsl #1
909
	tst	yh, #0x00100000
910
	subeq	r5, r5, #(1 << 20)
911
	beq	3b
912
	orr	yh, yh, r6
913
	b	LSYM(Ldv_x)
914
915
	@ One or both arguments is either INF, NAN or zero.
916
LSYM(Ldv_s):
917
	teq	r4, ip
918
	teqeq	r5, ip
919
	beq	LSYM(Lml_n)		@ INF/NAN / INF/NAN -> NAN
920
	teq	r4, ip
921
	bne	1f
922
	orrs	r4, xl, xh, lsl #12
923
	bne	LSYM(Lml_n)		@ NAN / <anything> -> NAN
924
	b	LSYM(Lml_i)		@ INF / <anything> -> INF
925
1:	teq	r5, ip
926
	bne	2f
927
	orrs	r5, yl, yh, lsl #12
928
	bne	LSYM(Lml_n)		@ <anything> / NAN -> NAN
929
	b	LSYM(Lml_z)		@ <anything> / INF -> 0
930
2:	@ One or both arguments are 0.
931
	orrs	r4, xl, xh, lsl #1
932
	bne	LSYM(Lml_i)		@ <non_zero> / 0 -> INF
933
	orrs	r5, yl, yh, lsl #1
934
	bne	LSYM(Lml_z)		@ 0 / <non_zero> -> 0
935
	b	LSYM(Lml_n)		@ 0 / 0 -> NAN
936
937
	FUNC_END divdf3
938
939
#endif /* L_muldivdf3 */
940
941
#ifdef L_cmpdf2
942
943
FUNC_START gedf2
944
ARM_FUNC_START gtdf2
945
	mov	ip, #-1
946
	b	1f
947
948
FUNC_START ledf2
949
ARM_FUNC_START ltdf2
950
	mov	ip, #1
951
	b	1f
952
953
FUNC_START nedf2
954
FUNC_START eqdf2
955
ARM_FUNC_START cmpdf2
956
	mov	ip, #1			@ how should we specify unordered here?
957
958
1:	stmfd	sp!, {r4, r5, lr}
959
960
	@ Trap any INF/NAN first.
961
	mov	lr, #0x7f000000
962
	orr	lr, lr, #0x00f00000
963
	and	r4, xh, lr
964
	and	r5, yh, lr
965
	teq	r4, lr
966
	teqne	r5, lr
967
	beq	3f
968
969
	@ Test for equality.
970
	@ Note that 0.0 is equal to -0.0.
971
2:	orrs	ip, xl, xh, lsl #1	@ if x == 0.0 or -0.0
972
	orreqs	ip, yl, yh, lsl #1	@ and y == 0.0 or -0.0
973
	teqne	xh, yh			@ or xh == yh
974
	teqeq	xl, yl			@ and xl == yl
975
	moveq	r0, #0			@ then equal.
976
	RETLDM	"r4, r5" eq
977
978
	@ Check for sign difference.
979
	teq	xh, yh
980
	movmi	r0, xh, asr #31
981
	orrmi	r0, r0, #1
982
	RETLDM	"r4, r5" mi
983
984
	@ Compare exponents.
985
	cmp	r4, r5
986
987
	@ Compare mantissa if exponents are equal.
988
	moveq	xh, xh, lsl #12
989
	cmpeq	xh, yh, lsl #12
990
	cmpeq	xl, yl
991
	movcs	r0, yh, asr #31
992
	mvncc	r0, yh, asr #31
993
	orr	r0, r0, #1
994
	RETLDM	"r4, r5"
995
996
	@ Look for a NAN.
997
3:	teq	r4, lr
998
	bne	4f
999
	orrs	xl, xl, xh, lsl #12
1000
	bne	5f			@ x is NAN
1001
4:	teq	r5, lr
1002
	bne	2b
1003
	orrs	yl, yl, yh, lsl #12
1004
	beq	2b			@ y is not NAN
1005
5:	mov	r0, ip			@ return unordered code from ip
1006
	RETLDM	"r4, r5"
1007
1008
	FUNC_END gedf2
1009
	FUNC_END gtdf2
1010
	FUNC_END ledf2
1011
	FUNC_END ltdf2
1012
	FUNC_END nedf2
1013
	FUNC_END eqdf2
1014
	FUNC_END cmpdf2
1015
1016
#endif /* L_cmpdf2 */
1017
1018
#ifdef L_unorddf2
1019
1020
ARM_FUNC_START unorddf2
1021
	str	lr, [sp, #-4]!
1022
	mov	ip, #0x7f000000
1023
	orr	ip, ip, #0x00f00000
1024
	and	lr, xh, ip
1025
	teq	lr, ip
1026
	bne	1f
1027
	orrs	xl, xl, xh, lsl #12
1028
	bne	3f			@ x is NAN
1029
1:	and	lr, yh, ip
1030
	teq	lr, ip
1031
	bne	2f
1032
	orrs	yl, yl, yh, lsl #12
1033
	bne	3f			@ y is NAN
1034
2:	mov	r0, #0			@ arguments are ordered.
1035
	RETLDM
1036
1037
3:	mov	r0, #1			@ arguments are unordered.
1038
	RETLDM
1039
1040
	FUNC_END unorddf2
1041
1042
#endif /* L_unorddf2 */
1043
1044
#ifdef L_fixdfsi
1045
1046
ARM_FUNC_START fixdfsi
1047
	orrs	ip, xl, xh, lsl #1
1048
	beq	1f			@ value is 0.
1049
1050
	mov	r3, r3, rrx		@ preserve C flag (the actual sign)
1051
1052
	@ check exponent range.
1053
	mov	ip, #0x7f000000
1054
	orr	ip, ip, #0x00f00000
1055
	and	r2, xh, ip
1056
	teq	r2, ip
1057
	beq	2f			@ value is INF or NAN
1058
	bic	ip, ip, #0x40000000
1059
	cmp	r2, ip
1060
	bcc	1f			@ value is too small
1061
	add	ip, ip, #(31 << 20)
1062
	cmp	r2, ip
1063
	bcs	3f			@ value is too large
1064
1065
	rsb	r2, r2, ip
1066
	mov	ip, xh, lsl #11
1067
	orr	ip, ip, #0x80000000
1068
	orr	ip, ip, xl, lsr #21
1069
	mov	r2, r2, lsr #20
1070
	tst	r3, #0x80000000		@ the sign bit
1071
	mov	r0, ip, lsr r2
1072
	rsbne	r0, r0, #0
1073
	RET
1074
1075
1:	mov	r0, #0
1076
	RET
1077
1078
2:	orrs	xl, xl, xh, lsl #12
1079
	bne	4f			@ r0 is NAN.
1080
3:	ands	r0, r3, #0x80000000	@ the sign bit
1081
	moveq	r0, #0x7fffffff		@ maximum signed positive si
1082
	RET
1083
1084
4:	mov	r0, #0			@ How should we convert NAN?
1085
	RET
1086
1087
	FUNC_END fixdfsi
1088
1089
#endif /* L_fixdfsi */
1090
1091
#ifdef L_fixunsdfsi
1092
1093
ARM_FUNC_START fixunsdfsi
1094
	orrs	ip, xl, xh, lsl #1
1095
	movcss	r0, #0			@ value is negative
1096
	RETc(eq)			@ or 0 (xl, xh overlap r0)
1097
1098
	@ check exponent range.
1099
	mov	ip, #0x7f000000
1100
	orr	ip, ip, #0x00f00000
1101
	and	r2, xh, ip
1102
	teq	r2, ip
1103
	beq	2f			@ value is INF or NAN
1104
	bic	ip, ip, #0x40000000
1105
	cmp	r2, ip
1106
	bcc	1f			@ value is too small
1107
	add	ip, ip, #(31 << 20)
1108
	cmp	r2, ip
1109
	bhi	3f			@ value is too large
1110
1111
	rsb	r2, r2, ip
1112
	mov	ip, xh, lsl #11
1113
	orr	ip, ip, #0x80000000
1114
	orr	ip, ip, xl, lsr #21
1115
	mov	r2, r2, lsr #20
1116
	mov	r0, ip, lsr r2
1117
	RET
1118
1119
1:	mov	r0, #0
1120
	RET
1121
1122
2:	orrs	xl, xl, xh, lsl #12
1123
	bne	4f			@ value is NAN.
1124
3:	mov	r0, #0xffffffff		@ maximum unsigned si
1125
	RET
1126
1127
4:	mov	r0, #0			@ How should we convert NAN?
1128
	RET
1129
1130
	FUNC_END fixunsdfsi
1131
1132
#endif /* L_fixunsdfsi */
1133
1134
#ifdef L_truncdfsf2
1135
1136
ARM_FUNC_START truncdfsf2
1137
	orrs	r2, xl, xh, lsl #1
1138
	moveq	r0, r2, rrx
1139
	RETc(eq)			@ value is 0.0 or -0.0
1140
	
1141
	@ check exponent range.
1142
	mov	ip, #0x7f000000
1143
	orr	ip, ip, #0x00f00000
1144
	and	r2, ip, xh
1145
	teq	r2, ip
1146
	beq	2f			@ value is INF or NAN
1147
	bic	xh, xh, ip
1148
	cmp	r2, #(0x380 << 20)
1149
	bls	4f			@ value is too small
1150
1151
	@ shift and round mantissa
1152
1:	movs	r3, xl, lsr #29
1153
	adc	r3, r3, xh, lsl #3
1154
1155
	@ if halfway between two numbers, round towards LSB = 0.
1156
	mov	xl, xl, lsl #3
1157
	teq	xl, #0x80000000
1158
	biceq	r3, r3, #1
1159
1160
	@ rounding might have created an extra MSB.  If so adjust exponent.
1161
	tst	r3, #0x00800000
1162
	addne	r2, r2, #(1 << 20)
1163
	bicne	r3, r3, #0x00800000
1164
1165
	@ check exponent for overflow
1166
	mov	ip, #(0x400 << 20)
1167
	orr	ip, ip, #(0x07f << 20)
1168
	cmp	r2, ip
1169
	bcs	3f			@ overflow
1170
1171
	@ adjust exponent, merge with sign bit and mantissa.
1172
	movs	xh, xh, lsl #1
1173
	mov	r2, r2, lsl #4
1174
	orr	r0, r3, r2, rrx
1175
	eor	r0, r0, #0x40000000
1176
	RET
1177
1178
2:	@ chech for NAN
1179
	orrs	xl, xl, xh, lsl #12
1180
	movne	r0, #0x7f000000
1181
	orrne	r0, r0, #0x00c00000
1182
	RETc(ne)			@ return NAN
1183
1184
3:	@ return INF with sign
1185
	and	r0, xh, #0x80000000
1186
	orr	r0, r0, #0x7f000000
1187
	orr	r0, r0, #0x00800000
1188
	RET
1189
1190
4:	@ check if denormalized value is possible
1191
	subs	r2, r2, #((0x380 - 24) << 20)
1192
	andle	r0, xh, #0x80000000	@ too small, return signed 0.
1193
	RETc(le)
1194
	
1195
	@ denormalize value so we can resume with the code above afterwards.
1196
	orr	xh, xh, #0x00100000
1197
	mov	r2, r2, lsr #20
1198
	rsb	r2, r2, #25
1199
	cmp	r2, #20
1200
	bgt	6f
1201
1202
	rsb	ip, r2, #32
1203
	mov	r3, xl, lsl ip
1204
	mov	xl, xl, lsr r2
1205
	orr	xl, xl, xh, lsl ip
1206
	movs	xh, xh, lsl #1
1207
	mov	xh, xh, lsr r2
1208
	mov	xh, xh, rrx
1209
5:	teq	r3, #0			@ fold r3 bits into the LSB
1210
	orrne	xl, xl, #1		@ for rounding considerations. 
1211
	mov	r2, #(0x380 << 20)	@ equivalent to the 0 float exponent
1212
	b	1b
1213
1214
6:	rsb	r2, r2, #(12 + 20)
1215
	rsb	ip, r2, #32
1216
	mov	r3, xl, lsl r2
1217
	mov	xl, xl, lsr ip
1218
	orr	xl, xl, xh, lsl r2
1219
	and	xh, xh, #0x80000000
1220
	b	5b
1221
1222
	FUNC_END truncdfsf2
1223
1224
#endif /* L_truncdfsf2 */
(-)gcc-3.3.3-orig/gcc/config/arm/ieee754-sf.S (+815 lines)
Line 0 Link Here
1
/* ieee754-sf.S single-precision floating point support for ARM
2
3
   Copyright (C) 2003  Free Software Foundation, Inc.
4
   Contributed by Nicolas Pitre (nico@cam.org)
5
6
   This file is free software; you can redistribute it and/or modify it
7
   under the terms of the GNU General Public License as published by the
8
   Free Software Foundation; either version 2, or (at your option) any
9
   later version.
10
11
   In addition to the permissions in the GNU General Public License, the
12
   Free Software Foundation gives you unlimited permission to link the
13
   compiled version of this file into combinations with other programs,
14
   and to distribute those combinations without any restriction coming
15
   from the use of this file.  (The General Public License restrictions
16
   do apply in other respects; for example, they cover modification of
17
   the file, and distribution when not linked into a combine
18
   executable.)
19
20
   This file is distributed in the hope that it will be useful, but
21
   WITHOUT ANY WARRANTY; without even the implied warranty of
22
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
23
   General Public License for more details.
24
25
   You should have received a copy of the GNU General Public License
26
   along with this program; see the file COPYING.  If not, write to
27
   the Free Software Foundation, 59 Temple Place - Suite 330,
28
   Boston, MA 02111-1307, USA.  */
29
30
/*
31
 * Notes:
32
 *
33
 * The goal of this code is to be as fast as possible.  This is
34
 * not meant to be easy to understand for the casual reader.
35
 *
36
 * Only the default rounding mode is intended for best performances.
37
 * Exceptions aren't supported yet, but that can be added quite easily
38
 * if necessary without impacting performances.
39
 */
40
41
#ifdef L_negsf2
42
	
43
ARM_FUNC_START negsf2
44
	eor	r0, r0, #0x80000000	@ flip sign bit
45
	RET
46
47
	FUNC_END negsf2
48
49
#endif
50
51
#ifdef L_addsubsf3
52
53
ARM_FUNC_START subsf3
54
	eor	r1, r1, #0x80000000	@ flip sign bit of second arg
55
#if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
56
	b	1f			@ Skip Thumb-code prologue
57
#endif
58
59
ARM_FUNC_START addsf3
60
61
1:	@ Compare both args, return zero if equal but the sign.
62
	eor	r2, r0, r1
63
	teq	r2, #0x80000000
64
	beq	LSYM(Lad_z)
65
66
	@ If first arg is 0 or -0, return second arg.
67
	@ If second arg is 0 or -0, return first arg.
68
	bics	r2, r0, #0x80000000
69
	moveq	r0, r1
70
	bicnes	r2, r1, #0x80000000
71
	RETc(eq)
72
73
	@ Mask out exponents.
74
	mov	ip, #0xff000000
75
	and	r2, r0, ip, lsr #1
76
	and	r3, r1, ip, lsr #1
77
78
	@ If either of them is 255, result will be INF or NAN
79
	teq	r2, ip, lsr #1
80
	teqne	r3, ip, lsr #1
81
	beq	LSYM(Lad_i)
82
83
	@ Compute exponent difference.  Make largest exponent in r2,
84
	@ corresponding arg in r0, and positive exponent difference in r3.
85
	subs	r3, r3, r2
86
	addgt	r2, r2, r3
87
	eorgt	r1, r0, r1
88
	eorgt	r0, r1, r0
89
	eorgt	r1, r0, r1
90
	rsblt	r3, r3, #0
91
92
	@ If exponent difference is too large, return largest argument
93
	@ already in r0.  We need up to 25 bit to handle proper rounding
94
	@ of 0x1p25 - 1.1.
95
	cmp	r3, #(25 << 23)
96
	RETc(hi)
97
98
	@ Convert mantissa to signed integer.
99
	tst	r0, #0x80000000
100
	orr	r0, r0, #0x00800000
101
	bic	r0, r0, #0xff000000
102
	rsbne	r0, r0, #0
103
	tst	r1, #0x80000000
104
	orr	r1, r1, #0x00800000
105
	bic	r1, r1, #0xff000000
106
	rsbne	r1, r1, #0
107
108
	@ If exponent == difference, one or both args were denormalized.
109
	@ Since this is not common case, rescale them off line.
110
	teq	r2, r3
111
	beq	LSYM(Lad_d)
112
LSYM(Lad_x):
113
114
	@ Scale down second arg with exponent difference.
115
	@ Apply shift one bit left to first arg and the rest to second arg
116
	@ to simplify things later, but only if exponent does not become 0.
117
	movs	r3, r3, lsr #23
118
	teqne	r2, #(1 << 23)
119
	movne	r0, r0, lsl #1
120
	subne	r2, r2, #(1 << 23)
121
	subne	r3, r3, #1
122
123
	@ Shift second arg into ip, keep leftover bits into r1.
124
	mov	ip, r1, asr r3
125
	rsb	r3, r3, #32
126
	mov	r1, r1, lsl r3
127
128
	add	r0, r0, ip		@ the actual addition
129
130
	@ We now have a 64 bit result in r0-r1.
131
	@ Keep absolute value in r0-r1, sign in r3.
132
	ands	r3, r0, #0x80000000
133
	bpl	LSYM(Lad_p)
134
	rsbs	r1, r1, #0
135
	rsc	r0, r0, #0
136
137
	@ Determine how to normalize the result.
138
LSYM(Lad_p):
139
	cmp	r0, #0x00800000
140
	bcc	LSYM(Lad_l)
141
	cmp	r0, #0x01000000
142
	bcc	LSYM(Lad_r0)
143
	cmp	r0, #0x02000000
144
	bcc	LSYM(Lad_r1)
145
146
	@ Result needs to be shifted right.
147
	movs	r0, r0, lsr #1
148
	mov	r1, r1, rrx
149
	add	r2, r2, #(1 << 23)
150
LSYM(Lad_r1):
151
	movs	r0, r0, lsr #1
152
	mov	r1, r1, rrx
153
	add	r2, r2, #(1 << 23)
154
155
	@ Our result is now properly aligned into r0, remaining bits in r1.
156
	@ Round with MSB of r1. If halfway between two numbers, round towards
157
	@ LSB of r0 = 0. 
158
LSYM(Lad_r0):
159
	add	r0, r0, r1, lsr #31
160
	teq	r1, #0x80000000
161
	biceq	r0, r0, #1
162
163
	@ Rounding may have added a new MSB.  Adjust exponent.
164
	@ That MSB will be cleared when exponent is merged below.
165
	tst	r0, #0x01000000
166
	addne	r2, r2, #(1 << 23)
167
168
	@ Make sure we did not bust our exponent.
169
	cmp	r2, #(254 << 23)
170
	bhi	LSYM(Lad_o)
171
172
	@ Pack final result together.
173
LSYM(Lad_e):
174
	bic	r0, r0, #0x01800000
175
	orr	r0, r0, r2
176
	orr	r0, r0, r3
177
	RET
178
179
	@ Result must be shifted left.
180
	@ No rounding necessary since r1 will always be 0.
181
LSYM(Lad_l):
182
183
#if __ARM_ARCH__ < 5
184
185
	movs	ip, r0, lsr #12
186
	moveq	r0, r0, lsl #12
187
	subeq	r2, r2, #(12 << 23)
188
	tst	r0, #0x00ff0000
189
	moveq	r0, r0, lsl #8
190
	subeq	r2, r2, #(8 << 23)
191
	tst	r0, #0x00f00000
192
	moveq	r0, r0, lsl #4
193
	subeq	r2, r2, #(4 << 23)
194
	tst	r0, #0x00c00000
195
	moveq	r0, r0, lsl #2
196
	subeq	r2, r2, #(2 << 23)
197
	tst	r0, #0x00800000
198
	moveq	r0, r0, lsl #1
199
	subeq	r2, r2, #(1 << 23)
200
	cmp	r2, #0
201
	bgt	LSYM(Lad_e)
202
203
#else
204
205
	clz	ip, r0
206
	sub	ip, ip, #8
207
	mov	r0, r0, lsl ip
208
	subs	r2, r2, ip, lsl #23
209
	bgt	LSYM(Lad_e)
210
211
#endif
212
213
	@ Exponent too small, denormalize result.
214
	mvn	r2, r2, asr #23
215
	add	r2, r2, #2
216
	orr	r0, r3, r0, lsr r2
217
	RET
218
219
	@ Fixup and adjust bit position for denormalized arguments.
220
	@ Note that r2 must not remain equal to 0.
221
LSYM(Lad_d):
222
	teq	r2, #0
223
	eoreq	r0, r0, #0x00800000
224
	addeq	r2, r2, #(1 << 23)
225
	eor	r1, r1, #0x00800000
226
	subne	r3, r3, #(1 << 23)
227
	b	LSYM(Lad_x)
228
229
	@ Result is x - x = 0, unless x is INF or NAN.
230
LSYM(Lad_z):
231
	mov	ip, #0xff000000
232
	and	r2, r0, ip, lsr #1
233
	teq	r2, ip, lsr #1
234
	moveq	r0, ip, asr #2
235
	movne	r0, #0
236
	RET
237
238
	@ Overflow: return INF.
239
LSYM(Lad_o):
240
	orr	r0, r3, #0x7f000000
241
	orr	r0, r0, #0x00800000
242
	RET
243
244
	@ At least one of r0/r1 is INF/NAN.
245
	@   if r0 != INF/NAN: return r1 (which is INF/NAN)
246
	@   if r1 != INF/NAN: return r0 (which is INF/NAN)
247
	@   if r0 or r1 is NAN: return NAN
248
	@   if opposite sign: return NAN
249
	@   return r0 (which is INF or -INF)
250
LSYM(Lad_i):
251
	teq	r2, ip, lsr #1
252
	movne	r0, r1
253
	teqeq	r3, ip, lsr #1
254
	RETc(ne)
255
	movs	r2, r0, lsl #9
256
	moveqs	r2, r1, lsl #9
257
	teqeq	r0, r1
258
	orrne	r0, r3, #0x00400000	@ NAN
259
	RET
260
261
	FUNC_END addsf3
262
	FUNC_END subsf3
263
264
ARM_FUNC_START floatunsisf
265
	mov	r3, #0
266
	b	1f
267
268
ARM_FUNC_START floatsisf
269
	ands	r3, r0, #0x80000000
270
	rsbmi	r0, r0, #0
271
272
1:	teq	r0, #0
273
	RETc(eq)
274
275
	mov	r1, #0
276
	mov	r2, #((127 + 23) << 23)
277
	tst	r0, #0xfc000000
278
	beq	LSYM(Lad_p)
279
280
	@ We need to scale the value a little before branching to code above.
281
	tst	r0, #0xf0000000
282
	movne	r1, r0, lsl #28
283
	movne	r0, r0, lsr #4
284
	addne	r2, r2, #(4 << 23)
285
	tst	r0, #0x0c000000
286
	beq	LSYM(Lad_p)
287
	mov	r1, r1, lsr #2
288
	orr	r1, r1, r0, lsl #30
289
	mov	r0, r0, lsr #2
290
	add	r2, r2, #(2 << 23)
291
	b	LSYM(Lad_p)
292
293
	FUNC_END floatsisf
294
	FUNC_END floatunsisf
295
296
#endif /* L_addsubsf3 */
297
298
#ifdef L_muldivsf3
299
300
ARM_FUNC_START mulsf3
301
302
	@ Mask out exponents.
303
	mov	ip, #0xff000000
304
	and	r2, r0, ip, lsr #1
305
	and	r3, r1, ip, lsr #1
306
307
	@ Trap any INF/NAN.
308
	teq	r2, ip, lsr #1
309
	teqne	r3, ip, lsr #1
310
	beq	LSYM(Lml_s)
311
312
	@ Trap any multiplication by 0.
313
	bics	ip, r0, #0x80000000
314
	bicnes	ip, r1, #0x80000000
315
	beq	LSYM(Lml_z)
316
317
	@ Shift exponents right one bit to make room for overflow bit.
318
	@ If either of them is 0, scale denormalized arguments off line.
319
	@ Then add both exponents together.
320
	movs	r2, r2, lsr #1
321
	teqne	r3, #0
322
	beq	LSYM(Lml_d)
323
LSYM(Lml_x):
324
	add	r2, r2, r3, asr #1
325
326
	@ Preserve final sign in r2 along with exponent for now.
327
	teq	r0, r1
328
	orrmi	r2, r2, #0x8000
329
330
	@ Convert mantissa to unsigned integer.
331
	bic	r0, r0, #0xff000000
332
	bic	r1, r1, #0xff000000
333
	orr	r0, r0, #0x00800000
334
	orr	r1, r1, #0x00800000
335
336
#if __ARM_ARCH__ < 4
337
338
	@ Well, no way to make it shorter without the umull instruction.
339
	@ We must perform that 24 x 24 -> 48 bit multiplication by hand.
340
	stmfd	sp!, {r4, r5}
341
	mov	r4, r0, lsr #16
342
	mov	r5, r1, lsr #16
343
	bic	r0, r0, #0x00ff0000
344
	bic	r1, r1, #0x00ff0000
345
	mul	ip, r4, r5
346
	mul	r3, r0, r1
347
	mul	r0, r5, r0
348
	mla	r0, r4, r1, r0
349
	adds	r3, r3, r0, lsl #16
350
	adc	ip, ip, r0, lsr #16
351
	ldmfd	sp!, {r4, r5}
352
353
#else
354
355
	umull	r3, ip, r0, r1		@ The actual multiplication.
356
357
#endif
358
359
	@ Put final sign in r0.
360
	mov	r0, r2, lsl #16
361
	bic	r2, r2, #0x8000
362
363
	@ Adjust result if one extra MSB appeared.
364
	@ The LSB may be lost but this never changes the result in this case.
365
	tst	ip, #(1 << 15)
366
	addne	r2, r2, #(1 << 22)
367
	movnes	ip, ip, lsr #1
368
	movne	r3, r3, rrx
369
370
	@ Apply exponent bias, check range for underflow.
371
	subs	r2, r2, #(127 << 22)
372
	ble	LSYM(Lml_u)
373
374
	@ Scale back to 24 bits with rounding.
375
	@ r0 contains sign bit already.
376
	orrs	r0, r0, r3, lsr #23
377
	adc	r0, r0, ip, lsl #9
378
379
	@ If halfway between two numbers, rounding should be towards LSB = 0.
380
	mov	r3, r3, lsl #9
381
	teq	r3, #0x80000000
382
	biceq	r0, r0, #1
383
384
	@ Note: rounding may have produced an extra MSB here.
385
	@ The extra bit is cleared before merging the exponent below.
386
	tst	r0, #0x01000000
387
	addne	r2, r2, #(1 << 22)
388
389
	@ Check for exponent overflow
390
	cmp	r2, #(255 << 22)
391
	bge	LSYM(Lml_o)
392
393
	@ Add final exponent.
394
	bic	r0, r0, #0x01800000
395
	orr	r0, r0, r2, lsl #1
396
	RET
397
398
	@ Result is 0, but determine sign anyway.
399
LSYM(Lml_z):	eor	r0, r0, r1
400
	bic	r0, r0, #0x7fffffff
401
	RET
402
403
	@ Check if denormalized result is possible, otherwise return signed 0.
404
LSYM(Lml_u):
405
	cmn	r2, #(24 << 22)
406
	RETc(le)
407
408
	@ Find out proper shift value.
409
	mvn	r1, r2, asr #22
410
	subs	r1, r1, #7
411
	bgt	LSYM(Lml_ur)
412
413
	@ Shift value left, round, etc.
414
	add	r1, r1, #32
415
	orrs	r0, r0, r3, lsr r1
416
	rsb	r1, r1, #32
417
	adc	r0, r0, ip, lsl r1
418
	mov	ip, r3, lsl r1
419
	teq	ip, #0x80000000
420
	biceq	r0, r0, #1
421
	RET
422
423
	@ Shift value right, round, etc.
424
	@ Note: r1 must not be 0 otherwise carry does not get set.
425
LSYM(Lml_ur):
426
	orrs	r0, r0, ip, lsr r1
427
	adc	r0, r0, #0
428
	rsb	r1, r1, #32
429
	mov	ip, ip, lsl r1
430
	teq	r3, #0
431
	teqeq	ip, #0x80000000
432
	biceq	r0, r0, #1
433
	RET
434
435
	@ One or both arguments are denormalized.
436
	@ Scale them leftwards and preserve sign bit.
437
LSYM(Lml_d):
438
	teq	r2, #0
439
	and	ip, r0, #0x80000000
440
1:	moveq	r0, r0, lsl #1
441
	tsteq	r0, #0x00800000
442
	subeq	r2, r2, #(1 << 22)
443
	beq	1b
444
	orr	r0, r0, ip
445
	teq	r3, #0
446
	and	ip, r1, #0x80000000
447
2:	moveq	r1, r1, lsl #1
448
	tsteq	r1, #0x00800000
449
	subeq	r3, r3, #(1 << 23)
450
	beq	2b
451
	orr	r1, r1, ip
452
	b	LSYM(Lml_x)
453
454
	@ One or both args are INF or NAN.
455
LSYM(Lml_s):
456
	teq	r0, #0x0
457
	teqne	r1, #0x0
458
	teqne	r0, #0x80000000
459
	teqne	r1, #0x80000000
460
	beq	LSYM(Lml_n)		@ 0 * INF or INF * 0 -> NAN
461
	teq	r2, ip, lsr #1
462
	bne	1f
463
	movs	r2, r0, lsl #9
464
	bne	LSYM(Lml_n)		@ NAN * <anything> -> NAN
465
1:	teq	r3, ip, lsr #1
466
	bne	LSYM(Lml_i)
467
	movs	r3, r1, lsl #9
468
	bne	LSYM(Lml_n)		@ <anything> * NAN -> NAN
469
470
	@ Result is INF, but we need to determine its sign.
471
LSYM(Lml_i):
472
	eor	r0, r0, r1
473
474
	@ Overflow: return INF (sign already in r0).
475
LSYM(Lml_o):
476
	and	r0, r0, #0x80000000
477
	orr	r0, r0, #0x7f000000
478
	orr	r0, r0, #0x00800000
479
	RET
480
481
	@ Return NAN.
482
LSYM(Lml_n):
483
	mov	r0, #0x7f000000
484
	orr	r0, r0, #0x00c00000
485
	RET
486
487
	FUNC_END mulsf3
488
489
ARM_FUNC_START divsf3
490
491
	@ Mask out exponents.
492
	mov	ip, #0xff000000
493
	and	r2, r0, ip, lsr #1
494
	and	r3, r1, ip, lsr #1
495
496
	@ Trap any INF/NAN or zeroes.
497
	teq	r2, ip, lsr #1
498
	teqne	r3, ip, lsr #1
499
	bicnes	ip, r0, #0x80000000
500
	bicnes	ip, r1, #0x80000000
501
	beq	LSYM(Ldv_s)
502
503
	@ Shift exponents right one bit to make room for overflow bit.
504
	@ If either of them is 0, scale denormalized arguments off line.
505
	@ Then substract divisor exponent from dividend''s.
506
	movs	r2, r2, lsr #1
507
	teqne	r3, #0
508
	beq	LSYM(Ldv_d)
509
LSYM(Ldv_x):
510
	sub	r2, r2, r3, asr #1
511
512
	@ Preserve final sign into ip.
513
	eor	ip, r0, r1
514
515
	@ Convert mantissa to unsigned integer.
516
	@ Dividend -> r3, divisor -> r1.
517
	mov	r3, #0x10000000
518
	movs	r1, r1, lsl #9
519
	mov	r0, r0, lsl #9
520
	beq	LSYM(Ldv_1)
521
	orr	r1, r3, r1, lsr #4
522
	orr	r3, r3, r0, lsr #4
523
524
	@ Initialize r0 (result) with final sign bit.
525
	and	r0, ip, #0x80000000
526
527
	@ Ensure result will land to known bit position.
528
	cmp	r3, r1
529
	subcc	r2, r2, #(1 << 22)
530
	movcc	r3, r3, lsl #1
531
532
	@ Apply exponent bias, check range for over/underflow.
533
	add	r2, r2, #(127 << 22)
534
	cmn	r2, #(24 << 22)
535
	RETc(le)
536
	cmp	r2, #(255 << 22)
537
	bge	LSYM(Lml_o)
538
539
	@ The actual division loop.
540
	mov	ip, #0x00800000
541
1:	cmp	r3, r1
542
	subcs	r3, r3, r1
543
	orrcs	r0, r0, ip
544
	cmp	r3, r1, lsr #1
545
	subcs	r3, r3, r1, lsr #1
546
	orrcs	r0, r0, ip, lsr #1
547
	cmp	r3, r1, lsr #2
548
	subcs	r3, r3, r1, lsr #2
549
	orrcs	r0, r0, ip, lsr #2
550
	cmp	r3, r1, lsr #3
551
	subcs	r3, r3, r1, lsr #3
552
	orrcs	r0, r0, ip, lsr #3
553
	movs	r3, r3, lsl #4
554
	movnes	ip, ip, lsr #4
555
	bne	1b
556
557
	@ Check if denormalized result is needed.
558
	cmp	r2, #0
559
	ble	LSYM(Ldv_u)
560
561
	@ Apply proper rounding.
562
	cmp	r3, r1
563
	addcs	r0, r0, #1
564
	biceq	r0, r0, #1
565
566
	@ Add exponent to result.
567
	bic	r0, r0, #0x00800000
568
	orr	r0, r0, r2, lsl #1
569
	RET
570
571
	@ Division by 0x1p*: let''s shortcut a lot of code.
572
LSYM(Ldv_1):
573
	and	ip, ip, #0x80000000
574
	orr	r0, ip, r0, lsr #9
575
	add	r2, r2, #(127 << 22)
576
	cmp	r2, #(255 << 22)
577
	bge	LSYM(Lml_o)
578
	cmp	r2, #0
579
	orrgt	r0, r0, r2, lsl #1
580
	RETc(gt)
581
	cmn	r2, #(24 << 22)
582
	movle	r0, ip
583
	RETc(le)
584
	orr	r0, r0, #0x00800000
585
	mov	r3, #0
586
587
	@ Result must be denormalized: prepare parameters to use code above.
588
	@ r3 already contains remainder for rounding considerations.
589
LSYM(Ldv_u):
590
	bic	ip, r0, #0x80000000
591
	and	r0, r0, #0x80000000
592
	mvn	r1, r2, asr #22
593
	add	r1, r1, #2
594
	b	LSYM(Lml_ur)
595
596
	@ One or both arguments are denormalized.
597
	@ Scale them leftwards and preserve sign bit.
598
LSYM(Ldv_d):
599
	teq	r2, #0
600
	and	ip, r0, #0x80000000
601
1:	moveq	r0, r0, lsl #1
602
	tsteq	r0, #0x00800000
603
	subeq	r2, r2, #(1 << 22)
604
	beq	1b
605
	orr	r0, r0, ip
606
	teq	r3, #0
607
	and	ip, r1, #0x80000000
608
2:	moveq	r1, r1, lsl #1
609
	tsteq	r1, #0x00800000
610
	subeq	r3, r3, #(1 << 23)
611
	beq	2b
612
	orr	r1, r1, ip
613
	b	LSYM(Ldv_x)
614
615
	@ One or both arguments is either INF, NAN or zero.
616
LSYM(Ldv_s):
617
	mov	ip, #0xff000000
618
	teq	r2, ip, lsr #1
619
	teqeq	r3, ip, lsr #1
620
	beq	LSYM(Lml_n)		@ INF/NAN / INF/NAN -> NAN
621
	teq	r2, ip, lsr #1
622
	bne	1f
623
	movs	r2, r0, lsl #9
624
	bne	LSYM(Lml_n)		@ NAN / <anything> -> NAN
625
	b	LSYM(Lml_i)		@ INF / <anything> -> INF
626
1:	teq	r3, ip, lsr #1
627
	bne	2f
628
	movs	r3, r1, lsl #9
629
	bne	LSYM(Lml_n)		@ <anything> / NAN -> NAN
630
	b	LSYM(Lml_z)		@ <anything> / INF -> 0
631
2:	@ One or both arguments are 0.
632
	bics	r2, r0, #0x80000000
633
	bne	LSYM(Lml_i)		@ <non_zero> / 0 -> INF
634
	bics	r3, r1, #0x80000000
635
	bne	LSYM(Lml_z)		@ 0 / <non_zero> -> 0
636
	b	LSYM(Lml_n)		@ 0 / 0 -> NAN
637
638
	FUNC_END divsf3
639
640
#endif /* L_muldivsf3 */
641
642
#ifdef L_cmpsf2
643
644
FUNC_START gesf2
645
ARM_FUNC_START gtsf2
646
	mov	r3, #-1
647
	b	1f
648
649
FUNC_START lesf2
650
ARM_FUNC_START ltsf2
651
	mov	r3, #1
652
	b	1f
653
654
FUNC_START nesf2
655
FUNC_START eqsf2
656
ARM_FUNC_START cmpsf2
657
	mov	r3, #1			@ how should we specify unordered here?
658
659
1:	@ Trap any INF/NAN first.
660
	mov	ip, #0xff000000
661
	and	r2, r1, ip, lsr #1
662
	teq	r2, ip, lsr #1
663
	and	r2, r0, ip, lsr #1
664
	teqne	r2, ip, lsr #1
665
	beq	3f
666
667
	@ Test for equality.
668
	@ Note that 0.0 is equal to -0.0.
669
2:	orr	r3, r0, r1
670
	bics	r3, r3, #0x80000000	@ either 0.0 or -0.0
671
	teqne	r0, r1			@ or both the same
672
	moveq	r0, #0
673
	RETc(eq)
674
675
	@ Check for sign difference.  The N flag is set if it is the case.
676
	@ If so, return sign of r0.
677
	movmi	r0, r0, asr #31
678
	orrmi	r0, r0, #1
679
	RETc(mi)
680
681
	@ Compare exponents.
682
	and	r3, r1, ip, lsr #1
683
	cmp	r2, r3
684
685
	@ Compare mantissa if exponents are equal
686
	moveq	r0, r0, lsl #9
687
	cmpeq	r0, r1, lsl #9
688
	movcs	r0, r1, asr #31
689
	mvncc	r0, r1, asr #31
690
	orr	r0, r0, #1
691
	RET
692
693
	@ Look for a NAN. 
694
3:	and	r2, r1, ip, lsr #1
695
	teq	r2, ip, lsr #1
696
	bne	4f
697
	movs	r2, r1, lsl #9
698
	bne	5f			@ r1 is NAN
699
4:	and	r2, r0, ip, lsr #1
700
	teq	r2, ip, lsr #1
701
	bne	2b
702
	movs	ip, r0, lsl #9
703
	beq	2b			@ r0 is not NAN
704
5:	mov	r0, r3			@ return unordered code from r3.
705
	RET
706
707
	FUNC_END gesf2
708
	FUNC_END gtsf2
709
	FUNC_END lesf2
710
	FUNC_END ltsf2
711
	FUNC_END nesf2
712
	FUNC_END eqsf2
713
	FUNC_END cmpsf2
714
715
#endif /* L_cmpsf2 */
716
717
#ifdef L_unordsf2
718
719
ARM_FUNC_START unordsf2
720
	mov	ip, #0xff000000
721
	and	r2, r1, ip, lsr #1
722
	teq	r2, ip, lsr #1
723
	bne	1f
724
	movs	r2, r1, lsl #9
725
	bne	3f			@ r1 is NAN
726
1:	and	r2, r0, ip, lsr #1
727
	teq	r2, ip, lsr #1
728
	bne	2f
729
	movs	r2, r0, lsl #9
730
	bne	3f			@ r0 is NAN
731
2:	mov	r0, #0			@ arguments are ordered.
732
	RET
733
3:	mov	r0, #1			@ arguments are unordered.
734
	RET
735
736
	FUNC_END unordsf2
737
738
#endif /* L_unordsf2 */
739
740
#ifdef L_fixsfsi
741
742
ARM_FUNC_START fixsfsi
743
	movs	r0, r0, lsl #1
744
	RETc(eq)			@ value is 0.
745
746
	mov	r1, r1, rrx		@ preserve C flag (the actual sign)
747
748
	@ check exponent range.
749
	and	r2, r0, #0xff000000
750
	cmp	r2, #(127 << 24)
751
	movcc	r0, #0			@ value is too small
752
	RETc(cc)
753
	cmp	r2, #((127 + 31) << 24)
754
	bcs	1f			@ value is too large
755
756
	mov	r0, r0, lsl #7
757
	orr	r0, r0, #0x80000000
758
	mov	r2, r2, lsr #24
759
	rsb	r2, r2, #(127 + 31)
760
	tst	r1, #0x80000000		@ the sign bit
761
	mov	r0, r0, lsr r2
762
	rsbne	r0, r0, #0
763
	RET
764
765
1:	teq	r2, #0xff000000
766
	bne	2f
767
	movs	r0, r0, lsl #8
768
	bne	3f			@ r0 is NAN.
769
2:	ands	r0, r1, #0x80000000	@ the sign bit
770
	moveq	r0, #0x7fffffff		@ the maximum signed positive si
771
	RET
772
773
3:	mov	r0, #0			@ What should we convert NAN to?
774
	RET
775
776
	FUNC_END fixsfsi
777
778
#endif /* L_fixsfsi */
779
780
#ifdef L_fixunssfsi
781
782
ARM_FUNC_START fixunssfsi
783
	movs	r0, r0, lsl #1
784
	movcss	r0, #0			@ value is negative...
785
	RETc(eq)			@ ... or 0.
786
787
788
	@ check exponent range.
789
	and	r2, r0, #0xff000000
790
	cmp	r2, #(127 << 24)
791
	movcc	r0, #0			@ value is too small
792
	RETc(cc)
793
	cmp	r2, #((127 + 32) << 24)
794
	bcs	1f			@ value is too large
795
796
	mov	r0, r0, lsl #7
797
	orr	r0, r0, #0x80000000
798
	mov	r2, r2, lsr #24
799
	rsb	r2, r2, #(127 + 31)
800
	mov	r0, r0, lsr r2
801
	RET
802
803
1:	teq	r2, #0xff000000
804
	bne	2f
805
	movs	r0, r0, lsl #8
806
	bne	3f			@ r0 is NAN.
807
2:	mov	r0, #0xffffffff		@ maximum unsigned si
808
	RET
809
810
3:	mov	r0, #0			@ What should we convert NAN to?
811
	RET
812
813
	FUNC_END fixunssfsi
814
815
#endif /* L_fixunssfsi */
(-)gcc-3.3.3-orig/gcc/config/arm/lib1funcs.asm (-139 / +199 lines)
Lines 51-124 Link Here
51
#endif
51
#endif
52
#define TYPE(x) .type SYM(x),function
52
#define TYPE(x) .type SYM(x),function
53
#define SIZE(x) .size SYM(x), . - SYM(x)
53
#define SIZE(x) .size SYM(x), . - SYM(x)
54
#define LSYM(x) .x
54
#else
55
#else
55
#define __PLT__
56
#define __PLT__
56
#define TYPE(x)
57
#define TYPE(x)
57
#define SIZE(x)
58
#define SIZE(x)
59
#define LSYM(x) x
58
#endif
60
#endif
59
61
60
/* Function end macros.  Variants for 26 bit APCS and interworking.  */
62
/* Function end macros.  Variants for 26 bit APCS and interworking.  */
61
63
64
@ This selects the minimum architecture level required.
65
#define __ARM_ARCH__ 3
66
67
#if defined(__ARM_ARCH_3M__) || defined(__ARM_ARCH_4__) \
68
	|| defined(__ARM_ARCH_4T__)
69
/* We use __ARM_ARCH__ set to 4 here, but in reality it's any processor with
70
   long multiply instructions.  That includes v3M.  */
71
# undef __ARM_ARCH__
72
# define __ARM_ARCH__ 4
73
#endif
74
	
75
#if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
76
	|| defined(__ARM_ARCH_5TE__)
77
# undef __ARM_ARCH__
78
# define __ARM_ARCH__ 5
79
#endif
80
81
/* How to return from a function call depends on the architecture variant.  */
82
62
#ifdef __APCS_26__
83
#ifdef __APCS_26__
84
63
# define RET		movs	pc, lr
85
# define RET		movs	pc, lr
64
# define RETc(x)	mov##x##s	pc, lr
86
# define RETc(x)	mov##x##s	pc, lr
65
# define RETCOND 	^
87
88
#elif (__ARM_ARCH__ > 4) || defined(__thumb__) || defined(__THUMB_INTERWORK__)
89
90
# define RET		bx	lr
91
# define RETc(x)	bx##x	lr
92
93
# if (__ARM_ARCH__ == 4) \
94
	&& (defined(__thumb__) || defined(__THUMB_INTERWORK__))
95
#  define __INTERWORKING__
96
# endif
97
98
#else
99
100
# define RET		mov	pc, lr
101
# define RETc(x)	mov##x	pc, lr
102
103
#endif
104
105
/* Don't pass dirn, it's there just to get token pasting right.  */
106
107
.macro	RETLDM	regs=, cond=, dirn=ia
108
#ifdef __APCS_26__
109
	.ifc "\regs",""
110
	ldm\cond\dirn	sp!, {pc}^
111
	.else
112
	ldm\cond\dirn	sp!, {\regs, pc}^
113
	.endif
114
#elif defined (__INTERWORKING__)
115
	.ifc "\regs",""
116
	ldr\cond	lr, [sp], #4
117
	.else
118
	ldm\cond\dirn	sp!, {\regs, lr}
119
	.endif
120
	bx\cond	lr
121
#else
122
	.ifc "\regs",""
123
	ldr\cond	pc, [sp], #4
124
	.else
125
	ldm\cond\dirn	sp!, {\regs, pc}
126
	.endif
127
#endif
128
.endm
129
130
66
.macro ARM_LDIV0
131
.macro ARM_LDIV0
67
Ldiv0:
132
LSYM(Ldiv0):
68
	str	lr, [sp, #-4]!
133
	str	lr, [sp, #-4]!
69
	bl	SYM (__div0) __PLT__
134
	bl	SYM (__div0) __PLT__
70
	mov	r0, #0			@ About as wrong as it could be.
135
	mov	r0, #0			@ About as wrong as it could be.
71
	ldmia	sp!, {pc}^
136
	RETLDM
72
.endm
137
.endm
73
#else
138
74
# ifdef __THUMB_INTERWORK__
139
75
#  define RET		bx	lr
76
#  define RETc(x)	bx##x	lr
77
.macro THUMB_LDIV0
140
.macro THUMB_LDIV0
78
Ldiv0:
141
LSYM(Ldiv0):
79
	push	{ lr }
142
	push	{ lr }
80
	bl	SYM (__div0)
143
	bl	SYM (__div0)
81
	mov	r0, #0			@ About as wrong as it could be.
144
	mov	r0, #0			@ About as wrong as it could be.
145
#if defined (__INTERWORKING__)
82
	pop	{ r1 }
146
	pop	{ r1 }
83
	bx	r1
147
	bx	r1
84
.endm
148
#else
85
.macro ARM_LDIV0
86
Ldiv0:
87
	str	lr, [sp, #-4]!
88
	bl	SYM (__div0) __PLT__
89
	mov	r0, #0			@ About as wrong as it could be.
90
	ldr	lr, [sp], #4
91
	bx	lr
92
.endm	
93
# else
94
#  define RET		mov	pc, lr
95
#  define RETc(x)	mov##x	pc, lr
96
.macro THUMB_LDIV0
97
Ldiv0:
98
	push	{ lr }
99
	bl	SYM (__div0)
100
	mov	r0, #0			@ About as wrong as it could be.
101
	pop	{ pc }
149
	pop	{ pc }
102
.endm
103
.macro ARM_LDIV0
104
Ldiv0:
105
	str	lr, [sp, #-4]!
106
	bl	SYM (__div0) __PLT__
107
	mov	r0, #0			@ About as wrong as it could be.
108
	ldmia	sp!, {pc}
109
.endm	
110
# endif
111
# define RETCOND
112
#endif
150
#endif
151
.endm
113
152
114
.macro FUNC_END name
153
.macro FUNC_END name
115
Ldiv0:
154
	SIZE (__\name)
155
.endm
156
157
.macro DIV_FUNC_END name
158
LSYM(Ldiv0):
116
#ifdef __thumb__
159
#ifdef __thumb__
117
	THUMB_LDIV0
160
	THUMB_LDIV0
118
#else
161
#else
119
	ARM_LDIV0
162
	ARM_LDIV0
120
#endif
163
#endif
121
	SIZE (__\name)	
164
	FUNC_END \name
122
.endm
165
.endm
123
166
124
.macro THUMB_FUNC_START name
167
.macro THUMB_FUNC_START name
Lines 147-153 Link Here
147
	THUMB_FUNC
190
	THUMB_FUNC
148
SYM (__\name):
191
SYM (__\name):
149
.endm
192
.endm
150
		
193
194
/* Special function that will always be coded in ARM assembly, even if
195
   in Thumb-only compilation.  */
196
197
#if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
198
.macro	ARM_FUNC_START name
199
	FUNC_START \name
200
	bx	pc
201
	nop
202
	.arm
203
_L__\name:		/* A hook to tell gdb that we've switched to ARM */
204
.endm
205
#else
206
.macro	ARM_FUNC_START name
207
	FUNC_START \name
208
.endm
209
#endif
210
151
/* Register aliases.  */
211
/* Register aliases.  */
152
212
153
work		.req	r4	@ XXXX is this safe ?
213
work		.req	r4	@ XXXX is this safe ?
Lines 156-171 Link Here
156
overdone	.req	r2
216
overdone	.req	r2
157
result		.req	r2
217
result		.req	r2
158
curbit		.req	r3
218
curbit		.req	r3
219
#if 0
159
ip		.req	r12
220
ip		.req	r12
160
sp		.req	r13
221
sp		.req	r13
161
lr		.req	r14
222
lr		.req	r14
162
pc		.req	r15
223
pc		.req	r15
163
224
#endif
164
/* ------------------------------------------------------------------------ */
225
/* ------------------------------------------------------------------------ */
165
/*		Bodies of the divsion and modulo routines.		    */
226
/*		Bodies of the division and modulo routines.		    */
166
/* ------------------------------------------------------------------------ */	
227
/* ------------------------------------------------------------------------ */	
167
.macro ARM_DIV_MOD_BODY modulo
228
.macro ARM_DIV_MOD_BODY modulo
168
Loop1:
229
LSYM(Loop1):
169
	@ Unless the divisor is very big, shift it up in multiples of
230
	@ Unless the divisor is very big, shift it up in multiples of
170
	@ four bits, since this is the amount of unwinding in the main
231
	@ four bits, since this is the amount of unwinding in the main
171
	@ division loop.  Continue shifting until the divisor is 
232
	@ division loop.  Continue shifting until the divisor is 
Lines 174-191 Link Here
174
	cmplo	divisor, dividend
235
	cmplo	divisor, dividend
175
	movlo	divisor, divisor, lsl #4
236
	movlo	divisor, divisor, lsl #4
176
	movlo	curbit,  curbit,  lsl #4
237
	movlo	curbit,  curbit,  lsl #4
177
	blo	Loop1
238
	blo	LSYM(Loop1)
178
239
179
Lbignum:
240
LSYM(Lbignum):
180
	@ For very big divisors, we must shift it a bit at a time, or
241
	@ For very big divisors, we must shift it a bit at a time, or
181
	@ we will be in danger of overflowing.
242
	@ we will be in danger of overflowing.
182
	cmp	divisor, #0x80000000
243
	cmp	divisor, #0x80000000
183
	cmplo	divisor, dividend
244
	cmplo	divisor, dividend
184
	movlo	divisor, divisor, lsl #1
245
	movlo	divisor, divisor, lsl #1
185
	movlo	curbit,  curbit,  lsl #1
246
	movlo	curbit,  curbit,  lsl #1
186
	blo	Lbignum
247
	blo	LSYM(Lbignum)
187
248
188
Loop3:
249
LSYM(Loop3):
189
	@ Test for possible subtractions.  On the final pass, this may 
250
	@ Test for possible subtractions.  On the final pass, this may 
190
	@ subtract too much from the dividend ...
251
	@ subtract too much from the dividend ...
191
	
252
	
Lines 226-235 Link Here
226
	cmp	dividend, #0			@ Early termination?
287
	cmp	dividend, #0			@ Early termination?
227
	movnes	curbit,   curbit,  lsr #4	@ No, any more bits to do?
288
	movnes	curbit,   curbit,  lsr #4	@ No, any more bits to do?
228
	movne	divisor,  divisor, lsr #4
289
	movne	divisor,  divisor, lsr #4
229
	bne	Loop3
290
	bne	LSYM(Loop3)
230
291
231
  .if \modulo
292
  .if \modulo
232
Lfixup_dividend:	
293
LSYM(Lfixup_dividend):	
233
	@ Any subtractions that we should not have done will be recorded in
294
	@ Any subtractions that we should not have done will be recorded in
234
	@ the top three bits of OVERDONE.  Exactly which were not needed
295
	@ the top three bits of OVERDONE.  Exactly which were not needed
235
	@ are governed by the position of the bit, stored in IP.
296
	@ are governed by the position of the bit, stored in IP.
Lines 241-247 Link Here
241
	@ the bit in ip could be in the top two bits which might then match
302
	@ the bit in ip could be in the top two bits which might then match
242
	@ with one of the smaller RORs.
303
	@ with one of the smaller RORs.
243
	tstne	ip, #0x7
304
	tstne	ip, #0x7
244
	beq	Lgot_result
305
	beq	LSYM(Lgot_result)
245
	tst	overdone, ip, ror #3
306
	tst	overdone, ip, ror #3
246
	addne	dividend, dividend, divisor, lsr #3
307
	addne	dividend, dividend, divisor, lsr #3
247
	tst	overdone, ip, ror #2
308
	tst	overdone, ip, ror #2
Lines 250-288 Link Here
250
	addne	dividend, dividend, divisor, lsr #1
311
	addne	dividend, dividend, divisor, lsr #1
251
  .endif
312
  .endif
252
313
253
Lgot_result:
314
LSYM(Lgot_result):
254
.endm
315
.endm
255
/* ------------------------------------------------------------------------ */
316
/* ------------------------------------------------------------------------ */
256
.macro THUMB_DIV_MOD_BODY modulo
317
.macro THUMB_DIV_MOD_BODY modulo
257
	@ Load the constant 0x10000000 into our work register.
318
	@ Load the constant 0x10000000 into our work register.
258
	mov	work, #1
319
	mov	work, #1
259
	lsl	work, #28
320
	lsl	work, #28
260
Loop1:
321
LSYM(Loop1):
261
	@ Unless the divisor is very big, shift it up in multiples of
322
	@ Unless the divisor is very big, shift it up in multiples of
262
	@ four bits, since this is the amount of unwinding in the main
323
	@ four bits, since this is the amount of unwinding in the main
263
	@ division loop.  Continue shifting until the divisor is 
324
	@ division loop.  Continue shifting until the divisor is 
264
	@ larger than the dividend.
325
	@ larger than the dividend.
265
	cmp	divisor, work
326
	cmp	divisor, work
266
	bhs	Lbignum
327
	bhs	LSYM(Lbignum)
267
	cmp	divisor, dividend
328
	cmp	divisor, dividend
268
	bhs	Lbignum
329
	bhs	LSYM(Lbignum)
269
	lsl	divisor, #4
330
	lsl	divisor, #4
270
	lsl	curbit,  #4
331
	lsl	curbit,  #4
271
	b	Loop1
332
	b	LSYM(Loop1)
272
Lbignum:
333
LSYM(Lbignum):
273
	@ Set work to 0x80000000
334
	@ Set work to 0x80000000
274
	lsl	work, #3
335
	lsl	work, #3
275
Loop2:
336
LSYM(Loop2):
276
	@ For very big divisors, we must shift it a bit at a time, or
337
	@ For very big divisors, we must shift it a bit at a time, or
277
	@ we will be in danger of overflowing.
338
	@ we will be in danger of overflowing.
278
	cmp	divisor, work
339
	cmp	divisor, work
279
	bhs	Loop3
340
	bhs	LSYM(Loop3)
280
	cmp	divisor, dividend
341
	cmp	divisor, dividend
281
	bhs	Loop3
342
	bhs	LSYM(Loop3)
282
	lsl	divisor, #1
343
	lsl	divisor, #1
283
	lsl	curbit,  #1
344
	lsl	curbit,  #1
284
	b	Loop2
345
	b	LSYM(Loop2)
285
Loop3:
346
LSYM(Loop3):
286
	@ Test for possible subtractions ...
347
	@ Test for possible subtractions ...
287
  .if \modulo
348
  .if \modulo
288
	@ ... On the final pass, this may subtract too much from the dividend, 
349
	@ ... On the final pass, this may subtract too much from the dividend, 
Lines 290-368 Link Here
290
	@ afterwards.
351
	@ afterwards.
291
	mov	overdone, #0
352
	mov	overdone, #0
292
	cmp	dividend, divisor
353
	cmp	dividend, divisor
293
	blo	Lover1
354
	blo	LSYM(Lover1)
294
	sub	dividend, dividend, divisor
355
	sub	dividend, dividend, divisor
295
Lover1:
356
LSYM(Lover1):
296
	lsr	work, divisor, #1
357
	lsr	work, divisor, #1
297
	cmp	dividend, work
358
	cmp	dividend, work
298
	blo	Lover2
359
	blo	LSYM(Lover2)
299
	sub	dividend, dividend, work
360
	sub	dividend, dividend, work
300
	mov	ip, curbit
361
	mov	ip, curbit
301
	mov	work, #1
362
	mov	work, #1
302
	ror	curbit, work
363
	ror	curbit, work
303
	orr	overdone, curbit
364
	orr	overdone, curbit
304
	mov	curbit, ip
365
	mov	curbit, ip
305
Lover2:
366
LSYM(Lover2):
306
	lsr	work, divisor, #2
367
	lsr	work, divisor, #2
307
	cmp	dividend, work
368
	cmp	dividend, work
308
	blo	Lover3
369
	blo	LSYM(Lover3)
309
	sub	dividend, dividend, work
370
	sub	dividend, dividend, work
310
	mov	ip, curbit
371
	mov	ip, curbit
311
	mov	work, #2
372
	mov	work, #2
312
	ror	curbit, work
373
	ror	curbit, work
313
	orr	overdone, curbit
374
	orr	overdone, curbit
314
	mov	curbit, ip
375
	mov	curbit, ip
315
Lover3:
376
LSYM(Lover3):
316
	lsr	work, divisor, #3
377
	lsr	work, divisor, #3
317
	cmp	dividend, work
378
	cmp	dividend, work
318
	blo	Lover4
379
	blo	LSYM(Lover4)
319
	sub	dividend, dividend, work
380
	sub	dividend, dividend, work
320
	mov	ip, curbit
381
	mov	ip, curbit
321
	mov	work, #3
382
	mov	work, #3
322
	ror	curbit, work
383
	ror	curbit, work
323
	orr	overdone, curbit
384
	orr	overdone, curbit
324
	mov	curbit, ip
385
	mov	curbit, ip
325
Lover4:
386
LSYM(Lover4):
326
	mov	ip, curbit
387
	mov	ip, curbit
327
  .else
388
  .else
328
	@ ... and note which bits are done in the result.  On the final pass,
389
	@ ... and note which bits are done in the result.  On the final pass,
329
	@ this may subtract too much from the dividend, but the result will be ok,
390
	@ this may subtract too much from the dividend, but the result will be ok,
330
	@ since the "bit" will have been shifted out at the bottom.
391
	@ since the "bit" will have been shifted out at the bottom.
331
	cmp	dividend, divisor
392
	cmp	dividend, divisor
332
	blo	Lover1
393
	blo	LSYM(Lover1)
333
	sub	dividend, dividend, divisor
394
	sub	dividend, dividend, divisor
334
	orr	result, result, curbit
395
	orr	result, result, curbit
335
Lover1:
396
LSYM(Lover1):
336
	lsr	work, divisor, #1
397
	lsr	work, divisor, #1
337
	cmp	dividend, work
398
	cmp	dividend, work
338
	blo	Lover2
399
	blo	LSYM(Lover2)
339
	sub	dividend, dividend, work
400
	sub	dividend, dividend, work
340
	lsr	work, curbit, #1
401
	lsr	work, curbit, #1
341
	orr	result, work
402
	orr	result, work
342
Lover2:
403
LSYM(Lover2):
343
	lsr	work, divisor, #2
404
	lsr	work, divisor, #2
344
	cmp	dividend, work
405
	cmp	dividend, work
345
	blo	Lover3
406
	blo	LSYM(Lover3)
346
	sub	dividend, dividend, work
407
	sub	dividend, dividend, work
347
	lsr	work, curbit, #2
408
	lsr	work, curbit, #2
348
	orr	result, work
409
	orr	result, work
349
Lover3:
410
LSYM(Lover3):
350
	lsr	work, divisor, #3
411
	lsr	work, divisor, #3
351
	cmp	dividend, work
412
	cmp	dividend, work
352
	blo	Lover4
413
	blo	LSYM(Lover4)
353
	sub	dividend, dividend, work
414
	sub	dividend, dividend, work
354
	lsr	work, curbit, #3
415
	lsr	work, curbit, #3
355
	orr	result, work
416
	orr	result, work
356
Lover4:
417
LSYM(Lover4):
357
  .endif
418
  .endif
358
	
419
	
359
	cmp	dividend, #0			@ Early termination?
420
	cmp	dividend, #0			@ Early termination?
360
	beq	Lover5
421
	beq	LSYM(Lover5)
361
	lsr	curbit,  #4			@ No, any more bits to do?
422
	lsr	curbit,  #4			@ No, any more bits to do?
362
	beq	Lover5
423
	beq	LSYM(Lover5)
363
	lsr	divisor, #4
424
	lsr	divisor, #4
364
	b	Loop3
425
	b	LSYM(Loop3)
365
Lover5:
426
LSYM(Lover5):
366
  .if \modulo
427
  .if \modulo
367
	@ Any subtractions that we should not have done will be recorded in
428
	@ Any subtractions that we should not have done will be recorded in
368
	@ the top three bits of "overdone".  Exactly which were not needed
429
	@ the top three bits of "overdone".  Exactly which were not needed
Lines 370-376 Link Here
370
	mov	work, #0xe
431
	mov	work, #0xe
371
	lsl	work, #28
432
	lsl	work, #28
372
	and	overdone, work
433
	and	overdone, work
373
	beq	Lgot_result
434
	beq	LSYM(Lgot_result)
374
	
435
	
375
	@ If we terminated early, because dividend became zero, then the 
436
	@ If we terminated early, because dividend became zero, then the 
376
	@ bit in ip will not be in the bottom nibble, and we should not
437
	@ bit in ip will not be in the bottom nibble, and we should not
Lines 381-413 Link Here
381
	mov	curbit, ip
442
	mov	curbit, ip
382
	mov	work, #0x7
443
	mov	work, #0x7
383
	tst	curbit, work
444
	tst	curbit, work
384
	beq	Lgot_result
445
	beq	LSYM(Lgot_result)
385
	
446
	
386
	mov	curbit, ip
447
	mov	curbit, ip
387
	mov	work, #3
448
	mov	work, #3
388
	ror	curbit, work
449
	ror	curbit, work
389
	tst	overdone, curbit
450
	tst	overdone, curbit
390
	beq	Lover6
451
	beq	LSYM(Lover6)
391
	lsr	work, divisor, #3
452
	lsr	work, divisor, #3
392
	add	dividend, work
453
	add	dividend, work
393
Lover6:
454
LSYM(Lover6):
394
	mov	curbit, ip
455
	mov	curbit, ip
395
	mov	work, #2
456
	mov	work, #2
396
	ror	curbit, work
457
	ror	curbit, work
397
	tst	overdone, curbit
458
	tst	overdone, curbit
398
	beq	Lover7
459
	beq	LSYM(Lover7)
399
	lsr	work, divisor, #2
460
	lsr	work, divisor, #2
400
	add	dividend, work
461
	add	dividend, work
401
Lover7:
462
LSYM(Lover7):
402
	mov	curbit, ip
463
	mov	curbit, ip
403
	mov	work, #1
464
	mov	work, #1
404
	ror	curbit, work
465
	ror	curbit, work
405
	tst	overdone, curbit
466
	tst	overdone, curbit
406
	beq	Lgot_result
467
	beq	LSYM(Lgot_result)
407
	lsr	work, divisor, #1
468
	lsr	work, divisor, #1
408
	add	dividend, work
469
	add	dividend, work
409
  .endif
470
  .endif
410
Lgot_result:
471
LSYM(Lgot_result):
411
.endm	
472
.endm	
412
/* ------------------------------------------------------------------------ */
473
/* ------------------------------------------------------------------------ */
413
/*		Start of the Real Functions				    */
474
/*		Start of the Real Functions				    */
Lines 419-431 Link Here
419
#ifdef __thumb__
480
#ifdef __thumb__
420
481
421
	cmp	divisor, #0
482
	cmp	divisor, #0
422
	beq	Ldiv0
483
	beq	LSYM(Ldiv0)
423
	mov	curbit, #1
484
	mov	curbit, #1
424
	mov	result, #0
485
	mov	result, #0
425
	
486
	
426
	push	{ work }
487
	push	{ work }
427
	cmp	dividend, divisor
488
	cmp	dividend, divisor
428
	blo	Lgot_result
489
	blo	LSYM(Lgot_result)
429
490
430
	THUMB_DIV_MOD_BODY 0
491
	THUMB_DIV_MOD_BODY 0
431
	
492
	
Lines 436-446 Link Here
436
#else /* ARM version.  */
497
#else /* ARM version.  */
437
	
498
	
438
	cmp	divisor, #0
499
	cmp	divisor, #0
439
	beq	Ldiv0
500
	beq	LSYM(Ldiv0)
440
	mov	curbit, #1
501
	mov	curbit, #1
441
	mov	result, #0
502
	mov	result, #0
442
	cmp	dividend, divisor
503
	cmp	dividend, divisor
443
	blo	Lgot_result
504
	blo	LSYM(Lgot_result)
444
	
505
	
445
	ARM_DIV_MOD_BODY 0
506
	ARM_DIV_MOD_BODY 0
446
	
507
	
Lines 449-455 Link Here
449
510
450
#endif /* ARM version */
511
#endif /* ARM version */
451
512
452
	FUNC_END udivsi3
513
	DIV_FUNC_END udivsi3
453
514
454
#endif /* L_udivsi3 */
515
#endif /* L_udivsi3 */
455
/* ------------------------------------------------------------------------ */
516
/* ------------------------------------------------------------------------ */
Lines 460-472 Link Here
460
#ifdef __thumb__
521
#ifdef __thumb__
461
522
462
	cmp	divisor, #0
523
	cmp	divisor, #0
463
	beq	Ldiv0
524
	beq	LSYM(Ldiv0)
464
	mov	curbit, #1
525
	mov	curbit, #1
465
	cmp	dividend, divisor
526
	cmp	dividend, divisor
466
	bhs	Lover10
527
	bhs	LSYM(Lover10)
467
	RET	
528
	RET	
468
529
469
Lover10:
530
LSYM(Lover10):
470
	push	{ work }
531
	push	{ work }
471
532
472
	THUMB_DIV_MOD_BODY 1
533
	THUMB_DIV_MOD_BODY 1
Lines 477-483 Link Here
477
#else  /* ARM version.  */
538
#else  /* ARM version.  */
478
	
539
	
479
	cmp	divisor, #0
540
	cmp	divisor, #0
480
	beq	Ldiv0
541
	beq	LSYM(Ldiv0)
481
	cmp     divisor, #1
542
	cmp     divisor, #1
482
	cmpne	dividend, divisor
543
	cmpne	dividend, divisor
483
	moveq   dividend, #0
544
	moveq   dividend, #0
Lines 490-496 Link Here
490
551
491
#endif /* ARM version.  */
552
#endif /* ARM version.  */
492
	
553
	
493
	FUNC_END umodsi3
554
	DIV_FUNC_END umodsi3
494
555
495
#endif /* L_umodsi3 */
556
#endif /* L_umodsi3 */
496
/* ------------------------------------------------------------------------ */
557
/* ------------------------------------------------------------------------ */
Lines 500-506 Link Here
500
561
501
#ifdef __thumb__
562
#ifdef __thumb__
502
	cmp	divisor, #0
563
	cmp	divisor, #0
503
	beq	Ldiv0
564
	beq	LSYM(Ldiv0)
504
	
565
	
505
	push	{ work }
566
	push	{ work }
506
	mov	work, dividend
567
	mov	work, dividend
Lines 509-532 Link Here
509
	mov	curbit, #1
570
	mov	curbit, #1
510
	mov	result, #0
571
	mov	result, #0
511
	cmp	divisor, #0
572
	cmp	divisor, #0
512
	bpl	Lover10
573
	bpl	LSYM(Lover10)
513
	neg	divisor, divisor	@ Loops below use unsigned.
574
	neg	divisor, divisor	@ Loops below use unsigned.
514
Lover10:
575
LSYM(Lover10):
515
	cmp	dividend, #0
576
	cmp	dividend, #0
516
	bpl	Lover11
577
	bpl	LSYM(Lover11)
517
	neg	dividend, dividend
578
	neg	dividend, dividend
518
Lover11:
579
LSYM(Lover11):
519
	cmp	dividend, divisor
580
	cmp	dividend, divisor
520
	blo	Lgot_result
581
	blo	LSYM(Lgot_result)
521
582
522
	THUMB_DIV_MOD_BODY 0
583
	THUMB_DIV_MOD_BODY 0
523
	
584
	
524
	mov	r0, result
585
	mov	r0, result
525
	mov	work, ip
586
	mov	work, ip
526
	cmp	work, #0
587
	cmp	work, #0
527
	bpl	Lover12
588
	bpl	LSYM(Lover12)
528
	neg	r0, r0
589
	neg	r0, r0
529
Lover12:
590
LSYM(Lover12):
530
	pop	{ work }
591
	pop	{ work }
531
	RET
592
	RET
532
593
Lines 537-547 Link Here
537
	mov	result, #0
598
	mov	result, #0
538
	cmp	divisor, #0
599
	cmp	divisor, #0
539
	rsbmi	divisor, divisor, #0		@ Loops below use unsigned.
600
	rsbmi	divisor, divisor, #0		@ Loops below use unsigned.
540
	beq	Ldiv0
601
	beq	LSYM(Ldiv0)
541
	cmp	dividend, #0
602
	cmp	dividend, #0
542
	rsbmi	dividend, dividend, #0
603
	rsbmi	dividend, dividend, #0
543
	cmp	dividend, divisor
604
	cmp	dividend, divisor
544
	blo	Lgot_result
605
	blo	LSYM(Lgot_result)
545
606
546
	ARM_DIV_MOD_BODY 0
607
	ARM_DIV_MOD_BODY 0
547
	
608
	
Lines 552-558 Link Here
552
613
553
#endif /* ARM version */
614
#endif /* ARM version */
554
	
615
	
555
	FUNC_END divsi3
616
	DIV_FUNC_END divsi3
556
617
557
#endif /* L_divsi3 */
618
#endif /* L_divsi3 */
558
/* ------------------------------------------------------------------------ */
619
/* ------------------------------------------------------------------------ */
Lines 564-592 Link Here
564
625
565
	mov	curbit, #1
626
	mov	curbit, #1
566
	cmp	divisor, #0
627
	cmp	divisor, #0
567
	beq	Ldiv0
628
	beq	LSYM(Ldiv0)
568
	bpl	Lover10
629
	bpl	LSYM(Lover10)
569
	neg	divisor, divisor		@ Loops below use unsigned.
630
	neg	divisor, divisor		@ Loops below use unsigned.
570
Lover10:
631
LSYM(Lover10):
571
	push	{ work }
632
	push	{ work }
572
	@ Need to save the sign of the dividend, unfortunately, we need
633
	@ Need to save the sign of the dividend, unfortunately, we need
573
	@ work later on.  Must do this after saving the original value of
634
	@ work later on.  Must do this after saving the original value of
574
	@ the work register, because we will pop this value off first.
635
	@ the work register, because we will pop this value off first.
575
	push	{ dividend }
636
	push	{ dividend }
576
	cmp	dividend, #0
637
	cmp	dividend, #0
577
	bpl	Lover11
638
	bpl	LSYM(Lover11)
578
	neg	dividend, dividend
639
	neg	dividend, dividend
579
Lover11:
640
LSYM(Lover11):
580
	cmp	dividend, divisor
641
	cmp	dividend, divisor
581
	blo	Lgot_result
642
	blo	LSYM(Lgot_result)
582
643
583
	THUMB_DIV_MOD_BODY 1
644
	THUMB_DIV_MOD_BODY 1
584
		
645
		
585
	pop	{ work }
646
	pop	{ work }
586
	cmp	work, #0
647
	cmp	work, #0
587
	bpl	Lover12
648
	bpl	LSYM(Lover12)
588
	neg	dividend, dividend
649
	neg	dividend, dividend
589
Lover12:
650
LSYM(Lover12):
590
	pop	{ work }
651
	pop	{ work }
591
	RET	
652
	RET	
592
653
Lines 594-607 Link Here
594
	
655
	
595
	cmp	divisor, #0
656
	cmp	divisor, #0
596
	rsbmi	divisor, divisor, #0		@ Loops below use unsigned.
657
	rsbmi	divisor, divisor, #0		@ Loops below use unsigned.
597
	beq	Ldiv0
658
	beq	LSYM(Ldiv0)
598
	@ Need to save the sign of the dividend, unfortunately, we need
659
	@ Need to save the sign of the dividend, unfortunately, we need
599
	@ ip later on; this is faster than pushing lr and using that.
660
	@ ip later on; this is faster than pushing lr and using that.
600
	str	dividend, [sp, #-4]!
661
	str	dividend, [sp, #-4]!
601
	cmp	dividend, #0			@ Test dividend against zero
662
	cmp	dividend, #0			@ Test dividend against zero
602
	rsbmi	dividend, dividend, #0		@ If negative make positive
663
	rsbmi	dividend, dividend, #0		@ If negative make positive
603
	cmp	dividend, divisor		@ else if zero return zero
664
	cmp	dividend, divisor		@ else if zero return zero
604
	blo	Lgot_result			@ if smaller return dividend
665
	blo	LSYM(Lgot_result)		@ if smaller return dividend
605
	mov	curbit, #1
666
	mov	curbit, #1
606
667
607
	ARM_DIV_MOD_BODY 1
668
	ARM_DIV_MOD_BODY 1
Lines 613-619 Link Here
613
674
614
#endif /* ARM version */
675
#endif /* ARM version */
615
	
676
	
616
	FUNC_END modsi3
677
	DIV_FUNC_END modsi3
617
678
618
#endif /* L_modsi3 */
679
#endif /* L_modsi3 */
619
/* ------------------------------------------------------------------------ */
680
/* ------------------------------------------------------------------------ */
Lines 623-629 Link Here
623
684
624
	RET
685
	RET
625
686
626
	SIZE	(__div0)
687
	FUNC_END div0
627
	
688
	
628
#endif /* L_divmodsi_tools */
689
#endif /* L_divmodsi_tools */
629
/* ------------------------------------------------------------------------ */
690
/* ------------------------------------------------------------------------ */
Lines 636-657 Link Here
636
#define __NR_getpid			(__NR_SYSCALL_BASE+ 20)
697
#define __NR_getpid			(__NR_SYSCALL_BASE+ 20)
637
#define __NR_kill			(__NR_SYSCALL_BASE+ 37)
698
#define __NR_kill			(__NR_SYSCALL_BASE+ 37)
638
699
700
	.code	32
639
	FUNC_START div0
701
	FUNC_START div0
640
702
641
	stmfd	sp!, {r1, lr}
703
	stmfd	sp!, {r1, lr}
642
	swi	__NR_getpid
704
	swi	__NR_getpid
643
	cmn	r0, #1000
705
	cmn	r0, #1000
644
	ldmhsfd	sp!, {r1, pc}RETCOND	@ not much we can do
706
	RETLDM	r1 hs
645
	mov	r1, #SIGFPE
707
	mov	r1, #SIGFPE
646
	swi	__NR_kill
708
	swi	__NR_kill
647
#ifdef __THUMB_INTERWORK__
709
	RETLDM	r1
648
	ldmfd	sp!, {r1, lr}
649
	bx	lr
650
#else
651
	ldmfd	sp!, {r1, pc}RETCOND
652
#endif
653
710
654
	SIZE 	(__div0)
711
	FUNC_END div0
655
	
712
	
656
#endif /* L_dvmd_lnx */
713
#endif /* L_dvmd_lnx */
657
/* ------------------------------------------------------------------------ */
714
/* ------------------------------------------------------------------------ */
Lines 720-743 Link Here
720
777
721
	.code   32
778
	.code   32
722
	.globl _arm_return
779
	.globl _arm_return
723
_arm_return:		
780
_arm_return:
724
	ldmia 	r13!, {r12}
781
	RETLDM
725
	bx 	r12
726
	.code   16
782
	.code   16
727
783
728
.macro interwork register					
784
.macro interwork register
729
	.code   16
785
	.code	16
730
786
731
	THUMB_FUNC_START _interwork_call_via_\register
787
	THUMB_FUNC_START _interwork_call_via_\register
732
788
733
	bx 	pc
789
	bx	pc
734
	nop
790
	nop
735
	
791
736
	.code   32
792
	.code	32
737
	.globl .Lchange_\register
793
	.globl LSYM(Lchange_\register)
738
.Lchange_\register:
794
LSYM(Lchange_\register):
739
	tst	\register, #1
795
	tst	\register, #1
740
	stmeqdb	r13!, {lr}
796
	streq	lr, [sp, #-4]!
741
	adreq	lr, _arm_return
797
	adreq	lr, _arm_return
742
	bx	\register
798
	bx	\register
743
799
Lines 779-781 Link Here
779
	SIZE	(_interwork_call_via_lr)
835
	SIZE	(_interwork_call_via_lr)
780
	
836
	
781
#endif /* L_interwork_call_via_rX */
837
#endif /* L_interwork_call_via_rX */
838
839
#include "ieee754-df.S"
840
#include "ieee754-sf.S"
841
(-)gcc-3.3.3-orig/gcc/config/arm/linux-elf.h (-4 / +21 lines)
Lines 30-38 Link Here
30
/* Do not assume anything about header files.  */
30
/* Do not assume anything about header files.  */
31
#define NO_IMPLICIT_EXTERN_C
31
#define NO_IMPLICIT_EXTERN_C
32
32
33
/* Default is to use APCS-32 mode.  */
33
/*
34
 * Default is to use APCS-32 mode with soft-vfp.
35
 * The old Linux default for floats can be achieved with -mhard-float
36
 * or with the configure --with-float=hard option.
37
 * If -msoft-float or --with-float=soft is used then software float 
38
 * support will be used just like the default but with the legacy
39
 * big endian word ordering for double float representation instead.
40
 */
41
34
#undef  TARGET_DEFAULT
42
#undef  TARGET_DEFAULT
35
#define TARGET_DEFAULT (ARM_FLAG_APCS_32 | ARM_FLAG_MMU_TRAPS)
43
#define TARGET_DEFAULT		\
44
	( ARM_FLAG_APCS_32	\
45
	| ARM_FLAG_SOFT_FLOAT	\
46
	| ARM_FLAG_VFP		\
47
	| ARM_FLAG_MMU_TRAPS )
48
49
#undef  SUBTARGET_EXTRA_ASM_SPEC
50
#define SUBTARGET_EXTRA_ASM_SPEC "\
51
%{mhard-float:-mfpu=fpa} \
52
%{!mhard-float: %{msoft-float:-mfpu=softvfp} %{!msoft-float:-mfpu=softvfp}}"
36
53
37
#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm6
54
#define SUBTARGET_CPU_DEFAULT TARGET_CPU_arm6
Lines 40-46 Link Here
40
40
41
#undef  MULTILIB_DEFAULTS
41
#undef  MULTILIB_DEFAULTS
42
#define MULTILIB_DEFAULTS \
42
#define MULTILIB_DEFAULTS \
43
	{ "marm", "mlittle-endian", "mhard-float", "mapcs-32", "mno-thumb-interwork" }
43
	{ "marm", "mlittle-endian", "mapcs-32", "mno-thumb-interwork" }
44
44
45
#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_32__"
45
#define CPP_APCS_PC_DEFAULT_SPEC "-D__APCS_32__"
46
46
Lines 54-60 Link Here
54
   %{shared:-lc} \
72
   %{shared:-lc} \
55
   %{!shared:%{profile:-lc_p}%{!profile:-lc}}"
73
   %{!shared:%{profile:-lc_p}%{!profile:-lc}}"
56
74
57
#define LIBGCC_SPEC "%{msoft-float:-lfloat} -lgcc"
75
#define LIBGCC_SPEC "-lgcc"
58
76
59
/* Provide a STARTFILE_SPEC appropriate for GNU/Linux.  Here we add
77
/* Provide a STARTFILE_SPEC appropriate for GNU/Linux.  Here we add
60
   the GNU/Linux magical crtbegin.o file (see crtstuff.c) which
78
   the GNU/Linux magical crtbegin.o file (see crtstuff.c) which
(-)gcc-3.3.3-orig/gcc/config/arm/t-linux (-1 / +4 lines)
Lines 7-13 Link Here
7
ENQUIRE=
7
ENQUIRE=
8
8
9
LIB1ASMSRC = arm/lib1funcs.asm
9
LIB1ASMSRC = arm/lib1funcs.asm
10
LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_lnx
10
LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_lnx \
11
	_negdf2 _addsubdf3 _muldivdf3 _cmpdf2 _unorddf2 _fixdfsi _fixunsdfsi \
12
	_truncdfsf2 _negsf2 _addsubsf3 _muldivsf3 _cmpsf2 _unordsf2 \
13
	_fixsfsi _fixunssfsi
11
14
12
# MULTILIB_OPTIONS = mhard-float/msoft-float
15
# MULTILIB_OPTIONS = mhard-float/msoft-float
13
# MULTILIB_DIRNAMES = hard-float soft-float
16
# MULTILIB_DIRNAMES = hard-float soft-float
(-)gcc-3.3.3-orig/gcc/config/arm/unknown-elf.h (-1 / +5 lines)
Lines 29-35 Link Here
29
29
30
/* Default to using APCS-32 and software floating point.  */
30
/* Default to using APCS-32 and software floating point.  */
31
#ifndef TARGET_DEFAULT
31
#ifndef TARGET_DEFAULT
32
#define TARGET_DEFAULT	(ARM_FLAG_SOFT_FLOAT | ARM_FLAG_APCS_32 | ARM_FLAG_APCS_FRAME)
32
#define TARGET_DEFAULT		\
33
	( ARM_FLAG_SOFT_FLOAT	\
34
	| ARM_FLAG_VFP		\
35
	| ARM_FLAG_APCS_32	\
36
	| ARM_FLAG_APCS_FRAME )
33
#endif
37
#endif
34
38
35
/* Now we define the strings used to build the spec file.  */
39
/* Now we define the strings used to build the spec file.  */
(-)gcc-3.3.3-orig/gcc/config/arm/xscale-elf.h (-2 / +5 lines)
Lines 28-36 Link Here
28
#define SUBTARGET_CPU_DEFAULT 		TARGET_CPU_xscale
28
#define SUBTARGET_CPU_DEFAULT 		TARGET_CPU_xscale
29
#endif
29
#endif
30
30
31
#define SUBTARGET_EXTRA_ASM_SPEC "%{!mcpu=*:-mcpu=xscale} %{!mhard-float:-mno-fpu}"
31
#define SUBTARGET_EXTRA_ASM_SPEC "\
32
%{!mcpu=*:-mcpu=xscale} \
33
%{mhard-float:-mfpu=fpa} \
34
%{!mhard-float: %{msoft-float:-mfpu=softvfp} %{!msoft-float:-mfpu=softvfp}}"
32
35
33
#ifndef MULTILIB_DEFAULTS
36
#ifndef MULTILIB_DEFAULTS
34
#define MULTILIB_DEFAULTS \
37
#define MULTILIB_DEFAULTS \
35
  { "mlittle-endian", "mno-thumb-interwork", "marm", "msoft-float" }
38
  { "mlittle-endian", "mno-thumb-interwork", "marm" }
36
#endif
39
#endif

Return to bug 75585