Marc:
Bottom line for this message: I think Gambit needs to generate basic-block-local GVM general registers. Can we have a beta with this change, so we can test it?
Brad
Details:
All the nontrivial operations on large bignums (multiplication, division, gcd) depend on fast multiplication, which we implement using an FFT algorithm. The implementation compares unfavorably with the one in GMP, and I am trying to investigate why.
Here are the run times for the various parts of computing (* a a), where a is (expt 3 5000000).
Allocate a zero-filled f64vector of 2097152 elements (we do this twice, or three times for (* a b)):
36 ms cpu time (12 user, 24 system) 16777248 bytes allocated
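(At the Scheme level this step is essentially just (make-f64vector 2097152 0.); 2097152 * 8 = 16777216 bytes, plus what is presumably a 32-byte header, which matches the 16777248 bytes reported.)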
Extract 8-bit fdigits from a and insert them into the f64vector (we do this twice for (* a b)):
8 ms cpu time (8 user, 0 system) -16 bytes allocated
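For reference, here is roughly what this step amounts to, written naively with generic bitwise operations; the real code works on the bignum's fdigits directly, so this sketch (with a made-up name) only shows the idea, not the cost:

  (define (integer->f64vector-digits! x v)   ; illustrative only, not the actual routine
    (let loop ((x x) (i 0))
      (if (> x 0)
          (begin
            (f64vector-set! v i (exact->inexact (bitwise-and x 255)))
            (loop (arithmetic-shift x -8) (+ i 1))))))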
Calculate the sine and cosine weight vector:
35 ms cpu time (35 user, 0 system) 16 bytes allocated
Calculate another table of sines and cosines:
15 ms cpu time (15 user, 0 system) 16 bytes allocated
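Both weight tables are just tables of sines and cosines (presumably at equally spaced angles), so the cost here is a couple of trig calls per pair of entries. A sketch of the shape of such a table, with a made-up name and layout:

  (define (make-weight-table n)
    (let ((w (make-f64vector n 0.)))
      (do ((j 0 (+ j 2)))
          ((>= j n) w)
        (let ((theta (/ (* 3.141592653589793 j) n)))
          (f64vector-set! w j (cos theta))              ; w[j]   = cos(pi*j/n)
          (f64vector-set! w (+ j 1) (sin theta))))))    ; w[j+1] = sin(pi*j/n)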
Bit-reverse the f64vector containing the digits of a (NOTE THIS ONE; we do this twice, or three times for (* a b)):
138 ms cpu time (137 user, 1 system) -16 bytes allocated
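What bitrv2 accomplishes is the standard bit-reversal permutation of the n complex entries, stored as interleaved real/imaginary pairs. Here is a naive sketch of that permutation (names made up; the real routine works from a precomputed index table, which is what the ___VECTORREFs in the GVM code below are reading, and swaps several pairs per iteration), but the essential behaviour is the same: scattered loads and stores over a 16 Mbyte vector.

  (define (bit-reverse-complex! a n)   ; n = number of complex entries, a power of two
    (define (log2 m)
      (let loop ((m m) (b 0))
        (if (= m 1) b (loop (quotient m 2) (+ b 1)))))
    (define (reverse-bits i bits)
      (let loop ((i i) (bits bits) (r 0))
        (if (= bits 0)
            r
            (loop (arithmetic-shift i -1) (- bits 1) (+ (* 2 r) (bitwise-and i 1))))))
    (let ((bits (log2 n)))
      (do ((i 0 (+ i 1)))
          ((= i n))
        (let ((j (reverse-bits i bits)))
          (if (< i j)                  ; swap each pair only once
              (let ((re (f64vector-ref a (* 2 i)))
                    (im (f64vector-ref a (+ (* 2 i) 1))))
                (f64vector-set! a (* 2 i) (f64vector-ref a (* 2 j)))
                (f64vector-set! a (+ (* 2 i) 1) (f64vector-ref a (+ (* 2 j) 1)))
                (f64vector-set! a (* 2 j) re)
                (f64vector-set! a (+ (* 2 j) 1) im)))))))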
Do a complex FFT (we do this twice, or three times for (* a b)):
218 ms cpu time (212 user, 6 system) 13978480 bytes allocated
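The FFT itself is the usual O(n log n) sweep of butterflies over the same interleaved data. A textbook radix-2 sketch of what runs after the bit reversal (again with a made-up name; the actual routine uses the precomputed weight tables instead of calling sin and cos in the inner loop):

  (define (fft! a n)                   ; n complex entries, already in bit-reversed order
    (let pass ((len 2))                ; butterfly span, doubling each pass
      (if (<= len n)
          (let ((ang (/ -6.283185307179586 len)))
            (do ((i 0 (+ i len)))      ; start of each block of length len
                ((>= i n))
              (do ((j 0 (+ j 1)))      ; butterflies within the block
                  ((= j (quotient len 2)))
                (let* ((wr (cos (* ang j)))          ; twiddle exp(-2*pi*i*j/len)
                       (wi (sin (* ang j)))
                       (p (* 2 (+ i j)))
                       (q (* 2 (+ i j (quotient len 2))))
                       (xr (f64vector-ref a q))
                       (xi (f64vector-ref a (+ q 1)))
                       (tr (- (* wr xr) (* wi xi)))  ; t = w * a[q]
                       (ti (+ (* wr xi) (* wi xr)))
                       (ur (f64vector-ref a p))
                       (ui (f64vector-ref a (+ p 1))))
                  (f64vector-set! a p (+ ur tr))
                  (f64vector-set! a (+ p 1) (+ ui ti))
                  (f64vector-set! a q (- ur tr))
                  (f64vector-set! a (+ q 1) (- ui ti)))))
            (pass (* len 2))))))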
Do some post-processing to turn the complex FFT into a real FFT (we do this twice, or three times for (* a b)):
18 ms cpu time (18 user, 0 system) -16 bytes allocated
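(For reference, the standard way to get the FFT of 2N real values out of one length-N complex FFT, which I assume is what this pass does: pack z[k] = x[2k] + i*x[2k+1], let Z be the complex FFT of z, and then X[k] = (Z[k] + conj(Z[N-k]))/2 - (i/2) * exp(-2*pi*i*k/(2N)) * (Z[k] - conj(Z[N-k])), taking Z[N] = Z[0]; it is a single linear pass over the data.)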
Round and transfer the digits from the result f64vector to the result bignum, as fdigits:
98 ms cpu time (96 user, 2 system) 12160 bytes allocated
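This last step is the usual rounding and carry propagation. Naively, and again only as a sketch with a made-up name (the real code writes the fdigits of the result directly instead of building an integer with generic operations):

  (define (f64vector-digits->integer v n)
    ;; each entry of v is (approximately) a nonnegative integer, the
    ;; convolution of the operands' 8-bit digit sequences, so round it,
    ;; keep the low 8 bits, and carry the rest into the next position
    (let loop ((i 0) (carry 0) (shift 0) (result 0))
      (if (= i n)
          (+ result (arithmetic-shift carry shift))
          (let* ((t (+ carry (inexact->exact (round (f64vector-ref v i)))))
                 (digit (bitwise-and t 255)))
            (loop (+ i 1)
                  (arithmetic-shift t -8)
                  (+ shift 8)
                  (+ result (arithmetic-shift digit shift)))))))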
Now, it all adds up, but what really strikes me here is the CPU time for bitrv2.
Here is the code for the first nontrivial loop of bitrv2, the bit-reversal routine, as GVM code, macro-expanded C, and assembly:
___DEF_GLBL(___L6_bitrv2)
   ___SET_R4(___VECTORREF(___STK(-1),___R2))
   ___SET_STK(1,___FIXASHL(___R3,___FIX(1L)))
   ___SET_R4(___FIXADD(___STK(1),___R4))
   ___SET_STK(1,___VECTORREF(___STK(-1),___R3))
   ___SET_STK(2,___FIXASHL(___R2,___FIX(1L)))
   ___SET_STK(1,___FIXADD(___STK(2),___STK(1)))
   ___SET_STK(2,___FIXADD(___R4,___FIX(0L)))
   ___SET_F64(___F64V1,___F64VECTORREF(___STK(0),___STK(2)))
   ___SET_STK(3,___FIXADD(___R4,___FIX(1L)))
   ___SET_F64(___F64V2,___F64VECTORREF(___STK(0),___STK(3)))
   ___SET_STK(4,___FIXADD(___STK(1),___FIX(0L)))
   ___SET_F64(___F64V3,___F64VECTORREF(___STK(0),___STK(4)))
   ___SET_STK(5,___FIXADD(___R4,___FIX(0L)))
   ___F64VECTORSET(___STK(0),___STK(5),___F64V3)
   ___SET_STK(4,___FIXADD(___STK(1),___FIX(1L)))
   ___SET_F64(___F64V4,___F64VECTORREF(___STK(0),___STK(4)))
   ___SET_R4(___FIXADD(___R4,___FIX(1L)))
   ___F64VECTORSET(___STK(0),___R4,___F64V4)
   ___SET_R4(___FIXADD(___STK(1),___FIX(0L)))
   ___F64VECTORSET(___STK(0),___R4,___F64V1)
   ___SET_R4(___FIXADD(___STK(1),___FIX(1L)))
   ___F64VECTORSET(___STK(0),___R4,___F64V2)
   ___SET_R3(___FIXADD(___R3,___FIX(1L)))
   ___IF(___FIXLT(___R3,___R2))
   ___GOTO(___L6_bitrv2)
   ___END_IF
___L6_bitrv2:
   ___r4=(*(long*)(((long)(((long*)((___fp[-(-1)])-(1)))+1))+((___r2)<<(3-2))));
   ___fp[-(1)]=(((___r3)<<(((((long)(1L))<<2))>>2)));
   ___r4=(((long)((___fp[-(1)])+(___r4))));
   ___fp[-(1)]=(*(long*)(((long)(((long*)((___fp[-(-1)])-(1)))+1))+((___r3)<<(3-2))));
   ___fp[-(2)]=(((___r2)<<(((((long)(1L))<<2))>>2)));
   ___fp[-(1)]=(((long)((___fp[-(2)])+(___fp[-(1)]))));
   ___fp[-(2)]=(((long)((___r4)+((((long)(0L))<<2)))));
   ___F64V1=*(double*)(((long)(((long*)((___fp[-(0)])-(1)))+1))+((___fp[-(2)])<<(3-2)));
   ___fp[-(3)]=(((long)((___r4)+((((long)(1L))<<2)))));
   ___F64V2=*(double*)(((long)(((long*)((___fp[-(0)])-(1)))+1))+((___fp[-(3)])<<(3-2)));
   ___fp[-(4)]=(((long)((___fp[-(1)])+((((long)(0L))<<2)))));
   ___F64V3=*(double*)(((long)(((long*)((___fp[-(0)])-(1)))+1))+((___fp[-(4)])<<(3-2)));
   ___fp[-(5)]=(((long)((___r4)+((((long)(0L))<<2)))));
   *(double*)(((long)(((long*)((___fp[-(0)])-(1)))+1))+((___fp[-(5)])<<(3-2)))=___F64V3;
   ___fp[-(4)]=(((long)((___fp[-(1)])+((((long)(1L))<<2)))));
   ___F64V4=*(double*)(((long)(((long*)((___fp[-(0)])-(1)))+1))+((___fp[-(4)])<<(3-2)));
   ___r4=(((long)((___r4)+((((long)(1L))<<2)))));
   *(double*)(((long)(((long*)((___fp[-(0)])-(1)))+1))+((___r4)<<(3-2)))=___F64V4;
   ___r4=(((long)((___fp[-(1)])+((((long)(0L))<<2)))));
   *(double*)(((long)(((long*)((___fp[-(0)])-(1)))+1))+((___r4)<<(3-2)))=___F64V1;
   ___r4=(((long)((___fp[-(1)])+((((long)(1L))<<2)))));
   *(double*)(((long)(((long*)((___fp[-(0)])-(1)))+1))+((___r4)<<(3-2)))=___F64V2;
   ___r3=(((long)((___r3)+((((long)(1L))<<2)))));
   if (((___r3)<(___r2))) { goto ___L6_bitrv2; }
L13478:
        ld r0,8(r7)
        sldi r2,r6,1
        addi r6,r6,4
        cmpd cr7,r30,r6
        add r9,r4,r0
        addi r9,r9,7
        ld r8,0(r9)
        std r2,-8(r7)
        ld r9,0(r7)
        add r8,r8,r2
        add r2,r2,r0
        addi r2,r2,7
        sldi r10,r8,1
        addi r11,r8,4
        add r10,r9,r10
        ld r0,0(r2)
        std r8,-16(r7)
        add r0,r4,r0
        std r0,-8(r7)
        lfd f13,7(r10)
        std r11,-24(r7)
        sldi r11,r11,1
        add r2,r9,r11
        lfd f12,7(r2)
        std r0,-32(r7)
        sldi r0,r0,1
        add r9,r9,r0
        lfd f0,7(r9)
        std r8,-40(r7)
        stfd f0,7(r10)
        ld r9,-8(r7)
        ld r2,0(r7)
        addi r9,r9,4
        add r11,r11,r2
        std r9,-32(r7)
        sldi r9,r9,1
        add r2,r2,r9
        lfd f0,7(r2)
        stfd f0,7(r11)
        ld r0,-8(r7)
        ld r2,0(r7)
        sldi r0,r0,1
        add r2,r2,r0
        stfd f13,7(r2)
        ld r9,-8(r7)
        ld r2,0(r7)
        addi r11,r9,4
        sldi r0,r11,1
        add r2,r2,r0
        stfd f12,7(r2)
        bgt cr7,L13478
and for the second:
___DEF_GLBL(___L10_bitrv2)
   ___SET_R4(___VECTORREF(___STK(-2),___R2))
   ___SET_STK(1,___FIXASHL(___R3,___FIX(1L)))
   ___SET_R4(___FIXADD(___STK(1),___R4))
   ___SET_STK(1,___VECTORREF(___STK(-2),___R3))
   ___SET_STK(2,___FIXASHL(___R2,___FIX(1L)))
   ___SET_STK(1,___FIXADD(___STK(2),___STK(1)))
   ___SET_STK(2,___FIXADD(___R4,___FIX(0L)))
   ___SET_F64(___F64V1,___F64VECTORREF(___STK(-1),___STK(2)))
   ___SET_STK(3,___FIXADD(___R4,___FIX(1L)))
   ___SET_F64(___F64V2,___F64VECTORREF(___STK(-1),___STK(3)))
   ___SET_STK(4,___FIXADD(___STK(1),___FIX(0L)))
   ___SET_F64(___F64V3,___F64VECTORREF(___STK(-1),___STK(4)))
   ___SET_STK(5,___FIXADD(___R4,___FIX(0L)))
   ___F64VECTORSET(___STK(-1),___STK(5),___F64V3)
   ___SET_STK(4,___FIXADD(___STK(1),___FIX(1L)))
   ___SET_F64(___F64V4,___F64VECTORREF(___STK(-1),___STK(4)))
   ___SET_STK(5,___FIXADD(___R4,___FIX(1L)))
   ___F64VECTORSET(___STK(-1),___STK(5),___F64V4)
   ___SET_STK(4,___FIXADD(___STK(1),___FIX(0L)))
   ___F64VECTORSET(___STK(-1),___STK(4),___F64V1)
   ___SET_STK(2,___FIXADD(___STK(1),___FIX(1L)))
   ___F64VECTORSET(___STK(-1),___STK(2),___F64V2)
   ___SET_R4(___FIXADD(___R4,___R1))
   ___SET_STK(1,___FIXADD(___STK(1),___R1))
   ___SET_STK(2,___FIXADD(___R4,___FIX(0L)))
   ___SET_F64(___F64V5,___F64VECTORREF(___STK(-1),___STK(2)))
   ___SET_STK(3,___FIXADD(___R4,___FIX(1L)))
   ___SET_F64(___F64V6,___F64VECTORREF(___STK(-1),___STK(3)))
   ___SET_STK(4,___FIXADD(___STK(1),___FIX(0L)))
   ___SET_F64(___F64V7,___F64VECTORREF(___STK(-1),___STK(4)))
   ___SET_STK(5,___FIXADD(___R4,___FIX(0L)))
   ___F64VECTORSET(___STK(-1),___STK(5),___F64V7)
   ___SET_STK(4,___FIXADD(___STK(1),___FIX(1L)))
   ___SET_F64(___F64V8,___F64VECTORREF(___STK(-1),___STK(4)))
   ___SET_R4(___FIXADD(___R4,___FIX(1L)))
   ___F64VECTORSET(___STK(-1),___R4,___F64V8)
   ___SET_R4(___FIXADD(___STK(1),___FIX(0L)))
   ___F64VECTORSET(___STK(-1),___R4,___F64V5)
   ___SET_R4(___FIXADD(___STK(1),___FIX(1L)))
   ___F64VECTORSET(___STK(-1),___R4,___F64V6)
   ___SET_R3(___FIXADD(___R3,___FIX(1L)))
   ___IF(___FIXLT(___R3,___R2))
   ___GOTO(___L10_bitrv2)
   ___END_IF
___L10_bitrv2:
   ___r4=(*(long*)(((long)(((long*)((___fp[-(-2)])-(1)))+1))+((___r2)<<(3-2))));
   ___fp[-(1)]=(((___r3)<<(((((long)(1L))<<2))>>2)));
   ___r4=(((long)((___fp[-(1)])+(___r4))));
   ___fp[-(1)]=(*(long*)(((long)(((long*)((___fp[-(-2)])-(1)))+1))+((___r3)<<(3-2))));
   ___fp[-(2)]=(((___r2)<<(((((long)(1L))<<2))>>2)));
   ___fp[-(1)]=(((long)((___fp[-(2)])+(___fp[-(1)]))));
   ___fp[-(2)]=(((long)((___r4)+((((long)(0L))<<2)))));
   ___F64V1=*(double*)(((long)(((long*)((___fp[-(-1)])-(1)))+1))+((___fp[-(2)])<<(3-2)));
   ___fp[-(3)]=(((long)((___r4)+((((long)(1L))<<2)))));
   ___F64V2=*(double*)(((long)(((long*)((___fp[-(-1)])-(1)))+1))+((___fp[-(3)])<<(3-2)));
   ___fp[-(4)]=(((long)((___fp[-(1)])+((((long)(0L))<<2)))));
   ___F64V3=*(double*)(((long)(((long*)((___fp[-(-1)])-(1)))+1))+((___fp[-(4)])<<(3-2)));
   ___fp[-(5)]=(((long)((___r4)+((((long)(0L))<<2)))));
   *(double*)(((long)(((long*)((___fp[-(-1)])-(1)))+1))+((___fp[-(5)])<<(3-2)))=___F64V3;
   ___fp[-(4)]=(((long)((___fp[-(1)])+((((long)(1L))<<2)))));
   ___F64V4=*(double*)(((long)(((long*)((___fp[-(-1)])-(1)))+1))+((___fp[-(4)])<<(3-2)));
   ___fp[-(5)]=(((long)((___r4)+((((long)(1L))<<2)))));
   *(double*)(((long)(((long*)((___fp[-(-1)])-(1)))+1))+((___fp[-(5)])<<(3-2)))=___F64V4;
   ___fp[-(4)]=(((long)((___fp[-(1)])+((((long)(0L))<<2)))));
   *(double*)(((long)(((long*)((___fp[-(-1)])-(1)))+1))+((___fp[-(4)])<<(3-2)))=___F64V1;
   ___fp[-(2)]=(((long)((___fp[-(1)])+((((long)(1L))<<2)))));
   *(double*)(((long)(((long*)((___fp[-(-1)])-(1)))+1))+((___fp[-(2)])<<(3-2)))=___F64V2;
   ___r4=(((long)((___r4)+(___r1))));
   ___fp[-(1)]=(((long)((___fp[-(1)])+(___r1))));
   ___fp[-(2)]=(((long)((___r4)+((((long)(0L))<<2)))));
   ___F64V5=*(double*)(((long)(((long*)((___fp[-(-1)])-(1)))+1))+((___fp[-(2)])<<(3-2)));
   ___fp[-(3)]=(((long)((___r4)+((((long)(1L))<<2)))));
   ___F64V6=*(double*)(((long)(((long*)((___fp[-(-1)])-(1)))+1))+((___fp[-(3)])<<(3-2)));
   ___fp[-(4)]=(((long)((___fp[-(1)])+((((long)(0L))<<2)))));
   ___F64V7=*(double*)(((long)(((long*)((___fp[-(-1)])-(1)))+1))+((___fp[-(4)])<<(3-2)));
   ___fp[-(5)]=(((long)((___r4)+((((long)(0L))<<2)))));
   *(double*)(((long)(((long*)((___fp[-(-1)])-(1)))+1))+((___fp[-(5)])<<(3-2)))=___F64V7;
   ___fp[-(4)]=(((long)((___fp[-(1)])+((((long)(1L))<<2)))));
   ___F64V8=*(double*)(((long)(((long*)((___fp[-(-1)])-(1)))+1))+((___fp[-(4)])<<(3-2)));
   ___r4=(((long)((___r4)+((((long)(1L))<<2)))));
   *(double*)(((long)(((long*)((___fp[-(-1)])-(1)))+1))+((___r4)<<(3-2)))=___F64V8;
   ___r4=(((long)((___fp[-(1)])+((((long)(0L))<<2)))));
   *(double*)(((long)(((long*)((___fp[-(-1)])-(1)))+1))+((___r4)<<(3-2)))=___F64V5;
   ___r4=(((long)((___fp[-(1)])+((((long)(1L))<<2)))));
   *(double*)(((long)(((long*)((___fp[-(-1)])-(1)))+1))+((___r4)<<(3-2)))=___F64V6;
   ___r3=(((long)((___r3)+((((long)(1L))<<2)))));
   if (((___r3)<(___r2))) { goto ___L10_bitrv2; }
L13483:
        ld r2,0(r24)
        sldi r9,r12,1
        addi r12,r12,4
        cmpd cr7,r30,r12
        add r2,r2,r28
        addi r2,r2,7
        ld r8,0(r2)
        std r9,0(r5)
        add r8,r8,r9
        sldi r11,r8,1
        addi r7,r8,4
        sldi r10,r7,1
        ld r0,0(r24)
        add r9,r9,r0
        addi r9,r9,7
        ld r0,0(r9)
        std r0,0(r5)
        std r28,0(r3)
        nop
        nop
        ld r0,0(r5)
        add r0,r28,r0
        std r0,0(r5)
        std r8,0(r3)
        ld r2,0(r6)
        add r2,r2,r11
        lfd f13,7(r2)
        std r7,0(r25)
        ld r2,0(r6)
        ld r0,0(r5)
        add r2,r2,r10
        lfd f12,7(r2)
        std r0,0(r4)
        sldi r0,r0,1
        ld r2,0(r6)
        add r2,r2,r0
        lfd f0,7(r2)
        std r8,0(r29)
        add r8,r27,r8
        ld r0,0(r6)
        add r11,r11,r0
        stfd f0,7(r11)
        addi r11,r8,4
        ld r9,0(r5)
        addi r9,r9,4
        std r9,0(r4)
        sldi r9,r9,1
        ld r2,0(r6)
        add r2,r2,r9
        lfd f0,7(r2)
        std r7,0(r29)
        ld r0,0(r6)
        add r10,r10,r0
        stfd f0,7(r10)
        sldi r10,r8,1
        ld r0,0(r5)
        std r0,0(r4)
        sldi r0,r0,1
        ld r2,0(r6)
        add r2,r2,r0
        stfd f13,7(r2)
        ld r9,0(r5)
        addi r9,r9,4
        std r9,0(r3)
        ld r2,0(r6)
        sldi r9,r9,1
        add r2,r2,r9
        stfd f12,7(r2)
        ld r0,0(r5)
        add r0,r27,r0
        std r0,0(r5)
        std r8,0(r3)
        ld r2,0(r6)
        add r2,r2,r10
        lfd f13,7(r2)
        std r11,0(r25)
        sldi r11,r11,1
        ld r2,0(r6)
        ld r0,0(r5)
        add r2,r2,r11
        lfd f12,7(r2)
        std r0,0(r4)
        sldi r0,r0,1
        ld r2,0(r6)
        add r2,r2,r0
        lfd f0,7(r2)
        std r8,0(r29)
        ld r0,0(r6)
        add r10,r10,r0
        stfd f0,7(r10)
        ld r9,0(r5)
        addi r9,r9,4
        std r9,0(r4)
        sldi r9,r9,1
        ld r2,0(r6)
        add r11,r11,r2
        add r2,r2,r9
        lfd f0,7(r2)
        stfd f0,7(r11)
        ld r0,0(r5)
        ld r2,0(r6)
        sldi r0,r0,1
        add r2,r2,r0
        stfd f13,7(r2)
        ld r9,0(r5)
        ld r2,0(r6)
        addi r11,r9,4
        sldi r0,r11,1
        add r2,r2,r0
        stfd f12,7(r2)
        bgt cr7,L13483
Now, bitrv2 really exercises the memory system (which is pretty good on my 2GHz G5), and I'm not saying that this is the best code one can write, but without basic-block-local GVM registers the code sucks: the GVM temporaries live in ___fp[] stack slots, and gcc can't assume that the writes to the f64vector don't kill the reads from those slots, because we have to compile with -fno-strict-aliasing.