Leave the decision to call/implement bn_sqr_mont to the assembler developer.

This commit is contained in:
Andy Polyakov 2005-10-06 09:12:39 +00:00
parent 40a3c12305
commit ca04d7a208
3 changed files with 34 additions and 29 deletions

View File

@@ -729,8 +729,7 @@ int RAND_pseudo_bytes(unsigned char *buf,int num);
bn_pollute(a); \ bn_pollute(a); \
} }
void bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,BN_ULONG n0, int num); int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,BN_ULONG n0, int num);
void bn_sqr_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *np,BN_ULONG n0, int num);
BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w); BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w);
BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w); BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w);
void bn_sqr_words(BN_ULONG *rp, const BN_ULONG *ap, int num); void bn_sqr_words(BN_ULONG *rp, const BN_ULONG *ap, int num);

View File

@@ -831,13 +831,14 @@ void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a)
#ifdef OPENSSL_BN_ASM_MONT #ifdef OPENSSL_BN_ASM_MONT
/* /*
* This is essentially reference implementation, which may or may not * This is essentially reference implementation, which may or may not
* result in performance improvement. E.g. on IA-32 this does give 40% * result in performance improvement. E.g. on IA-32 this routine was
* faster rsa1024 private key operations and 10% faster rsa4096 ones, * observed to give 40% faster rsa1024 private key operations and 10%
* while on AMD64 it improves rsa1024 sign only by 10% and *worsens* * faster rsa4096 ones, while on AMD64 it improves rsa1024 sign only
* rsa4096 sign by 15%. Once again, it's a reference implementation, * by 10% and *worsens* rsa4096 sign by 15%. Once again, it's a
* one to be used as start-point for platform-specific assembler. * reference implementation, one to be used as start-point for
* platform-specific assembler.
*/ */
void bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,BN_ULONG n0, int num) int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,BN_ULONG n0, int num)
{ {
BN_ULONG c0,c1,ml,*tp; BN_ULONG c0,c1,ml,*tp;
#ifdef mul64 #ifdef mul64
@@ -846,6 +847,9 @@ void bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_
volatile BN_ULONG *vp; volatile BN_ULONG *vp;
int i=0,j; int i=0,j;
#if 0 /* template for platform-specific implementation */
if (ap==bp) return bn_sqr_mont(rp,ap,np,n0,num);
#endif
vp = tp = alloca((num+2)*sizeof(BN_ULONG)); vp = tp = alloca((num+2)*sizeof(BN_ULONG));
tp[num] = bn_mul_words(tp,ap,num,bp[0]); tp[num] = bn_mul_words(tp,ap,num,bp[0]);
@@ -890,18 +894,22 @@ void bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_
if (tp[num]!=0 || c0==0) if (tp[num]!=0 || c0==0)
{ {
for(i=0;i<num+2;i++) vp[i] = 0; for(i=0;i<num+2;i++) vp[i] = 0;
return; return 1;
} }
} }
for(i=0;i<num;i++) rp[i] = tp[i], vp[i] = 0; for(i=0;i<num;i++) rp[i] = tp[i], vp[i] = 0;
vp[num] = 0; vp[num] = 0;
vp[num+1] = 0; vp[num+1] = 0;
return 1;
} }
#else
void bn_sqr_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *np,BN_ULONG n0, int num) /*
{ * Return value of 0 indicates that multiplication/convolution was not
bn_mul_mont(rp,ap,ap,np,n0,num); * performed to signal the caller to fall down to alternative/original
} * code-path.
*/
int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,BN_ULONG n0, int num)
{ return 0; }
#endif /* OPENSSL_BN_ASM_MONT */ #endif /* OPENSSL_BN_ASM_MONT */
#else /* !BN_MUL_COMBA */ #else /* !BN_MUL_COMBA */
@@ -942,7 +950,7 @@ void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
} }
#ifdef OPENSSL_BN_ASM_MONT #ifdef OPENSSL_BN_ASM_MONT
void bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,BN_ULONG n0, int num) int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,BN_ULONG n0, int num)
{ {
BN_ULONG c0,c1,*tp; BN_ULONG c0,c1,*tp;
volatile BN_ULONG *vp; volatile BN_ULONG *vp;
@@ -972,18 +980,17 @@ void bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_
if (tp[num]!=0 || c0==0) if (tp[num]!=0 || c0==0)
{ {
for(i=0;i<num+2;i++) vp[i] = 0; for(i=0;i<num+2;i++) vp[i] = 0;
return; return 1;
} }
} }
for(i=0;i<num;i++) rp[i] = tp[i], vp[i] = 0; for(i=0;i<num;i++) rp[i] = tp[i], vp[i] = 0;
vp[num] = 0; vp[num] = 0;
vp[num+1] = 0; vp[num+1] = 0;
return 1;
} }
#else
void bn_sqr_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *np,BN_ULONG n0, int num) int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,BN_ULONG n0, int num)
{ { return 0; }
bn_mul_mont(rp,ap,ap,np,n0,num);
}
#endif /* OPENSSL_BN_ASM_MONT */ #endif /* OPENSSL_BN_ASM_MONT */
#endif /* !BN_MUL_COMBA */ #endif /* !BN_MUL_COMBA */

View File

@@ -80,14 +80,13 @@ int BN_mod_mul_montgomery(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
if (num>1 && a->top==num && b->top==num) if (num>1 && a->top==num && b->top==num)
{ {
if (bn_wexpand(r,num) == NULL) return 0; if (bn_wexpand(r,num) == NULL) return 0;
r->neg = a->neg^b->neg; if (bn_mul_mont(r->d,a->d,b->d,mont->N.d,mont->n0,num))
r->top = num; {
if (a==b) r->neg = a->neg^b->neg;
bn_sqr_mont(r->d,a->d,mont->N.d,mont->n0,num); r->top = num;
else bn_fix_top(r);
bn_mul_mont(r->d,a->d,b->d,mont->N.d,mont->n0,num); return 1;
bn_fix_top(r); }
return 1;
} }
#endif #endif