crypto: sha512 - reduce stack usage to safe number

For rounds 16--79, W[i] only depends on W[i - 2], W[i - 7], W[i - 15] and
W[i - 16].  Consequently, keeping the whole 80-entry W[] array on the stack
is unnecessary; only the last 16 values are really needed.

Using W[16] instead of W[80] greatly reduces stack usage
(from ~750 bytes to ~340 bytes on x86_64).
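Not part of the patch, but the claim is easy to check in user space: the
sketch below (ror64()/s0()/s1() follow the FIPS 180-2 small-sigma
definitions and mirror the kernel's s0()/s1(); the input block is an
arbitrary example) computes the message schedule both ways and verifies
that a rolling 16-entry buffer reproduces the full 80-entry array.

#include <stdint.h>
#include <stdio.h>

static uint64_t ror64(uint64_t x, int n)
{
	return (x >> n) | (x << (64 - n));
}

/* small sigma functions from FIPS 180-2, same as the kernel's s0()/s1() */
static uint64_t s0(uint64_t x) { return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7); }
static uint64_t s1(uint64_t x) { return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6); }

int main(void)
{
	uint64_t Wfull[80], Wring[16];
	int i, ok = 1;

	/* any 16-word block will do for the comparison */
	for (i = 0; i < 16; i++)
		Wfull[i] = Wring[i] = 0x0123456789abcdefULL * (i + 1);

	/* old scheme: compute the whole schedule up front */
	for (i = 16; i < 80; i++)
		Wfull[i] = s1(Wfull[i - 2]) + Wfull[i - 7]
			 + s0(Wfull[i - 15]) + Wfull[i - 16];

	/* new scheme: 16-entry ring, updated right before each round */
	for (i = 16; i < 80; i++) {
		Wring[i % 16] += s1(Wring[(i - 2) % 16]) + Wring[(i - 7) % 16]
			       + s0(Wring[(i - 15) % 16]);
		if (Wring[i % 16] != Wfull[i])
			ok = 0;
	}
	printf("%s\n", ok ? "rolling schedule matches W[80]" : "mismatch");
	return 0;
}

The "+=" in the ring version works because slot i % 16 still holds
W[i - 16] when round i comes around, which is exactly the last term of
the recurrence.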

Line-by-line explanation:
* BLEND_OP
  The array is "circular" now, so all indexes have to be taken modulo 16.
  The round number is positive, so the remainder operation holds no
  surprises; e.g. round 20 reads W[18 % 16], W[13 % 16] and W[5 % 16] and
  accumulates into W[20 % 16], which still holds W[20 - 16] (the sketch
  above exercises exactly this indexing).

* the initial full message scheduling is trimmed to the first 16 values,
  which come straight from the data block; the rest is calculated just
  before it is needed.

* the original loop body is an unrolled version of the new SHA512_0_15 and
  SHA512_16_79 macros; the unrolling was done to avoid explicit variable
  renaming, so after preprocessing it is the very same code.  See the
  sha1_transform() code, which uses the same trick (a stand-alone
  illustration follows this list).
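To illustrate the renaming trick for readers who have not seen it in
sha1_transform(): the sketch below is not kernel code (step(), ROUND(),
rolled() and unrolled() are made up for the example); it only shows how
passing the working variables to one macro in rotated order replaces the
per-round variable shuffle.

#include <stdint.h>
#include <stdio.h>

/* made-up round function; only the data flow matters here */
static uint64_t step(uint64_t x)
{
	return x * 6364136223846793005ULL + 1442695040888963407ULL;
}

/* rolled version: one round per iteration, variables renamed by hand */
static uint64_t rolled(int rounds)
{
	uint64_t a = 1, b = 2, c = 3, t;
	int i;

	for (i = 0; i < rounds; i++) {
		t = step(a) + b + c;
		c = b;		/* explicit renaming after every round */
		b = a;
		a = t;
	}
	return a ^ b ^ c;
}

/*
 * Unrolled version: the new value is written into the variable whose old
 * contents are dead, and the next invocation simply names the variables
 * in rotated order, so no renaming assignments are needed.
 */
#define ROUND(a, b, c)	(c) = step(a) + (b) + (c)

static uint64_t unrolled(int rounds)	/* rounds must be a multiple of 3 */
{
	uint64_t a = 1, b = 2, c = 3;
	int i;

	for (i = 0; i < rounds; i += 3) {
		ROUND(a, b, c);
		ROUND(c, a, b);
		ROUND(b, c, a);
	}
	return a ^ b ^ c;
}

int main(void)
{
	printf("%s\n", rolled(24) == unrolled(24) ? "same result" : "differ");
	return 0;
}

With three variables the invocation pattern repeats every 3 rounds; the
real code rotates eight working variables, hence the "i += 8" step in
both loops.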

The patch survives the in-tree crypto tests and the original bug report
test (ping flood with hmac(sha512)).
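Not the original reproducer, but a lightweight way to poke the same
hmac(sha512) code path from user space is the AF_ALG interface, assuming
CONFIG_CRYPTO_USER_API_HASH is enabled.  A minimal sketch (all-zero
16-byte key, message "abc", error handling omitted):

#include <linux/if_alg.h>
#include <sys/socket.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "hmac(sha512)",
	};
	unsigned char key[16] = { 0 };
	unsigned char digest[64];	/* SHA-512 digest size */
	int tfm, req, i;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	req = accept(tfm, NULL, 0);

	write(req, "abc", 3);
	read(req, digest, sizeof(digest));

	for (i = 0; i < (int)sizeof(digest); i++)
		printf("%02x", digest[i]);
	printf("\n");
	return 0;
}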

See FIPS 180-2 for the SHA-512 definition:
http://csrc.nist.gov/publications/fips/fips180-2/fips180-2withchangenotice.pdf

Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Cc: stable@vger.kernel.org
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

commit 51fc6dc8f9 (parent 84e31fdb7c): Alexey Dobriyan, 2012-01-14 21:40:57 +03:00, committed by Herbert Xu

@@ -78,7 +78,7 @@ static inline void LOAD_OP(int I, u64 *W, const u8 *input)
 
 static inline void BLEND_OP(int I, u64 *W)
 {
-	W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
+	W[I % 16] += s1(W[(I-2) % 16]) + W[(I-7) % 16] + s0(W[(I-15) % 16]);
 }
 
 static void
@@ -87,38 +87,48 @@ sha512_transform(u64 *state, const u8 *input)
 	u64 a, b, c, d, e, f, g, h, t1, t2;
 
 	int i;
-	u64 W[80];
+	u64 W[16];
 
 	/* load the input */
 	for (i = 0; i < 16; i++)
 		LOAD_OP(i, W, input);
 
-	for (i = 16; i < 80; i++) {
-		BLEND_OP(i, W);
-	}
-
 	/* load the state into our registers */
 	a=state[0]; b=state[1]; c=state[2]; d=state[3];
 	e=state[4]; f=state[5]; g=state[6]; h=state[7];
 
-	/* now iterate */
-	for (i=0; i<80; i+=8) {
-		t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[i ];
-		t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
-		t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[i+1];
-		t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
-		t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[i+2];
-		t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
-		t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[i+3];
-		t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
-		t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[i+4];
-		t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
-		t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[i+5];
-		t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
-		t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[i+6];
-		t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
-		t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[i+7];
-		t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
+#define SHA512_0_15(i, a, b, c, d, e, f, g, h) \
+	t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[i]; \
+	t2 = e0(a) + Maj(a, b, c); \
+	d += t1; \
+	h = t1 + t2
+
+#define SHA512_16_79(i, a, b, c, d, e, f, g, h) \
+	BLEND_OP(i, W); \
+	t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[(i)%16]; \
+	t2 = e0(a) + Maj(a, b, c); \
+	d += t1; \
+	h = t1 + t2
+
+	for (i = 0; i < 16; i += 8) {
+		SHA512_0_15(i, a, b, c, d, e, f, g, h);
+		SHA512_0_15(i + 1, h, a, b, c, d, e, f, g);
+		SHA512_0_15(i + 2, g, h, a, b, c, d, e, f);
+		SHA512_0_15(i + 3, f, g, h, a, b, c, d, e);
+		SHA512_0_15(i + 4, e, f, g, h, a, b, c, d);
+		SHA512_0_15(i + 5, d, e, f, g, h, a, b, c);
+		SHA512_0_15(i + 6, c, d, e, f, g, h, a, b);
+		SHA512_0_15(i + 7, b, c, d, e, f, g, h, a);
+	}
+	for (i = 16; i < 80; i += 8) {
+		SHA512_16_79(i, a, b, c, d, e, f, g, h);
+		SHA512_16_79(i + 1, h, a, b, c, d, e, f, g);
+		SHA512_16_79(i + 2, g, h, a, b, c, d, e, f);
+		SHA512_16_79(i + 3, f, g, h, a, b, c, d, e);
+		SHA512_16_79(i + 4, e, f, g, h, a, b, c, d);
+		SHA512_16_79(i + 5, d, e, f, g, h, a, b, c);
+		SHA512_16_79(i + 6, c, d, e, f, g, h, a, b);
+		SHA512_16_79(i + 7, b, c, d, e, f, g, h, a);
 	}
 
 	state[0] += a; state[1] += b; state[2] += c; state[3] += d;