Harden ASN.1 BIO handling of large amounts of data.

If the ASN.1 BIO is presented with a large length field, read the
content in chunks of increasing size, checking for EOF on each read.
This prevents a small file from causing excessive memory allocation.

CVE-2016-2109

Thanks to Brian Carpenter for reporting this issue.

Reviewed-by: Viktor Dukhovni <viktor@openssl.org>
commit c62981390d
parent ddc606c914
Author: Dr. Stephen Henson
Date:   2016-04-11 13:57:20 +01:00

--- a/crypto/asn1/a_d2i_fp.c
+++ b/crypto/asn1/a_d2i_fp.c

@@ -138,6 +138,7 @@ void *ASN1_item_d2i_fp(const ASN1_ITEM *it, FILE *in, void *x)
 #endif
 
 #define HEADER_SIZE 8
+#define ASN1_CHUNK_INITIAL_SIZE (16 * 1024)
 static int asn1_d2i_read_bio(BIO *in, BUF_MEM **pb)
 {
     BUF_MEM *b;
@@ -216,29 +217,44 @@ static int asn1_d2i_read_bio(BIO *in, BUF_MEM **pb)
             /* suck in slen bytes of data */
             want = slen;
             if (want > (len - off)) {
+                size_t chunk_max = ASN1_CHUNK_INITIAL_SIZE;
+
                 want -= (len - off);
                 if (want > INT_MAX /* BIO_read takes an int length */ ||
                     len + want < len) {
                     ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ASN1_R_TOO_LONG);
                     goto err;
                 }
-                if (!BUF_MEM_grow_clean(b, len + want)) {
-                    ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ERR_R_MALLOC_FAILURE);
-                    goto err;
-                }
                 while (want > 0) {
-                    i = BIO_read(in, &(b->data[len]), want);
-                    if (i <= 0) {
-                        ASN1err(ASN1_F_ASN1_D2I_READ_BIO,
-                                ASN1_R_NOT_ENOUGH_DATA);
-                        goto err;
-                    }
-                    /*
-                     * This can't overflow because |len+want| didn't
-                     * overflow.
-                     */
-                    len += i;
-                    want -= i;
+                    /*
+                     * Read content in chunks of increasing size
+                     * so we can return an error for EOF without
+                     * having to allocate the entire content length
+                     * in one go.
+                     */
+                    size_t chunk = want > chunk_max ? chunk_max : want;
+
+                    if (!BUF_MEM_grow_clean(b, len + chunk)) {
+                        ASN1err(ASN1_F_ASN1_D2I_READ_BIO, ERR_R_MALLOC_FAILURE);
+                        goto err;
+                    }
+                    want -= chunk;
+                    while (chunk > 0) {
+                        i = BIO_read(in, &(b->data[len]), chunk);
+                        if (i <= 0) {
+                            ASN1err(ASN1_F_ASN1_D2I_READ_BIO,
+                                    ASN1_R_NOT_ENOUGH_DATA);
+                            goto err;
+                        }
+                        /*
+                         * This can't overflow because |len+want| didn't
+                         * overflow.
+                         */
+                        len += i;
+                        chunk -= i;
+                    }
+                    if (chunk_max < INT_MAX/2)
+                        chunk_max *= 2;
                 }
             }
             if (off + slen < off) {
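
As a standalone illustration of the pattern this patch adopts, here is a
minimal sketch in plain C against stdio rather than OpenSSL's BIO layer.
The names read_declared_length and CHUNK_INITIAL_SIZE are hypothetical,
not part of any OpenSSL API; the logic mirrors the hardened loop above:
grow the buffer one bounded chunk at a time, fail fast on EOF, and only
double the chunk cap once data has actually arrived.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define CHUNK_INITIAL_SIZE (16 * 1024)

/*
 * Read exactly |want| bytes from |fp| into a freshly allocated buffer.
 * Memory is committed in chunks of increasing size, so a truncated
 * stream carrying a huge declared length fails after a small allocation
 * instead of reserving the full length up front.
 * Returns the buffer (caller frees) or NULL on EOF/error.
 */
static unsigned char *read_declared_length(FILE *fp, size_t want)
{
    unsigned char *buf = NULL, *tmp;
    size_t len = 0;                       /* bytes read so far */
    size_t chunk_max = CHUNK_INITIAL_SIZE;

    if (want > INT_MAX)                   /* mirror the BIO_read() int limit */
        return NULL;
    while (want > 0) {
        /* Allocate only the next chunk, never the whole remainder. */
        size_t chunk = want > chunk_max ? chunk_max : want;

        if ((tmp = realloc(buf, len + chunk)) == NULL)
            goto err;
        buf = tmp;
        want -= chunk;
        while (chunk > 0) {
            size_t i = fread(buf + len, 1, chunk, fp);

            if (i == 0)                   /* EOF: the length field lied */
                goto err;
            len += i;
            chunk -= i;
        }
        /* The data really is there, so trust larger chunks next time. */
        if (chunk_max < INT_MAX / 2)
            chunk_max *= 2;
    }
    return buf;
 err:
    free(buf);
    return NULL;
}

With this shape, a 100-byte file whose header claims a multi-gigabyte
body costs at most one 16 KiB allocation before the EOF check rejects it.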