lmb: fix allocation at end of address range

The lmb code fails if 'base + size' of RAM overflows to zero, i.e. if a
RAM bank ends exactly at the end of the physical address range.

Fix this by calculating end as 'base + size - 1' instead of 'base + size'
where appropriate.

Added tests to assert this is fixed.
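
For illustration only (not part of the patch): a minimal standalone sketch,
assuming 32-bit physical addresses and made-up typedefs/values, showing how
the exclusive end 'base + size' wraps to zero for a bank that ends at the top
of the address range, while the inclusive 'base + size - 1' stays representable.

/*
 * Illustration only, not part of the patch: assumes 32-bit physical
 * addresses so the wraparound is easy to see.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t phys_addr_t;
typedef uint32_t phys_size_t;

int main(void)
{
	phys_addr_t base = 0xE0000000;	/* last 512 MiB of a 32-bit space */
	phys_size_t size = 0x20000000;

	/* exclusive end: wraps around to 0x00000000, breaking comparisons */
	printf("base + size     = 0x%08" PRIx32 "\n", (uint32_t)(base + size));

	/* inclusive end: stays representable as 0xFFFFFFFF */
	printf("base + size - 1 = 0x%08" PRIx32 "\n", (uint32_t)(base + size - 1));

	return 0;
}

With an exclusive end, a region ending at 0xFFFFFFFF gets an end of 0, so
range comparisons against it can never succeed; comparing against the
inclusive end avoids that corner case.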

Reviewed-by: Simon Glass <sjg@chromium.org>
Signed-off-by: Simon Goldschmidt <simon.k.r.goldschmidt@gmail.com>

commit d67f33cf4e (parent a01ae0c23f)
Author:    Simon Goldschmidt, 2019-01-14 22:38:15 +01:00
Committer: Tom Rini
2 changed files with 38 additions and 20 deletions

@@ -43,7 +43,10 @@ void lmb_dump_all(struct lmb *lmb)
 static long lmb_addrs_overlap(phys_addr_t base1,
 		phys_size_t size1, phys_addr_t base2, phys_size_t size2)
 {
-	return ((base1 < (base2+size2)) && (base2 < (base1+size1)));
+	const phys_addr_t base1_end = base1 + size1 - 1;
+	const phys_addr_t base2_end = base2 + size2 - 1;
+
+	return ((base1 <= base2_end) && (base2 <= base1_end));
 }
 
 static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
@@ -89,18 +92,9 @@ static void lmb_coalesce_regions(struct lmb_region *rgn,
 
 void lmb_init(struct lmb *lmb)
 {
-	/* Create a dummy zero size LMB which will get coalesced away later.
-	 * This simplifies the lmb_add() code below...
-	 */
-	lmb->memory.region[0].base = 0;
-	lmb->memory.region[0].size = 0;
-	lmb->memory.cnt = 1;
+	lmb->memory.cnt = 0;
 	lmb->memory.size = 0;
-
-	/* Ditto. */
-	lmb->reserved.region[0].base = 0;
-	lmb->reserved.region[0].size = 0;
-	lmb->reserved.cnt = 1;
+	lmb->reserved.cnt = 0;
 	lmb->reserved.size = 0;
 }
 
@@ -110,9 +104,10 @@ static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t
 	unsigned long coalesced = 0;
 	long adjacent, i;
 
-	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
+	if (rgn->cnt == 0) {
 		rgn->region[0].base = base;
 		rgn->region[0].size = size;
+		rgn->cnt = 1;
 		return 0;
 	}
 
@@ -183,7 +178,7 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
 {
 	struct lmb_region *rgn = &(lmb->reserved);
 	phys_addr_t rgnbegin, rgnend;
-	phys_addr_t end = base + size;
+	phys_addr_t end = base + size - 1;
 	int i;
 
 	rgnbegin = rgnend = 0; /* supress gcc warnings */
@@ -191,7 +186,7 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
 	/* Find the region where (base, size) belongs to */
 	for (i=0; i < rgn->cnt; i++) {
 		rgnbegin = rgn->region[i].base;
-		rgnend = rgnbegin + rgn->region[i].size;
+		rgnend = rgnbegin + rgn->region[i].size - 1;
 
 		if ((rgnbegin <= base) && (end <= rgnend))
 			break;
@@ -209,7 +204,7 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
 
 	/* Check to see if region is matching at the front */
 	if (rgnbegin == base) {
-		rgn->region[i].base = end;
+		rgn->region[i].base = end + 1;
 		rgn->region[i].size -= size;
 		return 0;
 	}
@@ -225,7 +220,7 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
 	 * beginging of the hole and add the region after hole.
 	 */
 	rgn->region[i].size = base - rgn->region[i].base;
-	return lmb_add_region(rgn, end, rgnend - end);
+	return lmb_add_region(rgn, end + 1, rgnend - end);
 }
 
 long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)

@@ -146,8 +146,15 @@ static int test_multi_alloc_512mb(struct unit_test_state *uts,
 /* Create a memory region with one reserved region and allocate */
 static int lib_test_lmb_simple(struct unit_test_state *uts)
 {
+	int ret;
+
 	/* simulate 512 MiB RAM beginning at 1GiB */
-	return test_multi_alloc_512mb(uts, 0x40000000);
+	ret = test_multi_alloc_512mb(uts, 0x40000000);
+	if (ret)
+		return ret;
+
+	/* simulate 512 MiB RAM beginning at 1.5GiB */
+	return test_multi_alloc_512mb(uts, 0xE0000000);
 }
 
 DM_TEST(lib_test_lmb_simple, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
@@ -206,7 +213,15 @@ static int test_bigblock(struct unit_test_state *uts, const phys_addr_t ram)
 
 static int lib_test_lmb_big(struct unit_test_state *uts)
 {
-	return test_bigblock(uts, 0x40000000);
+	int ret;
+
+	/* simulate 512 MiB RAM beginning at 1GiB */
+	ret = test_bigblock(uts, 0x40000000);
+	if (ret)
+		return ret;
+
+	/* simulate 512 MiB RAM beginning at 1.5GiB */
+	return test_bigblock(uts, 0xE0000000);
 }
 
 DM_TEST(lib_test_lmb_big, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
@@ -247,7 +262,15 @@ static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram)
 
 static int lib_test_lmb_noreserved(struct unit_test_state *uts)
 {
-	return test_noreserved(uts, 0x40000000);
+	int ret;
+
+	/* simulate 512 MiB RAM beginning at 1GiB */
+	ret = test_noreserved(uts, 0x40000000);
+	if (ret)
+		return ret;
+
+	/* simulate 512 MiB RAM beginning at 1.5GiB */
+	return test_noreserved(uts, 0xE0000000);
 }
 
 DM_TEST(lib_test_lmb_noreserved, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);