lib: lmb: reserving overlapping regions should fail

lmb_add_region handles overlapping regions incorrectly: instead of
merging or rejecting a new reserved region that overlaps an existing
one, it simply adds the new region.

Since the same function is used internally for lmb_alloc, change
lmb_add_region to reject overlapping regions.
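
For reference, the overlap check lmb_add_region now performs is a
plain half-open interval intersection test. A minimal sketch of the
lmb_addrs_overlap() helper it relies on (the helper already exists in
lib/lmb.c; this is sketched from its use in the hunk below, not
copied verbatim):

    /* Two regions [base1, base1 + size1) and [base2, base2 + size2)
     * overlap iff each one starts before the other one ends. */
    static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
                                  phys_addr_t base2, phys_size_t size2)
    {
        return (base1 < base2 + size2) && (base2 < base1 + size1);
    }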

Also, to keep reserved memory correct after 'free', reserved entries
created by allocating memory must record the original requested size,
not the size rounded up to a multiple of the alignment. This ensures
the reserved region is completely removed when the caller calls
'lmb_free', as that function takes the same size that was passed to
'lmb_alloc' etc.
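
To illustrate (a sketch with made-up values, not part of the patch):
with lmb_alloc(&lmb, 5, 8), the old code reserved lmb_align_up(5, 8) =
8 bytes, so the matching lmb_free(&lmb, a, 5) released only 5 of them
and left a stale 3-byte reservation behind. Recording the original
size makes the alloc/free pair symmetric:

    struct lmb lmb;
    phys_addr_t a;
    long ret;

    lmb_init(&lmb);
    lmb_add(&lmb, 0x40000000, 0x20000000);    /* 512 MiB of RAM */

    /* 5 bytes with 8-byte alignment: the reserved entry must record
     * size 5, not the aligned-up 8 */
    a = lmb_alloc(&lmb, 5, 8);

    /* freeing with the size passed to lmb_alloc now removes the
     * reservation completely */
    ret = lmb_free(&lmb, a, 5);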

Add tests to assert this.

Reviewed-by: Simon Glass <sjg@chromium.org>
Signed-off-by: Simon Goldschmidt <simon.k.r.goldschmidt@gmail.com>
Authored by Simon Goldschmidt on 2019-01-14 22:38:16 +01:00; committed by Tom Rini
commit 0f7c51a676, parent d67f33cf4e
2 changed files with 91 additions and 15 deletions

lib/lmb.c

@@ -131,6 +131,9 @@ static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t
 			rgn->region[i].size += size;
 			coalesced++;
 			break;
+		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
+			/* regions overlap */
+			return -1;
 		}
 	}
@@ -269,11 +272,6 @@ static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
 	return addr & ~(size - 1);
 }

-static phys_addr_t lmb_align_up(phys_addr_t addr, ulong size)
-{
-	return (addr + (size - 1)) & ~(size - 1);
-}
-
 phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
 {
 	long i, j;
@@ -302,8 +300,7 @@ phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phy
 		if (j < 0) {
 			/* This area isn't reserved, take it */
 			if (lmb_add_region(&lmb->reserved, base,
-					   lmb_align_up(size,
-							align)) < 0)
+					   size) < 0)
 				return 0;
 			return base;
 		}

test/lib/lmb.c

@@ -227,13 +227,16 @@ static int lib_test_lmb_big(struct unit_test_state *uts)
 DM_TEST(lib_test_lmb_big, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);

 /* Simulate 512 MiB RAM, allocate a block without previous reservation */
-static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram)
+static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram,
+			   const phys_addr_t alloc_size, const ulong align)
 {
 	const phys_size_t ram_size = 0x20000000;
 	const phys_addr_t ram_end = ram + ram_size;
 	struct lmb lmb;
 	long ret;
 	phys_addr_t a, b;
+	const phys_addr_t alloc_size_aligned = (alloc_size + align - 1) &
+		~(align - 1);

 	/* check for overflow */
 	ut_assert(ram_end == 0 || ram_end > ram);
@@ -242,20 +245,43 @@ static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram)
 	ret = lmb_add(&lmb, ram, ram_size);
 	ut_asserteq(ret, 0);
+	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

 	/* allocate a block */
-	a = lmb_alloc(&lmb, 4, 1);
+	a = lmb_alloc(&lmb, alloc_size, align);
 	ut_assert(a != 0);
-	/* and free it */
-	ret = lmb_free(&lmb, a, 4);
+	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
+		   alloc_size, 0, 0, 0, 0);
+	/* allocate another block */
+	b = lmb_alloc(&lmb, alloc_size, align);
+	ut_assert(b != 0);
+	if (alloc_size == alloc_size_aligned) {
+		ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size -
+			   (alloc_size_aligned * 2), alloc_size * 2, 0, 0, 0,
+			   0);
+	} else {
+		ASSERT_LMB(&lmb, ram, ram_size, 2, ram + ram_size -
+			   (alloc_size_aligned * 2), alloc_size, ram + ram_size
+			   - alloc_size_aligned, alloc_size, 0, 0);
+	}
+	/* and free them */
+	ret = lmb_free(&lmb, b, alloc_size);
 	ut_asserteq(ret, 0);
+	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
+		   alloc_size, 0, 0, 0, 0);
+	ret = lmb_free(&lmb, a, alloc_size);
+	ut_asserteq(ret, 0);
+	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

 	/* allocate a block with base*/
-	b = lmb_alloc_base(&lmb, 4, 1, ram_end);
+	b = lmb_alloc_base(&lmb, alloc_size, align, ram_end);
 	ut_assert(a == b);
+	ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
+		   alloc_size, 0, 0, 0, 0);
 	/* and free it */
-	ret = lmb_free(&lmb, b, 4);
+	ret = lmb_free(&lmb, b, alloc_size);
 	ut_asserteq(ret, 0);
+	ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);

 	return 0;
 }
@@ -265,16 +291,30 @@ static int lib_test_lmb_noreserved(struct unit_test_state *uts)
 	int ret;

 	/* simulate 512 MiB RAM beginning at 1GiB */
-	ret = test_noreserved(uts, 0x40000000);
+	ret = test_noreserved(uts, 0x40000000, 4, 1);
 	if (ret)
 		return ret;

 	/* simulate 512 MiB RAM beginning at 1.5GiB */
-	return test_noreserved(uts, 0xE0000000);
+	return test_noreserved(uts, 0xE0000000, 4, 1);
 }

 DM_TEST(lib_test_lmb_noreserved, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);

+static int lib_test_lmb_unaligned_size(struct unit_test_state *uts)
+{
+	int ret;
+
+	/* simulate 512 MiB RAM beginning at 1GiB */
+	ret = test_noreserved(uts, 0x40000000, 5, 8);
+	if (ret)
+		return ret;
+
+	/* simulate 512 MiB RAM beginning at 1.5GiB */
+	return test_noreserved(uts, 0xE0000000, 5, 8);
+}
+
+DM_TEST(lib_test_lmb_unaligned_size, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
+
 /*
  * Simulate a RAM that starts at 0 and allocate down to address 0, which must
  * fail as '0' means failure for the lmb_alloc functions.
@@ -318,3 +358,42 @@ static int lib_test_lmb_at_0(struct unit_test_state *uts)
 }

 DM_TEST(lib_test_lmb_at_0, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
+
+/* Check that calling lmb_reserve with overlapping regions fails. */
+static int lib_test_lmb_overlapping_reserve(struct unit_test_state *uts)
+{
+	const phys_addr_t ram = 0x40000000;
+	const phys_size_t ram_size = 0x20000000;
+	struct lmb lmb;
+	long ret;
+
+	lmb_init(&lmb);
+
+	ret = lmb_add(&lmb, ram, ram_size);
+	ut_asserteq(ret, 0);
+
+	ret = lmb_reserve(&lmb, 0x40010000, 0x10000);
+	ut_asserteq(ret, 0);
+	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
+		   0, 0, 0, 0);
+	/* reserving an overlapping region must fail */
+	ret = lmb_reserve(&lmb, 0x40011000, 0x10000);
+	ut_asserteq(ret, -1);
+	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
+		   0, 0, 0, 0);
+	/* reserve a 3rd region */
+	ret = lmb_reserve(&lmb, 0x40030000, 0x10000);
+	ut_asserteq(ret, 0);
+	ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40010000, 0x10000,
+		   0x40030000, 0x10000, 0, 0);
+	/* reserve the 2nd region in between; it coalesces all three into one */
+	ret = lmb_reserve(&lmb, 0x40020000, 0x10000);
+	ut_assert(ret >= 0);
+	ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x30000,
+		   0, 0, 0, 0);
+
+	return 0;
+}
+
+DM_TEST(lib_test_lmb_overlapping_reserve,
+	DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);