x86: Allow adding non-aligned size for MTRR

At present mtrr_add_request() requires that the size be a power of two.
This is too limiting for machines with 4GB (or more) of RAM, since they
often must take account of a memory hole at 3GB.

Update the function to automatically deal with an unaligned size, using
more MTRRs as required.

The algorithm is taken from coreboot commit 60bce10750.
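
As a worked example of the splitting, take the case the commit message
describes: covering the 3GB of RAM that sits below a memory hole at 3GB.
The hosted C sketch below is not the U-Boot code itself; it only mirrors
the coreboot-derived rule (use the largest power-of-two chunk that fits the
remaining size and keeps the base aligned) and prints the requests that
result. The helper name split() is made up for illustration.

    /* Standalone sketch (not U-Boot code) of the MTRR splitting rule */
    #include <stdint.h>
    #include <stdio.h>

    static void split(uint64_t base, uint64_t size)
    {
            while (size) {
                    /* largest power of two not exceeding the remaining size */
                    uint64_t chunk = 1ULL << (63 - __builtin_clzll(size));
                    /* lowest set bit of base: a non-zero base must stay
                     * aligned to the chunk placed at it
                     */
                    uint64_t align = base & -base;

                    if (base && align < chunk)
                            chunk = align;
                    printf("MTRR request: base %#llx, size %#llx\n",
                           (unsigned long long)base,
                           (unsigned long long)chunk);
                    base += chunk;
                    size -= chunk;
            }
    }

    int main(void)
    {
            /* 3GB at address 0 is not a power of two: becomes 2GB + 1GB */
            split(0, 0xc0000000ULL);

            return 0;
    }

Run as-is, this prints a 2GB request at 0 followed by a 1GB request at
0x80000000, which is the kind of pair the new loop records in
gd->arch.mtrr_req.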

Signed-off-by: Simon Glass <sjg@chromium.org>

commit 66c9ade35b (parent a9d106421d)
Author: Simon Glass <sjg@chromium.org>, 2025-03-15 14:25:46 +00:00
Committed by: Tom Rini

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0+
 /*
  * (C) Copyright 2014 Google, Inc
+ * Portions added from coreboot
  *
  * Memory Type Range Regsters - these are used to tell the CPU whether
  * memory is cacheable and if so the cache write mode to use.
@@ -204,29 +205,80 @@ int mtrr_commit(bool do_caches)
         return 0;
 }
 
-int mtrr_add_request(int type, uint64_t start, uint64_t size)
+/* fms: find most significant bit set (from Linux) */
+static inline uint fms(uint val)
+{
+        uint ret;
+
+        __asm__("bsrl %1,%0\n\t"
+                "jnz 1f\n\t"
+                "movl $0,%0\n"
+                "1:" : "=r" (ret) : "mr" (val));
+
+        return ret;
+}
+
+/*
+ * fms64: find most significant bit set in a 64-bit word
+ * As samples, fms64(0x0) = 0; fms64(0x4400) = 14;
+ * fms64(0x40400000000) = 42.
+ */
+static uint fms64(uint64_t val)
+{
+        u32 hi = (u32)(val >> 32);
+
+        if (!hi)
+                return fms((u32)val);
+
+        return fms(hi) + 32;
+}
+
+int mtrr_add_request(int type, u64 base, uint64_t size)
 {
         struct mtrr_request *req;
-        uint64_t mask;
+        u64 mask;
 
         debug("%s: count=%d\n", __func__, gd->arch.mtrr_req_count);
         if (!gd->arch.has_mtrr)
                 return -ENOSYS;
 
-        if (!is_power_of_2(size))
-                return -EINVAL;
+        while (size) {
+                uint addr_lsb;
+                uint size_msb;
+                u64 mtrr_size;
 
-        if (gd->arch.mtrr_req_count == MAX_MTRR_REQUESTS)
-                return -ENOSPC;
-        req = &gd->arch.mtrr_req[gd->arch.mtrr_req_count++];
-        req->type = type;
-        req->start = start;
-        req->size = size;
-        debug("%d: type=%d, %08llx %08llx\n", gd->arch.mtrr_req_count - 1,
-              req->type, req->start, req->size);
-        mask = mtrr_to_mask(req->size);
-        mask |= MTRR_PHYS_MASK_VALID;
-        debug("   %016llx %016llx\n", req->start | req->type, mask);
+                addr_lsb = fls64(base);
+                size_msb = fms64(size);
+
+                /*
+                 * All MTRR entries need to have their base aligned to the
+                 * mask size. The maximum size is calculated by a function of
+                 * the min base bit set and maximum size bit set.
+                 * Algorithm is from coreboot
+                 */
+                if (!addr_lsb || addr_lsb > size_msb)
+                        mtrr_size = 1ull << size_msb;
+                else
+                        mtrr_size = 1ull << addr_lsb;
+                log_debug("addr_lsb %x size_msb %x mtrr_size %llx\n",
+                          addr_lsb, size_msb, mtrr_size);
+
+                if (gd->arch.mtrr_req_count == MAX_MTRR_REQUESTS)
+                        return -ENOSPC;
+                req = &gd->arch.mtrr_req[gd->arch.mtrr_req_count++];
+                req->type = type;
+                req->start = base;
+                req->size = mtrr_size;
+                log_debug("%d: type=%d, %08llx %08llx ",
+                          gd->arch.mtrr_req_count - 1, req->type, req->start,
+                          req->size);
+                mask = mtrr_to_mask(req->size);
+                mask |= MTRR_PHYS_MASK_VALID;
+                log_debug("   %016llx %016llx\n", req->start | req->type, mask);
+
+                size -= mtrr_size;
+                base += mtrr_size;
+        }
 
         return 0;
 }
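
A note on the helpers introduced above: fms() and fms64() return the bit
index of the most significant set bit, and 0 for an input of 0, as the
samples in the fms64() comment show. The snippet below is a portable
stand-in written for illustration, not the U-Boot implementation, and just
checks those sample values:

    #include <assert.h>
    #include <stdint.h>

    /* portable stand-in (assumption), not U-Boot's fms64() */
    static unsigned int fms64_portable(uint64_t val)
    {
            return val ? 63 - __builtin_clzll(val) : 0;
    }

    int main(void)
    {
            assert(fms64_portable(0x0) == 0);
            assert(fms64_portable(0x4400) == 14);
            assert(fms64_portable(0x40400000000ULL) == 42);

            return 0;
    }

With the while loop in place a caller no longer has to pass a power-of-two
size. For example (assuming MTRR_TYPE_WRBACK from U-Boot's <asm/mtrr.h>):

    mtrr_add_request(MTRR_TYPE_WRBACK, 0, 0xc0000000);  /* 3GB */

previously failed with -EINVAL; it now queues a 2GB request at 0 and a 1GB
request at 0x80000000.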