lmb: Add basic io_lmb functionality

These functions can be used with struct lmb pointers and will be used to
manage IOVA space in the apple_dart iommu driver. This restores part of
the pointer-based struct lmb API from before commit ed17a33fed ("lmb:
make LMB memory map persistent and global").
io_lmb_add() and io_lmb_free() can trivially reuse existing lmb
functions. io_lmb_setup() is kept separate so it can emit distinct error
log messages. io_lmb_alloc() is a simplified copy of _lmb_alloc_base(),
since the latter has unused features and uses the global LMB memory map
internally.
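
For illustration, here is a minimal usage sketch of the new API as a
hypothetical IOMMU driver could use it; the IOVA window, function names and
error handling are assumptions made for the example, not code from the
apple_dart driver.

#include <lmb.h>
#include <linux/errno.h>

/* Illustrative only: a private IO LMB instance managing a made-up window. */
static struct lmb iova_lmb;

static int iova_space_init(void)
{
        int err = io_lmb_setup(&iova_lmb);

        if (err)
                return err;

        /* Hand a hypothetical 256 MiB IOVA window to the allocator. */
        if (io_lmb_add(&iova_lmb, 0x10000000, 0x10000000) < 0) {
                io_lmb_teardown(&iova_lmb);
                return -ENOMEM;
        }

        return 0;
}

static phys_addr_t iova_alloc_page(void)
{
        /* 4 KiB size and alignment; a return value of 0 means no space. */
        return io_lmb_alloc(&iova_lmb, 0x1000, 0x1000);
}

static void iova_free_page(phys_addr_t iova)
{
        io_lmb_free(&iova_lmb, iova, 0x1000);
}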

Signed-off-by: Janne Grunau <j@jannau.net>
Author: Janne Grunau, 2024-11-11 07:56:33 +01:00 (committed by Tom Rini)
parent 174f53d2f2
commit f6999cb554
2 changed files with 131 additions and 0 deletions


@@ -156,6 +156,57 @@ static inline int lmb_read_check(phys_addr_t addr, phys_size_t len)
        return lmb_alloc_addr(addr, len) == addr ? 0 : -1;
}

/**
 * io_lmb_setup() - Initialize LMB struct
 * @io_lmb: IO LMB to initialize
 *
 * Returns: 0 on success, negative error code on failure
 */
int io_lmb_setup(struct lmb *io_lmb);

/**
 * io_lmb_teardown() - Tear down LMB struct
 * @io_lmb: IO LMB to tear down
 */
void io_lmb_teardown(struct lmb *io_lmb);

/**
 * io_lmb_add() - Add an IOVA range for allocations
 * @io_lmb: LMB to add the space to
 * @base: Base Address of region to add
 * @size: Size of the region to add
 *
 * Add the IOVA space [base, base + size] to be managed by io_lmb.
 *
 * Returns: 0 if the region addition was successful, -1 on failure
 */
long io_lmb_add(struct lmb *io_lmb, phys_addr_t base, phys_size_t size);

/**
 * io_lmb_alloc() - Allocate IO memory with the specified alignment
 * @io_lmb: LMB to alloc from
 * @size: Size of the region requested
 * @align: Required address and size alignment
 *
 * Allocate a region of IO memory of @size bytes, aligned to @align, from the
 * IOVA space managed by @io_lmb.
 *
 * Return: base IO address on success, 0 on error
 */
phys_addr_t io_lmb_alloc(struct lmb *io_lmb, phys_size_t size, ulong align);

/**
 * io_lmb_free() - Free up a region of IOVA space
 * @io_lmb: LMB to return the IO address space to
 * @base: Base Address of region to be freed
 * @size: Size of the region to be freed
 *
 * Free up a region of IOVA space.
 *
 * Return: 0 if successful, -1 on failure
 */
long io_lmb_free(struct lmb *io_lmb, phys_addr_t base, phys_size_t size);

#endif /* __KERNEL__ */
#endif /* _LINUX_LMB_H */
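
To make the allocation semantics concrete, here is a small worked example with
made-up values; the expected addresses follow the implementation below, which
carves allocations out of the top of the free IOVA range and aligns them down.

static void iova_alloc_example(void)
{
        struct lmb iova;
        phys_addr_t a, b;

        io_lmb_setup(&iova);
        io_lmb_add(&iova, 0x1000, 0x10000);     /* free IOVA space: [0x1000, 0x11000) */

        a = io_lmb_alloc(&iova, 0x2000, 0x1000);
        /* a == 0xf000: lmb_align_down(0x11000 - 0x2000, 0x1000); [0xf000, 0x11000) is now used */

        b = io_lmb_alloc(&iova, 0x2000, 0x1000);
        /* b == 0xd000: the next aligned fit below the first allocation */

        io_lmb_free(&iova, b, 0x2000);          /* [0xd000, 0xf000) can be handed out again */
        io_lmb_teardown(&iova);
}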


@@ -351,6 +351,86 @@ static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
        return addr & ~(size - 1);
}

/*
 * IOVA LMB memory maps using lmb pointers instead of the global LMB memory map.
 */

int io_lmb_setup(struct lmb *io_lmb)
{
        int ret;

        ret = alist_init(&io_lmb->free_mem, sizeof(struct lmb_region),
                         (uint)LMB_ALIST_INITIAL_SIZE);
        if (!ret) {
                log_debug("Unable to initialise the list for LMB free IOVA\n");
                return -ENOMEM;
        }

        ret = alist_init(&io_lmb->used_mem, sizeof(struct lmb_region),
                         (uint)LMB_ALIST_INITIAL_SIZE);
        if (!ret) {
                log_debug("Unable to initialise the list for LMB used IOVA\n");
                return -ENOMEM;
        }

        io_lmb->test = false;

        return 0;
}

void io_lmb_teardown(struct lmb *io_lmb)
{
        alist_uninit(&io_lmb->free_mem);
        alist_uninit(&io_lmb->used_mem);
}

long io_lmb_add(struct lmb *io_lmb, phys_addr_t base, phys_size_t size)
{
        return lmb_add_region_flags(&io_lmb->free_mem, base, size, LMB_NONE);
}

/* derived and simplified from _lmb_alloc_base() */
phys_addr_t io_lmb_alloc(struct lmb *io_lmb, phys_size_t size, ulong align)
{
        long i, rgn;
        phys_addr_t base = 0;
        phys_addr_t res_base;
        struct lmb_region *lmb_used = io_lmb->used_mem.data;
        struct lmb_region *lmb_memory = io_lmb->free_mem.data;

        for (i = io_lmb->free_mem.count - 1; i >= 0; i--) {
                phys_addr_t lmbbase = lmb_memory[i].base;
                phys_size_t lmbsize = lmb_memory[i].size;

                if (lmbsize < size)
                        continue;
                base = lmb_align_down(lmbbase + lmbsize - size, align);

                while (base && lmbbase <= base) {
                        rgn = lmb_overlaps_region(&io_lmb->used_mem, base, size);
                        if (rgn < 0) {
                                /* This area isn't reserved, take it */
                                if (lmb_add_region_flags(&io_lmb->used_mem, base,
                                                         size, LMB_NONE) < 0)
                                        return 0;

                                return base;
                        }

                        res_base = lmb_used[rgn].base;
                        if (res_base < size)
                                break;
                        base = lmb_align_down(res_base - size, align);
                }
        }
        return 0;
}

long io_lmb_free(struct lmb *io_lmb, phys_addr_t base, phys_size_t size)
{
        return _lmb_free(&io_lmb->used_mem, base, size);
}

/*
 * Low level LMB functions are used to manage IOVA memory maps for the Apple
 * dart iommu. They must not access the global LMB memory map.