
iommu sg: add IOMMU helper functions for the free area management

This adds IOMMU helper functions for free area management.  These
functions take care of the low-level driver's (LLD's) segment boundary limit
on behalf of IOMMUs.  They are useful for IOMMUs that use a bitmap for free
area management.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
FUJITA Tomonori 2008-02-04 22:28:07 -08:00 committed by Linus Torvalds
parent 0c95fdc596
commit 0291df8cc9
3 changed files with 88 additions and 0 deletions

include/linux/iommu-helper.h (new file)

@@ -0,0 +1,7 @@
extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
                                      unsigned long start, unsigned int nr,
                                      unsigned long shift,
                                      unsigned long boundary_size,
                                      unsigned long align_mask);
extern void iommu_area_free(unsigned long *map, unsigned long start,
                            unsigned int nr);

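The two declarations above are the whole public interface. As a rough, hedged illustration of how a caller is expected to use them, the sketch below shows a hypothetical bitmap-backed IOMMU driver; the names IO_PAGE_SHIFT, IO_PAGES, SEG_BOUNDARY, io_map, example_alloc() and example_free(), and the 64KB segment size are invented for the example and are not part of this patch.

/* Hypothetical driver-side usage sketch -- not part of the patch. */
#include <linux/bitops.h>
#include <linux/iommu-helper.h>

#define IO_PAGE_SHIFT	12			/* example: 4KB IOMMU pages */
#define IO_PAGES	(1UL << 16)		/* example: 65536 page slots */
#define SEG_BOUNDARY	(1UL << 16)		/* example: 64KB segment limit */

static unsigned long io_map[IO_PAGES / BITS_PER_LONG];	/* one bit per slot */

/* Allocate 'npages' contiguous slots that never cross a 64KB segment. */
static unsigned long example_alloc(unsigned int npages)
{
	unsigned long boundary_size = SEG_BOUNDARY >> IO_PAGE_SHIFT; /* 16 slots */

	return iommu_area_alloc(io_map, IO_PAGES, 0, npages,
				0,		/* shift: slot 0 is boundary-aligned */
				boundary_size,
				0);		/* align_mask: no extra alignment */
}

static void example_free(unsigned long slot, unsigned int npages)
{
	iommu_area_free(io_map, slot, npages);
}

iommu_area_alloc() returns (unsigned long)-1 when no suitable area is found, so a caller should check for that sentinel before using the returned slot index.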
lib/Makefile

@@ -65,6 +65,7 @@ obj-$(CONFIG_SMP) += pcounter.o
obj-$(CONFIG_AUDIT_GENERIC) += audit.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
lib-$(CONFIG_GENERIC_BUG) += bug.o

lib/iommu-helper.c (new file, 80 lines)

@@ -0,0 +1,80 @@
/*
 * IOMMU helper functions for the free area management
 */

#include <linux/module.h>
#include <linux/bitops.h>

static unsigned long find_next_zero_area(unsigned long *map,
					 unsigned long size,
					 unsigned long start,
					 unsigned int nr,
					 unsigned long align_mask)
{
	unsigned long index, end, i;
again:
	index = find_next_zero_bit(map, size, start);

	/* Align allocation */
	index = (index + align_mask) & ~align_mask;

	end = index + nr;
	if (end >= size)
		return -1;
	for (i = index; i < end; i++) {
		if (test_bit(i, map)) {
			start = i+1;
			goto again;
		}
	}
	return index;
}

static inline void set_bit_area(unsigned long *map, unsigned long i,
				int len)
{
	unsigned long end = i + len;
	while (i < end) {
		__set_bit(i, map);
		i++;
	}
}

static inline int is_span_boundary(unsigned int index, unsigned int nr,
				   unsigned long shift,
				   unsigned long boundary_size)
{
	shift = (shift + index) & (boundary_size - 1);
	return shift + nr > boundary_size;
}

unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
			       unsigned long start, unsigned int nr,
			       unsigned long shift, unsigned long boundary_size,
			       unsigned long align_mask)
{
	unsigned long index;
again:
	index = find_next_zero_area(map, size, start, nr, align_mask);
	if (index != -1) {
		if (is_span_boundary(index, nr, shift, boundary_size)) {
			/* we could do more effectively */
			start = index + 1;
			goto again;
		}
		set_bit_area(map, index, nr);
	}
	return index;
}
EXPORT_SYMBOL(iommu_area_alloc);

void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
{
	unsigned long end = start + nr;

	while (start < end) {
		__clear_bit(start, map);
		start++;
	}
}
EXPORT_SYMBOL(iommu_area_free);
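
To make the boundary handling concrete, here is a hedged, stand-alone restatement of the is_span_boundary() arithmetic in ordinary user-space C; the function name and values below are illustrative only, not part of the patch. The mask trick assumes boundary_size is a power of two: with 16-slot segments, a 4-slot area starting at slot 14 would straddle the boundary at slot 16 ((14 & 15) + 4 = 18 > 16), so iommu_area_alloc() retries from slot 15, while the same area starting at slot 16 fits.

/* Illustrative re-statement of the boundary test; not kernel code. */
#include <stdio.h>

static int spans_boundary(unsigned int index, unsigned int nr,
			  unsigned long shift, unsigned long boundary_size)
{
	/* Offset of the candidate area within its segment, plus its length,
	 * must not exceed the segment size (boundary_size, a power of two). */
	shift = (shift + index) & (boundary_size - 1);
	return shift + nr > boundary_size;
}

int main(void)
{
	printf("%d\n", spans_boundary(14, 4, 0, 16));	/* 1: slots 14..17 cross slot 16 */
	printf("%d\n", spans_boundary(16, 4, 0, 16));	/* 0: slots 16..19 stay in one segment */
	return 0;
}

The shift argument exists for bitmaps whose slot 0 does not itself sit on a segment boundary; passing slot 0's offset (measured in slots) keeps the check correct in that case, and 0 is used in the examples above for simplicity.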