Merge branch 'for-linus-5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb

Pull swiotlb updates from Konrad Rzeszutek Wilk:
 "One compiler fix, and a bug-fix in swiotlb_nr_tbl() and
  swiotlb_max_segment() to check also for no_iotlb_memory"

* 'for-linus-5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb:
  swiotlb: fix phys_addr_t overflow warning
  swiotlb: Return consistent SWIOTLB segments/nr_tbl
  swiotlb: Group identical cleanup in swiotlb_cleanup()
commit d4df33b0e9
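The "phys_addr_t overflow warning" fix in this pull boils down to casting DMA_MAPPING_ERROR to phys_addr_t before returning or comparing it. Below is a minimal standalone sketch (not kernel code) of why the cast matters; it assumes a configuration where dma_addr_t is wider than phys_addr_t, and the typedefs and the DMA_MAPPING_ERROR macro are local stand-ins modelled on the kernel's ~(dma_addr_t)0 definition.

/* Standalone sketch: comparing a phys_addr_t against the raw 64-bit
 * DMA_MAPPING_ERROR constant when phys_addr_t is only 32 bits wide. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;   /* assumed 64-bit DMA addresses */
typedef uint32_t phys_addr_t;  /* assumed 32-bit physical addresses */

#define DMA_MAPPING_ERROR (~(dma_addr_t)0)

int main(void)
{
        /* The error value stored in a phys_addr_t is truncated to 32 bits. */
        phys_addr_t map = (phys_addr_t)DMA_MAPPING_ERROR;

        /* Without a cast, 'map' is promoted to 64 bits for the comparison,
         * so it can never equal the full 64-bit constant and the failure
         * would go undetected (compilers also warn when the constant is
         * implicitly narrowed, e.g. on a return into phys_addr_t). */
        if (map == DMA_MAPPING_ERROR)
                printf("error detected without cast\n");   /* never reached */

        /* Casting the constant keeps the comparison in phys_addr_t, which is
         * what the swiotlb and xen-swiotlb hunks in this merge switch to. */
        if (map == (phys_addr_t)DMA_MAPPING_ERROR)
                printf("error detected with cast\n");
        return 0;
}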
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -402,7 +402,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 
 	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
 				     attrs);
-	if (map == DMA_MAPPING_ERROR)
+	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;
 
 	dev_addr = xen_phys_to_bus(map);
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -129,15 +129,17 @@ setup_io_tlb_npages(char *str)
 }
 early_param("swiotlb", setup_io_tlb_npages);
 
+static bool no_iotlb_memory;
+
 unsigned long swiotlb_nr_tbl(void)
 {
-	return io_tlb_nslabs;
+	return unlikely(no_iotlb_memory) ? 0 : io_tlb_nslabs;
 }
 EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
 
 unsigned int swiotlb_max_segment(void)
 {
-	return max_segment;
+	return unlikely(no_iotlb_memory) ? 0 : max_segment;
 }
 EXPORT_SYMBOL_GPL(swiotlb_max_segment);
 
@@ -160,8 +162,6 @@ unsigned long swiotlb_size_or_default(void)
 	return size ? size : (IO_TLB_DEFAULT_SIZE);
 }
 
-static bool no_iotlb_memory;
-
 void swiotlb_print_info(void)
 {
 	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
@@ -317,6 +317,14 @@ swiotlb_late_init_with_default_size(size_t default_size)
 	return rc;
 }
 
+static void swiotlb_cleanup(void)
+{
+	io_tlb_end = 0;
+	io_tlb_start = 0;
+	io_tlb_nslabs = 0;
+	max_segment = 0;
+}
+
 int
 swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 {
@@ -367,10 +375,7 @@ cleanup4:
 	                                                 sizeof(int)));
 	io_tlb_list = NULL;
 cleanup3:
-	io_tlb_end = 0;
-	io_tlb_start = 0;
-	io_tlb_nslabs = 0;
-	max_segment = 0;
+	swiotlb_cleanup();
 	return -ENOMEM;
 }
 
@@ -394,10 +399,7 @@ void __init swiotlb_exit(void)
 		memblock_free_late(io_tlb_start,
 				   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
 	}
-	io_tlb_start = 0;
-	io_tlb_end = 0;
-	io_tlb_nslabs = 0;
-	max_segment = 0;
+	swiotlb_cleanup();
 }
 
 /*
@@ -546,7 +548,7 @@ not_found:
 	if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
 		dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
 			 size, io_tlb_nslabs, tmp_io_tlb_used);
-	return DMA_MAPPING_ERROR;
+	return (phys_addr_t)DMA_MAPPING_ERROR;
 found:
 	io_tlb_used += nslots;
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
@@ -664,7 +666,7 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
 	/* Oh well, have to allocate and map a bounce buffer. */
 	*phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
 				       *phys, size, dir, attrs);
-	if (*phys == DMA_MAPPING_ERROR)
+	if (*phys == (phys_addr_t)DMA_MAPPING_ERROR)
 		return false;
 
 	/* Ensure that the address returned is DMA'ble */