
[IA64] swiotlb bug fixes

This patch fixes:
- marking the I-cache clean for pages written to by DMA is now done only on IA64
- a broken multiple-inclusion guard in include/asm-x86_64/swiotlb.h
- a missing call to mark_clean() in swiotlb_sync_sg()
- a (perhaps only theoretical) issue in swiotlb_dma_supported() when
  io_tlb_end is exactly at the end of memory

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Author: Jan Beulich <jbeulich@novell.com>, 2007-02-05 18:46:40 -08:00, committed by Tony Luck
commit cde14bbfb3, parent 86afa9eb88
4 changed files with 33 additions and 28 deletions

arch/ia64/mm/init.c

@@ -129,6 +129,25 @@ lazy_mmu_prot_update (pte_t pte)
 		set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+void
+dma_mark_clean(void *addr, size_t size)
+{
+	unsigned long pg_addr, end;
+
+	pg_addr = PAGE_ALIGN((unsigned long) addr);
+	end = (unsigned long) addr + size;
+	while (pg_addr + PAGE_SIZE <= end) {
+		struct page *page = virt_to_page(pg_addr);
+		set_bit(PG_arch_1, &page->flags);
+		pg_addr += PAGE_SIZE;
+	}
+}
+
 inline void
 ia64_set_rbs_bot (void)
 {
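The helper added above only marks pages that the DMA write covers completely: PAGE_ALIGN() rounds the start up to the first full page, and the loop stops before any page that extends past the end of the buffer, since a partially overwritten page may still hold stale instruction bytes. A minimal user-space sketch of that arithmetic (the PAGE_SIZE value, addresses, and printout are illustrative assumptions, not kernel code):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static void mark_clean_model(unsigned long addr, unsigned long size)
{
	unsigned long pg_addr = PAGE_ALIGN(addr);	/* first wholly covered page */
	unsigned long end = addr + size;

	while (pg_addr + PAGE_SIZE <= end) {	/* page fully inside the buffer? */
		printf("would mark page at 0x%lx\n", pg_addr);
		pg_addr += PAGE_SIZE;
	}
}

int main(void)
{
	/* buffer starts mid-page: the leading and trailing partial pages
	 * are skipped, only 0x2000 and 0x3000 are marked */
	mark_clean_model(0x1800, 3 * PAGE_SIZE);
	return 0;
}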

include/asm-ia64/dma.h

@@ -19,4 +19,6 @@ extern unsigned long MAX_DMA_ADDRESS;
 
 #define free_dma(x)
 
+void dma_mark_clean(void *addr, size_t size);
+
 #endif /* _ASM_IA64_DMA_H */

include/asm-x86_64/swiotlb.h

@@ -1,6 +1,5 @@
 #ifndef _ASM_SWIOTLB_H
-#define _ASM_SWTIOLB_H 1
-
+#define _ASM_SWIOTLB_H 1
 
 #include <asm/dma-mapping.h>
 
@@ -52,4 +51,6 @@ extern int swiotlb;
 
 extern void pci_swiotlb_init(void);
 
-#endif /* _ASM_SWTIOLB_H */
+static inline void dma_mark_clean(void *addr, size_t size) {}
+
+#endif /* _ASM_SWIOTLB_H */
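For context on the guard fix above: the old header tested _ASM_SWIOTLB_H but defined the misspelled _ASM_SWTIOLB_H, so the #ifndef stayed true on every inclusion and the header body was compiled again each time. With the new inline dma_mark_clean() in the body, a second inclusion would now be a redefinition error. A schematic of the failure mode (the body line stands in for the header's contents):

/* broken: tested name and defined name differ */
#ifndef _ASM_SWIOTLB_H
#define _ASM_SWTIOLB_H 1	/* typo: _ASM_SWIOTLB_H is never defined */
static inline void dma_mark_clean(void *addr, size_t size) {}
#endif
/* a later inclusion re-enters the #ifndef and redefines the function */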

lib/swiotlb.c

@@ -557,25 +557,6 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 	return dev_addr;
 }
 
-/*
- * Since DMA is i-cache coherent, any (complete) pages that were written via
- * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
- * flush them when they get mapped into an executable vm-area.
- */
-static void
-mark_clean(void *addr, size_t size)
-{
-	unsigned long pg_addr, end;
-
-	pg_addr = PAGE_ALIGN((unsigned long) addr);
-	end = (unsigned long) addr + size;
-	while (pg_addr + PAGE_SIZE <= end) {
-		struct page *page = virt_to_page(pg_addr);
-		set_bit(PG_arch_1, &page->flags);
-		pg_addr += PAGE_SIZE;
-	}
-}
-
 /*
  * Unmap a single streaming mode DMA translation.  The dma_addr and size must
  * match what was provided for in a previous swiotlb_map_single call.  All
@@ -594,7 +575,7 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		unmap_single(hwdev, dma_addr, size, dir);
 	else if (dir == DMA_FROM_DEVICE)
-		mark_clean(dma_addr, size);
+		dma_mark_clean(dma_addr, size);
 }
 
 /*
@@ -617,7 +598,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
-		mark_clean(dma_addr, size);
+		dma_mark_clean(dma_addr, size);
 }
 
 void
@@ -648,7 +629,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
-		mark_clean(dma_addr, size);
+		dma_mark_clean(dma_addr, size);
 }
 
 void
@@ -698,7 +679,6 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 		dev_addr = virt_to_phys(addr);
 		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
 			void *map = map_single(hwdev, addr, sg->length, dir);
-			sg->dma_address = virt_to_bus(map);
 			if (!map) {
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
@@ -707,6 +687,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 				sg[0].dma_length = 0;
 				return 0;
 			}
+			sg->dma_address = virt_to_bus(map);
 		} else
 			sg->dma_address = dev_addr;
 		sg->dma_length = sg->length;
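The reordering above is the point of this hunk: sg->dma_address used to be assigned before the failure check, so virt_to_bus() ran on a NULL pointer whenever map_single() ran out of bounce buffers. A small user-space sketch of the corrected check-before-translate order, with stub helpers standing in for the kernel's (all names and values here are illustrative assumptions):

#include <stdio.h>
#include <stddef.h>

/* stand-ins for map_single() and virt_to_bus() */
static void *map_single_stub(int fail) { return fail ? NULL : (void *)0x2000; }
static unsigned long virt_to_bus_stub(void *v) { return (unsigned long)v; }

int main(void)
{
	void *map = map_single_stub(1);	/* simulate bounce-buffer exhaustion */

	if (!map) {			/* check first, as the patched code does */
		printf("map_single failed; nothing translated\n");
		return 1;
	}
	/* reached only for a valid mapping, so translating is safe */
	printf("dma_address = 0x%lx\n", virt_to_bus_stub(map));
	return 0;
}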
@@ -730,7 +711,7 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
 			unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir);
 		else if (dir == DMA_FROM_DEVICE)
-			mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 }
 
 /*
@@ -752,6 +733,8 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
 			sync_single(hwdev, (void *) sg->dma_address,
 				    sg->dma_length, dir, target);
+		else if (dir == DMA_FROM_DEVICE)
+			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
 }
 
 void
@@ -783,7 +766,7 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 int
 swiotlb_dma_supported (struct device *hwdev, u64 mask)
 {
-	return (virt_to_phys (io_tlb_end) - 1) <= mask;
+	return virt_to_phys(io_tlb_end - 1) <= mask;
 }
 
 EXPORT_SYMBOL(swiotlb_init);
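Finally, the swiotlb_dma_supported() change: the old expression translated io_tlb_end itself, an address one byte past the aperture, and only then subtracted 1. When the aperture ends exactly at the end of memory that translation is not well-defined, which is the theoretical issue the commit message mentions; the fix translates the last valid byte instead. A user-space sketch under an assumed toy model where physical memory ends at 4 GiB and the translation wraps (everything below is illustrative, not the kernel's actual behavior):

#include <stdio.h>
#include <stdint.h>

#define MEM_END 0x100000000ULL	/* one past the last physical byte */

/* toy translation: identity map that wraps at the end of memory */
static uint64_t virt_to_phys_model(uint64_t virt)
{
	return virt % MEM_END;
}

int main(void)
{
	uint64_t io_tlb_end = MEM_END;	/* aperture ends at end of memory */
	uint64_t mask = 0xffffffffULL;	/* 32-bit DMA mask */

	/* old: translating the one-past-the-end address wraps to 0, the
	 * subtraction underflows, and the device is wrongly rejected */
	printf("old: %d\n", (virt_to_phys_model(io_tlb_end) - 1) <= mask);	/* 0 */

	/* new: translate the last valid byte, which stays in range */
	printf("new: %d\n", virt_to_phys_model(io_tlb_end - 1) <= mask);	/* 1 */
	return 0;
}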