Commit 186a25026c
The commit f4780ca005 moves swiotlb initialization before dma32_free_bootmem(). It's supposed to fix a bug that commit 75f1cdf1dd introduced: we initialized SWIOTLB right after dma32_free_bootmem(), so we wrongly stole the memory area allocated earlier for GART on machines with a broken BIOS. However, the above commit introduced another problem, which likely breaks machines with a huge amount of memory: such a box uses the majority of DMA32_ZONE, so there is no memory left for swiotlb.

With this patch, the x86 IOMMU initialization sequence is:

1. We set swiotlb to 1 in the case of (max_pfn > MAX_DMA32_PFN && !no_iommu). If swiotlb usage is forced by the boot option, we go to step 3 and finish (we don't try to detect IOMMUs).

2. We call the detection functions of all the IOMMUs. The detection function sets x86_init.iommu.iommu_init to the IOMMU initialization function (so we can avoid calling the initialization functions of all the IOMMUs needlessly).

3. We initialize swiotlb (and set dma_ops to swiotlb_dma_ops) if swiotlb is set to 1.

4. If the IOMMU initialization function doesn't need swiotlb (e.g. the initialization was successful), it sets swiotlb to zero.

5. If we find that swiotlb is set to zero, we free the swiotlb resources.

Reported-by: Yinghai Lu <yinghai@kernel.org>
Reported-by: Roland Dreier <rdreier@cisco.com>
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
LKML-Reference: <20091215204729A.fujita.tomonori@lab.ntt.co.jp>
Tested-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
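The ordering above can be pictured as a single early-boot hook. The pci_iommu_alloc() shape and the individual detect routines named below (gart_iommu_hole_init, detect_calgary, detect_intel_iommu, amd_iommu_detect) are assumptions used for illustration, not a quote of the patch; only pci_swiotlb_detect(), pci_swiotlb_init() and dma32_free_bootmem() come from this change and its description.

/*
 * Illustrative sketch of steps 1-3 (not the literal patch): detect
 * first, let the IOMMU probes pick an init routine, then bring up
 * swiotlb if it is still wanted.
 */
void __init pci_iommu_alloc(void)
{
	/* free the reserved low range so an IOMMU can grab memory below 4G */
	dma32_free_bootmem();

	/* Step 1: decide whether swiotlb is needed; a forced swiotlb
	 * skips IOMMU detection entirely and jumps straight to step 3. */
	if (pci_swiotlb_detect())
		goto out;

	/* Step 2: each probe may set x86_init.iommu.iommu_init to its
	 * own initialization routine (assumed names, for illustration). */
	gart_iommu_hole_init();
	detect_calgary();
	detect_intel_iommu();
	amd_iommu_detect();
out:
	/* Step 3: allocate the bounce buffers and install swiotlb_dma_ops
	 * if the swiotlb flag is still set. */
	pci_swiotlb_init();
}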
73 lines
1.8 KiB
C
/* Glue code to lib/swiotlb.c */

#include <linux/pci.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/swiotlb.h>
#include <linux/bootmem.h>
#include <linux/dma-mapping.h>

#include <asm/iommu.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>

int swiotlb __read_mostly;

static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
					dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;

	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
	if (vaddr)
		return vaddr;

	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
}

static struct dma_map_ops swiotlb_dma_ops = {
	.mapping_error = swiotlb_dma_mapping_error,
	.alloc_coherent = x86_swiotlb_alloc_coherent,
	.free_coherent = swiotlb_free_coherent,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.dma_supported = NULL,
};

/*
 * pci_swiotlb_detect - set swiotlb to 1 if necessary
 *
 * This returns non-zero if we are forced to use swiotlb (by the boot
 * option).
 */
int __init pci_swiotlb_detect(void)
{
	int use_swiotlb = swiotlb | swiotlb_force;

	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
#ifdef CONFIG_X86_64
	if (!no_iommu && max_pfn > MAX_DMA32_PFN)
		swiotlb = 1;
#endif
	if (swiotlb_force)
		swiotlb = 1;

	return use_swiotlb;
}

void __init pci_swiotlb_init(void)
{
	if (swiotlb) {
		swiotlb_init(0);
		dma_ops = &swiotlb_dma_ops;
	}
}
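Steps 4 and 5 of the sequence happen much later, once the chosen IOMMU driver has run. Below is a minimal sketch of that late stage, assuming a rootfs-level initcall in pci-dma.c and a swiotlb_free() helper that releases the bounce buffers; both are assumptions for illustration and are not part of the file above.

/*
 * Sketch of the late stage (steps 4-5), under the assumptions stated
 * in the lead-in.
 */
static int __init pci_iommu_init(void)
{
	/* Step 4: run the init routine chosen during detection; on
	 * success it is expected to clear the global 'swiotlb' flag. */
	x86_init.iommu.iommu_init();

	/* Step 5: nobody needs bounce buffering, so release the buffers. */
	if (!swiotlb)
		swiotlb_free();

	return 0;
}
/* runs after the IOMMU detection/initialization has had its chance */
rootfs_initcall(pci_iommu_init);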