1/ Introduce ZONE_DEVICE and devm_memremap_pages() as a generic mechanism
   for adding device-driver-discovered memory regions to the kernel's direct
   map.  This facility is used by the pmem driver to enable pfn_to_page()
   operations on the page frames returned by DAX ('direct_access' in
   'struct block_device_operations').  For now, the 'memmap' allocation for
   these "device" pages comes from "System RAM".  Support for allocating the
   memmap from device memory will arrive in a later kernel.

2/ Introduce memremap() to replace usages of ioremap_cache() and
   ioremap_wt().  memremap() drops the __iomem annotation for these mappings
   to memory that do not have i/o side effects.  The replacement of
   ioremap_cache() with memremap() is limited to the pmem driver to ease
   merging the api change in v4.3.  Completion of the conversion is targeted
   for v4.4.

3/ Similar to the usage of memcpy_to_pmem() + wmb_pmem() in the pmem driver,
   update the VFS DAX implementation and PMEM api to provide persistence
   guarantees for kernel operations on a DAX mapping.

4/ Convert the ACPI NFIT 'BLK' driver to map the block apertures as
   cacheable to improve performance.

5/ Miscellaneous updates and fixes to libnvdimm including support for
   issuing "address range scrub" commands, clarifying the optimal 'sector
   size' of pmem devices, a clarification of the usage of the ACPI '_STA'
   (status) property for DIMM devices, and other minor fixes.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJV6Nx7AAoJEB7SkWpmfYgCWyYQAI5ju6Gvw27RNFtPovHcZUf5
JGnxXejI6/AqeTQ+IulgprxtEUCrXOHjCDA5dkjr1qvsoqK1qxug+vJHOZLgeW0R
OwDtmdW4Qrgeqm+CPoxETkorJ8wDOc8mol81kTiMgeV3UqbYeeHIiTAmwe7VzZ0C
nNdCRDm5g8dHCjTKcvK3rvozgyoNoWeBiHkPe76EbnxDICxCB5dak7XsVKNMIVFQ
NuYlnw6IYN7+rMHgpgpRux38NtIW8VlYPWTmHExejc2mlioWMNBG/bmtwLyJ6M3e
zliz4/cnonTMUaizZaVozyinTa65m7wcnpjK+vlyGV2deDZPJpDRvSOtB0lH30bR
1gy+qrKzuGKpaN6thOISxFLLjmEeYwzYd7SvC9n118r32qShz+opN9XX0WmWSFlA
sajE1ehm4M7s5pkMoa/dRnAyR8RUPu4RNINdQ/Z9jFfAOx+Q26rLdQXwf9+uqbEb
bIeSQwOteK5vYYCstvpAcHSMlJAglzIX5UfZBvtEIJN7rlb0VhmGWfxAnTu+ktG1
o9cqAt+J4146xHaFwj5duTsyKhWb8BL9+xqbKPNpXEp+PbLsrnE/+WkDLFD67jxz
dgIoK60mGnVXp+16I2uMqYYDgAyO5zUdmM4OygOMnZNa1mxesjbDJC6Wat1Wsndn
slsw6DkrWT60CRE42nbK
=o57/
-----END PGP SIGNATURE-----

Merge tag 'libnvdimm-for-4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull libnvdimm updates from Dan Williams:
 "This update has successfully completed a 0day-kbuild run and has
  appeared in a linux-next release.  The changes outside of the typical
  drivers/nvdimm/ and drivers/acpi/nfit.[ch] paths are related to the
  removal of IORESOURCE_CACHEABLE, the introduction of memremap(), and
  the introduction of ZONE_DEVICE + devm_memremap_pages().

  Summary:

   - Introduce ZONE_DEVICE and devm_memremap_pages() as a generic
     mechanism for adding device-driver-discovered memory regions to the
     kernel's direct map.  This facility is used by the pmem driver to
     enable pfn_to_page() operations on the page frames returned by DAX
     ('direct_access' in 'struct block_device_operations').  For now,
     the 'memmap' allocation for these "device" pages comes from
     "System RAM".  Support for allocating the memmap from device memory
     will arrive in a later kernel.

   - Introduce memremap() to replace usages of ioremap_cache() and
     ioremap_wt().  memremap() drops the __iomem annotation for these
     mappings to memory that do not have i/o side effects.  The
     replacement of ioremap_cache() with memremap() is limited to the
     pmem driver to ease merging the api change in v4.3.  Completion of
     the conversion is targeted for v4.4.

   - Similar to the usage of memcpy_to_pmem() + wmb_pmem() in the pmem
     driver, update the VFS DAX implementation and PMEM api to provide
     persistence guarantees for kernel operations on a DAX mapping.

   - Convert the ACPI NFIT 'BLK' driver to map the block apertures as
     cacheable to improve performance.

   - Miscellaneous updates and fixes to libnvdimm including support for
     issuing "address range scrub" commands, clarifying the optimal
     'sector size' of pmem devices, a clarification of the usage of the
     ACPI '_STA' (status) property for DIMM devices, and other minor
     fixes"

* tag 'libnvdimm-for-4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: (34 commits)
  libnvdimm, pmem: direct map legacy pmem by default
  libnvdimm, pmem: 'struct page' for pmem
  libnvdimm, pfn: 'struct page' provider infrastructure
  x86, pmem: clarify that ARCH_HAS_PMEM_API implies PMEM mapped WB
  add devm_memremap_pages
  mm: ZONE_DEVICE for "device memory"
  mm: move __phys_to_pfn and __pfn_to_phys to asm/generic/memory_model.h
  dax: drop size parameter to ->direct_access()
  nd_blk: change aperture mapping from WC to WB
  nvdimm: change to use generic kvfree()
  pmem, dax: have direct_access use __pmem annotation
  dax: update I/O path to do proper PMEM flushing
  pmem: add copy_from_iter_pmem() and clear_pmem()
  pmem, x86: clean up conditional pmem includes
  pmem: remove layer when calling arch_has_wmb_pmem()
  pmem, x86: move x86 PMEM API to new pmem.h header
  libnvdimm, e820: make CONFIG_X86_PMEM_LEGACY a tristate option
  pmem: switch to devm_ allocations
  devres: add devm_memremap
  libnvdimm, btt: write and validate parent_uuid
  ...
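
As a rough sketch of the memremap() conversion described above (illustrative only, not code from this series; the 'res' resource, the helper name and the error handling are made up), a driver that previously used ioremap_cache() on a range with no i/o side effects would now do something like:

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/ioport.h>
    #include <linux/string.h>

    /* Map an ordinary (non-I/O) memory range cacheably; no __iomem needed. */
    static int touch_region(struct resource *res)
    {
            void *addr = memremap(res->start, resource_size(res),
                                  MEMREMAP_WB);   /* MEMREMAP_WT is the ioremap_wt() analogue */

            if (!addr)
                    return -ENOMEM;
            memset(addr, 0, resource_size(res));  /* plain pointer, no ioread/iowrite accessors */
            memunmap(addr);
            return 0;
    }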
/*
 * Implement the default iomap interfaces
 *
 * (C) Copyright 2004 Linus Torvalds
 */
#include <linux/pci.h>
#include <linux/io.h>

#include <linux/export.h>

#ifdef CONFIG_PCI
/**
 * pci_iomap_range - create a virtual mapping cookie for a PCI BAR
 * @dev: PCI device that owns the BAR
 * @bar: BAR number
 * @offset: map memory at the given offset in BAR
 * @maxlen: max length of the memory to map
 *
 * Using this function you will get a __iomem address to your device BAR.
 * You can access it using ioread*() and iowrite*(). These functions hide
 * the details if this is a MMIO or PIO address space and will just do what
 * you expect from them in the correct way.
 *
 * @maxlen specifies the maximum length to map. If you want to get access to
 * the complete BAR from offset to the end, pass %0 here.
 * */
void __iomem *pci_iomap_range(struct pci_dev *dev,
                              int bar,
                              unsigned long offset,
                              unsigned long maxlen)
{
        resource_size_t start = pci_resource_start(dev, bar);
        resource_size_t len = pci_resource_len(dev, bar);
        unsigned long flags = pci_resource_flags(dev, bar);

        if (len <= offset || !start)
                return NULL;
        len -= offset;
        start += offset;
        if (maxlen && len > maxlen)
                len = maxlen;
        if (flags & IORESOURCE_IO)
                return __pci_ioport_map(dev, start, len);
        if (flags & IORESOURCE_MEM)
                return ioremap(start, len);
        /* What? */
        return NULL;
}
EXPORT_SYMBOL(pci_iomap_range);
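
/*
 * Example (not from this file; the BAR number, length and register offset
 * below are made up for illustration): a driver that only needs the first
 * 256 bytes of BAR 2 could map just that window in its probe routine:
 *
 *	void __iomem *regs = pci_iomap_range(pdev, 2, 0, 256);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	iowrite32(0x1, regs + 0x10);
 *	...
 *	pci_iounmap(pdev, regs);
 */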

/**
 * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR
 * @dev: PCI device that owns the BAR
 * @bar: BAR number
 * @offset: map memory at the given offset in BAR
 * @maxlen: max length of the memory to map
 *
 * Using this function you will get a __iomem address to your device BAR.
 * You can access it using ioread*() and iowrite*(). These functions hide
 * the details if this is a MMIO or PIO address space and will just do what
 * you expect from them in the correct way. When possible write combining
 * is used.
 *
 * @maxlen specifies the maximum length to map. If you want to get access to
 * the complete BAR from offset to the end, pass %0 here.
 * */
void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
                                 int bar,
                                 unsigned long offset,
                                 unsigned long maxlen)
{
        resource_size_t start = pci_resource_start(dev, bar);
        resource_size_t len = pci_resource_len(dev, bar);
        unsigned long flags = pci_resource_flags(dev, bar);

        if (flags & IORESOURCE_IO)
                return NULL;

        if (len <= offset || !start)
                return NULL;

        len -= offset;
        start += offset;
        if (maxlen && len > maxlen)
                len = maxlen;

        if (flags & IORESOURCE_MEM)
                return ioremap_wc(start, len);

        /* What? */
        return NULL;
}
EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
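
/*
 * Example (illustrative only; the BAR number is hypothetical): a driver
 * with a prefetchable frame-buffer style BAR might map it write-combined.
 * WC is only safe where the device tolerates posted/combined writes.
 *
 *	void __iomem *fb = pci_iomap_wc_range(pdev, 1, 0, 0);
 *
 *	if (!fb)
 *		return -ENOMEM;
 *	memset_io(fb, 0, pci_resource_len(pdev, 1));
 *	...
 *	pci_iounmap(pdev, fb);
 */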

/**
 * pci_iomap - create a virtual mapping cookie for a PCI BAR
 * @dev: PCI device that owns the BAR
 * @bar: BAR number
 * @maxlen: length of the memory to map
 *
 * Using this function you will get a __iomem address to your device BAR.
 * You can access it using ioread*() and iowrite*(). These functions hide
 * the details if this is a MMIO or PIO address space and will just do what
 * you expect from them in the correct way.
 *
 * @maxlen specifies the maximum length to map. If you want to get access to
 * the complete BAR without checking for its length first, pass %0 here.
 * */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
        return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);
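
/*
 * Example (illustrative only; STATUS_REG is a made-up register offset):
 * pci_iomap() covers the common case of mapping a whole BAR from offset 0;
 * passing 0 as @maxlen maps its full length.
 *
 *	void __iomem *mmio = pci_iomap(pdev, 0, 0);
 *
 *	if (!mmio)
 *		return -ENOMEM;
 *	val = ioread32(mmio + STATUS_REG);
 *	pci_iounmap(pdev, mmio);
 */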

/**
 * pci_iomap_wc - create a virtual WC mapping cookie for a PCI BAR
 * @dev: PCI device that owns the BAR
 * @bar: BAR number
 * @maxlen: length of the memory to map
 *
 * Using this function you will get a __iomem address to your device BAR.
 * You can access it using ioread*() and iowrite*(). These functions hide
 * the details if this is a MMIO or PIO address space and will just do what
 * you expect from them in the correct way. When possible write combining
 * is used.
 *
 * @maxlen specifies the maximum length to map. If you want to get access to
 * the complete BAR without checking for its length first, pass %0 here.
 * */
void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
        return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL_GPL(pci_iomap_wc);
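
/*
 * Example (illustrative only; the BAR number is hypothetical): a driver
 * might prefer a write-combined mapping for a prefetchable BAR and fall
 * back to a regular mapping otherwise, e.g.:
 *
 *	if (pci_resource_flags(pdev, 1) & IORESOURCE_PREFETCH)
 *		fb = pci_iomap_wc(pdev, 1, 0);
 *	else
 *		fb = pci_iomap(pdev, 1, 0);
 */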
#endif /* CONFIG_PCI */