PCI/P2PDMA: Add PCI p2pmem DMA mappings to adjust the bus offset

The DMA address used when mapping PCI P2P memory must be the PCI bus
address.  Thus, introduce pci_p2pmem_map_sg() to map the correct addresses
when using P2P memory.

Memory mapped in this way does not need to be unmapped, so if we provided
pci_p2pmem_unmap_sg() it would be empty.  This breaks the expected balance
between map and unmap, but an empty function doesn't really provide any
benefit, so it was left out.  In the future, if this call becomes
necessary, it can be added without much difficulty.

For this, we assume that an SGL passed to these functions contains either
all P2P memory or no P2P memory.

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
This commit is contained in:
Logan Gunthorpe 2018-10-04 15:27:37 -06:00 committed by Bjorn Helgaas
parent cbb8ca69fc
commit 977196b8c5
3 changed files with 51 additions and 0 deletions

View File

@ -194,6 +194,8 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
pgmap->res.flags = pci_resource_flags(pdev, bar);
pgmap->ref = &pdev->p2pdma->devmap_ref;
pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) -
pci_resource_start(pdev, bar);
addr = devm_memremap_pages(&pdev->dev, pgmap);
if (IS_ERR(addr)) {
@ -678,3 +680,44 @@ void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
pdev->p2pdma->p2pmem_published = publish;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_publish);
/**
 * pci_p2pdma_map_sg - map a PCI peer-to-peer scatterlist for DMA
 * @dev: device doing the DMA request
 * @sg: scatter list to map
 * @nents: elements in the scatterlist
 * @dir: DMA direction
 *
 * Scatterlists mapped with this function should not be unmapped in any way.
 *
 * Returns the number of SG entries mapped or 0 on error.
 */
int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *sgent;
	int idx;

	/*
	 * p2pdma mappings are not compatible with devices that use
	 * dma_virt_ops. If the upper layers do the right thing
	 * this should never happen because it will be prevented
	 * by the check in pci_p2pdma_add_client()
	 */
	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
			 dev->dma_ops == &dma_virt_ops))
		return 0;

	/*
	 * Each entry's DMA address is its physical address shifted by
	 * the bus offset recorded in the backing dev_pagemap.  The SGL is
	 * assumed to be entirely P2P memory, so every entry carries a
	 * pci_p2pdma_bus_offset.
	 */
	for_each_sg(sg, sgent, nents, idx) {
		struct dev_pagemap *pgmap = sg_page(sgent)->pgmap;

		sgent->dma_address = sg_phys(sgent) -
			pgmap->pci_p2pdma_bus_offset;
		sg_dma_len(sgent) = sgent->length;
	}

	return nents;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg);

View File

@ -125,6 +125,7 @@ struct dev_pagemap {
struct device *dev;
void *data;
enum memory_type type;
u64 pci_p2pdma_bus_offset;
};
#ifdef CONFIG_ZONE_DEVICE

View File

@ -30,6 +30,8 @@ struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
unsigned int *nents, u32 length);
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl);
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish);
int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir);
#else /* CONFIG_PCI_P2PDMA */
static inline int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar,
size_t size, u64 offset)
@ -75,6 +77,11 @@ static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev,
/* CONFIG_PCI_P2PDMA disabled: publishing P2P memory is a no-op. */
static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
}
/*
 * CONFIG_PCI_P2PDMA disabled: no entries can be mapped; returning 0
 * matches the error return of the real implementation.
 */
static inline int pci_p2pdma_map_sg(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir)
{
	return 0;
}
#endif /* CONFIG_PCI_P2PDMA */