
NVMe: Group pci related actions in functions

This will make it easier to reuse these outside probe/remove.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
Authored by Keith Busch on 2013-07-15 15:02:19 -06:00; committed by Matthew Wilcox
parent 9e59d091b0
commit 0877cb0d28
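
To illustrate the reuse the changelog has in mind, here is a rough sketch (not part of this patch; nvme_dev_suspend()/nvme_dev_resume() are hypothetical names) of how a power-management path could tear down and re-establish the PCI state with the new helpers:

/*
 * Illustrative sketch only -- not from this commit.  It shows how the
 * new helpers could be reused outside probe/remove, e.g. from
 * suspend/resume callbacks.  Queue quiescing is omitted for brevity.
 */
static int nvme_dev_suspend(struct nvme_dev *dev)
{
        /* stop using the device, then drop BAR mapping and PCI state */
        nvme_dev_unmap(dev);
        return 0;
}

static int nvme_dev_resume(struct nvme_dev *dev)
{
        int result;

        /* re-enable the PCI device, DMA masks and doorbell BAR mapping */
        result = nvme_dev_map(dev);
        if (result)
                return result;

        /* bring the admin queue back up against the remapped registers */
        return nvme_configure_admin_queue(dev);
}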


@@ -1191,9 +1191,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
         u64 cap = readq(&dev->bar->cap);
         struct nvme_queue *nvmeq;
 
-        dev->dbs = ((void __iomem *)dev->bar) + 4096;
-        dev->db_stride = NVME_CAP_STRIDE(cap);
-
         result = nvme_disable_ctrl(dev, cap);
         if (result < 0)
                 return result;
@@ -1832,6 +1829,61 @@ static int nvme_dev_add(struct nvme_dev *dev)
         return res;
 }
 
+static int nvme_dev_map(struct nvme_dev *dev)
+{
+        int bars, result = -ENOMEM;
+        struct pci_dev *pdev = dev->pci_dev;
+
+        if (pci_enable_device_mem(pdev))
+                return result;
+
+        dev->entry[0].vector = pdev->irq;
+        pci_set_master(pdev);
+        bars = pci_select_bars(pdev, IORESOURCE_MEM);
+        if (pci_request_selected_regions(pdev, bars, "nvme"))
+                goto disable_pci;
+
+        if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+                dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+        else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
+                dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+        else
+                goto disable_pci;
+
+        pci_set_drvdata(pdev, dev);
+        dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
+        if (!dev->bar)
+                goto disable;
+
+        dev->db_stride = NVME_CAP_STRIDE(readq(&dev->bar->cap));
+        dev->dbs = ((void __iomem *)dev->bar) + 4096;
+
+        return 0;
+
+ disable:
+        pci_release_regions(pdev);
+ disable_pci:
+        pci_disable_device(pdev);
+        return result;
+}
+
+static void nvme_dev_unmap(struct nvme_dev *dev)
+{
+        if (dev->pci_dev->msi_enabled)
+                pci_disable_msi(dev->pci_dev);
+        else if (dev->pci_dev->msix_enabled)
+                pci_disable_msix(dev->pci_dev);
+
+        if (dev->bar) {
+                iounmap(dev->bar);
+                dev->bar = NULL;
+        }
+
+        pci_release_regions(dev->pci_dev);
+        if (pci_is_enabled(dev->pci_dev))
+                pci_disable_device(dev->pci_dev);
+}
+
 static int nvme_dev_remove(struct nvme_dev *dev)
 {
         struct nvme_ns *ns, *next;
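
One detail worth noting in nvme_dev_map() above: it now caches the doorbell stride and the start of the doorbell region (BAR0 + 0x1000). As an aside (not part of the patch; nvme_sq_tail_db() is a hypothetical helper), the NVMe spec places the submission-queue tail doorbell for queue qid at offset 0x1000 + (2 * qid) * (4 << CAP.DSTRD), which maps onto the cached fields like this:

/*
 * Illustrative helper, not from this commit: computes the submission
 * queue tail doorbell address for queue 'qid' from the fields that
 * nvme_dev_map() caches.  dev->dbs points at BAR0 + 0x1000 and
 * dev->db_stride holds the raw CAP.DSTRD field.
 */
static u32 __iomem *nvme_sq_tail_db(struct nvme_dev *dev, u16 qid)
{
        /* spec: SQyTDBL offset = 0x1000 + (2 * y) * (4 << CAP.DSTRD) */
        return (u32 __iomem *)((u8 __iomem *)dev->dbs +
                               (2 * qid) * (4 << dev->db_stride));
}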
@@ -1908,15 +1960,9 @@ static void nvme_free_dev(struct kref *kref)
 {
         struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
         nvme_dev_remove(dev);
-        if (dev->pci_dev->msi_enabled)
-                pci_disable_msi(dev->pci_dev);
-        else if (dev->pci_dev->msix_enabled)
-                pci_disable_msix(dev->pci_dev);
-        iounmap(dev->bar);
+        nvme_dev_unmap(dev);
         nvme_release_instance(dev);
         nvme_release_prp_pools(dev);
-        pci_disable_device(dev->pci_dev);
-        pci_release_regions(dev->pci_dev);
         kfree(dev->queues);
         kfree(dev->entry);
         kfree(dev);
@@ -1959,7 +2005,7 @@ static const struct file_operations nvme_dev_fops = {
 
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
-        int bars, result = -ENOMEM;
+        int result = -ENOMEM;
         struct nvme_dev *dev;
 
         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1974,39 +2020,19 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         if (!dev->queues)
                 goto free;
 
-        if (pci_enable_device_mem(pdev))
-                goto free;
-        pci_set_master(pdev);
-        bars = pci_select_bars(pdev, IORESOURCE_MEM);
-        if (pci_request_selected_regions(pdev, bars, "nvme"))
-                goto disable;
-
         INIT_LIST_HEAD(&dev->namespaces);
         dev->pci_dev = pdev;
-        pci_set_drvdata(pdev, dev);
-
-        if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
-                dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-        else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
-                dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-        else
-                goto disable;
-
         result = nvme_set_instance(dev);
         if (result)
-                goto disable;
-
-        dev->entry[0].vector = pdev->irq;
+                goto free;
 
         result = nvme_setup_prp_pools(dev);
         if (result)
-                goto disable_msix;
+                goto release;
 
-        dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
-        if (!dev->bar) {
-                result = -ENOMEM;
-                goto disable_msix;
-        }
+        result = nvme_dev_map(dev);
+        if (result)
+                goto release_pools;
 
         result = nvme_configure_admin_queue(dev);
         if (result)
@@ -2042,17 +2068,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         nvme_free_queues(dev);
  unmap:
-        iounmap(dev->bar);
- disable_msix:
-        if (dev->pci_dev->msi_enabled)
-                pci_disable_msi(dev->pci_dev);
-        else if (dev->pci_dev->msix_enabled)
-                pci_disable_msix(dev->pci_dev);
-        nvme_release_instance(dev);
+        nvme_dev_unmap(dev);
+ release_pools:
         nvme_release_prp_pools(dev);
- disable:
-        pci_disable_device(pdev);
-        pci_release_regions(pdev);
+ release:
+        nvme_release_instance(dev);
  free:
         kfree(dev->queues);
         kfree(dev->entry);