commit 7b537b24e7
The CCP can perform several operations simultaneously, but it has only one interrupt. When implemented as a PCI device using MSI-X/MSI interrupts, use a tasklet model to service interrupts. By disabling and re-enabling interrupts from the CCP, coupled with the queuing that tasklets provide, we can ensure that all events (occurring on the device) are recognized and serviced.

This change fixes a problem wherein two or more busy queues can cause notification bits to change state while a (CCP) interrupt is being serviced, but after the queue state has been evaluated. This results in the event being 'lost' and the queue hanging, waiting to be serviced. Since the status bits are never fully de-asserted, the CCP never generates another interrupt (all bits zero -> one or more bits one), and no further CCP operations will be executed.

Cc: <stable@vger.kernel.org> # 4.9.x+
Signed-off-by: Gary R Hook <gary.hook@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
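The tasklet model itself lives in the per-version interrupt handlers (the irqhandler callbacks reached below through ccp->vdata->perform->irqhandler), not in this PCI glue file. The sketch that follows only illustrates the disable/schedule/re-enable pattern the commit message describes; every my_ccp_* name in it is a hypothetical stand-in, not the driver's actual API.

/* Sketch of the tasklet-based interrupt model (hypothetical names only). */
#include <linux/interrupt.h>

struct my_ccp_device {
	struct tasklet_struct irq_tasklet;	/* queued work for the bottom half */
};

/* Stand-ins for register accesses; a real driver would touch hardware here. */
static void my_ccp_disable_queue_interrupts(struct my_ccp_device *ccp) { }
static void my_ccp_enable_queue_interrupts(struct my_ccp_device *ccp) { }
static void my_ccp_service_queues(struct my_ccp_device *ccp) { }

/*
 * Bottom half: runs with device interrupts masked, services every queue
 * whose status bits are set, then re-arms the interrupt.  Bits that changed
 * state while the queues were being serviced are still asserted and get
 * handled here instead of being lost.
 */
static void my_ccp_irq_bh(unsigned long data)
{
	struct my_ccp_device *ccp = (struct my_ccp_device *)data;

	my_ccp_service_queues(ccp);
	my_ccp_enable_queue_interrupts(ccp);
}

/* Top half: mask further CCP interrupts and defer the work to the tasklet. */
static irqreturn_t my_ccp_irq_handler(int irq, void *data)
{
	struct my_ccp_device *ccp = data;

	my_ccp_disable_queue_interrupts(ccp);
	tasklet_schedule(&ccp->irq_tasklet);

	return IRQ_HANDLED;
}

/* Setup, e.g. during probe:
 *	tasklet_init(&ccp->irq_tasklet, my_ccp_irq_bh, (unsigned long)ccp);
 *	request_irq(irq, my_ccp_irq_handler, 0, "my-ccp", ccp);
 */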
357 lines
7.4 KiB
C
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

#define MSIX_VECTORS			2

struct ccp_msix {
	u32 vector;
	char name[16];
};

struct ccp_pci {
	int msix_count;
	struct ccp_msix msix[MSIX_VECTORS];
};

static int ccp_get_msix_irqs(struct ccp_device *ccp)
{
	struct ccp_pci *ccp_pci = ccp->dev_specific;
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct msix_entry msix_entry[MSIX_VECTORS];
	unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1;
	int v, ret;

	for (v = 0; v < ARRAY_SIZE(msix_entry); v++)
		msix_entry[v].entry = v;

	ret = pci_enable_msix_range(pdev, msix_entry, 1, v);
	if (ret < 0)
		return ret;

	ccp_pci->msix_count = ret;
	for (v = 0; v < ccp_pci->msix_count; v++) {
		/* Set the interrupt names and request the irqs */
		snprintf(ccp_pci->msix[v].name, name_len, "%s-%u",
			 ccp->name, v);
		ccp_pci->msix[v].vector = msix_entry[v].vector;
		ret = request_irq(ccp_pci->msix[v].vector,
				  ccp->vdata->perform->irqhandler,
				  0, ccp_pci->msix[v].name, dev);
		if (ret) {
			dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n",
				   ret);
			goto e_irq;
		}
	}
	/* With MSI-X, interrupt handling is deferred to a tasklet */
	ccp->use_tasklet = true;

	return 0;

e_irq:
	while (v--)
		free_irq(ccp_pci->msix[v].vector, dev);

	pci_disable_msix(pdev);

	ccp_pci->msix_count = 0;

	return ret;
}

static int ccp_get_msi_irq(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = pci_enable_msi(pdev);
	if (ret)
		return ret;

	ccp->irq = pdev->irq;
	ret = request_irq(ccp->irq, ccp->vdata->perform->irqhandler, 0,
			  ccp->name, dev);
	if (ret) {
		dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
		goto e_msi;
	}
	/* With MSI, interrupt handling is deferred to a tasklet */
	ccp->use_tasklet = true;

	return 0;

e_msi:
	pci_disable_msi(pdev);

	return ret;
}

static int ccp_get_irqs(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	int ret;

	ret = ccp_get_msix_irqs(ccp);
	if (!ret)
		return 0;

	/* Couldn't get MSI-X vectors, try MSI */
	dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
	ret = ccp_get_msi_irq(ccp);
	if (!ret)
		return 0;

	/* Couldn't get MSI interrupt */
	dev_notice(dev, "could not enable MSI (%d)\n", ret);

	return ret;
}

static void ccp_free_irqs(struct ccp_device *ccp)
{
	struct ccp_pci *ccp_pci = ccp->dev_specific;
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (ccp_pci->msix_count) {
		while (ccp_pci->msix_count--)
			free_irq(ccp_pci->msix[ccp_pci->msix_count].vector,
				 dev);
		pci_disable_msix(pdev);
	} else if (ccp->irq) {
		free_irq(ccp->irq, dev);
		pci_disable_msi(pdev);
	}
	ccp->irq = 0;
}

static int ccp_find_mmio_area(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	resource_size_t io_len;
	unsigned long io_flags;

	io_flags = pci_resource_flags(pdev, ccp->vdata->bar);
	io_len = pci_resource_len(pdev, ccp->vdata->bar);
	if ((io_flags & IORESOURCE_MEM) &&
	    (io_len >= (ccp->vdata->offset + 0x800)))
		return ccp->vdata->bar;

	return -EIO;
}

static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ccp_device *ccp;
	struct ccp_pci *ccp_pci;
	struct device *dev = &pdev->dev;
	unsigned int bar;
	int ret;

	ret = -ENOMEM;
	ccp = ccp_alloc_struct(dev);
	if (!ccp)
		goto e_err;

	ccp_pci = devm_kzalloc(dev, sizeof(*ccp_pci), GFP_KERNEL);
	if (!ccp_pci)
		goto e_err;

	ccp->dev_specific = ccp_pci;
	ccp->vdata = (struct ccp_vdata *)id->driver_data;
	if (!ccp->vdata || !ccp->vdata->version) {
		ret = -ENODEV;
		dev_err(dev, "missing driver data\n");
		goto e_err;
	}
	ccp->get_irq = ccp_get_irqs;
	ccp->free_irq = ccp_free_irqs;

	ret = pci_request_regions(pdev, "ccp");
	if (ret) {
		dev_err(dev, "pci_request_regions failed (%d)\n", ret);
		goto e_err;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev, "pci_enable_device failed (%d)\n", ret);
		goto e_regions;
	}

	pci_set_master(pdev);

	ret = ccp_find_mmio_area(ccp);
	if (ret < 0)
		goto e_device;
	bar = ret;

	ret = -EIO;
	ccp->io_map = pci_iomap(pdev, bar, 0);
	if (!ccp->io_map) {
		dev_err(dev, "pci_iomap failed\n");
		goto e_device;
	}
	ccp->io_regs = ccp->io_map + ccp->vdata->offset;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
				ret);
			goto e_iomap;
		}
	}

	dev_set_drvdata(dev, ccp);

	if (ccp->vdata->setup)
		ccp->vdata->setup(ccp);

	ret = ccp->vdata->perform->init(ccp);
	if (ret)
		goto e_iomap;

	dev_notice(dev, "enabled\n");

	return 0;

e_iomap:
	pci_iounmap(pdev, ccp->io_map);

e_device:
	pci_disable_device(pdev);

e_regions:
	pci_release_regions(pdev);

e_err:
	dev_notice(dev, "initialization failed\n");
	return ret;
}

static void ccp_pci_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);

	if (!ccp)
		return;

	ccp->vdata->perform->destroy(ccp);

	pci_iounmap(pdev, ccp->io_map);

	pci_disable_device(pdev);

	pci_release_regions(pdev);

	dev_notice(dev, "disabled\n");
}

#ifdef CONFIG_PM
static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 1;

	/* Wake all the queue kthreads to prepare for suspend */
	for (i = 0; i < ccp->cmd_q_count; i++)
		wake_up_process(ccp->cmd_q[i].kthread);

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* Wait for all queue kthreads to say they're done */
	while (!ccp_queues_suspended(ccp))
		wait_event_interruptible(ccp->suspend_queue,
					 ccp_queues_suspended(ccp));

	return 0;
}

static int ccp_pci_resume(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 0;

	/* Wake up all the kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		ccp->cmd_q[i].suspended = 0;
		wake_up_process(ccp->cmd_q[i].kthread);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return 0;
}
#endif

static const struct pci_device_id ccp_pci_table[] = {
	{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&ccpv3 },
	{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&ccpv5a },
	{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&ccpv5b },
	/* Last entry must be zero */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ccp_pci_table);

static struct pci_driver ccp_pci_driver = {
	.name = "ccp",
	.id_table = ccp_pci_table,
	.probe = ccp_pci_probe,
	.remove = ccp_pci_remove,
#ifdef CONFIG_PM
	.suspend = ccp_pci_suspend,
	.resume = ccp_pci_resume,
#endif
};

int ccp_pci_init(void)
{
	return pci_register_driver(&ccp_pci_driver);
}

void ccp_pci_exit(void)
{
	pci_unregister_driver(&ccp_pci_driver);
}