#ifndef LINUX_MSI_H
#define LINUX_MSI_H

#include <linux/kobject.h>
#include <linux/list.h>

struct msi_msg {
	u32	address_lo;	/* low 32 bits of msi message address */
	u32	address_hi;	/* high 32 bits of msi message address */
	u32	data;		/* 16 bits of msi message data */
};

extern int pci_msi_ignore_mask;

/* Helper functions */
struct irq_data;
struct msi_desc;
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);

struct msi_desc {
	struct {
		__u8	is_msix	: 1;
		__u8	multiple: 3;	/* log2 num of messages allocated */
		__u8	multi_cap : 3;	/* log2 num of messages supported */
		__u8	maskbit	: 1;	/* mask-pending bit supported ? */
		__u8	is_64	: 1;	/* Address size: 0=32bit 1=64bit */
		__u16	entry_nr;	/* specific enabled entry */
		unsigned default_irq;	/* default pre-assigned irq */
	} msi_attrib;

	u32 masked;			/* mask bits */
	unsigned int irq;
	unsigned int nvec_used;		/* number of messages */
	struct list_head list;

	union {
		void __iomem *mask_base;
		u8 mask_pos;
	};
	struct pci_dev *dev;

	/* Last set MSI message */
	struct msi_msg msg;
};

/* Helpers to hide struct msi_desc implementation details */
#define msi_desc_to_dev(desc)		(&(desc)->dev->dev)
#define dev_to_msi_list(dev)		(&to_pci_dev((dev))->msi_list)
#define first_msi_entry(dev)		\
	list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev)	\
	list_for_each_entry((desc), dev_to_msi_list((dev)), list)

#ifdef CONFIG_PCI_MSI
#define first_pci_msi_entry(pdev)	first_msi_entry(&(pdev)->dev)
#define for_each_pci_msi_entry(desc, pdev)	\
	for_each_msi_entry((desc), &(pdev)->dev)
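
/*
 * Illustrative sketch, not part of this header: walking a PCI device's MSI
 * descriptors with the helpers above, e.g. to log the vectors assigned to a
 * hypothetical "pdev":
 *
 *	struct msi_desc *desc;
 *
 *	for_each_pci_msi_entry(desc, pdev)
 *		dev_info(&pdev->dev, "MSI entry %u -> irq %u\n",
 *			 desc->msi_attrib.entry_nr, desc->irq);
 */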

static inline struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
{
	return desc->dev;
}

void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
#else /* CONFIG_PCI_MSI */
static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
	return NULL;
}
#endif /* CONFIG_PCI_MSI */

void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);

u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);

/* Conversion helpers. Should be removed after merging */
static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	__pci_write_msi_msg(entry, msg);
}
static inline void write_msi_msg(int irq, struct msi_msg *msg)
{
	pci_write_msi_msg(irq, msg);
}
static inline void mask_msi_irq(struct irq_data *data)
{
	pci_msi_mask_irq(data);
}
static inline void unmask_msi_irq(struct irq_data *data)
{
	pci_msi_unmask_irq(data);
}

/*
 * The arch hooks to set up msi irqs. These functions are implemented as
 * weak symbols so that they /can/ be overridden by architecture specific
 * code if needed.
 */
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
void arch_restore_msi_irqs(struct pci_dev *dev);

void default_teardown_msi_irqs(struct pci_dev *dev);
void default_restore_msi_irqs(struct pci_dev *dev);
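
/*
 * Illustrative sketch, not part of this header: an architecture can override
 * the weak arch_setup_msi_irq() hook above. The my_arch_alloc_vector() and
 * my_arch_compose_msi_msg() helpers are assumptions for illustration only.
 *
 *	int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
 *	{
 *		struct msi_msg msg;
 *		int irq = my_arch_alloc_vector(dev);
 *
 *		if (irq < 0)
 *			return irq;
 *		irq_set_msi_desc(irq, desc);
 *		my_arch_compose_msi_msg(dev, irq, &msg);
 *		pci_write_msi_msg(irq, &msg);
 *		return 0;
 *	}
 */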

struct msi_controller {
	struct module *owner;
	struct device *dev;
	struct device_node *of_node;
	struct list_head list;
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	struct irq_domain *domain;
#endif

	int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
			 struct msi_desc *desc);
	void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
};

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN

#include <linux/irqhandler.h>
#include <asm/msi.h>

struct irq_domain;
struct irq_chip;
struct device_node;
struct msi_domain_info;

/**
 * struct msi_domain_ops - MSI interrupt domain callbacks
 * @get_hwirq:		Retrieve the resulting hw irq number
 * @msi_init:		Domain specific init function for MSI interrupts
 * @msi_free:		Domain specific function to free an MSI interrupt
 * @msi_check:		Callback for verification of the domain/info/dev data
 * @msi_prepare:	Prepare the allocation of the interrupts in the domain
 * @msi_finish:		Optional callback to finalize the allocation
 * @set_desc:		Set the msi descriptor for an interrupt
 * @handle_error:	Optional error handler if the allocation fails
 *
 * @get_hwirq, @msi_init and @msi_free are callbacks used by
 * msi_create_irq_domain() and related interfaces
 *
 * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
 * are callbacks used by msi_domain_alloc_irqs() and related
 * interfaces which are based on msi_desc.
 */
struct msi_domain_ops {
	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
				     msi_alloc_info_t *arg);
	int		(*msi_init)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq, irq_hw_number_t hwirq,
				    msi_alloc_info_t *arg);
	void		(*msi_free)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq);
	int		(*msi_check)(struct irq_domain *domain,
				     struct msi_domain_info *info,
				     struct device *dev);
	int		(*msi_prepare)(struct irq_domain *domain,
				       struct device *dev, int nvec,
				       msi_alloc_info_t *arg);
	void		(*msi_finish)(msi_alloc_info_t *arg, int retval);
	void		(*set_desc)(msi_alloc_info_t *arg,
				    struct msi_desc *desc);
	int		(*handle_error)(struct irq_domain *domain,
					struct msi_desc *desc, int error);
};

/**
 * struct msi_domain_info - MSI interrupt domain data
 * @flags:		Flags to describe features and capabilities
 * @ops:		The callback data structure
 * @chip:		Optional: associated interrupt chip
 * @chip_data:		Optional: associated interrupt chip data
 * @handler:		Optional: associated interrupt flow handler
 * @handler_data:	Optional: associated interrupt flow handler data
 * @handler_name:	Optional: associated interrupt flow handler name
 * @data:		Optional: domain specific data
 */
struct msi_domain_info {
	u32			flags;
	struct msi_domain_ops	*ops;
	struct irq_chip		*chip;
	void			*chip_data;
	irq_flow_handler_t	handler;
	void			*handler_data;
	const char		*handler_name;
	void			*data;
};

/* Flags for msi_domain_info */
enum {
	/*
	 * Init non-implemented ops callbacks with default MSI domain
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
	/*
	 * Init non-implemented chip callbacks with default MSI chip
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
	/* Build identity map between hwirq and irq */
	MSI_FLAG_IDENTITY_MAP		= (1 << 2),
	/* Support multiple PCI MSI interrupts */
	MSI_FLAG_MULTI_PCI_MSI		= (1 << 3),
	/* Support PCI MSI-X interrupts */
	MSI_FLAG_PCI_MSIX		= (1 << 4),
};

int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
			    bool force);

struct irq_domain *msi_create_irq_domain(struct device_node *of_node,
					 struct msi_domain_info *info,
					 struct irq_domain *parent);
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec);
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
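
/*
 * Illustrative sketch, not part of this header: in its probe path an irqchip
 * driver could stack an MSI domain on top of its parent domain and let the
 * MSI_FLAG_USE_DEF_* flags fill in the non-implemented callbacks. The
 * "my_msi_chip", "np" and "parent" names are assumptions for illustration
 * only.
 *
 *	static struct irq_chip my_msi_chip = {
 *		.name		= "my-MSI",
 *		.irq_mask	= irq_chip_mask_parent,
 *		.irq_unmask	= irq_chip_unmask_parent,
 *	};
 *
 *	static struct msi_domain_info my_msi_domain_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *		.chip	= &my_msi_chip,
 *	};
 *
 *	struct irq_domain *d;
 *
 *	d = msi_create_irq_domain(np, &my_msi_domain_info, parent);
 *	if (!d)
 *		return -ENOMEM;
 */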

#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
					     struct msi_domain_info *info,
					     struct irq_domain *parent);
int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
			      int nvec, int type);
void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev);
struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
		struct msi_domain_info *info, struct irq_domain *parent);

irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
					  struct msi_desc *desc);
int pci_msi_domain_check_cap(struct irq_domain *domain,
			     struct msi_domain_info *info, struct device *dev);
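
/*
 * Illustrative sketch, not part of this header: a PCI host bridge driver
 * could build its PCI/MSI domain with the default domain and chip ops. The
 * "my_pci_msi_chip", "node" and "parent" names are assumptions for
 * illustration only.
 *
 *	static struct irq_chip my_pci_msi_chip = {
 *		.name			= "my-PCI-MSI",
 *		.irq_mask		= pci_msi_mask_irq,
 *		.irq_unmask		= pci_msi_unmask_irq,
 *		.irq_write_msi_msg	= pci_msi_domain_write_msg,
 *	};
 *
 *	static struct msi_domain_info my_pci_msi_domain_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
 *			  MSI_FLAG_PCI_MSIX,
 *		.chip	= &my_pci_msi_chip,
 *	};
 *
 *	struct irq_domain *d;
 *
 *	d = pci_msi_create_irq_domain(node, &my_pci_msi_domain_info, parent);
 *	if (!d)
 *		return -ENOMEM;
 */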
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */

#endif /* LINUX_MSI_H */