
KVM: PPC: Book3S HV: XIVE: Add a mapping for the source ESB pages

Each source is associated with an Event State Buffer (ESB) with an
even/odd pair of pages which provides commands to manage the source:
to trigger, to EOI, or to turn off the source, for instance.

The custom VM fault handler will deduce the guest IRQ number from the
offset of the fault, and the ESB page of the associated XIVE interrupt
will be inserted into the VMA using the internal structure that caches
information on the interrupts.
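
For example, with this layout a fault at page offset 7 within the ESB
mapping resolves to guest IRQ 3 (7 / 2), and the odd offset selects that
interrupt's EOI page rather than its trigger page.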

Signed-off-by: Cédric Le Goater <clg@kaod.org>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Cédric Le Goater 2019-04-18 12:39:38 +02:00 committed by Paul Mackerras
parent 39e9af3de5
commit 6520ca64cd
3 changed files with 65 additions and 0 deletions


@@ -36,6 +36,13 @@ the legacy interrupt mode, referred as XICS (POWER7/8).
third (operating system) and the fourth (user level) are exposed to the
guest.
2. Event State Buffer (ESB)
Each source is associated with an Event State Buffer (ESB) with an
even/odd pair of pages which provides commands to manage the source:
to trigger, to EOI, or to turn off the source, for instance.
* Groups:
1. KVM_DEV_XIVE_GRP_CTRL


@@ -721,5 +721,6 @@ struct kvm_ppc_xive_eq {
#define KVM_XIVE_EQ_ALWAYS_NOTIFY 0x00000001
#define KVM_XIVE_TIMA_PAGE_OFFSET 0
#define KVM_XIVE_ESB_PAGE_OFFSET 4
#endif /* __LINUX_KVM_POWERPC_H */
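
For illustration, here is a minimal userspace sketch of how a VMM might map
and address these ESB pages through the XIVE device fd, assuming the fd was
obtained with KVM_CREATE_DEVICE (KVM_DEV_TYPE_XIVE) and that the host page
size matches the kernel's; the helper names are hypothetical and error
handling is omitted:

#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

#define KVM_XIVE_ESB_PAGE_OFFSET        4       /* from the uapi header above */

/* Map two ESB pages (trigger + EOI) for each of the first nr_irqs sources. */
static void *map_esb_pages(int xive_fd, unsigned long nr_irqs)
{
        long page_size = sysconf(_SC_PAGESIZE);

        /* The kernel caps this mapping at KVMPPC_XIVE_NR_IRQS * 2 pages. */
        return mmap(NULL, nr_irqs * 2 * page_size, PROT_READ | PROT_WRITE,
                    MAP_SHARED, xive_fd, KVM_XIVE_ESB_PAGE_OFFSET * page_size);
}

/* Even page of the pair: trigger commands. */
static void *esb_trigger_page(void *esb, unsigned long irq)
{
        return (uint8_t *)esb + irq * 2 * sysconf(_SC_PAGESIZE);
}

/* Odd page of the pair: EOI and management commands. */
static void *esb_eoi_page(void *esb, unsigned long irq)
{
        return (uint8_t *)esb + (irq * 2 + 1) * sysconf(_SC_PAGESIZE);
}

The even page carries the trigger command and the odd page the EOI and
management commands, which is exactly the even/odd layout resolved by the
fault handler below.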


@@ -165,6 +165,59 @@ bail:
        return rc;
}

static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct kvm_device *dev = vma->vm_file->private_data;
        struct kvmppc_xive *xive = dev->private;
        struct kvmppc_xive_src_block *sb;
        struct kvmppc_xive_irq_state *state;
        struct xive_irq_data *xd;
        u32 hw_num;
        u16 src;
        u64 page;
        unsigned long irq;
        u64 page_offset;

        /*
         * Linux/KVM uses a two-page ESB setting, one for trigger and
         * one for EOI.
         */
        page_offset = vmf->pgoff - vma->vm_pgoff;
        irq = page_offset / 2;

        sb = kvmppc_xive_find_source(xive, irq, &src);
        if (!sb) {
                pr_devel("%s: source %lx not found !\n", __func__, irq);
                return VM_FAULT_SIGBUS;
        }

        state = &sb->irq_state[src];
        kvmppc_xive_select_irq(state, &hw_num, &xd);

        arch_spin_lock(&sb->lock);

        /*
         * first/even page is for trigger
         * second/odd page is for EOI and management.
         */
        page = page_offset % 2 ? xd->eoi_page : xd->trig_page;
        arch_spin_unlock(&sb->lock);

        if (WARN_ON(!page)) {
                pr_err("%s: accessing invalid ESB page for source %lx !\n",
                       __func__, irq);
                return VM_FAULT_SIGBUS;
        }

        vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT);
        return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct xive_native_esb_vmops = {
        .fault = xive_native_esb_fault,
};

static vm_fault_t xive_native_tima_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
@@ -194,6 +247,10 @@ static int kvmppc_xive_native_mmap(struct kvm_device *dev,
                if (vma_pages(vma) > 4)
                        return -EINVAL;
                vma->vm_ops = &xive_native_tima_vmops;
        } else if (vma->vm_pgoff == KVM_XIVE_ESB_PAGE_OFFSET) {
                if (vma_pages(vma) > KVMPPC_XIVE_NR_IRQS * 2)
                        return -EINVAL;
                vma->vm_ops = &xive_native_esb_vmops;
        } else {
                return -EINVAL;
        }