Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging

virtio,pc,pci: features, cleanups

infrastructure for vhost-vdpa shadow work
piix south bridge rework
reconnect for vhost-user-scsi
dummy ACPI QTG DSM for cxl

tests, cleanups, fixes all over the place

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# -----BEGIN PGP SIGNATURE-----
#
# iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmU06PMPHG1zdEByZWRo
# YXQuY29tAAoJECgfDbjSjVRpNIsH/0DlKti86VZLJ6PbNqsnKxoK2gg05TbEhPZU
# pQ+RPDaCHpFBsLC5qsoMJwvaEQFe0e49ZFemw7bXRzBxgmbbNnZ9ArCIPqT+rvQd
# 7UBmyC+kacVyybZatq69aK2BHKFtiIRlT78d9Izgtjmp8V7oyKoz14Esh8wkE+FT
# ypHUa70Addi6alNm6BVkm7bxZxi0Wrmf3THqF8ViYvufzHKl7JR5e17fKWEG0BqV
# 9W7AeHMnzJ7jkTvBGUw7g5EbzFn7hPLTbO4G/VW97k0puS4WRX5aIMkVhUazsRIa
# zDOuXCCskUWuRapiCwY0E4g7cCaT8/JR6JjjBaTgkjJgvo5Y8Eg=
# =ILek
# -----END PGP SIGNATURE-----
# gpg: Signature made Sun 22 Oct 2023 02:18:43 PDT
# gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
# gpg:                issuer "mst@redhat.com"
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu: (62 commits)
  intel-iommu: Report interrupt remapping faults, fix return value
  MAINTAINERS: Add include/hw/intc/i8259.h to the PC chip section
  vhost-user: Fix protocol feature bit conflict
  tests/acpi: Update DSDT.cxl with QTG DSM
  hw/cxl: Add QTG _DSM support for ACPI0017 device
  tests/acpi: Allow update of DSDT.cxl
  hw/i386/cxl: ensure maxram is greater than ram size for calculating cxl range
  vhost-user: fix lost reconnect
  vhost-user-scsi: start vhost when guest kicks
  vhost-user-scsi: support reconnect to backend
  vhost: move and rename the conn retry times
  vhost-user-common: send get_inflight_fd once
  hw/i386/pc_piix: Make PIIX4 south bridge usable in PC machine
  hw/isa/piix: Implement multi-process QEMU support also for PIIX4
  hw/isa/piix: Resolve duplicate code regarding PCI interrupt wiring
  hw/isa/piix: Reuse PIIX3's PCI interrupt triggering in PIIX4
  hw/isa/piix: Rename functions to be shared for PCI interrupt triggering
  hw/isa/piix: Reuse PIIX3 base class' realize method in PIIX4
  hw/isa/piix: Share PIIX3's base class with PIIX4
  hw/isa/piix: Harmonize names of reset control memory regions
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 1b4a5a20da
Stefan Hajnoczi <stefanha@redhat.com>, 2023-10-23 14:45:29 -07:00
43 changed files with 1218 additions and 836 deletions

View File

@ -1321,7 +1321,7 @@ Malta
M: Philippe Mathieu-Daudé <philmd@linaro.org>
R: Aurelien Jarno <aurelien@aurel32.net>
S: Odd Fixes
F: hw/isa/piix4.c
F: hw/isa/piix.c
F: hw/acpi/piix4.c
F: hw/mips/malta.c
F: hw/pci-host/gt64120.c
@ -1761,7 +1761,7 @@ F: hw/pci-host/pam.c
F: include/hw/pci-host/i440fx.h
F: include/hw/pci-host/q35.h
F: include/hw/pci-host/pam.h
F: hw/isa/piix3.c
F: hw/isa/piix.c
F: hw/isa/lpc_ich9.c
F: hw/i2c/smbus_ich9.c
F: hw/acpi/piix4.c
@ -2519,7 +2519,7 @@ PIIX4 South Bridge (i82371AB)
M: Hervé Poussineau <hpoussin@reactos.org>
M: Philippe Mathieu-Daudé <philmd@linaro.org>
S: Maintained
F: hw/isa/piix4.c
F: hw/isa/piix.c
F: include/hw/southbridge/piix.h
VIA South Bridges (VT82C686B, VT8231)

View File

@ -275,6 +275,16 @@ Inflight description
:queue size: a 16-bit size of virtqueues
VhostUserShared
^^^^^^^^^^^^^^^
+------+
| UUID |
+------+
:UUID: a 16-byte UUID, whose first three components (a 32-bit value, then
two 16-bit values) are stored in big endian.
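
As a rough editorial illustration (not part of the spec; the struct name and
helper below are hypothetical), a back-end could pack the UUID into this
16-byte big-endian wire layout as follows:

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>   /* htonl(), htons() */

    /* Hypothetical wire layout for the VhostUserShared payload. */
    typedef struct {
        uint32_t time_low;   /* stored big endian */
        uint16_t time_mid;   /* stored big endian */
        uint16_t time_hi;    /* stored big endian */
        uint8_t  rest[8];    /* remaining bytes, order preserved */
    } VhostUserSharedWire;

    static void pack_shared_uuid(VhostUserSharedWire *out,
                                 uint32_t time_low, uint16_t time_mid,
                                 uint16_t time_hi, const uint8_t rest[8])
    {
        out->time_low = htonl(time_low);
        out->time_mid = htons(time_mid);
        out->time_hi  = htons(time_hi);
        memcpy(out->rest, rest, 8);
    }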
C structure
-----------
@ -885,6 +895,7 @@ Protocol features
#define VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS 15
#define VHOST_USER_PROTOCOL_F_STATUS 16
#define VHOST_USER_PROTOCOL_F_XEN_MMAP 17
#define VHOST_USER_PROTOCOL_F_SHARED_OBJECT 18
Front-end message types
-----------------------

View File

@ -71,3 +71,11 @@ machine property, i.e.
|qemu_system_x86| some.img \
-audiodev <backend>,id=<name> \
-machine pcspk-audiodev=<name>
Machine-specific options
~~~~~~~~~~~~~~~~~~~~~~~~
It supports the following machine-specific options:
- ``x-south-bridge=PIIX3|piix4-isa`` (Experimental option to select a particular
south bridge. Default: ``PIIX3``)
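
For example (an illustrative invocation; ``some.img`` is a placeholder):

|qemu_system_x86| some.img \
-machine pc,x-south-bridge=piix4-isa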

View File

@ -30,6 +30,75 @@
#include "qapi/error.h"
#include "qemu/uuid.h"
void build_cxl_dsm_method(Aml *dev)
{
Aml *method, *ifctx, *ifctx2;
method = aml_method("_DSM", 4, AML_SERIALIZED);
{
Aml *function, *uuid;
uuid = aml_arg(0);
function = aml_arg(2);
/* CXL spec v3.0 9.17.3.1 _DSM Function for Retrieving QTG ID */
ifctx = aml_if(aml_equal(
uuid, aml_touuid("F365F9A6-A7DE-4071-A66A-B40C0B4F8E52")));
/* Function 0, standard DSM query function */
ifctx2 = aml_if(aml_equal(function, aml_int(0)));
{
uint8_t byte_list[1] = { 0x01 }; /* function 1 only */
aml_append(ifctx2,
aml_return(aml_buffer(sizeof(byte_list), byte_list)));
}
aml_append(ifctx, ifctx2);
/*
* Function 1
* Creating a package with static values. The max supported QTG ID will
* be 1 and recommended QTG IDs are 0 and then 1.
* The values here are statically created to simplify emulation. Values
* from a real BIOS would be determined by the performance of all the
* present CXL memory and then assigned.
*/
ifctx2 = aml_if(aml_equal(function, aml_int(1)));
{
Aml *pak, *pak1;
/*
* Return: A package containing two elements - a WORD that returns
* the maximum throttling group that the platform supports, and a
* package containing the QTG ID(s) that the platform recommends.
* Package {
* Max Supported QTG ID
* Package {QTG Recommendations}
* }
*
* While the spec specifies a WORD, hinting that the values are 16-bit,
* an ACPI dump of a BIOS DSDT table showed that the values are plain
* integers with no specific size specification, so aml_int() is used
* for the values.
*/
pak1 = aml_package(2);
/* Set QTG ID of 0 */
aml_append(pak1, aml_int(0));
/* Set QTG ID of 1 */
aml_append(pak1, aml_int(1));
pak = aml_package(2);
/* Set Max QTG 1 */
aml_append(pak, aml_int(1));
aml_append(pak, pak1);
aml_append(ifctx2, aml_return(pak));
}
aml_append(ifctx, ifctx2);
}
aml_append(method, ifctx);
aml_append(dev, method);
}
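/*
 * Editorial sketch (not part of the patch): a consumer's view of the _DSM
 * emitted above.  Assuming a Linux driver context where acpi_evaluate_dsm()
 * is available, function 1 returns the static package
 * Package { 1, Package { 0, 1 } } built by this helper.
 */
#include <linux/acpi.h>
static const guid_t qtg_guid =
    GUID_INIT(0xF365F9A6, 0xA7DE, 0x4071,
              0xA6, 0x6A, 0xB4, 0x0C, 0x0B, 0x4F, 0x8E, 0x52);
static int query_max_qtg_id(acpi_handle handle)
{
    union acpi_object *out;
    int max_id;
    /* Revision 1, function 1: retrieve the QTG package (revision assumed). */
    out = acpi_evaluate_dsm(handle, &qtg_guid, 1, 1, NULL);
    if (!out) {
        return -EINVAL;
    }
    if (out->type != ACPI_TYPE_PACKAGE || out->package.count != 2) {
        ACPI_FREE(out);
        return -EINVAL;
    }
    /* Package { Max Supported QTG ID, Package { QTG Recommendations } } */
    max_id = out->package.elements[0].integer.value;
    ACPI_FREE(out);
    return max_id;
}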
static void cedt_build_chbs(GArray *table_data, PXBCXLDev *cxl)
{
PXBDev *pxb = PXB_DEV(cxl);

View File

@ -32,8 +32,6 @@
#include "sysemu/sysemu.h"
#include "sysemu/runstate.h"
#define REALIZE_CONNECTION_RETRIES 3
static const int user_feature_bits[] = {
VIRTIO_BLK_F_SIZE_MAX,
VIRTIO_BLK_F_SEG_MAX,
@ -393,7 +391,7 @@ static void vhost_user_blk_event(void *opaque, QEMUChrEvent event)
case CHR_EVENT_CLOSED:
/* defer close until later to avoid circular close */
vhost_user_async_close(dev, &s->chardev, &s->dev,
vhost_user_blk_disconnect);
vhost_user_blk_disconnect, vhost_user_blk_event);
break;
case CHR_EVENT_BREAK:
case CHR_EVENT_MUX_IN:
@ -482,7 +480,7 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp)
s->inflight = g_new0(struct vhost_inflight, 1);
s->vhost_vqs = g_new0(struct vhost_virtqueue, s->num_queues);
retries = REALIZE_CONNECTION_RETRIES;
retries = VU_REALIZE_CONN_RETRIES;
assert(!*errp);
do {
if (*errp) {

View File

@ -29,7 +29,7 @@ static int uuid_equal_func(const void *lhv, const void *rhv)
static bool virtio_add_resource(QemuUUID *uuid, VirtioSharedObject *value)
{
bool result = false;
bool result = true;
g_mutex_lock(&lock);
if (resource_uuids == NULL) {
@ -39,7 +39,9 @@ static bool virtio_add_resource(QemuUUID *uuid, VirtioSharedObject *value)
g_free);
}
if (g_hash_table_lookup(resource_uuids, uuid) == NULL) {
result = g_hash_table_insert(resource_uuids, uuid, value);
g_hash_table_insert(resource_uuids, uuid, value);
} else {
result = false;
}
g_mutex_unlock(&lock);
@ -57,6 +59,9 @@ bool virtio_add_dmabuf(QemuUUID *uuid, int udmabuf_fd)
vso->type = TYPE_DMABUF;
vso->value = GINT_TO_POINTER(udmabuf_fd);
result = virtio_add_resource(uuid, vso);
if (!result) {
g_free(vso);
}
return result;
}
@ -72,6 +77,9 @@ bool virtio_add_vhost_device(QemuUUID *uuid, struct vhost_dev *dev)
vso->type = TYPE_VHOST_DEV;
vso->value = dev;
result = virtio_add_resource(uuid, vso);
if (!result) {
g_free(vso);
}
return result;
}
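/*
 * Editorial usage sketch (not part of the patch): with the fix above, a
 * duplicate UUID makes virtio_add_dmabuf() return false and the wrapper
 * object is freed internally, so the caller needs no extra cleanup.
 */
static void share_dmabuf_once(QemuUUID *uuid, int udmabuf_fd)
{
    if (!virtio_add_dmabuf(uuid, udmabuf_fd)) {
        error_report("dma-buf resource already shared"); /* illustrative */
    }
}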

View File

@ -72,8 +72,7 @@ config I440FX
select PC_PCI
select PC_ACPI
select PCI_I440FX
select PIIX3
select IDE_PIIX
select PIIX
select DIMM
select SMBIOS
select FW_CFG_DMA

View File

@ -56,7 +56,6 @@
/* Supported chipsets: */
#include "hw/southbridge/ich9.h"
#include "hw/southbridge/piix.h"
#include "hw/acpi/pcihp.h"
#include "hw/i386/fw_cfg.h"
#include "hw/i386/pc.h"
@ -242,10 +241,6 @@ static void acpi_get_pm_info(MachineState *machine, AcpiPmInfo *pm)
pm->pcihp_io_len =
object_property_get_uint(obj, ACPI_PCIHP_IO_LEN_PROP, NULL);
/* The above need not be conditional on machine type because the reset port
* happens to be the same on PIIX (pc) and ICH9 (q35). */
QEMU_BUILD_BUG_ON(ICH9_RST_CNT_IOPORT != PIIX_RCR_IOPORT);
/* Fill in optional s3/s4 related properties */
o = object_property_get_qobject(obj, ACPI_PM_PROP_S3_DISABLED, NULL);
if (o) {
@ -1422,6 +1417,7 @@ static void build_acpi0017(Aml *table)
method = aml_method("_STA", 0, AML_NOTSERIALIZED);
aml_append(method, aml_return(aml_int(0x01)));
aml_append(dev, method);
build_cxl_dsm_method(dev);
aml_append(scope, dev);
aml_append(table, scope);

View File

@ -469,21 +469,12 @@ static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index)
/* Must not update F field now, should be done later */
static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index,
uint16_t source_id, hwaddr addr,
VTDFaultReason fault, bool is_write,
bool is_pasid, uint32_t pasid)
uint64_t hi, uint64_t lo)
{
uint64_t hi = 0, lo;
hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
assert(index < DMAR_FRCD_REG_NR);
lo = VTD_FRCD_FI(addr);
hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault) |
VTD_FRCD_PV(pasid) | VTD_FRCD_PP(is_pasid);
if (!is_write) {
hi |= VTD_FRCD_T;
}
vtd_set_quad_raw(s, frcd_reg_addr, lo);
vtd_set_quad_raw(s, frcd_reg_addr + 8, hi);
@ -509,17 +500,11 @@ static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id)
}
/* Log and report a DMAR (address translation) fault to software */
static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
hwaddr addr, VTDFaultReason fault,
bool is_write, bool is_pasid,
uint32_t pasid)
static void vtd_report_frcd_fault(IntelIOMMUState *s, uint64_t source_id,
uint64_t hi, uint64_t lo)
{
uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
assert(fault < VTD_FR_MAX);
trace_vtd_dmar_fault(source_id, fault, addr, is_write);
if (fsts_reg & VTD_FSTS_PFO) {
error_report_once("New fault is not recorded due to "
"Primary Fault Overflow");
@ -539,8 +524,7 @@ static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
return;
}
vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault,
is_write, is_pasid, pasid);
vtd_record_frcd(s, s->next_frcd_reg, hi, lo);
if (fsts_reg & VTD_FSTS_PPF) {
error_report_once("There are pending faults already, "
@ -565,6 +549,40 @@ static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
}
}
/* Log and report a DMAR (address translation) fault to software */
static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
hwaddr addr, VTDFaultReason fault,
bool is_write, bool is_pasid,
uint32_t pasid)
{
uint64_t hi, lo;
assert(fault < VTD_FR_MAX);
trace_vtd_dmar_fault(source_id, fault, addr, is_write);
lo = VTD_FRCD_FI(addr);
hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault) |
VTD_FRCD_PV(pasid) | VTD_FRCD_PP(is_pasid);
if (!is_write) {
hi |= VTD_FRCD_T;
}
vtd_report_frcd_fault(s, source_id, hi, lo);
}
static void vtd_report_ir_fault(IntelIOMMUState *s, uint64_t source_id,
VTDFaultReason fault, uint16_t index)
{
uint64_t hi, lo;
lo = VTD_FRCD_IR_IDX(index);
hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault);
vtd_report_frcd_fault(s, source_id, hi, lo);
}
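/*
 * Editorial illustration: for an interrupt-remapping fault at IRTE index 5
 * from source_id 0x100 with reason VTD_FR_IR_INDEX_OVER, the record built
 * above is
 *     lo = VTD_FRCD_IR_IDX(5)    (the index lands in bits 63:48 of lo)
 *     hi = VTD_FRCD_SID(0x100) | VTD_FRCD_FR(VTD_FR_IR_INDEX_OVER)
 * The F bit is set later by vtd_set_frcd_and_update_ppf().
 */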
/* Handle Invalidation Queue Errors of queued invalidation interface error
* conditions.
*/
@ -3305,8 +3323,9 @@ static Property vtd_properties[] = {
};
/* Read IRTE entry with specific index */
static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
VTD_IR_TableEntry *entry, uint16_t sid)
static bool vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
VTD_IR_TableEntry *entry, uint16_t sid,
bool do_fault)
{
static const uint16_t vtd_svt_mask[VTD_SQ_MAX] = \
{0xffff, 0xfffb, 0xfff9, 0xfff8};
@ -3317,7 +3336,10 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
if (index >= iommu->intr_size) {
error_report_once("%s: index too large: ind=0x%x",
__func__, index);
return -VTD_FR_IR_INDEX_OVER;
if (do_fault) {
vtd_report_ir_fault(iommu, sid, VTD_FR_IR_INDEX_OVER, index);
}
return false;
}
addr = iommu->intr_root + index * sizeof(*entry);
@ -3325,7 +3347,10 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
entry, sizeof(*entry), MEMTXATTRS_UNSPECIFIED)) {
error_report_once("%s: read failed: ind=0x%x addr=0x%" PRIx64,
__func__, index, addr);
return -VTD_FR_IR_ROOT_INVAL;
if (do_fault) {
vtd_report_ir_fault(iommu, sid, VTD_FR_IR_ROOT_INVAL, index);
}
return false;
}
entry->data[0] = le64_to_cpu(entry->data[0]);
@ -3333,11 +3358,24 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
trace_vtd_ir_irte_get(index, entry->data[1], entry->data[0]);
/*
* The remaining potential fault conditions are "qualified" by the
* Fault Processing Disable bit in the IRTE. Even "not present".
* So just clear the do_fault flag if PFD is set, which will
* prevent faults being raised.
*/
if (entry->irte.fault_disable) {
do_fault = false;
}
if (!entry->irte.present) {
error_report_once("%s: detected non-present IRTE "
"(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")",
__func__, index, entry->data[1], entry->data[0]);
return -VTD_FR_IR_ENTRY_P;
if (do_fault) {
vtd_report_ir_fault(iommu, sid, VTD_FR_IR_ENTRY_P, index);
}
return false;
}
if (entry->irte.__reserved_0 || entry->irte.__reserved_1 ||
@ -3345,7 +3383,10 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
error_report_once("%s: detected non-zero reserved IRTE "
"(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")",
__func__, index, entry->data[1], entry->data[0]);
return -VTD_FR_IR_IRTE_RSVD;
if (do_fault) {
vtd_report_ir_fault(iommu, sid, VTD_FR_IR_IRTE_RSVD, index);
}
return false;
}
if (sid != X86_IOMMU_SID_INVALID) {
@ -3361,7 +3402,10 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
error_report_once("%s: invalid IRTE SID "
"(index=%u, sid=%u, source_id=%u)",
__func__, index, sid, source_id);
return -VTD_FR_IR_SID_ERR;
if (do_fault) {
vtd_report_ir_fault(iommu, sid, VTD_FR_IR_SID_ERR, index);
}
return false;
}
break;
@ -3373,7 +3417,10 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
error_report_once("%s: invalid SVT_BUS "
"(index=%u, bus=%u, min=%u, max=%u)",
__func__, index, bus, bus_min, bus_max);
return -VTD_FR_IR_SID_ERR;
if (do_fault) {
vtd_report_ir_fault(iommu, sid, VTD_FR_IR_SID_ERR, index);
}
return false;
}
break;
@ -3382,23 +3429,24 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
"(index=%u, type=%d)", __func__,
index, entry->irte.sid_vtype);
/* Take this as verification failure. */
return -VTD_FR_IR_SID_ERR;
if (do_fault) {
vtd_report_ir_fault(iommu, sid, VTD_FR_IR_SID_ERR, index);
}
return false;
}
}
return 0;
return true;
}
/* Fetch IRQ information of specific IR index */
static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
X86IOMMUIrq *irq, uint16_t sid)
static bool vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
X86IOMMUIrq *irq, uint16_t sid, bool do_fault)
{
VTD_IR_TableEntry irte = {};
int ret = 0;
ret = vtd_irte_get(iommu, index, &irte, sid);
if (ret) {
return ret;
if (!vtd_irte_get(iommu, index, &irte, sid, do_fault)) {
return false;
}
irq->trigger_mode = irte.irte.trigger_mode;
@ -3417,16 +3465,15 @@ static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
trace_vtd_ir_remap(index, irq->trigger_mode, irq->vector,
irq->delivery_mode, irq->dest, irq->dest_mode);
return 0;
return true;
}
/* Interrupt remapping for MSI/MSI-X entry */
static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
MSIMessage *origin,
MSIMessage *translated,
uint16_t sid)
uint16_t sid, bool do_fault)
{
int ret = 0;
VTD_IR_MSIAddress addr;
uint16_t index;
X86IOMMUIrq irq = {};
@ -3443,14 +3490,20 @@ static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
if (origin->address & VTD_MSI_ADDR_HI_MASK) {
error_report_once("%s: MSI address high 32 bits non-zero detected: "
"address=0x%" PRIx64, __func__, origin->address);
return -VTD_FR_IR_REQ_RSVD;
if (do_fault) {
vtd_report_ir_fault(iommu, sid, VTD_FR_IR_REQ_RSVD, 0);
}
return -EINVAL;
}
addr.data = origin->address & VTD_MSI_ADDR_LO_MASK;
if (addr.addr.__head != 0xfee) {
error_report_once("%s: MSI address low 32 bit invalid: 0x%" PRIx32,
__func__, addr.data);
return -VTD_FR_IR_REQ_RSVD;
if (do_fault) {
vtd_report_ir_fault(iommu, sid, VTD_FR_IR_REQ_RSVD, 0);
}
return -EINVAL;
}
/* This is compatible mode. */
@ -3469,9 +3522,8 @@ static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE;
}
ret = vtd_remap_irq_get(iommu, index, &irq, sid);
if (ret) {
return ret;
if (!vtd_remap_irq_get(iommu, index, &irq, sid, do_fault)) {
return -EINVAL;
}
if (addr.addr.sub_valid) {
@ -3481,7 +3533,10 @@ static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
"(sid=%u, address=0x%" PRIx64
", data=0x%" PRIx32 ")",
__func__, sid, origin->address, origin->data);
return -VTD_FR_IR_REQ_RSVD;
if (do_fault) {
vtd_report_ir_fault(iommu, sid, VTD_FR_IR_REQ_RSVD, 0);
}
return -EINVAL;
}
} else {
uint8_t vector = origin->data & 0xff;
@ -3521,7 +3576,7 @@ static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src,
MSIMessage *dst, uint16_t sid)
{
return vtd_interrupt_remap_msi(INTEL_IOMMU_DEVICE(iommu),
src, dst, sid);
src, dst, sid, false);
}
static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr,
@ -3547,9 +3602,8 @@ static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
sid = attrs.requester_id;
}
ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid);
ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid, true);
if (ret) {
/* TODO: report error */
/* Drop this interrupt */
return MEMTX_ERROR;
}

View File

@ -268,6 +268,7 @@
#define VTD_FRCD_FI(val) ((val) & ~0xfffULL)
#define VTD_FRCD_PV(val) (((val) & 0xffffULL) << 40)
#define VTD_FRCD_PP(val) (((val) & 0x1) << 31)
#define VTD_FRCD_IR_IDX(val) (((val) & 0xffffULL) << 48)
/* DMA Remapping Fault Conditions */
typedef enum VTDFaultReason {

View File

@ -781,10 +781,12 @@ static void pc_get_device_memory_range(PCMachineState *pcms,
static uint64_t pc_get_cxl_range_start(PCMachineState *pcms)
{
PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
MachineState *ms = MACHINE(pcms);
hwaddr cxl_base;
ram_addr_t size;
if (pcmc->has_reserved_memory) {
if (pcmc->has_reserved_memory &&
(ms->ram_size < ms->maxram_size)) {
pc_get_device_memory_range(pcms, &cxl_base, &size);
cxl_base += size;
} else {
@ -1199,7 +1201,6 @@ void pc_basic_device_init(struct PCMachineState *pcms,
DeviceState *hpet = NULL;
int pit_isa_irq = 0;
qemu_irq pit_alt_irq = NULL;
qemu_irq rtc_irq = NULL;
ISADevice *pit = NULL;
MemoryRegion *ioport80_io = g_new(MemoryRegion, 1);
MemoryRegion *ioportF0_io = g_new(MemoryRegion, 1);
@ -1219,6 +1220,8 @@ void pc_basic_device_init(struct PCMachineState *pcms,
*/
if (pcms->hpet_enabled && (!kvm_irqchip_in_kernel() ||
kvm_has_pit_state2())) {
qemu_irq rtc_irq;
hpet = qdev_try_new(TYPE_HPET);
if (!hpet) {
error_report("couldn't create HPET device");
@ -1243,16 +1246,11 @@ void pc_basic_device_init(struct PCMachineState *pcms,
pit_isa_irq = -1;
pit_alt_irq = qdev_get_gpio_in(hpet, HPET_LEGACY_PIT_INT);
rtc_irq = qdev_get_gpio_in(hpet, HPET_LEGACY_RTC_INT);
/* overwrite connection created by south bridge */
qdev_connect_gpio_out(DEVICE(rtc_state), 0, rtc_irq);
}
if (rtc_irq) {
qdev_connect_gpio_out(DEVICE(rtc_state), 0, rtc_irq);
} else {
uint32_t irq = object_property_get_uint(OBJECT(rtc_state),
"irq",
&error_fatal);
isa_connect_gpio_out(rtc_state, 0, irq);
}
object_property_add_alias(OBJECT(pcms), "rtc-time", OBJECT(rtc_state),
"date");
@ -1712,6 +1710,7 @@ static void pc_machine_initfn(Object *obj)
#endif /* CONFIG_VMPORT */
pcms->max_ram_below_4g = 0; /* use default */
pcms->smbios_entry_point_type = pcmc->default_smbios_ep_type;
pcms->south_bridge = pcmc->default_south_bridge;
/* acpi build is enabled by default if machine supports it */
pcms->acpi_build_enabled = pcmc->has_acpi_build;

View File

@ -43,7 +43,6 @@
#include "net/net.h"
#include "hw/ide/isa.h"
#include "hw/ide/pci.h"
#include "hw/ide/piix.h"
#include "hw/irq.h"
#include "sysemu/kvm.h"
#include "hw/i386/kvm/clock.h"
@ -51,8 +50,6 @@
#include "hw/i2c/smbus_eeprom.h"
#include "exec/memory.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/piix4.h"
#include "hw/usb/hcd-uhci.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "sysemu/xen.h"
@ -117,7 +114,7 @@ static void pc_init1(MachineState *machine,
MemoryRegion *system_io = get_system_io();
PCIBus *pci_bus = NULL;
ISABus *isa_bus;
int piix3_devfn = -1;
Object *piix4_pm = NULL;
qemu_irq smi_irq;
GSIState *gsi_state;
BusState *idebus[MAX_IDE_BUS];
@ -261,10 +258,29 @@ static void pc_init1(MachineState *machine,
gsi_state = pc_gsi_create(&x86ms->gsi, pcmc->pci_enabled);
if (pcmc->pci_enabled) {
PIIX3State *piix3;
PCIDevice *pci_dev;
DeviceState *dev;
size_t i;
pci_dev = pci_create_simple_multifunction(pci_bus, -1, TYPE_PIIX3_DEVICE);
pci_dev = pci_new_multifunction(-1, pcms->south_bridge);
object_property_set_bool(OBJECT(pci_dev), "has-usb",
machine_usb(machine), &error_abort);
object_property_set_bool(OBJECT(pci_dev), "has-acpi",
x86_machine_is_acpi_enabled(x86ms),
&error_abort);
object_property_set_bool(OBJECT(pci_dev), "has-pic", false,
&error_abort);
object_property_set_bool(OBJECT(pci_dev), "has-pit", false,
&error_abort);
qdev_prop_set_uint32(DEVICE(pci_dev), "smb_io_base", 0xb100);
object_property_set_bool(OBJECT(pci_dev), "smm-enabled",
x86_machine_is_smm_enabled(x86ms),
&error_abort);
dev = DEVICE(pci_dev);
for (i = 0; i < ISA_NUM_IRQS; i++) {
qdev_connect_gpio_out_named(dev, "isa-irqs", i, x86ms->gsi[i]);
}
pci_realize_and_unref(pci_dev, pci_bus, &error_fatal);
if (xen_enabled()) {
pci_device_set_intx_routing_notifier(
@ -280,15 +296,18 @@ static void pc_init1(MachineState *machine,
XEN_IOAPIC_NUM_PIRQS);
}
piix3 = PIIX3_PCI_DEVICE(pci_dev);
piix3->pic = x86ms->gsi;
piix3_devfn = piix3->dev.devfn;
isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(piix3), "isa.0"));
isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(pci_dev), "isa.0"));
rtc_state = ISA_DEVICE(object_resolve_path_component(OBJECT(pci_dev),
"rtc"));
piix4_pm = object_resolve_path_component(OBJECT(pci_dev), "pm");
dev = DEVICE(object_resolve_path_component(OBJECT(pci_dev), "ide"));
pci_ide_create_devs(PCI_DEVICE(dev));
idebus[0] = qdev_get_child_bus(dev, "ide.0");
idebus[1] = qdev_get_child_bus(dev, "ide.1");
} else {
isa_bus = isa_bus_new(NULL, system_memory, system_io,
&error_abort);
isa_bus_register_input_irqs(isa_bus, x86ms->gsi);
rtc_state = isa_new(TYPE_MC146818_RTC);
qdev_prop_set_int32(DEVICE(rtc_state), "base_year", 2000);
@ -296,8 +315,9 @@ static void pc_init1(MachineState *machine,
i8257_dma_init(isa_bus, 0);
pcms->hpet_enabled = false;
idebus[0] = NULL;
idebus[1] = NULL;
}
isa_bus_register_input_irqs(isa_bus, x86ms->gsi);
if (x86ms->pic == ON_OFF_AUTO_ON || x86ms->pic == ON_OFF_AUTO_AUTO) {
pc_i8259_create(isa_bus, gsi_state->i8259_irq);
@ -325,12 +345,6 @@ static void pc_init1(MachineState *machine,
pc_nic_init(pcmc, isa_bus, pci_bus);
if (pcmc->pci_enabled) {
PCIDevice *dev;
dev = pci_create_simple(pci_bus, piix3_devfn + 1, TYPE_PIIX3_IDE);
pci_ide_create_devs(dev);
idebus[0] = qdev_get_child_bus(&dev->qdev, "ide.0");
idebus[1] = qdev_get_child_bus(&dev->qdev, "ide.1");
pc_cmos_init(pcms, idebus[0], idebus[1], rtc_state);
}
#ifdef CONFIG_IDE_ISA
@ -356,21 +370,9 @@ static void pc_init1(MachineState *machine,
}
#endif
if (pcmc->pci_enabled && machine_usb(machine)) {
pci_create_simple(pci_bus, piix3_devfn + 2, TYPE_PIIX3_USB_UHCI);
}
if (pcmc->pci_enabled && x86_machine_is_acpi_enabled(X86_MACHINE(pcms))) {
PCIDevice *piix4_pm;
if (piix4_pm) {
smi_irq = qemu_allocate_irq(pc_acpi_smi_interrupt, first_cpu, 0);
piix4_pm = pci_new(piix3_devfn + 3, TYPE_PIIX4_PM);
qdev_prop_set_uint32(DEVICE(piix4_pm), "smb_io_base", 0xb100);
qdev_prop_set_bit(DEVICE(piix4_pm), "smm-enabled",
x86_machine_is_smm_enabled(x86ms));
pci_realize_and_unref(piix4_pm, pci_bus, &error_fatal);
qdev_connect_gpio_out(DEVICE(piix4_pm), 0, x86ms->gsi[9]);
qdev_connect_gpio_out_named(DEVICE(piix4_pm), "smi-irq", 0, smi_irq);
pcms->smbus = I2C_BUS(qdev_get_child_bus(DEVICE(piix4_pm), "i2c"));
/* TODO: Populate SPD eeprom data. */
@ -382,7 +384,7 @@ static void pc_init1(MachineState *machine,
object_property_allow_set_link,
OBJ_PROP_LINK_STRONG);
object_property_set_link(OBJECT(machine), PC_MACHINE_ACPI_DEVICE_PROP,
OBJECT(piix4_pm), &error_abort);
piix4_pm, &error_abort);
}
if (machine->nvdimms_state->is_enabled) {
@ -392,6 +394,56 @@ static void pc_init1(MachineState *machine,
}
}
typedef enum PCSouthBridgeOption {
PC_SOUTH_BRIDGE_OPTION_PIIX3,
PC_SOUTH_BRIDGE_OPTION_PIIX4,
PC_SOUTH_BRIDGE_OPTION_MAX,
} PCSouthBridgeOption;
static const QEnumLookup PCSouthBridgeOption_lookup = {
.array = (const char *const[]) {
[PC_SOUTH_BRIDGE_OPTION_PIIX3] = TYPE_PIIX3_DEVICE,
[PC_SOUTH_BRIDGE_OPTION_PIIX4] = TYPE_PIIX4_PCI_DEVICE,
},
.size = PC_SOUTH_BRIDGE_OPTION_MAX
};
#define NotifyVmexitOption_str(val) \
qapi_enum_lookup(&NotifyVmexitOption_lookup, (val))
static int pc_get_south_bridge(Object *obj, Error **errp)
{
PCMachineState *pcms = PC_MACHINE(obj);
int i;
for (i = 0; i < PCSouthBridgeOption_lookup.size; i++) {
if (g_strcmp0(PCSouthBridgeOption_lookup.array[i],
pcms->south_bridge) == 0) {
return i;
}
}
error_setg(errp, "Invalid south bridge value set");
return 0;
}
static void pc_set_south_bridge(Object *obj, int value, Error **errp)
{
PCMachineState *pcms = PC_MACHINE(obj);
if (value < 0) {
error_setg(errp, "Value can't be negative");
return;
}
if (value >= PCSouthBridgeOption_lookup.size) {
error_setg(errp, "Value too big");
return;
}
pcms->south_bridge = PCSouthBridgeOption_lookup.array[value];
}
/* Looking for a pc_compat_2_4() function? It doesn't exist.
* pc_compat_*() functions that run on machine-init time and
* change global QEMU state are deprecated. Please don't create
@ -471,6 +523,8 @@ static void pc_xen_hvm_init(MachineState *machine)
static void pc_i440fx_machine_options(MachineClass *m)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
ObjectClass *oc = OBJECT_CLASS(m);
pcmc->default_south_bridge = TYPE_PIIX3_DEVICE;
pcmc->pci_root_uid = 0;
pcmc->default_cpu_version = 1;
@ -482,6 +536,13 @@ static void pc_i440fx_machine_options(MachineClass *m)
m->no_parallel = !module_object_class_by_name(TYPE_ISA_PARALLEL);
machine_class_allow_dynamic_sysbus_dev(m, TYPE_RAMFB_DEVICE);
machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE);
object_class_property_add_enum(oc, "x-south-bridge", "PCSouthBridgeOption",
&PCSouthBridgeOption_lookup,
pc_get_south_bridge,
pc_set_south_bridge);
object_class_property_set_description(oc, "x-south-bridge",
"Use a different south bridge than PIIX3");
}
static void pc_i440fx_8_2_machine_options(MachineClass *m)

View File

@ -242,11 +242,18 @@ static void pc_q35_init(MachineState *machine)
host_bus = PCI_BUS(qdev_get_child_bus(DEVICE(phb), "pcie.0"));
pcms->bus = host_bus;
/* irq lines */
gsi_state = pc_gsi_create(&x86ms->gsi, pcmc->pci_enabled);
/* create ISA bus */
lpc = pci_new_multifunction(PCI_DEVFN(ICH9_LPC_DEV, ICH9_LPC_FUNC),
TYPE_ICH9_LPC_DEVICE);
qdev_prop_set_bit(DEVICE(lpc), "smm-enabled",
x86_machine_is_smm_enabled(x86ms));
lpc_dev = DEVICE(lpc);
for (i = 0; i < IOAPIC_NUM_PINS; i++) {
qdev_connect_gpio_out_named(lpc_dev, ICH9_GPIO_GSI, i, x86ms->gsi[i]);
}
pci_realize_and_unref(lpc, host_bus, &error_fatal);
rtc_state = ISA_DEVICE(object_resolve_path_component(OBJECT(lpc), "rtc"));
@ -273,13 +280,6 @@ static void pc_q35_init(MachineState *machine)
"true", true);
}
/* irq lines */
gsi_state = pc_gsi_create(&x86ms->gsi, pcmc->pci_enabled);
lpc_dev = DEVICE(lpc);
for (i = 0; i < IOAPIC_NUM_PINS; i++) {
qdev_connect_gpio_out_named(lpc_dev, ICH9_GPIO_GSI, i, x86ms->gsi[i]);
}
isa_bus = ISA_BUS(qdev_get_child_bus(lpc_dev, "isa.0"));
if (x86ms->pic == ON_OFF_AUTO_ON || x86ms->pic == ON_OFF_AUTO_AUTO) {

View File

@ -31,13 +31,7 @@ config PC87312
select FDC_ISA
select IDE_ISA
config PIIX3
bool
select I8257
select ISA_BUS
select MC146818RTC
config PIIX4
config PIIX
bool
# For historical reasons, SuperIO devices are created in the board
# for PIIX4.

View File

@ -675,6 +675,9 @@ static void ich9_lpc_initfn(Object *obj)
object_initialize_child(obj, "rtc", &lpc->rtc, TYPE_MC146818_RTC);
qdev_init_gpio_out_named(DEVICE(lpc), lpc->gsi, ICH9_GPIO_GSI,
IOAPIC_NUM_PINS);
object_property_add_uint8_ptr(obj, ACPI_PM_PROP_SCI_INT,
&lpc->sci_gsi, OBJ_PROP_FLAG_READ);
object_property_add_uint8_ptr(OBJECT(lpc), ACPI_PM_PROP_ACPI_ENABLE_CMD,
@ -691,9 +694,9 @@ static void ich9_lpc_initfn(Object *obj)
static void ich9_lpc_realize(PCIDevice *d, Error **errp)
{
ICH9LPCState *lpc = ICH9_LPC_DEVICE(d);
DeviceState *dev = DEVICE(d);
PCIBus *pci_bus = pci_get_bus(d);
ISABus *isa_bus;
uint32_t irq;
if ((lpc->smi_host_features & BIT_ULL(ICH9_LPC_SMI_F_CPU_HOT_UNPLUG_BIT)) &&
!(lpc->smi_host_features & BIT_ULL(ICH9_LPC_SMI_F_CPU_HOTPLUG_BIT))) {
@ -734,8 +737,6 @@ static void ich9_lpc_realize(PCIDevice *d, Error **errp)
ICH9_RST_CNT_IOPORT, &lpc->rst_cnt_mem,
1);
qdev_init_gpio_out_named(dev, lpc->gsi, ICH9_GPIO_GSI, IOAPIC_NUM_PINS);
isa_bus_register_input_irqs(isa_bus, lpc->gsi);
i8257_dma_init(isa_bus, 0);
@ -745,6 +746,8 @@ static void ich9_lpc_realize(PCIDevice *d, Error **errp)
if (!qdev_realize(DEVICE(&lpc->rtc), BUS(isa_bus), errp)) {
return;
}
irq = object_property_get_uint(OBJECT(&lpc->rtc), "irq", &error_fatal);
isa_connect_gpio_out(ISA_DEVICE(&lpc->rtc), 0, irq);
pci_bus_irqs(pci_bus, ich9_lpc_set_irq, d, ICH9_LPC_NB_PIRQS);
pci_bus_map_irqs(pci_bus, ich9_lpc_map_irq);

View File

@ -3,8 +3,7 @@ system_ss.add(when: 'CONFIG_I82378', if_true: files('i82378.c'))
system_ss.add(when: 'CONFIG_ISA_BUS', if_true: files('isa-bus.c'))
system_ss.add(when: 'CONFIG_ISA_SUPERIO', if_true: files('isa-superio.c'))
system_ss.add(when: 'CONFIG_PC87312', if_true: files('pc87312.c'))
system_ss.add(when: 'CONFIG_PIIX3', if_true: files('piix3.c'))
system_ss.add(when: 'CONFIG_PIIX4', if_true: files('piix4.c'))
system_ss.add(when: 'CONFIG_PIIX', if_true: files('piix.c'))
system_ss.add(when: 'CONFIG_SMC37C669', if_true: files('smc37c669-superio.c'))
system_ss.add(when: 'CONFIG_VT82C686', if_true: files('vt82c686.c'))

View File

@ -2,6 +2,7 @@
* QEMU PIIX PCI ISA Bridge Emulation
*
* Copyright (c) 2006 Fabrice Bellard
* Copyright (c) 2018 Hervé Poussineau
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@ -27,63 +28,72 @@
#include "qapi/error.h"
#include "hw/dma/i8257.h"
#include "hw/southbridge/piix.h"
#include "hw/timer/i8254.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/ide/piix.h"
#include "hw/intc/i8259.h"
#include "hw/isa/isa.h"
#include "sysemu/runstate.h"
#include "migration/vmstate.h"
#include "hw/acpi/acpi_aml_interface.h"
static void piix3_set_irq_pic(PIIX3State *piix3, int pic_irq)
static void piix_set_irq_pic(PIIXState *s, int pic_irq)
{
qemu_set_irq(piix3->pic[pic_irq],
!!(piix3->pic_levels &
qemu_set_irq(s->isa_irqs_in[pic_irq],
!!(s->pic_levels &
(((1ULL << PIIX_NUM_PIRQS) - 1) <<
(pic_irq * PIIX_NUM_PIRQS))));
}
static void piix3_set_irq_level_internal(PIIX3State *piix3, int pirq, int level)
static void piix_set_pci_irq_level_internal(PIIXState *s, int pirq, int level)
{
int pic_irq;
uint64_t mask;
pic_irq = piix3->dev.config[PIIX_PIRQCA + pirq];
if (pic_irq >= PIIX_NUM_PIC_IRQS) {
pic_irq = s->dev.config[PIIX_PIRQCA + pirq];
if (pic_irq >= ISA_NUM_IRQS) {
return;
}
mask = 1ULL << ((pic_irq * PIIX_NUM_PIRQS) + pirq);
piix3->pic_levels &= ~mask;
piix3->pic_levels |= mask * !!level;
s->pic_levels &= ~mask;
s->pic_levels |= mask * !!level;
}
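/*
 * Editorial illustration: pic_levels keeps one bit per (pic_irq, pirq)
 * pair.  With PIIX_NUM_PIRQS == 4, PIRQB (pirq 1) routed to ISA IRQ 11
 * lands in bit 11 * 4 + 1 == 45; piix_set_irq_pic() then ORs the four
 * bits belonging to ISA IRQ 11 to compute the shared line's level.
 */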
static void piix3_set_irq_level(PIIX3State *piix3, int pirq, int level)
static void piix_set_pci_irq_level(PIIXState *s, int pirq, int level)
{
int pic_irq;
pic_irq = piix3->dev.config[PIIX_PIRQCA + pirq];
if (pic_irq >= PIIX_NUM_PIC_IRQS) {
pic_irq = s->dev.config[PIIX_PIRQCA + pirq];
if (pic_irq >= ISA_NUM_IRQS) {
return;
}
piix3_set_irq_level_internal(piix3, pirq, level);
piix_set_pci_irq_level_internal(s, pirq, level);
piix3_set_irq_pic(piix3, pic_irq);
piix_set_irq_pic(s, pic_irq);
}
static void piix3_set_irq(void *opaque, int pirq, int level)
static void piix_set_pci_irq(void *opaque, int pirq, int level)
{
PIIX3State *piix3 = opaque;
piix3_set_irq_level(piix3, pirq, level);
PIIXState *s = opaque;
piix_set_pci_irq_level(s, pirq, level);
}
static PCIINTxRoute piix3_route_intx_pin_to_irq(void *opaque, int pin)
static void piix_request_i8259_irq(void *opaque, int irq, int level)
{
PIIX3State *piix3 = opaque;
int irq = piix3->dev.config[PIIX_PIRQCA + pin];
PIIXState *s = opaque;
qemu_set_irq(s->cpu_intr, level);
}
static PCIINTxRoute piix_route_intx_pin_to_irq(void *opaque, int pin)
{
PCIDevice *pci_dev = opaque;
int irq = pci_dev->config[PIIX_PIRQCA + pin];
PCIINTxRoute route;
if (irq < PIIX_NUM_PIC_IRQS) {
if (irq < ISA_NUM_IRQS) {
route.mode = PCI_INTX_ENABLED;
route.irq = irq;
} else {
@ -94,36 +104,36 @@ static PCIINTxRoute piix3_route_intx_pin_to_irq(void *opaque, int pin)
}
/* irq routing is changed. so rebuild bitmap */
static void piix3_update_irq_levels(PIIX3State *piix3)
static void piix_update_pci_irq_levels(PIIXState *s)
{
PCIBus *bus = pci_get_bus(&piix3->dev);
PCIBus *bus = pci_get_bus(&s->dev);
int pirq;
piix3->pic_levels = 0;
s->pic_levels = 0;
for (pirq = 0; pirq < PIIX_NUM_PIRQS; pirq++) {
piix3_set_irq_level(piix3, pirq, pci_bus_get_irq_level(bus, pirq));
piix_set_pci_irq_level(s, pirq, pci_bus_get_irq_level(bus, pirq));
}
}
static void piix3_write_config(PCIDevice *dev,
uint32_t address, uint32_t val, int len)
static void piix_write_config(PCIDevice *dev, uint32_t address, uint32_t val,
int len)
{
pci_default_write_config(dev, address, val, len);
if (ranges_overlap(address, len, PIIX_PIRQCA, 4)) {
PIIX3State *piix3 = PIIX3_PCI_DEVICE(dev);
PIIXState *s = PIIX_PCI_DEVICE(dev);
int pic_irq;
pci_bus_fire_intx_routing_notifier(pci_get_bus(&piix3->dev));
piix3_update_irq_levels(piix3);
for (pic_irq = 0; pic_irq < PIIX_NUM_PIC_IRQS; pic_irq++) {
piix3_set_irq_pic(piix3, pic_irq);
pci_bus_fire_intx_routing_notifier(pci_get_bus(&s->dev));
piix_update_pci_irq_levels(s);
for (pic_irq = 0; pic_irq < ISA_NUM_IRQS; pic_irq++) {
piix_set_irq_pic(s, pic_irq);
}
}
}
static void piix3_reset(DeviceState *dev)
static void piix_reset(DeviceState *dev)
{
PIIX3State *d = PIIX3_PCI_DEVICE(dev);
PIIXState *d = PIIX_PCI_DEVICE(dev);
uint8_t *pci_conf = d->dev.config;
pci_conf[0x04] = 0x07; /* master, memory and I/O */
@ -162,9 +172,9 @@ static void piix3_reset(DeviceState *dev)
d->rcr = 0;
}
static int piix3_post_load(void *opaque, int version_id)
static int piix_post_load(void *opaque, int version_id)
{
PIIX3State *piix3 = opaque;
PIIXState *s = opaque;
int pirq;
/*
@ -176,18 +186,29 @@ static int piix3_post_load(void *opaque, int version_id)
* Here, we update irq levels without raising the interrupt.
* Interrupt state will be deserialized separately through the i8259.
*/
piix3->pic_levels = 0;
s->pic_levels = 0;
for (pirq = 0; pirq < PIIX_NUM_PIRQS; pirq++) {
piix3_set_irq_level_internal(piix3, pirq,
pci_bus_get_irq_level(pci_get_bus(&piix3->dev), pirq));
piix_set_pci_irq_level_internal(s, pirq,
pci_bus_get_irq_level(pci_get_bus(&s->dev), pirq));
}
return 0;
}
static int piix4_post_load(void *opaque, int version_id)
{
PIIXState *s = opaque;
if (version_id == 2) {
s->rcr = 0;
}
return piix_post_load(opaque, version_id);
}
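/*
 * Editorial note: version 2 of the "PIIX4" vmstate predates the rcr field
 * (versioned 3 in vmstate_piix4 below), so an incoming v2 stream clears it
 * before the shared piix_post_load() recomputes the PCI IRQ levels.
 */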
static int piix3_pre_save(void *opaque)
{
int i;
PIIX3State *piix3 = opaque;
PIIXState *piix3 = opaque;
for (i = 0; i < ARRAY_SIZE(piix3->pci_irq_levels_vmstate); i++) {
piix3->pci_irq_levels_vmstate[i] =
@ -199,7 +220,7 @@ static int piix3_pre_save(void *opaque)
static bool piix3_rcr_needed(void *opaque)
{
PIIX3State *piix3 = opaque;
PIIXState *piix3 = opaque;
return (piix3->rcr != 0);
}
@ -210,7 +231,7 @@ static const VMStateDescription vmstate_piix3_rcr = {
.minimum_version_id = 1,
.needed = piix3_rcr_needed,
.fields = (VMStateField[]) {
VMSTATE_UINT8(rcr, PIIX3State),
VMSTATE_UINT8(rcr, PIIXState),
VMSTATE_END_OF_LIST()
}
};
@ -219,11 +240,11 @@ static const VMStateDescription vmstate_piix3 = {
.name = "PIIX3",
.version_id = 3,
.minimum_version_id = 2,
.post_load = piix3_post_load,
.post_load = piix_post_load,
.pre_save = piix3_pre_save,
.fields = (VMStateField[]) {
VMSTATE_PCI_DEVICE(dev, PIIX3State),
VMSTATE_INT32_ARRAY_V(pci_irq_levels_vmstate, PIIX3State,
VMSTATE_PCI_DEVICE(dev, PIIXState),
VMSTATE_INT32_ARRAY_V(pci_irq_levels_vmstate, PIIXState,
PIIX_NUM_PIRQS, 3),
VMSTATE_END_OF_LIST()
},
@ -233,10 +254,21 @@ static const VMStateDescription vmstate_piix3 = {
}
};
static const VMStateDescription vmstate_piix4 = {
.name = "PIIX4",
.version_id = 3,
.minimum_version_id = 2,
.post_load = piix4_post_load,
.fields = (VMStateField[]) {
VMSTATE_PCI_DEVICE(dev, PIIXState),
VMSTATE_UINT8_V(rcr, PIIXState, 3),
VMSTATE_END_OF_LIST()
}
};
static void rcr_write(void *opaque, hwaddr addr, uint64_t val, unsigned len)
{
PIIX3State *d = opaque;
PIIXState *d = opaque;
if (val & 4) {
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
@ -247,7 +279,7 @@ static void rcr_write(void *opaque, hwaddr addr, uint64_t val, unsigned len)
static uint64_t rcr_read(void *opaque, hwaddr addr, unsigned len)
{
PIIX3State *d = opaque;
PIIXState *d = opaque;
return d->rcr;
}
@ -262,10 +294,13 @@ static const MemoryRegionOps rcr_ops = {
},
};
static void pci_piix3_realize(PCIDevice *dev, Error **errp)
static void pci_piix_realize(PCIDevice *dev, const char *uhci_type,
Error **errp)
{
PIIX3State *d = PIIX3_PCI_DEVICE(dev);
PIIXState *d = PIIX_PCI_DEVICE(dev);
PCIBus *pci_bus = pci_get_bus(dev);
ISABus *isa_bus;
uint32_t irq;
isa_bus = isa_bus_new(DEVICE(d), pci_address_space(dev),
pci_address_space_io(dev), errp);
@ -274,10 +309,33 @@ static void pci_piix3_realize(PCIDevice *dev, Error **errp)
}
memory_region_init_io(&d->rcr_mem, OBJECT(dev), &rcr_ops, d,
"piix3-reset-control", 1);
"piix-reset-control", 1);
memory_region_add_subregion_overlap(pci_address_space_io(dev),
PIIX_RCR_IOPORT, &d->rcr_mem, 1);
/* PIC */
if (d->has_pic) {
qemu_irq *i8259_out_irq = qemu_allocate_irqs(piix_request_i8259_irq, d,
1);
qemu_irq *i8259 = i8259_init(isa_bus, *i8259_out_irq);
size_t i;
for (i = 0; i < ISA_NUM_IRQS; i++) {
d->isa_irqs_in[i] = i8259[i];
}
g_free(i8259);
qdev_init_gpio_out_named(DEVICE(dev), &d->cpu_intr, "intr", 1);
}
isa_bus_register_input_irqs(isa_bus, d->isa_irqs_in);
/* PIT */
if (d->has_pit) {
i8254_pit_init(isa_bus, 0x40, 0, NULL);
}
i8257_dma_init(isa_bus, 0);
/* RTC */
@ -285,6 +343,38 @@ static void pci_piix3_realize(PCIDevice *dev, Error **errp)
if (!qdev_realize(DEVICE(&d->rtc), BUS(isa_bus), errp)) {
return;
}
irq = object_property_get_uint(OBJECT(&d->rtc), "irq", &error_fatal);
isa_connect_gpio_out(ISA_DEVICE(&d->rtc), 0, irq);
/* IDE */
qdev_prop_set_int32(DEVICE(&d->ide), "addr", dev->devfn + 1);
if (!qdev_realize(DEVICE(&d->ide), BUS(pci_bus), errp)) {
return;
}
/* USB */
if (d->has_usb) {
object_initialize_child(OBJECT(dev), "uhci", &d->uhci, uhci_type);
qdev_prop_set_int32(DEVICE(&d->uhci), "addr", dev->devfn + 2);
if (!qdev_realize(DEVICE(&d->uhci), BUS(pci_bus), errp)) {
return;
}
}
/* Power Management */
if (d->has_acpi) {
object_initialize_child(OBJECT(d), "pm", &d->pm, TYPE_PIIX4_PM);
qdev_prop_set_int32(DEVICE(&d->pm), "addr", dev->devfn + 3);
qdev_prop_set_uint32(DEVICE(&d->pm), "smb_io_base", d->smb_io_base);
qdev_prop_set_bit(DEVICE(&d->pm), "smm-enabled", d->smm_enabled);
if (!qdev_realize(DEVICE(&d->pm), BUS(pci_bus), errp)) {
return;
}
qdev_connect_gpio_out(DEVICE(&d->pm), 0, d->isa_irqs_in[9]);
}
pci_bus_irqs(pci_bus, piix_set_pci_irq, d, PIIX_NUM_PIRQS);
pci_bus_set_route_irq_fn(pci_bus, piix_route_intx_pin_to_irq);
}
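/*
 * Editorial note: the embedded functions derive from the bridge's own
 * devfn (IDE at devfn + 1, UHCI at devfn + 2, PIIX4_PM at devfn + 3),
 * mirroring the layout that boards such as pc_init1() previously wired
 * up by hand.
 */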
static void build_pci_isa_aml(AcpiDevAmlIf *adev, Aml *scope)
@ -308,43 +398,54 @@ static void build_pci_isa_aml(AcpiDevAmlIf *adev, Aml *scope)
qbus_build_aml(bus, scope);
}
static void pci_piix3_init(Object *obj)
static void pci_piix_init(Object *obj)
{
PIIX3State *d = PIIX3_PCI_DEVICE(obj);
PIIXState *d = PIIX_PCI_DEVICE(obj);
qdev_init_gpio_out_named(DEVICE(obj), d->isa_irqs_in, "isa-irqs",
ISA_NUM_IRQS);
object_initialize_child(obj, "rtc", &d->rtc, TYPE_MC146818_RTC);
}
static void pci_piix3_class_init(ObjectClass *klass, void *data)
static Property pci_piix_props[] = {
DEFINE_PROP_UINT32("smb_io_base", PIIXState, smb_io_base, 0),
DEFINE_PROP_BOOL("has-acpi", PIIXState, has_acpi, true),
DEFINE_PROP_BOOL("has-pic", PIIXState, has_pic, true),
DEFINE_PROP_BOOL("has-pit", PIIXState, has_pit, true),
DEFINE_PROP_BOOL("has-usb", PIIXState, has_usb, true),
DEFINE_PROP_BOOL("smm-enabled", PIIXState, smm_enabled, false),
DEFINE_PROP_END_OF_LIST(),
};
static void pci_piix_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
AcpiDevAmlIfClass *adevc = ACPI_DEV_AML_IF_CLASS(klass);
k->config_write = piix3_write_config;
dc->reset = piix3_reset;
k->config_write = piix_write_config;
dc->reset = piix_reset;
dc->desc = "ISA bridge";
dc->vmsd = &vmstate_piix3;
dc->hotpluggable = false;
k->vendor_id = PCI_VENDOR_ID_INTEL;
/* 82371SB PIIX3 PCI-to-ISA bridge (Step A1) */
k->device_id = PCI_DEVICE_ID_INTEL_82371SB_0;
k->class_id = PCI_CLASS_BRIDGE_ISA;
/*
* Reason: part of PIIX3 southbridge, needs to be wired up by
* Reason: part of PIIX southbridge, needs to be wired up by e.g.
* pc_piix.c's pc_init1()
*/
dc->user_creatable = false;
device_class_set_props(dc, pci_piix_props);
adevc->build_dev_aml = build_pci_isa_aml;
}
static const TypeInfo piix3_pci_type_info = {
.name = TYPE_PIIX3_PCI_DEVICE,
static const TypeInfo piix_pci_type_info = {
.name = TYPE_PIIX_PCI_DEVICE,
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PIIX3State),
.instance_init = pci_piix3_init,
.instance_size = sizeof(PIIXState),
.instance_init = pci_piix_init,
.abstract = true,
.class_init = pci_piix3_class_init,
.class_init = pci_piix_class_init,
.interfaces = (InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ TYPE_ACPI_DEV_AML_IF },
@ -354,36 +455,68 @@ static const TypeInfo piix3_pci_type_info = {
static void piix3_realize(PCIDevice *dev, Error **errp)
{
ERRP_GUARD();
PIIX3State *piix3 = PIIX3_PCI_DEVICE(dev);
PCIBus *pci_bus = pci_get_bus(dev);
pci_piix_realize(dev, TYPE_PIIX3_USB_UHCI, errp);
}
pci_piix3_realize(dev, errp);
if (*errp) {
return;
}
static void piix3_init(Object *obj)
{
PIIXState *d = PIIX_PCI_DEVICE(obj);
pci_bus_irqs(pci_bus, piix3_set_irq, piix3, PIIX_NUM_PIRQS);
pci_bus_set_route_irq_fn(pci_bus, piix3_route_intx_pin_to_irq);
object_initialize_child(obj, "ide", &d->ide, TYPE_PIIX3_IDE);
}
static void piix3_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
k->realize = piix3_realize;
/* 82371SB PIIX3 PCI-to-ISA bridge (Step A1) */
k->device_id = PCI_DEVICE_ID_INTEL_82371SB_0;
dc->vmsd = &vmstate_piix3;
}
static const TypeInfo piix3_info = {
.name = TYPE_PIIX3_DEVICE,
.parent = TYPE_PIIX3_PCI_DEVICE,
.parent = TYPE_PIIX_PCI_DEVICE,
.instance_init = piix3_init,
.class_init = piix3_class_init,
};
static void piix4_realize(PCIDevice *dev, Error **errp)
{
pci_piix_realize(dev, TYPE_PIIX4_USB_UHCI, errp);
}
static void piix4_init(Object *obj)
{
PIIXState *s = PIIX_PCI_DEVICE(obj);
object_initialize_child(obj, "ide", &s->ide, TYPE_PIIX4_IDE);
}
static void piix4_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
k->realize = piix4_realize;
k->device_id = PCI_DEVICE_ID_INTEL_82371AB_0;
dc->vmsd = &vmstate_piix4;
}
static const TypeInfo piix4_info = {
.name = TYPE_PIIX4_PCI_DEVICE,
.parent = TYPE_PIIX_PCI_DEVICE,
.instance_init = piix4_init,
.class_init = piix4_class_init,
};
static void piix3_register_types(void)
{
type_register_static(&piix3_pci_type_info);
type_register_static(&piix_pci_type_info);
type_register_static(&piix3_info);
type_register_static(&piix4_info);
}
type_init(piix3_register_types)

View File

@ -1,302 +0,0 @@
/*
* QEMU PIIX4 PCI Bridge Emulation
*
* Copyright (c) 2006 Fabrice Bellard
* Copyright (c) 2018 Hervé Poussineau
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/irq.h"
#include "hw/southbridge/piix.h"
#include "hw/pci/pci.h"
#include "hw/ide/piix.h"
#include "hw/isa/isa.h"
#include "hw/intc/i8259.h"
#include "hw/dma/i8257.h"
#include "hw/timer/i8254.h"
#include "hw/rtc/mc146818rtc.h"
#include "hw/ide/pci.h"
#include "hw/acpi/piix4.h"
#include "hw/usb/hcd-uhci.h"
#include "migration/vmstate.h"
#include "sysemu/reset.h"
#include "sysemu/runstate.h"
#include "qom/object.h"
struct PIIX4State {
PCIDevice dev;
qemu_irq cpu_intr;
qemu_irq *isa;
MC146818RtcState rtc;
PCIIDEState ide;
UHCIState uhci;
PIIX4PMState pm;
/* Reset Control Register */
MemoryRegion rcr_mem;
uint8_t rcr;
};
OBJECT_DECLARE_SIMPLE_TYPE(PIIX4State, PIIX4_PCI_DEVICE)
static void piix4_set_irq(void *opaque, int irq_num, int level)
{
int i, pic_irq, pic_level;
PIIX4State *s = opaque;
PCIBus *bus = pci_get_bus(&s->dev);
/* now we change the pic irq level according to the piix irq mappings */
/* XXX: optimize */
pic_irq = s->dev.config[PIIX_PIRQCA + irq_num];
if (pic_irq < ISA_NUM_IRQS) {
/* The pic level is the logical OR of all the PCI irqs mapped to it. */
pic_level = 0;
for (i = 0; i < PIIX_NUM_PIRQS; i++) {
if (pic_irq == s->dev.config[PIIX_PIRQCA + i]) {
pic_level |= pci_bus_get_irq_level(bus, i);
}
}
qemu_set_irq(s->isa[pic_irq], pic_level);
}
}
static void piix4_isa_reset(DeviceState *dev)
{
PIIX4State *d = PIIX4_PCI_DEVICE(dev);
uint8_t *pci_conf = d->dev.config;
pci_conf[0x04] = 0x07; // master, memory and I/O
pci_conf[0x05] = 0x00;
pci_conf[0x06] = 0x00;
pci_conf[0x07] = 0x02; // PCI_status_devsel_medium
pci_conf[0x4c] = 0x4d;
pci_conf[0x4e] = 0x03;
pci_conf[0x4f] = 0x00;
pci_conf[0x60] = 0x80;
pci_conf[0x61] = 0x80;
pci_conf[0x62] = 0x80;
pci_conf[0x63] = 0x80;
pci_conf[0x69] = 0x02;
pci_conf[0x70] = 0x80;
pci_conf[0x76] = 0x0c;
pci_conf[0x77] = 0x0c;
pci_conf[0x78] = 0x02;
pci_conf[0x79] = 0x00;
pci_conf[0x80] = 0x00;
pci_conf[0x82] = 0x00;
pci_conf[0xa0] = 0x08;
pci_conf[0xa2] = 0x00;
pci_conf[0xa3] = 0x00;
pci_conf[0xa4] = 0x00;
pci_conf[0xa5] = 0x00;
pci_conf[0xa6] = 0x00;
pci_conf[0xa7] = 0x00;
pci_conf[0xa8] = 0x0f;
pci_conf[0xaa] = 0x00;
pci_conf[0xab] = 0x00;
pci_conf[0xac] = 0x00;
pci_conf[0xae] = 0x00;
d->rcr = 0;
}
static int piix4_post_load(void *opaque, int version_id)
{
PIIX4State *s = opaque;
if (version_id == 2) {
s->rcr = 0;
}
return 0;
}
static const VMStateDescription vmstate_piix4 = {
.name = "PIIX4",
.version_id = 3,
.minimum_version_id = 2,
.post_load = piix4_post_load,
.fields = (VMStateField[]) {
VMSTATE_PCI_DEVICE(dev, PIIX4State),
VMSTATE_UINT8_V(rcr, PIIX4State, 3),
VMSTATE_END_OF_LIST()
}
};
static void piix4_request_i8259_irq(void *opaque, int irq, int level)
{
PIIX4State *s = opaque;
qemu_set_irq(s->cpu_intr, level);
}
static void piix4_set_i8259_irq(void *opaque, int irq, int level)
{
PIIX4State *s = opaque;
qemu_set_irq(s->isa[irq], level);
}
static void piix4_rcr_write(void *opaque, hwaddr addr, uint64_t val,
unsigned int len)
{
PIIX4State *s = opaque;
if (val & 4) {
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
return;
}
s->rcr = val & 2; /* keep System Reset type only */
}
static uint64_t piix4_rcr_read(void *opaque, hwaddr addr, unsigned int len)
{
PIIX4State *s = opaque;
return s->rcr;
}
static const MemoryRegionOps piix4_rcr_ops = {
.read = piix4_rcr_read,
.write = piix4_rcr_write,
.endianness = DEVICE_LITTLE_ENDIAN,
.impl = {
.min_access_size = 1,
.max_access_size = 1,
},
};
static void piix4_realize(PCIDevice *dev, Error **errp)
{
PIIX4State *s = PIIX4_PCI_DEVICE(dev);
PCIBus *pci_bus = pci_get_bus(dev);
ISABus *isa_bus;
qemu_irq *i8259_out_irq;
isa_bus = isa_bus_new(DEVICE(dev), pci_address_space(dev),
pci_address_space_io(dev), errp);
if (!isa_bus) {
return;
}
qdev_init_gpio_in_named(DEVICE(dev), piix4_set_i8259_irq,
"isa", ISA_NUM_IRQS);
qdev_init_gpio_out_named(DEVICE(dev), &s->cpu_intr,
"intr", 1);
memory_region_init_io(&s->rcr_mem, OBJECT(dev), &piix4_rcr_ops, s,
"reset-control", 1);
memory_region_add_subregion_overlap(pci_address_space_io(dev),
PIIX_RCR_IOPORT, &s->rcr_mem, 1);
/* initialize i8259 pic */
i8259_out_irq = qemu_allocate_irqs(piix4_request_i8259_irq, s, 1);
s->isa = i8259_init(isa_bus, *i8259_out_irq);
/* initialize ISA irqs */
isa_bus_register_input_irqs(isa_bus, s->isa);
/* initialize pit */
i8254_pit_init(isa_bus, 0x40, 0, NULL);
/* DMA */
i8257_dma_init(isa_bus, 0);
/* RTC */
qdev_prop_set_int32(DEVICE(&s->rtc), "base_year", 2000);
if (!qdev_realize(DEVICE(&s->rtc), BUS(isa_bus), errp)) {
return;
}
s->rtc.irq = isa_get_irq(ISA_DEVICE(&s->rtc), s->rtc.isairq);
/* IDE */
qdev_prop_set_int32(DEVICE(&s->ide), "addr", dev->devfn + 1);
if (!qdev_realize(DEVICE(&s->ide), BUS(pci_bus), errp)) {
return;
}
/* USB */
qdev_prop_set_int32(DEVICE(&s->uhci), "addr", dev->devfn + 2);
if (!qdev_realize(DEVICE(&s->uhci), BUS(pci_bus), errp)) {
return;
}
/* ACPI controller */
qdev_prop_set_int32(DEVICE(&s->pm), "addr", dev->devfn + 3);
if (!qdev_realize(DEVICE(&s->pm), BUS(pci_bus), errp)) {
return;
}
qdev_connect_gpio_out(DEVICE(&s->pm), 0, s->isa[9]);
pci_bus_irqs(pci_bus, piix4_set_irq, s, PIIX_NUM_PIRQS);
}
static void piix4_init(Object *obj)
{
PIIX4State *s = PIIX4_PCI_DEVICE(obj);
object_initialize_child(obj, "rtc", &s->rtc, TYPE_MC146818_RTC);
object_initialize_child(obj, "ide", &s->ide, TYPE_PIIX4_IDE);
object_initialize_child(obj, "uhci", &s->uhci, TYPE_PIIX4_USB_UHCI);
object_initialize_child(obj, "pm", &s->pm, TYPE_PIIX4_PM);
qdev_prop_set_uint32(DEVICE(&s->pm), "smb_io_base", 0x1100);
qdev_prop_set_bit(DEVICE(&s->pm), "smm-enabled", 0);
}
static void piix4_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
k->realize = piix4_realize;
k->vendor_id = PCI_VENDOR_ID_INTEL;
k->device_id = PCI_DEVICE_ID_INTEL_82371AB_0;
k->class_id = PCI_CLASS_BRIDGE_ISA;
dc->reset = piix4_isa_reset;
dc->desc = "ISA bridge";
dc->vmsd = &vmstate_piix4;
/*
* Reason: part of PIIX4 southbridge, needs to be wired up,
* e.g. by mips_malta_init()
*/
dc->user_creatable = false;
dc->hotpluggable = false;
}
static const TypeInfo piix4_info = {
.name = TYPE_PIIX4_PCI_DEVICE,
.parent = TYPE_PCI_DEVICE,
.instance_size = sizeof(PIIX4State),
.instance_init = piix4_init,
.class_init = piix4_class_init,
.interfaces = (InterfaceInfo[]) {
{ INTERFACE_CONVENTIONAL_PCI_DEVICE },
{ },
},
};
static void piix4_register_types(void)
{
type_register_static(&piix4_info);
}
type_init(piix4_register_types)

View File

@ -2,7 +2,7 @@ config MALTA
bool
select GT64120
select ISA_SUPERIO
select PIIX4
select PIIX
config MIPSSIM
bool

View File

@ -1237,8 +1237,9 @@ void mips_malta_init(MachineState *machine)
pci_bus_map_irqs(pci_bus, malta_pci_slot_get_pirq);
/* Southbridge */
piix4 = pci_create_simple_multifunction(pci_bus, PIIX4_PCI_DEVFN,
TYPE_PIIX4_PCI_DEVICE);
piix4 = pci_new_multifunction(PIIX4_PCI_DEVFN, TYPE_PIIX4_PCI_DEVICE);
qdev_prop_set_uint32(DEVICE(piix4), "smb_io_base", 0x1100);
pci_realize_and_unref(piix4, pci_bus, &error_fatal);
isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(piix4), "isa.0"));
dev = DEVICE(object_resolve_path_component(OBJECT(piix4), "ide"));

View File

@ -16,6 +16,7 @@
*/
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/virtio/vhost.h"
@ -25,7 +26,7 @@
#include "hw/virtio/virtio-access.h"
#include "hw/fw-path-provider.h"
int vhost_scsi_common_start(VHostSCSICommon *vsc)
int vhost_scsi_common_start(VHostSCSICommon *vsc, Error **errp)
{
int ret, i;
VirtIODevice *vdev = VIRTIO_DEVICE(vsc);
@ -35,42 +36,51 @@ int vhost_scsi_common_start(VHostSCSICommon *vsc)
VirtIOSCSICommon *vs = (VirtIOSCSICommon *)vsc;
if (!k->set_guest_notifiers) {
error_report("binding does not support guest notifiers");
error_setg(errp, "binding does not support guest notifiers");
return -ENOSYS;
}
ret = vhost_dev_enable_notifiers(&vsc->dev, vdev);
if (ret < 0) {
error_setg_errno(errp, -ret, "Error enabling host notifiers");
return ret;
}
ret = k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, true);
if (ret < 0) {
error_report("Error binding guest notifier");
error_setg_errno(errp, -ret, "Error binding guest notifier");
goto err_host_notifiers;
}
vsc->dev.acked_features = vdev->guest_features;
assert(vsc->inflight == NULL);
vsc->inflight = g_new0(struct vhost_inflight, 1);
ret = vhost_dev_get_inflight(&vsc->dev,
vs->conf.virtqueue_size,
vsc->inflight);
ret = vhost_dev_prepare_inflight(&vsc->dev, vdev);
if (ret < 0) {
error_report("Error get inflight: %d", -ret);
error_setg_errno(errp, -ret, "Error setting inflight format");
goto err_guest_notifiers;
}
ret = vhost_dev_set_inflight(&vsc->dev, vsc->inflight);
if (ret < 0) {
error_report("Error set inflight: %d", -ret);
goto err_guest_notifiers;
if (vsc->inflight) {
if (!vsc->inflight->addr) {
ret = vhost_dev_get_inflight(&vsc->dev,
vs->conf.virtqueue_size,
vsc->inflight);
if (ret < 0) {
error_setg_errno(errp, -ret, "Error getting inflight");
goto err_guest_notifiers;
}
}
ret = vhost_dev_set_inflight(&vsc->dev, vsc->inflight);
if (ret < 0) {
error_setg_errno(errp, -ret, "Error setting inflight");
goto err_guest_notifiers;
}
}
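    /*
     * Editorial note: vsc->inflight->addr is only unset before the first
     * successful start, so VHOST_USER_GET_INFLIGHT_FD is sent just once per
     * device; on reconnect the already-mapped region is re-submitted through
     * vhost_dev_set_inflight() so in-flight requests can be resumed.
     */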
ret = vhost_dev_start(&vsc->dev, vdev, true);
if (ret < 0) {
error_report("Error start vhost dev");
error_setg_errno(errp, -ret, "Error starting vhost dev");
goto err_guest_notifiers;
}
@ -85,9 +95,6 @@ int vhost_scsi_common_start(VHostSCSICommon *vsc)
return ret;
err_guest_notifiers:
g_free(vsc->inflight);
vsc->inflight = NULL;
k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, false);
err_host_notifiers:
vhost_dev_disable_notifiers(&vsc->dev, vdev);
@ -111,12 +118,6 @@ void vhost_scsi_common_stop(VHostSCSICommon *vsc)
}
assert(ret >= 0);
if (vsc->inflight) {
vhost_dev_free_inflight(vsc->inflight);
g_free(vsc->inflight);
vsc->inflight = NULL;
}
vhost_dev_disable_notifiers(&vsc->dev, vdev);
}

View File

@ -75,6 +75,7 @@ static int vhost_scsi_start(VHostSCSI *s)
int ret, abi_version;
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
const VhostOps *vhost_ops = vsc->dev.vhost_ops;
Error *local_err = NULL;
ret = vhost_ops->vhost_scsi_get_abi_version(&vsc->dev, &abi_version);
if (ret < 0) {
@@ -88,14 +89,15 @@ static int vhost_scsi_start(VHostSCSI *s)
return -ENOSYS;
}
ret = vhost_scsi_common_start(vsc);
ret = vhost_scsi_common_start(vsc, &local_err);
if (ret < 0) {
error_reportf_err(local_err, "Error starting vhost-scsi");
return ret;
}
ret = vhost_scsi_set_endpoint(s);
if (ret < 0) {
error_report("Error setting vhost-scsi endpoint");
error_reportf_err(local_err, "Error setting vhost-scsi endpoint");
vhost_scsi_common_stop(vsc);
}


@@ -39,69 +39,231 @@ static const int user_feature_bits[] = {
VHOST_INVALID_FEATURE_BIT
};
static int vhost_user_scsi_start(VHostUserSCSI *s, Error **errp)
{
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
int ret;
ret = vhost_scsi_common_start(vsc, errp);
s->started_vu = !(ret < 0);
return ret;
}
static void vhost_user_scsi_stop(VHostUserSCSI *s)
{
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
if (!s->started_vu) {
return;
}
s->started_vu = false;
vhost_scsi_common_stop(vsc);
}
static void vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostUserSCSI *s = (VHostUserSCSI *)vdev;
DeviceState *dev = DEVICE(vdev);
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
bool start = (status & VIRTIO_CONFIG_S_DRIVER_OK) && vdev->vm_running;
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
bool should_start = virtio_device_should_start(vdev, status);
Error *local_err = NULL;
int ret;
if (vhost_dev_is_started(&vsc->dev) == start) {
if (!s->connected) {
return;
}
if (start) {
int ret;
if (vhost_dev_is_started(&vsc->dev) == should_start) {
return;
}
ret = vhost_scsi_common_start(vsc);
if (should_start) {
ret = vhost_user_scsi_start(s, &local_err);
if (ret < 0) {
error_report("unable to start vhost-user-scsi: %s", strerror(-ret));
exit(1);
error_reportf_err(local_err, "unable to start vhost-user-scsi: %s",
strerror(-ret));
qemu_chr_fe_disconnect(&vs->conf.chardev);
}
} else {
vhost_scsi_common_stop(vsc);
vhost_user_scsi_stop(s);
}
}
static void vhost_user_scsi_reset(VirtIODevice *vdev)
static void vhost_user_scsi_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(vdev);
struct vhost_dev *dev = &vsc->dev;
VHostUserSCSI *s = (VHostUserSCSI *)vdev;
DeviceState *dev = DEVICE(vdev);
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
/*
* Historically, reset was not implemented so only reset devices
* that are expecting it.
*/
if (!virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_RESET_DEVICE)) {
Error *local_err = NULL;
int i, ret;
if (!vdev->start_on_kick) {
return;
}
if (dev->vhost_ops->vhost_reset_device) {
dev->vhost_ops->vhost_reset_device(dev);
if (!s->connected) {
return;
}
if (vhost_dev_is_started(&vsc->dev)) {
return;
}
/*
* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
* vhost here instead of waiting for .set_status().
*/
ret = vhost_user_scsi_start(s, &local_err);
if (ret < 0) {
error_reportf_err(local_err, "vhost-user-scsi: vhost start failed: ");
qemu_chr_fe_disconnect(&vs->conf.chardev);
return;
}
/* Kick right away to begin processing requests already in vring */
for (i = 0; i < vsc->dev.nvqs; i++) {
VirtQueue *kick_vq = virtio_get_queue(vdev, i);
if (!virtio_queue_get_desc_addr(vdev, i)) {
continue;
}
event_notifier_set(virtio_queue_get_host_notifier(kick_vq));
}
}
static void vhost_dummy_handle_output(VirtIODevice *vdev, VirtQueue *vq)
static int vhost_user_scsi_connect(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VHostUserSCSI *s = VHOST_USER_SCSI(vdev);
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
int ret = 0;
if (s->connected) {
return 0;
}
s->connected = true;
vsc->dev.num_queues = vs->conf.num_queues;
vsc->dev.nvqs = VIRTIO_SCSI_VQ_NUM_FIXED + vs->conf.num_queues;
vsc->dev.vqs = s->vhost_vqs;
vsc->dev.vq_index = 0;
vsc->dev.backend_features = 0;
ret = vhost_dev_init(&vsc->dev, &s->vhost_user, VHOST_BACKEND_TYPE_USER, 0,
errp);
if (ret < 0) {
return ret;
}
/* restore vhost state */
if (virtio_device_started(vdev, vdev->status)) {
ret = vhost_user_scsi_start(s, errp);
}
return ret;
}
static void vhost_user_scsi_event(void *opaque, QEMUChrEvent event);
static void vhost_user_scsi_disconnect(DeviceState *dev)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VHostUserSCSI *s = VHOST_USER_SCSI(vdev);
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
if (!s->connected) {
return;
}
s->connected = false;
vhost_user_scsi_stop(s);
vhost_dev_cleanup(&vsc->dev);
/* Re-instate the event handler for new connections */
qemu_chr_fe_set_handlers(&vs->conf.chardev, NULL, NULL,
vhost_user_scsi_event, NULL, dev, NULL, true);
}
static void vhost_user_scsi_event(void *opaque, QEMUChrEvent event)
{
DeviceState *dev = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VHostUserSCSI *s = VHOST_USER_SCSI(vdev);
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
Error *local_err = NULL;
switch (event) {
case CHR_EVENT_OPENED:
if (vhost_user_scsi_connect(dev, &local_err) < 0) {
error_report_err(local_err);
qemu_chr_fe_disconnect(&vs->conf.chardev);
return;
}
break;
case CHR_EVENT_CLOSED:
/* defer close until later to avoid circular close */
vhost_user_async_close(dev, &vs->conf.chardev, &vsc->dev,
vhost_user_scsi_disconnect,
vhost_user_scsi_event);
break;
case CHR_EVENT_BREAK:
case CHR_EVENT_MUX_IN:
case CHR_EVENT_MUX_OUT:
/* Ignore */
break;
}
}
static int vhost_user_scsi_realize_connect(VHostUserSCSI *s, Error **errp)
{
DeviceState *dev = DEVICE(s);
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
int ret;
s->connected = false;
ret = qemu_chr_fe_wait_connected(&vs->conf.chardev, errp);
if (ret < 0) {
return ret;
}
ret = vhost_user_scsi_connect(dev, errp);
if (ret < 0) {
qemu_chr_fe_disconnect(&vs->conf.chardev);
return ret;
}
assert(s->connected);
return 0;
}
static void vhost_user_scsi_realize(DeviceState *dev, Error **errp)
{
ERRP_GUARD();
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
VHostUserSCSI *s = VHOST_USER_SCSI(dev);
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
struct vhost_virtqueue *vqs = NULL;
Error *err = NULL;
int ret;
int retries = VU_REALIZE_CONN_RETRIES;
if (!vs->conf.chardev.chr) {
error_setg(errp, "vhost-user-scsi: missing chardev");
return;
}
virtio_scsi_common_realize(dev, vhost_dummy_handle_output,
vhost_dummy_handle_output,
vhost_dummy_handle_output, &err);
virtio_scsi_common_realize(dev, vhost_user_scsi_handle_output,
vhost_user_scsi_handle_output,
vhost_user_scsi_handle_output, &err);
if (err != NULL) {
error_propagate(errp, err);
return;
@@ -111,18 +273,28 @@ static void vhost_user_scsi_realize(DeviceState *dev, Error **errp)
goto free_virtio;
}
vsc->dev.nvqs = VIRTIO_SCSI_VQ_NUM_FIXED + vs->conf.num_queues;
vsc->dev.vqs = g_new0(struct vhost_virtqueue, vsc->dev.nvqs);
vsc->dev.vq_index = 0;
vsc->dev.backend_features = 0;
vqs = vsc->dev.vqs;
vsc->inflight = g_new0(struct vhost_inflight, 1);
s->vhost_vqs = g_new0(struct vhost_virtqueue,
VIRTIO_SCSI_VQ_NUM_FIXED + vs->conf.num_queues);
assert(!*errp);
do {
if (*errp) {
error_prepend(errp, "Reconnecting after error: ");
error_report_err(*errp);
*errp = NULL;
}
ret = vhost_user_scsi_realize_connect(s, errp);
} while (ret < 0 && retries--);
ret = vhost_dev_init(&vsc->dev, &s->vhost_user,
VHOST_BACKEND_TYPE_USER, 0, errp);
if (ret < 0) {
goto free_vhost;
}
/* we're fully initialized, now we can operate, so add the handler */
qemu_chr_fe_set_handlers(&vs->conf.chardev, NULL, NULL,
vhost_user_scsi_event, NULL, (void *)dev,
NULL, true);
/* Channel and lun both are 0 for bootable vhost-user-scsi disk */
vsc->channel = 0;
vsc->lun = 0;
@@ -131,8 +303,12 @@ static void vhost_user_scsi_realize(DeviceState *dev, Error **errp)
return;
free_vhost:
g_free(s->vhost_vqs);
s->vhost_vqs = NULL;
g_free(vsc->inflight);
vsc->inflight = NULL;
vhost_user_cleanup(&s->vhost_user);
g_free(vqs);
free_virtio:
virtio_scsi_common_unrealize(dev);
}
@@ -142,16 +318,23 @@ static void vhost_user_scsi_unrealize(DeviceState *dev)
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
VHostUserSCSI *s = VHOST_USER_SCSI(dev);
VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
struct vhost_virtqueue *vqs = vsc->dev.vqs;
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
/* This will stop the vhost backend. */
vhost_user_scsi_set_status(vdev, 0);
qemu_chr_fe_set_handlers(&vs->conf.chardev, NULL, NULL, NULL, NULL, NULL,
NULL, false);
vhost_dev_cleanup(&vsc->dev);
g_free(vqs);
g_free(s->vhost_vqs);
s->vhost_vqs = NULL;
vhost_dev_free_inflight(vsc->inflight);
g_free(vsc->inflight);
vsc->inflight = NULL;
virtio_scsi_common_unrealize(dev);
vhost_user_cleanup(&s->vhost_user);
virtio_scsi_common_unrealize(dev);
}
static Property vhost_user_scsi_properties[] = {
@@ -200,7 +383,6 @@ static void vhost_user_scsi_class_init(ObjectClass *klass, void *data)
vdc->get_features = vhost_scsi_common_get_features;
vdc->set_config = vhost_scsi_common_set_config;
vdc->set_status = vhost_user_scsi_set_status;
vdc->reset = vhost_user_scsi_reset;
fwc->get_dev_path = vhost_scsi_common_get_fw_dev_path;
}
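Editor's note: a condensed sketch of the reconnect pattern vhost-user-scsi adopts above; vhost-user-blk and vhost-user-gpio follow the same shape. The MyDev type and my_* helpers are hypothetical stand-ins, while CharBackend, QEMUChrEvent, qemu_chr_fe_set_handlers() and the five-argument vhost_user_async_close() are the real APIs used by this series:

#include "qemu/osdep.h"
#include "chardev/char-fe.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user.h"

typedef struct MyDev {
    DeviceState parent;
    CharBackend chardev;
    struct vhost_dev vhost;
    bool connected;
} MyDev;

static void my_event(void *opaque, QEMUChrEvent event);

static void my_disconnect(DeviceState *dev)
{
    MyDev *d = (MyDev *)dev;            /* real code uses a QOM cast */

    if (!d->connected) {
        return;
    }
    d->connected = false;
    vhost_dev_cleanup(&d->vhost);
    /* re-arm the handler so the next backend connection is noticed */
    qemu_chr_fe_set_handlers(&d->chardev, NULL, NULL, my_event,
                             NULL, dev, NULL, true);
}

static void my_event(void *opaque, QEMUChrEvent event)
{
    MyDev *d = opaque;

    switch (event) {
    case CHR_EVENT_OPENED:
        /* re-run vhost_dev_init() and restart the device if the guest
         * had already driven it, as vhost_user_scsi_connect() does above */
        break;
    case CHR_EVENT_CLOSED:
        /* defer teardown to a BH; tearing down from inside the chardev
         * callback would close the chardev re-entrantly */
        vhost_user_async_close(opaque, &d->chardev, &d->vhost,
                               my_disconnect, my_event);
        break;
    default:
        break;
    }
}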


@@ -52,10 +52,8 @@ int pit_get_out(PITChannelState *s, int64_t current_time)
switch (s->mode) {
default:
case 0:
out = (d >= s->count);
break;
case 1:
out = (d < s->count);
out = (d >= s->count);
break;
case 2:
if ((d % s->count) == 0 && d != 0) {


@@ -197,11 +197,6 @@ static int vhost_kernel_set_owner(struct vhost_dev *dev)
return vhost_kernel_call(dev, VHOST_SET_OWNER, NULL);
}
static int vhost_kernel_reset_device(struct vhost_dev *dev)
{
return vhost_kernel_call(dev, VHOST_RESET_OWNER, NULL);
}
static int vhost_kernel_get_vq_index(struct vhost_dev *dev, int idx)
{
assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
@@ -322,7 +317,6 @@ const VhostOps kernel_ops = {
.vhost_get_features = vhost_kernel_get_features,
.vhost_set_backend_cap = vhost_kernel_set_backend_cap,
.vhost_set_owner = vhost_kernel_set_owner,
.vhost_reset_device = vhost_kernel_reset_device,
.vhost_get_vq_index = vhost_kernel_get_vq_index,
.vhost_vsock_set_guest_cid = vhost_kernel_vsock_set_guest_cid,
.vhost_vsock_set_running = vhost_kernel_vsock_set_running,


@@ -66,7 +66,7 @@ bool vhost_svq_valid_features(uint64_t features, Error **errp)
*
* @svq: The svq
*/
static uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq)
uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq)
{
return svq->num_free;
}


@@ -114,6 +114,7 @@ typedef struct VhostShadowVirtqueue {
bool vhost_svq_valid_features(uint64_t features, Error **errp);
uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq);
void vhost_svq_push_elem(VhostShadowVirtqueue *svq,
const VirtQueueElement *elem, uint32_t len);
int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,


@@ -15,7 +15,6 @@
#include "standard-headers/linux/virtio_ids.h"
#include "trace.h"
#define REALIZE_CONNECTION_RETRIES 3
#define VHOST_NVQS 2
/* Features required from VirtIO */
@@ -290,7 +289,7 @@ static void vu_gpio_event(void *opaque, QEMUChrEvent event)
case CHR_EVENT_CLOSED:
/* defer close until later to avoid circular close */
vhost_user_async_close(dev, &gpio->chardev, &gpio->vhost_dev,
vu_gpio_disconnect);
vu_gpio_disconnect, vu_gpio_event);
break;
case CHR_EVENT_BREAK:
case CHR_EVENT_MUX_IN:
@@ -365,7 +364,7 @@ static void vu_gpio_device_realize(DeviceState *dev, Error **errp)
qemu_chr_fe_set_handlers(&gpio->chardev, NULL, NULL, vu_gpio_event, NULL,
dev, NULL, true);
retries = REALIZE_CONNECTION_RETRIES;
retries = VU_REALIZE_CONN_RETRIES;
g_assert(!*errp);
do {
if (*errp) {


@@ -388,7 +388,7 @@ static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
* operations such as configuring device memory mappings or issuing device
* resets, which affect the whole device instead of individual VQs,
* vhost-user messages should only be sent once.
*
*
* Devices with multiple vhost_devs are given an associated dev->vq_index
* so per_device requests are only sent if vq_index is 0.
*/
@@ -1073,9 +1073,95 @@ static int vhost_user_set_vring_endian(struct vhost_dev *dev,
return vhost_user_write(dev, &msg, NULL, 0);
}
static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
{
int ret;
VhostUserMsg msg = {
.hdr.request = request,
.hdr.flags = VHOST_USER_VERSION,
};
if (vhost_user_per_device_request(request) && dev->vq_index != 0) {
return 0;
}
ret = vhost_user_write(dev, &msg, NULL, 0);
if (ret < 0) {
return ret;
}
ret = vhost_user_read(dev, &msg);
if (ret < 0) {
return ret;
}
if (msg.hdr.request != request) {
error_report("Received unexpected msg type. Expected %d received %d",
request, msg.hdr.request);
return -EPROTO;
}
if (msg.hdr.size != sizeof(msg.payload.u64)) {
error_report("Received bad msg size.");
return -EPROTO;
}
*u64 = msg.payload.u64;
return 0;
}
static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
{
if (vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features) < 0) {
return -EPROTO;
}
return 0;
}
/* Note: "msg->hdr.flags" may be modified. */
static int vhost_user_write_sync(struct vhost_dev *dev, VhostUserMsg *msg,
bool wait_for_reply)
{
int ret;
if (wait_for_reply) {
bool reply_supported = virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_REPLY_ACK);
if (reply_supported) {
msg->hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
}
}
ret = vhost_user_write(dev, msg, NULL, 0);
if (ret < 0) {
return ret;
}
if (wait_for_reply) {
uint64_t dummy;
if (msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
return process_message_reply(dev, msg);
}
/*
* We need to wait for a reply but the backend does not
* support replies for the command we just sent.
* Send VHOST_USER_GET_FEATURES which makes all backends
* send a reply.
*/
return vhost_user_get_features(dev, &dummy);
}
return 0;
}
static int vhost_set_vring(struct vhost_dev *dev,
unsigned long int request,
struct vhost_vring_state *ring)
struct vhost_vring_state *ring,
bool wait_for_reply)
{
VhostUserMsg msg = {
.hdr.request = request,
@@ -1084,13 +1170,13 @@ static int vhost_set_vring(struct vhost_dev *dev,
.hdr.size = sizeof(msg.payload.state),
};
return vhost_user_write(dev, &msg, NULL, 0);
return vhost_user_write_sync(dev, &msg, wait_for_reply);
}
static int vhost_user_set_vring_num(struct vhost_dev *dev,
struct vhost_vring_state *ring)
{
return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring, false);
}
static void vhost_user_host_notifier_free(VhostUserHostNotifier *n)
@@ -1121,7 +1207,7 @@ static void vhost_user_host_notifier_remove(VhostUserHostNotifier *n,
static int vhost_user_set_vring_base(struct vhost_dev *dev,
struct vhost_vring_state *ring)
{
return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring, false);
}
static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
@@ -1139,7 +1225,21 @@ static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
.num = enable,
};
ret = vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
/*
* SET_VRING_ENABLE travels from guest to QEMU to vhost-user backend /
* control plane thread via unix domain socket. Virtio requests travel
* from guest to vhost-user backend / data plane thread via eventfd.
* Even if the guest enables the ring first, and pushes its first virtio
* request second (conforming to the virtio spec), the data plane thread
* in the backend may see the virtio request before the control plane
* thread sees the queue enablement. This causes (in fact, requires) the
* data plane thread to discard the virtio request (it arrived on a
* seemingly disabled queue). To prevent this out-of-order delivery,
* don't let the guest proceed to pushing the virtio request until the
* backend control plane acknowledges enabling the queue -- IOW, pass
* wait_for_reply=true below.
*/
ret = vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state, true);
if (ret < 0) {
/*
* Restoring the previous state is likely infeasible, as well as
@@ -1245,75 +1345,9 @@ static int vhost_user_set_vring_err(struct vhost_dev *dev,
return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_ERR, file);
}
static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
{
int ret;
VhostUserMsg msg = {
.hdr.request = request,
.hdr.flags = VHOST_USER_VERSION,
};
if (vhost_user_per_device_request(request) && dev->vq_index != 0) {
return 0;
}
ret = vhost_user_write(dev, &msg, NULL, 0);
if (ret < 0) {
return ret;
}
ret = vhost_user_read(dev, &msg);
if (ret < 0) {
return ret;
}
if (msg.hdr.request != request) {
error_report("Received unexpected msg type. Expected %d received %d",
request, msg.hdr.request);
return -EPROTO;
}
if (msg.hdr.size != sizeof(msg.payload.u64)) {
error_report("Received bad msg size.");
return -EPROTO;
}
*u64 = msg.payload.u64;
return 0;
}
static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
{
if (vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features) < 0) {
return -EPROTO;
}
return 0;
}
static int enforce_reply(struct vhost_dev *dev,
const VhostUserMsg *msg)
{
uint64_t dummy;
if (msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
return process_message_reply(dev, msg);
}
/*
* We need to wait for a reply but the backend does not
* support replies for the command we just sent.
* Send VHOST_USER_GET_FEATURES which makes all backends
* send a reply.
*/
return vhost_user_get_features(dev, &dummy);
}
static int vhost_user_set_vring_addr(struct vhost_dev *dev,
struct vhost_vring_addr *addr)
{
int ret;
VhostUserMsg msg = {
.hdr.request = VHOST_USER_SET_VRING_ADDR,
.hdr.flags = VHOST_USER_VERSION,
@@ -1321,29 +1355,13 @@ static int vhost_user_set_vring_addr(struct vhost_dev *dev,
.hdr.size = sizeof(msg.payload.addr),
};
bool reply_supported = virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_REPLY_ACK);
/*
* wait for a reply if logging is enabled to make sure
* backend is actually logging changes
*/
bool wait_for_reply = addr->flags & (1 << VHOST_VRING_F_LOG);
if (reply_supported && wait_for_reply) {
msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
}
ret = vhost_user_write(dev, &msg, NULL, 0);
if (ret < 0) {
return ret;
}
if (wait_for_reply) {
return enforce_reply(dev, &msg);
}
return 0;
return vhost_user_write_sync(dev, &msg, wait_for_reply);
}
static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
@@ -1355,26 +1373,8 @@ static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
.payload.u64 = u64,
.hdr.size = sizeof(msg.payload.u64),
};
int ret;
if (wait_for_reply) {
bool reply_supported = virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_REPLY_ACK);
if (reply_supported) {
msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
}
}
ret = vhost_user_write(dev, &msg, NULL, 0);
if (ret < 0) {
return ret;
}
if (wait_for_reply) {
return enforce_reply(dev, &msg);
}
return 0;
return vhost_user_write_sync(dev, &msg, wait_for_reply);
}
static int vhost_user_set_status(struct vhost_dev *dev, uint8_t status)
@@ -1482,12 +1482,17 @@ static int vhost_user_reset_device(struct vhost_dev *dev)
{
VhostUserMsg msg = {
.hdr.flags = VHOST_USER_VERSION,
.hdr.request = VHOST_USER_RESET_DEVICE,
};
msg.hdr.request = virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_RESET_DEVICE)
? VHOST_USER_RESET_DEVICE
: VHOST_USER_RESET_OWNER;
/*
* Historically, reset was not implemented so only reset devices
* that are expecting it.
*/
if (!virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_RESET_DEVICE)) {
return -ENOSYS;
}
return vhost_user_write(dev, &msg, NULL, 0);
}
@@ -2751,6 +2756,7 @@ typedef struct {
DeviceState *dev;
CharBackend *cd;
struct vhost_dev *vhost;
IOEventHandler *event_cb;
} VhostAsyncCallback;
static void vhost_user_async_close_bh(void *opaque)
@@ -2765,7 +2771,10 @@ static void vhost_user_async_close_bh(void *opaque)
*/
if (vhost->vdev) {
data->cb(data->dev);
}
} else if (data->event_cb) {
qemu_chr_fe_set_handlers(data->cd, NULL, NULL, data->event_cb,
NULL, data->dev, NULL, true);
}
g_free(data);
}
@@ -2777,7 +2786,8 @@ static void vhost_user_async_close_bh(void *opaque)
*/
void vhost_user_async_close(DeviceState *d,
CharBackend *chardev, struct vhost_dev *vhost,
vu_async_close_fn cb)
vu_async_close_fn cb,
IOEventHandler *event_cb)
{
if (!runstate_check(RUN_STATE_SHUTDOWN)) {
/*
@@ -2793,6 +2803,7 @@ void vhost_user_async_close(DeviceState *d,
data->dev = d;
data->cd = chardev;
data->vhost = vhost;
data->event_cb = event_cb;
/* Disable any further notifications on the chardev */
qemu_chr_fe_set_handlers(chardev,
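Editor's note: a short sketch of how the relocated helpers compose. vhost_user_write_sync() gives a "known processed" guarantee: with VHOST_USER_PROTOCOL_F_REPLY_ACK negotiated it waits for the explicit ack, and otherwise it falls back to a GET_FEATURES round trip, which every backend must answer in order. The wrapper below is illustrative; vhost_set_vring() is the file's own static helper shown above:

/* enable one ring and return only after the backend has acted on it */
static int my_enable_ring(struct vhost_dev *dev, unsigned idx)
{
    struct vhost_vring_state state = {
        .index = idx,
        .num = 1,
    };

    /* wait_for_reply=true closes the race described in the
     * SET_VRING_ENABLE comment above */
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state, true);
}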


@@ -2150,3 +2150,12 @@ int vhost_net_set_backend(struct vhost_dev *hdev,
return -ENOSYS;
}
int vhost_reset_device(struct vhost_dev *hdev)
{
if (hdev->vhost_ops->vhost_reset_device) {
return hdev->vhost_ops->vhost_reset_device(hdev);
}
return -ENOSYS;
}


@@ -2136,6 +2136,10 @@ void virtio_reset(void *opaque)
vdev->device_endian = virtio_default_endian();
}
if (vdev->vhost_started) {
vhost_reset_device(k->get_vhost(vdev));
}
if (k->reset) {
k->reset(vdev);
}


@@ -2793,6 +2793,8 @@ int64_t address_space_cache_init(MemoryRegionCache *cache,
static inline void address_space_cache_init_empty(MemoryRegionCache *cache)
{
cache->mrs.mr = NULL;
/* There is no real need to initialize fv, but it makes Coverity happy. */
cache->fv = NULL;
}
/**


@@ -25,5 +25,6 @@ void cxl_build_cedt(GArray *table_offsets, GArray *table_data,
BIOSLinker *linker, const char *oem_id,
const char *oem_table_id, CXLState *cxl_state);
void build_cxl_osc_method(Aml *dev);
void build_cxl_dsm_method(Aml *dev);
#endif


@@ -42,6 +42,7 @@ typedef struct PCMachineState {
uint64_t max_ram_below_4g;
OnOffAuto vmport;
SmbiosEntryPointType smbios_entry_point_type;
const char *south_bridge;
bool acpi_build_enabled;
bool smbus_enabled;
@@ -92,6 +93,7 @@ struct PCMachineClass {
/* Device configuration: */
bool pci_enabled;
bool kvmclock_enabled;
const char *default_south_bridge;
/* Compat options: */
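Editor's note: these two new fields are what let a PC machine choose its south bridge at run time: the machine class supplies a default and the machine state records the user's choice. A hypothetical class_init sketch, purely to illustrate the intended interaction (the actual wiring, including the machine property that sets south_bridge, lives in hw/i386/pc_piix.c and is not shown in this hunk):

static void my_pc_machine_class_init(ObjectClass *oc, void *data)
{
    PCMachineClass *pcmc = PC_MACHINE_CLASS(oc);

    /* assumption: a PIIX3-based board keeps PIIX3 as its default */
    pcmc->default_south_bridge = TYPE_PIIX3_DEVICE;
}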


@@ -13,7 +13,10 @@
#define HW_SOUTHBRIDGE_PIIX_H
#include "hw/pci/pci_device.h"
#include "hw/acpi/piix4.h"
#include "hw/ide/pci.h"
#include "hw/rtc/mc146818rtc.h"
#include "hw/usb/hcd-uhci.h"
/* PIRQRC[A:D]: PIRQx Route Control Registers */
#define PIIX_PIRQCA 0x60
@@ -27,7 +30,6 @@
*/
#define PIIX_RCR_IOPORT 0xcf9
#define PIIX_NUM_PIC_IRQS 16 /* i8259 * 2 */
#define PIIX_NUM_PIRQS 4ULL /* PIRQ[A-D] */
struct PIIXState {
@@ -39,32 +41,42 @@ struct PIIXState {
* So one PIC level is tracked by PIIX_NUM_PIRQS bits.
*
* PIRQ is mapped to PIC pins, we track it by
* PIIX_NUM_PIRQS * PIIX_NUM_PIC_IRQS = 64 bits with
* PIIX_NUM_PIRQS * ISA_NUM_IRQS = 64 bits with
* pic_irq * PIIX_NUM_PIRQS + pirq
*/
#if PIIX_NUM_PIC_IRQS * PIIX_NUM_PIRQS > 64
#if ISA_NUM_IRQS * PIIX_NUM_PIRQS > 64
#error "unable to encode pic state in 64bit in pic_levels."
#endif
uint64_t pic_levels;
qemu_irq *pic;
qemu_irq cpu_intr;
qemu_irq isa_irqs_in[ISA_NUM_IRQS];
/* This member isn't used. Just for save/load compatibility */
int32_t pci_irq_levels_vmstate[PIIX_NUM_PIRQS];
MC146818RtcState rtc;
PCIIDEState ide;
UHCIState uhci;
PIIX4PMState pm;
uint32_t smb_io_base;
/* Reset Control Register contents */
uint8_t rcr;
/* IO memory region for Reset Control Register (PIIX_RCR_IOPORT) */
MemoryRegion rcr_mem;
};
typedef struct PIIXState PIIX3State;
#define TYPE_PIIX3_PCI_DEVICE "pci-piix3"
DECLARE_INSTANCE_CHECKER(PIIX3State, PIIX3_PCI_DEVICE,
TYPE_PIIX3_PCI_DEVICE)
bool has_acpi;
bool has_pic;
bool has_pit;
bool has_usb;
bool smm_enabled;
};
#define TYPE_PIIX_PCI_DEVICE "pci-piix"
OBJECT_DECLARE_SIMPLE_TYPE(PIIXState, PIIX_PCI_DEVICE)
#define TYPE_PIIX3_DEVICE "PIIX3"
#define TYPE_PIIX4_PCI_DEVICE "piix4-isa"
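Editor's note: with PIIX3 and PIIX4 now sharing one PIIXState, the variants differ mostly in which embedded functions the has_* flags switch on. A hypothetical instance_init for the PIIX4 flavour, purely to illustrate the flags (the real code also wires IRQs and qdev properties):

static void piix4_flavour_init(Object *obj)
{
    PIIXState *s = PIIX_PCI_DEVICE(obj);

    s->has_acpi = true;   /* PIIX4 integrates the ACPI/PM function */
    s->has_usb = true;    /* ...and a UHCI USB controller */
    s->has_pic = true;    /* on-chip i8259 pair */
    s->has_pit = true;    /* on-chip i8254 */
}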


@@ -39,7 +39,7 @@ struct VHostSCSICommon {
struct vhost_inflight *inflight;
};
int vhost_scsi_common_start(VHostSCSICommon *vsc);
int vhost_scsi_common_start(VHostSCSICommon *vsc, Error **errp);
void vhost_scsi_common_stop(VHostSCSICommon *vsc);
char *vhost_scsi_common_get_fw_dev_path(FWPathProvider *p, BusState *bus,
DeviceState *dev);


@@ -28,7 +28,13 @@ OBJECT_DECLARE_SIMPLE_TYPE(VHostUserSCSI, VHOST_USER_SCSI)
struct VHostUserSCSI {
VHostSCSICommon parent_obj;
/* Properties */
bool connected;
bool started_vu;
VhostUserState vhost_user;
struct vhost_virtqueue *vhost_vqs;
};
#endif /* VHOST_USER_SCSI_H */


@@ -29,7 +29,8 @@ enum VhostUserProtocolFeature {
VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
VHOST_USER_PROTOCOL_F_STATUS = 16,
VHOST_USER_PROTOCOL_F_SHARED_OBJECT = 17,
/* Feature 17 reserved for VHOST_USER_PROTOCOL_F_XEN_MMAP. */
VHOST_USER_PROTOCOL_F_SHARED_OBJECT = 18,
VHOST_USER_PROTOCOL_F_MAX
};
@@ -106,6 +107,7 @@ typedef void (*vu_async_close_fn)(DeviceState *cb);
void vhost_user_async_close(DeviceState *d,
CharBackend *chardev, struct vhost_dev *vhost,
vu_async_close_fn cb);
vu_async_close_fn cb,
IOEventHandler *event_cb);
#endif


@@ -8,6 +8,8 @@
#define VHOST_F_DEVICE_IOTLB 63
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VU_REALIZE_CONN_RETRIES 3
/* Generic structures common for any vhost based device. */
struct vhost_inflight {
@@ -339,4 +341,14 @@ int vhost_dev_set_inflight(struct vhost_dev *dev,
int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
struct vhost_inflight *inflight);
bool vhost_dev_has_iommu(struct vhost_dev *dev);
#ifdef CONFIG_VHOST
int vhost_reset_device(struct vhost_dev *hdev);
#else
static inline int vhost_reset_device(struct vhost_dev *hdev)
{
return -ENOSYS;
}
#endif /* CONFIG_VHOST */
#endif
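Editor's note: the point of the CONFIG_VHOST stub above is that common virtio code may call vhost_reset_device() unconditionally; in a build without vhost the inline stub reduces to -ENOSYS and nothing links against vhost.c. The caller added to virtio_reset() earlier in this series relies on exactly that:

    if (vdev->vhost_started) {
        vhost_reset_device(k->get_vhost(vdev));  /* inline -ENOSYS stub without vhost */
    }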


@@ -2135,6 +2135,7 @@ config_host_data.set('CONFIG_TPM', have_tpm)
config_host_data.set('CONFIG_TSAN', get_option('tsan'))
config_host_data.set('CONFIG_USB_LIBUSB', libusb.found())
config_host_data.set('CONFIG_VDE', vde.found())
config_host_data.set('CONFIG_VHOST', have_vhost)
config_host_data.set('CONFIG_VHOST_NET', have_vhost_net)
config_host_data.set('CONFIG_VHOST_NET_USER', have_vhost_net_user)
config_host_data.set('CONFIG_VHOST_NET_VDPA', have_vhost_net_vdpa)


@@ -619,39 +619,77 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
vhost_vdpa_net_client_stop(nc);
}
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
size_t in_len)
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
const struct iovec *out_sg, size_t out_num,
const struct iovec *in_sg, size_t in_num)
{
/* Buffers for the device */
const struct iovec out = {
.iov_base = s->cvq_cmd_out_buffer,
.iov_len = out_len,
};
const struct iovec in = {
.iov_base = s->status,
.iov_len = sizeof(virtio_net_ctrl_ack),
};
VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
int r;
r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
if (unlikely(r != 0)) {
if (unlikely(r == -ENOSPC)) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
__func__);
}
return r;
}
/*
* We can poll here since we've had BQL from the time we sent the
* descriptor. Also, we need to take the answer before SVQ pulls by itself,
* when BQL is released
*/
return vhost_svq_poll(svq, 1);
return r;
}
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
/*
* Convenience wrapper to poll SVQ for multiple control commands.
*
* Caller should hold the BQL when invoking this function, and should take
* the answer before SVQ pulls by itself when BQL is released.
*/
static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
{
VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
return vhost_svq_poll(svq, cmds_in_flight);
}
static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
struct iovec *out_cursor,
struct iovec *in_cursor)
{
/* reset the cursor of the output buffer for the device */
out_cursor->iov_base = s->cvq_cmd_out_buffer;
out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
/* reset the cursor of the in buffer for the device */
in_cursor->iov_base = s->status;
in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
}
/*
* Poll SVQ for multiple pending control commands and check the device's ack.
*
* Caller should hold the BQL when invoking this function.
*
* @s: The VhostVDPAState
* @len: The length of the pending status shadow buffer
*/
static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
{
/* device uses a one-byte length ack for each control command */
ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);
if (unlikely(dev_written != len)) {
return -EIO;
}
/* check the device's ack */
for (int i = 0; i < len; ++i) {
if (s->status[i] != VIRTIO_NET_OK) {
return -EIO;
}
}
return 0;
}
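Editor's note: the cmds_in_flight value callers pass to the flush above comes from pointer arithmetic on the ack buffer: s->status is the base of a page of one-byte virtio_net_ctrl_ack slots, and in_cursor advances by sizeof(*s->status) == 1 for every queued command. So after queueing N commands:

    size_t queued = in_cursor->iov_base - (void *)s->status;  /* == N */
    vhost_vdpa_net_svq_flush(s, queued);  /* polls N acks, checks each one */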
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
struct iovec *out_cursor,
struct iovec *in_cursor, uint8_t class,
uint8_t cmd, const struct iovec *data_sg,
size_t data_num)
{
@@ -659,36 +697,72 @@ static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
.class = class,
.cmd = cmd,
};
size_t data_size = iov_size(data_sg, data_num);
size_t data_size = iov_size(data_sg, data_num), cmd_size;
struct iovec out, in;
ssize_t r;
unsigned dummy_cursor_iov_cnt;
VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
cmd_size = sizeof(ctrl) + data_size;
if (vhost_svq_available_slots(svq) < 2 ||
iov_size(out_cursor, 1) < cmd_size) {
/*
* It is time to flush all pending control commands if SVQ is full
* or control commands shadow buffers are full.
*
* We can poll here since we've had BQL from the time
* we sent the descriptor.
*/
r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
(void *)s->status);
if (unlikely(r < 0)) {
return r;
}
vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
}
/* pack the CVQ command header */
memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl));
/* pack the CVQ command command-specific-data */
iov_to_buf(data_sg, data_num, 0,
s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);
out_cursor->iov_base + sizeof(ctrl), data_size);
return vhost_vdpa_net_cvq_add(s, data_size + sizeof(ctrl),
sizeof(virtio_net_ctrl_ack));
/* extract the required buffer from the cursor for output */
iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
/* extract the required buffer from the cursor for input */
iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));
r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
if (unlikely(r < 0)) {
return r;
}
/* iterate the cursors */
dummy_cursor_iov_cnt = 1;
iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
dummy_cursor_iov_cnt = 1;
iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));
return 0;
}
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
struct iovec *out_cursor,
struct iovec *in_cursor)
{
if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
const struct iovec data = {
.iov_base = (void *)n->mac,
.iov_len = sizeof(n->mac),
};
ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_ADDR_SET,
&data, 1);
if (unlikely(dev_written < 0)) {
return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
return -EIO;
ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_ADDR_SET,
&data, 1);
if (unlikely(r < 0)) {
return r;
}
}
@@ -733,25 +807,24 @@ static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
.iov_len = mul_macs_size,
},
};
ssize_t dev_written = vhost_vdpa_net_load_cmd(s,
VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_TABLE_SET,
data, ARRAY_SIZE(data));
if (unlikely(dev_written < 0)) {
return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
return -EIO;
ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_TABLE_SET,
data, ARRAY_SIZE(data));
if (unlikely(r < 0)) {
return r;
}
return 0;
}
static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
const VirtIONet *n)
const VirtIONet *n,
struct iovec *out_cursor,
struct iovec *in_cursor)
{
struct virtio_net_ctrl_mq mq;
ssize_t dev_written;
ssize_t r;
if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
return 0;
@@ -762,24 +835,24 @@ static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
.iov_base = &mq,
.iov_len = sizeof(mq),
};
dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
&data, 1);
if (unlikely(dev_written < 0)) {
return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
return -EIO;
r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_MQ,
VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
&data, 1);
if (unlikely(r < 0)) {
return r;
}
return 0;
}
static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
const VirtIONet *n)
const VirtIONet *n,
struct iovec *out_cursor,
struct iovec *in_cursor)
{
uint64_t offloads;
ssize_t dev_written;
ssize_t r;
if (!virtio_vdev_has_feature(&n->parent_obj,
VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
@@ -807,20 +880,20 @@ static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
.iov_base = &offloads,
.iov_len = sizeof(offloads),
};
dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
&data, 1);
if (unlikely(dev_written < 0)) {
return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
return -EIO;
r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_GUEST_OFFLOADS,
VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
&data, 1);
if (unlikely(r < 0)) {
return r;
}
return 0;
}
static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
struct iovec *out_cursor,
struct iovec *in_cursor,
uint8_t cmd,
uint8_t on)
{
@@ -828,14 +901,23 @@ static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
.iov_base = &on,
.iov_len = sizeof(on),
};
return vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
cmd, &data, 1);
ssize_t r;
r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_RX, cmd, &data, 1);
if (unlikely(r < 0)) {
return r;
}
return 0;
}
static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
const VirtIONet *n)
const VirtIONet *n,
struct iovec *out_cursor,
struct iovec *in_cursor)
{
ssize_t dev_written;
ssize_t r;
if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
return 0;
@@ -860,13 +942,10 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (!n->mac_table.uni_overflow && !n->promisc) {
dev_written = vhost_vdpa_net_load_rx_mode(s,
VIRTIO_NET_CTRL_RX_PROMISC, 0);
if (unlikely(dev_written < 0)) {
return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
return -EIO;
r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_RX_PROMISC, 0);
if (unlikely(r < 0)) {
return r;
}
}
@@ -888,13 +967,10 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->mac_table.multi_overflow || n->allmulti) {
dev_written = vhost_vdpa_net_load_rx_mode(s,
VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
if (unlikely(dev_written < 0)) {
return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
return -EIO;
r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
if (unlikely(r < 0)) {
return r;
}
}
@@ -913,13 +989,10 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->alluni) {
dev_written = vhost_vdpa_net_load_rx_mode(s,
VIRTIO_NET_CTRL_RX_ALLUNI, 1);
if (dev_written < 0) {
return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
return -EIO;
r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_RX_ALLUNI, 1);
if (r < 0) {
return r;
}
}
@@ -934,13 +1007,10 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->nomulti) {
dev_written = vhost_vdpa_net_load_rx_mode(s,
VIRTIO_NET_CTRL_RX_NOMULTI, 1);
if (dev_written < 0) {
return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
return -EIO;
r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_RX_NOMULTI, 1);
if (r < 0) {
return r;
}
}
@@ -955,13 +1025,10 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->nouni) {
dev_written = vhost_vdpa_net_load_rx_mode(s,
VIRTIO_NET_CTRL_RX_NOUNI, 1);
if (dev_written < 0) {
return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
return -EIO;
r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_RX_NOUNI, 1);
if (r < 0) {
return r;
}
}
@@ -976,13 +1043,10 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
* configuration only at live migration.
*/
if (n->nobcast) {
dev_written = vhost_vdpa_net_load_rx_mode(s,
VIRTIO_NET_CTRL_RX_NOBCAST, 1);
if (dev_written < 0) {
return dev_written;
}
if (*s->status != VIRTIO_NET_OK) {
return -EIO;
r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_RX_NOBCAST, 1);
if (r < 0) {
return r;
}
}
@@ -991,27 +1055,29 @@ static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
const VirtIONet *n,
struct iovec *out_cursor,
struct iovec *in_cursor,
uint16_t vid)
{
const struct iovec data = {
.iov_base = &vid,
.iov_len = sizeof(vid),
};
ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_ADD,
&data, 1);
if (unlikely(dev_written < 0)) {
return dev_written;
}
if (unlikely(*s->status != VIRTIO_NET_OK)) {
return -EIO;
ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_ADD,
&data, 1);
if (unlikely(r < 0)) {
return r;
}
return 0;
}
static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
const VirtIONet *n)
const VirtIONet *n,
struct iovec *out_cursor,
struct iovec *in_cursor)
{
int r;
@@ -1022,7 +1088,8 @@ static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
for (int i = 0; i < MAX_VLAN >> 5; i++) {
for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
if (n->vlans[i] & (1U << j)) {
r = vhost_vdpa_net_load_single_vlan(s, n, (i << 5) + j);
r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
in_cursor, (i << 5) + j);
if (unlikely(r != 0)) {
return r;
}
@@ -1039,6 +1106,7 @@ static int vhost_vdpa_net_cvq_load(NetClientState *nc)
struct vhost_vdpa *v = &s->vhost_vdpa;
const VirtIONet *n;
int r;
struct iovec out_cursor, in_cursor;
assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
@@ -1046,23 +1114,35 @@ static int vhost_vdpa_net_cvq_load(NetClientState *nc)
if (v->shadow_vqs_enabled) {
n = VIRTIO_NET(v->dev->vdev);
r = vhost_vdpa_net_load_mac(s, n);
vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
if (unlikely(r < 0)) {
return r;
}
r = vhost_vdpa_net_load_mq(s, n);
r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
if (unlikely(r)) {
return r;
}
r = vhost_vdpa_net_load_offloads(s, n);
r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor);
if (unlikely(r)) {
return r;
}
r = vhost_vdpa_net_load_rx(s, n);
r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor);
if (unlikely(r)) {
return r;
}
r = vhost_vdpa_net_load_vlan(s, n);
r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor);
if (unlikely(r)) {
return r;
}
/*
* We need to poll and check all pending device's used buffers.
*
* We can poll here since we've had BQL from the time
* we sent the descriptor.
*/
r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);
if (unlikely(r)) {
return r;
}
@@ -1115,12 +1195,14 @@ static NetClientInfo net_vhost_vdpa_cvq_info = {
*/
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
VirtQueueElement *elem,
struct iovec *out)
struct iovec *out,
const struct iovec *in)
{
struct virtio_net_ctrl_mac mac_data, *mac_ptr;
struct virtio_net_ctrl_hdr *hdr_ptr;
uint32_t cursor;
ssize_t r;
uint8_t on = 1;
/* parse the non-multicast MAC address entries from CVQ command */
cursor = sizeof(*hdr_ptr);
@@ -1168,10 +1250,25 @@ static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
* filter table to the vdpa device, it should send the
* VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
*/
r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 1);
hdr_ptr = out->iov_base;
out->iov_len = sizeof(*hdr_ptr) + sizeof(on);
hdr_ptr->class = VIRTIO_NET_CTRL_RX;
hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
if (unlikely(r < 0)) {
return r;
}
/*
* We can poll here since we've had BQL from the time
* we sent the descriptor.
*/
r = vhost_vdpa_net_svq_poll(s, 1);
if (unlikely(r < sizeof(*s->status))) {
return r;
}
if (*s->status != VIRTIO_NET_OK) {
return sizeof(*s->status);
}
@@ -1249,10 +1346,15 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
.iov_base = s->cvq_cmd_out_buffer,
};
/* in buffer used for device model */
const struct iovec in = {
const struct iovec model_in = {
.iov_base = &status,
.iov_len = sizeof(status),
};
/* in buffer used for vdpa device */
const struct iovec vdpa_in = {
.iov_base = s->status,
.iov_len = sizeof(*s->status),
};
ssize_t dev_written = -EINVAL;
out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
@@ -1281,15 +1383,23 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
* the CVQ command directly.
*/
dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
&out);
&out, &vdpa_in);
if (unlikely(dev_written < 0)) {
goto out;
}
} else {
dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
if (unlikely(dev_written < 0)) {
ssize_t r;
r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
if (unlikely(r < 0)) {
dev_written = r;
goto out;
}
/*
* We can poll here since we've had BQL from the time
* we sent the descriptor.
*/
dev_written = vhost_vdpa_net_svq_poll(s, 1);
}
if (unlikely(dev_written < sizeof(status))) {
@@ -1302,7 +1412,7 @@ static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
}
status = VIRTIO_NET_ERR;
virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
if (status != VIRTIO_NET_OK) {
error_report("Bad CVQ processing in model");
}
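Editor's note: pulling this file's changes together, the control virtqueue load path now batches commands instead of doing one synchronous round trip per command. A condensed sketch of the flow, using the file's own helpers with error handling trimmed:

    struct iovec out_cursor, in_cursor;

    vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);

    /* each helper only packs its command through the cursors; load_cmd()
     * flushes early just when the shadow vq or shadow buffers fill up */
    vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
    vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);

    /* a single poll at the end collects every ack still in flight */
    vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);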


@@ -65,7 +65,8 @@ enum VhostUserProtocolFeature {
VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
/* Feature 16 is reserved for VHOST_USER_PROTOCOL_F_STATUS. */
VHOST_USER_PROTOCOL_F_SHARED_OBJECT = 17,
/* Feature 17 reserved for VHOST_USER_PROTOCOL_F_XEN_MMAP. */
VHOST_USER_PROTOCOL_F_SHARED_OBJECT = 18,
VHOST_USER_PROTOCOL_F_MAX
};
