Merge remote branch 'mst/for_anthony' into staging
commit b254b0d15d

Mirror of https://github.com/qemu/qemu.git
Makefile.objs

@@ -168,7 +168,8 @@ hw-obj-$(CONFIG_VIRTIO) += virtio.o virtio-console.o
 hw-obj-y += fw_cfg.o
 # FIXME: Core PCI code and its direct dependencies are required by the
 # QMP query-pci command.
-hw-obj-y += pci.o pci_bridge.o msix.o msi.o
+hw-obj-y += pci.o pci_bridge.o
+hw-obj-$(CONFIG_PCI) += msix.o msi.o
 hw-obj-$(CONFIG_PCI) += pci_host.o pcie_host.o
 hw-obj-$(CONFIG_PCI) += ioh3420.o xio3130_upstream.o xio3130_downstream.o
 hw-obj-y += watchdog.o
cpus.c | 2

@@ -111,6 +111,8 @@ static void do_vm_stop(int reason)
         vm_running = 0;
         pause_all_vcpus();
         vm_state_notify(0, reason);
+        qemu_aio_flush();
+        bdrv_flush_all();
         monitor_protocol_event(QEVENT_STOP, NULL);
     }
 }
hw/pc_piix.c | 20

@@ -217,6 +217,14 @@ static QEMUMachine pc_machine = {
     .desc = "Standard PC",
     .init = pc_init_pci,
     .max_cpus = 255,
+    .compat_props = (GlobalProperty[]) {
+        {
+            .driver = "PCI",
+            .property = "command_serr_enable",
+            .value = "off",
+        },
+        { /* end of list */ }
+    },
     .is_default = 1,
 };

@@ -265,6 +273,10 @@ static QEMUMachine pc_machine_v0_12 = {
             .driver = "vmware-svga",
             .property = "rombar",
             .value = stringify(0),
+        },{
+            .driver = "PCI",
+            .property = "command_serr_enable",
+            .value = "off",
         },
         { /* end of list */ }
     }
@@ -300,6 +312,10 @@ static QEMUMachine pc_machine_v0_11 = {
             .driver = "PCI",
             .property = "rombar",
             .value = stringify(0),
+        },{
+            .driver = "PCI",
+            .property = "command_serr_enable",
+            .value = "off",
         },
         { /* end of list */ }
     }
@@ -347,6 +363,10 @@ static QEMUMachine pc_machine_v0_10 = {
            .driver = "PCI",
            .property = "rombar",
            .value = stringify(0),
+        },{
+            .driver = "PCI",
+            .property = "command_serr_enable",
+            .value = "off",
         },
         { /* end of list */ }
     },
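Note (not part of the commit): a compat_props entry like the ones added above is just a per-driver global property default. Assuming the usual -global syntax for qdev properties, the same override could also be requested by hand, for example:

    qemu linux.img -global PCI.command_serr_enable=off

which would disable the new SERR handling for every PCI device, the same way these machine definitions do.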
hw/pci.c | 24

@@ -25,8 +25,6 @@
 #include "pci.h"
 #include "pci_bridge.h"
 #include "pci_internals.h"
-#include "msix.h"
-#include "msi.h"
 #include "monitor.h"
 #include "net.h"
 #include "sysemu.h"
@@ -59,6 +57,8 @@ struct BusInfo pci_bus_info = {
         DEFINE_PROP_UINT32("rombar", PCIDevice, rom_bar, 1),
         DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present,
                         QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false),
+        DEFINE_PROP_BIT("command_serr_enable", PCIDevice, cap_present,
+                        QEMU_PCI_CAP_SERR_BITNR, true),
         DEFINE_PROP_END_OF_LIST()
     }
 };
@@ -570,6 +570,9 @@ static void pci_init_wmask(PCIDevice *dev)
     pci_set_word(dev->wmask + PCI_COMMAND,
                  PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
                  PCI_COMMAND_INTX_DISABLE);
+    if (dev->cap_present & QEMU_PCI_CAP_SERR) {
+        pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, PCI_COMMAND_SERR);
+    }

     memset(dev->wmask + PCI_CONFIG_HEADER_SIZE, 0xff,
            config_size - PCI_CONFIG_HEADER_SIZE);
@@ -1096,23 +1099,6 @@ static void pci_set_irq(void *opaque, int irq_num, int level)
     pci_change_irq_level(pci_dev, irq_num, change);
 }

-bool pci_msi_enabled(PCIDevice *dev)
-{
-    return msix_enabled(dev) || msi_enabled(dev);
-}
-
-void pci_msi_notify(PCIDevice *dev, unsigned int vector)
-{
-    if (msix_enabled(dev)) {
-        msix_notify(dev, vector);
-    } else if (msi_enabled(dev)) {
-        msi_notify(dev, vector);
-    } else {
-        /* MSI/MSI-X must be enabled */
-        abort();
-    }
-}
-
 /***********************************************************/
 /* monitor info on PCI */

hw/pci.h | 7

@@ -118,6 +118,10 @@ enum {
     /* multifunction capable device */
 #define QEMU_PCI_CAP_MULTIFUNCTION_BITNR 3
     QEMU_PCI_CAP_MULTIFUNCTION = (1 << QEMU_PCI_CAP_MULTIFUNCTION_BITNR),
+
+    /* command register SERR bit enabled */
+#define QEMU_PCI_CAP_SERR_BITNR 4
+    QEMU_PCI_CAP_SERR = (1 << QEMU_PCI_CAP_SERR_BITNR),
 };

 struct PCIDevice {
@@ -257,9 +261,6 @@ void do_pci_info_print(Monitor *mon, const QObject *data);
 void do_pci_info(Monitor *mon, QObject **ret_data);
 void pci_bridge_update_mappings(PCIBus *b);

-bool pci_msi_enabled(PCIDevice *dev);
-void pci_msi_notify(PCIDevice *dev, unsigned int vector);
-
 static inline void
 pci_set_byte(uint8_t *config, uint8_t val)
 {
hw/pcie.c

@@ -167,10 +167,12 @@ static void hotplug_event_notify(PCIDevice *dev)
     * The Port may optionally send an MSI when there are hot-plug events that
     * occur while interrupt generation is disabled, and interrupt generation is
     * subsequently enabled. */
-    if (!pci_msi_enabled(dev)) {
+    if (msix_enabled(dev)) {
+        msix_notify(dev, pcie_cap_flags_get_vector(dev));
+    } else if (msi_enabled(dev)) {
+        msi_notify(dev, pcie_cap_flags_get_vector(dev));
+    } else {
         qemu_set_irq(dev->irq[dev->exp.hpev_intx], dev->exp.hpev_notified);
-    } else if (dev->exp.hpev_notified) {
-        pci_msi_notify(dev, pcie_cap_flags_get_vector(dev));
     }
 }

hw/pcie_aer.c | 109

@@ -257,30 +257,49 @@ static unsigned int pcie_aer_root_get_vector(PCIDevice *dev)
     return (root_status & PCI_ERR_ROOT_IRQ) >> PCI_ERR_ROOT_IRQ_SHIFT;
 }

+/* Given a status register, get corresponding bits in the command register */
+static uint32_t pcie_aer_status_to_cmd(uint32_t status)
+{
+    uint32_t cmd = 0;
+    if (status & PCI_ERR_ROOT_COR_RCV) {
+        cmd |= PCI_ERR_ROOT_CMD_COR_EN;
+    }
+    if (status & PCI_ERR_ROOT_NONFATAL_RCV) {
+        cmd |= PCI_ERR_ROOT_CMD_NONFATAL_EN;
+    }
+    if (status & PCI_ERR_ROOT_FATAL_RCV) {
+        cmd |= PCI_ERR_ROOT_CMD_FATAL_EN;
+    }
+    return cmd;
+}
+
+static void pcie_aer_root_notify(PCIDevice *dev)
+{
+    if (msix_enabled(dev)) {
+        msix_notify(dev, pcie_aer_root_get_vector(dev));
+    } else if (msi_enabled(dev)) {
+        msi_notify(dev, pcie_aer_root_get_vector(dev));
+    } else {
+        qemu_set_irq(dev->irq[dev->exp.aer_intx], 1);
+    }
+}
+
 /*
- * return value:
- * true: error message is sent up
- * false: error message is masked
- *
  * 6.2.6 Error Message Control
  * Figure 6-3
  * root port part
  */
-static bool pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
+static void pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
 {
-    bool msg_sent;
     uint16_t cmd;
     uint8_t *aer_cap;
     uint32_t root_cmd;
-    uint32_t root_status;
-    bool msi_trigger;
+    uint32_t root_status, prev_status;

-    msg_sent = false;
     cmd = pci_get_word(dev->config + PCI_COMMAND);
     aer_cap = dev->config + dev->exp.aer_cap;
     root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
-    root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
-    msi_trigger = false;
+    prev_status = root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);

     if (cmd & PCI_COMMAND_SERR) {
         /* System Error.
@@ -299,25 +318,14 @@ static bool pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
         if (root_status & PCI_ERR_ROOT_COR_RCV) {
             root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
         } else {
-            if (root_cmd & PCI_ERR_ROOT_CMD_COR_EN) {
-                msi_trigger = true;
-            }
             pci_set_word(aer_cap + PCI_ERR_ROOT_COR_SRC, msg->source_id);
         }
         root_status |= PCI_ERR_ROOT_COR_RCV;
         break;
     case PCI_ERR_ROOT_CMD_NONFATAL_EN:
-        if (!(root_status & PCI_ERR_ROOT_NONFATAL_RCV) &&
-            root_cmd & PCI_ERR_ROOT_CMD_NONFATAL_EN) {
-            msi_trigger = true;
-        }
         root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
         break;
     case PCI_ERR_ROOT_CMD_FATAL_EN:
-        if (!(root_status & PCI_ERR_ROOT_FATAL_RCV) &&
-            root_cmd & PCI_ERR_ROOT_CMD_FATAL_EN) {
-            msi_trigger = true;
-        }
         if (!(root_status & PCI_ERR_ROOT_UNCOR_RCV)) {
             root_status |= PCI_ERR_ROOT_FIRST_FATAL;
         }
@@ -337,18 +345,17 @@ static bool pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
     }
     pci_set_long(aer_cap + PCI_ERR_ROOT_STATUS, root_status);

-    if (root_cmd & msg->severity) {
-        /* 6.2.4.1.2 Interrupt Generation */
-        if (pci_msi_enabled(dev)) {
-            if (msi_trigger) {
-                pci_msi_notify(dev, pcie_aer_root_get_vector(dev));
-            }
-        } else {
-            qemu_set_irq(dev->irq[dev->exp.aer_intx], 1);
-        }
-        msg_sent = true;
-    }
-    return msg_sent;
+    /* 6.2.4.1.2 Interrupt Generation */
+    /* All the above did was set some bits in the status register.
+     * Specifically these that match message severity.
+     * The below code relies on this fact. */
+    if (!(root_cmd & msg->severity) ||
+        (pcie_aer_status_to_cmd(prev_status) & root_cmd)) {
+        /* Condition is not being set or was already true so nothing to do. */
+        return;
+    }
+
+    pcie_aer_root_notify(dev);
 }

 /*
@@ -739,40 +746,26 @@ void pcie_aer_root_reset(PCIDevice *dev)
      */
 }

-static bool pcie_aer_root_does_trigger(uint32_t cmd, uint32_t status)
-{
-    return
-        ((cmd & PCI_ERR_ROOT_CMD_COR_EN) && (status & PCI_ERR_ROOT_COR_RCV)) ||
-        ((cmd & PCI_ERR_ROOT_CMD_NONFATAL_EN) &&
-         (status & PCI_ERR_ROOT_NONFATAL_RCV)) ||
-        ((cmd & PCI_ERR_ROOT_CMD_FATAL_EN) &&
-         (status & PCI_ERR_ROOT_FATAL_RCV));
-}
-
 void pcie_aer_root_write_config(PCIDevice *dev,
                                 uint32_t addr, uint32_t val, int len,
                                 uint32_t root_cmd_prev)
 {
     uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
-
-    /* root command register */
-    uint32_t root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
-    if (root_cmd & PCI_ERR_ROOT_CMD_EN_MASK) {
-        /* 6.2.4.1.2 Interrupt Generation */
-
-        /* 0 -> 1 */
-        uint32_t root_cmd_set = (root_cmd_prev ^ root_cmd) & root_cmd;
-        uint32_t root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
+    uint32_t root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
+    uint32_t enabled_cmd = pcie_aer_status_to_cmd(root_status);
+    uint32_t root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
+    /* 6.2.4.1.2 Interrupt Generation */
+    if (!msix_enabled(dev) && !msi_enabled(dev)) {
+        qemu_set_irq(dev->irq[dev->exp.aer_intx], !!(root_cmd & enabled_cmd));
+        return;
+    }

-        if (pci_msi_enabled(dev)) {
-            if (pcie_aer_root_does_trigger(root_cmd_set, root_status)) {
-                pci_msi_notify(dev, pcie_aer_root_get_vector(dev));
-            }
-        } else {
-            int int_level = pcie_aer_root_does_trigger(root_cmd, root_status);
-            qemu_set_irq(dev->irq[dev->exp.aer_intx], int_level);
-        }
+    if ((root_cmd_prev & enabled_cmd) || !(root_cmd & enabled_cmd)) {
+        /* Send MSI on transition from false to true. */
+        return;
     }
+
+    pcie_aer_root_notify(dev);
 }

 static const VMStateDescription vmstate_pcie_aer_err = {
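Aside (not from this commit): the root-port AER logic above only raises an interrupt when an enabled reporting condition goes from clear to set. A minimal standalone sketch of that rule, using made-up bit values rather than the real PCI_ERR_ROOT_* constants:

    /* sketch.c - illustrates "notify only on a 0 -> 1 transition" */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define COR_RCV (1u << 0)   /* hypothetical status bit */
    #define COR_EN  (1u << 0)   /* hypothetical command (enable) bit */

    /* Same idea as pcie_aer_status_to_cmd(): map pending status bits to
     * the command bits that would have allowed an interrupt for them. */
    static uint32_t status_to_cmd(uint32_t status)
    {
        return (status & COR_RCV) ? COR_EN : 0;
    }

    int main(void)
    {
        uint32_t root_cmd = COR_EN;  /* correctable reporting enabled */
        uint32_t prev_status = 0;    /* no error was pending before */
        uint32_t severity = COR_EN;  /* incoming correctable error message */

        /* Mirrors the check in pcie_aer_msg_root_port(): skip the interrupt
         * if the severity is not enabled, or if an interrupt was already
         * owed for a previously pending condition. */
        bool notify = (root_cmd & severity) &&
                      !(status_to_cmd(prev_status) & root_cmd);
        printf("notify = %d\n", notify);  /* prints 1 */
        return 0;
    }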
hw/virtio-net.c

@@ -99,9 +99,14 @@ static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
     }
 }

-static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
+static bool virtio_net_started(VirtIONet *n, uint8_t status)
+{
+    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
+        (n->status & VIRTIO_NET_S_LINK_UP) && n->vm_running;
+}
+
+static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
 {
-    VirtIONet *n = to_virtio_net(vdev);
     if (!n->nic->nc.peer) {
         return;
     }
@@ -112,9 +117,7 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
     if (!tap_get_vhost_net(n->nic->nc.peer)) {
         return;
     }
-    if (!!n->vhost_started == ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
-                               (n->status & VIRTIO_NET_S_LINK_UP) &&
-                               n->vm_running)) {
+    if (!!n->vhost_started == virtio_net_started(n, status)) {
         return;
     }
     if (!n->vhost_started) {
@@ -131,6 +134,32 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
     }
 }

+static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
+{
+    VirtIONet *n = to_virtio_net(vdev);
+
+    virtio_net_vhost_status(n, status);
+
+    if (!n->tx_waiting) {
+        return;
+    }
+
+    if (virtio_net_started(n, status) && !n->vhost_started) {
+        if (n->tx_timer) {
+            qemu_mod_timer(n->tx_timer,
+                           qemu_get_clock(vm_clock) + n->tx_timeout);
+        } else {
+            qemu_bh_schedule(n->tx_bh);
+        }
+    } else {
+        if (n->tx_timer) {
+            qemu_del_timer(n->tx_timer);
+        } else {
+            qemu_bh_cancel(n->tx_bh);
+        }
+    }
+}
+
 static void virtio_net_set_link_status(VLANClientState *nc)
 {
     VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -424,6 +453,9 @@ static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
 static int virtio_net_can_receive(VLANClientState *nc)
 {
     VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
+    if (!n->vm_running) {
+        return 0;
+    }

     if (!virtio_queue_ready(n->rx_vq) ||
         !(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
@@ -672,11 +704,12 @@ static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
 {
     VirtQueueElement elem;
     int32_t num_packets = 0;

     if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) {
         return num_packets;
     }
-
+    assert(n->vm_running);
+
     if (n->async_tx.elem.out_num) {
         virtio_queue_set_notification(n->tx_vq, 0);
         return num_packets;
@@ -735,6 +768,12 @@ static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIONet *n = to_virtio_net(vdev);

+    /* This happens when device was stopped but VCPU wasn't. */
+    if (!n->vm_running) {
+        n->tx_waiting = 1;
+        return;
+    }
+
     if (n->tx_waiting) {
         virtio_queue_set_notification(vq, 1);
         qemu_del_timer(n->tx_timer);
@@ -755,14 +794,19 @@ static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
     if (unlikely(n->tx_waiting)) {
         return;
     }
+    n->tx_waiting = 1;
+    /* This happens when device was stopped but VCPU wasn't. */
+    if (!n->vm_running) {
+        return;
+    }
     virtio_queue_set_notification(vq, 0);
     qemu_bh_schedule(n->tx_bh);
-    n->tx_waiting = 1;
 }

 static void virtio_net_tx_timer(void *opaque)
 {
     VirtIONet *n = opaque;
+    assert(n->vm_running);

     n->tx_waiting = 0;

@@ -779,6 +823,8 @@ static void virtio_net_tx_bh(void *opaque)
     VirtIONet *n = opaque;
     int32_t ret;

+    assert(n->vm_running);
+
     n->tx_waiting = 0;

     /* Just in case the driver is not ready on more */
@@ -923,15 +969,6 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
         }
     }
     n->mac_table.first_multi = i;
-
-    if (n->tx_waiting) {
-        if (n->tx_timer) {
-            qemu_mod_timer(n->tx_timer,
-                           qemu_get_clock(vm_clock) + n->tx_timeout);
-        } else {
-            qemu_bh_schedule(n->tx_bh);
-        }
-    }
     return 0;
 }

migration.c

@@ -370,8 +370,6 @@ void migrate_fd_put_ready(void *opaque)
         DPRINTF("done iterating\n");
         vm_stop(0);

-        qemu_aio_flush();
-        bdrv_flush_all();
         if ((qemu_savevm_state_complete(s->mon, s->file)) < 0) {
             if (old_vm_running) {
                 vm_start();
4
net.c
4
net.c
@ -1050,6 +1050,10 @@ static const struct {
|
|||||||
.name = "mcast",
|
.name = "mcast",
|
||||||
.type = QEMU_OPT_STRING,
|
.type = QEMU_OPT_STRING,
|
||||||
.help = "UDP multicast address and port number",
|
.help = "UDP multicast address and port number",
|
||||||
|
}, {
|
||||||
|
.name = "localaddr",
|
||||||
|
.type = QEMU_OPT_STRING,
|
||||||
|
.help = "source address for multicast packets",
|
||||||
},
|
},
|
||||||
{ /* end of list */ }
|
{ /* end of list */ }
|
||||||
},
|
},
|
||||||
net/socket.c | 50

@@ -149,7 +149,7 @@ static void net_socket_send_dgram(void *opaque)
     qemu_send_packet(&s->nc, s->buf, size);
 }

-static int net_socket_mcast_create(struct sockaddr_in *mcastaddr)
+static int net_socket_mcast_create(struct sockaddr_in *mcastaddr, struct in_addr *localaddr)
 {
     struct ip_mreq imr;
     int fd;
@@ -183,7 +183,11 @@ static int net_socket_mcast_create(struct sockaddr_in *mcastaddr)

     /* Add host to multicast group */
     imr.imr_multiaddr = mcastaddr->sin_addr;
-    imr.imr_interface.s_addr = htonl(INADDR_ANY);
+    if (localaddr) {
+        imr.imr_interface = *localaddr;
+    } else {
+        imr.imr_interface.s_addr = htonl(INADDR_ANY);
+    }

     ret = setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
                      (const char *)&imr, sizeof(struct ip_mreq));
@@ -201,6 +205,15 @@ static int net_socket_mcast_create(struct sockaddr_in *mcastaddr)
         goto fail;
     }

+    /* If a bind address is given, only send packets from that address */
+    if (localaddr != NULL) {
+        ret = setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF, localaddr, sizeof(*localaddr));
+        if (ret < 0) {
+            perror("setsockopt(IP_MULTICAST_IF)");
+            goto fail;
+        }
+    }
+
     socket_set_nonblock(fd);
     return fd;
 fail:
@@ -248,7 +261,7 @@ static NetSocketState *net_socket_fd_init_dgram(VLANState *vlan,
             return NULL;
         }
         /* clone dgram socket */
-        newfd = net_socket_mcast_create(&saddr);
+        newfd = net_socket_mcast_create(&saddr, NULL);
         if (newfd < 0) {
             /* error already reported by net_socket_mcast_create() */
             close(fd);
@@ -468,17 +481,26 @@ static int net_socket_connect_init(VLANState *vlan,
 static int net_socket_mcast_init(VLANState *vlan,
                                  const char *model,
                                  const char *name,
-                                 const char *host_str)
+                                 const char *host_str,
+                                 const char *localaddr_str)
 {
     NetSocketState *s;
     int fd;
     struct sockaddr_in saddr;
+    struct in_addr localaddr, *param_localaddr;

     if (parse_host_port(&saddr, host_str) < 0)
         return -1;

-    fd = net_socket_mcast_create(&saddr);
+    if (localaddr_str != NULL) {
+        if (inet_aton(localaddr_str, &localaddr) == 0)
+            return -1;
+        param_localaddr = &localaddr;
+    } else {
+        param_localaddr = NULL;
+    }
+
+    fd = net_socket_mcast_create(&saddr, param_localaddr);
     if (fd < 0)
         return -1;

@@ -505,8 +527,9 @@ int net_init_socket(QemuOpts *opts,

         if (qemu_opt_get(opts, "listen") ||
             qemu_opt_get(opts, "connect") ||
-            qemu_opt_get(opts, "mcast")) {
-            error_report("listen=, connect= and mcast= is invalid with fd=");
+            qemu_opt_get(opts, "mcast") ||
+            qemu_opt_get(opts, "localaddr")) {
+            error_report("listen=, connect=, mcast= and localaddr= is invalid with fd=\n");
             return -1;
         }

@@ -524,8 +547,9 @@ int net_init_socket(QemuOpts *opts,

         if (qemu_opt_get(opts, "fd") ||
             qemu_opt_get(opts, "connect") ||
-            qemu_opt_get(opts, "mcast")) {
-            error_report("fd=, connect= and mcast= is invalid with listen=");
+            qemu_opt_get(opts, "mcast") ||
+            qemu_opt_get(opts, "localaddr")) {
+            error_report("fd=, connect=, mcast= and localaddr= is invalid with listen=\n");
             return -1;
         }

@@ -539,8 +563,9 @@ int net_init_socket(QemuOpts *opts,

         if (qemu_opt_get(opts, "fd") ||
             qemu_opt_get(opts, "listen") ||
-            qemu_opt_get(opts, "mcast")) {
-            error_report("fd=, listen= and mcast= is invalid with connect=");
+            qemu_opt_get(opts, "mcast") ||
+            qemu_opt_get(opts, "localaddr")) {
+            error_report("fd=, listen=, mcast= and localaddr= is invalid with connect=\n");
             return -1;
         }

@@ -550,7 +575,7 @@ int net_init_socket(QemuOpts *opts,
             return -1;
         }
     } else if (qemu_opt_get(opts, "mcast")) {
-        const char *mcast;
+        const char *mcast, *localaddr;

         if (qemu_opt_get(opts, "fd") ||
             qemu_opt_get(opts, "connect") ||
@@ -560,8 +585,9 @@ int net_init_socket(QemuOpts *opts,
         }

         mcast = qemu_opt_get(opts, "mcast");
+        localaddr = qemu_opt_get(opts, "localaddr");

-        if (net_socket_mcast_init(vlan, "socket", name, mcast) == -1) {
+        if (net_socket_mcast_init(vlan, "socket", name, mcast, localaddr) == -1) {
             return -1;
         }
     } else {
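Aside (not from this commit): outside QEMU, the localaddr behaviour boils down to two socket options: IP_ADD_MEMBERSHIP with imr_interface set to the chosen address, and IP_MULTICAST_IF so outgoing datagrams leave from that address. A self-contained sketch with made-up addresses:

    /* mcast_if.c - join a multicast group via a specific local interface */
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        struct in_addr group, local;
        struct ip_mreq imr;
        int fd;

        inet_aton("239.192.168.1", &group);  /* multicast group */
        inet_aton("10.0.0.1", &local);       /* hypothetical host address */

        fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd < 0) {
            perror("socket");
            return 1;
        }

        /* Join the group on the chosen interface instead of INADDR_ANY. */
        imr.imr_multiaddr = group;
        imr.imr_interface = local;
        if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &imr, sizeof(imr)) < 0) {
            perror("setsockopt(IP_ADD_MEMBERSHIP)");
        }

        /* Send outgoing multicast from that same address. */
        if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF, &local, sizeof(local)) < 0) {
            perror("setsockopt(IP_MULTICAST_IF)");
        }

        close(fd);
        return 0;
    }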
qemu-options.hx

@@ -1061,8 +1061,9 @@ DEF("net", HAS_ARG, QEMU_OPTION_net,
 #endif
     "-net socket[,vlan=n][,name=str][,fd=h][,listen=[host]:port][,connect=host:port]\n"
     "                connect the vlan 'n' to another VLAN using a socket connection\n"
-    "-net socket[,vlan=n][,name=str][,fd=h][,mcast=maddr:port]\n"
+    "-net socket[,vlan=n][,name=str][,fd=h][,mcast=maddr:port[,localaddr=addr]]\n"
     "                connect the vlan 'n' to multicast maddr and port\n"
+    "                use 'localaddr=addr' to specify the host address to send packets from\n"
 #ifdef CONFIG_VDE
     "-net vde[,vlan=n][,name=str][,sock=socketpath][,port=n][,group=groupname][,mode=octalmode]\n"
     "                connect the vlan 'n' to port 'n' of a vde switch running\n"
@@ -1256,7 +1257,7 @@ qemu linux.img -net nic,macaddr=52:54:00:12:34:57 \
 -net socket,connect=127.0.0.1:1234
 @end example

-@item -net socket[,vlan=@var{n}][,name=@var{name}][,fd=@var{h}] [,mcast=@var{maddr}:@var{port}]
+@item -net socket[,vlan=@var{n}][,name=@var{name}][,fd=@var{h}][,mcast=@var{maddr}:@var{port}[,localaddr=@var{addr}]]

 Create a VLAN @var{n} shared with another QEMU virtual
 machines using a UDP multicast socket, effectively making a bus for
@@ -1296,6 +1297,12 @@ qemu linux.img -net nic,macaddr=52:54:00:12:34:56 \
 /path/to/linux ubd0=/path/to/root_fs eth0=mcast
 @end example

+Example (send packets from host's 1.2.3.4):
+@example
+qemu linux.img -net nic,macaddr=52:54:00:12:34:56 \
+               -net socket,mcast=239.192.168.1:1102,localaddr=1.2.3.4
+@end example
+
 @item -net vde[,vlan=@var{n}][,name=@var{name}][,sock=@var{socketpath}] [,port=@var{n}][,group=@var{groupname}][,mode=@var{octalmode}]
 Connect VLAN @var{n} to PORT @var{n} of a vde switch running on host and
 listening for incoming connections on @var{socketpath}. Use GROUP @var{groupname}
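Aside (not from this commit): on a multi-homed host, localaddr pins the multicast traffic to one interface, so guests on different hosts can share the same bus. A hypothetical pair of invocations (addresses made up):

    host-a$ qemu linux.img -net nic -net socket,mcast=239.192.168.1:1102,localaddr=10.0.0.1
    host-b$ qemu linux.img -net nic -net socket,mcast=239.192.168.1:1102,localaddr=10.0.0.2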
savevm.c | 4

@@ -1575,8 +1575,6 @@ static int qemu_savevm_state(Monitor *mon, QEMUFile *f)
     saved_vm_running = vm_running;
     vm_stop(0);

-    bdrv_flush_all();
-
     ret = qemu_savevm_state_begin(mon, f, 0, 0);
     if (ret < 0)
         goto out;
@@ -1885,8 +1883,6 @@ void do_savevm(Monitor *mon, const QDict *qdict)
         monitor_printf(mon, "No block device can accept snapshots\n");
         return;
     }
-    /* ??? Should this occur after vm_stop? */
-    qemu_aio_flush();

     saved_vm_running = vm_running;
     vm_stop(0);