Mirror of https://github.com/qemu/qemu.git
Merge remote-tracking branch 'mst/tags/for_anthony' into staging
* mst/tags/for_anthony:
  e1000: set E1000_ICR_INT_ASSERTED only for 8257x
  e1000: link auto-negotiation emulation
  e1000: introduce bit for debugging PHY emulation
  e1000: introduce helpers to manipulate link status
  e1000: PHY loopback mode support
  e1000: conditionally raise irq at the end of MDI cycle
  e1000: introduce bits of PHY control register
  eepro100: Fix multicast regression
  virtio: order index/descriptor reads
  virtio: add missing mb() on enable notification
  virtio: add missing mb() on notification
  e1000: move reset function earlier in file
commit 0677e2777e

hw/e1000.c: 167 lines changed
@@ -42,7 +42,7 @@ enum {
    DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
    DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
    DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
    DEBUG_RXFILTER, DEBUG_NOTYET,
    DEBUG_RXFILTER, DEBUG_PHY, DEBUG_NOTYET,
};
#define DBGBIT(x) (1<<DEBUG_##x)
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);

@@ -125,6 +125,8 @@ typedef struct E1000State_st {
        uint16_t reading;
        uint32_t old_eecd;
    } eecd_state;

    QEMUTimer *autoneg_timer;
} E1000State;

#define defreg(x) x = (E1000_##x>>2)
@@ -142,6 +144,48 @@ enum {
    defreg(VET),
};

static void
e1000_link_down(E1000State *s)
{
    s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
    s->phy_reg[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
}

static void
e1000_link_up(E1000State *s)
{
    s->mac_reg[STATUS] |= E1000_STATUS_LU;
    s->phy_reg[PHY_STATUS] |= MII_SR_LINK_STATUS;
}

static void
set_phy_ctrl(E1000State *s, int index, uint16_t val)
{
    if ((val & MII_CR_AUTO_NEG_EN) && (val & MII_CR_RESTART_AUTO_NEG)) {
        s->nic->nc.link_down = true;
        e1000_link_down(s);
        s->phy_reg[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE;
        DBGOUT(PHY, "Start link auto negotiation\n");
        qemu_mod_timer(s->autoneg_timer, qemu_get_clock_ms(vm_clock) + 500);
    }
}

static void
e1000_autoneg_timer(void *opaque)
{
    E1000State *s = opaque;
    s->nic->nc.link_down = false;
    e1000_link_up(s);
    s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
    DBGOUT(PHY, "Auto negotiation is completed\n");
}

static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
    [PHY_CTRL] = set_phy_ctrl,
};

enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };

enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
    [PHY_STATUS] = PHY_R, [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
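Taken together, set_phy_ctrl() and e1000_autoneg_timer() are the core of the new link auto-negotiation emulation: a guest write to PHY_CTRL with both MII_CR_AUTO_NEG_EN and MII_CR_RESTART_AUTO_NEG takes the link down, clears the auto-negotiation-complete bit, and arms a 500 ms timer whose callback brings the link back up. The standalone sketch below models only that sequence; struct fake_nic, the function names, and the MII_SR_LINK_STATUS/MII_SR_AUTONEG_COMPLETE values (standard MII bits, not shown in the hunks above) are illustrative stand-ins, not QEMU code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* PHY_CTRL bits from the patch; the two PHY_STATUS bits are the standard
 * MII values and are assumed here, not shown in the hunks above. */
#define MII_CR_RESTART_AUTO_NEG  0x0200
#define MII_CR_AUTO_NEG_EN       0x1000
#define MII_SR_LINK_STATUS       0x0004
#define MII_SR_AUTONEG_COMPLETE  0x0020

/* Hypothetical stand-in for the few E1000State fields involved. */
struct fake_nic {
    bool link_down;
    uint16_t phy_status;        /* models s->phy_reg[PHY_STATUS] */
};

/* What e1000_autoneg_timer() does when the 500 ms timer fires. */
static void autoneg_done(struct fake_nic *s)
{
    s->link_down = false;
    s->phy_status |= MII_SR_LINK_STATUS | MII_SR_AUTONEG_COMPLETE;
}

/* What set_phy_ctrl() does on a guest write to PHY_CTRL. */
static void phy_ctrl_write(struct fake_nic *s, uint16_t val)
{
    if ((val & MII_CR_AUTO_NEG_EN) && (val & MII_CR_RESTART_AUTO_NEG)) {
        s->link_down = true;
        s->phy_status &= ~(MII_SR_LINK_STATUS | MII_SR_AUTONEG_COMPLETE);
        /* In QEMU this also arms autoneg_timer for now + 500 ms. */
    }
}

int main(void)
{
    struct fake_nic nic = { false, MII_SR_LINK_STATUS };

    phy_ctrl_write(&nic, MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
    printf("during autoneg: link_down=%d status=0x%04x\n",
           nic.link_down, (unsigned)nic.phy_status);

    autoneg_done(&nic);   /* stands in for the timer callback firing */
    printf("after autoneg:  link_down=%d status=0x%04x\n",
           nic.link_down, (unsigned)nic.phy_status);
    return 0;
}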
@@ -152,11 +196,37 @@ static const char phy_regcap[0x20] = {
    [PHY_ID2] = PHY_R, [M88E1000_PHY_SPEC_STATUS] = PHY_R
};

static const uint16_t phy_reg_init[] = {
    [PHY_CTRL] = 0x1140,
    [PHY_STATUS] = 0x794d, /* link initially up with not completed autoneg */
    [PHY_ID1] = 0x141, [PHY_ID2] = PHY_ID2_INIT,
    [PHY_1000T_CTRL] = 0x0e00, [M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60, [PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0, [PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
};

static const uint32_t mac_reg_init[] = {
    [PBA] = 0x00100030,
    [LEDCTL] = 0x602,
    [CTRL] = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
             E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
               E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
               E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
               E1000_STATUS_LU,
    [MANC] = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
             E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
             E1000_MANC_RMCP_EN,
};

static void
set_interrupt_cause(E1000State *s, int index, uint32_t val)
{
    if (val)
    if (val && (E1000_DEVID >= E1000_DEV_ID_82547EI_MOBILE)) {
        /* Only for 8257x */
        val |= E1000_ICR_INT_ASSERTED;
    }
    s->mac_reg[ICR] = val;
    s->mac_reg[ICS] = val;
    qemu_set_irq(s->dev.irq[0], (s->mac_reg[IMS] & s->mac_reg[ICR]) != 0);
@@ -193,6 +263,23 @@ rxbufsize(uint32_t v)
    return 2048;
}

static void e1000_reset(void *opaque)
{
    E1000State *d = opaque;

    qemu_del_timer(d->autoneg_timer);
    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);

    if (d->nic->nc.link_down) {
        e1000_link_down(d);
    }
}

static void
set_ctrl(E1000State *s, int index, uint32_t val)
{
@@ -230,11 +317,18 @@ set_mdic(E1000State *s, int index, uint32_t val)
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
        } else {
            if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
                phyreg_writeops[addr](s, index, data);
            }
            s->phy_reg[addr] = data;
        }
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;
    set_ics(s, 0, E1000_ICR_MDAC);

    if (val & E1000_MDIC_INT_EN) {
        set_ics(s, 0, E1000_ICR_MDAC);
    }
}

static uint32_t
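For context, a guest driver drives the MDI cycle by writing a single MDIC command word (PHY address, register address, opcode, data, and optionally an interrupt-enable bit) and then either polling for READY or waiting for the MDAC interrupt; with the change above the emulated device raises that interrupt only when the guest set E1000_MDIC_INT_EN. The sketch below shows a polled guest-side write against a toy register; the MDIC bit positions are taken from Intel's e1000 documentation rather than from this patch, and the mmio_* helpers are made up for the example.

#include <stdint.h>
#include <stdio.h>

/* MDIC bit layout per Intel's e1000 manuals; these values are an assumption
 * for illustration and are not part of the patch above. */
#define MDIC_REG_SHIFT 16          /* PHY register address */
#define MDIC_PHY_SHIFT 21          /* PHY address (1 = the internal PHY) */
#define MDIC_OP_WRITE  (1u << 26)
#define MDIC_READY     (1u << 28)
#define MDIC_INT_EN    (1u << 29)  /* request the E1000_ICR_MDAC interrupt */
#define MDIC_ERROR     (1u << 30)

/* Toy register standing in for the device's MMIO MDIC register. */
static uint32_t fake_mdic;

static void mmio_write_mdic(uint32_t val)
{
    /* A real device finishes the MDI cycle later; the toy one is instant. */
    fake_mdic = val | MDIC_READY;
}

static uint32_t mmio_read_mdic(void)
{
    return fake_mdic;
}

/* Polled PHY write: MDIC_INT_EN stays clear, so with the patch above the
 * emulated e1000 no longer raises an MDAC interrupt the driver never asked
 * for. */
static int phy_write_polled(unsigned phy, unsigned reg, uint16_t data)
{
    mmio_write_mdic(data | (reg << MDIC_REG_SHIFT) |
                    (phy << MDIC_PHY_SHIFT) | MDIC_OP_WRITE);
    while (!(mmio_read_mdic() & MDIC_READY)) {
        /* spin until the MDI cycle completes */
    }
    return (mmio_read_mdic() & MDIC_ERROR) ? -1 : 0;
}

int main(void)
{
    /* Restart auto-negotiation: PHY 1, register 0 (PHY_CTRL), value 0x1200. */
    printf("MDI write %s\n", phy_write_polled(1, 0, 0x1200) ? "failed" : "ok");
    return 0;
}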
@@ -348,6 +442,16 @@ fcs_len(E1000State *s)
    return (s->mac_reg[RCTL] & E1000_RCTL_SECRC) ? 0 : 4;
}

static void
e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
{
    if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
        s->nic->nc.info->receive(&s->nic->nc, buf, size);
    } else {
        qemu_send_packet(&s->nic->nc, buf, size);
    }
}

static void
xmit_seg(E1000State *s)
{
@@ -397,9 +501,9 @@ xmit_seg(E1000State *s)
        memmove(tp->vlan, tp->data, 4);
        memmove(tp->data, tp->data + 4, 8);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        qemu_send_packet(&s->nic->nc, tp->vlan, tp->size + 4);
        e1000_send_packet(s, tp->vlan, tp->size + 4);
    } else
        qemu_send_packet(&s->nic->nc, tp->data, tp->size);
        e1000_send_packet(s, tp->data, tp->size);
    s->mac_reg[TPT]++;
    s->mac_reg[GPTC]++;
    n = s->mac_reg[TOTL];
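The e1000_send_packet() helper introduced above, and now used by xmit_seg(), is what implements PHY loopback: with MII_CR_LOOPBACK set in PHY_CTRL the transmitted frame is fed straight back into the device's own receive handler instead of being handed to qemu_send_packet(). A minimal sketch of the same dispatch decision, with made-up callbacks standing in for the QEMU net layer:

#include <stdint.h>
#include <stdio.h>

#define MII_CR_LOOPBACK 0x4000   /* PHY_CTRL bit, as defined in the patch */

/* Made-up callbacks standing in for the QEMU net layer. */
typedef void (*deliver_fn)(const uint8_t *buf, int size);

static void to_network(const uint8_t *buf, int size)
{
    (void)buf;
    printf("sent %d bytes to the network backend\n", size);
}

static void to_own_rx_path(const uint8_t *buf, int size)
{
    (void)buf;
    printf("looped %d bytes back into the NIC's own receive path\n", size);
}

/* Mirrors the decision e1000_send_packet() makes on every transmit. */
static void send_packet(uint16_t phy_ctrl, const uint8_t *buf, int size)
{
    deliver_fn deliver =
        (phy_ctrl & MII_CR_LOOPBACK) ? to_own_rx_path : to_network;
    deliver(buf, size);
}

int main(void)
{
    uint8_t frame[64] = { 0 };

    send_packet(0x1140, frame, sizeof(frame));                   /* normal   */
    send_packet(0x1140 | MII_CR_LOOPBACK, frame, sizeof(frame)); /* loopback */
    return 0;
}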
@@ -622,11 +726,9 @@ e1000_set_link_status(VLANClientState *nc)
    uint32_t old_status = s->mac_reg[STATUS];

    if (nc->link_down) {
        s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
        s->phy_reg[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
        e1000_link_down(s);
    } else {
        s->mac_reg[STATUS] |= E1000_STATUS_LU;
        s->phy_reg[PHY_STATUS] |= MII_SR_LINK_STATUS;
        e1000_link_up(s);
    }

    if (s->mac_reg[STATUS] != old_status)
@@ -901,6 +1003,7 @@ static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
    [MTA ... MTA+127] = &mac_writereg,
    [VFTA ... VFTA+127] = &mac_writereg,
};

enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };

static void
@@ -1061,29 +1164,6 @@ static const uint16_t e1000_eeprom_template[64] = {
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000,
};

static const uint16_t phy_reg_init[] = {
    [PHY_CTRL] = 0x1140, [PHY_STATUS] = 0x796d, // link initially up
    [PHY_ID1] = 0x141, [PHY_ID2] = PHY_ID2_INIT,
    [PHY_1000T_CTRL] = 0x0e00, [M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60, [PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0, [PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
};

static const uint32_t mac_reg_init[] = {
    [PBA] = 0x00100030,
    [LEDCTL] = 0x602,
    [CTRL] = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
             E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
               E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
               E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
               E1000_STATUS_LU,
    [MANC] = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
             E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
             E1000_MANC_RMCP_EN,
};

/* PCI interface */

static void
@@ -1117,29 +1197,14 @@ pci_e1000_uninit(PCIDevice *dev)
{
    E1000State *d = DO_UPCAST(E1000State, dev, dev);

    qemu_del_timer(d->autoneg_timer);
    qemu_free_timer(d->autoneg_timer);
    memory_region_destroy(&d->mmio);
    memory_region_destroy(&d->io);
    qemu_del_vlan_client(&d->nic->nc);
    return 0;
}

static void e1000_reset(void *opaque)
{
    E1000State *d = opaque;

    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);

    if (d->nic->nc.link_down) {
        d->mac_reg[STATUS] &= ~E1000_STATUS_LU;
        d->phy_reg[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
    }
}

static NetClientInfo net_e1000_info = {
    .type = NET_CLIENT_TYPE_NIC,
    .size = sizeof(NICState),
@@ -1188,6 +1253,8 @@ static int pci_e1000_init(PCIDevice *pci_dev)

    add_boot_device_path(d->conf.bootindex, &pci_dev->qdev, "/ethernet-phy@0");

    d->autoneg_timer = qemu_new_timer_ms(vm_clock, e1000_autoneg_timer, d);

    return 0;
}
hw/e1000_hw.h
@@ -349,6 +349,18 @@
#define M88E1000_PHY_VCO_REG_BIT8  0x100 /* Bits 8 & 11 are adjusted for */
#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */

/* PHY Control Register */
#define MII_CR_SPEED_SELECT_MSB 0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
#define MII_CR_COLL_TEST_ENABLE 0x0080  /* Collision test enable */
#define MII_CR_FULL_DUPLEX      0x0100  /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200  /* Restart auto negotiation */
#define MII_CR_ISOLATE          0x0400  /* Isolate PHY from MII */
#define MII_CR_POWER_DOWN       0x0800  /* Power down */
#define MII_CR_AUTO_NEG_EN      0x1000  /* Auto Neg Enable */
#define MII_CR_SPEED_SELECT_LSB 0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
#define MII_CR_LOOPBACK         0x4000  /* 0 = normal, 1 = loopback */
#define MII_CR_RESET            0x8000  /* 0 = normal, 1 = PHY reset */

/* PHY Status Register */
#define MII_SR_EXTENDED_CAPS    0x0001  /* Extended register capabilities */
#define MII_SR_JABBER_DETECT    0x0002  /* Jabber Detected */
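These MII_CR_* definitions make the PHY reset value used earlier in the series readable: phy_reg_init[PHY_CTRL] = 0x1140 is MII_CR_AUTO_NEG_EN (0x1000) | MII_CR_FULL_DUPLEX (0x0100) | MII_CR_SPEED_SELECT_MSB (0x0040), i.e. auto-negotiation enabled, full duplex, 1000 Mb/s speed select. A tiny self-contained check using only the constants shown above:

#include <assert.h>
#include <stdio.h>

#define MII_CR_SPEED_SELECT_MSB 0x0040
#define MII_CR_FULL_DUPLEX      0x0100
#define MII_CR_AUTO_NEG_EN      0x1000

int main(void)
{
    /* Reset value of PHY_CTRL used by the emulated e1000 PHY. */
    unsigned phy_ctrl = MII_CR_AUTO_NEG_EN | MII_CR_FULL_DUPLEX |
                        MII_CR_SPEED_SELECT_MSB;

    assert(phy_ctrl == 0x1140);
    printf("PHY_CTRL reset value: 0x%04x\n", phy_ctrl);
    return 0;
}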
hw/eepro100.c
@@ -322,8 +322,32 @@ static const uint16_t eepro100_mdi_mask[] = {
    0xffff, 0xffff, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
};

#define POLYNOMIAL 0x04c11db6

static E100PCIDeviceInfo *eepro100_get_class(EEPRO100State *s);

/* From FreeBSD (locally modified). */
static unsigned e100_compute_mcast_idx(const uint8_t *ep)
{
    uint32_t crc;
    int carry, i, j;
    uint8_t b;

    crc = 0xffffffff;
    for (i = 0; i < 6; i++) {
        b = *ep++;
        for (j = 0; j < 8; j++) {
            carry = ((crc & 0x80000000L) ? 1 : 0) ^ (b & 0x01);
            crc <<= 1;
            b >>= 1;
            if (carry) {
                crc = ((crc ^ POLYNOMIAL) | carry);
            }
        }
    }
    return (crc & BITS(7, 2)) >> 2;
}

/* Read a 16 bit control/status (CSR) register. */
static uint16_t e100_read_reg2(EEPRO100State *s, E100RegisterOffset addr)
{
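The two hunks below use this CRC to pick one of 64 bits in the card's multicast hash filter: the 6-bit index selects a byte (idx >> 3) and a bit (idx & 7) of the 8-byte mult[] array. Here is a standalone sketch of the same add-and-test logic; the CRC loop is copied from the function above, BITS(7, 2) is expanded to the 0xfc mask it denotes, and the sample MAC address is arbitrary:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define POLYNOMIAL 0x04c11db6

/* Same algorithm as e100_compute_mcast_idx(); BITS(7, 2) expands to 0xfc. */
static unsigned compute_mcast_idx(const uint8_t *ep)
{
    uint32_t crc = 0xffffffff;
    int carry, i, j;
    uint8_t b;

    for (i = 0; i < 6; i++) {
        b = *ep++;
        for (j = 0; j < 8; j++) {
            carry = ((crc & 0x80000000L) ? 1 : 0) ^ (b & 0x01);
            crc <<= 1;
            b >>= 1;
            if (carry) {
                crc = ((crc ^ POLYNOMIAL) | carry);
            }
        }
    }
    return (crc & 0xfc) >> 2;    /* 6-bit index: one of 64 filter bits */
}

int main(void)
{
    /* Arbitrary multicast MAC address, e.g. an IPv4 multicast group. */
    const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    uint8_t mult[8];                      /* the card's 64-bit hash filter */
    unsigned idx = compute_mcast_idx(addr);

    memset(mult, 0, sizeof(mult));
    mult[idx >> 3] |= 1 << (idx & 7);     /* what set_multicast_list() does */

    /* What nic_receive() checks for an incoming multicast frame. */
    printf("index %u -> %s\n", idx,
           (mult[idx >> 3] & (1 << (idx & 7))) ? "accept" : "drop");
    return 0;
}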
@@ -823,7 +847,7 @@ static void set_multicast_list(EEPRO100State *s)
        uint8_t multicast_addr[6];
        pci_dma_read(&s->dev, s->cb_address + 10 + i, multicast_addr, 6);
        TRACE(OTHER, logout("multicast entry %s\n", nic_dump(multicast_addr, 6)));
        unsigned mcast_idx = compute_mcast_idx(multicast_addr);
        unsigned mcast_idx = e100_compute_mcast_idx(multicast_addr);
        assert(mcast_idx < 64);
        s->mult[mcast_idx >> 3] |= (1 << (mcast_idx & 7));
    }
@@ -1650,7 +1674,7 @@ static ssize_t nic_receive(VLANClientState *nc, const uint8_t * buf, size_t size)
        if (s->configuration[21] & BIT(3)) {
            /* Multicast all bit is set, receive all multicast frames. */
        } else {
            unsigned mcast_idx = compute_mcast_idx(buf);
            unsigned mcast_idx = e100_compute_mcast_idx(buf);
            assert(mcast_idx < 64);
            if (s->mult[mcast_idx >> 3] & (1 << (mcast_idx & 7))) {
                /* Multicast frame is allowed in hash table. */
hw/virtio.c: 11 lines changed
@@ -209,6 +209,10 @@ void virtio_queue_set_notification(VirtQueue *vq, int enable)
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

int virtio_queue_ready(VirtQueue *vq)
@@ -283,6 +287,11 @@ static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
                     idx, vring_avail_idx(vq));
        exit(1);
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}
@@ -700,6 +709,8 @@ static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;
    /* We need to expose used array entries before checking used event. */
    smp_mb();
    /* Always notify when queue is empty (when feature acknowledge) */
    if (((vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) &&
         !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx)) {
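All three virtio hunks add the same kind of fix: a store to shared ring memory followed by a load of another shared field must not be reordered, otherwise each side can conclude the other will act and a notification is lost. The sketch below shows the store-then-load pattern with C11 atomics; the two-field toy_ring and the index comparison are illustrative simplifications, not the real vring layout or event-index rule.

#include <stdatomic.h>
#include <stdint.h>

/* A drastically reduced "ring": the device side publishes used_idx, the
 * driver side publishes used_event ("notify me once you pass this index"). */
struct toy_ring {
    _Atomic uint16_t used_idx;     /* written by the device */
    _Atomic uint16_t used_event;   /* written by the driver */
};

/* Device side, in the spirit of vring_notify(): publish the new used index,
 * then look at used_event to decide whether to interrupt.  The seq_cst fence
 * plays the role of smp_mb(); without it the load could be satisfied before
 * the store is visible, and a wanted notification could be skipped. */
static int publish_and_check(struct toy_ring *r, uint16_t new_idx)
{
    atomic_store_explicit(&r->used_idx, new_idx, memory_order_relaxed);
    atomic_thread_fence(memory_order_seq_cst);            /* "smp_mb()" */
    return atomic_load_explicit(&r->used_event,
                                memory_order_relaxed) < new_idx;
}

int main(void)
{
    struct toy_ring r = { 0, 0 };
    return publish_and_check(&r, 1) ? 0 : 1;
}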
qemu-barrier.h
@@ -4,34 +4,61 @@
/* Compiler barrier */
#define barrier()   asm volatile("" ::: "memory")

#if defined(__i386__) || defined(__x86_64__)
#if defined(__i386__)

/*
 * Because of the strongly ordered x86 storage model, wmb() is a nop
 * Because of the strongly ordered x86 storage model, wmb() and rmb() are nops
 * on x86(well, a compiler barrier only). Well, at least as long as
 * qemu doesn't do accesses to write-combining memory or non-temporal
 * load/stores from C code.
 */
#define smp_wmb()   barrier()
#define smp_rmb()   barrier()
/*
 * We use GCC builtin if it's available, as that can use
 * mfence on 32 bit as well, e.g. if built with -march=pentium-m.
 * However, on i386, there seem to be known bugs as recently as 4.3.
 * */
#if defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
#define smp_mb() __sync_synchronize()
#else
#define smp_mb() asm volatile("lock; addl $0,0(%%esp) " ::: "memory")
#endif

#elif defined(__x86_64__)

#define smp_wmb()   barrier()
#define smp_rmb()   barrier()
#define smp_mb() asm volatile("mfence" ::: "memory")

#elif defined(_ARCH_PPC)

/*
 * We use an eieio() for a wmb() on powerpc. This assumes we don't
 * We use an eieio() for wmb() on powerpc. This assumes we don't
 * need to order cacheable and non-cacheable stores with respect to
 * each other
 */
#define smp_wmb()   asm volatile("eieio" ::: "memory")

#if defined(__powerpc64__)
#define smp_rmb()   asm volatile("lwsync" ::: "memory")
#else
#define smp_rmb()   asm volatile("sync" ::: "memory")
#endif

#define smp_mb()   asm volatile("sync" ::: "memory")

#else

/*
 * For (host) platforms we don't have explicit barrier definitions
 * for, we use the gcc __sync_synchronize() primitive to generate a
 * full barrier. This should be safe on all platforms, though it may
 * be overkill.
 * be overkill for wmb() and rmb().
 */
#define smp_wmb()   __sync_synchronize()
#define smp_mb()   __sync_synchronize()
#define smp_rmb()   __sync_synchronize()

#endif
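For completeness, the classic pattern these macros exist for is message passing through shared memory: the producer orders its payload store before the flag store with smp_wmb(), and the consumer orders the flag load before the payload load with smp_rmb(). A hedged sketch follows; the stand-in macro definitions are only there so the example builds on its own on a strongly ordered host, a real user would include qemu-barrier.h instead.

#include <stdint.h>

/* Stand-ins for qemu-barrier.h when building this sketch alone on x86. */
#define smp_wmb() __asm__ __volatile__("" ::: "memory")
#define smp_rmb() __asm__ __volatile__("" ::: "memory")

static volatile uint32_t payload;
static volatile uint32_t ready;

/* Producer: make the payload visible before the flag that announces it. */
static void publish(uint32_t value)
{
    payload = value;
    smp_wmb();      /* order the payload store before the ready store */
    ready = 1;
}

/* Consumer: only read the payload after seeing the flag, in that order. */
static int try_consume(uint32_t *out)
{
    if (!ready) {
        return 0;
    }
    smp_rmb();      /* order the ready load before the payload load */
    *out = payload;
    return 1;
}

int main(void)
{
    uint32_t v = 0;

    publish(42);
    return (try_consume(&v) && v == 42) ? 0 : 1;
}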