virtio: tests, fixes and cleanups
Just minor tweaks, there's nothing major in this cycle.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJYFnb+AAoJECgfDbjSjVRpBD4IAKjmFLG6rcswWwYVSNwxMcEX
6gxStyojtx9jTdQhjYUeC4KJmPozGn9mxEyHuWHNAqWWP/nASbViXYDwVm9xv/14
TxuApiOJ9tFB0DYlduGDQbS5D6m3WSk7U2s71pwC0g2YPx1EQc8jHbeT09Jn5qVw
6A099BILekts/lN6s9ST+6CAHBLTkZGB6iyAb+zAh+KN+VRX3ikpOjebgUjzO1+P
hLJc+lgfls+cwuhwO5A+Pg1PMrJ455A/gdEVUfMbbj8BAkihueUHPUzkc702Y5h2
j3AfCLJdCBIR/k3+R9Du0IcK0mXioZtiBPfREGQ+kI0ukauFDhtKgjaOd0bmU4s=
=ASeM
-----END PGP SIGNATURE-----

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio updates from Michael Tsirkin:
 "Tests, fixes and cleanups.

  Just minor tweaks, there's nothing major in this cycle"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  virtio_ring: mark vring_dma_dev inline
  virtio/vhost: add Jason to list of maintainers
  virtio_blk: Delete an unnecessary initialisation in init_vq()
  virtio_blk: Use kmalloc_array() in init_vq()
  virtio: remove config.c
  virtio: console: Unlock vqs while freeing buffers
  ringtest: poll for new buffers once before updating event index
  ringtest: commonize implementation of poll_avail/poll_used
  ringtest: use link-time optimization
  virtio: update balloon size in balloon "probe"
  virtio_ring: Make interrupt suppression spec compliant
  virtio_pci: Limit DMA mask to 44 bits for legacy virtio devices
commit 04659febcb
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -12783,6 +12783,7 @@ F:	include/uapi/linux/virtio_console.h
 
 VIRTIO CORE, NET AND BLOCK DRIVERS
 M:	"Michael S. Tsirkin" <mst@redhat.com>
+M:	Jason Wang <jasowang@redhat.com>
 L:	virtualization@lists.linux-foundation.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/virtio/
@@ -12813,6 +12814,7 @@ F:	include/uapi/linux/virtio_gpu.h
 
 VIRTIO HOST (VHOST)
 M:	"Michael S. Tsirkin" <mst@redhat.com>
+M:	Jason Wang <jasowang@redhat.com>
 L:	kvm@vger.kernel.org
 L:	virtualization@lists.linux-foundation.org
 L:	netdev@vger.kernel.org
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -376,7 +376,7 @@ static void virtblk_config_changed(struct virtio_device *vdev)
 
 static int init_vq(struct virtio_blk *vblk)
 {
-	int err = 0;
+	int err;
 	int i;
 	vq_callback_t **callbacks;
 	const char **names;
@@ -390,13 +390,13 @@ static int init_vq(struct virtio_blk *vblk)
 	if (err)
 		num_vqs = 1;
 
-	vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
+	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
 	if (!vblk->vqs)
 		return -ENOMEM;
 
-	names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
-	callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
-	vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
+	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
+	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
+	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
 	if (!names || !callbacks || !vqs) {
 		err = -ENOMEM;
 		goto out;
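For reference, kmalloc_array(n, size, flags) behaves like kmalloc(n * size, flags) except that the multiplication is checked for overflow, so a bogus count cannot silently wrap into a too-small allocation. A rough userspace sketch of that idea (alloc_array_checked() below is only an illustration, not the kernel helper):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Overflow-checked array allocation, mirroring what kmalloc_array() adds
 * on top of a plain kmalloc(n * size, ...). */
static void *alloc_array_checked(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;		/* n * size would overflow */
	return malloc(n * size);
}

int main(void)
{
	/* Roughly: vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL); */
	size_t num_vqs = 4;
	void **vqs = alloc_array_checked(num_vqs, sizeof(*vqs));

	printf("allocated %zu pointers: %s\n", num_vqs, vqs ? "ok" : "failed");
	free(vqs);
	return 0;
}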
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1539,19 +1539,29 @@ static void remove_port_data(struct port *port)
 	spin_lock_irq(&port->inbuf_lock);
 	/* Remove unused data this port might have received. */
 	discard_port_data(port);
+	spin_unlock_irq(&port->inbuf_lock);
 
 	/* Remove buffers we queued up for the Host to send us data in. */
-	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
-		free_buf(buf, true);
-	spin_unlock_irq(&port->inbuf_lock);
+	do {
+		spin_lock_irq(&port->inbuf_lock);
+		buf = virtqueue_detach_unused_buf(port->in_vq);
+		spin_unlock_irq(&port->inbuf_lock);
+		if (buf)
+			free_buf(buf, true);
+	} while (buf);
 
 	spin_lock_irq(&port->outvq_lock);
 	reclaim_consumed_buffers(port);
+	spin_unlock_irq(&port->outvq_lock);
 
 	/* Free pending buffers from the out-queue. */
-	while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
-		free_buf(buf, true);
-	spin_unlock_irq(&port->outvq_lock);
+	do {
+		spin_lock_irq(&port->outvq_lock);
+		buf = virtqueue_detach_unused_buf(port->out_vq);
+		spin_unlock_irq(&port->outvq_lock);
+		if (buf)
+			free_buf(buf, true);
+	} while (buf);
 }
 
 /*
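The reworked loops above detach one buffer at a time with the relevant vq lock held and only call free_buf() after dropping it. A minimal userspace sketch of that detach-under-lock, free-outside-lock pattern (the queue, lock and names are stand-ins, not the driver's structures):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int queue[4] = { 1, 2, 3, 0 };	/* 0 terminates, like a NULL buf */
static int head;

/* Pop one element; in the driver this is virtqueue_detach_unused_buf(). */
static int detach_one(void)
{
	return queue[head] ? queue[head++] : 0;
}

int main(void)
{
	int buf;

	do {
		pthread_mutex_lock(&lock);
		buf = detach_one();		/* detach while holding the lock */
		pthread_mutex_unlock(&lock);
		if (buf)			/* free with the lock dropped */
			printf("freeing buffer %d outside the lock\n", buf);
	} while (buf);

	return 0;
}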
--- a/drivers/virtio/config.c
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Configuration space parsing helpers for virtio.
- *
- * The configuration is [type][len][... len bytes ...] fields.
- *
- * Copyright 2007 Rusty Russell, IBM Corporation.
- * GPL v2 or later.
- */
-#include <linux/err.h>
-#include <linux/virtio.h>
-#include <linux/virtio_config.h>
-#include <linux/bug.h>
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -577,6 +577,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
 
 	virtio_device_ready(vdev);
 
+	if (towards_target(vb))
+		virtballoon_changed(vdev);
 	return 0;
 
 out_del_vqs:
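The probe-time check above kicks the normal config-changed path when the host has already requested a balloon size different from the current one, instead of waiting for the next config interrupt. Roughly, towards_target() reports how far the balloon still has to grow or shrink; the sketch below only illustrates that idea with made-up fields, it is not the kernel implementation:

#include <stdint.h>
#include <stdio.h>

struct balloon {
	int64_t target_pages;	/* size requested via the device config space */
	int64_t num_pages;	/* pages currently handed to the balloon */
};

/* Non-zero means there is inflation/deflation work left to do. */
static int64_t towards_target(const struct balloon *b)
{
	return b->target_pages - b->num_pages;
}

int main(void)
{
	struct balloon vb = { .target_pages = 1024, .num_pages = 0 };

	if (towards_target(&vb))
		printf("probe: kicking balloon update (%lld pages to go)\n",
		       (long long)towards_target(&vb));
	return 0;
}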
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -212,10 +212,18 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
 		return -ENODEV;
 	}
 
-	rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
-	if (rc)
-		rc = dma_set_mask_and_coherent(&pci_dev->dev,
-					       DMA_BIT_MASK(32));
+	rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
+	if (rc) {
+		rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
+	} else {
+		/*
+		 * The virtio ring base address is expressed as a 32-bit PFN,
+		 * with a page size of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT.
+		 */
+		dma_set_coherent_mask(&pci_dev->dev,
+				DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
+	}
+
 	if (rc)
 		dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
 
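The 44-bit figure in the patch title comes from the legacy interface programming the ring base as a 32-bit page frame number of 4 KiB pages (VIRTIO_PCI_QUEUE_ADDR_SHIFT is 12), so coherent ring memory must sit below 2^44 while streaming DMA keeps the full 64-bit mask. A small standalone check of that arithmetic, assuming the usual DMA_BIT_MASK() definition:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic the driver relies on: DMA_BIT_MASK(n) is the largest
 * address representable in n bits, and the legacy queue PFN is the ring
 * base shifted right by VIRTIO_PCI_QUEUE_ADDR_SHIFT. */
#define DMA_BIT_MASK(n)			(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT	12

int main(void)
{
	uint64_t mask = DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT);

	/* A 32-bit PFN of 4K pages covers 44 bits of physical address space. */
	printf("coherent mask = 0x%llx (%d bits)\n",
	       (unsigned long long)mask, 32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT);

	/* Highest ring base address the device can be told about. */
	printf("max ring base = 0x%llx\n",
	       (unsigned long long)(0xffffffffULL << VIRTIO_PCI_QUEUE_ADDR_SHIFT));
	return 0;
}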
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -167,7 +167,7 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
  * making all of the arch DMA ops work on the vring device itself
  * is a mess. For now, we use the parent device for DMA ops.
  */
-static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
+static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
 {
 	return vq->vq.vdev->dev.parent;
 }
@@ -732,7 +732,8 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
 
 	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
 		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
-		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+		if (!vq->event)
+			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
 	}
 
 }
@@ -764,7 +765,8 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
 	 * entry. Always do both to keep code simple. */
 	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
 		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
-		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+		if (!vq->event)
+			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
 	}
 	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
 	END_USE(vq);
@@ -832,10 +834,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
 	 * more to do. */
 	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
 	 * either clear the flags bit or point the event index at the next
-	 * entry. Always do both to keep code simple. */
+	 * entry. Always update the event index to keep code simple. */
 	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
 		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
-		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+		if (!vq->event)
+			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
 	}
 	/* TODO: tune this threshold */
 	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
@@ -953,7 +956,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
 	/* No callback? Tell other side not to bother us. */
 	if (!callback) {
 		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
-		vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
+		if (!vq->event)
+			vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
 	}
 
 	/* Put everything in free lists. */
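All four virtio_ring.c hunks above add the same guard: once VIRTIO_RING_F_EVENT_IDX is negotiated, the spec expects avail->flags to stay 0 and interrupt suppression to be driven purely by the used_event index, so the flag write is skipped whenever vq->event is set. A compressed sketch of the two paths (the field names below are invented for illustration, not the kernel's struct vring_virtqueue):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VRING_AVAIL_F_NO_INTERRUPT 1

struct vq_state {
	bool event;			/* VIRTIO_RING_F_EVENT_IDX negotiated? */
	uint16_t avail_flags_shadow;	/* last value we wrote to avail->flags */
	uint16_t avail_flags;		/* stands in for the shared avail->flags */
	uint16_t used_event;		/* stands in for the shared used_event */
	uint16_t last_used_idx;
};

/* With event idx, suppression works by simply not advancing used_event;
 * avail->flags is left alone, which is what the new "if (!vq->event)"
 * guards enforce. */
static void disable_cb(struct vq_state *vq)
{
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->avail_flags = vq->avail_flags_shadow;
	}
}

static void enable_cb(struct vq_state *vq)
{
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->avail_flags = vq->avail_flags_shadow;
	}
	/* With EVENT_IDX this write is what actually re-arms the interrupt. */
	vq->used_event = vq->last_used_idx;
}

int main(void)
{
	struct vq_state vq = { .event = true, .last_used_idx = 5 };

	disable_cb(&vq);
	enable_cb(&vq);
	printf("avail_flags=%u used_event=%u\n", vq.avail_flags, vq.used_event);
	return 0;
}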
--- a/tools/virtio/ringtest/Makefile
+++ b/tools/virtio/ringtest/Makefile
@@ -3,8 +3,8 @@ all:
 all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder ptr_ring noring
 
 CFLAGS += -Wall
-CFLAGS += -pthread -O2 -ggdb
-LDFLAGS += -pthread -O2 -ggdb
+CFLAGS += -pthread -O2 -ggdb -flto -fwhole-program
+LDFLAGS += -pthread -O2 -ggdb -flto -fwhole-program
 
 main.o: main.c main.h
 ring.o: ring.c main.h
--- a/tools/virtio/ringtest/main.c
+++ b/tools/virtio/ringtest/main.c
@@ -96,7 +96,13 @@ void set_affinity(const char *arg)
 	assert(!ret);
 }
 
-static void run_guest(void)
+void poll_used(void)
+{
+	while (used_empty())
+		busy_wait();
+}
+
+static void __attribute__((__flatten__)) run_guest(void)
 {
 	int completed_before;
 	int completed = 0;
@@ -141,7 +147,7 @@ static void run_guest(void)
 		assert(completed <= bufs);
 		assert(started <= bufs);
 		if (do_sleep) {
-			if (enable_call())
+			if (used_empty() && enable_call())
 				wait_for_call();
 		} else {
 			poll_used();
@@ -149,7 +155,13 @@ static void run_guest(void)
 	}
 }
 
-static void run_host(void)
+void poll_avail(void)
+{
+	while (avail_empty())
+		busy_wait();
+}
+
+static void __attribute__((__flatten__)) run_host(void)
 {
 	int completed_before;
 	int completed = 0;
@@ -160,7 +172,7 @@ static void run_host(void)
 
 	for (;;) {
 		if (do_sleep) {
-			if (enable_kick())
+			if (avail_empty() && enable_kick())
 				wait_for_kick();
 		} else {
 			poll_avail();
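After this change, poll_used() and poll_avail() are shared wrappers in main.c over the per-implementation used_empty()/avail_empty() predicates, and run_guest()/run_host() are marked __flatten__ so that, together with the -flto -fwhole-program flags added in the Makefile hunk, the predicate can still be inlined into the busy-wait loop. A toy demonstration of that attribute (the counter-based used_empty() below is obviously fake):

#include <stdio.h>

static int counter;

/* Pretend the used ring is empty for the first three polls. */
static int used_empty(void)
{
	return counter++ < 3;
}

static void busy_wait(void)
{
	/* spin; real code would insert a cpu-relax hint here */
}

/* flatten asks the compiler to inline every call made from this function,
 * mirroring how run_guest()/run_host() keep the poll loop call-free. */
static void __attribute__((__flatten__)) poll_used(void)
{
	while (used_empty())
		busy_wait();
}

int main(void)
{
	poll_used();
	printf("polled %d times before a buffer appeared\n", counter);
	return 0;
}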
--- a/tools/virtio/ringtest/main.h
+++ b/tools/virtio/ringtest/main.h
@@ -56,15 +56,15 @@ void alloc_ring(void);
 int add_inbuf(unsigned, void *, void *);
 void *get_buf(unsigned *, void **);
 void disable_call();
+bool used_empty();
 bool enable_call();
 void kick_available();
-void poll_used();
 /* host side */
 void disable_kick();
+bool avail_empty();
 bool enable_kick();
 bool use_buf(unsigned *, void **);
 void call_used();
-void poll_avail();
 
 /* implemented by main */
 extern bool do_sleep;
--- a/tools/virtio/ringtest/noring.c
+++ b/tools/virtio/ringtest/noring.c
@@ -24,8 +24,9 @@ void *get_buf(unsigned *lenp, void **bufp)
 	return "Buffer";
 }
 
-void poll_used(void)
+bool used_empty()
 {
+	return false;
 }
 
 void disable_call()
@@ -54,8 +55,9 @@ bool enable_kick()
 	assert(0);
 }
 
-void poll_avail(void)
+bool avail_empty()
 {
+	return false;
 }
 
 bool use_buf(unsigned *lenp, void **bufp)
--- a/tools/virtio/ringtest/ptr_ring.c
+++ b/tools/virtio/ringtest/ptr_ring.c
@@ -133,18 +133,9 @@ void *get_buf(unsigned *lenp, void **bufp)
 	return datap;
 }
 
-void poll_used(void)
+bool used_empty()
 {
-	void *b;
-
-	do {
-		if (tailcnt == headcnt || __ptr_ring_full(&array)) {
-			b = NULL;
-			barrier();
-		} else {
-			b = "Buffer\n";
-		}
-	} while (!b);
+	return (tailcnt == headcnt || __ptr_ring_full(&array));
 }
 
 void disable_call()
@@ -173,14 +164,9 @@ bool enable_kick()
 	assert(0);
 }
 
-void poll_avail(void)
+bool avail_empty()
 {
-	void *b;
-
-	do {
-		barrier();
-		b = __ptr_ring_peek(&array);
-	} while (!b);
+	return !__ptr_ring_peek(&array);
 }
 
 bool use_buf(unsigned *lenp, void **bufp)
--- a/tools/virtio/ringtest/ring.c
+++ b/tools/virtio/ringtest/ring.c
@@ -163,12 +163,11 @@ void *get_buf(unsigned *lenp, void **bufp)
 	return datap;
 }
 
-void poll_used(void)
+bool used_empty()
 {
 	unsigned head = (ring_size - 1) & guest.last_used_idx;
 
-	while (ring[head].flags & DESC_HW)
-		busy_wait();
+	return (ring[head].flags & DESC_HW);
 }
 
 void disable_call()
@@ -180,13 +179,11 @@ void disable_call()
 
 bool enable_call()
 {
-	unsigned head = (ring_size - 1) & guest.last_used_idx;
-
 	event->call_index = guest.last_used_idx;
 	/* Flush call index write */
 	/* Barrier D (for pairing) */
 	smp_mb();
-	return ring[head].flags & DESC_HW;
+	return used_empty();
 }
 
 void kick_available(void)
@@ -213,20 +210,17 @@ void disable_kick()
 
 bool enable_kick()
 {
-	unsigned head = (ring_size - 1) & host.used_idx;
-
 	event->kick_index = host.used_idx;
 	/* Barrier C (for pairing) */
 	smp_mb();
-	return !(ring[head].flags & DESC_HW);
+	return avail_empty();
 }
 
-void poll_avail(void)
+bool avail_empty()
 {
 	unsigned head = (ring_size - 1) & host.used_idx;
 
-	while (!(ring[head].flags & DESC_HW))
-		busy_wait();
+	return !(ring[head].flags & DESC_HW);
 }
 
 bool use_buf(unsigned *lenp, void **bufp)
--- a/tools/virtio/ringtest/virtio_ring_0_9.c
+++ b/tools/virtio/ringtest/virtio_ring_0_9.c
@@ -194,24 +194,16 @@ void *get_buf(unsigned *lenp, void **bufp)
 	return datap;
 }
 
-void poll_used(void)
+bool used_empty()
 {
+	unsigned short last_used_idx = guest.last_used_idx;
 #ifdef RING_POLL
-	unsigned head = (ring_size - 1) & guest.last_used_idx;
+	unsigned short head = last_used_idx & (ring_size - 1);
+	unsigned index = ring.used->ring[head].id;
 
-	for (;;) {
-		unsigned index = ring.used->ring[head].id;
-
-		if ((index ^ guest.last_used_idx ^ 0x8000) & ~(ring_size - 1))
-			busy_wait();
-		else
-			break;
-	}
+	return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1);
 #else
-	unsigned head = guest.last_used_idx;
-
-	while (ring.used->idx == head)
-		busy_wait();
+	return ring.used->idx == last_used_idx;
 #endif
 }
 
@@ -224,22 +216,11 @@ void disable_call()
 
 bool enable_call()
 {
-	unsigned short last_used_idx;
-
-	vring_used_event(&ring) = (last_used_idx = guest.last_used_idx);
+	vring_used_event(&ring) = guest.last_used_idx;
 	/* Flush call index write */
 	/* Barrier D (for pairing) */
 	smp_mb();
-#ifdef RING_POLL
-	{
-		unsigned short head = last_used_idx & (ring_size - 1);
-		unsigned index = ring.used->ring[head].id;
-
-		return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1);
-	}
-#else
-	return ring.used->idx == last_used_idx;
-#endif
+	return used_empty();
 }
 
 void kick_available(void)
@@ -266,36 +247,21 @@ void disable_kick()
 
 bool enable_kick()
 {
-	unsigned head = host.used_idx;
-
-	vring_avail_event(&ring) = head;
+	vring_avail_event(&ring) = host.used_idx;
 	/* Barrier C (for pairing) */
 	smp_mb();
-#ifdef RING_POLL
-	{
-		unsigned index = ring.avail->ring[head & (ring_size - 1)];
-
-		return (index ^ head ^ 0x8000) & ~(ring_size - 1);
-	}
-#else
-	return head == ring.avail->idx;
-#endif
+	return avail_empty();
 }
 
-void poll_avail(void)
+bool avail_empty()
 {
 	unsigned head = host.used_idx;
 #ifdef RING_POLL
-	for (;;) {
-		unsigned index = ring.avail->ring[head & (ring_size - 1)];
-		if ((index ^ head ^ 0x8000) & ~(ring_size - 1))
-			busy_wait();
-		else
-			break;
-	}
+	unsigned index = ring.avail->ring[head & (ring_size - 1)];
+
+	return ((index ^ head ^ 0x8000) & ~(ring_size - 1));
 #else
-	while (ring.avail->idx == head)
-		busy_wait();
+	return head == ring.avail->idx;
 #endif
 }
 
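In the RING_POLL variants above, used_empty() and avail_empty() treat the entry at the current slot as new only when the bits of its id above the slot index equal the consumer's counter with the 0x8000 phase bit flipped; an id left over from the previous lap fails that comparison. A standalone evaluation of the expression with made-up numbers (ring_size and the id values are purely illustrative):

#include <stdio.h>

int main(void)
{
	unsigned ring_size = 256;			/* power of two, as in the test */
	unsigned short last_used_idx = 0x0102;		/* consumer position */
	unsigned short head = last_used_idx & (ring_size - 1);	/* slot 0x02 */

	unsigned fresh = 0x8102;	/* upper bits match 0x0102 ^ 0x8000 */
	unsigned stale = 0x0102;	/* entry left over from the previous lap */

	printf("head slot = 0x%02x\n", head);
	printf("fresh id -> empty? %s\n",
	       ((fresh ^ last_used_idx ^ 0x8000) & ~(ring_size - 1)) ? "yes" : "no");
	printf("stale id -> empty? %s\n",
	       ((stale ^ last_used_idx ^ 0x8000) & ~(ring_size - 1)) ? "yes" : "no");
	return 0;
}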