vmbus: refactor hv_signal_on_read
The function hv_signal_on_read was defined in hyperv.h and only used
in one place in ring_buffer code. Clearer to just move it inline there.

Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 8dd45f2ab0
parent 95c40f41cf
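Not part of the commit: a minimal consumer-side sketch of how a driver might walk the inbound ring with the packet-iterator API touched here, assuming the signatures visible in the diff (hv_pkt_iter_first, __hv_pkt_iter_next, hv_pkt_iter_close) and a hypothetical callback name. After this change, hv_pkt_iter_close() is the single place where the read index is committed and the host is signaled if it was blocked waiting for ring space.

/* Hypothetical example, not from this commit: drain the inbound ring. */
#include <linux/hyperv.h>

static void example_chan_callback(void *context)
{
	struct vmbus_channel *channel = context;
	struct vmpacket_descriptor *pkt;

	/* hv_pkt_iter_first() caches the read index for the later close */
	for (pkt = hv_pkt_iter_first(channel);
	     pkt;
	     pkt = __hv_pkt_iter_next(channel, pkt)) {
		/* payload layout is device specific; handle one packet here */
	}

	/*
	 * Commit the new read index and, if the host advertised a
	 * pending_send_sz, decide whether to signal it -- the logic this
	 * commit inlines from hv_signal_on_read().
	 */
	hv_pkt_iter_close(channel);
}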
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -29,6 +29,7 @@
 #include <linux/uio.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 
 #include "hyperv_vmbus.h"
 
@@ -357,7 +358,7 @@ struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
 {
 	struct hv_ring_buffer_info *rbi = &channel->inbound;
 
-	/* set state for later hv_signal_on_read() */
+	/* set state for later hv_pkt_iter_close */
 	rbi->cached_read_index = rbi->ring_buffer->read_index;
 
 	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
@@ -400,6 +401,8 @@ EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
 void hv_pkt_iter_close(struct vmbus_channel *channel)
 {
 	struct hv_ring_buffer_info *rbi = &channel->inbound;
+	u32 cur_write_sz, cached_write_sz;
+	u32 pending_sz;
 
 	/*
 	 * Make sure all reads are done before we update the read index since
@@ -409,6 +412,31 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
 	virt_rmb();
 	rbi->ring_buffer->read_index = rbi->priv_read_index;
 
-	hv_signal_on_read(channel);
+	/*
+	 * Issue a full memory barrier before making the signaling decision.
+	 * Here is the reason for having this barrier:
+	 * If the reading of the pend_sz (in this function)
+	 * were to be reordered and read before we commit the new read
+	 * index (in the calling function) we could
+	 * have a problem. If the host were to set the pending_sz after we
+	 * have sampled pending_sz and go to sleep before we commit the
+	 * read index, we could miss sending the interrupt. Issue a full
+	 * memory barrier to address this.
+	 */
+	virt_mb();
+
+	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
+	/* If the other end is not blocked on write don't bother. */
+	if (pending_sz == 0)
+		return;
+
+	cur_write_sz = hv_get_bytes_to_write(rbi);
+
+	if (cur_write_sz < pending_sz)
+		return;
+
+	cached_write_sz = hv_get_cached_bytes_to_write(rbi);
+	if (cached_write_sz < pending_sz)
+		vmbus_setevent(channel);
 }
 EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1471,55 +1471,6 @@ hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
 	return ring_info->ring_buffer->buffer;
 }
 
-/*
- * To optimize the flow management on the send-side,
- * when the sender is blocked because of lack of
- * sufficient space in the ring buffer, potential the
- * consumer of the ring buffer can signal the producer.
- * This is controlled by the following parameters:
- *
- * 1. pending_send_sz: This is the size in bytes that the
- *    producer is trying to send.
- * 2. The feature bit feat_pending_send_sz set to indicate if
- *    the consumer of the ring will signal when the ring
- *    state transitions from being full to a state where
- *    there is room for the producer to send the pending packet.
- */
-
-static inline void hv_signal_on_read(struct vmbus_channel *channel)
-{
-	u32 cur_write_sz, cached_write_sz;
-	u32 pending_sz;
-	struct hv_ring_buffer_info *rbi = &channel->inbound;
-
-	/*
-	 * Issue a full memory barrier before making the signaling decision.
-	 * Here is the reason for having this barrier:
-	 * If the reading of the pend_sz (in this function)
-	 * were to be reordered and read before we commit the new read
-	 * index (in the calling function) we could
-	 * have a problem. If the host were to set the pending_sz after we
-	 * have sampled pending_sz and go to sleep before we commit the
-	 * read index, we could miss sending the interrupt. Issue a full
-	 * memory barrier to address this.
-	 */
-	virt_mb();
-
-	pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
-	/* If the other end is not blocked on write don't bother. */
-	if (pending_sz == 0)
-		return;
-
-	cur_write_sz = hv_get_bytes_to_write(rbi);
-
-	if (cur_write_sz < pending_sz)
-		return;
-
-	cached_write_sz = hv_get_cached_bytes_to_write(rbi);
-	if (cached_write_sz < pending_sz)
-		vmbus_setevent(channel);
-}
-
 /*
  * Mask off host interrupt callback notifications
  */