mirror of https://github.com/edk2-porting/linux-next.git
vmci: simplify qp_dequeue_locked()
* no need for callback argument - it's always the same one
* fold __qp_memcpy_from_queue() into its only caller, get rid of dead code
* pass struct iov_iter * without casting to void *
* don't pass buf_size at all - it's always iov_iter_count(to)

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
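The last two bullets capture the core of the cleanup: once the callee receives the struct iov_iter itself, the iterator carries both the destination and the remaining byte count. A minimal sketch of that pattern, with hypothetical demo_* names (iov_iter_count() and copy_to_iter() are the real kernel APIs the patch relies on):

#include <linux/uio.h>
#include <linux/errno.h>

/*
 * Illustrative only: take the iov_iter directly, derive the size from
 * it, and treat a short copy_to_iter() as a hard error, mirroring what
 * qp_memcpy_from_queue_iter() does in the diff below.
 */
static int demo_copy_out(struct iov_iter *to, const void *src)
{
	size_t len = iov_iter_count(to);	/* replaces a buf_size argument */

	/* copy_to_iter() returns the number of bytes actually copied */
	if (copy_to_iter(src, len, to) != len)
		return -EFAULT;
	return 0;
}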
commit 53f58d8ed8
parent ce3d6e7d42
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -142,9 +142,6 @@
 typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
				      u64 queue_offset, const void *src,
				      size_t src_offset, size_t size);
-typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
-					const struct vmci_queue *queue,
-					u64 queue_offset, size_t size);
 
 /* The Kernel specific component of the struct vmci_queue structure. */
 struct vmci_queue_kern_if {
@@ -411,11 +408,9 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
  * by traversing the offset -> page translation structure for the queue.
  * Assumes that offset + size does not wrap around in the queue.
  */
-static int __qp_memcpy_from_queue(void *dest,
-				  const struct vmci_queue *queue,
-				  u64 queue_offset,
-				  size_t size,
-				  bool is_iovec)
+static int qp_memcpy_from_queue_iter(struct iov_iter *to,
+				     const struct vmci_queue *queue,
+				     u64 queue_offset, size_t size)
 {
	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;
@@ -427,6 +422,7 @@ static int __qp_memcpy_from_queue(void *dest,
			(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;
+		int err;
 
		if (kernel_if->host)
			va = kmap(kernel_if->u.h.page[page_index]);
@@ -440,21 +436,12 @@ static int __qp_memcpy_from_queue(void *dest,
		else
			to_copy = size - bytes_copied;
 
-		if (is_iovec) {
-			struct iov_iter *to = dest;
-			int err;
-
-			err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
-			if (err != to_copy) {
-				if (kernel_if->host)
-					kunmap(kernel_if->u.h.page[page_index]);
-				return VMCI_ERROR_INVALID_ARGS;
-			}
-		} else {
-			memcpy((u8 *)dest + bytes_copied,
-			       (u8 *)va + page_offset, to_copy);
+		err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
+		if (err != to_copy) {
+			if (kernel_if->host)
+				kunmap(kernel_if->u.h.page[page_index]);
+			return VMCI_ERROR_INVALID_ARGS;
		}
-
		bytes_copied += to_copy;
		if (kernel_if->host)
			kunmap(kernel_if->u.h.page[page_index]);
@@ -591,21 +578,6 @@ static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
	return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true);
 }
 
-/*
- * Copies to a given iovec from a VMCI Queue.
- */
-static int qp_memcpy_from_queue_iov(void *dest,
-				    size_t dest_offset,
-				    const struct vmci_queue *queue,
-				    u64 queue_offset, size_t size)
-{
-	/*
-	 * We ignore dest_offset because dest is really a struct iovec * and
-	 * will maintain offset internally.
-	 */
-	return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true);
-}
-
 /*
  * Allocates kernel VA space of specified size plus space for the queue
  * and kernel interface. This is different from the guest queue allocator,
@@ -2679,11 +2651,10 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
 static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
				 struct vmci_queue *consume_q,
				 const u64 consume_q_size,
-				 void *buf,
-				 size_t buf_size,
-				 vmci_memcpy_from_queue_func memcpy_from_queue,
+				 struct iov_iter *to,
				 bool update_consumer)
 {
+	size_t buf_size = iov_iter_count(to);
	s64 buf_ready;
	u64 head;
	size_t read;
@@ -2705,15 +2676,15 @@ static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
	read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
	head = vmci_q_header_consumer_head(produce_q->q_header);
	if (likely(head + read < consume_q_size)) {
-		result = memcpy_from_queue(buf, 0, consume_q, head, read);
+		result = qp_memcpy_from_queue_iter(to, consume_q, head, read);
	} else {
		/* Head pointer wraps around. */
 
		const size_t tmp = (size_t) (consume_q_size - head);
 
-		result = memcpy_from_queue(buf, 0, consume_q, head, tmp);
+		result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp);
		if (result >= VMCI_SUCCESS)
-			result = memcpy_from_queue(buf, tmp, consume_q, 0,
-						   read - tmp);
+			result = qp_memcpy_from_queue_iter(to, consume_q, 0,
							   read - tmp);
 
	}
@@ -3162,8 +3133,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
	result = qp_dequeue_locked(qpair->produce_q,
				   qpair->consume_q,
				   qpair->consume_q_size,
-				   &to, buf_size,
-				   qp_memcpy_from_queue_iov, true);
+				   &to, true);
 
	if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
	    !qp_wait_for_ready_queue(qpair))
@@ -3208,8 +3178,7 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
	result = qp_dequeue_locked(qpair->produce_q,
				   qpair->consume_q,
				   qpair->consume_q_size,
-				   &to, buf_size,
-				   qp_memcpy_from_queue_iov, false);
+				   &to, false);
 
	if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
	    !qp_wait_for_ready_queue(qpair))
@@ -3292,9 +3261,7 @@ ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
	result = qp_dequeue_locked(qpair->produce_q,
				   qpair->consume_q,
				   qpair->consume_q_size,
-				   &msg->msg_iter, msg_data_left(msg),
-				   qp_memcpy_from_queue_iov,
-				   true);
+				   &msg->msg_iter, true);
 
	if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
	    !qp_wait_for_ready_queue(qpair))
@@ -3336,9 +3303,7 @@ ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
	result = qp_dequeue_locked(qpair->produce_q,
				   qpair->consume_q,
				   qpair->consume_q_size,
-				   &msg->msg_iter, msg_data_left(msg),
-				   qp_memcpy_from_queue_iov,
-				   false);
+				   &msg->msg_iter, false);
 
	if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
	    !qp_wait_for_ready_queue(qpair))
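Note that the exported qpair API is untouched: vmci_qpair_dequeue() and friends still take a plain buffer and length, and build the iov_iter internally before calling qp_dequeue_locked(). A minimal caller-side sketch under that assumption (the qpair handle and the demo_drain() wrapper are illustrative; the final buf_type argument is typically 0):

#include <linux/vmw_vmci_api.h>
#include <linux/printk.h>

static void demo_drain(struct vmci_qp *qpair)
{
	u8 data[64];
	/* returns bytes dequeued, or a negative VMCI error code */
	ssize_t n = vmci_qpair_dequeue(qpair, data, sizeof(data), 0);

	if (n < 0)
		pr_err("vmci dequeue failed: %zd\n", n);
}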