vringh: update for virtio 1.0 APIs
When switching everything over to virtio 1.0 memory access APIs, I missed
converting vringh. Fortunately, it's straight-forward.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent b97a8a9006
commit b9f7ac8c72
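The change threads a "const struct vringh *vrh" argument through every ring
accessor (getu16/putu16 and friends), retypes on-ring fields as
__virtio16/__virtio32/__virtio64, and funnels each access through new
vringh{16,32,64}_to_cpu() / cpu_to_vringh{16,32,64}() helpers keyed off a
little_endian flag that vringh_init_user() and vringh_init_kern() latch from
the VIRTIO_F_VERSION_1 feature bit. A minimal userspace model of that
latch-and-convert pattern follows; it assumes glibc's <endian.h> macros, and
the struct and helper names are illustrative stand-ins, not the kernel's:

/* Model of the endianness latch this patch adds to vringh_init_*().
 * VIRTIO_F_VERSION_1 is feature bit 32, per the virtio spec. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <endian.h>   /* le16toh/be16toh/htole16 (glibc) */

#define VIRTIO_F_VERSION_1 32

struct vringh_model {
        bool little_endian;   /* latched once from the feature word */
};

static void model_init(struct vringh_model *vrh, uint64_t features)
{
        /* Same test the patch adds to vringh_init_user()/_kern(). */
        vrh->little_endian = features & (1ULL << VIRTIO_F_VERSION_1);
}

/* Counterpart of the new vringh16_to_cpu(): VERSION_1 rings are always
 * little endian; a legacy ring is modeled here as big endian (i.e. the
 * guest-native order on a big endian host). */
static uint16_t model16_to_cpu(const struct vringh_model *vrh, uint16_t wire)
{
        return vrh->little_endian ? le16toh(wire) : be16toh(wire);
}

int main(void)
{
        struct vringh_model legacy, modern;
        uint16_t wire = htole16(0x0102);   /* as a 1.0 device wrote it */

        model_init(&legacy, 0);
        model_init(&modern, 1ULL << VIRTIO_F_VERSION_1);

        printf("modern: 0x%04x  legacy: 0x%04x\n",
               model16_to_cpu(&modern, wire),
               model16_to_cpu(&legacy, wire));
        return 0;
}

With VIRTIO_F_VERSION_1 negotiated, the ring is always little endian; without
it, values stay in the legacy guest-native order, which the model above treats
as big endian (the interesting case for a big endian host).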
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -11,6 +11,7 @@
 #include <linux/uaccess.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <uapi/linux/virtio_config.h>
 
 static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
 {
@@ -28,13 +29,14 @@ static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
 
 /* Returns vring->num if empty, -ve on error. */
 static inline int __vringh_get_head(const struct vringh *vrh,
-                                    int (*getu16)(u16 *val, const u16 *p),
+                                    int (*getu16)(const struct vringh *vrh,
+                                                  u16 *val, const __virtio16 *p),
                                     u16 *last_avail_idx)
 {
         u16 avail_idx, i, head;
         int err;
 
-        err = getu16(&avail_idx, &vrh->vring.avail->idx);
+        err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
         if (err) {
                 vringh_bad("Failed to access avail idx at %p",
                            &vrh->vring.avail->idx);
@@ -49,7 +51,7 @@ static inline int __vringh_get_head(const struct vringh *vrh,
 
         i = *last_avail_idx & (vrh->vring.num - 1);
 
-        err = getu16(&head, &vrh->vring.avail->ring[i]);
+        err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
         if (err) {
                 vringh_bad("Failed to read head: idx %d address %p",
                            *last_avail_idx, &vrh->vring.avail->ring[i]);
@@ -144,28 +146,32 @@ static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
 }
 
 /* No reason for this code to be inline. */
-static int move_to_indirect(int *up_next, u16 *i, void *addr,
+static int move_to_indirect(const struct vringh *vrh,
+                            int *up_next, u16 *i, void *addr,
                             const struct vring_desc *desc,
                             struct vring_desc **descs, int *desc_max)
 {
+        u32 len;
+
         /* Indirect tables can't have indirect. */
         if (*up_next != -1) {
                 vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
                 return -EINVAL;
         }
 
-        if (unlikely(desc->len % sizeof(struct vring_desc))) {
+        len = vringh32_to_cpu(vrh, desc->len);
+        if (unlikely(len % sizeof(struct vring_desc))) {
                 vringh_bad("Strange indirect len %u", desc->len);
                 return -EINVAL;
         }
 
         /* We will check this when we follow it! */
-        if (desc->flags & VRING_DESC_F_NEXT)
-                *up_next = desc->next;
+        if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
+                *up_next = vringh16_to_cpu(vrh, desc->next);
         else
                 *up_next = -2;
         *descs = addr;
-        *desc_max = desc->len / sizeof(struct vring_desc);
+        *desc_max = len / sizeof(struct vring_desc);
 
         /* Now, start at the first indirect. */
         *i = 0;
@@ -287,22 +293,25 @@ __vringh_iov(struct vringh *vrh, u16 i,
                 if (unlikely(err))
                         goto fail;
 
-                if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
+                if (unlikely(desc.flags &
+                             cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
+                        u64 a = vringh64_to_cpu(vrh, desc.addr);
+
                         /* Make sure it's OK, and get offset. */
-                        len = desc.len;
-                        if (!rcheck(vrh, desc.addr, &len, &range, getrange)) {
+                        len = vringh32_to_cpu(vrh, desc.len);
+                        if (!rcheck(vrh, a, &len, &range, getrange)) {
                                 err = -EINVAL;
                                 goto fail;
                         }
 
-                        if (unlikely(len != desc.len)) {
+                        if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
                                 slow = true;
                                 /* We need to save this range to use offset */
                                 slowrange = range;
                         }
 
-                        addr = (void *)(long)(desc.addr + range.offset);
-                        err = move_to_indirect(&up_next, &i, addr, &desc,
+                        addr = (void *)(long)(a + range.offset);
+                        err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
                                                &descs, &desc_max);
                         if (err)
                                 goto fail;
@@ -315,7 +324,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
                         goto fail;
                 }
 
-                if (desc.flags & VRING_DESC_F_WRITE)
+                if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
                         iov = wiov;
                 else {
                         iov = riov;
@@ -336,12 +345,14 @@ __vringh_iov(struct vringh *vrh, u16 i,
 
         again:
                 /* Make sure it's OK, and get offset. */
-                len = desc.len;
-                if (!rcheck(vrh, desc.addr, &len, &range, getrange)) {
+                len = vringh32_to_cpu(vrh, desc.len);
+                if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
+                            getrange)) {
                         err = -EINVAL;
                         goto fail;
                 }
-                addr = (void *)(unsigned long)(desc.addr + range.offset);
+                addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
+                                               range.offset);
 
                 if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
                         err = resize_iovec(iov, gfp);
@@ -353,14 +364,16 @@ __vringh_iov(struct vringh *vrh, u16 i,
                 iov->iov[iov->used].iov_len = len;
                 iov->used++;
 
-                if (unlikely(len != desc.len)) {
-                        desc.len -= len;
-                        desc.addr += len;
+                if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
+                        desc.len = cpu_to_vringh32(vrh,
+                                                   vringh32_to_cpu(vrh, desc.len) - len);
+                        desc.addr = cpu_to_vringh64(vrh,
+                                                    vringh64_to_cpu(vrh, desc.addr) + len);
                         goto again;
                 }
 
-                if (desc.flags & VRING_DESC_F_NEXT) {
-                        i = desc.next;
+                if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
+                        i = vringh16_to_cpu(vrh, desc.next);
                 } else {
                         /* Just in case we need to finish traversing above. */
                         if (unlikely(up_next > 0)) {
@@ -387,7 +400,8 @@ fail:
 static inline int __vringh_complete(struct vringh *vrh,
                                     const struct vring_used_elem *used,
                                     unsigned int num_used,
-                                    int (*putu16)(u16 *p, u16 val),
+                                    int (*putu16)(const struct vringh *vrh,
+                                                  __virtio16 *p, u16 val),
                                     int (*putused)(struct vring_used_elem *dst,
                                                    const struct vring_used_elem
                                                    *src, unsigned num))
@@ -420,7 +434,7 @@ static inline int __vringh_complete(struct vringh *vrh,
         /* Make sure buffer is written before we update index. */
         virtio_wmb(vrh->weak_barriers);
 
-        err = putu16(&vrh->vring.used->idx, used_idx + num_used);
+        err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
         if (err) {
                 vringh_bad("Failed to update used index at %p",
                            &vrh->vring.used->idx);
@@ -433,7 +447,9 @@ static inline int __vringh_complete(struct vringh *vrh,
 
 
 static inline int __vringh_need_notify(struct vringh *vrh,
-                                       int (*getu16)(u16 *val, const u16 *p))
+                                       int (*getu16)(const struct vringh *vrh,
+                                                     u16 *val,
+                                                     const __virtio16 *p))
 {
         bool notify;
         u16 used_event;
@@ -447,7 +463,7 @@ static inline int __vringh_need_notify(struct vringh *vrh,
         /* Old-style, without event indices. */
         if (!vrh->event_indices) {
                 u16 flags;
-                err = getu16(&flags, &vrh->vring.avail->flags);
+                err = getu16(vrh, &flags, &vrh->vring.avail->flags);
                 if (err) {
                         vringh_bad("Failed to get flags at %p",
                                    &vrh->vring.avail->flags);
@@ -457,7 +473,7 @@ static inline int __vringh_need_notify(struct vringh *vrh,
         }
 
         /* Modern: we know when other side wants to know. */
-        err = getu16(&used_event, &vring_used_event(&vrh->vring));
+        err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
         if (err) {
                 vringh_bad("Failed to get used event idx at %p",
                            &vring_used_event(&vrh->vring));
@@ -478,20 +494,22 @@ static inline int __vringh_need_notify(struct vringh *vrh,
 }
 
 static inline bool __vringh_notify_enable(struct vringh *vrh,
-                                          int (*getu16)(u16 *val, const u16 *p),
-                                          int (*putu16)(u16 *p, u16 val))
+                                          int (*getu16)(const struct vringh *vrh,
+                                                        u16 *val, const __virtio16 *p),
+                                          int (*putu16)(const struct vringh *vrh,
+                                                        __virtio16 *p, u16 val))
 {
         u16 avail;
 
         if (!vrh->event_indices) {
                 /* Old-school; update flags. */
-                if (putu16(&vrh->vring.used->flags, 0) != 0) {
+                if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
                         vringh_bad("Clearing used flags %p",
                                    &vrh->vring.used->flags);
                         return true;
                 }
         } else {
-                if (putu16(&vring_avail_event(&vrh->vring),
+                if (putu16(vrh, &vring_avail_event(&vrh->vring),
                            vrh->last_avail_idx) != 0) {
                         vringh_bad("Updating avail event index %p",
                                    &vring_avail_event(&vrh->vring));
@@ -503,7 +521,7 @@ static inline bool __vringh_notify_enable(struct vringh *vrh,
          * sure it's written, then check again. */
         virtio_mb(vrh->weak_barriers);
 
-        if (getu16(&avail, &vrh->vring.avail->idx) != 0) {
+        if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
                 vringh_bad("Failed to check avail idx at %p",
                            &vrh->vring.avail->idx);
                 return true;
@@ -516,11 +534,13 @@ static inline bool __vringh_notify_enable(struct vringh *vrh,
 }
 
 static inline void __vringh_notify_disable(struct vringh *vrh,
-                                           int (*putu16)(u16 *p, u16 val))
+                                           int (*putu16)(const struct vringh *vrh,
+                                                         __virtio16 *p, u16 val))
 {
         if (!vrh->event_indices) {
                 /* Old-school; update flags. */
-                if (putu16(&vrh->vring.used->flags, VRING_USED_F_NO_NOTIFY)) {
+                if (putu16(vrh, &vrh->vring.used->flags,
+                           VRING_USED_F_NO_NOTIFY)) {
                         vringh_bad("Setting used flags %p",
                                    &vrh->vring.used->flags);
                 }
@@ -528,14 +548,18 @@ static inline void __vringh_notify_disable(struct vringh *vrh,
 }
 
 /* Userspace access helpers: in this case, addresses are really userspace. */
-static inline int getu16_user(u16 *val, const u16 *p)
+static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
 {
-        return get_user(*val, (__force u16 __user *)p);
+        __virtio16 v = 0;
+        int rc = get_user(v, (__force __virtio16 __user *)p);
+        *val = vringh16_to_cpu(vrh, v);
+        return rc;
 }
 
-static inline int putu16_user(u16 *p, u16 val)
+static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
 {
-        return put_user(val, (__force u16 __user *)p);
+        __virtio16 v = cpu_to_vringh16(vrh, val);
+        return put_user(v, (__force __virtio16 __user *)p);
 }
 
 static inline int copydesc_user(void *dst, const void *src, size_t len)
@@ -589,6 +613,7 @@ int vringh_init_user(struct vringh *vrh, u64 features,
                 return -EINVAL;
         }
 
+        vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
         vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
         vrh->weak_barriers = weak_barriers;
         vrh->completed = 0;
@@ -729,8 +754,8 @@ int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
 {
         struct vring_used_elem used;
 
-        used.id = head;
-        used.len = len;
+        used.id = cpu_to_vringh32(vrh, head);
+        used.len = cpu_to_vringh32(vrh, len);
         return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
 }
 EXPORT_SYMBOL(vringh_complete_user);
@@ -792,15 +817,16 @@ int vringh_need_notify_user(struct vringh *vrh)
 EXPORT_SYMBOL(vringh_need_notify_user);
 
 /* Kernelspace access helpers. */
-static inline int getu16_kern(u16 *val, const u16 *p)
+static inline int getu16_kern(const struct vringh *vrh,
+                              u16 *val, const __virtio16 *p)
 {
-        *val = ACCESS_ONCE(*p);
+        *val = vringh16_to_cpu(vrh, ACCESS_ONCE(*p));
         return 0;
 }
 
-static inline int putu16_kern(u16 *p, u16 val)
+static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
 {
-        ACCESS_ONCE(*p) = val;
+        ACCESS_ONCE(*p) = cpu_to_vringh16(vrh, val);
         return 0;
 }
 
@@ -848,6 +874,7 @@ int vringh_init_kern(struct vringh *vrh, u64 features,
                 return -EINVAL;
         }
 
+        vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
         vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
         vrh->weak_barriers = weak_barriers;
         vrh->completed = 0;
@@ -962,8 +989,8 @@ int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
 {
         struct vring_used_elem used;
 
-        used.id = head;
-        used.len = len;
+        used.id = cpu_to_vringh32(vrh, head);
+        used.len = cpu_to_vringh32(vrh, len);
 
         return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
 }
--- a/include/linux/vringh.h
+++ b/include/linux/vringh.h
@@ -24,12 +24,16 @@
 #ifndef _LINUX_VRINGH_H
 #define _LINUX_VRINGH_H
 #include <uapi/linux/virtio_ring.h>
+#include <linux/virtio_byteorder.h>
 #include <linux/uio.h>
 #include <linux/slab.h>
 #include <asm/barrier.h>
 
 /* virtio_ring with information needed for host access. */
 struct vringh {
+        /* Everything is little endian */
+        bool little_endian;
+
         /* Guest publishes used event idx (note: we always do). */
         bool event_indices;
 
@@ -222,4 +226,33 @@ static inline void vringh_notify(struct vringh *vrh)
         vrh->notify(vrh);
 }
 
+static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val)
+{
+        return __virtio16_to_cpu(vrh->little_endian, val);
+}
+
+static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val)
+{
+        return __cpu_to_virtio16(vrh->little_endian, val);
+}
+
+static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val)
+{
+        return __virtio32_to_cpu(vrh->little_endian, val);
+}
+
+static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val)
+{
+        return __cpu_to_virtio32(vrh->little_endian, val);
+}
+
+static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val)
+{
+        return __virtio64_to_cpu(vrh->little_endian, val);
+}
+
+static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
+{
+        return __cpu_to_virtio64(vrh->little_endian, val);
+}
 #endif /* _LINUX_VRINGH_H */
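A worked example of the getu16_user() change above: the raw __virtio16 is read
into a temporary first, and the byte-order conversion happens only after the
copy has succeeded, so the fault-handling path stays the same for either
endianness. A hedged userspace analogue, with memcpy() standing in for
get_user() and all names illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <endian.h>

typedef uint16_t __virtio16;   /* stand-in for the kernel's bitwise type */

struct vringh { int little_endian; };

static uint16_t vringh16_to_cpu(const struct vringh *vrh, __virtio16 v)
{
        return vrh->little_endian ? le16toh(v) : be16toh(v);
}

static int getu16_user(const struct vringh *vrh, uint16_t *val,
                       const __virtio16 *p)
{
        __virtio16 v = 0;
        /* The kernel does: rc = get_user(v, p), which can fault;
         * a plain copy suffices for this model. */
        memcpy(&v, p, sizeof(v));
        *val = vringh16_to_cpu(vrh, v);
        return 0;
}

int main(void)
{
        struct vringh vrh = { .little_endian = 1 };   /* VERSION_1 ring */
        __virtio16 wire = htole16(0xabcd);
        uint16_t idx;

        getu16_user(&vrh, &idx, &wire);
        printf("avail idx = 0x%04x\n", idx);   /* 0xabcd on any host */
        return 0;
}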