Merge remote-tracking branch 'remotes/kraxel/tags/pull-vga-20160606-1' into staging

virtio-gpu: scanout fix, live migration support
vmsvga: security fixes

# gpg: Signature made Mon 06 Jun 2016 08:05:00 BST
# gpg:                using RSA key 0x4CB6D8EED3E87138
# gpg: Good signature from "Gerd Hoffmann (work) <kraxel@redhat.com>"
# gpg:                 aka "Gerd Hoffmann <gerd@kraxel.org>"
# gpg:                 aka "Gerd Hoffmann (private) <kraxel@gmail.com>"

* remotes/kraxel/tags/pull-vga-20160606-1:
  virtio-gpu: add live migration support
  vmsvga: don't process more than 1024 fifo commands at once
  vmsvga: shadow fifo registers
  vmsvga: add more fifo checks
  vmsvga: move fifo sanity checks to vmsvga_fifo_length
  virtio-gpu: fix scanout rectangles

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit e854d0cf78
Peter Maydell <peter.maydell@linaro.org>, 2016-06-06 13:58:24 +01:00
5 changed files with 252 additions and 46 deletions

--- a/hw/display/virtio-gpu-3d.c
+++ b/hw/display/virtio-gpu-3d.c
@@ -284,7 +284,7 @@ static void virgl_resource_attach_backing(VirtIOGPU *g,
     VIRTIO_GPU_FILL_CMD(att_rb);
     trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);
 
-    ret = virtio_gpu_create_mapping_iov(&att_rb, cmd, &res_iovs);
+    ret = virtio_gpu_create_mapping_iov(&att_rb, cmd, NULL, &res_iovs);
     if (ret != 0) {
         cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
         return;

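The hunk above only adapts the virgl (3D) call site to the widened virtio_gpu_create_mapping_iov() prototype: it passes NULL for the new addr argument, since the virgl configuration stays unmigratable (see the vmstate_virtio_gpu_unmigratable registration further down) and therefore never needs the guest physical addresses recorded. A minimal stand-alone sketch of the same optional out-array idiom, using illustrative names and plain libc in place of the QEMU/GLib helpers:

#include <stdint.h>
#include <stdlib.h>

/* Illustrative stand-in for the widened mapping helper: callers that do not
 * care about guest addresses pass addr == NULL; callers that do get back a
 * heap-allocated array they must free later (mirroring g_malloc0()/g_free()). */
static int map_entries(const uint64_t *guest_addrs, uint32_t nr_entries,
                       uint64_t **addr)
{
    if (addr) {
        *addr = calloc(nr_entries, sizeof(**addr));
        if (!*addr) {
            return -1;
        }
    }
    for (uint32_t i = 0; i < nr_entries; i++) {
        /* ... map entry i into host memory here ... */
        if (addr) {
            (*addr)[i] = guest_addrs[i];   /* remember the guest address for save */
        }
    }
    return 0;
}

The 2D call site in hw/display/virtio-gpu.c below passes &res->addrs instead of NULL, which is what makes the per-resource guest addresses available to virtio_gpu_save().
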
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -22,6 +22,8 @@
 #include "qemu/log.h"
 #include "qapi/error.h"
 
+#define VIRTIO_GPU_VM_VERSION 1
+
 static struct virtio_gpu_simple_resource*
 virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
 
@@ -94,7 +96,7 @@ static void update_cursor_data_virgl(VirtIOGPU *g,
 static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
 {
     struct virtio_gpu_scanout *s;
-    bool move = cursor->hdr.type != VIRTIO_GPU_CMD_MOVE_CURSOR;
+    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;
 
     if (cursor->pos.scanout_id >= g->conf.max_outputs) {
         return;
@@ -107,7 +109,7 @@ static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
                                    move ? "move" : "update",
                                    cursor->resource_id);
 
-    if (move) {
+    if (!move) {
         if (!s->current_cursor) {
             s->current_cursor = cursor_alloc(64, 64);
         }
@@ -120,6 +122,11 @@ static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
                                       g, s, cursor->resource_id);
         }
         dpy_cursor_define(s->con, s->current_cursor);
+
+        s->cursor = *cursor;
+    } else {
+        s->cursor.pos.x = cursor->pos.x;
+        s->cursor.pos.y = cursor->pos.y;
     }
     dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                   cursor->resource_id ? 1 : 0);
@@ -495,6 +502,11 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g,
     pixman_region_fini(&flush_region);
 }
 
+static void virtio_unref_resource(pixman_image_t *image, void *data)
+{
+    pixman_image_unref(data);
+}
+
 static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                    struct virtio_gpu_ctrl_command *cmd)
 {
@@ -571,8 +583,15 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
         != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
         scanout->width != ss.r.width ||
         scanout->height != ss.r.height) {
+        pixman_image_t *rect;
+        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
+        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
+                                        pixman_image_get_stride(res->image));
+        pixman_image_ref(res->image);
+        pixman_image_set_destroy_function(rect, virtio_unref_resource,
+                                          res->image);
         /* realloc the surface ptr */
-        scanout->ds = qemu_create_displaysurface_pixman(res->image);
+        scanout->ds = qemu_create_displaysurface_pixman(rect);
         if (!scanout->ds) {
             cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
             return;
@@ -590,7 +609,7 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
 
 int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                   struct virtio_gpu_ctrl_command *cmd,
-                                  struct iovec **iov)
+                                  uint64_t **addr, struct iovec **iov)
 {
     struct virtio_gpu_mem_entry *ents;
     size_t esize, s;
@@ -616,10 +635,16 @@ int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
     }
 
     *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
+    if (addr) {
+        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
+    }
     for (i = 0; i < ab->nr_entries; i++) {
         hwaddr len = ents[i].length;
         (*iov)[i].iov_len = ents[i].length;
         (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
+        if (addr) {
+            (*addr)[i] = ents[i].addr;
+        }
         if (!(*iov)[i].iov_base || len != ents[i].length) {
             qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                           " resource %d element %d\n",
@@ -627,6 +652,10 @@ int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
             virtio_gpu_cleanup_mapping_iov(*iov, i);
             g_free(ents);
             *iov = NULL;
+            if (addr) {
+                g_free(*addr);
+                *addr = NULL;
+            }
             return -1;
         }
     }
@@ -650,6 +679,8 @@ static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
     virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
     res->iov = NULL;
     res->iov_cnt = 0;
+    g_free(res->addrs);
+    res->addrs = NULL;
 }
 
 static void
@@ -671,7 +702,7 @@ virtio_gpu_resource_attach_backing(VirtIOGPU *g,
         return;
     }
 
-    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->iov);
+    ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
     if (ret != 0) {
         cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
         return;
@@ -917,11 +948,163 @@ const GraphicHwOps virtio_gpu_ops = {
     .gl_block = virtio_gpu_gl_block,
 };
 
+static const VMStateDescription vmstate_virtio_gpu_scanout = {
+    .name = "virtio-gpu-one-scanout",
+    .version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
+        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
+        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
+        VMSTATE_INT32(x, struct virtio_gpu_scanout),
+        VMSTATE_INT32(y, struct virtio_gpu_scanout),
+        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
+        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
+        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
+        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
+        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
+static const VMStateDescription vmstate_virtio_gpu_scanouts = {
+    .name = "virtio-gpu-scanouts",
+    .version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_INT32(enable, struct VirtIOGPU),
+        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU),
+        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
+                                     conf.max_outputs, 1,
+                                     vmstate_virtio_gpu_scanout,
+                                     struct virtio_gpu_scanout),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
 static const VMStateDescription vmstate_virtio_gpu_unmigratable = {
-    .name = "virtio-gpu",
+    .name = "virtio-gpu-with-virgl",
     .unmigratable = 1,
 };
 
+static void virtio_gpu_save(QEMUFile *f, void *opaque)
+{
+    VirtIOGPU *g = opaque;
+    VirtIODevice *vdev = VIRTIO_DEVICE(g);
+    struct virtio_gpu_simple_resource *res;
+    int i;
+
+    virtio_save(vdev, f);
+
+    /* in 2d mode we should never find unprocessed commands here */
+    assert(QTAILQ_EMPTY(&g->cmdq));
+
+    QTAILQ_FOREACH(res, &g->reslist, next) {
+        qemu_put_be32(f, res->resource_id);
+        qemu_put_be32(f, res->width);
+        qemu_put_be32(f, res->height);
+        qemu_put_be32(f, res->format);
+        qemu_put_be32(f, res->iov_cnt);
+        for (i = 0; i < res->iov_cnt; i++) {
+            qemu_put_be64(f, res->addrs[i]);
+            qemu_put_be32(f, res->iov[i].iov_len);
+        }
+        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
+                        pixman_image_get_stride(res->image) * res->height);
+    }
+    qemu_put_be32(f, 0); /* end of list */
+
+    vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
+}
+
+static int virtio_gpu_load(QEMUFile *f, void *opaque, int version_id)
+{
+    VirtIOGPU *g = opaque;
+    VirtIODevice *vdev = VIRTIO_DEVICE(g);
+    struct virtio_gpu_simple_resource *res;
+    struct virtio_gpu_scanout *scanout;
+    uint32_t resource_id, pformat;
+    int i, ret;
+
+    if (version_id != VIRTIO_GPU_VM_VERSION) {
+        return -EINVAL;
+    }
+
+    ret = virtio_load(vdev, f, version_id);
+    if (ret) {
+        return ret;
+    }
+
+    resource_id = qemu_get_be32(f);
+    while (resource_id != 0) {
+        res = g_new0(struct virtio_gpu_simple_resource, 1);
+        res->resource_id = resource_id;
+        res->width = qemu_get_be32(f);
+        res->height = qemu_get_be32(f);
+        res->format = qemu_get_be32(f);
+        res->iov_cnt = qemu_get_be32(f);
+
+        /* allocate */
+        pformat = get_pixman_format(res->format);
+        if (!pformat) {
+            return -EINVAL;
+        }
+        res->image = pixman_image_create_bits(pformat,
+                                              res->width, res->height,
+                                              NULL, 0);
+        if (!res->image) {
+            return -EINVAL;
+        }
+
+        res->addrs = g_new(uint64_t, res->iov_cnt);
+        res->iov = g_new(struct iovec, res->iov_cnt);
+
+        /* read data */
+        for (i = 0; i < res->iov_cnt; i++) {
+            res->addrs[i] = qemu_get_be64(f);
+            res->iov[i].iov_len = qemu_get_be32(f);
+        }
+        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
+                        pixman_image_get_stride(res->image) * res->height);
+
+        /* restore mapping */
+        for (i = 0; i < res->iov_cnt; i++) {
+            hwaddr len = res->iov[i].iov_len;
+            res->iov[i].iov_base =
+                cpu_physical_memory_map(res->addrs[i], &len, 1);
+            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
+                return -EINVAL;
+            }
+        }
+
+        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
+
+        resource_id = qemu_get_be32(f);
+    }
+
+    /* load & apply scanout state */
+    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
+    for (i = 0; i < g->conf.max_outputs; i++) {
+        scanout = &g->scanout[i];
+        if (!scanout->resource_id) {
+            continue;
+        }
+        res = virtio_gpu_find_resource(g, scanout->resource_id);
+        if (!res) {
+            return -EINVAL;
+        }
+        scanout->ds = qemu_create_displaysurface_pixman(res->image);
+        if (!scanout->ds) {
+            return -EINVAL;
+        }
+        dpy_gfx_replace_surface(scanout->con, scanout->ds);
+        dpy_gfx_update(scanout->con, 0, 0, scanout->width, scanout->height);
+        update_cursor(g, &scanout->cursor);
+        res->scanout_bitmask |= (1 << i);
+    }
+
+    return 0;
+}
+
 static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
@@ -979,7 +1162,12 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
         }
     }
 
-    vmstate_register(qdev, -1, &vmstate_virtio_gpu_unmigratable, g);
+    if (virtio_gpu_virgl_enabled(g->conf)) {
+        vmstate_register(qdev, -1, &vmstate_virtio_gpu_unmigratable, g);
+    } else {
+        register_savevm(qdev, "virtio-gpu", -1, VIRTIO_GPU_VM_VERSION,
+                        virtio_gpu_save, virtio_gpu_load, g);
+    }
 }
 
 static void virtio_gpu_instance_init(Object *obj)

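For reference, the stream written by virtio_gpu_save() above is: the virtio core state, then one record per simple resource (id, width, height, format, iov count, the guest address and length of every backing entry, then the raw pixel data), a zero resource id as list terminator, and finally the "virtio-gpu-scanouts" vmstate section. The following stand-alone sketch mirrors that record layout with plain stdio in place of the QEMUFile helpers; the put_be*() functions and the toy_resource struct are illustrative stand-ins, not QEMU code:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for qemu_put_be32()/qemu_put_be64(), only so the
 * layout sketch compiles outside of QEMU. */
static void put_be32(FILE *f, uint32_t v)
{
    uint8_t b[4] = { (uint8_t)(v >> 24), (uint8_t)(v >> 16),
                     (uint8_t)(v >> 8), (uint8_t)v };
    fwrite(b, 1, sizeof(b), f);
}

static void put_be64(FILE *f, uint64_t v)
{
    put_be32(f, (uint32_t)(v >> 32));
    put_be32(f, (uint32_t)v);
}

/* Illustrative mirror of one resource record as virtio_gpu_save() emits it. */
struct toy_resource {
    uint32_t resource_id;          /* 0 is reserved as the list terminator */
    uint32_t width, height, format;
    uint32_t iov_cnt;
    const uint64_t *addrs;         /* guest physical address per iov entry */
    const uint32_t *lens;          /* length per iov entry */
    const uint8_t *pixels;         /* stride * height bytes of pixel data */
    uint32_t stride;
};

static void save_resource(FILE *f, const struct toy_resource *r)
{
    put_be32(f, r->resource_id);
    put_be32(f, r->width);
    put_be32(f, r->height);
    put_be32(f, r->format);
    put_be32(f, r->iov_cnt);
    for (uint32_t i = 0; i < r->iov_cnt; i++) {
        put_be64(f, r->addrs[i]);      /* lets the destination remap this entry */
        put_be32(f, r->lens[i]);
    }
    fwrite(r->pixels, 1, (size_t)r->stride * r->height, f);
}

virtio_gpu_load() reads the same fields back in the same order, re-creates the pixman image, re-maps every iov entry from the saved guest address with cpu_physical_memory_map(), and then restores the scanouts and cursors from the vmstate section.
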
--- a/hw/display/virtio-vga.c
+++ b/hw/display/virtio-vga.c
@@ -84,6 +84,17 @@
     .gl_block = virtio_vga_gl_block,
 };
 
+static const VMStateDescription vmstate_virtio_vga = {
+    .name = "virtio-vga",
+    .version_id = 2,
+    .minimum_version_id = 2,
+    .fields = (VMStateField[]) {
+        /* no pci stuff here, saving the virtio device will handle that */
+        VMSTATE_STRUCT(vga, VirtIOVGA, 0, vmstate_vga_common, VGACommonState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 /* VGA device wrapper around PCI device around virtio GPU */
 static void virtio_vga_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
 {
@@ -168,6 +179,7 @@ static void virtio_vga_class_init(ObjectClass *klass, void *data)
     set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
     dc->props = virtio_vga_properties;
     dc->reset = virtio_vga_reset;
+    dc->vmsd = &vmstate_virtio_vga;
     dc->hotpluggable = false;
 
     k->realize = virtio_vga_realize;

--- a/hw/display/vmware_vga.c
+++ b/hw/display/vmware_vga.c
@@ -66,17 +66,11 @@ struct vmsvga_state_s {
     uint8_t *fifo_ptr;
     unsigned int fifo_size;
 
-    union {
-        uint32_t *fifo;
-        struct QEMU_PACKED {
-            uint32_t min;
-            uint32_t max;
-            uint32_t next_cmd;
-            uint32_t stop;
-            /* Add registers here when adding capabilities. */
-            uint32_t fifo[0];
-        } *cmd;
-    };
+    uint32_t *fifo;
+    uint32_t fifo_min;
+    uint32_t fifo_max;
+    uint32_t fifo_next;
+    uint32_t fifo_stop;
 
 #define REDRAW_FIFO_LEN 512
     struct vmsvga_rect_s {
@@ -198,7 +192,7 @@ enum {
      */
     SVGA_FIFO_MIN = 0,
     SVGA_FIFO_MAX,      /* The distance from MIN to MAX must be at least 10K */
-    SVGA_FIFO_NEXT_CMD,
+    SVGA_FIFO_NEXT,
    SVGA_FIFO_STOP,
 
     /*
@@ -546,8 +540,6 @@ static inline void vmsvga_cursor_define(struct vmsvga_state_s *s,
 }
 #endif
 
-#define CMD(f) le32_to_cpu(s->cmd->f)
-
 static inline int vmsvga_fifo_length(struct vmsvga_state_s *s)
 {
     int num;
@@ -555,21 +547,45 @@
     if (!s->config || !s->enable) {
         return 0;
     }
-    num = CMD(next_cmd) - CMD(stop);
+
+    s->fifo_min = le32_to_cpu(s->fifo[SVGA_FIFO_MIN]);
+    s->fifo_max = le32_to_cpu(s->fifo[SVGA_FIFO_MAX]);
+    s->fifo_next = le32_to_cpu(s->fifo[SVGA_FIFO_NEXT]);
+    s->fifo_stop = le32_to_cpu(s->fifo[SVGA_FIFO_STOP]);
+
+    /* Check range and alignment. */
+    if ((s->fifo_min | s->fifo_max | s->fifo_next | s->fifo_stop) & 3) {
+        return 0;
+    }
+    if (s->fifo_min < sizeof(uint32_t) * 4) {
+        return 0;
+    }
+    if (s->fifo_max > SVGA_FIFO_SIZE ||
+        s->fifo_min >= SVGA_FIFO_SIZE ||
+        s->fifo_stop >= SVGA_FIFO_SIZE ||
+        s->fifo_next >= SVGA_FIFO_SIZE) {
+        return 0;
+    }
+    if (s->fifo_max < s->fifo_min + 10 * 1024) {
+        return 0;
+    }
+
+    num = s->fifo_next - s->fifo_stop;
     if (num < 0) {
-        num += CMD(max) - CMD(min);
+        num += s->fifo_max - s->fifo_min;
     }
     return num >> 2;
 }
 
 static inline uint32_t vmsvga_fifo_read_raw(struct vmsvga_state_s *s)
 {
-    uint32_t cmd = s->fifo[CMD(stop) >> 2];
+    uint32_t cmd = s->fifo[s->fifo_stop >> 2];
 
-    s->cmd->stop = cpu_to_le32(CMD(stop) + 4);
-    if (CMD(stop) >= CMD(max)) {
-        s->cmd->stop = s->cmd->min;
+    s->fifo_stop += 4;
+    if (s->fifo_stop >= s->fifo_max) {
+        s->fifo_stop = s->fifo_min;
     }
+    s->fifo[SVGA_FIFO_STOP] = cpu_to_le32(s->fifo_stop);
     return cmd;
 }
 
@@ -581,15 +597,15 @@
 static void vmsvga_fifo_run(struct vmsvga_state_s *s)
 {
     uint32_t cmd, colour;
-    int args, len;
+    int args, len, maxloop = 1024;
     int x, y, dx, dy, width, height;
     struct vmsvga_cursor_definition_s cursor;
     uint32_t cmd_start;
 
     len = vmsvga_fifo_length(s);
-    while (len > 0) {
+    while (len > 0 && --maxloop > 0) {
         /* May need to go back to the start of the command if incomplete */
-        cmd_start = s->cmd->stop;
+        cmd_start = s->fifo_stop;
 
         switch (cmd = vmsvga_fifo_read(s)) {
         case SVGA_CMD_UPDATE:
@@ -748,7 +764,8 @@ static void vmsvga_fifo_run(struct vmsvga_state_s *s)
             break;
 
         rewind:
-            s->cmd->stop = cmd_start;
+            s->fifo_stop = cmd_start;
+            s->fifo[SVGA_FIFO_STOP] = cpu_to_le32(s->fifo_stop);
             break;
         }
     }
@@ -1005,19 +1022,6 @@ static void vmsvga_value_write(void *opaque, uint32_t address, uint32_t value)
     case SVGA_REG_CONFIG_DONE:
         if (value) {
             s->fifo = (uint32_t *) s->fifo_ptr;
-            /* Check range and alignment. */
-            if ((CMD(min) | CMD(max) | CMD(next_cmd) | CMD(stop)) & 3) {
-                break;
-            }
-            if (CMD(min) < (uint8_t *) s->cmd->fifo - (uint8_t *) s->fifo) {
-                break;
-            }
-            if (CMD(max) > SVGA_FIFO_SIZE) {
-                break;
-            }
-            if (CMD(max) < CMD(min) + 10 * 1024) {
-                break;
-            }
             vga_dirty_log_stop(&s->vga);
         }
         s->config = !!value;

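The vmware_vga.c changes above address two guest-triggerable problems: the FIFO control registers live in guest-writable memory, so the device now snapshots them into s->fifo_min/max/next/stop and re-validates the copies on every vmsvga_fifo_length() call instead of trusting the live values (the old one-time check at SVGA_REG_CONFIG_DONE was removed), and vmsvga_fifo_run() now caps the work per invocation at 1024 commands. A self-contained sketch of the snapshot-and-validate pattern, with illustrative names (TOY_FIFO_SIZE and the fifo_* identifiers here are not QEMU code):

#include <stdbool.h>
#include <stdint.h>

#define TOY_FIFO_SIZE 0x10000          /* assumed FIFO size for the sketch */

enum { FIFO_MIN, FIFO_MAX, FIFO_NEXT, FIFO_STOP };

struct fifo_shadow {
    uint32_t min, max, next, stop;     /* trusted copies of the guest registers */
};

/* Copy the guest-controlled FIFO header once, then validate and use only the
 * copy, so a concurrently running guest cannot change the values between the
 * check and the use (the time-of-check/time-of-use race the patches close). */
static bool fifo_snapshot(const volatile uint32_t *shared, struct fifo_shadow *sh)
{
    sh->min  = shared[FIFO_MIN];
    sh->max  = shared[FIFO_MAX];
    sh->next = shared[FIFO_NEXT];
    sh->stop = shared[FIFO_STOP];

    /* Same sanity checks as vmsvga_fifo_length(): 32-bit alignment, offsets
     * inside the FIFO, and at least 10K of command space between min and max. */
    if ((sh->min | sh->max | sh->next | sh->stop) & 3) {
        return false;
    }
    if (sh->min < 4 * sizeof(uint32_t) ||
        sh->min >= TOY_FIFO_SIZE ||
        sh->max > TOY_FIFO_SIZE ||
        sh->next >= TOY_FIFO_SIZE ||
        sh->stop >= TOY_FIFO_SIZE ||
        sh->max < sh->min + 10 * 1024) {
        return false;
    }
    return true;
}

In the same spirit, vmsvga_fifo_read_raw() now advances the shadow stop pointer and only publishes the updated value back to the shared SVGA_FIFO_STOP register, so the guest can observe progress but can no longer steer the device outside the validated bounds mid-command.
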
--- a/include/hw/virtio/virtio-gpu.h
+++ b/include/hw/virtio/virtio-gpu.h
@@ -32,6 +32,7 @@ struct virtio_gpu_simple_resource {
     uint32_t width;
     uint32_t height;
     uint32_t format;
+    uint64_t *addrs;
     struct iovec *iov;
     unsigned int iov_cnt;
     uint32_t scanout_bitmask;
@@ -46,6 +47,7 @@ struct virtio_gpu_scanout {
     int x, y;
     int invalidate;
     uint32_t resource_id;
+    struct virtio_gpu_update_cursor cursor;
     QEMUCursor *current_cursor;
 };
 
@@ -150,7 +152,7 @@ void virtio_gpu_get_display_info(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd);
 
 int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
                                   struct virtio_gpu_ctrl_command *cmd,
-                                  struct iovec **iov);
+                                  uint64_t **addr, struct iovec **iov);
 void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count);
 void virtio_gpu_process_cmdq(VirtIOGPU *g);