* MemoryRegionCache revert
* glib optimization workaround
* fix "info lapic" segfault on isapc
* fix QIOChannel memory leak

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2

iQExBAABCAAbBQJY4oOMFBxwYm9uemluaUByZWRoYXQuY29tAAoJEL/70l94x66D
AsIH/i52nJw41utJCs5AevnQyqNs9RnyMkZLHiVoi6a+pdJqX+0mCw8gV/5FsbPZ
dtyt1tEuYBSu72adr+/ExE4aIEjwzeyRmnUdOkB+iYPxirHKuf4K/JTuLuvMtaQQ
Tqj+FU5tx3wx0jlGOm5A7pzjZ680JUex+oaz3d1bZziv3zCyFCIgiZ2m2UAaaPQe
fsd3fksJvc0gKOUKmdLUpu2m/xP3hAQAfQ4P/ozOfbVh9V2CVNaQ/cl935tNtdFK
aYN3KleW3/ovb+YSexeNoW7QQH/3ZsjronCW5OmbF4FgHoeoV8MUROfNgu1S2bRU
Bne9K/6boPzhD8NDEuSy8SXvf7s=
=EdXr
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging

* MemoryRegionCache revert
* glib optimization workaround
* fix "info lapic" segfault on isapc
* fix QIOChannel memory leak

# gpg: Signature made Mon 03 Apr 2017 18:17:00 BST
# gpg:                using RSA key 0xBFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream:
  main-loop: Acquire main_context lock around os_host_main_loop_wait.
  exec: revert MemoryRegionCache
  nbd: fix memory leak on socket_connect failed
  ipmi: Fix macro issues
  target-i386: fix "info lapic" segfault on isapc
  iscsi: drop unused IscsiAIOCB.qiov field

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit 87cc4c6102
@@ -103,7 +103,6 @@ typedef struct IscsiTask {
 
 typedef struct IscsiAIOCB {
     BlockAIOCB common;
-    QEMUIOVector *qiov;
     QEMUBH *bh;
     IscsiLun *iscsilun;
     struct scsi_task *task;
@@ -322,6 +322,7 @@ static QIOChannelSocket *nbd_establish_connection(SocketAddressFlat *saddr_flat,
                                         &local_err);
     qapi_free_SocketAddress(saddr);
     if (local_err) {
+        object_unref(OBJECT(sioc));
         error_propagate(errp, local_err);
         return NULL;
     }
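The hunk above is the "nbd: fix memory leak on socket_connect failed" change: the QIOChannelSocket created for the connection was never released when the connect attempt failed. A minimal sketch of the pattern, assuming the QIO channel API of that era (qio_channel_socket_new(), qio_channel_socket_connect_sync()); the helper name connect_or_fail is invented for illustration:

#include "qemu/osdep.h"
#include "io/channel-socket.h"
#include "qapi/error.h"

/* Sketch only: an object returned by a _new() constructor carries a
 * reference that must be dropped on every exit path, including the
 * error path that propagates the failure to the caller. */
static QIOChannelSocket *connect_or_fail(SocketAddress *saddr, Error **errp)
{
    QIOChannelSocket *sioc = qio_channel_socket_new();
    Error *local_err = NULL;

    qio_channel_socket_connect_sync(sioc, saddr, &local_err);
    if (local_err) {
        object_unref(OBJECT(sioc));     /* without this, sioc leaks */
        error_propagate(errp, local_err);
        return NULL;
    }
    return sioc;
}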
exec.c
@@ -3236,75 +3236,33 @@ int64_t address_space_cache_init(MemoryRegionCache *cache,
                                  hwaddr len,
                                  bool is_write)
 {
-    hwaddr l, xlat;
-    MemoryRegion *mr;
-    void *ptr;
-
-    assert(len > 0);
-
-    l = len;
-    mr = address_space_translate(as, addr, &xlat, &l, is_write);
-    if (!memory_access_is_direct(mr, is_write)) {
-        return -EINVAL;
-    }
-
-    l = address_space_extend_translation(as, addr, len, mr, xlat, l, is_write);
-    ptr = qemu_ram_ptr_length(mr->ram_block, xlat, &l);
-
-    cache->xlat = xlat;
-    cache->is_write = is_write;
-    cache->mr = mr;
-    cache->ptr = ptr;
-    cache->len = l;
-    memory_region_ref(cache->mr);
-
-    return l;
+    cache->len = len;
+    cache->as = as;
+    cache->xlat = addr;
+    return len;
 }
 
 void address_space_cache_invalidate(MemoryRegionCache *cache,
                                     hwaddr addr,
                                     hwaddr access_len)
 {
-    assert(cache->is_write);
-    invalidate_and_set_dirty(cache->mr, addr + cache->xlat, access_len);
 }
 
 void address_space_cache_destroy(MemoryRegionCache *cache)
 {
-    if (!cache->mr) {
-        return;
-    }
-
-    if (xen_enabled()) {
-        xen_invalidate_map_cache_entry(cache->ptr);
-    }
-    memory_region_unref(cache->mr);
-    cache->mr = NULL;
-}
-
-/* Called from RCU critical section. This function has the same
- * semantics as address_space_translate, but it only works on a
- * predefined range of a MemoryRegion that was mapped with
- * address_space_cache_init.
- */
-static inline MemoryRegion *address_space_translate_cached(
-    MemoryRegionCache *cache, hwaddr addr, hwaddr *xlat,
-    hwaddr *plen, bool is_write)
-{
-    assert(addr < cache->len && *plen <= cache->len - addr);
-    *xlat = addr + cache->xlat;
-    return cache->mr;
+    cache->as = NULL;
 }
 
 #define ARG1_DECL                MemoryRegionCache *cache
 #define ARG1                     cache
 #define SUFFIX                   _cached
-#define TRANSLATE(...)           address_space_translate_cached(cache, __VA_ARGS__)
+#define TRANSLATE(addr, ...)     \
+    address_space_translate(cache->as, cache->xlat + (addr), __VA_ARGS__)
 #define IS_DIRECT(mr, is_write)  true
-#define MAP_RAM(mr, ofs)         (cache->ptr + (ofs - cache->xlat))
-#define INVALIDATE(mr, ofs, len) ((void)0)
-#define RCU_READ_LOCK()          ((void)0)
-#define RCU_READ_UNLOCK()        ((void)0)
+#define MAP_RAM(mr, ofs)         qemu_map_ram_ptr((mr)->ram_block, ofs)
+#define INVALIDATE(mr, ofs, len) invalidate_and_set_dirty(mr, ofs, len)
+#define RCU_READ_LOCK()          rcu_read_lock()
+#define RCU_READ_UNLOCK()        rcu_read_unlock()
 #include "memory_ldst.inc.c"
 
 /* virtual memory access for debug (includes writing to ROM) */
@@ -37,40 +37,30 @@
 #define IPMI_BT_HBUSY_BIT      6
 #define IPMI_BT_BBUSY_BIT      7
 
 #define IPMI_BT_CLR_WR_MASK        (1 << IPMI_BT_CLR_WR_BIT)
 #define IPMI_BT_GET_CLR_WR(d)      (((d) >> IPMI_BT_CLR_WR_BIT) & 0x1)
-#define IPMI_BT_SET_CLR_WR(d, v)   (d) = (((d) & ~IPMI_BT_CLR_WR_MASK) | \
-                                       (((v & 1) << IPMI_BT_CLR_WR_BIT)))
 
 #define IPMI_BT_CLR_RD_MASK        (1 << IPMI_BT_CLR_RD_BIT)
 #define IPMI_BT_GET_CLR_RD(d)      (((d) >> IPMI_BT_CLR_RD_BIT) & 0x1)
-#define IPMI_BT_SET_CLR_RD(d, v)   (d) = (((d) & ~IPMI_BT_CLR_RD_MASK) | \
-                                       (((v & 1) << IPMI_BT_CLR_RD_BIT)))
 
 #define IPMI_BT_H2B_ATN_MASK       (1 << IPMI_BT_H2B_ATN_BIT)
 #define IPMI_BT_GET_H2B_ATN(d)     (((d) >> IPMI_BT_H2B_ATN_BIT) & 0x1)
-#define IPMI_BT_SET_H2B_ATN(d, v)  (d) = (((d) & ~IPMI_BT_H2B_ATN_MASK) | \
-                                       (((v & 1) << IPMI_BT_H2B_ATN_BIT)))
 
 #define IPMI_BT_B2H_ATN_MASK       (1 << IPMI_BT_B2H_ATN_BIT)
 #define IPMI_BT_GET_B2H_ATN(d)     (((d) >> IPMI_BT_B2H_ATN_BIT) & 0x1)
-#define IPMI_BT_SET_B2H_ATN(d, v)  (d) = (((d) & ~IPMI_BT_B2H_ATN_MASK) | \
-                                       (((v & 1) << IPMI_BT_B2H_ATN_BIT)))
+#define IPMI_BT_SET_B2H_ATN(d, v)  ((d) = (((d) & ~IPMI_BT_B2H_ATN_MASK) | \
-                                        (((v) & 1) << IPMI_BT_B2H_ATN_BIT)))
 
 #define IPMI_BT_SMS_ATN_MASK       (1 << IPMI_BT_SMS_ATN_BIT)
 #define IPMI_BT_GET_SMS_ATN(d)     (((d) >> IPMI_BT_SMS_ATN_BIT) & 0x1)
-#define IPMI_BT_SET_SMS_ATN(d, v)  (d) = (((d) & ~IPMI_BT_SMS_ATN_MASK) | \
-                                       (((v & 1) << IPMI_BT_SMS_ATN_BIT)))
+#define IPMI_BT_SET_SMS_ATN(d, v)  ((d) = (((d) & ~IPMI_BT_SMS_ATN_MASK) | \
+                                        (((v) & 1) << IPMI_BT_SMS_ATN_BIT)))
 
 #define IPMI_BT_HBUSY_MASK         (1 << IPMI_BT_HBUSY_BIT)
 #define IPMI_BT_GET_HBUSY(d)       (((d) >> IPMI_BT_HBUSY_BIT) & 0x1)
-#define IPMI_BT_SET_HBUSY(d, v)    (d) = (((d) & ~IPMI_BT_HBUSY_MASK) | \
-                                       (((v & 1) << IPMI_BT_HBUSY_BIT)))
+#define IPMI_BT_SET_HBUSY(d, v)    ((d) = (((d) & ~IPMI_BT_HBUSY_MASK) | \
+                                        (((v) & 1) << IPMI_BT_HBUSY_BIT)))
 
 #define IPMI_BT_BBUSY_MASK         (1 << IPMI_BT_BBUSY_BIT)
 #define IPMI_BT_GET_BBUSY(d)       (((d) >> IPMI_BT_BBUSY_BIT) & 0x1)
-#define IPMI_BT_SET_BBUSY(d, v)    (d) = (((d) & ~IPMI_BT_BBUSY_MASK) | \
-                                       (((v & 1) << IPMI_BT_BBUSY_BIT)))
+#define IPMI_BT_SET_BBUSY(d, v)    ((d) = (((d) & ~IPMI_BT_BBUSY_MASK) | \
+                                        (((v) & 1) << IPMI_BT_BBUSY_BIT)))
 
 
 /* Mask register */
@@ -79,13 +69,13 @@
 
 #define IPMI_BT_B2H_IRQ_EN_MASK    (1 << IPMI_BT_B2H_IRQ_EN_BIT)
 #define IPMI_BT_GET_B2H_IRQ_EN(d)  (((d) >> IPMI_BT_B2H_IRQ_EN_BIT) & 0x1)
-#define IPMI_BT_SET_B2H_IRQ_EN(d, v) (d) = (((d) & ~IPMI_BT_B2H_IRQ_EN_MASK) | \
-                                       (((v & 1) << IPMI_BT_B2H_IRQ_EN_BIT)))
+#define IPMI_BT_SET_B2H_IRQ_EN(d, v) ((d) = (((d) & ~IPMI_BT_B2H_IRQ_EN_MASK) |\
+                                        (((v) & 1) << IPMI_BT_B2H_IRQ_EN_BIT)))
 
 #define IPMI_BT_B2H_IRQ_MASK       (1 << IPMI_BT_B2H_IRQ_BIT)
 #define IPMI_BT_GET_B2H_IRQ(d)     (((d) >> IPMI_BT_B2H_IRQ_BIT) & 0x1)
-#define IPMI_BT_SET_B2H_IRQ(d, v)  (d) = (((d) & ~IPMI_BT_B2H_IRQ_MASK) | \
-                                       (((v & 1) << IPMI_BT_B2H_IRQ_BIT)))
+#define IPMI_BT_SET_B2H_IRQ(d, v)  ((d) = (((d) & ~IPMI_BT_B2H_IRQ_MASK) | \
+                                        (((v) & 1) << IPMI_BT_B2H_IRQ_BIT)))
 
 typedef struct IPMIBT {
     IPMIBmc *bmc;
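These two hunks (apparently QEMU's IPMI BT interface, hw/ipmi/ipmi_bt.c) are the "ipmi: Fix macro issues" patch: in the old SET macros the parameter v is used as "v & 1" without parentheses, and the assignment is not wrapped in an outer pair, so the macros misbehave when handed a compound expression or used inside a larger one. A self-contained reproduction of the precedence problem; the names BIT, MASK, SET_OLD and SET_NEW are invented for illustration, not QEMU's:

#include <stdio.h>

#define BIT           6
#define MASK          (1 << BIT)
/* Old style: 'v' is not parenthesized before '& 1', and the assignment
 * has no outer parentheses. */
#define SET_OLD(d, v) (d) = (((d) & ~MASK) | (((v & 1) << BIT)))
/* New style: every use of a parameter is parenthesized and the whole
 * expansion is wrapped so it behaves as a single expression. */
#define SET_NEW(d, v) ((d) = (((d) & ~MASK) | (((v) & 1) << BIT)))

int main(void)
{
    unsigned d1 = 0, d2 = 0, x = 2;

    SET_OLD(d1, x ^ 1);  /* expands to ((x ^ 1 & 1) << 6); '&' binds tighter
                          * than '^', so this is (x ^ 1) << 6 = 3 << 6 */
    SET_NEW(d2, x ^ 1);  /* (((x ^ 1) & 1) << 6) = 1 << 6, as intended */

    printf("old: 0x%x  new: 0x%x\n", d1, d2);  /* old: 0xc0  new: 0x40 */
    return 0;
}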
@@ -1426,13 +1426,11 @@ void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);
 
 struct MemoryRegionCache {
     hwaddr xlat;
-    void *ptr;
     hwaddr len;
-    MemoryRegion *mr;
-    bool is_write;
+    AddressSpace *as;
 };
 
-#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mr = NULL })
+#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .as = NULL })
 
 /* address_space_cache_init: prepare for repeated access to a physical
  * memory region
@@ -1688,7 +1686,7 @@ address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
                           void *buf, int len)
 {
     assert(addr < cache->len && len <= cache->len - addr);
-    memcpy(buf, cache->ptr + addr, len);
+    address_space_read(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED, buf, len);
 }
 
 /**
@@ -1704,7 +1702,7 @@ address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
                            void *buf, int len)
 {
     assert(addr < cache->len && len <= cache->len - addr);
-    memcpy(cache->ptr + addr, buf, len);
+    address_space_write(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED, buf, len);
 }
 
 #endif
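These hunks are the declaration side of the revert (apparently include/exec/memory.h): MemoryRegionCache now records only the (as, xlat, len) triple, and the *_cached helpers forward to address_space_read()/address_space_write() instead of dereferencing a pinned host pointer. A hedged sketch of how a caller uses this API; the helper name, ring layout and 4 KiB window size are invented for illustration:

#include "qemu/osdep.h"
#include "exec/memory.h"

/* Sketch only: map a guest ring once, then perform repeated accesses
 * through the cache.  With the reverted implementation each *_cached
 * access goes through a full address_space_read()/write(), so this is
 * correct but roughly as expensive as the uncached calls. */
static uint16_t read_ring_entry(AddressSpace *as, hwaddr ring_base, int idx)
{
    MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
    uint16_t val = 0;

    if (address_space_cache_init(&cache, as, ring_base, 4096, false) < 0) {
        return 0;                    /* the region could not be cached */
    }
    /* idx is assumed to stay within the 4 KiB window; raw bytes are read,
     * endianness conversion is omitted. */
    address_space_read_cached(&cache, idx * sizeof(val), &val, sizeof(val));
    address_space_cache_destroy(&cache);
    return val;
}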
@@ -326,6 +326,10 @@ void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
 {
     X86CPU *cpu = X86_CPU(cs);
     APICCommonState *s = APIC_COMMON(cpu->apic_state);
+    if (!s) {
+        cpu_fprintf(f, "local apic state not available\n");
+        return;
+    }
     uint32_t *lvt = s->lvt;
 
     cpu_fprintf(f, "dumping local APIC state for CPU %-2u\n\n",
@@ -218,9 +218,12 @@ static void glib_pollfds_poll(void)
 
 static int os_host_main_loop_wait(int64_t timeout)
 {
+    GMainContext *context = g_main_context_default();
     int ret;
     static int spin_counter;
 
+    g_main_context_acquire(context);
+
     glib_pollfds_fill(&timeout);
 
     /* If the I/O thread is very busy or we are incorrectly busy waiting in
@@ -256,6 +259,9 @@ static int os_host_main_loop_wait(int64_t timeout)
     }
 
     glib_pollfds_poll();
+
+    g_main_context_release(context);
+
     return ret;
 }
 #else
@@ -412,12 +418,15 @@ static int os_host_main_loop_wait(int64_t timeout)
     fd_set rfds, wfds, xfds;
     int nfds;
 
+    g_main_context_acquire(context);
+
     /* XXX: need to suppress polling by better using win32 events */
     ret = 0;
     for (pe = first_polling_entry; pe != NULL; pe = pe->next) {
        ret |= pe->func(pe->opaque);
     }
     if (ret != 0) {
+        g_main_context_release(context);
        return ret;
     }
 
@@ -472,6 +481,8 @@ static int os_host_main_loop_wait(int64_t timeout)
         g_main_context_dispatch(context);
     }
 
+    g_main_context_release(context);
+
     return select_ret || g_poll_ret;
 }
 #endif
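The main-loop hunks above (presumably the "glib optimization workaround" from the cover letter, i.e. "main-loop: Acquire main_context lock around os_host_main_loop_wait.") bracket the poll/dispatch cycle with g_main_context_acquire()/g_main_context_release(), because GLib's prepare/query/check/dispatch calls expect the calling thread to own the context. A standalone GLib sketch of that ownership pattern, not QEMU code:

#include <glib.h>

/* One manual iteration of the default GMainContext, taking and releasing
 * ownership around the prepare/query/poll/check/dispatch sequence. */
static void iterate_default_context_once(void)
{
    GMainContext *context = g_main_context_default();
    GPollFD fds[64];
    gint max_priority, timeout, nfds;

    if (!g_main_context_acquire(context)) {
        return;                      /* another thread owns the context */
    }

    g_main_context_prepare(context, &max_priority);
    nfds = g_main_context_query(context, max_priority, &timeout, fds, 64);
    if (nfds > 64) {
        nfds = 64;                   /* a real loop would grow the array */
    }
    g_poll(fds, nfds, timeout);
    if (g_main_context_check(context, max_priority, fds, nfds)) {
        g_main_context_dispatch(context);
    }

    g_main_context_release(context);
}

int main(void)
{
    iterate_default_context_once();
    return 0;
}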