# SPDX-License-Identifier: GPL-2.0-only
# Makefile for KVM selftests.

# Kbuild.include supplies the try-run helper used further down to probe
# whether the toolchain accepts -no-pie and --s390-pgste.
include ../../../../scripts/Kbuild.include

# Default goal. Its prerequisites are added later ("all: $(STATIC_LIBS)")
# once the library targets have been defined.
all:

top_srcdir = ../../../..
# Tell the kselftest framework (lib.mk) to install kernel headers
# before building the tests.
KSFT_KHDR_INSTALL := 1
# For cross-builds to work, UNAME_M has to map to ARCH and arch specific
# directories and targets in this Makefile. "uname -m" doesn't map to
# arch specific sub-directory names.
#
# The UNAME_M variable is used to run the compiles pointing to the right arch
# directories and build the right targets for these supported architectures.
#
# TEST_GEN_PROGS and LIBKVM are set using the UNAME_M variable.
# LINUX_TOOL_ARCH_INCLUDE is set using the ARCH variable.
#
# x86_64 targets are named to include x86_64 as a suffix and directories
# for includes are in x86_64 sub-directory. s390x and aarch64 follow the
# same convention. "uname -m" doesn't result in the correct mapping for
# s390x and aarch64.
#
# No change necessary for x86_64
UNAME_M := $(shell uname -m)

# Set UNAME_M for the arm64 compile/install to work
ifeq ($(ARCH),arm64)
  UNAME_M := aarch64
endif
# Set UNAME_M for the s390x compile/install to work
ifeq ($(ARCH),s390)
  UNAME_M := s390x
endif
# Sources for the common libkvm test library, plus per-architecture
# additions selected below via $(LIBKVM_$(UNAME_M)).
LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c
LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c
LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c
# Tests built when UNAME_M is x86_64: the x86_64-specific tests
# (under x86_64/) followed by the architecture-independent ones.
TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
TEST_GEN_PROGS_x86_64 += x86_64/smm_test
TEST_GEN_PROGS_x86_64 += x86_64/state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test
TEST_GEN_PROGS_x86_64 += x86_64/debug_regs
TEST_GEN_PROGS_x86_64 += x86_64/user_msr_test
TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
TEST_GEN_PROGS_x86_64 += set_memory_region_test
TEST_GEN_PROGS_x86_64 += steal_time
# Tests built when UNAME_M is aarch64 (architecture-independent tests only).
TEST_GEN_PROGS_aarch64 += clear_dirty_log_test
TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
TEST_GEN_PROGS_aarch64 += set_memory_region_test
TEST_GEN_PROGS_aarch64 += steal_time
# Tests built when UNAME_M is s390x: the s390x-specific tests
# (under s390x/) followed by the architecture-independent ones.
TEST_GEN_PROGS_s390x = s390x/memop
TEST_GEN_PROGS_s390x += s390x/resets
TEST_GEN_PROGS_s390x += s390x/sync_regs_test
TEST_GEN_PROGS_s390x += demand_paging_test
TEST_GEN_PROGS_s390x += dirty_log_test
TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
TEST_GEN_PROGS_s390x += set_memory_region_test

# Select the test programs and library sources for the architecture
# being built.
TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
LIBKVM += $(LIBKVM_$(UNAME_M))
# Header search paths: installed kernel UAPI headers, tools/ headers,
# and the arch-specific tools headers.
INSTALL_HDR_PATH = $(top_srcdir)/usr
LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
# tools/arch uses "x86", not "x86_64", as its directory name.
ifeq ($(ARCH),x86_64)
LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/x86/include
else
LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
endif
CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
	-fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
	-I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \
	-I$(<D) -Iinclude/$(UNAME_M) -I..
# Probe for -no-pie support; the tests must be non-PIE binaries.
no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
        $(CC) -Werror -no-pie -x c - -o "$$TMP", -no-pie)

# On s390, build the testcases KVM-enabled
pgste-option = $(call try-run, echo 'int main() { return 0; }' | \
	$(CC) -Werror -Wl$(comma)--s390-pgste -x c - -o "$$TMP",-Wl$(comma)--s390-pgste)


LDFLAGS += -pthread $(no-pie-option) $(pgste-option)
# After inclusion, $(OUTPUT) is defined and
# $(TEST_GEN_PROGS) starts with $(OUTPUT)/
include ../lib.mk

STATIC_LIBS := $(OUTPUT)/libkvm.a
LIBKVM_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM))
# Extend lib.mk's clean list with our objects, library and cscope files.
EXTRA_CLEAN += $(LIBKVM_OBJ) $(STATIC_LIBS) cscope.*
# Create output directories at parse time; "x" only exists to force the
# $(shell ...) expansion, its value is unused.
x := $(shell mkdir -p $(sort $(dir $(LIBKVM_OBJ))))
$(LIBKVM_OBJ): $(OUTPUT)/%.o: %.c
	$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@

$(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)
	$(AR) crs $@ $^

x := $(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS))))
all: $(STATIC_LIBS)

# Every test binary links against libkvm.a.
$(TEST_GEN_PROGS): $(STATIC_LIBS)
# Build a cscope database covering the test sources and the headers
# they include. include_paths is a target-specific variable, only
# visible inside the cscope recipe.
cscope: include_paths = $(LINUX_TOOL_INCLUDE) $(LINUX_HDR_PATH) include lib ..
# Declared phony: "cscope" is a command, not a file, and must run even
# if a file named "cscope" exists.
.PHONY: cscope
cscope:
	$(RM) cscope.*
	(find $(include_paths) -name '*.h' \
		-exec realpath --relative-base=$(PWD) {} \;; \
		find . -name '*.c' \
		-exec realpath --relative-base=$(PWD) {} \;) | sort -u > cscope.files
	cscope -b