KVM/riscv changes for 5.18

- Prevent KVM_COMPAT from being selected
- Refine __kvm_riscv_switch_to() implementation
- RISC-V SBI v0.3 support
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEZdn75s5e6LHDQ+f/rUjsVaLHLAcFAmIrffAACgkQrUjsVaLH
 LAfJ3Q//W5mT0eREMjS0yylTVseFbn/qNRWbHiWDmLJEnjBz6BjHvDKB/hwbrQI2
 xCxZhhImIToIojtvXvCXLTqnpIrxYahi/a2N6B6UfYI7RXXQmaDKFEmu9rjdZQnq
 A3zr19U8CLEVleGNCI9zYETBhm8HMUCv/skwQNMyj7cdB5j8m+pt76LcSt25Df+z
 Jb8bpoIn0OuSgvEn8jy/hj6O7Lyij4m6gDlpuRK/vDx8BAe14+XGdqUAqvKPEwTu
 K58tnBG/ShjZWDHmkDRibgK7n/4R5auNe9Rb9+V3MdbkjFWU75cNk1bZpYESpvrP
 Xh4YSqnIR8asm7sf77KcJeJvqXvWqoqevLAb64GH21qg5IKfWlRSJcqojWVJfrh1
 5aEOt8l2U9x7eXgpfWqHwPbnVfRT3ahN1q+78GNp83etcxULX8B6mmjFO1DiideJ
 g0BU7wMm0gq2SvDLuKvVMN7d9Q0txvZuWEZuZ3Hf598aWtdAhe2kNECHHjFQGunm
 XRJvNuthDDDtNDXgaeYYT4Xbfaar4YB1NPhCtfcUcgwQoRG8wMkUjVeDX57H4uT6
 Xl3RBW0Qbw0k5f0bXk+IKBOd/8kUBsB35yV0wXo0CgU4PjYqIJAbyuDF6AkphEZm
 jQjedQksrR+H9lUeAeEE4n/xe84zWhoF/e/ADNBhSZ3jBOHoKBQ=
 =CzxN
 -----END PGP SIGNATURE-----

Merge tag 'kvm-riscv-5.18-1' of https://github.com/kvm-riscv/linux into HEAD

KVM/riscv changes for 5.18

- Prevent KVM_COMPAT from being selected
- Refine __kvm_riscv_switch_to() implementation
- RISC-V SBI v0.3 support
Paolo Bonzini 2022-03-15 17:20:25 -04:00
commit cf5019816d
11 changed files with 161 additions and 59 deletions
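For orientation before the diff: the SBI v0.3 items above add KVM handlers for the HSM HART_SUSPEND and SRST (system reset) extensions. Below is a minimal, illustrative sketch of the guest-side calls those handlers service, assuming the standard SBI calling convention (extension ID in a7, function ID in a6, arguments in a0-a5, error/value returned in a0/a1). The constant values follow the SBI v0.3 specification; the helper names are invented for this sketch and are not part of the commit.

#define SBI_EXT_HSM			0x48534D
#define SBI_EXT_HSM_HART_SUSPEND	3
#define SBI_EXT_SRST			0x53525354
#define SBI_EXT_SRST_RESET		0
#define SBI_SRST_RESET_TYPE_SHUTDOWN	0

/* Generic SBI call: EID in a7, FID in a6, args in a0/a1, error returned in a0. */
static long sbi_ecall2(unsigned long eid, unsigned long fid,
		       unsigned long arg0, unsigned long arg1)
{
	register unsigned long a7 asm("a7") = eid;
	register unsigned long a6 asm("a6") = fid;
	register unsigned long a0 asm("a0") = arg0;
	register unsigned long a1 asm("a1") = arg1;

	asm volatile("ecall"
		     : "+r" (a0), "+r" (a1)
		     : "r" (a6), "r" (a7)
		     : "memory");
	return (long)a0;
}

/* Default retentive suspend: KVM emulates this like a WFI on the vCPU. */
static long guest_hart_suspend_default(void)
{
	return sbi_ecall2(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND, 0, 0);
}

/* System shutdown: KVM forwards this to the VMM as a KVM_EXIT_SYSTEM_EVENT. */
static long guest_system_shutdown(void)
{
	return sbi_ecall2(SBI_EXT_SRST, SBI_EXT_SRST_RESET,
			  SBI_SRST_RESET_TYPE_SHUTDOWN, 0 /* reason: none */);
}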


@@ -228,6 +228,7 @@ void kvm_riscv_stage2_vmid_update(struct kvm_vcpu *vcpu);
 void __kvm_riscv_unpriv_trap(void);
 
+void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu);
 unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,


@@ -12,7 +12,7 @@
 #define KVM_SBI_IMPID 3
 
 #define KVM_SBI_VERSION_MAJOR 0
-#define KVM_SBI_VERSION_MINOR 2
+#define KVM_SBI_VERSION_MINOR 3
 
 struct kvm_vcpu_sbi_extension {
	unsigned long extid_start;
@@ -28,6 +28,9 @@ struct kvm_vcpu_sbi_extension {
 };
 
 void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
+void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
+				     struct kvm_run *run,
+				     u32 type, u64 flags);
 const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid);
 
 #endif /* __RISCV_KVM_VCPU_SBI_H__ */


@@ -71,15 +71,32 @@ enum sbi_ext_hsm_fid {
	SBI_EXT_HSM_HART_START = 0,
	SBI_EXT_HSM_HART_STOP,
	SBI_EXT_HSM_HART_STATUS,
+	SBI_EXT_HSM_HART_SUSPEND,
 };
 
-enum sbi_hsm_hart_status {
-	SBI_HSM_HART_STATUS_STARTED = 0,
-	SBI_HSM_HART_STATUS_STOPPED,
-	SBI_HSM_HART_STATUS_START_PENDING,
-	SBI_HSM_HART_STATUS_STOP_PENDING,
+enum sbi_hsm_hart_state {
+	SBI_HSM_STATE_STARTED = 0,
+	SBI_HSM_STATE_STOPPED,
+	SBI_HSM_STATE_START_PENDING,
+	SBI_HSM_STATE_STOP_PENDING,
+	SBI_HSM_STATE_SUSPENDED,
+	SBI_HSM_STATE_SUSPEND_PENDING,
+	SBI_HSM_STATE_RESUME_PENDING,
 };
 
+#define SBI_HSM_SUSP_BASE_MASK			0x7fffffff
+#define SBI_HSM_SUSP_NON_RET_BIT		0x80000000
+#define SBI_HSM_SUSP_PLAT_BASE			0x10000000
+
+#define SBI_HSM_SUSPEND_RET_DEFAULT		0x00000000
+#define SBI_HSM_SUSPEND_RET_PLATFORM		SBI_HSM_SUSP_PLAT_BASE
+#define SBI_HSM_SUSPEND_RET_LAST		SBI_HSM_SUSP_BASE_MASK
+#define SBI_HSM_SUSPEND_NON_RET_DEFAULT		SBI_HSM_SUSP_NON_RET_BIT
+#define SBI_HSM_SUSPEND_NON_RET_PLATFORM	(SBI_HSM_SUSP_NON_RET_BIT | \
+						 SBI_HSM_SUSP_PLAT_BASE)
+#define SBI_HSM_SUSPEND_NON_RET_LAST		(SBI_HSM_SUSP_NON_RET_BIT | \
+						 SBI_HSM_SUSP_BASE_MASK)
+
 enum sbi_ext_srst_fid {
	SBI_EXT_SRST_RESET = 0,
 };
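Not part of the patch, but as a worked illustration of the suspend_type encoding that the masks above describe: bit 31 selects a non-retentive suspend, and values at or above SBI_HSM_SUSP_PLAT_BASE within the base range are platform-specific. These helpers are assumed for the sketch only.

/*
 * Illustrative helpers (not in this commit): classify an HSM suspend_type
 * value using the masks defined above.
 */
static inline bool sbi_hsm_suspend_is_retentive(u32 suspend_type)
{
	/* Retentive suspend types have the non-retentive bit (bit 31) clear. */
	return !(suspend_type & SBI_HSM_SUSP_NON_RET_BIT);
}

static inline bool sbi_hsm_suspend_is_platform(u32 suspend_type)
{
	/* Platform-specific types occupy the top of each base range. */
	return (suspend_type & SBI_HSM_SUSP_BASE_MASK) >= SBI_HSM_SUSP_PLAT_BASE;
}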


@@ -111,7 +111,7 @@ static int sbi_cpu_is_stopped(unsigned int cpuid)
 
	rc = sbi_hsm_hart_get_status(hartid);
 
-	if (rc == SBI_HSM_HART_STATUS_STOPPED)
+	if (rc == SBI_HSM_STATE_STOPPED)
		return 0;
	return rc;
 }


@@ -144,12 +144,7 @@ static int system_opcode_insn(struct kvm_vcpu *vcpu,
 {
	if ((insn & INSN_MASK_WFI) == INSN_MATCH_WFI) {
		vcpu->stat.wfi_exit_stat++;
-		if (!kvm_arch_vcpu_runnable(vcpu)) {
-			srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
-			kvm_vcpu_halt(vcpu);
-			vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
-		}
+		kvm_riscv_vcpu_wfi(vcpu);
		vcpu->arch.guest_context.sepc += INSN_LEN(insn);
		return 1;
	}
@@ -453,6 +448,21 @@ static int stage2_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
	return 1;
 }
 
+/**
+ * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
+ *
+ * @vcpu: The VCPU pointer
+ */
+void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
+{
+	if (!kvm_arch_vcpu_runnable(vcpu)) {
+		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+		kvm_vcpu_halt(vcpu);
+		vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+	}
+}
+
 /**
  * kvm_riscv_vcpu_unpriv_read -- Read machine word from Guest memory
  *


@@ -45,6 +45,7 @@ extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
@@ -55,6 +56,7 @@ static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
	&vcpu_sbi_ext_time,
	&vcpu_sbi_ext_ipi,
	&vcpu_sbi_ext_rfence,
+	&vcpu_sbi_ext_srst,
	&vcpu_sbi_ext_hsm,
	&vcpu_sbi_ext_experimental,
	&vcpu_sbi_ext_vendor,
@@ -79,6 +81,23 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
	run->riscv_sbi.ret[1] = cp->a1;
 }
 
+void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
+				     struct kvm_run *run,
+				     u32 type, u64 flags)
+{
+	unsigned long i;
+	struct kvm_vcpu *tmp;
+
+	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
+		tmp->arch.power_off = true;
+	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
+
+	memset(&run->system_event, 0, sizeof(run->system_event));
+	run->system_event.type = type;
+	run->system_event.flags = flags;
+	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+}
+
 int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;


@@ -60,9 +60,11 @@ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
	if (!target_vcpu)
		return -EINVAL;
	if (!target_vcpu->arch.power_off)
-		return SBI_HSM_HART_STATUS_STARTED;
+		return SBI_HSM_STATE_STARTED;
+	else if (vcpu->stat.generic.blocking)
+		return SBI_HSM_STATE_SUSPENDED;
	else
-		return SBI_HSM_HART_STATUS_STOPPED;
+		return SBI_HSM_STATE_STOPPED;
 }
 
 static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
@@ -91,6 +93,18 @@ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
			ret = 0;
		}
		break;
+	case SBI_EXT_HSM_HART_SUSPEND:
+		switch (cp->a0) {
+		case SBI_HSM_SUSPEND_RET_DEFAULT:
+			kvm_riscv_vcpu_wfi(vcpu);
+			break;
+		case SBI_HSM_SUSPEND_NON_RET_DEFAULT:
+			ret = -EOPNOTSUPP;
+			break;
+		default:
+			ret = -EINVAL;
+		}
+		break;
	default:
		ret = -EOPNOTSUPP;
	}


@@ -130,3 +130,47 @@ const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence = {
	.extid_end = SBI_EXT_RFENCE,
	.handler = kvm_sbi_ext_rfence_handler,
 };
+
+static int kvm_sbi_ext_srst_handler(struct kvm_vcpu *vcpu,
+				    struct kvm_run *run,
+				    unsigned long *out_val,
+				    struct kvm_cpu_trap *utrap, bool *exit)
+{
+	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+	unsigned long funcid = cp->a6;
+	u32 reason = cp->a1;
+	u32 type = cp->a0;
+	int ret = 0;
+
+	switch (funcid) {
+	case SBI_EXT_SRST_RESET:
+		switch (type) {
+		case SBI_SRST_RESET_TYPE_SHUTDOWN:
+			kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
+						KVM_SYSTEM_EVENT_SHUTDOWN,
+						reason);
+			*exit = true;
+			break;
+		case SBI_SRST_RESET_TYPE_COLD_REBOOT:
+		case SBI_SRST_RESET_TYPE_WARM_REBOOT:
+			kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
+						KVM_SYSTEM_EVENT_RESET,
+						reason);
+			*exit = true;
+			break;
+		default:
+			ret = -EOPNOTSUPP;
+		}
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+	}
+
+	return ret;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst = {
+	.extid_start = SBI_EXT_SRST,
+	.extid_end = SBI_EXT_SRST,
+	.handler = kvm_sbi_ext_srst_handler,
+};


@@ -14,21 +14,6 @@
 #include <asm/kvm_vcpu_timer.h>
 #include <asm/kvm_vcpu_sbi.h>
 
-static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu,
-				    struct kvm_run *run, u32 type)
-{
-	unsigned long i;
-	struct kvm_vcpu *tmp;
-
-	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
-		tmp->arch.power_off = true;
-	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
-
-	memset(&run->system_event, 0, sizeof(run->system_event));
-	run->system_event.type = type;
-	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
-}
-
 static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   unsigned long *out_val,
				   struct kvm_cpu_trap *utrap,
@@ -80,7 +65,8 @@ static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
		}
		break;
	case SBI_EXT_0_1_SHUTDOWN:
-		kvm_sbi_system_shutdown(vcpu, run, KVM_SYSTEM_EVENT_SHUTDOWN);
+		kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
+						KVM_SYSTEM_EVENT_SHUTDOWN, 0);
		*exit = true;
		break;
	case SBI_EXT_0_1_REMOTE_FENCE_I:
@@ -111,7 +97,7 @@ static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
	default:
		ret = -EINVAL;
		break;
-	};
+	}
 
	return ret;
 }


@@ -41,33 +41,37 @@ ENTRY(__kvm_riscv_switch_to)
	REG_S	s10, (KVM_ARCH_HOST_S10)(a0)
	REG_S	s11, (KVM_ARCH_HOST_S11)(a0)
 
-	/* Save Host and Restore Guest SSTATUS */
+	/* Load Guest CSR values */
	REG_L	t0, (KVM_ARCH_GUEST_SSTATUS)(a0)
+	REG_L	t1, (KVM_ARCH_GUEST_HSTATUS)(a0)
+	REG_L	t2, (KVM_ARCH_GUEST_SCOUNTEREN)(a0)
+	la	t4, __kvm_switch_return
+	REG_L	t5, (KVM_ARCH_GUEST_SEPC)(a0)
+
+	/* Save Host and Restore Guest SSTATUS */
	csrrw	t0, CSR_SSTATUS, t0
-	REG_S	t0, (KVM_ARCH_HOST_SSTATUS)(a0)
 
	/* Save Host and Restore Guest HSTATUS */
-	REG_L	t1, (KVM_ARCH_GUEST_HSTATUS)(a0)
	csrrw	t1, CSR_HSTATUS, t1
-	REG_S	t1, (KVM_ARCH_HOST_HSTATUS)(a0)
 
	/* Save Host and Restore Guest SCOUNTEREN */
-	REG_L	t2, (KVM_ARCH_GUEST_SCOUNTEREN)(a0)
	csrrw	t2, CSR_SCOUNTEREN, t2
-	REG_S	t2, (KVM_ARCH_HOST_SCOUNTEREN)(a0)
+
+	/* Save Host STVEC and change it to return path */
+	csrrw	t4, CSR_STVEC, t4
 
	/* Save Host SSCRATCH and change it to struct kvm_vcpu_arch pointer */
	csrrw	t3, CSR_SSCRATCH, a0
-	REG_S	t3, (KVM_ARCH_HOST_SSCRATCH)(a0)
-
-	/* Save Host STVEC and change it to return path */
-	la	t4, __kvm_switch_return
-	csrrw	t4, CSR_STVEC, t4
-	REG_S	t4, (KVM_ARCH_HOST_STVEC)(a0)
 
	/* Restore Guest SEPC */
-	REG_L	t0, (KVM_ARCH_GUEST_SEPC)(a0)
-	csrw	CSR_SEPC, t0
+	csrw	CSR_SEPC, t5
+
+	/* Store Host CSR values */
+	REG_S	t0, (KVM_ARCH_HOST_SSTATUS)(a0)
+	REG_S	t1, (KVM_ARCH_HOST_HSTATUS)(a0)
+	REG_S	t2, (KVM_ARCH_HOST_SCOUNTEREN)(a0)
+	REG_S	t3, (KVM_ARCH_HOST_SSCRATCH)(a0)
+	REG_S	t4, (KVM_ARCH_HOST_STVEC)(a0)
 
	/* Restore Guest GPRs (except A0) */
	REG_L	ra, (KVM_ARCH_GUEST_RA)(a0)
@@ -145,32 +149,36 @@ __kvm_switch_return:
	REG_S	t5, (KVM_ARCH_GUEST_T5)(a0)
	REG_S	t6, (KVM_ARCH_GUEST_T6)(a0)
 
+	/* Load Host CSR values */
+	REG_L	t1, (KVM_ARCH_HOST_STVEC)(a0)
+	REG_L	t2, (KVM_ARCH_HOST_SSCRATCH)(a0)
+	REG_L	t3, (KVM_ARCH_HOST_SCOUNTEREN)(a0)
+	REG_L	t4, (KVM_ARCH_HOST_HSTATUS)(a0)
+	REG_L	t5, (KVM_ARCH_HOST_SSTATUS)(a0)
+
	/* Save Guest SEPC */
	csrr	t0, CSR_SEPC
-	REG_S	t0, (KVM_ARCH_GUEST_SEPC)(a0)
-
-	/* Restore Host STVEC */
-	REG_L	t1, (KVM_ARCH_HOST_STVEC)(a0)
-	csrw	CSR_STVEC, t1
 
	/* Save Guest A0 and Restore Host SSCRATCH */
-	REG_L	t2, (KVM_ARCH_HOST_SSCRATCH)(a0)
	csrrw	t2, CSR_SSCRATCH, t2
-	REG_S	t2, (KVM_ARCH_GUEST_A0)(a0)
+
+	/* Restore Host STVEC */
+	csrw	CSR_STVEC, t1
 
	/* Save Guest and Restore Host SCOUNTEREN */
-	REG_L	t3, (KVM_ARCH_HOST_SCOUNTEREN)(a0)
	csrrw	t3, CSR_SCOUNTEREN, t3
-	REG_S	t3, (KVM_ARCH_GUEST_SCOUNTEREN)(a0)
 
	/* Save Guest and Restore Host HSTATUS */
-	REG_L	t4, (KVM_ARCH_HOST_HSTATUS)(a0)
	csrrw	t4, CSR_HSTATUS, t4
-	REG_S	t4, (KVM_ARCH_GUEST_HSTATUS)(a0)
 
	/* Save Guest and Restore Host SSTATUS */
-	REG_L	t5, (KVM_ARCH_HOST_SSTATUS)(a0)
	csrrw	t5, CSR_SSTATUS, t5
+
+	/* Store Guest CSR values */
+	REG_S	t0, (KVM_ARCH_GUEST_SEPC)(a0)
+	REG_S	t2, (KVM_ARCH_GUEST_A0)(a0)
+	REG_S	t3, (KVM_ARCH_GUEST_SCOUNTEREN)(a0)
+	REG_S	t4, (KVM_ARCH_GUEST_HSTATUS)(a0)
	REG_S	t5, (KVM_ARCH_GUEST_SSTATUS)(a0)
 
	/* Restore Host GPRs (except A0 and T0-T6) */


@@ -53,7 +53,7 @@ config KVM_GENERIC_DIRTYLOG_READ_PROTECT
 
 config KVM_COMPAT
	def_bool y
-	depends on KVM && COMPAT && !(S390 || ARM64)
+	depends on KVM && COMPAT && !(S390 || ARM64 || RISCV)
 
 config HAVE_KVM_IRQ_BYPASS
	bool
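
For context on the new kvm_riscv_vcpu_sbi_system_reset() path above: it turns guest SRST (and legacy SBI v0.1 shutdown) calls into KVM_EXIT_SYSTEM_EVENT exits that userspace is expected to consume. A rough userspace-side sketch, assumed rather than taken from this commit, with illustrative function names and the error handling of a real vCPU run loop elided:

#include <linux/kvm.h>
#include <stdbool.h>
#include <stdio.h>

/* Returns false when the vCPU run loop should stop. */
static bool handle_vcpu_exit(struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_SYSTEM_EVENT)
		return true;	/* other exit reasons handled elsewhere */

	switch (run->system_event.type) {
	case KVM_SYSTEM_EVENT_SHUTDOWN:
		/* In this series, system_event.flags carries the SBI reset reason. */
		printf("guest requested shutdown (reason %llu)\n",
		       (unsigned long long)run->system_event.flags);
		return false;
	case KVM_SYSTEM_EVENT_RESET:
		printf("guest requested reset (reason %llu)\n",
		       (unsigned long long)run->system_event.flags);
		return false;	/* the VMM decides whether to reboot the guest */
	default:
		return true;
	}
}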