KVM: nVMX: track NMI blocking state separately for each VMCS
vmx_recover_nmi_blocking is using a cached value of the guest interruptibility info, which is stored in vmx->nmi_known_unmasked. vmx_recover_nmi_blocking is run for both normal and nested guests, so the cached value must be per-VMCS.

This fixes eventinj.flat in a nested non-EPT environment. With EPT it works, because the EPT violation handler doesn't have the vmx->nmi_known_unmasked optimization (it is unnecessary because, unlike vmx_recover_nmi_blocking, it can just look at the exit qualification).

Thanks to Wanpeng Li for debugging the testcase and providing an initial patch.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
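The hunks below move nmi_known_unmasked from struct vcpu_vmx into struct loaded_vmcs, so each VMCS (vmcs01 for the ordinary guest, vmcs02 for the nested guest) carries its own cache. What follows is only a minimal user-space sketch of that idea, not kernel code: the VMREAD of GUEST_INTERRUPTIBILITY_INFO is simulated by a plain guest_intr_state_nmi field, and both structs are stripped down to the fields that matter here.

/*
 * Toy model of the per-VMCS NMI-blocking cache. Names mirror the patch,
 * but the "hardware" state is just a field instead of a real VMCS.
 */
#include <stdbool.h>
#include <stdio.h>

struct loaded_vmcs {
        bool guest_intr_state_nmi;  /* stand-in for GUEST_INTR_STATE_NMI in the VMCS */
        bool nmi_known_unmasked;    /* cache: "we already know NMIs are not blocked" */
};

struct vcpu_vmx {
        struct loaded_vmcs vmcs01;  /* VMCS used for the ordinary (L1) guest */
        struct loaded_vmcs vmcs02;  /* VMCS used for the nested (L2) guest */
        struct loaded_vmcs *loaded_vmcs;
};

/* Mirrors vmx_get_nmi_mask() after the patch: consult and refresh the
 * cache of the currently loaded VMCS only. */
static bool vmx_get_nmi_mask(struct vcpu_vmx *vmx)
{
        bool masked;

        if (vmx->loaded_vmcs->nmi_known_unmasked)
                return false;
        masked = vmx->loaded_vmcs->guest_intr_state_nmi;
        vmx->loaded_vmcs->nmi_known_unmasked = !masked;
        return masked;
}

int main(void)
{
        struct vcpu_vmx vmx = { 0 };

        /* L1 running, NMIs not blocked: the vmcs01 cache learns "unmasked". */
        vmx.loaded_vmcs = &vmx.vmcs01;
        printf("vmcs01 NMI masked: %d\n", vmx_get_nmi_mask(&vmx));

        /* Switch to the nested guest's VMCS, where an NMI is blocked.
         * Because the cache lives in struct loaded_vmcs, vmcs02 starts
         * with its own cold cache and the blocked NMI is still reported. */
        vmx.vmcs02.guest_intr_state_nmi = true;
        vmx.loaded_vmcs = &vmx.vmcs02;
        printf("vmcs02 NMI masked: %d\n", vmx_get_nmi_mask(&vmx));
        return 0;
}

The second call still reports the NMI as masked because vmcs02 has not yet recorded anything in its cache; a single per-vCPU flag, as before the patch, could carry stale "known unmasked" state across the VMCS switch.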
parent f85c758dbe
commit 4c4a6f790e
@@ -198,7 +198,8 @@ struct loaded_vmcs {
        struct vmcs *vmcs;
        struct vmcs *shadow_vmcs;
        int cpu;
-       int launched;
+       bool launched;
+       bool nmi_known_unmasked;
        struct list_head loaded_vmcss_on_cpu_link;
 };

@@ -5510,10 +5511,8 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);

-       if (!is_guest_mode(vcpu)) {
-               ++vcpu->stat.nmi_injections;
-               vmx->nmi_known_unmasked = false;
-       }
+       ++vcpu->stat.nmi_injections;
+       vmx->loaded_vmcs->nmi_known_unmasked = false;

        if (vmx->rmode.vm86_active) {
                if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
@@ -5527,16 +5526,21 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)

 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
 {
-       if (to_vmx(vcpu)->nmi_known_unmasked)
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       bool masked;
+
+       if (vmx->loaded_vmcs->nmi_known_unmasked)
                return false;
-       return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
+       masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
+       vmx->loaded_vmcs->nmi_known_unmasked = !masked;
+       return masked;
 }

 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);

-       vmx->nmi_known_unmasked = !masked;
+       vmx->loaded_vmcs->nmi_known_unmasked = !masked;
        if (masked)
                vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
                              GUEST_INTR_STATE_NMI);
@@ -8736,7 +8740,7 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)

        idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;

-       if (vmx->nmi_known_unmasked)
+       if (vmx->loaded_vmcs->nmi_known_unmasked)
                return;
        /*
         * Can't use vmx->exit_intr_info since we're not sure what
@@ -8760,7 +8764,7 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
                vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
                              GUEST_INTR_STATE_NMI);
        else
-               vmx->nmi_known_unmasked =
+               vmx->loaded_vmcs->nmi_known_unmasked =
                        !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
                          & GUEST_INTR_STATE_NMI);
 }