@@ -1580,7 +1580,7 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
15801580 vmcs_writel (GUEST_RFLAGS , rflags );
15811581
15821582 if ((old_rflags ^ vmx -> rflags ) & X86_EFLAGS_VM )
1583- vmx -> emulation_required = vmx_emulation_required (vcpu );
1583+ vmx -> vt . emulation_required = vmx_emulation_required (vcpu );
15841584}
15851585
15861586bool vmx_get_if_flag (struct kvm_vcpu * vcpu )
@@ -1862,7 +1862,7 @@ void vmx_inject_exception(struct kvm_vcpu *vcpu)
18621862 return ;
18631863 }
18641864
1865- WARN_ON_ONCE (vmx -> emulation_required );
1865+ WARN_ON_ONCE (vmx -> vt . emulation_required );
18661866
18671867 if (kvm_exception_is_soft (ex -> vector )) {
18681868 vmcs_write32 (VM_ENTRY_INSTRUCTION_LEN ,
@@ -3391,7 +3391,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
33913391 }
33923392
33933393 /* depends on vcpu->arch.cr0 to be set to a new value */
3394- vmx -> emulation_required = vmx_emulation_required (vcpu );
3394+ vmx -> vt . emulation_required = vmx_emulation_required (vcpu );
33953395}
33963396
33973397static int vmx_get_max_ept_level (void )
@@ -3654,7 +3654,7 @@ void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
36543654{
36553655 __vmx_set_segment (vcpu , var , seg );
36563656
3657- to_vmx (vcpu )-> emulation_required = vmx_emulation_required (vcpu );
3657+ to_vmx (vcpu )-> vt . emulation_required = vmx_emulation_required (vcpu );
36583658}
36593659
36603660void vmx_get_cs_db_l_bits (struct kvm_vcpu * vcpu , int * db , int * l )
@@ -5800,7 +5800,7 @@ static bool vmx_emulation_required_with_pending_exception(struct kvm_vcpu *vcpu)
58005800{
58015801 struct vcpu_vmx * vmx = to_vmx (vcpu );
58025802
5803- return vmx -> emulation_required && !vmx -> rmode .vm86_active &&
5803+ return vmx -> vt . emulation_required && !vmx -> rmode .vm86_active &&
58045804 (kvm_is_exception_pending (vcpu ) || vcpu -> arch .exception .injected );
58055805}
58065806
@@ -5813,7 +5813,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
58135813 intr_window_requested = exec_controls_get (vmx ) &
58145814 CPU_BASED_INTR_WINDOW_EXITING ;
58155815
5816- while (vmx -> emulation_required && count -- != 0 ) {
5816+ while (vmx -> vt . emulation_required && count -- != 0 ) {
58175817 if (intr_window_requested && !vmx_interrupt_blocked (vcpu ))
58185818 return handle_interrupt_window (& vmx -> vcpu );
58195819
@@ -6460,7 +6460,7 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
64606460 * the least awful solution for the userspace case without
64616461 * risking false positives.
64626462 */
6463- if (vmx -> emulation_required ) {
6463+ if (vmx -> vt . emulation_required ) {
64646464 nested_vmx_vmexit (vcpu , EXIT_REASON_TRIPLE_FAULT , 0 , 0 );
64656465 return 1 ;
64666466 }
@@ -6470,7 +6470,7 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
64706470 }
64716471
64726472 /* If guest state is invalid, start emulating. L2 is handled above. */
6473- if (vmx -> emulation_required )
6473+ if (vmx -> vt . emulation_required )
64746474 return handle_invalid_guest_state (vcpu );
64756475
64766476 if (exit_reason .failed_vmentry ) {
@@ -6963,7 +6963,7 @@ void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
69636963{
69646964 struct vcpu_vmx * vmx = to_vmx (vcpu );
69656965
6966- if (vmx -> emulation_required )
6966+ if (vmx -> vt . emulation_required )
69676967 return ;
69686968
69696969 if (vmx_get_exit_reason (vcpu ).basic == EXIT_REASON_EXTERNAL_INTERRUPT )
@@ -7286,7 +7286,7 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
72867286 * start emulation until we arrive back to a valid state. Synthesize a
72877287 * consistency check VM-Exit due to invalid guest state and bail.
72887288 */
7289- if (unlikely (vmx -> emulation_required )) {
7289+ if (unlikely (vmx -> vt . emulation_required )) {
72907290 vmx -> fail = 0 ;
72917291
72927292 vmx -> vt .exit_reason .full = EXIT_REASON_INVALID_STATE ;