@@ -570,6 +570,11 @@ static void __svm_write_tsc_multiplier(u64 multiplier)
 	__this_cpu_write(current_tsc_ratio, multiplier);
 }
572572
+static __always_inline struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
+{
+	return page_address(sd->save_area) + 0x400;
+}
+
 static inline void kvm_cpu_svm_disable(void)
 {
 	uint64_t efer;
@@ -674,12 +679,9 @@ static int svm_hardware_enable(void)
 	 * TSC_AUX field now to avoid a RDMSR on every vCPU run.
 	 */
 	if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
-		struct sev_es_save_area *hostsa;
 		u32 __maybe_unused msr_hi;
 
-		hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
-
-		rdmsr(MSR_TSC_AUX, hostsa->tsc_aux, msr_hi);
+		rdmsr(MSR_TSC_AUX, sev_es_host_save_area(sd)->tsc_aux, msr_hi);
 	}
 
 	return 0;
@@ -704,7 +706,7 @@ static int svm_cpu_init(int cpu)
 	int ret = -ENOMEM;
 
 	memset(sd, 0, sizeof(struct svm_cpu_data));
-	sd->save_area = snp_safe_alloc_page(NULL);
+	sd->save_area = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
 	if (!sd->save_area)
 		return ret;
 
@@ -1430,7 +1432,7 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
 	svm = to_svm(vcpu);
 
 	err = -ENOMEM;
-	vmcb01_page = snp_safe_alloc_page(vcpu);
+	vmcb01_page = snp_safe_alloc_page();
 	if (!vmcb01_page)
 		goto out;
 
@@ -1439,7 +1441,7 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
 	 * SEV-ES guests require a separate VMSA page used to contain
 	 * the encrypted register state of the guest.
 	 */
-	vmsa_page = snp_safe_alloc_page(vcpu);
+	vmsa_page = snp_safe_alloc_page();
 	if (!vmsa_page)
 		goto error_free_vmcb_page;
 	}
@@ -1504,11 +1506,6 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
 	__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
 }
 
-static struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
-{
-	return page_address(sd->save_area) + 0x400;
-}
-
 static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -3882,16 +3879,27 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	/*
-	 * KVM should never request an NMI window when vNMI is enabled, as KVM
-	 * allows at most one to-be-injected NMI and one pending NMI, i.e. if
-	 * two NMIs arrive simultaneously, KVM will inject one and set
-	 * V_NMI_PENDING for the other.  WARN, but continue with the standard
-	 * single-step approach to try and salvage the pending NMI.
+	 * If NMIs are outright masked, i.e. the vCPU is already handling an
+	 * NMI, and KVM has not yet intercepted an IRET, then there is nothing
+	 * more to do at this time as KVM has already enabled IRET intercepts.
+	 * If KVM has already intercepted IRET, then single-step over the IRET,
+	 * as NMIs aren't architecturally unmasked until the IRET completes.
+	 *
+	 * If vNMI is enabled, KVM should never request an NMI window if NMIs
+	 * are masked, as KVM allows at most one to-be-injected NMI and one
+	 * pending NMI.  If two NMIs arrive simultaneously, KVM will inject one
+	 * NMI and set V_NMI_PENDING for the other, but if and only if NMIs are
+	 * unmasked.  KVM _will_ request an NMI window in some situations, e.g.
+	 * if the vCPU is in an STI shadow or if GIF=0, KVM can't immediately
+	 * inject the NMI.  In those situations, KVM needs to single-step over
+	 * the STI shadow or intercept STGI.
 	 */
-	WARN_ON_ONCE(is_vnmi_enabled(svm));
+	if (svm_get_nmi_mask(vcpu)) {
+		WARN_ON_ONCE(is_vnmi_enabled(svm));
 
-	if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion)
-		return; /* IRET will cause a vm exit */
+		if (!svm->awaiting_iret_completion)
+			return; /* IRET will cause a vm exit */
+	}
 
 	/*
 	 * SEV-ES guests are responsible for signaling when a vCPU is ready to
@@ -4959,7 +4967,7 @@ static int svm_vm_init(struct kvm *kvm)
 
 static void *svm_alloc_apic_backing_page(struct kvm_vcpu *vcpu)
 {
-	struct page *page = snp_safe_alloc_page(vcpu);
+	struct page *page = snp_safe_alloc_page();
 
 	if (!page)
 		return NULL;
0 commit comments