Skip to content

Commit 020ef96

Browse files
committed
Merge: KVM bugfix update for 9.5
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/4972

# Merge Request Required Information

## Summary of Changes

Bug fixes:
- miscellaneous patches for stable up to 6.11-rc2
- Make SVM per-CPU save_area allocations NUMA-aware.
- Force sev_es_host_save_area() to be inlined to avoid calling into an instrumentable function from noinstr code.
- final SEV-SNP update from 6.11-rc2

## Approved Development Ticket

```
JIRA: https://issues.redhat.com/browse/RHEL-32435

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
```

Approved-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Approved-by: Maxim Levitsky <mlevitsk@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>

Merged-by: Lucas Zampieri <lzampier@redhat.com>
2 parents d8d0169 + 2f37f74 commit 020ef96

File tree

18 files changed

+255
-186
lines changed

18 files changed

+255
-186
lines changed

arch/x86/kvm/Kconfig

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -139,8 +139,8 @@ config KVM_AMD_SEV
139139
depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
140140
select ARCH_HAS_CC_PLATFORM
141141
select KVM_GENERIC_PRIVATE_MEM
142-
select HAVE_KVM_GMEM_PREPARE
143-
select HAVE_KVM_GMEM_INVALIDATE
142+
select HAVE_KVM_ARCH_GMEM_PREPARE
143+
select HAVE_KVM_ARCH_GMEM_INVALIDATE
144144
help
145145
Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
146146
with Encrypted State (SEV-ES) on AMD processors.

arch/x86/kvm/lapic.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1732,7 +1732,7 @@ static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
17321732
s64 min_period = min_timer_period_us * 1000LL;
17331733

17341734
if (apic->lapic_timer.period < min_period) {
1735-
pr_info_ratelimited(
1735+
pr_info_once(
17361736
"vcpu %i: requested %lld ns "
17371737
"lapic timer period limited to %lld ns\n",
17381738
apic->vcpu->vcpu_id,

arch/x86/kvm/mmu/mmu.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7430,7 +7430,7 @@ static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
74307430
const unsigned long end = start + KVM_PAGES_PER_HPAGE(level);
74317431

74327432
if (level == PG_LEVEL_2M)
7433-
return kvm_range_has_memory_attributes(kvm, start, end, attrs);
7433+
return kvm_range_has_memory_attributes(kvm, start, end, ~0, attrs);
74347434

74357435
for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) {
74367436
if (hugepage_test_mixed(slot, gfn, level - 1) ||

arch/x86/kvm/svm/nested.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1181,7 +1181,7 @@ int svm_allocate_nested(struct vcpu_svm *svm)
11811181
if (svm->nested.initialized)
11821182
return 0;
11831183

1184-
vmcb02_page = snp_safe_alloc_page(&svm->vcpu);
1184+
vmcb02_page = snp_safe_alloc_page();
11851185
if (!vmcb02_page)
11861186
return -ENOMEM;
11871187
svm->nested.vmcb02.ptr = page_address(vmcb02_page);

arch/x86/kvm/svm/sev.c

Lines changed: 4 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -2202,18 +2202,11 @@ static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pf
22022202
bool assigned;
22032203
int level;
22042204

2205-
if (!kvm_mem_is_private(kvm, gfn)) {
2206-
pr_debug("%s: Failed to ensure GFN 0x%llx has private memory attribute set\n",
2207-
__func__, gfn);
2208-
ret = -EINVAL;
2209-
goto err;
2210-
}
2211-
22122205
ret = snp_lookup_rmpentry((u64)pfn + i, &assigned, &level);
22132206
if (ret || assigned) {
22142207
pr_debug("%s: Failed to ensure GFN 0x%llx RMP entry is initial shared state, ret: %d assigned: %d\n",
22152208
__func__, gfn, ret, assigned);
2216-
ret = -EINVAL;
2209+
ret = ret ? -EINVAL : -EEXIST;
22172210
goto err;
22182211
}
22192212

@@ -4459,13 +4452,13 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
44594452
}
44604453
}
44614454

4462-
struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu)
4455+
struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
44634456
{
44644457
unsigned long pfn;
44654458
struct page *p;
44664459

44674460
if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
4468-
return alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
4461+
return alloc_pages_node(node, gfp | __GFP_ZERO, 0);
44694462

44704463
/*
44714464
* Allocate an SNP-safe page to workaround the SNP erratum where
@@ -4476,7 +4469,7 @@ struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu)
44764469
* Allocate one extra page, choose a page which is not
44774470
* 2MB-aligned, and free the other.
44784471
*/
4479-
p = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1);
4472+
p = alloc_pages_node(node, gfp | __GFP_ZERO, 1);
44804473
if (!p)
44814474
return NULL;
44824475

arch/x86/kvm/svm/svm.c

Lines changed: 29 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -570,6 +570,11 @@ static void __svm_write_tsc_multiplier(u64 multiplier)
570570
__this_cpu_write(current_tsc_ratio, multiplier);
571571
}
572572

573+
static __always_inline struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
574+
{
575+
return page_address(sd->save_area) + 0x400;
576+
}
577+
573578
static inline void kvm_cpu_svm_disable(void)
574579
{
575580
uint64_t efer;
@@ -674,12 +679,9 @@ static int svm_hardware_enable(void)
674679
* TSC_AUX field now to avoid a RDMSR on every vCPU run.
675680
*/
676681
if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
677-
struct sev_es_save_area *hostsa;
678682
u32 __maybe_unused msr_hi;
679683

680-
hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
681-
682-
rdmsr(MSR_TSC_AUX, hostsa->tsc_aux, msr_hi);
684+
rdmsr(MSR_TSC_AUX, sev_es_host_save_area(sd)->tsc_aux, msr_hi);
683685
}
684686

685687
return 0;
@@ -704,7 +706,7 @@ static int svm_cpu_init(int cpu)
704706
int ret = -ENOMEM;
705707

706708
memset(sd, 0, sizeof(struct svm_cpu_data));
707-
sd->save_area = snp_safe_alloc_page(NULL);
709+
sd->save_area = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
708710
if (!sd->save_area)
709711
return ret;
710712

@@ -1430,7 +1432,7 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
14301432
svm = to_svm(vcpu);
14311433

14321434
err = -ENOMEM;
1433-
vmcb01_page = snp_safe_alloc_page(vcpu);
1435+
vmcb01_page = snp_safe_alloc_page();
14341436
if (!vmcb01_page)
14351437
goto out;
14361438

@@ -1439,7 +1441,7 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
14391441
* SEV-ES guests require a separate VMSA page used to contain
14401442
* the encrypted register state of the guest.
14411443
*/
1442-
vmsa_page = snp_safe_alloc_page(vcpu);
1444+
vmsa_page = snp_safe_alloc_page();
14431445
if (!vmsa_page)
14441446
goto error_free_vmcb_page;
14451447
}
@@ -1504,11 +1506,6 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
15041506
__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
15051507
}
15061508

1507-
static struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
1508-
{
1509-
return page_address(sd->save_area) + 0x400;
1510-
}
1511-
15121509
static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
15131510
{
15141511
struct vcpu_svm *svm = to_svm(vcpu);
@@ -3882,16 +3879,27 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
38823879
struct vcpu_svm *svm = to_svm(vcpu);
38833880

38843881
/*
3885-
* KVM should never request an NMI window when vNMI is enabled, as KVM
3886-
* allows at most one to-be-injected NMI and one pending NMI, i.e. if
3887-
* two NMIs arrive simultaneously, KVM will inject one and set
3888-
* V_NMI_PENDING for the other. WARN, but continue with the standard
3889-
* single-step approach to try and salvage the pending NMI.
3882+
* If NMIs are outright masked, i.e. the vCPU is already handling an
3883+
* NMI, and KVM has not yet intercepted an IRET, then there is nothing
3884+
* more to do at this time as KVM has already enabled IRET intercepts.
3885+
* If KVM has already intercepted IRET, then single-step over the IRET,
3886+
* as NMIs aren't architecturally unmasked until the IRET completes.
3887+
*
3888+
* If vNMI is enabled, KVM should never request an NMI window if NMIs
3889+
* are masked, as KVM allows at most one to-be-injected NMI and one
3890+
* pending NMI. If two NMIs arrive simultaneously, KVM will inject one
3891+
* NMI and set V_NMI_PENDING for the other, but if and only if NMIs are
3892+
* unmasked. KVM _will_ request an NMI window in some situations, e.g.
3893+
* if the vCPU is in an STI shadow or if GIF=0, KVM can't immediately
3894+
* inject the NMI. In those situations, KVM needs to single-step over
3895+
* the STI shadow or intercept STGI.
38903896
*/
3891-
WARN_ON_ONCE(is_vnmi_enabled(svm));
3897+
if (svm_get_nmi_mask(vcpu)) {
3898+
WARN_ON_ONCE(is_vnmi_enabled(svm));
38923899

3893-
if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion)
3894-
return; /* IRET will cause a vm exit */
3900+
if (!svm->awaiting_iret_completion)
3901+
return; /* IRET will cause a vm exit */
3902+
}
38953903

38963904
/*
38973905
* SEV-ES guests are responsible for signaling when a vCPU is ready to
@@ -4959,7 +4967,7 @@ static int svm_vm_init(struct kvm *kvm)
49594967

49604968
static void *svm_alloc_apic_backing_page(struct kvm_vcpu *vcpu)
49614969
{
4962-
struct page *page = snp_safe_alloc_page(vcpu);
4970+
struct page *page = snp_safe_alloc_page();
49634971

49644972
if (!page)
49654973
return NULL;

arch/x86/kvm/svm/svm.h

Lines changed: 15 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -726,7 +726,13 @@ void sev_guest_memory_reclaimed(struct kvm *kvm);
726726
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
727727

728728
/* These symbols are used in common code and are stubbed below. */
729-
struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu);
729+
730+
struct page *snp_safe_alloc_page_node(int node, gfp_t gfp);
731+
static inline struct page *snp_safe_alloc_page(void)
732+
{
733+
return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
734+
}
735+
730736
void sev_free_vcpu(struct kvm_vcpu *vcpu);
731737
void sev_vm_destroy(struct kvm *kvm);
732738
void __init sev_set_cpu_caps(void);
@@ -741,8 +747,14 @@ int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
741747
void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
742748
int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
743749
#else
744-
static inline struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu) {
745-
return alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
750+
static inline struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
751+
{
752+
return alloc_pages_node(node, gfp | __GFP_ZERO, 0);
753+
}
754+
755+
static inline struct page *snp_safe_alloc_page(void)
756+
{
757+
return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
746758
}
747759

748760
static inline void sev_free_vcpu(struct kvm_vcpu *vcpu) {}

arch/x86/kvm/x86.c

Lines changed: 6 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -10733,13 +10733,12 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
1073310733

1073410734
bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
1073510735

10736+
static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
10737+
1073610738
if (irqchip_split(vcpu->kvm))
1073710739
kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
10738-
else {
10739-
static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
10740-
if (ioapic_in_kernel(vcpu->kvm))
10741-
kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
10742-
}
10740+
else if (ioapic_in_kernel(vcpu->kvm))
10741+
kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
1074310742

1074410743
if (is_guest_mode(vcpu))
1074510744
vcpu->arch.load_eoi_exitmap_pending = true;
@@ -13616,19 +13615,14 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
1361613615
}
1361713616
EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
1361813617

13619-
#ifdef CONFIG_HAVE_KVM_GMEM_PREPARE
13620-
bool kvm_arch_gmem_prepare_needed(struct kvm *kvm)
13621-
{
13622-
return kvm->arch.vm_type == KVM_X86_SNP_VM;
13623-
}
13624-
13618+
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
1362513619
int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
1362613620
{
1362713621
return static_call(kvm_x86_gmem_prepare)(kvm, pfn, gfn, max_order);
1362813622
}
1362913623
#endif
1363013624

13631-
#ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE
13625+
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
1363213626
void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
1363313627
{
1363413628
static_call_cond(kvm_x86_gmem_invalidate)(start, end);

arch/x86/kvm/xen.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -741,7 +741,7 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
741741
} else {
742742
void __user * hva = u64_to_user_ptr(data->u.shared_info.hva);
743743

744-
if (!PAGE_ALIGNED(hva) || !access_ok(hva, PAGE_SIZE)) {
744+
if (!PAGE_ALIGNED(hva)) {
745745
r = -EINVAL;
746746
} else if (!hva) {
747747
kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache);

include/linux/kvm_host.h

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2412,7 +2412,7 @@ static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn
24122412
}
24132413

24142414
bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
2415-
unsigned long attrs);
2415+
unsigned long mask, unsigned long attrs);
24162416
bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
24172417
struct kvm_gfn_range *range);
24182418
bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
@@ -2443,11 +2443,11 @@ static inline int kvm_gmem_get_pfn(struct kvm *kvm,
24432443
}
24442444
#endif /* CONFIG_KVM_PRIVATE_MEM */
24452445

2446-
#ifdef CONFIG_HAVE_KVM_GMEM_PREPARE
2446+
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
24472447
int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order);
2448-
bool kvm_arch_gmem_prepare_needed(struct kvm *kvm);
24492448
#endif
24502449

2450+
#ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
24512451
/**
24522452
* kvm_gmem_populate() - Populate/prepare a GPA range with guest data
24532453
*
@@ -2474,8 +2474,9 @@ typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
24742474

24752475
long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
24762476
kvm_gmem_populate_cb post_populate, void *opaque);
2477+
#endif
24772478

2478-
#ifdef CONFIG_HAVE_KVM_GMEM_INVALIDATE
2479+
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
24792480
void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
24802481
#endif
24812482

0 commit comments

Comments
 (0)