Skip to content

Commit 817168d

Browse files
committed
KVM: x86: Make cpu_dirty_log_size a per-VM value
JIRA: https://issues.redhat.com/browse/RHEL-15711 Upstream status: https://git.kernel.org/pub/scm/virt/kvm/kvm.git Make cpu_dirty_log_size (CPU's dirty log buffer size) a per-VM value and set the per-VM cpu_dirty_log_size only for normal VMs when PML is enabled. Do not set it for TDs. Until now, cpu_dirty_log_size was a system-wide value that was used for all VMs and was set to the PML buffer size when PML was enabled in VMX. However, PML is not currently supported for TDs, though PML remains available for normal VMs as long as the feature is supported by hardware and enabled in VMX. Making cpu_dirty_log_size a per-VM value allows it to be the PML buffer size for normal VMs and 0 for TDs. This allows functions like kvm_arch_sync_dirty_log() and kvm_mmu_update_cpu_dirty_logging() to determine if PML is supported, in order to kick off vCPUs or request them to update CPU dirty logging status (turn on/off PML in VMCS). This fixes an issue first reported in [1], where QEMU attaches an emulated VGA device to a TD; note that KVM_MEM_LOG_DIRTY_PAGES still works if the corresponding memslot has no flag KVM_MEM_GUEST_MEMFD. KVM then invokes kvm_mmu_update_cpu_dirty_logging() and from there vmx_update_cpu_dirty_logging(), which incorrectly accesses a kvm_vmx struct for a TDX VM. Reported-by: ANAND NARSHINHA PATIL <Anand.N.Patil@ibm.com> Reported-by: Pedro Principeza <pedro.principeza@canonical.com> Reported-by: Farrah Chen <farrah.chen@intel.com> Closes: canonical/tdx#202 Link: canonical/tdx#202 [1] Suggested-by: Kai Huang <kai.huang@intel.com> Signed-off-by: Yan Zhao <yan.y.zhao@intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> (cherry picked from commit fbb4ada) Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> RHEL: context differences (upstream removed the .sched_in callback and has a more expansive comment)
1 parent 3dacbea commit 817168d

File tree

6 files changed

+16
-15
lines changed

6 files changed

+16
-15
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1527,6 +1527,13 @@ struct kvm_arch {
15271527
struct kvm_mmu_memory_cache split_desc_cache;
15281528

15291529
gfn_t gfn_direct_bits;
1530+
1531+
/*
1532+
* Size of the CPU's dirty log buffer, i.e. VMX's PML buffer. A Zero
1533+
* value indicates CPU dirty logging is unsupported or disabled in
1534+
* current VM.
1535+
*/
1536+
int cpu_dirty_log_size;
15301537
};
15311538

15321539
struct kvm_vm_stat {
@@ -1777,11 +1784,6 @@ struct kvm_x86_ops {
17771784

17781785
void (*sched_in)(struct kvm_vcpu *vcpu, int cpu);
17791786

1780-
/*
1781-
* Size of the CPU's dirty log buffer, i.e. VMX's PML buffer. A zero
1782-
* value indicates CPU dirty logging is unsupported or disabled.
1783-
*/
1784-
int cpu_dirty_log_size;
17851787
void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu);
17861788

17871789
const struct kvm_x86_nested_ops *nested_ops;

arch/x86/kvm/mmu/mmu.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1420,15 +1420,15 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
14201420
}
14211421

14221422
/* Now handle 4K PTEs. */
1423-
if (kvm_x86_ops.cpu_dirty_log_size)
1423+
if (kvm->arch.cpu_dirty_log_size)
14241424
kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
14251425
else
14261426
kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
14271427
}
14281428

14291429
int kvm_cpu_dirty_log_size(struct kvm *kvm)
14301430
{
1431-
return kvm_x86_ops.cpu_dirty_log_size;
1431+
return kvm->arch.cpu_dirty_log_size;
14321432
}
14331433

14341434
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,

arch/x86/kvm/mmu/mmu_internal.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -198,7 +198,7 @@ static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm *kvm,
198198
* being enabled is mandatory as the bits used to denote WP-only SPTEs
199199
* are reserved for PAE paging (32-bit KVM).
200200
*/
201-
return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode;
201+
return kvm->arch.cpu_dirty_log_size && sp->role.guest_mode;
202202
}
203203

204204
static inline gfn_t gfn_round_for_level(gfn_t gfn, int level)

arch/x86/kvm/vmx/main.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -321,7 +321,6 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
321321

322322
.sched_in = vmx_sched_in,
323323

324-
.cpu_dirty_log_size = PML_LOG_NR_ENTRIES,
325324
.update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
326325

327326
.nested_ops = &vmx_nested_ops,

arch/x86/kvm/vmx/vmx.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7632,6 +7632,9 @@ int vmx_vm_init(struct kvm *kvm)
76327632
break;
76337633
}
76347634
}
7635+
7636+
if (enable_pml)
7637+
kvm->arch.cpu_dirty_log_size = PML_LOG_NR_ENTRIES;
76357638
return 0;
76367639
}
76377640

@@ -8501,9 +8504,6 @@ __init int vmx_hardware_setup(void)
85018504
if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
85028505
enable_pml = 0;
85038506

8504-
if (!enable_pml)
8505-
vt_x86_ops.cpu_dirty_log_size = 0;
8506-
85078507
if (!cpu_has_vmx_preemption_timer())
85088508
enable_preemption_timer = false;
85098509

arch/x86/kvm/x86.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6500,7 +6500,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
65006500
struct kvm_vcpu *vcpu;
65016501
unsigned long i;
65026502

6503-
if (!kvm_x86_ops.cpu_dirty_log_size)
6503+
if (!kvm->arch.cpu_dirty_log_size)
65046504
return;
65056505

65066506
kvm_for_each_vcpu(i, vcpu, kvm)
@@ -13113,7 +13113,7 @@ static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable)
1311313113
{
1311413114
int nr_slots;
1311513115

13116-
if (!kvm_x86_ops.cpu_dirty_log_size)
13116+
if (!kvm->arch.cpu_dirty_log_size)
1311713117
return;
1311813118

1311913119
nr_slots = atomic_read(&kvm->nr_memslots_dirty_logging);
@@ -13189,7 +13189,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
1318913189
if (READ_ONCE(eager_page_split))
1319013190
kvm_mmu_slot_try_split_huge_pages(kvm, new, PG_LEVEL_4K);
1319113191

13192-
if (kvm_x86_ops.cpu_dirty_log_size) {
13192+
if (kvm->arch.cpu_dirty_log_size) {
1319313193
kvm_mmu_slot_leaf_clear_dirty(kvm, new);
1319413194
kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M);
1319513195
} else {

0 commit comments

Comments
 (0)