@@ -2574,6 +2574,9 @@ EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier);
 
 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
 {
+        if (vcpu->arch.guest_tsc_protected)
+                return;
+
         trace_kvm_write_tsc_offset(vcpu->vcpu_id,
                                    vcpu->arch.l1_tsc_offset,
                                    l1_offset);
@@ -2631,12 +2634,18 @@ static inline bool kvm_check_tsc_unstable(void)
  * participates in.
  */
 static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
-                                  u64 ns, bool matched)
+                                  u64 ns, bool matched, bool user_set_tsc)
 {
         struct kvm *kvm = vcpu->kvm;
 
         lockdep_assert_held(&kvm->arch.tsc_write_lock);
 
+        if (vcpu->arch.guest_tsc_protected)
+                return;
+
+        if (user_set_tsc)
+                vcpu->kvm->arch.user_set_tsc = true;
+
         /*
          * We also track th most recent recorded KHZ, write and time to
          * allow the matching interval to be extended at each write.
@@ -2722,8 +2731,6 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
                 }
         }
 
-        if (user_value)
-                kvm->arch.user_set_tsc = true;
 
         /*
          * For a reliable TSC, we can match TSC offsets, and for an unstable
@@ -2743,7 +2750,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 *user_value)
                 matched = true;
         }
 
-        __kvm_synchronize_tsc(vcpu, offset, data, ns, matched);
+        __kvm_synchronize_tsc(vcpu, offset, data, ns, matched, !!user_value);
         raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 }
 
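Note on the call contract (editorial remark, not part of the patch): both existing callers now tell __kvm_synchronize_tsc() whether the write came from userspace, and kvm->arch.user_set_tsc is latched inside that helper only after the guest_tsc_protected early return. A minimal sketch of the pattern a caller follows, assuming it already computed offset, tsc, ns and matched:

        raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
        /* Pass true only for userspace-initiated TSC writes. */
        __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched, /*user_set_tsc=*/true);
        raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);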
@@ -3923,7 +3930,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
         case MSR_IA32_TSC:
                 if (msr_info->host_initiated) {
                         kvm_synchronize_tsc(vcpu, &data);
-                } else {
+                } else if (!vcpu->arch.guest_tsc_protected) {
                         u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
                         adjust_tsc_offset_guest(vcpu, adj);
                         vcpu->arch.ia32_tsc_adjust_msr += adj;
@@ -4590,6 +4597,11 @@ static bool kvm_is_vm_type_supported(unsigned long type)
         return type < 32 && (kvm_caps.supported_vm_types & BIT(type));
 }
 
+static inline u32 kvm_sync_valid_fields(struct kvm *kvm)
+{
+        return kvm && kvm->arch.has_protected_state ? 0 : KVM_SYNC_X86_VALID_FIELDS;
+}
+
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 {
         int r = 0;
@@ -4698,7 +4710,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                 break;
 #endif
         case KVM_CAP_SYNC_REGS:
-                r = KVM_SYNC_X86_VALID_FIELDS;
+                r = kvm_sync_valid_fields(kvm);
                 break;
         case KVM_CAP_ADJUST_CLOCK:
                 r = KVM_CLOCK_VALID_FLAGS;
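With this change, KVM_CHECK_EXTENSION(KVM_CAP_SYNC_REGS) becomes per-VM: it reports 0 when kvm->arch.has_protected_state is set. A hedged userspace sketch (not from the patch; the helper name is illustrative) of how a VMM could honor the per-VM mask instead of assuming KVM_SYNC_X86_VALID_FIELDS:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Query the sync-regs mask on the VM fd; 0 means do not use kvm_run->s.regs. */
static __u32 vm_sync_regs_mask(int vm_fd)
{
        int mask = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_REGS);

        return mask > 0 ? (__u32)mask : 0;
}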
@@ -5003,7 +5015,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                 u64 offset = kvm_compute_l1_tsc_offset(vcpu,
                                 vcpu->arch.last_guest_tsc);
                 kvm_vcpu_write_tsc_offset(vcpu, offset);
-                vcpu->arch.tsc_catchup = 1;
+                if (!vcpu->arch.guest_tsc_protected)
+                        vcpu->arch.tsc_catchup = 1;
         }
 
         if (kvm_lapic_hv_timer_in_use(vcpu))
@@ -5742,8 +5755,7 @@ static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu,
                 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset;
                 ns = get_kvmclock_base_ns();
 
-                kvm->arch.user_set_tsc = true;
-                __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched);
+                __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched, true);
                 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
                 r = 0;
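For context, kvm_arch_tsc_set_attr() is reached from userspace via KVM_SET_DEVICE_ATTR on the vCPU fd (group KVM_VCPU_TSC_CTRL, attribute KVM_VCPU_TSC_OFFSET); the only behavioral change here is that the user-set marking now flows through the __kvm_synchronize_tsc() argument. A hedged sketch of the userspace side, with the function name being illustrative:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Write an L1 TSC offset for one vCPU through the TSC control attribute group. */
static int set_vcpu_tsc_offset(int vcpu_fd, __u64 offset)
{
        struct kvm_device_attr attr = {
                .group = KVM_VCPU_TSC_CTRL,
                .attr  = KVM_VCPU_TSC_OFFSET,
                .addr  = (__u64)(unsigned long)&offset,
        };

        return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}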
@@ -11480,6 +11492,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
         struct kvm_queued_exception *ex = &vcpu->arch.exception;
         struct kvm_run *kvm_run = vcpu->run;
+        u32 sync_valid_fields;
         int r;
 
         r = kvm_mmu_post_init_vm(vcpu->kvm);
@@ -11525,8 +11538,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 goto out;
         }
 
-        if ((kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) ||
-            (kvm_run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)) {
+        sync_valid_fields = kvm_sync_valid_fields(vcpu->kvm);
+        if ((kvm_run->kvm_valid_regs & ~sync_valid_fields) ||
+            (kvm_run->kvm_dirty_regs & ~sync_valid_fields)) {
                 r = -EINVAL;
                 goto out;
         }
@@ -11584,7 +11598,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
 out:
         kvm_put_guest_fpu(vcpu);
-        if (kvm_run->kvm_valid_regs)
+        if (kvm_run->kvm_valid_regs && likely(!vcpu->arch.guest_state_protected))
                 store_regs(vcpu);
         post_kvm_run_save(vcpu);
         kvm_vcpu_srcu_read_unlock(vcpu);
@@ -12874,7 +12888,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
                 mutex_unlock(&kvm->slots_lock);
         }
         kvm_destroy_vcpus(kvm);
-        kvm_x86_call(vm_destroy)(kvm);
         kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
         kvm_pic_destroy(kvm);
         kvm_ioapic_destroy(kvm);
@@ -12884,6 +12897,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
         kvm_page_track_cleanup(kvm);
         kvm_xen_destroy_vm(kvm);
         kvm_hv_destroy_vm(kvm);
+        kvm_x86_call(vm_destroy)(kvm);
 }
 
 static void memslot_rmap_free(struct kvm_memory_slot *slot)