@@ -776,7 +776,8 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 	kvm_flush_remote_tlbs_gfn(kvm, gfn, PG_LEVEL_4K);
 }
 
-void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+				 enum kvm_mmu_type mmu_type)
 {
 	/*
 	 * If it's possible to replace the shadow page with an NX huge page,
@@ -790,8 +791,9 @@ void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 		return;
 
 	++kvm->stat.nx_lpage_splits;
+	++kvm->arch.possible_nx_huge_pages[mmu_type].nr_pages;
 	list_add_tail(&sp->possible_nx_huge_page_link,
-		      &kvm->arch.possible_nx_huge_pages);
+		      &kvm->arch.possible_nx_huge_pages[mmu_type].pages);
 }
 
 static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
@@ -800,7 +802,7 @@ static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 	sp->nx_huge_page_disallowed = true;
 
 	if (nx_huge_page_possible)
-		track_possible_nx_huge_page(kvm, sp);
+		track_possible_nx_huge_page(kvm, sp, KVM_SHADOW_MMU);
 }
 
 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
@@ -819,20 +821,22 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 	kvm_mmu_gfn_allow_lpage(slot, gfn);
 }
 
-void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+				   enum kvm_mmu_type mmu_type)
 {
 	if (list_empty(&sp->possible_nx_huge_page_link))
 		return;
 
 	--kvm->stat.nx_lpage_splits;
+	--kvm->arch.possible_nx_huge_pages[mmu_type].nr_pages;
 	list_del_init(&sp->possible_nx_huge_page_link);
 }
 
 static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	sp->nx_huge_page_disallowed = false;
 
-	untrack_possible_nx_huge_page(kvm, sp);
+	untrack_possible_nx_huge_page(kvm, sp, KVM_SHADOW_MMU);
 }
 
 static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu,
@@ -6737,11 +6741,12 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
 
 int kvm_mmu_init_vm(struct kvm *kvm)
 {
-	int r;
+	int r, i;
 
 	kvm->arch.shadow_mmio_value = shadow_mmio_value;
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
-	INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
+	for (i = 0; i < KVM_NR_MMU_TYPES; ++i)
+		INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages[i].pages);
 	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
 
 	if (tdp_mmu_enabled) {
@@ -7582,16 +7587,32 @@ static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel
 	return err;
 }
 
-static void kvm_recover_nx_huge_pages(struct kvm *kvm)
+static unsigned long nx_huge_pages_to_zap(struct kvm *kvm,
+					  enum kvm_mmu_type mmu_type)
 {
-	unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
+	unsigned long pages = READ_ONCE(kvm->arch.possible_nx_huge_pages[mmu_type].nr_pages);
+	unsigned int ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
+
+	return ratio ? DIV_ROUND_UP(pages, ratio) : 0;
+}
+
+static void kvm_recover_nx_huge_pages(struct kvm *kvm,
+				      enum kvm_mmu_type mmu_type)
+{
+#ifdef CONFIG_X86_64
+	const bool is_tdp_mmu = mmu_type == KVM_TDP_MMU;
+#else
+	const bool is_tdp_mmu = false;
+#endif
+	unsigned long to_zap = nx_huge_pages_to_zap(kvm, mmu_type);
+	struct list_head *nx_huge_pages;
 	struct kvm_memory_slot *slot;
-	int rcu_idx;
 	struct kvm_mmu_page *sp;
-	unsigned int ratio;
 	LIST_HEAD(invalid_list);
 	bool flush = false;
-	ulong to_zap;
+	int rcu_idx;
+
+	nx_huge_pages = &kvm->arch.possible_nx_huge_pages[mmu_type].pages;
 
 	rcu_idx = srcu_read_lock(&kvm->srcu);
 	write_lock(&kvm->mmu_lock);
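A quick worked example of the new nx_huge_pages_to_zap() helper (the figures are illustrative, not taken from this commit): with nx_huge_pages_recovery_ratio set to 60 and 300 pages currently tracked for a given MMU type, DIV_ROUND_UP(300, 60) tells the recovery pass to zap 5 pages, while a ratio of 0 makes the helper return 0 and the pass zaps nothing for that MMU type.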
@@ -7603,10 +7624,8 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm)
 	 */
 	rcu_read_lock();
 
-	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
-	to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
 	for ( ; to_zap; --to_zap) {
-		if (list_empty(&kvm->arch.possible_nx_huge_pages))
+		if (list_empty(nx_huge_pages))
 			break;
 
 		/*
@@ -7616,7 +7635,7 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm)
 		 * the total number of shadow pages.  And because the TDP MMU
 		 * doesn't use active_mmu_pages.
 		 */
-		sp = list_first_entry(&kvm->arch.possible_nx_huge_pages,
+		sp = list_first_entry(nx_huge_pages,
 				      struct kvm_mmu_page,
 				      possible_nx_huge_page_link);
 		WARN_ON_ONCE(!sp->nx_huge_page_disallowed);
@@ -7653,7 +7672,7 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm)
 
 		if (slot && kvm_slot_dirty_track_enabled(slot))
 			unaccount_nx_huge_page(kvm, sp);
-		else if (is_tdp_mmu_page(sp))
+		else if (is_tdp_mmu)
 			flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
 		else
 			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
@@ -7684,9 +7703,10 @@ static void kvm_nx_huge_page_recovery_worker_kill(void *data)
 static bool kvm_nx_huge_page_recovery_worker(void *data)
 {
 	struct kvm *kvm = data;
+	long remaining_time;
 	bool enabled;
 	uint period;
-	long remaining_time;
+	int i;
 
 	enabled = calc_nx_huge_pages_recovery_period(&period);
 	if (!enabled)
@@ -7701,7 +7721,8 @@ static bool kvm_nx_huge_page_recovery_worker(void *data)
 	}
 
 	__set_current_state(TASK_RUNNING);
-	kvm_recover_nx_huge_pages(kvm);
+	for (i = 0; i < KVM_NR_MMU_TYPES; ++i)
+		kvm_recover_nx_huge_pages(kvm, i);
 	kvm->arch.nx_huge_page_last = get_jiffies_64();
 	return true;
 }
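For readers following the array indexing above: the hunks assume a per-MMU-type bucket in struct kvm_arch plus an MMU type enum, neither of which is shown in this diff. The snippet below is only a sketch inferred from the usage (KVM_SHADOW_MMU, KVM_TDP_MMU under CONFIG_X86_64, KVM_NR_MMU_TYPES, and the .pages/.nr_pages members); the struct name and field types are assumptions, not the actual header change.

/* Sketch only: names and types inferred from the hunks above. */
enum kvm_mmu_type {
	KVM_SHADOW_MMU,
#ifdef CONFIG_X86_64
	KVM_TDP_MMU,
#endif
	KVM_NR_MMU_TYPES,
};

struct kvm_possible_nx_huge_pages {
	/* Shadow pages that could be replaced by an NX huge page. */
	struct list_head pages;
	/* Count of entries on the list, read locklessly via READ_ONCE(). */
	unsigned long nr_pages;
};

struct kvm_arch {
	/* ... other fields elided ... */
	struct kvm_possible_nx_huge_pages possible_nx_huge_pages[KVM_NR_MMU_TYPES];
};

With a shape like this, kvm_mmu_init_vm() can initialize each bucket's list head in a loop, and the recovery worker can walk the shadow MMU and TDP MMU lists independently, which is what the kvm_recover_nx_huge_pages(kvm, i) loop above does.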