@@ -167,15 +167,21 @@ static int pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
167167
168168	pkvm_vcpu_reset_hcr(vcpu);
169169
170-	if (!pkvm_hyp_vcpu_is_protected(hyp_vcpu))
170+	if (!pkvm_hyp_vcpu_is_protected(hyp_vcpu)) {
171+		struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
172+
173+		/* Trust the host for non-protected vcpu features. */
174+		vcpu->arch.hcrx_el2 = host_vcpu->arch.hcrx_el2;
171175		return 0;
176+	}
172177
173178	ret = pkvm_check_pvm_cpu_features(vcpu);
174179	if (ret)
175180		return ret;
176181
177182	pvm_init_traps_hcr(vcpu);
178183	pvm_init_traps_mdcr(vcpu);
184+	vcpu_set_hcrx(vcpu);
179185
180186	return 0;
181187}
@@ -240,10 +246,12 @@ struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
240246
241247	hyp_spin_lock(&vm_table_lock);
242248	hyp_vm = get_vm_by_handle(handle);
243-	if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
249+	if (!hyp_vm || hyp_vm->kvm.created_vcpus <= vcpu_idx)
244250		goto unlock;
245251
246252	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
253+	if (!hyp_vcpu)
254+		goto unlock;
247255
248256	/* Ensure vcpu isn't loaded on more than one cpu simultaneously. */
249257	if (unlikely(hyp_vcpu->loaded_hyp_vcpu)) {
@@ -369,8 +377,14 @@ static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
369377{
370378	int i;
371379
372-	for (i = 0; i < nr_vcpus; i++)
373-		unpin_host_vcpu(hyp_vcpus[i]->host_vcpu);
380+	for (i = 0; i < nr_vcpus; i++) {
381+		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vcpus[i];
382+
383+		if (!hyp_vcpu)
384+			continue;
385+
386+		unpin_host_vcpu(hyp_vcpu->host_vcpu);
387+	}
374388}
375389
376390static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
@@ -394,24 +408,18 @@ static void pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *
394408
395409static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
396410			      struct pkvm_hyp_vm *hyp_vm,
397-			      struct kvm_vcpu *host_vcpu,
398-			      unsigned int vcpu_idx)
411+			      struct kvm_vcpu *host_vcpu)
399412{
400413	int ret = 0;
401414
402415	if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
403416		return -EBUSY;
404417
405-	if (host_vcpu->vcpu_idx != vcpu_idx) {
406-		ret = -EINVAL;
407-		goto done;
408-	}
409-
410418	hyp_vcpu->host_vcpu = host_vcpu;
411419
412420	hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
413421	hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
414-	hyp_vcpu->vcpu.vcpu_idx = vcpu_idx;
422+	hyp_vcpu->vcpu.vcpu_idx = READ_ONCE(host_vcpu->vcpu_idx);
415423
416424	hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
417425	hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
@@ -649,27 +657,28 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
649657		goto unlock;
650658	}
651659
652-	idx = hyp_vm->nr_vcpus;
660+	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu);
661+	if (ret)
662+		goto unlock;
663+
664+	idx = hyp_vcpu->vcpu.vcpu_idx;
653665	if (idx >= hyp_vm->kvm.created_vcpus) {
654666		ret = -EINVAL;
655667		goto unlock;
656668	}
657669
658-	ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx);
659-	if (ret)
670+	if (hyp_vm->vcpus[idx]) {
671+		ret = -EINVAL;
660672		goto unlock;
673+	}
661674
662675	hyp_vm->vcpus[idx] = hyp_vcpu;
663-	hyp_vm->nr_vcpus++;
664676unlock:
665677	hyp_spin_unlock(&vm_table_lock);
666678
667-	if (ret) {
679+	if (ret)
668680		unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
669-		return ret;
670-	}
671-
672-	return 0;
681+	return ret;
673682}
674683
675684static void
@@ -686,7 +695,7 @@ teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
686695
687696int __pkvm_teardown_vm(pkvm_handle_t handle)
688697{
689-	struct kvm_hyp_memcache *mc;
698+	struct kvm_hyp_memcache *mc, *stage2_mc;
690699	struct pkvm_hyp_vm *hyp_vm;
691700	struct kvm *host_kvm;
692701	unsigned int idx;
@@ -714,18 +723,24 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
714723
715724	/* Reclaim guest pages (including page-table pages) */
716725	mc = &host_kvm->arch.pkvm.teardown_mc;
717-	reclaim_guest_pages(hyp_vm, mc);
718-	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);
726+	stage2_mc = &host_kvm->arch.pkvm.stage2_teardown_mc;
727+	reclaim_pgtable_pages(hyp_vm, stage2_mc);
728+	unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->kvm.created_vcpus);
719729
720730	/* Push the metadata pages to the teardown memcache */
721-	for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
731+	for (idx = 0; idx < hyp_vm->kvm.created_vcpus; ++idx) {
722732		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
723-		struct kvm_hyp_memcache *vcpu_mc = &hyp_vcpu->vcpu.arch.pkvm_memcache;
733+		struct kvm_hyp_memcache *vcpu_mc;
734+
735+		if (!hyp_vcpu)
736+			continue;
737+
738+		vcpu_mc = &hyp_vcpu->vcpu.arch.pkvm_memcache;
724739
725740		while (vcpu_mc->nr_pages) {
726741			void *addr = pop_hyp_memcache(vcpu_mc, hyp_phys_to_virt);
727742
728-			push_hyp_memcache(mc, addr, hyp_virt_to_phys);
743+			push_hyp_memcache(stage2_mc, addr, hyp_virt_to_phys);
729744			unmap_donated_memory_noclear(addr, PAGE_SIZE);
730745		}
731746
731746
0 commit comments