@@ -103,15 +103,15 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
 		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_SENT);
 		break;
 	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
-		if (cp->a2 == 0 && cp->a3 == 0)
+		if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
 			kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
 		else
 			kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
 						  cp->a2, cp->a3, PAGE_SHIFT);
 		kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_SENT);
 		break;
 	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
-		if (cp->a2 == 0 && cp->a3 == 0)
+		if ((cp->a2 == 0 && cp->a3 == 0) || cp->a3 == -1UL)
 			kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
 						       hbase, hmask, cp->a4);
 		else
@@ -127,9 +127,9 @@ static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run
 	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
 		/*
 		 * Until nested virtualization is implemented, the
-		 * SBI HFENCE calls should be treated as NOPs
+		 * SBI HFENCE calls should return not supported
+		 * hence fallthrough.
 		 */
-		break;
 	default:
 		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
 	}
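
For context only (not part of the diff above): a minimal guest-side sketch of the SBI calls whose handling changes here, using the standard SBI calling convention (extension ID in a7, function ID in a6, arguments in a0..a5, error code returned in a0). The RFENCE extension/function IDs and the SBI_ERR_NOT_SUPPORTED value are taken from the SBI specification; the sbi_rfence() helper, the demo() function, and the single-hart mask (hartid 0) are illustrative assumptions, not code from this commit.

/* Illustrative guest-side sketch; constants per the SBI spec ("RFNC" extension). */
#define SBI_EXT_RFENCE			0x52464E43
#define SBI_RFENCE_REMOTE_SFENCE_VMA	1
#define SBI_RFENCE_REMOTE_HFENCE_VVMA	6
#define SBI_ERR_NOT_SUPPORTED		-2

static long sbi_rfence(unsigned long fid, unsigned long hart_mask,
		       unsigned long hart_mask_base, unsigned long start,
		       unsigned long size)
{
	register unsigned long a0 asm("a0") = hart_mask;
	register unsigned long a1 asm("a1") = hart_mask_base;
	register unsigned long a2 asm("a2") = start;
	register unsigned long a3 asm("a3") = size;
	register unsigned long a6 asm("a6") = fid;
	register unsigned long a7 asm("a7") = SBI_EXT_RFENCE;

	/* ecall traps to the SBI implementation (here, KVM acting as SBI provider). */
	asm volatile("ecall"
		     : "+r"(a0), "+r"(a1)
		     : "r"(a2), "r"(a3), "r"(a6), "r"(a7)
		     : "memory");
	return (long)a0;	/* SBI error code */
}

void demo(void)
{
	/* size == -1UL is now treated as a full flush (hfence_vvma_all path). */
	sbi_rfence(SBI_RFENCE_REMOTE_SFENCE_VMA, 1, 0, 0, -1UL);

	/* HFENCE calls are no longer silent NOPs; expect SBI_ERR_NOT_SUPPORTED. */
	long err = sbi_rfence(SBI_RFENCE_REMOTE_HFENCE_VVMA, 1, 0, 0, -1UL);
	(void)err;
}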