Commit c732163
Author: CKI KWF Bot
Merge: Fix the lockdep max depth failure due to a large number of vCPUs

MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-10/-/merge_requests/1073

This merge request fixes a false lockdep error message that appears, mostly on ARM, when the system has more than 48 vCPUs.

JIRA: https://issues.redhat.com/browse/RHEL-74410

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Approved-by: Cornelia Huck <cohuck@redhat.com>
Approved-by: Waiman Long <longman@redhat.com>
Approved-by: Gavin Shan <gshan@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Approved-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Merged-by: CKI GitLab Kmaint Pipeline Bot <26919896-cki-kmaint-pipeline-bot@users.noreply.gitlab.com>
2 parents: 8de8459 + 36a76ce
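In substance, the series deletes the per-architecture "lock/unlock every vCPU" helpers (open-coded in arm64 and in the RISC-V AIA device, hand-annotated for lockdep in x86 SEV migration) and replaces them with generic kvm_trylock_all_vcpus(), kvm_lock_all_vcpus() and kvm_unlock_all_vcpus() helpers declared in include/linux/kvm_host.h, so the pattern carries one shared lockdep annotation instead of one held-lock entry per vCPU.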

File tree

14 files changed: +152 additions, -182 deletions

arch/arm64/include/asm/kvm_host.h

Lines changed: 0 additions & 3 deletions
@@ -1234,9 +1234,6 @@ int __init populate_sysreg_config(const struct sys_reg_desc *sr,
                                   unsigned int idx);
 int __init populate_nv_trap_config(void);
 
-bool lock_all_vcpus(struct kvm *kvm);
-void unlock_all_vcpus(struct kvm *kvm);
-
 void kvm_calculate_traps(struct kvm_vcpu *vcpu);
 
 /* MMIO helpers */

arch/arm64/kvm/arch_timer.c

Lines changed: 2 additions & 2 deletions
@@ -1767,7 +1767,7 @@ int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
 
         mutex_lock(&kvm->lock);
 
-        if (lock_all_vcpus(kvm)) {
+        if (!kvm_trylock_all_vcpus(kvm)) {
                 set_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &kvm->arch.flags);
 
                 /*
@@ -1779,7 +1779,7 @@ int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
                 kvm->arch.timer_data.voffset = offset->counter_offset;
                 kvm->arch.timer_data.poffset = offset->counter_offset;
 
-                unlock_all_vcpus(kvm);
+                kvm_unlock_all_vcpus(kvm);
         } else {
                 ret = -EBUSY;
         }
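Note the flipped conditions throughout the conversion: the old lock_all_vcpus() returned a bool, true on success, whereas kvm_trylock_all_vcpus() follows the usual kernel convention of returning 0 on success and a nonzero error code on failure. Hence if (lock_all_vcpus(kvm)) becomes if (!kvm_trylock_all_vcpus(kvm)) here, and the if (!lock_all_vcpus(...)) callers in the files below drop their negation.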

arch/arm64/kvm/arm.c

Lines changed: 0 additions & 43 deletions
@@ -1882,49 +1882,6 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
         }
 }
 
-/* unlocks vcpus from @vcpu_lock_idx and smaller */
-static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
-{
-        struct kvm_vcpu *tmp_vcpu;
-
-        for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
-                tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
-                mutex_unlock(&tmp_vcpu->mutex);
-        }
-}
-
-void unlock_all_vcpus(struct kvm *kvm)
-{
-        lockdep_assert_held(&kvm->lock);
-
-        unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
-}
-
-/* Returns true if all vcpus were locked, false otherwise */
-bool lock_all_vcpus(struct kvm *kvm)
-{
-        struct kvm_vcpu *tmp_vcpu;
-        unsigned long c;
-
-        lockdep_assert_held(&kvm->lock);
-
-        /*
-         * Any time a vcpu is in an ioctl (including running), the
-         * core KVM code tries to grab the vcpu->mutex.
-         *
-         * By grabbing the vcpu->mutex of all VCPUs we ensure that no
-         * other VCPUs can fiddle with the state while we access it.
-         */
-        kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
-                if (!mutex_trylock(&tmp_vcpu->mutex)) {
-                        unlock_vcpus(kvm, c - 1);
-                        return false;
-                }
-        }
-
-        return true;
-}
-
 static unsigned long nvhe_percpu_size(void)
 {
         return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
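The helper deleted above is where the false positive came from: every successful mutex_trylock(&tmp_vcpu->mutex) pushes one entry onto lockdep's per-task held-lock stack, which is fixed at MAX_LOCK_DEPTH (48) entries. On a VM with more vCPUs than that, the stack overflows and lockdep emits a "BUG: MAX_LOCK_DEPTH too low!" splat even though the locking itself is correct, which matches the "more than 48 vCPUs, mostly on ARM" symptom in the merge description.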

arch/arm64/kvm/vgic/vgic-init.c

Lines changed: 2 additions & 2 deletions
@@ -88,7 +88,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
         lockdep_assert_held(&kvm->lock);
 
         ret = -EBUSY;
-        if (!lock_all_vcpus(kvm))
+        if (kvm_trylock_all_vcpus(kvm))
                 return ret;
 
         mutex_lock(&kvm->arch.config_lock);
@@ -142,7 +142,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 
 out_unlock:
         mutex_unlock(&kvm->arch.config_lock);
-        unlock_all_vcpus(kvm);
+        kvm_unlock_all_vcpus(kvm);
         return ret;
 }
 

arch/arm64/kvm/vgic/vgic-its.c

Lines changed: 4 additions & 4 deletions
@@ -1999,7 +1999,7 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
 
         mutex_lock(&dev->kvm->lock);
 
-        if (!lock_all_vcpus(dev->kvm)) {
+        if (kvm_trylock_all_vcpus(dev->kvm)) {
                 mutex_unlock(&dev->kvm->lock);
                 return -EBUSY;
         }
@@ -2034,7 +2034,7 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
         }
 out:
         mutex_unlock(&dev->kvm->arch.config_lock);
-        unlock_all_vcpus(dev->kvm);
+        kvm_unlock_all_vcpus(dev->kvm);
         mutex_unlock(&dev->kvm->lock);
         return ret;
 }
@@ -2704,7 +2704,7 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
 
         mutex_lock(&kvm->lock);
 
-        if (!lock_all_vcpus(kvm)) {
+        if (kvm_trylock_all_vcpus(kvm)) {
                 mutex_unlock(&kvm->lock);
                 return -EBUSY;
         }
@@ -2726,7 +2726,7 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
 
         mutex_unlock(&its->its_lock);
         mutex_unlock(&kvm->arch.config_lock);
-        unlock_all_vcpus(kvm);
+        kvm_unlock_all_vcpus(kvm);
         mutex_unlock(&kvm->lock);
         return ret;
 }

arch/arm64/kvm/vgic/vgic-kvm-device.c

Lines changed: 6 additions & 6 deletions
@@ -268,15 +268,15 @@ static int vgic_set_common_attr(struct kvm_device *dev,
                 return -ENXIO;
         mutex_lock(&dev->kvm->lock);
 
-        if (!lock_all_vcpus(dev->kvm)) {
+        if (kvm_trylock_all_vcpus(dev->kvm)) {
                 mutex_unlock(&dev->kvm->lock);
                 return -EBUSY;
         }
 
         mutex_lock(&dev->kvm->arch.config_lock);
         r = vgic_v3_save_pending_tables(dev->kvm);
         mutex_unlock(&dev->kvm->arch.config_lock);
-        unlock_all_vcpus(dev->kvm);
+        kvm_unlock_all_vcpus(dev->kvm);
         mutex_unlock(&dev->kvm->lock);
         return r;
 }
@@ -384,7 +384,7 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
 
         mutex_lock(&dev->kvm->lock);
 
-        if (!lock_all_vcpus(dev->kvm)) {
+        if (kvm_trylock_all_vcpus(dev->kvm)) {
                 mutex_unlock(&dev->kvm->lock);
                 return -EBUSY;
         }
@@ -409,7 +409,7 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
 
 out:
         mutex_unlock(&dev->kvm->arch.config_lock);
-        unlock_all_vcpus(dev->kvm);
+        kvm_unlock_all_vcpus(dev->kvm);
         mutex_unlock(&dev->kvm->lock);
 
         if (!ret && !is_write)
@@ -545,7 +545,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
 
         mutex_lock(&dev->kvm->lock);
 
-        if (!lock_all_vcpus(dev->kvm)) {
+        if (kvm_trylock_all_vcpus(dev->kvm)) {
                 mutex_unlock(&dev->kvm->lock);
                 return -EBUSY;
         }
@@ -589,7 +589,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
 
 out:
         mutex_unlock(&dev->kvm->arch.config_lock);
-        unlock_all_vcpus(dev->kvm);
+        kvm_unlock_all_vcpus(dev->kvm);
         mutex_unlock(&dev->kvm->lock);
 
         if (!ret && uaccess && !is_write) {

arch/riscv/kvm/aia_device.c

Lines changed: 2 additions & 32 deletions
@@ -12,36 +12,6 @@
 #include <linux/kvm_host.h>
 #include <linux/uaccess.h>
 
-static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
-{
-        struct kvm_vcpu *tmp_vcpu;
-
-        for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
-                tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
-                mutex_unlock(&tmp_vcpu->mutex);
-        }
-}
-
-static void unlock_all_vcpus(struct kvm *kvm)
-{
-        unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
-}
-
-static bool lock_all_vcpus(struct kvm *kvm)
-{
-        struct kvm_vcpu *tmp_vcpu;
-        unsigned long c;
-
-        kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
-                if (!mutex_trylock(&tmp_vcpu->mutex)) {
-                        unlock_vcpus(kvm, c - 1);
-                        return false;
-                }
-        }
-
-        return true;
-}
-
 static int aia_create(struct kvm_device *dev, u32 type)
 {
         int ret;
@@ -53,7 +23,7 @@ static int aia_create(struct kvm_device *dev, u32 type)
                 return -EEXIST;
 
         ret = -EBUSY;
-        if (!lock_all_vcpus(kvm))
+        if (kvm_trylock_all_vcpus(kvm))
                 return ret;
 
         kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -65,7 +35,7 @@ static int aia_create(struct kvm_device *dev, u32 type)
         kvm->arch.aia.in_kernel = true;
 
 out_unlock:
-        unlock_all_vcpus(kvm);
+        kvm_unlock_all_vcpus(kvm);
         return ret;
 }
 

arch/x86/kvm/svm/sev.c

Lines changed: 4 additions & 68 deletions
@@ -1887,70 +1887,6 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
         atomic_set_release(&src_sev->migration_in_progress, 0);
 }
 
-/* vCPU mutex subclasses. */
-enum sev_migration_role {
-        SEV_MIGRATION_SOURCE = 0,
-        SEV_MIGRATION_TARGET,
-        SEV_NR_MIGRATION_ROLES,
-};
-
-static int sev_lock_vcpus_for_migration(struct kvm *kvm,
-                                        enum sev_migration_role role)
-{
-        struct kvm_vcpu *vcpu;
-        unsigned long i, j;
-
-        kvm_for_each_vcpu(i, vcpu, kvm) {
-                if (mutex_lock_killable_nested(&vcpu->mutex, role))
-                        goto out_unlock;
-
-#ifdef CONFIG_PROVE_LOCKING
-                if (!i)
-                        /*
-                         * Reset the role to one that avoids colliding with
-                         * the role used for the first vcpu mutex.
-                         */
-                        role = SEV_NR_MIGRATION_ROLES;
-                else
-                        mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
-#endif
-        }
-
-        return 0;
-
-out_unlock:
-
-        kvm_for_each_vcpu(j, vcpu, kvm) {
-                if (i == j)
-                        break;
-
-#ifdef CONFIG_PROVE_LOCKING
-                if (j)
-                        mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
-#endif
-
-                mutex_unlock(&vcpu->mutex);
-        }
-        return -EINTR;
-}
-
-static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
-{
-        struct kvm_vcpu *vcpu;
-        unsigned long i;
-        bool first = true;
-
-        kvm_for_each_vcpu(i, vcpu, kvm) {
-                if (first)
-                        first = false;
-                else
-                        mutex_acquire(&vcpu->mutex.dep_map,
-                                      SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
-
-                mutex_unlock(&vcpu->mutex);
-        }
-}
-
 static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
         struct kvm_sev_info *dst = to_kvm_sev_info(dst_kvm);
@@ -2090,10 +2026,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
                 charged = true;
         }
 
-        ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
+        ret = kvm_lock_all_vcpus(kvm);
         if (ret)
                 goto out_dst_cgroup;
-        ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
+        ret = kvm_lock_all_vcpus(source_kvm);
         if (ret)
                 goto out_dst_vcpu;
 
@@ -2107,9 +2043,9 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
         ret = 0;
 
 out_source_vcpu:
-        sev_unlock_vcpus_for_migration(source_kvm);
+        kvm_unlock_all_vcpus(source_kvm);
 out_dst_vcpu:
-        sev_unlock_vcpus_for_migration(kvm);
+        kvm_unlock_all_vcpus(kvm);
 out_dst_cgroup:
         /* Operates on the source on success, on the destination on failure. */
         if (charged)
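The removed SEV code dodged the same lockdep limits by hand: it took each vcpu->mutex with mutex_lock_killable_nested() under a per-role subclass, then used mutex_release()/mutex_acquire() to hide every vCPU mutex after the first from lockdep, because neither the held-lock stack nor the subclass space (MAX_LOCKDEP_SUBCLASSES is 8) can track one lock per vCPU. The generic kvm_lock_all_vcpus() subsumes that hand-rolled annotation; a sketch of its likely shape follows the include/linux/kvm_host.h hunk below.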

include/linux/kvm_host.h

Lines changed: 4 additions & 0 deletions
@@ -1014,6 +1014,10 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
 
 void kvm_destroy_vcpus(struct kvm *kvm);
 
+int kvm_trylock_all_vcpus(struct kvm *kvm);
+int kvm_lock_all_vcpus(struct kvm *kvm);
+void kvm_unlock_all_vcpus(struct kvm *kvm);
+
 void vcpu_load(struct kvm_vcpu *vcpu);
 void vcpu_put(struct kvm_vcpu *vcpu);
 
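These three declarations are implemented in virt/kvm/kvm_main.c, whose hunk is not part of this excerpt. As a hedged sketch of the approach the merge description implies (booking every vcpu->mutex acquisition against kvm->lock through lockdep's nest_lock mechanism, so the whole set occupies a single held-lock entry), the helpers plausibly look like the following. Treat the exact bodies, including the use of mutex_trylock_nest_lock() and mutex_lock_killable_nest_lock(), as an assumption rather than the literal kvm_main.c change:

/* Sketch only: assumed shape of the generic virt/kvm/kvm_main.c helpers. */
#include <linux/kvm_host.h>

int kvm_trylock_all_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        unsigned long i, j;

        lockdep_assert_held(&kvm->lock);

        kvm_for_each_vcpu(i, vcpu, kvm)
                /* nest_lock: lockdep books all vcpu mutexes as one entry */
                if (!mutex_trylock_nest_lock(&vcpu->mutex, &kvm->lock))
                        goto out_unlock;
        return 0;

out_unlock:
        kvm_for_each_vcpu(j, vcpu, kvm) {
                if (i == j)
                        break;
                mutex_unlock(&vcpu->mutex);
        }
        return -EBUSY;
}

int kvm_lock_all_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        unsigned long i, j;

        lockdep_assert_held(&kvm->lock);

        kvm_for_each_vcpu(i, vcpu, kvm)
                /* killable variant for the potentially slow SEV migration path */
                if (mutex_lock_killable_nest_lock(&vcpu->mutex, &kvm->lock))
                        goto out_unlock;
        return 0;

out_unlock:
        kvm_for_each_vcpu(j, vcpu, kvm) {
                if (i == j)
                        break;
                mutex_unlock(&vcpu->mutex);
        }
        return -EINTR;
}

void kvm_unlock_all_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        unsigned long i;

        kvm_for_each_vcpu(i, vcpu, kvm)
                mutex_unlock(&vcpu->mutex);
}

With nest_lock, lockdep records the vcpu->mutex class once and counts the remaining acquisitions as references on that single entry, so neither the 48-slot held-lock stack nor the 8 available mutex subclasses caps the number of vCPUs a VM may have.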
