Skip to content

Commit 75360a9

Browse files
Oliver Upton authored and Marc Zyngier committed
KVM: arm64: vgic-v3: Reinstate IRQ lock ordering for LPI xarray
Zenghui reports that running a KVM guest with an assigned device and lockdep enabled produces an unfriendly splat due to an inconsistent irq context when taking the lpi_xa's spinlock. This is no good as in rare cases the last reference to an LPI can get dropped after injection of a cached LPI translation. In this case, vgic_put_irq() will release the IRQ struct and take the lpi_xa's spinlock to erase it from the xarray. Reinstate the IRQ ordering and update the lockdep hint accordingly. Note that there is no irqsave equivalent of might_lock(), so just explicitly grab and release the spinlock on lockdep kernels. Reported-by: Zenghui Yu <yuzenghui@huawei.com> Closes: https://lore.kernel.org/kvmarm/b4d7cb0f-f007-0b81-46d1-998b15cc14bc@huawei.com/ Fixes: 982f31b ("KVM: arm64: vgic-v3: Don't require IRQs be disabled for LPI xarray lock") Signed-off-by: Oliver Upton <oupton@kernel.org> Link: https://patch.msgid.link/20251107184847.1784820-2-oupton@kernel.org Signed-off-by: Marc Zyngier <maz@kernel.org>
1 parent 50e7cce commit 75360a9

File tree

4 files changed

+32
-16
lines changed

4 files changed

+32
-16
lines changed

arch/arm64/kvm/vgic/vgic-debug.c

Lines changed: 12 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -64,29 +64,37 @@ static void iter_next(struct kvm *kvm, struct vgic_state_iter *iter)
6464
static int iter_mark_lpis(struct kvm *kvm)
6565
{
6666
struct vgic_dist *dist = &kvm->arch.vgic;
67+
unsigned long intid, flags;
6768
struct vgic_irq *irq;
68-
unsigned long intid;
6969
int nr_lpis = 0;
7070

71+
xa_lock_irqsave(&dist->lpi_xa, flags);
72+
7173
xa_for_each(&dist->lpi_xa, intid, irq) {
7274
if (!vgic_try_get_irq_ref(irq))
7375
continue;
7476

75-
xa_set_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
77+
__xa_set_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
7678
nr_lpis++;
7779
}
7880

81+
xa_unlock_irqrestore(&dist->lpi_xa, flags);
82+
7983
return nr_lpis;
8084
}
8185

8286
static void iter_unmark_lpis(struct kvm *kvm)
8387
{
8488
struct vgic_dist *dist = &kvm->arch.vgic;
89+
unsigned long intid, flags;
8590
struct vgic_irq *irq;
86-
unsigned long intid;
8791

8892
xa_for_each_marked(&dist->lpi_xa, intid, irq, LPI_XA_MARK_DEBUG_ITER) {
89-
xa_clear_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
93+
xa_lock_irqsave(&dist->lpi_xa, flags);
94+
__xa_clear_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
95+
xa_unlock_irqrestore(&dist->lpi_xa, flags);
96+
97+
/* vgic_put_irq() expects to be called outside of the xa_lock */
9098
vgic_put_irq(kvm, irq);
9199
}
92100
}

arch/arm64/kvm/vgic/vgic-init.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
5353
{
5454
struct vgic_dist *dist = &kvm->arch.vgic;
5555

56-
xa_init(&dist->lpi_xa);
56+
xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
5757
}
5858

5959
/* CREATION */

arch/arm64/kvm/vgic/vgic-its.c

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -78,6 +78,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
7878
{
7979
struct vgic_dist *dist = &kvm->arch.vgic;
8080
struct vgic_irq *irq = vgic_get_irq(kvm, intid), *oldirq;
81+
unsigned long flags;
8182
int ret;
8283

8384
/* In this case there is no put, since we keep the reference. */
@@ -88,7 +89,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
8889
if (!irq)
8990
return ERR_PTR(-ENOMEM);
9091

91-
ret = xa_reserve(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
92+
ret = xa_reserve_irq(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
9293
if (ret) {
9394
kfree(irq);
9495
return ERR_PTR(ret);
@@ -103,7 +104,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
103104
irq->target_vcpu = vcpu;
104105
irq->group = 1;
105106

106-
xa_lock(&dist->lpi_xa);
107+
xa_lock_irqsave(&dist->lpi_xa, flags);
107108

108109
/*
109110
* There could be a race with another vgic_add_lpi(), so we need to
@@ -125,7 +126,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
125126
}
126127

127128
out_unlock:
128-
xa_unlock(&dist->lpi_xa);
129+
xa_unlock_irqrestore(&dist->lpi_xa, flags);
129130

130131
if (ret)
131132
return ERR_PTR(ret);

arch/arm64/kvm/vgic/vgic.c

Lines changed: 15 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
2828
* kvm->arch.config_lock (mutex)
2929
* its->cmd_lock (mutex)
3030
* its->its_lock (mutex)
31-
* vgic_dist->lpi_xa.xa_lock
31+
* vgic_dist->lpi_xa.xa_lock must be taken with IRQs disabled
3232
* vgic_cpu->ap_list_lock must be taken with IRQs disabled
3333
* vgic_irq->irq_lock must be taken with IRQs disabled
3434
*
@@ -141,32 +141,39 @@ static __must_check bool vgic_put_irq_norelease(struct kvm *kvm, struct vgic_irq
141141
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
142142
{
143143
struct vgic_dist *dist = &kvm->arch.vgic;
144+
unsigned long flags;
144145

145-
if (irq->intid >= VGIC_MIN_LPI)
146-
might_lock(&dist->lpi_xa.xa_lock);
146+
/*
147+
* Normally the lock is only taken when the refcount drops to 0.
148+
* Acquire/release it early on lockdep kernels to make locking issues
149+
* in rare release paths a bit more obvious.
150+
*/
151+
if (IS_ENABLED(CONFIG_LOCKDEP) && irq->intid >= VGIC_MIN_LPI) {
152+
guard(spinlock_irqsave)(&dist->lpi_xa.xa_lock);
153+
}
147154

148155
if (!__vgic_put_irq(kvm, irq))
149156
return;
150157

151-
xa_lock(&dist->lpi_xa);
158+
xa_lock_irqsave(&dist->lpi_xa, flags);
152159
vgic_release_lpi_locked(dist, irq);
153-
xa_unlock(&dist->lpi_xa);
160+
xa_unlock_irqrestore(&dist->lpi_xa, flags);
154161
}
155162

156163
static void vgic_release_deleted_lpis(struct kvm *kvm)
157164
{
158165
struct vgic_dist *dist = &kvm->arch.vgic;
159-
unsigned long intid;
166+
unsigned long flags, intid;
160167
struct vgic_irq *irq;
161168

162-
xa_lock(&dist->lpi_xa);
169+
xa_lock_irqsave(&dist->lpi_xa, flags);
163170

164171
xa_for_each(&dist->lpi_xa, intid, irq) {
165172
if (irq->pending_release)
166173
vgic_release_lpi_locked(dist, irq);
167174
}
168175

169-
xa_unlock(&dist->lpi_xa);
176+
xa_unlock_irqrestore(&dist->lpi_xa, flags);
170177
}
171178

172179
void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)

0 commit comments

Comments
 (0)