Skip to content

Commit f58c9aa

Browse files
bibo-mao authored and chenhuacai committed
LoongArch: KVM: Fix VM migration failure with PTW enabled
With PTW disabled system, bit _PAGE_DIRTY is a HW bit for page writing. However with PTW enabled system, bit _PAGE_WRITE is also a "HW bit" for page writing, because hardware synchronizes _PAGE_WRITE to _PAGE_DIRTY automatically. Previously, _PAGE_WRITE is treated as a SW bit to record the page writeable attribute for the fast page fault handling in the secondary MMU, however with PTW enabled machine, this bit is used by HW already (so setting it will silence the TLB modify exception). Here define KVM_PAGE_WRITEABLE with the SW bit _PAGE_MODIFIED, so that it can work on both PTW disabled and enabled machines. And for HW write bits, both _PAGE_DIRTY and _PAGE_WRITE are set or clear together. Cc: stable@vger.kernel.org Signed-off-by: Bibo Mao <maobibo@loongson.cn> Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
1 parent 091b29d commit f58c9aa

File tree

2 files changed

+20
-8
lines changed

2 files changed

+20
-8
lines changed

arch/loongarch/include/asm/kvm_mmu.h

Lines changed: 16 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,13 @@
1616
*/
1717
#define KVM_MMU_CACHE_MIN_PAGES (CONFIG_PGTABLE_LEVELS - 1)
1818

19+
/*
20+
* _PAGE_MODIFIED is a SW pte bit, it records page ever written on host
21+
* kernel, on secondary MMU it records the page writeable attribute, in
22+
* order for fast path handling.
23+
*/
24+
#define KVM_PAGE_WRITEABLE _PAGE_MODIFIED
25+
1926
#define _KVM_FLUSH_PGTABLE 0x1
2027
#define _KVM_HAS_PGMASK 0x2
2128
#define kvm_pfn_pte(pfn, prot) (((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
@@ -52,10 +59,10 @@ static inline void kvm_set_pte(kvm_pte_t *ptep, kvm_pte_t val)
5259
WRITE_ONCE(*ptep, val);
5360
}
5461

55-
static inline int kvm_pte_write(kvm_pte_t pte) { return pte & _PAGE_WRITE; }
56-
static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & _PAGE_DIRTY; }
5762
static inline int kvm_pte_young(kvm_pte_t pte) { return pte & _PAGE_ACCESSED; }
5863
static inline int kvm_pte_huge(kvm_pte_t pte) { return pte & _PAGE_HUGE; }
64+
static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & __WRITEABLE; }
65+
static inline int kvm_pte_writeable(kvm_pte_t pte) { return pte & KVM_PAGE_WRITEABLE; }
5966

6067
static inline kvm_pte_t kvm_pte_mkyoung(kvm_pte_t pte)
6168
{
@@ -69,12 +76,12 @@ static inline kvm_pte_t kvm_pte_mkold(kvm_pte_t pte)
6976

7077
static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte)
7178
{
72-
return pte | _PAGE_DIRTY;
79+
return pte | __WRITEABLE;
7380
}
7481

7582
static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte)
7683
{
77-
return pte & ~_PAGE_DIRTY;
84+
return pte & ~__WRITEABLE;
7885
}
7986

8087
static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte)
@@ -87,6 +94,11 @@ static inline kvm_pte_t kvm_pte_mksmall(kvm_pte_t pte)
8794
return pte & ~_PAGE_HUGE;
8895
}
8996

97+
static inline kvm_pte_t kvm_pte_mkwriteable(kvm_pte_t pte)
98+
{
99+
return pte | KVM_PAGE_WRITEABLE;
100+
}
101+
90102
static inline int kvm_need_flush(kvm_ptw_ctx *ctx)
91103
{
92104
return ctx->flag & _KVM_FLUSH_PGTABLE;

arch/loongarch/kvm/mmu.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -569,7 +569,7 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ
569569
/* Track access to pages marked old */
570570
new = kvm_pte_mkyoung(*ptep);
571571
if (write && !kvm_pte_dirty(new)) {
572-
if (!kvm_pte_write(new)) {
572+
if (!kvm_pte_writeable(new)) {
573573
ret = -EFAULT;
574574
goto out;
575575
}
@@ -856,9 +856,9 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
856856
prot_bits |= _CACHE_SUC;
857857

858858
if (writeable) {
859-
prot_bits |= _PAGE_WRITE;
859+
prot_bits = kvm_pte_mkwriteable(prot_bits);
860860
if (write)
861-
prot_bits |= __WRITEABLE;
861+
prot_bits = kvm_pte_mkdirty(prot_bits);
862862
}
863863

864864
/* Disable dirty logging on HugePages */
@@ -904,7 +904,7 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
904904
kvm_release_faultin_page(kvm, page, false, writeable);
905905
spin_unlock(&kvm->mmu_lock);
906906

907-
if (prot_bits & _PAGE_DIRTY)
907+
if (kvm_pte_dirty(prot_bits))
908908
mark_page_dirty_in_slot(kvm, memslot, gfn);
909909

910910
out:

0 commit comments

Comments (0)