Skip to content

Commit 9b486cd

Browse files
bibo-mao authored and chenhuacai committed
LoongArch: KVM: Add paravirt preempt feature in hypervisor side
Feature KVM_FEATURE_PREEMPT is added to show whether a vCPU is preempted or not. It helps the guest OS with scheduling and lock checking, etc. This patch adds the KVM_FEATURE_PREEMPT feature and uses one byte as the preempted flag in the steal time structure. Signed-off-by: Bibo Mao <maobibo@loongson.cn> Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
1 parent 2faec60 commit 9b486cd

6 files changed

Lines changed: 62 additions & 2 deletions

File tree

arch/loongarch/include/asm/kvm_host.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -165,6 +165,7 @@ enum emulation_result {
165165

166166
#define LOONGARCH_PV_FEAT_UPDATED BIT_ULL(63)
167167
#define LOONGARCH_PV_FEAT_MASK (BIT(KVM_FEATURE_IPI) | \
168+
BIT(KVM_FEATURE_PREEMPT) | \
168169
BIT(KVM_FEATURE_STEAL_TIME) | \
169170
BIT(KVM_FEATURE_USER_HCALL) | \
170171
BIT(KVM_FEATURE_VIRT_EXTIOI))
@@ -254,6 +255,7 @@ struct kvm_vcpu_arch {
254255
u64 guest_addr;
255256
u64 last_steal;
256257
struct gfn_to_hva_cache cache;
258+
u8 preempted;
257259
} st;
258260
};
259261

arch/loongarch/include/asm/kvm_para.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -37,8 +37,10 @@ struct kvm_steal_time {
3737
__u64 steal;
3838
__u32 version;
3939
__u32 flags;
40-
__u32 pad[12];
40+
__u8 preempted;
41+
__u8 pad[47];
4142
};
43+
#define KVM_VCPU_PREEMPTED (1 << 0)
4244

4345
/*
4446
* Hypercall interface for KVM hypervisor

arch/loongarch/include/uapi/asm/kvm.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -105,6 +105,7 @@ struct kvm_fpu {
105105
#define KVM_LOONGARCH_VM_FEAT_PV_STEALTIME 7
106106
#define KVM_LOONGARCH_VM_FEAT_PTW 8
107107
#define KVM_LOONGARCH_VM_FEAT_MSGINT 9
108+
#define KVM_LOONGARCH_VM_FEAT_PV_PREEMPT 10
108109

109110
/* Device Control API on vcpu fd */
110111
#define KVM_LOONGARCH_VCPU_CPUCFG 0

arch/loongarch/include/uapi/asm/kvm_para.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515
#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4)
1616
#define KVM_FEATURE_IPI 1
1717
#define KVM_FEATURE_STEAL_TIME 2
18+
#define KVM_FEATURE_PREEMPT 3
1819
/* BIT 24 - 31 are features configurable by user space vmm */
1920
#define KVM_FEATURE_VIRT_EXTIOI 24
2021
#define KVM_FEATURE_USER_HCALL 25

arch/loongarch/kvm/vcpu.c

Lines changed: 52 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -181,6 +181,11 @@ static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
181181
}
182182

183183
st = (struct kvm_steal_time __user *)ghc->hva;
184+
if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_PREEMPT)) {
185+
unsafe_put_user(0, &st->preempted, out);
186+
vcpu->arch.st.preempted = 0;
187+
}
188+
184189
unsafe_get_user(version, &st->version, out);
185190
if (version & 1)
186191
version += 1; /* first time write, random junk */
@@ -1795,11 +1800,57 @@ static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
17951800
return 0;
17961801
}
17971802

1803+
static void kvm_vcpu_set_pv_preempted(struct kvm_vcpu *vcpu)
1804+
{
1805+
gpa_t gpa;
1806+
struct gfn_to_hva_cache *ghc;
1807+
struct kvm_memslots *slots;
1808+
struct kvm_steal_time __user *st;
1809+
1810+
gpa = vcpu->arch.st.guest_addr;
1811+
if (!(gpa & KVM_STEAL_PHYS_VALID))
1812+
return;
1813+
1814+
/* vCPU may be preempted for many times */
1815+
if (vcpu->arch.st.preempted)
1816+
return;
1817+
1818+
/* This happens on process exit */
1819+
if (unlikely(current->mm != vcpu->kvm->mm))
1820+
return;
1821+
1822+
gpa &= KVM_STEAL_PHYS_MASK;
1823+
ghc = &vcpu->arch.st.cache;
1824+
slots = kvm_memslots(vcpu->kvm);
1825+
if (slots->generation != ghc->generation || gpa != ghc->gpa) {
1826+
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
1827+
ghc->gpa = INVALID_GPA;
1828+
return;
1829+
}
1830+
}
1831+
1832+
st = (struct kvm_steal_time __user *)ghc->hva;
1833+
unsafe_put_user(KVM_VCPU_PREEMPTED, &st->preempted, out);
1834+
vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
1835+
out:
1836+
mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
1837+
}
1838+
17981839
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
17991840
{
1800-
int cpu;
1841+
int cpu, idx;
18011842
unsigned long flags;
18021843

1844+
if (vcpu->preempted && kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_PREEMPT)) {
1845+
/*
1846+
* Take the srcu lock as memslots will be accessed to check
1847+
* the gfn cache generation against the memslots generation.
1848+
*/
1849+
idx = srcu_read_lock(&vcpu->kvm->srcu);
1850+
kvm_vcpu_set_pv_preempted(vcpu);
1851+
srcu_read_unlock(&vcpu->kvm->srcu, idx);
1852+
}
1853+
18031854
local_irq_save(flags);
18041855
cpu = smp_processor_id();
18051856
vcpu->arch.last_sched_cpu = cpu;

arch/loongarch/kvm/vm.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,9 @@ static void kvm_vm_init_features(struct kvm *kvm)
5252
kvm->arch.pv_features = BIT(KVM_FEATURE_IPI);
5353
kvm->arch.kvm_features = BIT(KVM_LOONGARCH_VM_FEAT_PV_IPI);
5454
if (kvm_pvtime_supported()) {
55+
kvm->arch.pv_features |= BIT(KVM_FEATURE_PREEMPT);
5556
kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
57+
kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_PREEMPT);
5658
kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_STEALTIME);
5759
}
5860
}
@@ -154,6 +156,7 @@ static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr
154156
case KVM_LOONGARCH_VM_FEAT_MSGINT:
155157
case KVM_LOONGARCH_VM_FEAT_PMU:
156158
case KVM_LOONGARCH_VM_FEAT_PV_IPI:
159+
case KVM_LOONGARCH_VM_FEAT_PV_PREEMPT:
157160
case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME:
158161
if (kvm_vm_support(&kvm->arch, attr->attr))
159162
return 0;

0 commit comments

Comments
 (0)