1616#include <asm/hwcap.h>
1717#include <asm/kvm_aia.h>
1818#include <asm/ptrace.h>
19+ #include <asm/kvm_tlb.h>
20+ #include <asm/kvm_vmid.h>
1921#include <asm/kvm_vcpu_fp.h>
2022#include <asm/kvm_vcpu_insn.h>
2123#include <asm/kvm_vcpu_sbi.h>
/*
 * vCPU request bits (KVM_ARCH_REQ indices 2-6).
 * The fence/hfence requests carry KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP:
 * the sender waits for the target vCPU to consume the request but does not
 * wake a blocked vCPU to do so.
 * NOTE(review): lines marked '-'/'+' below are diff artifacts of this
 * rendering (removed/added by the commit), not part of the header text.
 */
3638#define KVM_REQ_UPDATE_HGATP KVM_ARCH_REQ(2)
3739#define KVM_REQ_FENCE_I \
3840 KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
/* Removed line: aliased the GVMA-by-VMID full flush onto the generic TLB-flush request. */
39- #define KVM_REQ_HFENCE_GVMA_VMID_ALL KVM_REQ_TLB_FLUSH
4041#define KVM_REQ_HFENCE_VVMA_ALL \
4142 KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
4243#define KVM_REQ_HFENCE \
4344 KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
4445#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(6)
4546
/* Added line: opts this arch into KVM's ranged remote-TLB-flush API — NOTE(review): confirm against the generic kvm_main.c hook. */
47+ #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
48+
4649#define KVM_HEDELEG_DEFAULT (BIT(EXC_INST_MISALIGNED) | \
50+ BIT(EXC_INST_ILLEGAL) | \
4751 BIT(EXC_BREAKPOINT) | \
4852 BIT(EXC_SYSCALL) | \
4953 BIT(EXC_INST_PAGE_FAULT) | \
5458 BIT(IRQ_VS_TIMER) | \
5559 BIT(IRQ_VS_EXT))
5660
/*
 * Type tag for a queued remote HFENCE operation (see struct
 * kvm_riscv_hfence). UNKNOWN (0) marks an unused entry; the others
 * select G-stage (GVMA by VMID+GPA) or VS-stage (VVMA by GVA, with or
 * without an ASID) flush variants, per their names.
 * (Removed by this commit — NOTE(review): presumably moved to
 * <asm/kvm_tlb.h>, which the commit adds to the includes; confirm.)
 */
57- enum kvm_riscv_hfence_type {
58- KVM_RISCV_HFENCE_UNKNOWN = 0 ,
59- KVM_RISCV_HFENCE_GVMA_VMID_GPA ,
60- KVM_RISCV_HFENCE_VVMA_ASID_GVA ,
61- KVM_RISCV_HFENCE_VVMA_ASID_ALL ,
62- KVM_RISCV_HFENCE_VVMA_GVA ,
63- };
64-
/*
 * One queued HFENCE request: the variant to execute (type), the ASID
 * used by the VVMA_ASID_* variants, and the target range
 * [addr, addr + size). 'order' is the page-order granule of the flush
 * — NOTE(review): presumably bounded below by
 * KVM_RISCV_GSTAGE_TLB_MIN_ORDER; confirm in the implementation.
 * (Removed by this commit along with the queue-depth constant below.)
 */
65- struct kvm_riscv_hfence {
66- enum kvm_riscv_hfence_type type ;
67- unsigned long asid ;
68- unsigned long order ;
69- gpa_t addr ;
70- gpa_t size ;
71- };
72-
/* Capacity of the per-vCPU HFENCE request queue — TODO confirm against the queue definition (not visible in this chunk). */
73- #define KVM_RISCV_VCPU_MAX_HFENCE 64
74-
/* Per-VM statistics: riscv adds nothing beyond the generic KVM counters. */
7561struct kvm_vm_stat {
7662 struct kvm_vm_stat_generic generic ;
7763};
@@ -97,15 +83,6 @@ struct kvm_vcpu_stat {
/* No riscv-specific per-memslot state. */
9783struct kvm_arch_memory_slot {
9884};
9985
/*
 * G-stage VMID paired with a generation counter; staleness is detected
 * via kvm_riscv_gstage_vmid_ver_changed() (declared further down in the
 * pre-commit revision). Removed by this commit — NOTE(review):
 * presumably moved to <asm/kvm_vmid.h>, which the commit adds to the
 * includes; confirm.
 */
100- struct kvm_vmid {
101- /*
102- * Writes to vmid_version and vmid happen with vmid_lock held
103- * whereas reads happen without any lock held.
104- */
105- unsigned long vmid_version ;
106- unsigned long vmid ;
107- };
108-
10986struct kvm_arch {
11087 /* G-stage vmid */
11188 struct kvm_vmid vmid ;
@@ -309,77 +286,6 @@ static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
309286static inline void kvm_arch_vcpu_blocking (struct kvm_vcpu * vcpu ) {}
310287static inline void kvm_arch_vcpu_unblocking (struct kvm_vcpu * vcpu ) {}
311288
/*
 * Declarations removed by this commit — NOTE(review): presumably now
 * provided by <asm/kvm_tlb.h> / <asm/kvm_vmid.h>, which the commit adds
 * to the include list at the top of this header; confirm.
 */
/* Smallest page-order a G-stage TLB flush can target (2^12 = 4 KiB). */
312- #define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12
313-
/* Local HFENCE primitives — 'local' per the naming; NOTE(review): confirm they act on the current hart only. */
314- void kvm_riscv_local_hfence_gvma_vmid_gpa (unsigned long vmid ,
315- gpa_t gpa , gpa_t gpsz ,
316- unsigned long order );
317- void kvm_riscv_local_hfence_gvma_vmid_all (unsigned long vmid );
318- void kvm_riscv_local_hfence_gvma_gpa (gpa_t gpa , gpa_t gpsz ,
319- unsigned long order );
320- void kvm_riscv_local_hfence_gvma_all (void );
321- void kvm_riscv_local_hfence_vvma_asid_gva (unsigned long vmid ,
322- unsigned long asid ,
323- unsigned long gva ,
324- unsigned long gvsz ,
325- unsigned long order );
326- void kvm_riscv_local_hfence_vvma_asid_all (unsigned long vmid ,
327- unsigned long asid );
328- void kvm_riscv_local_hfence_vvma_gva (unsigned long vmid ,
329- unsigned long gva , unsigned long gvsz ,
330- unsigned long order );
331- void kvm_riscv_local_hfence_vvma_all (unsigned long vmid );
332-
333- void kvm_riscv_local_tlb_sanitize (struct kvm_vcpu * vcpu );
334-
/* Per-vCPU handlers — by naming, these consume the KVM_REQ_FENCE_I / KVM_REQ_HFENCE* request bits defined above; TODO confirm in vcpu.c. */
335- void kvm_riscv_fence_i_process (struct kvm_vcpu * vcpu );
336- void kvm_riscv_hfence_gvma_vmid_all_process (struct kvm_vcpu * vcpu );
337- void kvm_riscv_hfence_vvma_all_process (struct kvm_vcpu * vcpu );
338- void kvm_riscv_hfence_process (struct kvm_vcpu * vcpu );
339-
/* Remote fences targeting the set of vCPUs selected by hbase/hmask — NOTE(review): exact hbase/hmask semantics not visible here. */
340- void kvm_riscv_fence_i (struct kvm * kvm ,
341- unsigned long hbase , unsigned long hmask );
342- void kvm_riscv_hfence_gvma_vmid_gpa (struct kvm * kvm ,
343- unsigned long hbase , unsigned long hmask ,
344- gpa_t gpa , gpa_t gpsz ,
345- unsigned long order );
346- void kvm_riscv_hfence_gvma_vmid_all (struct kvm * kvm ,
347- unsigned long hbase , unsigned long hmask );
348- void kvm_riscv_hfence_vvma_asid_gva (struct kvm * kvm ,
349- unsigned long hbase , unsigned long hmask ,
350- unsigned long gva , unsigned long gvsz ,
351- unsigned long order , unsigned long asid );
352- void kvm_riscv_hfence_vvma_asid_all (struct kvm * kvm ,
353- unsigned long hbase , unsigned long hmask ,
354- unsigned long asid );
355- void kvm_riscv_hfence_vvma_gva (struct kvm * kvm ,
356- unsigned long hbase , unsigned long hmask ,
357- unsigned long gva , unsigned long gvsz ,
358- unsigned long order );
359- void kvm_riscv_hfence_vvma_all (struct kvm * kvm ,
360- unsigned long hbase , unsigned long hmask );
361-
/* G-stage (guest-physical) page-table management. */
362- int kvm_riscv_gstage_ioremap (struct kvm * kvm , gpa_t gpa ,
363- phys_addr_t hpa , unsigned long size ,
364- bool writable , bool in_atomic );
365- void kvm_riscv_gstage_iounmap (struct kvm * kvm , gpa_t gpa ,
366- unsigned long size );
367- int kvm_riscv_gstage_map (struct kvm_vcpu * vcpu ,
368- struct kvm_memory_slot * memslot ,
369- gpa_t gpa , unsigned long hva , bool is_write );
370- int kvm_riscv_gstage_alloc_pgd (struct kvm * kvm );
371- void kvm_riscv_gstage_free_pgd (struct kvm * kvm );
372- void kvm_riscv_gstage_update_hgatp (struct kvm_vcpu * vcpu );
373- void __init kvm_riscv_gstage_mode_detect (void );
374- unsigned long __init kvm_riscv_gstage_mode (void );
375- int kvm_riscv_gstage_gpa_bits (void );
376-
/* VMID allocator interface (operates on struct kvm_vmid above). */
377- void __init kvm_riscv_gstage_vmid_detect (void );
378- unsigned long kvm_riscv_gstage_vmid_bits (void );
379- int kvm_riscv_gstage_vmid_init (struct kvm * kvm );
380- bool kvm_riscv_gstage_vmid_ver_changed (struct kvm_vmid * vmid );
381- void kvm_riscv_gstage_vmid_update (struct kvm_vcpu * vcpu );
382-
/* Miscellaneous vCPU/VM interfaces (unchanged context in this hunk). */
383289int kvm_riscv_setup_default_irq_routing (struct kvm * kvm , u32 lines );
384290
383291void __kvm_riscv_unpriv_trap (void );
@@ -415,7 +321,6 @@ void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
415321void kvm_riscv_vcpu_power_on (struct kvm_vcpu * vcpu );
416322bool kvm_riscv_vcpu_stopped (struct kvm_vcpu * vcpu );
417323
/* Removed by this commit — NOTE(review): presumably relocated to an SBI STA header; confirm. */
418- void kvm_riscv_vcpu_sbi_sta_reset (struct kvm_vcpu * vcpu );
419324void kvm_riscv_vcpu_record_steal_time (struct kvm_vcpu * vcpu );
420325
421326#endif /* __RISCV_KVM_HOST_H__ */
0 commit comments