@@ -16,6 +16,8 @@
 #include <asm/hwcap.h>
 #include <asm/kvm_aia.h>
 #include <asm/ptrace.h>
+#include <asm/kvm_tlb.h>
+#include <asm/kvm_vmid.h>
 #include <asm/kvm_vcpu_fp.h>
 #include <asm/kvm_vcpu_insn.h>
 #include <asm/kvm_vcpu_sbi.h>
@@ -55,24 +57,6 @@
				 BIT(IRQ_VS_TIMER) | \
				 BIT(IRQ_VS_EXT))
 
-enum kvm_riscv_hfence_type {
-	KVM_RISCV_HFENCE_UNKNOWN = 0,
-	KVM_RISCV_HFENCE_GVMA_VMID_GPA,
-	KVM_RISCV_HFENCE_VVMA_ASID_GVA,
-	KVM_RISCV_HFENCE_VVMA_ASID_ALL,
-	KVM_RISCV_HFENCE_VVMA_GVA,
-};
-
-struct kvm_riscv_hfence {
-	enum kvm_riscv_hfence_type type;
-	unsigned long asid;
-	unsigned long order;
-	gpa_t addr;
-	gpa_t size;
-};
-
-#define KVM_RISCV_VCPU_MAX_HFENCE	64
-
 struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
 };
@@ -98,15 +82,6 @@ struct kvm_vcpu_stat {
 struct kvm_arch_memory_slot {
 };
 
-struct kvm_vmid {
-	/*
-	 * Writes to vmid_version and vmid happen with vmid_lock held
-	 * whereas reads happen without any lock held.
-	 */
-	unsigned long vmid_version;
-	unsigned long vmid;
-};
-
 struct kvm_arch {
	/* G-stage vmid */
	struct kvm_vmid vmid;
@@ -310,77 +285,6 @@ static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
 
-#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER	12
-
-void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
-					  gpa_t gpa, gpa_t gpsz,
-					  unsigned long order);
-void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
-void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
-				     unsigned long order);
-void kvm_riscv_local_hfence_gvma_all(void);
-void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
-					  unsigned long asid,
-					  unsigned long gva,
-					  unsigned long gvsz,
-					  unsigned long order);
-void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
-					  unsigned long asid);
-void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
-				     unsigned long gva, unsigned long gvsz,
-				     unsigned long order);
-void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
-
-void kvm_riscv_tlb_flush_process(struct kvm_vcpu *vcpu);
-
-void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
-void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
-void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);
-
-void kvm_riscv_fence_i(struct kvm *kvm,
-		       unsigned long hbase, unsigned long hmask);
-void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
-				    unsigned long hbase, unsigned long hmask,
-				    gpa_t gpa, gpa_t gpsz,
-				    unsigned long order);
-void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
-				    unsigned long hbase, unsigned long hmask);
-void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
-				    unsigned long hbase, unsigned long hmask,
-				    unsigned long gva, unsigned long gvsz,
-				    unsigned long order, unsigned long asid);
-void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
-				    unsigned long hbase, unsigned long hmask,
-				    unsigned long asid);
-void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
-			       unsigned long hbase, unsigned long hmask,
-			       unsigned long gva, unsigned long gvsz,
-			       unsigned long order);
-void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
-			       unsigned long hbase, unsigned long hmask);
-
-int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
-			     phys_addr_t hpa, unsigned long size,
-			     bool writable, bool in_atomic);
-void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
-			      unsigned long size);
-int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
-			 struct kvm_memory_slot *memslot,
-			 gpa_t gpa, unsigned long hva, bool is_write);
-int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
-void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
-void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
-void __init kvm_riscv_gstage_mode_detect(void);
-unsigned long __init kvm_riscv_gstage_mode(void);
-int kvm_riscv_gstage_gpa_bits(void);
-
-void __init kvm_riscv_gstage_vmid_detect(void);
-unsigned long kvm_riscv_gstage_vmid_bits(void);
-int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
-bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
-void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);
-void kvm_riscv_gstage_vmid_sanitize(struct kvm_vcpu *vcpu);
-
 int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines);
 
 void __kvm_riscv_unpriv_trap(void);
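
None of the removed declarations are dropped: given the two includes added at the top of the file, the natural reading is that the VMID definitions move into the new asm/kvm_vmid.h and the HFENCE/TLB definitions into the new asm/kvm_tlb.h. Below is a minimal sketch of what asm/kvm_vmid.h could look like after the split; the include-guard name and the two #includes are assumptions, since this diff only shows the removals.

/*
 * Sketch of the new asm/kvm_vmid.h, assuming the declarations removed
 * above move over verbatim. Guard name and #includes are assumptions.
 */
#ifndef __RISCV_KVM_VMID_H_
#define __RISCV_KVM_VMID_H_

#include <linux/init.h>		/* __init */
#include <linux/kvm_types.h>	/* forward declarations of struct kvm, struct kvm_vcpu */

struct kvm_vmid {
	/*
	 * Writes to vmid_version and vmid happen with vmid_lock held
	 * whereas reads happen without any lock held.
	 */
	unsigned long vmid_version;
	unsigned long vmid;
};

void __init kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);
void kvm_riscv_gstage_vmid_sanitize(struct kvm_vcpu *vcpu);

#endif

asm/kvm_tlb.h would presumably carry the rest: KVM_RISCV_GSTAGE_TLB_MIN_ORDER, enum kvm_riscv_hfence_type, struct kvm_riscv_hfence, KVM_RISCV_VCPU_MAX_HFENCE, and the kvm_riscv_local_hfence_*(), kvm_riscv_hfence_*(), and TLB-flush/fence.i declarations removed above. The g-stage page-table declarations (kvm_riscv_gstage_map(), kvm_riscv_gstage_alloc_pgd(), and friends) are not covered by either new include, so they must move elsewhere in the same series. The struct kvm_vmid definition has to land in a header included before struct kvm_arch is defined, which is why both new includes are added ahead of the existing kvm_vcpu_*.h includes; the overall effect is that kvm_host.h stops accreting subsystem internals and the TLB/VMID code becomes independently maintainable.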