 
 #include <linux/jump_label.h>
 #include <linux/kvm_types.h>
+#include <asm/csr.h>
 
 struct kvm_aia {
 	/* In-kernel irqchip created */
@@ -21,7 +22,22 @@ struct kvm_aia {
 	bool initialized;
 };
 
+struct kvm_vcpu_aia_csr {
+	unsigned long vsiselect;
+	unsigned long hviprio1;
+	unsigned long hviprio2;
+	unsigned long vsieh;
+	unsigned long hviph;
+	unsigned long hviprio1h;
+	unsigned long hviprio2h;
+};
+
 struct kvm_vcpu_aia {
+	/* CPU AIA CSR context of Guest VCPU */
+	struct kvm_vcpu_aia_csr guest_csr;
+
+	/* CPU AIA CSR context upon Guest VCPU reset */
+	struct kvm_vcpu_aia_csr guest_reset_csr;
 };
 
 #define kvm_riscv_aia_initialized(k)	((k)->arch.aia.initialized)
@@ -32,48 +48,50 @@ DECLARE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
 #define kvm_riscv_aia_available() \
 	static_branch_unlikely(&kvm_riscv_aia_available)
 
-static inline void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
-{
-}
-
-static inline void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
-{
-}
-
-static inline bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu,
-						     u64 mask)
-{
-	return false;
-}
-
-static inline void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
-{
-}
-
-static inline void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
-{
-}
-
-static inline void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
+#define KVM_RISCV_AIA_IMSIC_TOPEI	(ISELECT_MASK + 1)
+static inline int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu,
+					       unsigned long isel,
+					       unsigned long *val,
+					       unsigned long new_val,
+					       unsigned long wr_mask)
 {
+	return 0;
 }
 
-static inline int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
-					     unsigned long reg_num,
-					     unsigned long *out_val)
+#ifdef CONFIG_32BIT
+void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu);
+#else
+static inline void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
 {
-	*out_val = 0;
-	return 0;
 }
-
-static inline int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
-					     unsigned long reg_num,
-					     unsigned long val)
+static inline void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
 {
-	return 0;
 }
-
-#define KVM_RISCV_VCPU_AIA_CSR_FUNCS
+#endif
+bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
+
+void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu);
+void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
+			       unsigned long reg_num,
+			       unsigned long *out_val);
+int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
+			       unsigned long reg_num,
+			       unsigned long val);
+
+int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu,
+				 unsigned int csr_num,
+				 unsigned long *val,
+				 unsigned long new_val,
+				 unsigned long wr_mask);
+int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
+				unsigned long *val, unsigned long new_val,
+				unsigned long wr_mask);
+#define KVM_RISCV_VCPU_AIA_CSR_FUNCS \
+{ .base = CSR_SIREG,  .count = 1, .func = kvm_riscv_vcpu_aia_rmw_ireg }, \
+{ .base = CSR_STOPEI, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_topei },
 
 static inline int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
 {
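For context on how the new KVM_RISCV_VCPU_AIA_CSR_FUNCS expansion gets consumed: the designated initializers (.base, .count, .func) are shaped for the csr_func dispatch table that KVM RISC-V's CSR-instruction emulation walks in arch/riscv/kvm/vcpu_insn.c. A minimal sketch of that pattern follows; the struct layout is assumed from the initializers above rather than defined in this header.

/* Sketch only: field layout assumed from the .base/.count/.func initializers. */
struct csr_func {
	unsigned int base;	/* first CSR number handled by this entry */
	unsigned int count;	/* number of consecutive CSRs covered */
	/*
	 * Read-modify-write handler: the old CSR value is returned
	 * through *val, and (new_val & wr_mask) is applied as the
	 * write portion of the access.
	 */
	int (*func)(struct kvm_vcpu *vcpu, unsigned int csr_num,
		    unsigned long *val, unsigned long new_val,
		    unsigned long wr_mask);
};

static const struct csr_func csr_funcs[] = {
	KVM_RISCV_VCPU_AIA_CSR_FUNCS
};

Because the macro previously expanded to nothing, the dispatch table built from it simply gained no AIA entries; after this patch it contributes handlers for CSR_SIREG and CSR_STOPEI without the table-walking code changing at all.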