@@ -20,6 +20,33 @@ struct kvm_aia {
2020
2121 /* In-kernel irqchip initialized */
2222 bool initialized ;
23+
24+ /* Virtualization mode (Emulation, HW Accelerated, or Auto) */
25+ u32 mode ;
26+
27+ /* Number of MSIs */
28+ u32 nr_ids ;
29+
30+ /* Number of wired IRQs */
31+ u32 nr_sources ;
32+
33+ /* Number of group bits in IMSIC address */
34+ u32 nr_group_bits ;
35+
36+ /* Position of group bits in IMSIC address */
37+ u32 nr_group_shift ;
38+
39+ /* Number of hart bits in IMSIC address */
40+ u32 nr_hart_bits ;
41+
42+ /* Number of guest bits in IMSIC address */
43+ u32 nr_guest_bits ;
44+
45+ /* Guest physical address of APLIC */
46+ gpa_t aplic_addr ;
47+
48+ /* Internal state of APLIC */
49+ void * aplic_state ;
2350};
2451
2552struct kvm_vcpu_aia_csr {
@@ -38,8 +65,19 @@ struct kvm_vcpu_aia {
3865
3966 /* CPU AIA CSR context upon Guest VCPU reset */
4067 struct kvm_vcpu_aia_csr guest_reset_csr ;
68+
69+ /* Guest physical address of IMSIC for this VCPU */
70+ gpa_t imsic_addr ;
71+
72+ /* HART index of IMSIC extracted from guest physical address */
73+ u32 hart_index ;
74+
75+ /* Internal state of IMSIC for this VCPU */
76+ void * imsic_state ;
4177};
4278
79+ #define KVM_RISCV_AIA_UNDEF_ADDR (-1)
80+
4381#define kvm_riscv_aia_initialized (k ) ((k)->arch.aia.initialized)
4482
4583#define irqchip_in_kernel (k ) ((k)->arch.aia.in_kernel)
@@ -50,10 +88,17 @@ DECLARE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
5088#define kvm_riscv_aia_available () \
5189 static_branch_unlikely(&kvm_riscv_aia_available)
5290
91+ extern struct kvm_device_ops kvm_riscv_aia_device_ops ;
92+
/*
 * Release per-VCPU IMSIC resources.
 * No-op in this variant — there is no in-kernel IMSIC state to drop.
 * NOTE(review): presumably the fallback when in-kernel AIA irqchip
 * support is compiled out — confirm against the enclosing #ifdef.
 */
static inline void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu)
{
}
5696
/*
 * Update per-VCPU IMSIC mapping.
 * No-op stub: ignores @vcpu and always returns 1.
 */
static inline int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu)
{
	return 1;
}
101+
57102#define KVM_RISCV_AIA_IMSIC_TOPEI (ISELECT_MASK + 1)
58103static inline int kvm_riscv_vcpu_aia_imsic_rmw (struct kvm_vcpu * vcpu ,
59104 unsigned long isel ,
@@ -64,6 +109,41 @@ static inline int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu,
64109 return 0 ;
65110}
66111
/*
 * Reset per-VCPU IMSIC state.
 * No-op in this variant — nothing to reset.
 */
static inline void kvm_riscv_vcpu_aia_imsic_reset(struct kvm_vcpu *vcpu)
{
}
115+
116+ static inline int kvm_riscv_vcpu_aia_imsic_inject (struct kvm_vcpu * vcpu ,
117+ u32 guest_index , u32 offset ,
118+ u32 iid )
119+ {
120+ return 0 ;
121+ }
122+
/*
 * Initialize per-VCPU IMSIC state.
 * No-op stub: ignores @vcpu and always reports success (0).
 */
static inline int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu)
{
	return 0;
}
127+
/*
 * Tear down per-VCPU IMSIC state.
 * No-op in this variant — nothing was allocated.
 */
static inline void kvm_riscv_vcpu_aia_imsic_cleanup(struct kvm_vcpu *vcpu)
{
}
131+
132+ static inline int kvm_riscv_aia_aplic_inject (struct kvm * kvm ,
133+ u32 source , bool level )
134+ {
135+ return 0 ;
136+ }
137+
/*
 * Initialize the in-kernel APLIC for a VM.
 * No-op stub: ignores @kvm and always reports success (0).
 */
static inline int kvm_riscv_aia_aplic_init(struct kvm *kvm)
{
	return 0;
}
142+
/*
 * Tear down the in-kernel APLIC for a VM.
 * No-op in this variant — nothing was allocated.
 */
static inline void kvm_riscv_aia_aplic_cleanup(struct kvm *kvm)
{
}
146+
67147#ifdef CONFIG_32BIT
68148void kvm_riscv_vcpu_aia_flush_interrupts (struct kvm_vcpu * vcpu );
69149void kvm_riscv_vcpu_aia_sync_interrupts (struct kvm_vcpu * vcpu );
@@ -99,50 +179,18 @@ int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
99179{ .base = CSR_SIREG, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_ireg }, \
100180{ .base = CSR_STOPEI, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_topei },
101181
102- static inline int kvm_riscv_vcpu_aia_update (struct kvm_vcpu * vcpu )
103- {
104- return 1 ;
105- }
106-
107- static inline void kvm_riscv_vcpu_aia_reset (struct kvm_vcpu * vcpu )
108- {
109- }
110-
111- static inline int kvm_riscv_vcpu_aia_init (struct kvm_vcpu * vcpu )
112- {
113- return 0 ;
114- }
115-
116- static inline void kvm_riscv_vcpu_aia_deinit (struct kvm_vcpu * vcpu )
117- {
118- }
119-
120- static inline int kvm_riscv_aia_inject_msi_by_id (struct kvm * kvm ,
121- u32 hart_index ,
122- u32 guest_index , u32 iid )
123- {
124- return 0 ;
125- }
126-
127- static inline int kvm_riscv_aia_inject_msi (struct kvm * kvm ,
128- struct kvm_msi * msi )
129- {
130- return 0 ;
131- }
182+ int kvm_riscv_vcpu_aia_update (struct kvm_vcpu * vcpu );
183+ void kvm_riscv_vcpu_aia_reset (struct kvm_vcpu * vcpu );
184+ int kvm_riscv_vcpu_aia_init (struct kvm_vcpu * vcpu );
185+ void kvm_riscv_vcpu_aia_deinit (struct kvm_vcpu * vcpu );
132186
133- static inline int kvm_riscv_aia_inject_irq (struct kvm * kvm ,
134- unsigned int irq , bool level )
135- {
136- return 0 ;
137- }
187+ int kvm_riscv_aia_inject_msi_by_id (struct kvm * kvm , u32 hart_index ,
188+ u32 guest_index , u32 iid );
189+ int kvm_riscv_aia_inject_msi (struct kvm * kvm , struct kvm_msi * msi );
190+ int kvm_riscv_aia_inject_irq (struct kvm * kvm , unsigned int irq , bool level );
138191
139- static inline void kvm_riscv_aia_init_vm (struct kvm * kvm )
140- {
141- }
142-
143- static inline void kvm_riscv_aia_destroy_vm (struct kvm * kvm )
144- {
145- }
192+ void kvm_riscv_aia_init_vm (struct kvm * kvm );
193+ void kvm_riscv_aia_destroy_vm (struct kvm * kvm );
146194
147195int kvm_riscv_aia_alloc_hgei (int cpu , struct kvm_vcpu * owner ,
148196 void __iomem * * hgei_va , phys_addr_t * hgei_pa );
0 commit comments