@@ -30,17 +30,52 @@ static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
 };
 #endif
 
-static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
-	&vcpu_sbi_ext_v01,
-	&vcpu_sbi_ext_base,
-	&vcpu_sbi_ext_time,
-	&vcpu_sbi_ext_ipi,
-	&vcpu_sbi_ext_rfence,
-	&vcpu_sbi_ext_srst,
-	&vcpu_sbi_ext_hsm,
-	&vcpu_sbi_ext_pmu,
-	&vcpu_sbi_ext_experimental,
-	&vcpu_sbi_ext_vendor,
+struct kvm_riscv_sbi_extension_entry {
+	enum KVM_RISCV_SBI_EXT_ID dis_idx;
+	const struct kvm_vcpu_sbi_extension *ext_ptr;
+};
+
+static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
+	{
+		.dis_idx = KVM_RISCV_SBI_EXT_V01,
+		.ext_ptr = &vcpu_sbi_ext_v01,
+	},
+	{
+		.dis_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
+		.ext_ptr = &vcpu_sbi_ext_base,
+	},
+	{
+		.dis_idx = KVM_RISCV_SBI_EXT_TIME,
+		.ext_ptr = &vcpu_sbi_ext_time,
+	},
+	{
+		.dis_idx = KVM_RISCV_SBI_EXT_IPI,
+		.ext_ptr = &vcpu_sbi_ext_ipi,
+	},
+	{
+		.dis_idx = KVM_RISCV_SBI_EXT_RFENCE,
+		.ext_ptr = &vcpu_sbi_ext_rfence,
+	},
+	{
+		.dis_idx = KVM_RISCV_SBI_EXT_SRST,
+		.ext_ptr = &vcpu_sbi_ext_srst,
+	},
+	{
+		.dis_idx = KVM_RISCV_SBI_EXT_HSM,
+		.ext_ptr = &vcpu_sbi_ext_hsm,
+	},
+	{
+		.dis_idx = KVM_RISCV_SBI_EXT_PMU,
+		.ext_ptr = &vcpu_sbi_ext_pmu,
+	},
+	{
+		.dis_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
+		.ext_ptr = &vcpu_sbi_ext_experimental,
+	},
+	{
+		.dis_idx = KVM_RISCV_SBI_EXT_VENDOR,
+		.ext_ptr = &vcpu_sbi_ext_vendor,
+	},
 };
 
 void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
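
The extension_disabled[] bookkeeping used in the hunks below lives in the per-vCPU SBI context, which the companion header change in this series presumably extends. A minimal sketch of that context, using only names that appear in this diff, might look like the following; it is illustrative, not the header's actual definition.

/*
 * Sketch only: the real definition belongs to the SBI header updated by
 * this series, not to this file. One slot per disableable extension;
 * entries using the KVM_RISCV_SBI_EXT_MAX sentinel (e.g. "base") never
 * index this array.
 */
struct kvm_vcpu_sbi_context {
	/* ...existing fields... */
	bool extension_disabled[KVM_RISCV_SBI_EXT_MAX];
};
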
@@ -99,14 +134,192 @@ int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 0;
 }
 
-const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid)
+static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
+					 unsigned long reg_num,
+					 unsigned long reg_val)
+{
+	unsigned long i;
+	const struct kvm_riscv_sbi_extension_entry *sext = NULL;
+	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
+
+	if (reg_num >= KVM_RISCV_SBI_EXT_MAX ||
+	    (reg_val != 1 && reg_val != 0))
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
+		if (sbi_ext[i].dis_idx == reg_num) {
+			sext = &sbi_ext[i];
+			break;
+		}
+	}
+	if (!sext)
+		return -ENOENT;
+
+	scontext->extension_disabled[sext->dis_idx] = !reg_val;
+
+	return 0;
+}
+
+static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
+					 unsigned long reg_num,
+					 unsigned long *reg_val)
+{
+	unsigned long i;
+	const struct kvm_riscv_sbi_extension_entry *sext = NULL;
+	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
+
+	if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
+		if (sbi_ext[i].dis_idx == reg_num) {
+			sext = &sbi_ext[i];
+			break;
+		}
+	}
+	if (!sext)
+		return -ENOENT;
+
+	*reg_val = !scontext->extension_disabled[sext->dis_idx];
+
+	return 0;
+}
+
+static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
+					unsigned long reg_num,
+					unsigned long reg_val, bool enable)
+{
+	unsigned long i, ext_id;
+
+	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
+		return -EINVAL;
+
+	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
+		ext_id = i + reg_num * BITS_PER_LONG;
+		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
+			break;
+
+		riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
+	}
+
+	return 0;
+}
+
+static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
+					unsigned long reg_num,
+					unsigned long *reg_val)
+{
+	unsigned long i, ext_id, ext_val;
+
+	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
+		return -EINVAL;
+
+	for (i = 0; i < BITS_PER_LONG; i++) {
+		ext_id = i + reg_num * BITS_PER_LONG;
+		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
+			break;
+
+		ext_val = 0;
+		riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
+		if (ext_val)
+			*reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
+	}
+
+	return 0;
+}
+
+int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
+				   const struct kvm_one_reg *reg)
+{
+	unsigned long __user *uaddr =
+			(unsigned long __user *)(unsigned long)reg->addr;
+	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+					    KVM_REG_SIZE_MASK |
+					    KVM_REG_RISCV_SBI_EXT);
+	unsigned long reg_val, reg_subtype;
+
+	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+		return -EINVAL;
+
+	if (vcpu->arch.ran_atleast_once)
+		return -EBUSY;
+
+	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
+	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+
+	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
+		return -EFAULT;
+
+	switch (reg_subtype) {
+	case KVM_REG_RISCV_SBI_SINGLE:
+		return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
+	case KVM_REG_RISCV_SBI_MULTI_EN:
+		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
+	case KVM_REG_RISCV_SBI_MULTI_DIS:
+		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
+				   const struct kvm_one_reg *reg)
+{
+	int rc;
+	unsigned long __user *uaddr =
+			(unsigned long __user *)(unsigned long)reg->addr;
+	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+					    KVM_REG_SIZE_MASK |
+					    KVM_REG_RISCV_SBI_EXT);
+	unsigned long reg_val, reg_subtype;
+
+	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+		return -EINVAL;
+
+	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
+	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+
+	reg_val = 0;
+	switch (reg_subtype) {
+	case KVM_REG_RISCV_SBI_SINGLE:
+		rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
+		break;
+	case KVM_REG_RISCV_SBI_MULTI_EN:
+	case KVM_REG_RISCV_SBI_MULTI_DIS:
+		rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
+		if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
+			reg_val = ~reg_val;
+		break;
+	default:
+		rc = -EINVAL;
+	}
+	if (rc)
+		return rc;
+
+	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
+		return -EFAULT;
+
+	return 0;
+}
+
+const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
+				struct kvm_vcpu *vcpu, unsigned long extid)
 {
-	int i = 0;
+	int i;
+	const struct kvm_riscv_sbi_extension_entry *sext;
+	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
 
 	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
-		if (sbi_ext[i]->extid_start <= extid &&
-		    sbi_ext[i]->extid_end >= extid)
-			return sbi_ext[i];
+		sext = &sbi_ext[i];
+		if (sext->ext_ptr->extid_start <= extid &&
+		    sext->ext_ptr->extid_end >= extid) {
+			if (sext->dis_idx < KVM_RISCV_SBI_EXT_MAX &&
+			    scontext->extension_disabled[sext->dis_idx])
+				return NULL;
+			return sbi_ext[i].ext_ptr;
+		}
 	}
 
 	return NULL;
@@ -126,7 +339,7 @@ int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	};
 	bool ext_is_v01 = false;
 
-	sbi_ext = kvm_vcpu_sbi_find_ext(cp->a7);
+	sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
 	if (sbi_ext && sbi_ext->handler) {
 #ifdef CONFIG_RISCV_SBI_V01
 		if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
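
For context on how the new registers are meant to be driven, here is a rough userspace sketch (not part of this patch) of a VMM disabling one SBI extension through the SINGLE subtype register before the vCPU first runs. The register ID macros (KVM_REG_RISCV_SBI_EXT, KVM_REG_RISCV_SBI_SINGLE, KVM_RISCV_SBI_EXT_PMU) come from the uapi header this series touches, and the OR-composition plus the 64-bit register size for rv64 are assumptions on my part, not a spec.

/*
 * Hypothetical VMM-side sketch, rv64 assumed; the exact register ID
 * composition follows the uapi header updated by this series and is an
 * assumption here.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int disable_sbi_pmu(int vcpu_fd)
{
	__u64 val = 0;	/* 0 = disable the extension, 1 = enable it */
	struct kvm_one_reg reg = {
		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE |
		      KVM_RISCV_SBI_EXT_PMU,
		.addr = (__u64)(unsigned long)&val,
	};

	/*
	 * Must happen before the vCPU runs: the kernel side above returns
	 * -EBUSY once vcpu->arch.ran_atleast_once is set.
	 */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

The MULTI_EN/MULTI_DIS subtypes work the same way but take a bitmask, so a VMM can flip many extensions with one KVM_SET_ONE_REG call instead of one call per extension.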