 #include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/kvm_host.h>
+#include <linux/mm.h>
+#include <linux/sizes.h>
 
+#include <asm/bug.h>
+#include <asm/current.h>
 #include <asm/kvm_vcpu_sbi.h>
+#include <asm/page.h>
 #include <asm/sbi.h>
+#include <asm/uaccess.h>
 
 void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
 {
@@ -19,14 +25,100 @@ void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
 void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
 {
 	gpa_t shmem = vcpu->arch.sta.shmem;
+	u64 last_steal = vcpu->arch.sta.last_steal;
+	u32 *sequence_ptr, sequence;
+	u64 *steal_ptr, steal;
+	unsigned long hva;
+	gfn_t gfn;
 
 	if (shmem == INVALID_GPA)
 		return;
+
+	/*
+	 * shmem is 64-byte aligned (see the enforcement in
+	 * kvm_sbi_sta_steal_time_set_shmem()) and the size of sbi_sta_struct
+	 * is 64 bytes, so we know all its offsets are in the same page.
+	 */
+	gfn = shmem >> PAGE_SHIFT;
+	hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);
+
+	if (WARN_ON(kvm_is_error_hva(hva))) {
+		vcpu->arch.sta.shmem = INVALID_GPA;
+		return;
+	}
+
+	sequence_ptr = (u32 *)(hva + offset_in_page(shmem) +
+			       offsetof(struct sbi_sta_struct, sequence));
+	steal_ptr = (u64 *)(hva + offset_in_page(shmem) +
+			    offsetof(struct sbi_sta_struct, steal));
+
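+	/*
+	 * The sequence number is bumped twice per update, seqlock-style:
+	 * the first increment makes it odd while the steal value is being
+	 * written, the second makes it even again, so the guest can detect
+	 * and retry a torn read.
+	 */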
+	if (WARN_ON(get_user(sequence, sequence_ptr)))
+		return;
+
+	sequence = le32_to_cpu(sequence);
+	sequence += 1;
+
+	if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
+		return;
+
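+	/*
+	 * run_delay accumulates the time this task has spent waiting on a
+	 * runqueue; the delta since the last update is the steal time to
+	 * report to the guest.
+	 */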
+	if (!WARN_ON(get_user(steal, steal_ptr))) {
+		steal = le64_to_cpu(steal);
+		vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
+		steal += vcpu->arch.sta.last_steal - last_steal;
+		WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
+	}
+
+	sequence += 1;
+	WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr));
+
+	kvm_vcpu_mark_page_dirty(vcpu, gfn);
 }
 
 static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
 {
-	return SBI_ERR_FAILURE;
+	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+	unsigned long shmem_phys_lo = cp->a0;
+	unsigned long shmem_phys_hi = cp->a1;
+	u32 flags = cp->a2;
+	struct sbi_sta_struct zero_sta = {0};
+	unsigned long hva;
+	bool writable;
+	gpa_t shmem;
+	int ret;
+
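+	/* No flags are defined for this call, so anything nonzero is invalid. */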
+	if (flags != 0)
+		return SBI_ERR_INVALID_PARAM;
+
+	if (shmem_phys_lo == SBI_STA_SHMEM_DISABLE &&
+	    shmem_phys_hi == SBI_STA_SHMEM_DISABLE) {
+		vcpu->arch.sta.shmem = INVALID_GPA;
+		return 0;
+	}
+
+	if (shmem_phys_lo & (SZ_64 - 1))
+		return SBI_ERR_INVALID_PARAM;
+
+	shmem = shmem_phys_lo;
+
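+	/*
+	 * On RV32 the high word of the 64-bit physical address arrives
+	 * in a1; on RV64 the address already fits in a0, so a nonzero
+	 * a1 cannot name a valid address.
+	 */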
+	if (shmem_phys_hi != 0) {
+		if (IS_ENABLED(CONFIG_32BIT))
+			shmem |= ((gpa_t)shmem_phys_hi << 32);
+		else
+			return SBI_ERR_INVALID_ADDRESS;
+	}
+
+	hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable);
+	if (kvm_is_error_hva(hva) || !writable)
+		return SBI_ERR_INVALID_ADDRESS;
+
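+	/*
+	 * Zero-fill the shared memory region. This initializes the sta
+	 * struct contents for the guest and doubles as a check that the
+	 * region is actually writable.
+	 */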
+	ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta));
+	if (ret)
+		return SBI_ERR_FAILURE;
+
+	vcpu->arch.sta.shmem = shmem;
+	vcpu->arch.sta.last_steal = current->sched_info.run_delay;
+
+	return 0;
 }
 
 static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
@@ -52,7 +144,7 @@ static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 
 static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu)
 {
-	return 0;
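+	/* Only advertise STA when run-delay accounting is available. */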
+	return !!sched_info_on();
 }
 
 const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {