|
11 | 11 | #include <pthread.h> |
12 | 12 | #include <linux/kernel.h> |
13 | 13 | #include <asm/kvm.h> |
| 14 | +#ifndef __riscv |
14 | 15 | #include <asm/kvm_para.h> |
| 16 | +#endif |
15 | 17 |
|
16 | 18 | #include "test_util.h" |
17 | 19 | #include "kvm_util.h" |
@@ -203,6 +205,103 @@ static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) |
203 | 205 | pr_info(" st_time: %ld\n", st->st_time); |
204 | 206 | } |
205 | 207 |
|
#elif defined(__riscv)

/* SBI STA shmem must have 64-byte alignment */
#define STEAL_TIME_SIZE		((sizeof(struct sta_struct) + 63) & ~63)

/* Guest-physical address of each vCPU's STA shmem area (st_gva holds the GVA). */
static vm_paddr_t st_gpa[NR_VCPUS];

/*
 * Steal-time accounting shared-memory layout for the SBI STA extension.
 * The fields plus pad total exactly 64 bytes (4 + 4 + 8 + 1 + 47), matching
 * the 64-byte-aligned region carved out by STEAL_TIME_SIZE.
 */
struct sta_struct {
	uint32_t sequence;	/* guest expects an even value when stable (see check_status) */
	uint32_t flags;		/* guest expects 0 (see check_status) */
	uint64_t steal;		/* accumulated steal time reported by the host */
	uint8_t preempted;	/* guest expects 0 (see check_status) */
	uint8_t pad[47];	/* pad the structure out to 64 bytes */
} __packed;
| 222 | + |
/*
 * Register the STA shared-memory area with the SBI implementation.
 *
 * The gpa is passed as two xlen-wide arguments: on rv32 the upper 32 bits
 * go in 'hi'; on rv64 'lo' already carries the full address, and 'hi' is
 * set to all-ones only when gpa == -1 (NOTE(review): -1 appears to be the
 * "disable shmem" encoding from the SBI STA spec — confirm).
 */
static void sta_set_shmem(vm_paddr_t gpa, unsigned long flags)
{
	unsigned long lo = (unsigned long)gpa;
#if __riscv_xlen == 32
	unsigned long hi = (unsigned long)(gpa >> 32);
#else
	unsigned long hi = gpa == -1 ? -1 : 0;
#endif
	struct sbiret ret = sbi_ecall(SBI_EXT_STA, 0, lo, hi, flags, 0, 0, 0);

	/* Success requires both the error and value results to be zero. */
	GUEST_ASSERT(ret.value == 0 && ret.error == 0);
}
| 235 | + |
| 236 | +static void check_status(struct sta_struct *st) |
| 237 | +{ |
| 238 | + GUEST_ASSERT(!(READ_ONCE(st->sequence) & 1)); |
| 239 | + GUEST_ASSERT(READ_ONCE(st->flags) == 0); |
| 240 | + GUEST_ASSERT(READ_ONCE(st->preempted) == 0); |
| 241 | +} |
| 242 | + |
/*
 * Guest-side test body for one vCPU: probe the STA extension, register
 * the shmem area, then report the observed steal time back to the host
 * at two sync points and verify the sequence counter advanced between them.
 */
static void guest_code(int cpu)
{
	struct sta_struct *st = st_gva[cpu];
	uint32_t sequence;
	long out_val = 0;
	bool probe;

	/* The STA extension must probe successfully and report 1 (implemented). */
	probe = guest_sbi_probe_extension(SBI_EXT_STA, &out_val);
	GUEST_ASSERT(probe && out_val == 1);

	/* Register this vCPU's shmem region, then hand control to the host. */
	sta_set_shmem(st_gpa[cpu], 0);
	GUEST_SYNC(0);

	/* First reading: publish the steal value and remember the sequence. */
	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	sequence = READ_ONCE(st->sequence);
	check_status(st);
	GUEST_SYNC(1);

	/* Second reading: the sequence must have advanced since the first. */
	check_status(st);
	GUEST_ASSERT(sequence < READ_ONCE(st->sequence));
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	check_status(st);
	GUEST_DONE();
}
| 268 | + |
| 269 | +static bool is_steal_time_supported(struct kvm_vcpu *vcpu) |
| 270 | +{ |
| 271 | + uint64_t id = RISCV_SBI_EXT_REG(KVM_RISCV_SBI_EXT_STA); |
| 272 | + unsigned long enabled; |
| 273 | + |
| 274 | + vcpu_get_reg(vcpu, id, &enabled); |
| 275 | + TEST_ASSERT(enabled == 0 || enabled == 1, "Expected boolean result"); |
| 276 | + |
| 277 | + return enabled; |
| 278 | +} |
| 279 | + |
| 280 | +static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) |
| 281 | +{ |
| 282 | + /* ST_GPA_BASE is identity mapped */ |
| 283 | + st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE); |
| 284 | + st_gpa[i] = addr_gva2gpa(vcpu->vm, (vm_vaddr_t)st_gva[i]); |
| 285 | + sync_global_to_guest(vcpu->vm, st_gva[i]); |
| 286 | + sync_global_to_guest(vcpu->vm, st_gpa[i]); |
| 287 | +} |
| 288 | + |
| 289 | +static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) |
| 290 | +{ |
| 291 | + struct sta_struct *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]); |
| 292 | + int i; |
| 293 | + |
| 294 | + pr_info("VCPU%d:\n", vcpu_idx); |
| 295 | + pr_info(" sequence: %d\n", st->sequence); |
| 296 | + pr_info(" flags: %d\n", st->flags); |
| 297 | + pr_info(" steal: %"PRIu64"\n", st->steal); |
| 298 | + pr_info(" preempted: %d\n", st->preempted); |
| 299 | + pr_info(" pad: "); |
| 300 | + for (i = 0; i < 47; ++i) |
| 301 | + pr_info("%d", st->pad[i]); |
| 302 | + pr_info("\n"); |
| 303 | +} |
| 304 | + |
206 | 305 | #endif |
207 | 306 |
|
208 | 307 | static void *do_steal_time(void *arg) |
|
0 commit comments