|
13 | 13 |
|
14 | 14 | #define DEFAULT_RISCV_GUEST_STACK_VADDR_MIN 0xac0000 |
15 | 15 |
|
| 16 | +static vm_vaddr_t exception_handlers; |
| 17 | + |
16 | 18 | static uint64_t page_align(struct kvm_vm *vm, uint64_t v) |
17 | 19 | { |
18 | 20 | return (v + vm->page_size) & ~(vm->page_size - 1); |
@@ -364,8 +366,75 @@ void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) |
364 | 366 | va_end(ap); |
365 | 367 | } |
366 | 368 |
|
/*
 * Report an unexpected guest exception/interrupt to the host via a
 * UCALL_UNHANDLED ucall, passing the vector and exception code so
 * assert_on_unhandled_exception() can fail the test with them.
 */
void kvm_exit_unexpected_exception(int vector, int ec)
{
	ucall(UCALL_UNHANDLED, 2, vector, ec);
}
| 373 | + |
367 | 374 | void assert_on_unhandled_exception(struct kvm_vcpu *vcpu) |
368 | 375 | { |
| 376 | + struct ucall uc; |
| 377 | + |
| 378 | + if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED) { |
| 379 | + TEST_FAIL("Unexpected exception (vector:0x%lx, ec:0x%lx)", |
| 380 | + uc.args[0], uc.args[1]); |
| 381 | + } |
| 382 | +} |
| 383 | + |
/*
 * Guest-resident table of handler pointers, indexed as
 * [vector][ec]: route_exception() uses vector 0 with the per-cause
 * exception code, and vector 1 / ec 0 for all interrupts.
 */
struct handlers {
	exception_handler_fn exception_handlers[NR_VECTORS][NR_EXCEPTIONS];
};
| 387 | + |
| 388 | +void route_exception(struct ex_regs *regs) |
| 389 | +{ |
| 390 | + struct handlers *handlers = (struct handlers *)exception_handlers; |
| 391 | + int vector = 0, ec; |
| 392 | + |
| 393 | + ec = regs->cause & ~CAUSE_IRQ_FLAG; |
| 394 | + if (ec >= NR_EXCEPTIONS) |
| 395 | + goto unexpected_exception; |
| 396 | + |
| 397 | + /* Use the same handler for all the interrupts */ |
| 398 | + if (regs->cause & CAUSE_IRQ_FLAG) { |
| 399 | + vector = 1; |
| 400 | + ec = 0; |
| 401 | + } |
| 402 | + |
| 403 | + if (handlers && handlers->exception_handlers[vector][ec]) |
| 404 | + return handlers->exception_handlers[vector][ec](regs); |
| 405 | + |
| 406 | +unexpected_exception: |
| 407 | + return kvm_exit_unexpected_exception(vector, ec); |
| 408 | +} |
| 409 | + |
| 410 | +void vcpu_init_vector_tables(struct kvm_vcpu *vcpu) |
| 411 | +{ |
| 412 | + extern char exception_vectors; |
| 413 | + |
| 414 | + vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(stvec), (unsigned long)&exception_vectors); |
| 415 | +} |
| 416 | + |
| 417 | +void vm_init_vector_tables(struct kvm_vm *vm) |
| 418 | +{ |
| 419 | + vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers), |
| 420 | + vm->page_size, MEM_REGION_DATA); |
| 421 | + |
| 422 | + *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; |
| 423 | +} |
| 424 | + |
| 425 | +void vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler) |
| 426 | +{ |
| 427 | + struct handlers *handlers = addr_gva2hva(vm, vm->handlers); |
| 428 | + |
| 429 | + assert(vector < NR_EXCEPTIONS); |
| 430 | + handlers->exception_handlers[0][vector] = handler; |
| 431 | +} |
| 432 | + |
| 433 | +void vm_install_interrupt_handler(struct kvm_vm *vm, exception_handler_fn handler) |
| 434 | +{ |
| 435 | + struct handlers *handlers = addr_gva2hva(vm, vm->handlers); |
| 436 | + |
| 437 | + handlers->exception_handlers[1][0] = handler; |
369 | 438 | } |
370 | 439 |
|
371 | 440 | struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, |
|
0 commit comments