/* SPDX-License-Identifier: GPL-2.0
 *
 * Assembly level code for mshv_vtl VTL transition
 *
 * Copyright (c) 2025, Microsoft Corporation.
 *
 * Author:
 * Naman Jain <namjain@microsoft.com>
 */

#include <linux/linkage.h>
#include <linux/static_call_types.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/frame.h>
#include "mshv-asm-offsets.h"

	.text
	.section .noinstr.text, "ax"
/*
 * void __mshv_vtl_return_call(struct mshv_vtl_cpu_context *vtl0)
 *
 * Switch to VTL0: load the guest register state from @vtl0, invoke the
 * VTL-return hypercall, and when VTL0 eventually re-enters this VTL, save
 * the guest's register state back into @vtl0 before returning to C.
 *
 * ABI: SysV AMD64 (Linux kernel). In: %rdi (_ASM_ARG1) = @vtl0 context
 * pointer. Out: none; @vtl0 is updated in place with the guest state.
 *
 * This function is used to context switch between different Virtual Trust Levels.
 * It is marked as 'noinstr' to prevent against instrumentation and debugging facilities.
 * NMIs aren't a problem because the NMI handler saves/restores CR2 specifically to guard
 * against #PFs in NMI context clobbering the guest state.
 */
SYM_FUNC_START(__mshv_vtl_return_call)
	/* Push callee save registers (rbp first, then r12-r15, rbx). */
	pushq %rbp
	mov %rsp, %rbp
	pushq %r12
	pushq %r13
	pushq %r14
	pushq %r15
	pushq %rbx

	/*
	 * register switch to VTL0 clobbers all registers except rax/rcx,
	 * so keep the context pointer in rax across the guest loads below.
	 */
	mov %_ASM_ARG1, %rax

	/* grab rbx/rbp/rsi/rdi/r8-r15 (guest values from the context) */
	mov MSHV_VTL_CPU_CONTEXT_rbx(%rax), %rbx
	mov MSHV_VTL_CPU_CONTEXT_rbp(%rax), %rbp
	mov MSHV_VTL_CPU_CONTEXT_rsi(%rax), %rsi
	mov MSHV_VTL_CPU_CONTEXT_rdi(%rax), %rdi
	mov MSHV_VTL_CPU_CONTEXT_r8(%rax), %r8
	mov MSHV_VTL_CPU_CONTEXT_r9(%rax), %r9
	mov MSHV_VTL_CPU_CONTEXT_r10(%rax), %r10
	mov MSHV_VTL_CPU_CONTEXT_r11(%rax), %r11
	mov MSHV_VTL_CPU_CONTEXT_r12(%rax), %r12
	mov MSHV_VTL_CPU_CONTEXT_r13(%rax), %r13
	mov MSHV_VTL_CPU_CONTEXT_r14(%rax), %r14
	mov MSHV_VTL_CPU_CONTEXT_r15(%rax), %r15

	/* Restore guest CR2 via rdx scratch, then load guest rdx last. */
	mov MSHV_VTL_CPU_CONTEXT_cr2(%rax), %rdx
	mov %rdx, %cr2
	mov MSHV_VTL_CPU_CONTEXT_rdx(%rax), %rdx

	/* stash host registers on stack: 0(%rsp)=host rcx, 8(%rsp)=ctx ptr */
	pushq %rax
	pushq %rcx

	/*
	 * NOTE(review): rcx = 0 appears to be the hypercall input selecting a
	 * regular (non-fast) VTL return — confirm against the Hyper-V TLFS.
	 */
	xor %ecx, %ecx

	/* make a hypercall to switch VTL */
	call STATIC_CALL_TRAMP_STR(__mshv_vtl_return_hypercall)

	/*
	 * stash guest registers on stack, restore saved host copies.
	 * Stack layout from here: 0(%rsp)=guest rcx, 8(%rsp)=guest rax,
	 * 16(%rsp)=host rcx, 24(%rsp)=host rax (the context pointer).
	 */
	pushq %rax
	pushq %rcx
	mov 16(%rsp), %rcx
	mov 24(%rsp), %rax

	/* Save guest rdx, then read guest CR2 using rdx as scratch. */
	mov %rdx, MSHV_VTL_CPU_CONTEXT_rdx(%rax)
	mov %cr2, %rdx
	mov %rdx, MSHV_VTL_CPU_CONTEXT_cr2(%rax)
	/* Pop the stashed guest rcx/rax straight into the context. */
	pop MSHV_VTL_CPU_CONTEXT_rcx(%rax)
	pop MSHV_VTL_CPU_CONTEXT_rax(%rax)
	/* Drop the stashed host rax/rcx copies. */
	add $16, %rsp

	/* save rbx/rbp/rsi/rdi/r8-r15 (guest values into the context) */
	mov %rbx, MSHV_VTL_CPU_CONTEXT_rbx(%rax)
	mov %rbp, MSHV_VTL_CPU_CONTEXT_rbp(%rax)
	mov %rsi, MSHV_VTL_CPU_CONTEXT_rsi(%rax)
	mov %rdi, MSHV_VTL_CPU_CONTEXT_rdi(%rax)
	mov %r8, MSHV_VTL_CPU_CONTEXT_r8(%rax)
	mov %r9, MSHV_VTL_CPU_CONTEXT_r9(%rax)
	mov %r10, MSHV_VTL_CPU_CONTEXT_r10(%rax)
	mov %r11, MSHV_VTL_CPU_CONTEXT_r11(%rax)
	mov %r12, MSHV_VTL_CPU_CONTEXT_r12(%rax)
	mov %r13, MSHV_VTL_CPU_CONTEXT_r13(%rax)
	mov %r14, MSHV_VTL_CPU_CONTEXT_r14(%rax)
	mov %r15, MSHV_VTL_CPU_CONTEXT_r15(%rax)

	/* pop callee-save registers r12-r15, rbx (reverse of the push order) */
	pop %rbx
	pop %r15
	pop %r14
	pop %r13
	pop %r12

	pop %rbp
	RET
SYM_FUNC_END(__mshv_vtl_return_call)
/*
 * Make sure that static_call_key symbol: __SCK____mshv_vtl_return_hypercall is accessible here.
 * Below code is inspired from __ADDRESSABLE(sym) macro. Symbol name is kept simple, to avoid
 * naming it something like "__UNIQUE_ID_addressable___SCK____mshv_vtl_return_hypercall_662.0"
 * which would otherwise have been generated by the macro.
 *
 * The .discard.addressable section is thrown away at link time; the pointer
 * only exists so the toolchain keeps the static_call key symbol referenced.
 */
	.section .discard.addressable,"aw"
	.align 8
	.type mshv_vtl_return_sym, @object
	.size mshv_vtl_return_sym, 8
mshv_vtl_return_sym:
	.quad __SCK____mshv_vtl_return_hypercall