Skip to content

Commit eed52e4

Browse files
sean-jc
authored and pbonzini committed
KVM: Allow arch code to track number of memslot address spaces per VM
Let x86 track the number of address spaces on a per-VM basis so that KVM can disallow SMM memslots for confidential VMs. Confidential VMs are fundamentally incompatible with emulating SMM, which as the name suggests requires being able to read and write guest memory and register state. Disallowing SMM will simplify support for guest private memory, as KVM will not need to worry about tracking memory attributes for multiple address spaces (SMM is the only "non-default" address space across all architectures). Signed-off-by: Sean Christopherson <seanjc@google.com> Reviewed-by: Paolo Bonzini <pbonzini@redhat.com> Reviewed-by: Fuad Tabba <tabba@google.com> Tested-by: Fuad Tabba <tabba@google.com> Message-Id: <20231027182217.3615211-23-seanjc@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 2333afa commit eed52e4

8 files changed

Lines changed: 39 additions & 26 deletions

File tree

arch/powerpc/kvm/book3s_hv.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6084,7 +6084,7 @@ static int kvmhv_svm_off(struct kvm *kvm)
60846084
}
60856085

60866086
srcu_idx = srcu_read_lock(&kvm->srcu);
6087-
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
6087+
for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
60886088
struct kvm_memory_slot *memslot;
60896089
struct kvm_memslots *slots = __kvm_memslots(kvm, i);
60906090
int bkt;

arch/x86/include/asm/kvm_host.h

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2136,9 +2136,15 @@ enum {
21362136
#define HF_SMM_MASK (1 << 1)
21372137
#define HF_SMM_INSIDE_NMI_MASK (1 << 2)
21382138

2139-
# define KVM_ADDRESS_SPACE_NUM 2
2139+
# define KVM_MAX_NR_ADDRESS_SPACES 2
21402140
# define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
21412141
# define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
2142+
2143+
static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm)
2144+
{
2145+
return KVM_MAX_NR_ADDRESS_SPACES;
2146+
}
2147+
21422148
#else
21432149
# define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, 0)
21442150
#endif

arch/x86/kvm/debugfs.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -111,7 +111,7 @@ static int kvm_mmu_rmaps_stat_show(struct seq_file *m, void *v)
111111
mutex_lock(&kvm->slots_lock);
112112
write_lock(&kvm->mmu_lock);
113113

114-
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
114+
for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
115115
int bkt;
116116

117117
slots = __kvm_memslots(kvm, i);

arch/x86/kvm/mmu/mmu.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3763,7 +3763,7 @@ static int mmu_first_shadow_root_alloc(struct kvm *kvm)
37633763
kvm_page_track_write_tracking_enabled(kvm))
37643764
goto out_success;
37653765

3766-
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
3766+
for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
37673767
slots = __kvm_memslots(kvm, i);
37683768
kvm_for_each_memslot(slot, bkt, slots) {
37693769
/*
@@ -6309,7 +6309,7 @@ static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_e
63096309
if (!kvm_memslots_have_rmaps(kvm))
63106310
return flush;
63116311

6312-
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
6312+
for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
63136313
slots = __kvm_memslots(kvm, i);
63146314

63156315
kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) {
@@ -6806,7 +6806,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
68066806
* modifier prior to checking for a wrap of the MMIO generation so
68076807
* that a wrap in any address space is detected.
68086808
*/
6809-
gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
6809+
gen &= ~((u64)kvm_arch_nr_memslot_as_ids(kvm) - 1);
68106810

68116811
/*
68126812
* The very rare case: if the MMIO generation number has wrapped,

arch/x86/kvm/x86.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12577,7 +12577,7 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
1257712577
hva = slot->userspace_addr;
1257812578
}
1257912579

12580-
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
12580+
for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
1258112581
struct kvm_userspace_memory_region2 m;
1258212582

1258312583
m.slot = id | (i << 16);

include/linux/kvm_host.h

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -80,8 +80,8 @@
8080
/* Two fragments for cross MMIO pages. */
8181
#define KVM_MAX_MMIO_FRAGMENTS 2
8282

83-
#ifndef KVM_ADDRESS_SPACE_NUM
84-
#define KVM_ADDRESS_SPACE_NUM 1
83+
#ifndef KVM_MAX_NR_ADDRESS_SPACES
84+
#define KVM_MAX_NR_ADDRESS_SPACES 1
8585
#endif
8686

8787
/*
@@ -690,7 +690,12 @@ bool kvm_arch_irqchip_in_kernel(struct kvm *kvm);
690690
#define KVM_MEM_SLOTS_NUM SHRT_MAX
691691
#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)
692692

693-
#if KVM_ADDRESS_SPACE_NUM == 1
693+
#if KVM_MAX_NR_ADDRESS_SPACES == 1
694+
static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm)
695+
{
696+
return KVM_MAX_NR_ADDRESS_SPACES;
697+
}
698+
694699
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
695700
{
696701
return 0;
@@ -745,9 +750,9 @@ struct kvm {
745750
struct mm_struct *mm; /* userspace tied to this vm */
746751
unsigned long nr_memslot_pages;
747752
/* The two memslot sets - active and inactive (per address space) */
748-
struct kvm_memslots __memslots[KVM_ADDRESS_SPACE_NUM][2];
753+
struct kvm_memslots __memslots[KVM_MAX_NR_ADDRESS_SPACES][2];
749754
/* The current active memslot set for each address space */
750-
struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
755+
struct kvm_memslots __rcu *memslots[KVM_MAX_NR_ADDRESS_SPACES];
751756
struct xarray vcpu_array;
752757
/*
753758
* Protected by slots_lock, but can be read outside if an
@@ -1017,7 +1022,7 @@ void kvm_put_kvm_no_destroy(struct kvm *kvm);
10171022

10181023
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
10191024
{
1020-
as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
1025+
as_id = array_index_nospec(as_id, KVM_MAX_NR_ADDRESS_SPACES);
10211026
return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
10221027
lockdep_is_held(&kvm->slots_lock) ||
10231028
!refcount_read(&kvm->users_count));

virt/kvm/dirty_ring.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
5858
as_id = slot >> 16;
5959
id = (u16)slot;
6060

61-
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
61+
if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
6262
return;
6363

6464
memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id);

virt/kvm/kvm_main.c

Lines changed: 14 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -615,7 +615,7 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
615615

616616
idx = srcu_read_lock(&kvm->srcu);
617617

618-
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
618+
for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
619619
struct interval_tree_node *node;
620620

621621
slots = __kvm_memslots(kvm, i);
@@ -1241,7 +1241,7 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
12411241
goto out_err_no_irq_srcu;
12421242

12431243
refcount_set(&kvm->users_count, 1);
1244-
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1244+
for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
12451245
for (j = 0; j < 2; j++) {
12461246
slots = &kvm->__memslots[i][j];
12471247

@@ -1391,7 +1391,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
13911391
#endif
13921392
kvm_arch_destroy_vm(kvm);
13931393
kvm_destroy_devices(kvm);
1394-
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1394+
for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
13951395
kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
13961396
kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
13971397
}
@@ -1682,7 +1682,7 @@ static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
16821682
* space 0 will use generations 0, 2, 4, ... while address space 1 will
16831683
* use generations 1, 3, 5, ...
16841684
*/
1685-
gen += KVM_ADDRESS_SPACE_NUM;
1685+
gen += kvm_arch_nr_memslot_as_ids(kvm);
16861686

16871687
kvm_arch_memslots_updated(kvm, gen);
16881688

@@ -2052,7 +2052,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
20522052
(mem->guest_memfd_offset & (PAGE_SIZE - 1) ||
20532053
mem->guest_memfd_offset + mem->memory_size < mem->guest_memfd_offset))
20542054
return -EINVAL;
2055-
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
2055+
if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_MEM_SLOTS_NUM)
20562056
return -EINVAL;
20572057
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
20582058
return -EINVAL;
@@ -2188,7 +2188,7 @@ int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
21882188

21892189
as_id = log->slot >> 16;
21902190
id = (u16)log->slot;
2191-
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
2191+
if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
21922192
return -EINVAL;
21932193

21942194
slots = __kvm_memslots(kvm, as_id);
@@ -2250,7 +2250,7 @@ static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
22502250

22512251
as_id = log->slot >> 16;
22522252
id = (u16)log->slot;
2253-
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
2253+
if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
22542254
return -EINVAL;
22552255

22562256
slots = __kvm_memslots(kvm, as_id);
@@ -2362,7 +2362,7 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm,
23622362

23632363
as_id = log->slot >> 16;
23642364
id = (u16)log->slot;
2365-
if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
2365+
if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
23662366
return -EINVAL;
23672367

23682368
if (log->first_page & 63)
@@ -2493,7 +2493,7 @@ static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
24932493
gfn_range.arg = range->arg;
24942494
gfn_range.may_block = range->may_block;
24952495

2496-
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
2496+
for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
24972497
slots = __kvm_memslots(kvm, i);
24982498

24992499
kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) {
@@ -4848,9 +4848,11 @@ static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
48484848
case KVM_CAP_IRQ_ROUTING:
48494849
return KVM_MAX_IRQ_ROUTES;
48504850
#endif
4851-
#if KVM_ADDRESS_SPACE_NUM > 1
4851+
#if KVM_MAX_NR_ADDRESS_SPACES > 1
48524852
case KVM_CAP_MULTI_ADDRESS_SPACE:
4853-
return KVM_ADDRESS_SPACE_NUM;
4853+
if (kvm)
4854+
return kvm_arch_nr_memslot_as_ids(kvm);
4855+
return KVM_MAX_NR_ADDRESS_SPACES;
48544856
#endif
48554857
case KVM_CAP_NR_MEMSLOTS:
48564858
return KVM_USER_MEM_SLOTS;
@@ -4958,7 +4960,7 @@ bool kvm_are_all_memslots_empty(struct kvm *kvm)
49584960

49594961
lockdep_assert_held(&kvm->slots_lock);
49604962

4961-
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
4963+
for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
49624964
if (!kvm_memslots_empty(__kvm_memslots(kvm, i)))
49634965
return false;
49644966
}

0 commit comments

Comments
 (0)