@@ -895,9 +895,9 @@ static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	untrack_possible_nx_huge_page(kvm, sp);
 }
 
-static struct kvm_memory_slot *
-gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
-			    bool no_dirty_log)
+static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu,
+							    gfn_t gfn,
+							    bool no_dirty_log)
 {
 	struct kvm_memory_slot *slot;
 
@@ -960,9 +960,8 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
 	return count;
 }
 
-static void
-pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
-			   struct pte_list_desc *desc, int i)
+static void pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
+				       struct pte_list_desc *desc, int i)
 {
 	struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
 	int j = head_desc->spte_count - 1;
@@ -1510,19 +1509,19 @@ struct slot_rmap_walk_iterator {
 	struct kvm_rmap_head *end_rmap;
 };
 
-static void
-rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
+static void rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator,
+				 int level)
 {
 	iterator->level = level;
 	iterator->gfn = iterator->start_gfn;
 	iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
 	iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
 }
 
-static void
-slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
-		    const struct kvm_memory_slot *slot, int start_level,
-		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
+static void slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
+				const struct kvm_memory_slot *slot,
+				int start_level, int end_level,
+				gfn_t start_gfn, gfn_t end_gfn)
 {
 	iterator->slot = slot;
 	iterator->start_level = start_level;
@@ -3373,9 +3372,9 @@ static bool page_fault_can_be_fast(struct kvm_page_fault *fault)
  * Returns true if the SPTE was fixed successfully. Otherwise,
  * someone else modified the SPTE from its original value.
  */
-static bool
-fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
-			u64 *sptep, u64 old_spte, u64 new_spte)
+static bool fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu,
+				    struct kvm_page_fault *fault,
+				    u64 *sptep, u64 old_spte, u64 new_spte)
 {
 	/*
 	 * Theoretically we could also set dirty bit (and flush TLB) here in
@@ -4708,10 +4707,9 @@ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
 #include "paging_tmpl.h"
 #undef PTTYPE
 
-static void
-__reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
-			u64 pa_bits_rsvd, int level, bool nx, bool gbpages,
-			bool pse, bool amd)
+static void __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
+				    u64 pa_bits_rsvd, int level, bool nx,
+				    bool gbpages, bool pse, bool amd)
 {
 	u64 gbpages_bit_rsvd = 0;
 	u64 nonleaf_bit8_rsvd = 0;
@@ -4824,9 +4822,9 @@ static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 				guest_cpuid_is_amd_or_hygon(vcpu));
 }
 
-static void
-__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
-			    u64 pa_bits_rsvd, bool execonly, int huge_page_level)
+static void __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
+					u64 pa_bits_rsvd, bool execonly,
+					int huge_page_level)
 {
 	u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
 	u64 large_1g_rsvd = 0, large_2m_rsvd = 0;
@@ -4926,8 +4924,7 @@ static inline bool boot_cpu_is_amd(void)
  * the direct page table on host, use as much mmu features as
  * possible, however, kvm currently does not do execution-protection.
  */
-static void
-reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
+static void reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
 {
 	struct rsvd_bits_validate *shadow_zero_check;
 	int i;
@@ -5140,8 +5137,8 @@ static void paging32_init_context(struct kvm_mmu *context)
 	context->sync_spte = paging32_sync_spte;
 }
 
-static union kvm_cpu_role
-kvm_calc_cpu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
+static union kvm_cpu_role kvm_calc_cpu_role(struct kvm_vcpu *vcpu,
+					    const struct kvm_mmu_role_regs *regs)
 {
 	union kvm_cpu_role role = {0};
 
@@ -6750,8 +6747,8 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
 	}
 }
 
-static unsigned long
-mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long mmu_shrink_scan(struct shrinker *shrink,
+				     struct shrink_control *sc)
 {
 	struct kvm *kvm;
 	int nr_to_scan = sc->nr_to_scan;
@@ -6809,8 +6806,8 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	return freed;
 }
 
-static unsigned long
-mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long mmu_shrink_count(struct shrinker *shrink,
+				      struct shrink_control *sc)
 {
 	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
 }