@@ -937,87 +937,67 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
  * can't change unless all sptes pointing to it are nuked first.
  *
  * Returns
- * < 0: the sp should be zapped
- * 0: the sp is synced and no tlb flushing is required
- * > 0: the sp is synced and tlb flushing is required
+ * < 0: failed to sync spte
+ * 0: the spte is synced and no tlb flushing is required
+ * > 0: the spte is synced and tlb flushing is required
  */
-static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
 {
-	int i;
 	bool host_writable;
 	gpa_t first_pte_gpa;
-	bool flush = false;
-
-	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
-
-	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
-		u64 *sptep, spte;
-		struct kvm_memory_slot *slot;
-		unsigned pte_access;
-		pt_element_t gpte;
-		gpa_t pte_gpa;
-		gfn_t gfn;
-
-		if (!sp->spt[i])
-			continue;
+	u64 *sptep, spte;
+	struct kvm_memory_slot *slot;
+	unsigned pte_access;
+	pt_element_t gpte;
+	gpa_t pte_gpa;
+	gfn_t gfn;

-		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
+	if (!sp->spt[i])
+		return 0;

-		if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
-					       sizeof(pt_element_t)))
-			return -1;
+	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
+	pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

-		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
-			flush = true;
-			continue;
-		}
+	if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
+				       sizeof(pt_element_t)))
+		return -1;

-		gfn = gpte_to_gfn(gpte);
-		pte_access = sp->role.access;
-		pte_access &= FNAME(gpte_access)(gpte);
-		FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
+	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte))
+		return 1;

-		if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access))
-			continue;
+	gfn = gpte_to_gfn(gpte);
+	pte_access = sp->role.access;
+	pte_access &= FNAME(gpte_access)(gpte);
+	FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);

-		/*
-		 * Drop the SPTE if the new protections would result in a RWX=0
-		 * SPTE or if the gfn is changing.  The RWX=0 case only affects
-		 * EPT with execute-only support, i.e. EPT without an effective
-		 * "present" bit, as all other paging modes will create a
-		 * read-only SPTE if pte_access is zero.
-		 */
-		if ((!pte_access && !shadow_present_mask) ||
-		    gfn != kvm_mmu_page_get_gfn(sp, i)) {
-			drop_spte(vcpu->kvm, &sp->spt[i]);
-			flush = true;
-			continue;
-		}
+	if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access))
+		return 0;

-		/* Update the shadowed access bits in case they changed. */
-		kvm_mmu_page_set_access(sp, i, pte_access);
+	/*
+	 * Drop the SPTE if the new protections would result in a RWX=0
+	 * SPTE or if the gfn is changing.  The RWX=0 case only affects
+	 * EPT with execute-only support, i.e. EPT without an effective
+	 * "present" bit, as all other paging modes will create a
+	 * read-only SPTE if pte_access is zero.
+	 */
+	if ((!pte_access && !shadow_present_mask) ||
+	    gfn != kvm_mmu_page_get_gfn(sp, i)) {
+		drop_spte(vcpu->kvm, &sp->spt[i]);
+		return 1;
+	}

-		sptep = &sp->spt[i];
-		spte = *sptep;
-		host_writable = spte & shadow_host_writable_mask;
-		slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-		make_spte(vcpu, sp, slot, pte_access, gfn,
-			  spte_to_pfn(spte), spte, true, false,
-			  host_writable, &spte);
+	/* Update the shadowed access bits in case they changed. */
+	kvm_mmu_page_set_access(sp, i, pte_access);

-		flush |= mmu_spte_update(sptep, spte);
-	}
+	sptep = &sp->spt[i];
+	spte = *sptep;
+	host_writable = spte & shadow_host_writable_mask;
+	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+	make_spte(vcpu, sp, slot, pte_access, gfn,
+		  spte_to_pfn(spte), spte, true, false,
+		  host_writable, &spte);

-	/*
-	 * Note, any flush is purely for KVM's correctness, e.g. when dropping
-	 * an existing SPTE or clearing W/A/D bits to ensure an mmu_notifier
-	 * unmap or dirty logging event doesn't fail to flush.  The guest is
-	 * responsible for flushing the TLB to ensure any changes in protection
-	 * bits are recognized, i.e. until the guest flushes or page faults on
-	 * a relevant address, KVM is architecturally allowed to let vCPUs use
-	 * cached translations with the old protection bits.
-	 */
-	return flush;
+	return mmu_spte_update(sptep, spte);
 }

 #undef pt_element_t
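
With the per-entry loop removed from this function, walking all of a shadow page's entries now has to happen at the call site. Below is a minimal sketch of what such a caller could look like given the return convention documented above; the helper name sync_all_sptes() and its error handling are illustrative assumptions, not part of this diff.

/*
 * Hypothetical caller-side loop over FNAME(sync_spte), shown only to
 * illustrate the new per-index return convention.  The helper name and
 * error handling are assumptions, not part of this diff.
 */
static int sync_all_sptes(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	bool flush = false;
	int i, ret;

	for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
		ret = FNAME(sync_spte)(vcpu, sp, i);

		/* < 0: this spte failed to sync, abort the walk. */
		if (ret < 0)
			return ret;

		/* > 0: the spte was synced and requires a TLB flush. */
		flush |= !!ret;
	}

	return flush;
}

This mirrors the old FNAME(sync_page) behavior: a failure to sync still aborts the walk, while per-entry flush indications are accumulated so the caller can issue a single TLB flush once the walk completes.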