@@ -551,7 +551,7 @@ typedef void (*on_unlock_fn_t)(struct kvm *kvm);
 struct kvm_hva_range {
 	unsigned long start;
 	unsigned long end;
-	pte_t pte;
+	union kvm_mmu_notifier_arg arg;
 	hva_handler_t handler;
 	on_lock_fn_t on_lock;
 	on_unlock_fn_t on_unlock;
@@ -572,6 +572,8 @@ static void kvm_null_fn(void)
 }
 #define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
 
+static const union kvm_mmu_notifier_arg KVM_MMU_NOTIFIER_NO_ARG;
+
 /* Iterate over each memslot intersecting [start, last] (inclusive) range */
 #define kvm_for_each_memslot_in_hva_range(node, slots, start, last)	     \
 	for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
@@ -616,7 +618,7 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
 		 * bother making these conditional (to avoid writes on
 		 * the second or later invocation of the handler).
 		 */
-		gfn_range.pte = range->pte;
+		gfn_range.arg = range->arg;
 		gfn_range.may_block = range->may_block;
 
 		/*
@@ -657,14 +659,14 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
 static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
 						unsigned long start,
 						unsigned long end,
-						pte_t pte,
+						union kvm_mmu_notifier_arg arg,
 						hva_handler_t handler)
 {
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 	const struct kvm_hva_range range = {
 		.start		= start,
 		.end		= end,
-		.pte		= pte,
+		.arg		= arg,
 		.handler	= handler,
 		.on_lock	= (void *)kvm_null_fn,
 		.on_unlock	= (void *)kvm_null_fn,
@@ -684,7 +686,6 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
 	const struct kvm_hva_range range = {
 		.start		= start,
 		.end		= end,
-		.pte		= __pte(0),
 		.handler	= handler,
 		.on_lock	= (void *)kvm_null_fn,
 		.on_unlock	= (void *)kvm_null_fn,
@@ -718,6 +719,7 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 					pte_t pte)
 {
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
+	const union kvm_mmu_notifier_arg arg = { .pte = pte };
 
 	trace_kvm_set_spte_hva(address);
723725
@@ -733,7 +735,7 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 	if (!READ_ONCE(kvm->mmu_invalidate_in_progress))
 		return;
 
-	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_change_spte_gfn);
+	kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn);
 }
 
 void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
739741void kvm_mmu_invalidate_begin (struct kvm * kvm , unsigned long start ,
@@ -772,7 +774,6 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	const struct kvm_hva_range hva_range = {
 		.start		= range->start,
 		.end		= range->end,
-		.pte		= __pte(0),
 		.handler	= kvm_unmap_gfn_range,
 		.on_lock	= kvm_mmu_invalidate_begin,
 		.on_unlock	= kvm_arch_guest_memory_reclaimed,
@@ -837,7 +838,6 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
 	const struct kvm_hva_range hva_range = {
 		.start		= range->start,
 		.end		= range->end,
-		.pte		= __pte(0),
 		.handler	= (void *)kvm_null_fn,
 		.on_lock	= kvm_mmu_invalidate_end,
 		.on_unlock	= (void *)kvm_null_fn,
@@ -870,7 +870,8 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
 {
 	trace_kvm_age_hva(start, end);
 
-	return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
+	return kvm_handle_hva_range(mn, start, end, KVM_MMU_NOTIFIER_NO_ARG,
+				    kvm_age_gfn);
 }
 
 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,