@@ -378,6 +378,12 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
378378 return ret ;
379379}
380380
/*
 * Resolve the vCPU an ITS collection targets.
 *
 * The ITS advertises GITS_TYPER.PTA == 0, so the "processor address"
 * stored in a collection is a vCPU ID, not a vCPU index; look it up
 * with kvm_get_vcpu_by_id().  Returns NULL when no vCPU with that ID
 * exists (callers treat this as an unmapped/out-of-range processor).
 */
static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm,
					   struct its_collection *col)
{
	return kvm_get_vcpu_by_id(kvm, col->target_addr);
}
386+
381387/*
382388 * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
383389 * is targeting) to the VGIC's view, which deals with target VCPUs.
@@ -391,7 +397,7 @@ static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
391397 if (!its_is_collection_mapped (ite -> collection ))
392398 return ;
393399
394- vcpu = kvm_get_vcpu (kvm , ite -> collection -> target_addr );
400+ vcpu = collection_to_vcpu (kvm , ite -> collection );
395401 update_affinity (ite -> irq , vcpu );
396402}
397403
@@ -679,7 +685,7 @@ int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
679685 if (!ite || !its_is_collection_mapped (ite -> collection ))
680686 return E_ITS_INT_UNMAPPED_INTERRUPT ;
681687
682- vcpu = kvm_get_vcpu (kvm , ite -> collection -> target_addr );
688+ vcpu = collection_to_vcpu (kvm , ite -> collection );
683689 if (!vcpu )
684690 return E_ITS_INT_UNMAPPED_INTERRUPT ;
685691
@@ -887,7 +893,7 @@ static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
887893 return E_ITS_MOVI_UNMAPPED_COLLECTION ;
888894
889895 ite -> collection = collection ;
890- vcpu = kvm_get_vcpu (kvm , collection -> target_addr );
896+ vcpu = collection_to_vcpu (kvm , collection );
891897
892898 vgic_its_invalidate_cache (kvm );
893899
@@ -1121,7 +1127,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
11211127 }
11221128
11231129 if (its_is_collection_mapped (collection ))
1124- vcpu = kvm_get_vcpu (kvm , collection -> target_addr );
1130+ vcpu = collection_to_vcpu (kvm , collection );
11251131
11261132 irq = vgic_add_lpi (kvm , lpi_nr , vcpu );
11271133 if (IS_ERR (irq )) {
@@ -1242,21 +1248,22 @@ static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
12421248 u64 * its_cmd )
12431249{
12441250 u16 coll_id ;
1245- u32 target_addr ;
12461251 struct its_collection * collection ;
12471252 bool valid ;
12481253
12491254 valid = its_cmd_get_validbit (its_cmd );
12501255 coll_id = its_cmd_get_collection (its_cmd );
1251- target_addr = its_cmd_get_target_addr (its_cmd );
1252-
1253- if (target_addr >= atomic_read (& kvm -> online_vcpus ))
1254- return E_ITS_MAPC_PROCNUM_OOR ;
12551256
12561257 if (!valid ) {
12571258 vgic_its_free_collection (its , coll_id );
12581259 vgic_its_invalidate_cache (kvm );
12591260 } else {
1261+ struct kvm_vcpu * vcpu ;
1262+
1263+ vcpu = kvm_get_vcpu_by_id (kvm , its_cmd_get_target_addr (its_cmd ));
1264+ if (!vcpu )
1265+ return E_ITS_MAPC_PROCNUM_OOR ;
1266+
12601267 collection = find_collection (its , coll_id );
12611268
12621269 if (!collection ) {
@@ -1270,9 +1277,9 @@ static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
12701277 coll_id );
12711278 if (ret )
12721279 return ret ;
1273- collection -> target_addr = target_addr ;
1280+ collection -> target_addr = vcpu -> vcpu_id ;
12741281 } else {
1275- collection -> target_addr = target_addr ;
1282+ collection -> target_addr = vcpu -> vcpu_id ;
12761283 update_affinity_collection (kvm , its , collection );
12771284 }
12781285 }
@@ -1382,7 +1389,7 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
13821389 if (!its_is_collection_mapped (collection ))
13831390 return E_ITS_INVALL_UNMAPPED_COLLECTION ;
13841391
1385- vcpu = kvm_get_vcpu (kvm , collection -> target_addr );
1392+ vcpu = collection_to_vcpu (kvm , collection );
13861393 vgic_its_invall (vcpu );
13871394
13881395 return 0 ;
@@ -1399,23 +1406,21 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
13991406static int vgic_its_cmd_handle_movall (struct kvm * kvm , struct vgic_its * its ,
14001407 u64 * its_cmd )
14011408{
1402- u32 target1_addr = its_cmd_get_target_addr (its_cmd );
1403- u32 target2_addr = its_cmd_mask_field (its_cmd , 3 , 16 , 32 );
14041409 struct kvm_vcpu * vcpu1 , * vcpu2 ;
14051410 struct vgic_irq * irq ;
14061411 u32 * intids ;
14071412 int irq_count , i ;
14081413
1409- if (target1_addr >= atomic_read (& kvm -> online_vcpus ) ||
1410- target2_addr >= atomic_read (& kvm -> online_vcpus ))
1414+ /* We advertise GITS_TYPER.PTA==0, making the address the vcpu ID */
1415+ vcpu1 = kvm_get_vcpu_by_id (kvm , its_cmd_get_target_addr (its_cmd ));
1416+ vcpu2 = kvm_get_vcpu_by_id (kvm , its_cmd_mask_field (its_cmd , 3 , 16 , 32 ));
1417+
1418+ if (!vcpu1 || !vcpu2 )
14111419 return E_ITS_MOVALL_PROCNUM_OOR ;
14121420
1413- if (target1_addr == target2_addr )
1421+ if (vcpu1 == vcpu2 )
14141422 return 0 ;
14151423
1416- vcpu1 = kvm_get_vcpu (kvm , target1_addr );
1417- vcpu2 = kvm_get_vcpu (kvm , target2_addr );
1418-
14191424 irq_count = vgic_copy_lpi_list (kvm , vcpu1 , & intids );
14201425 if (irq_count < 0 )
14211426 return irq_count ;
@@ -2258,7 +2263,7 @@ static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
22582263 return PTR_ERR (ite );
22592264
22602265 if (its_is_collection_mapped (collection ))
2261- vcpu = kvm_get_vcpu (kvm , collection -> target_addr );
2266+ vcpu = kvm_get_vcpu_by_id (kvm , collection -> target_addr );
22622267
22632268 irq = vgic_add_lpi (kvm , lpi_id , vcpu );
22642269 if (IS_ERR (irq )) {
@@ -2573,7 +2578,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
25732578 coll_id = val & KVM_ITS_CTE_ICID_MASK ;
25742579
25752580 if (target_addr != COLLECTION_NOT_MAPPED &&
2576- target_addr >= atomic_read ( & kvm -> online_vcpus ))
2581+ ! kvm_get_vcpu_by_id ( kvm , target_addr ))
25772582 return - EINVAL ;
25782583
25792584 collection = find_collection (its , coll_id );
0 commit comments