@@ -1468,11 +1468,13 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
 	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
 	eth_broadcast_addr(dmac_c);
 	ether_addr_copy(dmac_v, mac);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+	if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)) {
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, first_vid);
+	}
 	if (tagged) {
 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
-		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, first_vid);
-		MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, vid);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, vid);
 	}
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
@@ -1684,7 +1686,7 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 
 		/* Need recreate the flow table entry, so that the packet could forward back
 		 */
-		mac_vlan_del(ndev, ndev->config.mac, 0, false);
+		mac_vlan_del(ndev, mac_back, 0, false);
 
 		if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) {
 			mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n");
@@ -1821,6 +1823,9 @@ static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 	size_t read;
 	u16 id;
 
+	if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)))
+		return status;
+
 	switch (cmd) {
 	case VIRTIO_NET_CTRL_VLAN_ADD:
 		read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));
@@ -2389,7 +2394,8 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
 	}
 }
 
-static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
+				struct vhost_iotlb *iotlb, unsigned int asid)
 {
 	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 	int err;
@@ -2401,7 +2407,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
 
 	teardown_driver(ndev);
 	mlx5_vdpa_destroy_mr(mvdev);
-	err = mlx5_vdpa_create_mr(mvdev, iotlb);
+	err = mlx5_vdpa_create_mr(mvdev, iotlb, asid);
 	if (err)
 		goto err_mr;
 
@@ -2582,7 +2588,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
 	++mvdev->generation;
 
 	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
-		if (mlx5_vdpa_create_mr(mvdev, NULL))
+		if (mlx5_vdpa_create_mr(mvdev, NULL, 0))
 			mlx5_vdpa_warn(mvdev, "create MR failed\n");
 	}
 	up_write(&ndev->reslock);
@@ -2618,41 +2624,20 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
 	return mvdev->generation;
 }
 
-static int set_map_control(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
-{
-	u64 start = 0ULL, last = 0ULL - 1;
-	struct vhost_iotlb_map *map;
-	int err = 0;
-
-	spin_lock(&mvdev->cvq.iommu_lock);
-	vhost_iotlb_reset(mvdev->cvq.iotlb);
-
-	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
-	     map = vhost_iotlb_itree_next(map, start, last)) {
-		err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start,
-					    map->last, map->addr, map->perm);
-		if (err)
-			goto out;
-	}
-
-out:
-	spin_unlock(&mvdev->cvq.iommu_lock);
-	return err;
-}
-
-static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+			unsigned int asid)
 {
 	bool change_map;
 	int err;
 
-	err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
+	err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map, asid);
 	if (err) {
 		mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
 		return err;
 	}
 
 	if (change_map)
-		err = mlx5_vdpa_change_map(mvdev, iotlb);
+		err = mlx5_vdpa_change_map(mvdev, iotlb, asid);
 
 	return err;
 }
@@ -2665,16 +2650,7 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
 	int err = -EINVAL;
 
 	down_write(&ndev->reslock);
-	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
-		err = set_map_data(mvdev, iotlb);
-		if (err)
-			goto out;
-	}
-
-	if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid)
-		err = set_map_control(mvdev, iotlb);
-
-out:
+	err = set_map_data(mvdev, iotlb, asid);
 	up_write(&ndev->reslock);
 	return err;
 }
@@ -2840,8 +2816,8 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
 	int i;
 
 	down_write(&ndev->reslock);
-	mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
 	ndev->nb_registered = false;
+	mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
 	flush_workqueue(ndev->mvdev.wq);
 	for (i = 0; i < ndev->cur_num_vqs; i++) {
 		mvq = &ndev->vqs[i];
@@ -3019,7 +2995,7 @@ static void update_carrier(struct work_struct *work)
 	else
 		ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP);
 
-	if (ndev->config_cb.callback)
+	if (ndev->nb_registered && ndev->config_cb.callback)
 		ndev->config_cb.callback(ndev->config_cb.private);
 
 	kfree(wqent);
@@ -3036,21 +3012,13 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p
 	switch (eqe->sub_type) {
 	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
 	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
-		down_read(&ndev->reslock);
-		if (!ndev->nb_registered) {
-			up_read(&ndev->reslock);
-			return NOTIFY_DONE;
-		}
 		wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
-		if (!wqent) {
-			up_read(&ndev->reslock);
+		if (!wqent)
 			return NOTIFY_DONE;
-		}
 
 		wqent->mvdev = &ndev->mvdev;
 		INIT_WORK(&wqent->work, update_carrier);
 		queue_work(ndev->mvdev.wq, &wqent->work);
-		up_read(&ndev->reslock);
 		ret = NOTIFY_OK;
 		break;
 	default:
@@ -3185,7 +3153,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
 		goto err_mpfs;
 
 	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
-		err = mlx5_vdpa_create_mr(mvdev, NULL);
+		err = mlx5_vdpa_create_mr(mvdev, NULL, 0);
 		if (err)
 			goto err_res;
 	}
@@ -3237,8 +3205,8 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
 	struct workqueue_struct *wq;
 
 	if (ndev->nb_registered) {
-		mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
 		ndev->nb_registered = false;
+		mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
 	}
 	wq = mvdev->wq;
 	mvdev->wq = NULL;
0 commit comments