Skip to content

Commit 1450969

Browse files
dtatulea authored and mstsirkin committed
vdpa/mlx5: Introduce per vq and device resume
Implement vdpa vq and device resume if capability detected. Add support
for suspend -> ready state change.

Reviewed-by: Gal Pressman <gal@nvidia.com>
Acked-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Message-Id: <20231225151203.152687-4-dtatulea@nvidia.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
1 parent 651cdaa commit 1450969

1 file changed

Lines changed: 62 additions & 7 deletions

File tree

drivers/vdpa/mlx5/net/mlx5_vnet.c

Lines changed: 62 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1170,14 +1170,20 @@ static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueu
11701170
return err;
11711171
}
11721172

1173-
static bool is_valid_state_change(int oldstate, int newstate)
1173+
/* Resume is supported iff the .resume op was left populated at probe time
 * (it is cleared in mlx5v_probe when the firmware lacks the
 * freeze_to_rdy capability).
 */
static bool is_resumable(struct mlx5_vdpa_net *ndev)
{
	return ndev->mvdev.vdev.config->resume;
}
1177+
1178+
/* Validate a firmware virtqueue state transition.
 * Allowed: INIT -> RDY, RDY -> SUSPEND, and SUSPEND -> RDY (only when the
 * device is resumable). Anything else, including any transition out of the
 * ERR state, is rejected.
 */
static bool is_valid_state_change(int oldstate, int newstate, bool resumable)
{
	if (oldstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT)
		return newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY;

	if (oldstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)
		return newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND;

	if (oldstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)
		return resumable && newstate == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY;

	/* MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR and any unknown state. */
	return false;
}
@@ -1200,6 +1206,7 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
12001206
{
12011207
int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in);
12021208
u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {};
1209+
bool state_change = false;
12031210
void *obj_context;
12041211
void *cmd_hdr;
12051212
void *in;
@@ -1211,9 +1218,6 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
12111218
if (!modifiable_virtqueue_fields(mvq))
12121219
return -EINVAL;
12131220

1214-
if (!is_valid_state_change(mvq->fw_state, state))
1215-
return -EINVAL;
1216-
12171221
in = kzalloc(inlen, GFP_KERNEL);
12181222
if (!in)
12191223
return -ENOMEM;
@@ -1226,17 +1230,29 @@ static int modify_virtqueue(struct mlx5_vdpa_net *ndev,
12261230
MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid);
12271231

12281232
obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context);
1229-
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE)
1233+
1234+
if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) {
1235+
if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) {
1236+
err = -EINVAL;
1237+
goto done;
1238+
}
1239+
12301240
MLX5_SET(virtio_net_q_object, obj_context, state, state);
1241+
state_change = true;
1242+
}
12311243

12321244
MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields);
12331245
err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
1234-
kfree(in);
1235-
if (!err)
1246+
if (err)
1247+
goto done;
1248+
1249+
if (state_change)
12361250
mvq->fw_state = state;
12371251

12381252
mvq->modified_fields = 0;
12391253

1254+
done:
1255+
kfree(in);
12401256
return err;
12411257
}
12421258

@@ -1430,6 +1446,24 @@ static void suspend_vqs(struct mlx5_vdpa_net *ndev)
14301446
suspend_vq(ndev, &ndev->vqs[i]);
14311447
}
14321448

1449+
static void resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
1450+
{
1451+
if (!mvq->initialized || !is_resumable(ndev))
1452+
return;
1453+
1454+
if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)
1455+
return;
1456+
1457+
if (modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY))
1458+
mlx5_vdpa_warn(&ndev->mvdev, "modify to resume failed for vq %u\n", mvq->index);
1459+
}
1460+
1461+
static void resume_vqs(struct mlx5_vdpa_net *ndev)
1462+
{
1463+
for (int i = 0; i < ndev->mvdev.max_vqs; i++)
1464+
resume_vq(ndev, &ndev->vqs[i]);
1465+
}
1466+
14331467
static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
14341468
{
14351469
if (!mvq->initialized)
@@ -3261,6 +3295,23 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
32613295
return 0;
32623296
}
32633297

3298+
static int mlx5_vdpa_resume(struct vdpa_device *vdev)
3299+
{
3300+
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
3301+
struct mlx5_vdpa_net *ndev;
3302+
3303+
ndev = to_mlx5_vdpa_ndev(mvdev);
3304+
3305+
mlx5_vdpa_info(mvdev, "resuming device\n");
3306+
3307+
down_write(&ndev->reslock);
3308+
mvdev->suspended = false;
3309+
resume_vqs(ndev);
3310+
register_link_notifier(ndev);
3311+
up_write(&ndev->reslock);
3312+
return 0;
3313+
}
3314+
32643315
static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
32653316
unsigned int asid)
32663317
{
@@ -3317,6 +3368,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
33173368
.get_vq_dma_dev = mlx5_get_vq_dma_dev,
33183369
.free = mlx5_vdpa_free,
33193370
.suspend = mlx5_vdpa_suspend,
3371+
.resume = mlx5_vdpa_resume, /* Op disabled if not supported. */
33203372
};
33213373

33223374
static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
@@ -3688,6 +3740,9 @@ static int mlx5v_probe(struct auxiliary_device *adev,
36883740
if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, desc_group_mkey_supported))
36893741
mgtdev->vdpa_ops.get_vq_desc_group = NULL;
36903742

3743+
if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, freeze_to_rdy_supported))
3744+
mgtdev->vdpa_ops.resume = NULL;
3745+
36913746
err = vdpa_mgmtdev_register(&mgtdev->mgtdev);
36923747
if (err)
36933748
goto reg_err;

0 commit comments

Comments
 (0)