|
| 1 | +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB |
| 2 | +// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. |
| 3 | + |
| 4 | +#include <linux/mlx5/device.h> |
| 5 | +#include <linux/mlx5/vport.h> |
| 6 | +#include "mlx5_core.h" |
| 7 | +#include "eswitch.h" |
| 8 | + |
| 9 | +static int esw_ipsec_vf_query_generic(struct mlx5_core_dev *dev, u16 vport_num, bool *result) |
| 10 | +{ |
| 11 | + int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); |
| 12 | + void *hca_cap, *query_cap; |
| 13 | + int err; |
| 14 | + |
| 15 | + if (!MLX5_CAP_GEN(dev, vhca_resource_manager)) |
| 16 | + return -EOPNOTSUPP; |
| 17 | + |
| 18 | + if (!mlx5_esw_ipsec_vf_offload_supported(dev)) { |
| 19 | + *result = false; |
| 20 | + return 0; |
| 21 | + } |
| 22 | + |
| 23 | + query_cap = kvzalloc(query_sz, GFP_KERNEL); |
| 24 | + if (!query_cap) |
| 25 | + return -ENOMEM; |
| 26 | + |
| 27 | + err = mlx5_vport_get_other_func_general_cap(dev, vport_num, query_cap); |
| 28 | + if (err) |
| 29 | + goto free; |
| 30 | + |
| 31 | + hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability); |
| 32 | + *result = MLX5_GET(cmd_hca_cap, hca_cap, ipsec_offload); |
| 33 | +free: |
| 34 | + kvfree(query_cap); |
| 35 | + return err; |
| 36 | +} |
| 37 | + |
/* Kinds of per-vport IPsec offload that can be toggled through the
 * eswitch; only crypto offload is implemented in this file. */
enum esw_vport_ipsec_offload {
	MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD,
};
| 41 | + |
| 42 | +int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev, struct mlx5_vport *vport) |
| 43 | +{ |
| 44 | + int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); |
| 45 | + void *hca_cap, *query_cap; |
| 46 | + bool ipsec_enabled; |
| 47 | + int err; |
| 48 | + |
| 49 | + /* Querying IPsec caps only makes sense when generic ipsec_offload |
| 50 | + * HCA cap is enabled |
| 51 | + */ |
| 52 | + err = esw_ipsec_vf_query_generic(dev, vport->vport, &ipsec_enabled); |
| 53 | + if (err) |
| 54 | + return err; |
| 55 | + |
| 56 | + if (!ipsec_enabled) { |
| 57 | + vport->info.ipsec_crypto_enabled = false; |
| 58 | + return 0; |
| 59 | + } |
| 60 | + |
| 61 | + query_cap = kvzalloc(query_sz, GFP_KERNEL); |
| 62 | + if (!query_cap) |
| 63 | + return -ENOMEM; |
| 64 | + |
| 65 | + err = mlx5_vport_get_other_func_cap(dev, vport->vport, query_cap, MLX5_CAP_IPSEC); |
| 66 | + if (err) |
| 67 | + goto free; |
| 68 | + |
| 69 | + hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability); |
| 70 | + vport->info.ipsec_crypto_enabled = |
| 71 | + MLX5_GET(ipsec_cap, hca_cap, ipsec_crypto_offload); |
| 72 | +free: |
| 73 | + kvfree(query_cap); |
| 74 | + return err; |
| 75 | +} |
| 76 | + |
| 77 | +static int esw_ipsec_vf_set_generic(struct mlx5_core_dev *dev, u16 vport_num, bool ipsec_ofld) |
| 78 | +{ |
| 79 | + int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); |
| 80 | + int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); |
| 81 | + void *hca_cap, *query_cap, *cap; |
| 82 | + int ret; |
| 83 | + |
| 84 | + if (!MLX5_CAP_GEN(dev, vhca_resource_manager)) |
| 85 | + return -EOPNOTSUPP; |
| 86 | + |
| 87 | + query_cap = kvzalloc(query_sz, GFP_KERNEL); |
| 88 | + hca_cap = kvzalloc(set_sz, GFP_KERNEL); |
| 89 | + if (!hca_cap || !query_cap) { |
| 90 | + ret = -ENOMEM; |
| 91 | + goto free; |
| 92 | + } |
| 93 | + |
| 94 | + ret = mlx5_vport_get_other_func_general_cap(dev, vport_num, query_cap); |
| 95 | + if (ret) |
| 96 | + goto free; |
| 97 | + |
| 98 | + cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability); |
| 99 | + memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability), |
| 100 | + MLX5_UN_SZ_BYTES(hca_cap_union)); |
| 101 | + MLX5_SET(cmd_hca_cap, cap, ipsec_offload, ipsec_ofld); |
| 102 | + |
| 103 | + MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP); |
| 104 | + MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1); |
| 105 | + MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport_num); |
| 106 | + |
| 107 | + MLX5_SET(set_hca_cap_in, hca_cap, op_mod, |
| 108 | + MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1); |
| 109 | + ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap); |
| 110 | +free: |
| 111 | + kvfree(hca_cap); |
| 112 | + kvfree(query_cap); |
| 113 | + return ret; |
| 114 | +} |
| 115 | + |
/* Set or clear one specific per-protocol IPsec offload cap (selected by
 * @type) for another function (VF). Performs a read-modify-write of the
 * VF's IPSEC HCA caps so unrelated bits are preserved.
 * Returns 0 on success or a negative errno.
 */
static int esw_ipsec_vf_set_bytype(struct mlx5_core_dev *dev, struct mlx5_vport *vport,
				   bool enable, enum esw_vport_ipsec_offload type)
{
	int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *hca_cap, *query_cap, *cap;
	int ret;

	/* Changing another function's caps requires the resource-manager role */
	if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
		return -EOPNOTSUPP;

	query_cap = kvzalloc(query_sz, GFP_KERNEL);
	hca_cap = kvzalloc(set_sz, GFP_KERNEL);
	if (!hca_cap || !query_cap) {
		ret = -ENOMEM;
		goto free;
	}

	/* Fetch the VF's current IPSEC caps to seed the SET command below */
	ret = mlx5_vport_get_other_func_cap(dev, vport->vport, query_cap, MLX5_CAP_IPSEC);
	if (ret)
		goto free;

	cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
	memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
	       MLX5_UN_SZ_BYTES(hca_cap_union));

	switch (type) {
	case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
		MLX5_SET(ipsec_cap, cap, ipsec_crypto_offload, enable);
		break;
	default:
		ret = -EOPNOTSUPP;
		goto free;
	}

	MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
	MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport->vport);

	/* Capability type is carried in the upper bits of op_mod */
	MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_IPSEC << 1);
	ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
free:
	kvfree(hca_cap);
	kvfree(query_cap);
	return ret;
}
| 163 | + |
| 164 | +static int esw_ipsec_vf_crypto_aux_caps_set(struct mlx5_core_dev *dev, u16 vport_num, bool enable) |
| 165 | +{ |
| 166 | + int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); |
| 167 | + int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); |
| 168 | + struct mlx5_eswitch *esw = dev->priv.eswitch; |
| 169 | + void *hca_cap, *query_cap, *cap; |
| 170 | + int ret; |
| 171 | + |
| 172 | + query_cap = kvzalloc(query_sz, GFP_KERNEL); |
| 173 | + hca_cap = kvzalloc(set_sz, GFP_KERNEL); |
| 174 | + if (!hca_cap || !query_cap) { |
| 175 | + ret = -ENOMEM; |
| 176 | + goto free; |
| 177 | + } |
| 178 | + |
| 179 | + ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_ETHERNET_OFFLOADS); |
| 180 | + if (ret) |
| 181 | + goto free; |
| 182 | + |
| 183 | + cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability); |
| 184 | + memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability), |
| 185 | + MLX5_UN_SZ_BYTES(hca_cap_union)); |
| 186 | + MLX5_SET(per_protocol_networking_offload_caps, cap, insert_trailer, enable); |
| 187 | + MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP); |
| 188 | + MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1); |
| 189 | + MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport_num); |
| 190 | + MLX5_SET(set_hca_cap_in, hca_cap, op_mod, |
| 191 | + MLX5_SET_HCA_CAP_OP_MOD_ETHERNET_OFFLOADS << 1); |
| 192 | + ret = mlx5_cmd_exec_in(esw->dev, set_hca_cap, hca_cap); |
| 193 | +free: |
| 194 | + kvfree(hca_cap); |
| 195 | + kvfree(query_cap); |
| 196 | + return ret; |
| 197 | +} |
| 198 | + |
| 199 | +static int esw_ipsec_vf_offload_set_bytype(struct mlx5_eswitch *esw, struct mlx5_vport *vport, |
| 200 | + bool enable, enum esw_vport_ipsec_offload type) |
| 201 | +{ |
| 202 | + struct mlx5_core_dev *dev = esw->dev; |
| 203 | + int err; |
| 204 | + |
| 205 | + if (vport->vport == MLX5_VPORT_PF) |
| 206 | + return -EOPNOTSUPP; |
| 207 | + |
| 208 | + if (type == MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD) { |
| 209 | + err = esw_ipsec_vf_crypto_aux_caps_set(dev, vport->vport, enable); |
| 210 | + if (err) |
| 211 | + return err; |
| 212 | + } |
| 213 | + |
| 214 | + if (enable) { |
| 215 | + err = esw_ipsec_vf_set_generic(dev, vport->vport, enable); |
| 216 | + if (err) |
| 217 | + return err; |
| 218 | + err = esw_ipsec_vf_set_bytype(dev, vport, enable, type); |
| 219 | + if (err) |
| 220 | + return err; |
| 221 | + } else { |
| 222 | + err = esw_ipsec_vf_set_bytype(dev, vport, enable, type); |
| 223 | + if (err) |
| 224 | + return err; |
| 225 | + err = esw_ipsec_vf_set_generic(dev, vport->vport, enable); |
| 226 | + if (err) |
| 227 | + return err; |
| 228 | + } |
| 229 | + |
| 230 | + switch (type) { |
| 231 | + case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD: |
| 232 | + vport->info.ipsec_crypto_enabled = enable; |
| 233 | + break; |
| 234 | + default: |
| 235 | + return -EINVAL; |
| 236 | + } |
| 237 | + |
| 238 | + return 0; |
| 239 | +} |
| 240 | + |
| 241 | +static int esw_ipsec_offload_supported(struct mlx5_core_dev *dev, u16 vport_num) |
| 242 | +{ |
| 243 | + int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); |
| 244 | + void *hca_cap, *query_cap; |
| 245 | + int ret; |
| 246 | + |
| 247 | + query_cap = kvzalloc(query_sz, GFP_KERNEL); |
| 248 | + if (!query_cap) |
| 249 | + return -ENOMEM; |
| 250 | + |
| 251 | + ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_GENERAL); |
| 252 | + if (ret) |
| 253 | + goto free; |
| 254 | + |
| 255 | + hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability); |
| 256 | + if (!MLX5_GET(cmd_hca_cap, hca_cap, log_max_dek)) |
| 257 | + ret = -EOPNOTSUPP; |
| 258 | +free: |
| 259 | + kvfree(query_cap); |
| 260 | + return ret; |
| 261 | +} |
| 262 | + |
/* Return true when firmware can be trusted about the VF ipsec_offload cap.
 *
 * Old firmware doesn't support ipsec_offload capability for VFs. This
 * can be detected by checking reformat_add_esp_trasport capability -
 * when this cap isn't supported it means firmware cannot be trusted
 * about what it reports for ipsec_offload cap.
 *
 * NOTE: the "trasport" spelling is the firmware-interface field name
 * (mlx5_ifc) — do not "fix" it here.
 */
bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_FLOWTABLE_NIC_TX(dev, reformat_add_esp_trasport);
}
| 272 | + |
| 273 | +int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev, |
| 274 | + u16 vport_num) |
| 275 | +{ |
| 276 | + int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); |
| 277 | + void *hca_cap, *query_cap; |
| 278 | + int err; |
| 279 | + |
| 280 | + if (!mlx5_esw_ipsec_vf_offload_supported(dev)) |
| 281 | + return -EOPNOTSUPP; |
| 282 | + |
| 283 | + err = esw_ipsec_offload_supported(dev, vport_num); |
| 284 | + if (err) |
| 285 | + return err; |
| 286 | + |
| 287 | + query_cap = kvzalloc(query_sz, GFP_KERNEL); |
| 288 | + if (!query_cap) |
| 289 | + return -ENOMEM; |
| 290 | + |
| 291 | + err = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_ETHERNET_OFFLOADS); |
| 292 | + if (err) |
| 293 | + goto free; |
| 294 | + |
| 295 | + hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability); |
| 296 | + if (!MLX5_GET(per_protocol_networking_offload_caps, hca_cap, swp)) |
| 297 | + goto free; |
| 298 | + |
| 299 | +free: |
| 300 | + kvfree(query_cap); |
| 301 | + return err; |
| 302 | +} |
| 303 | + |
/* Enable or disable IPsec crypto offload on a VF vport.
 * Thin wrapper selecting the CRYPTO offload type; returns 0 on success
 * or a negative errno from esw_ipsec_vf_offload_set_bytype().
 */
int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable)
{
	return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,
					       MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD);
}
0 commit comments