Skip to content

Commit 06bab69

Browse files
chumakdkuba-moo
authored and committed
net/mlx5: Implement devlink port function cmds to control ipsec_crypto
Implement devlink port function commands to enable / disable IPsec crypto offloads. This is used to control the IPsec capability of the device. When ipsec_crypto is enabled for a VF, it prevents adding IPsec crypto offloads on the PF, because the two cannot be active simultaneously due to HW constraints. Conversely, if there are any active IPsec crypto offloads on the PF, it's not allowed to enable ipsec_crypto on a VF, until PF IPsec offloads are cleared. Signed-off-by: Dima Chumak <dchumak@nvidia.com> Signed-off-by: Leon Romanovsky <leonro@nvidia.com> Signed-off-by: Saeed Mahameed <saeedm@nvidia.com> Link: https://lore.kernel.org/r/20230825062836.103744-8-saeed@kernel.org Signed-off-by: Jakub Kicinski <kuba@kernel.org>
1 parent 8efd7b1 commit 06bab69

7 files changed

Lines changed: 431 additions & 1 deletion

File tree

Documentation/networking/device_drivers/ethernet/mellanox/mlx5/switchdev.rst

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -190,6 +190,16 @@ explicitly enable the VF migratable capability.
190190
mlx5 driver support devlink port function attr mechanism to setup migratable
191191
capability. (refer to Documentation/networking/devlink/devlink-port.rst)
192192

193+
IPsec crypto capability setup
194+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
195+
Users who want mlx5 PCI VFs to be able to perform IPsec crypto offloading need
196+
to explicitly enable the VF ipsec_crypto capability. Enabling IPsec capability
197+
for VFs is supported starting with ConnectX6dx devices and above. When a VF has
198+
IPsec capability enabled, any IPsec offloading is blocked on the PF.
199+
200+
The mlx5 driver supports the devlink port function attr mechanism to set up the ipsec_crypto
201+
capability. (refer to Documentation/networking/devlink/devlink-port.rst)
202+
193203
SF state setup
194204
--------------
195205

drivers/net/ethernet/mellanox/mlx5/core/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,7 @@ mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
6969
#
7070
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \
7171
ecpf.o rdma.o esw/legacy.o \
72-
esw/devlink_port.o esw/vporttbl.o esw/qos.o
72+
esw/devlink_port.o esw/vporttbl.o esw/qos.o esw/ipsec.o
7373

7474
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
7575
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \

drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -92,6 +92,10 @@ static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
9292
.port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
9393
.port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get,
9494
.port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
95+
#ifdef CONFIG_XFRM_OFFLOAD
96+
.port_fn_ipsec_crypto_get = mlx5_devlink_port_fn_ipsec_crypto_get,
97+
.port_fn_ipsec_crypto_set = mlx5_devlink_port_fn_ipsec_crypto_set,
98+
#endif /* CONFIG_XFRM_OFFLOAD */
9599
};
96100

97101
static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
Lines changed: 309 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,309 @@
1+
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2+
// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3+
4+
#include <linux/mlx5/device.h>
5+
#include <linux/mlx5/vport.h>
6+
#include "mlx5_core.h"
7+
#include "eswitch.h"
8+
9+
/* Query the generic ipsec_offload HCA capability of the function behind
 * @vport_num and report it via @result.
 *
 * Returns 0 on success (with *result valid), -EOPNOTSUPP when this device
 * cannot manage other functions' resources, -ENOMEM on allocation failure,
 * or the error from the capability query.
 */
static int esw_ipsec_vf_query_generic(struct mlx5_core_dev *dev, u16 vport_num, bool *result)
{
	int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *hca_cap, *query_cap;
	int err;

	if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
		return -EOPNOTSUPP;

	/* Old firmware cannot be trusted about this cap - treat as disabled. */
	if (!mlx5_esw_ipsec_vf_offload_supported(dev)) {
		*result = false;
		return 0;
	}

	query_cap = kvzalloc(query_sz, GFP_KERNEL);
	if (!query_cap)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_general_cap(dev, vport_num, query_cap);
	if (err)
		goto free;

	hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
	*result = MLX5_GET(cmd_hca_cap, hca_cap, ipsec_offload);
free:
	kvfree(query_cap);
	return err;
}
37+
38+
/* Kinds of per-vport IPsec offload that can be toggled individually. */
enum esw_vport_ipsec_offload {
	MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD,
};
41+
42+
int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev, struct mlx5_vport *vport)
43+
{
44+
int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
45+
void *hca_cap, *query_cap;
46+
bool ipsec_enabled;
47+
int err;
48+
49+
/* Querying IPsec caps only makes sense when generic ipsec_offload
50+
* HCA cap is enabled
51+
*/
52+
err = esw_ipsec_vf_query_generic(dev, vport->vport, &ipsec_enabled);
53+
if (err)
54+
return err;
55+
56+
if (!ipsec_enabled) {
57+
vport->info.ipsec_crypto_enabled = false;
58+
return 0;
59+
}
60+
61+
query_cap = kvzalloc(query_sz, GFP_KERNEL);
62+
if (!query_cap)
63+
return -ENOMEM;
64+
65+
err = mlx5_vport_get_other_func_cap(dev, vport->vport, query_cap, MLX5_CAP_IPSEC);
66+
if (err)
67+
goto free;
68+
69+
hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
70+
vport->info.ipsec_crypto_enabled =
71+
MLX5_GET(ipsec_cap, hca_cap, ipsec_crypto_offload);
72+
free:
73+
kvfree(query_cap);
74+
return err;
75+
}
76+
77+
/* Toggle the generic ipsec_offload HCA cap of the function behind
 * @vport_num via SET_HCA_CAP, preserving all of its other general caps
 * through a read-modify-write cycle.
 */
static int esw_ipsec_vf_set_generic(struct mlx5_core_dev *dev, u16 vport_num, bool ipsec_ofld)
{
	int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *hca_cap, *query_cap, *cap;
	int ret;

	if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
		return -EOPNOTSUPP;

	query_cap = kvzalloc(query_sz, GFP_KERNEL);
	hca_cap = kvzalloc(set_sz, GFP_KERNEL);
	if (!hca_cap || !query_cap) {
		ret = -ENOMEM;
		goto free;
	}

	/* Fetch the function's current general caps ... */
	ret = mlx5_vport_get_other_func_general_cap(dev, vport_num, query_cap);
	if (ret)
		goto free;

	/* ... copy them into the SET_HCA_CAP payload, flip only ipsec_offload. */
	cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
	memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
	       MLX5_UN_SZ_BYTES(hca_cap_union));
	MLX5_SET(cmd_hca_cap, cap, ipsec_offload, ipsec_ofld);

	MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
	MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport_num);
	MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
	ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
free:
	kvfree(hca_cap);
	kvfree(query_cap);
	return ret;
}
115+
116+
static int esw_ipsec_vf_set_bytype(struct mlx5_core_dev *dev, struct mlx5_vport *vport,
117+
bool enable, enum esw_vport_ipsec_offload type)
118+
{
119+
int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
120+
int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
121+
void *hca_cap, *query_cap, *cap;
122+
int ret;
123+
124+
if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
125+
return -EOPNOTSUPP;
126+
127+
query_cap = kvzalloc(query_sz, GFP_KERNEL);
128+
hca_cap = kvzalloc(set_sz, GFP_KERNEL);
129+
if (!hca_cap || !query_cap) {
130+
ret = -ENOMEM;
131+
goto free;
132+
}
133+
134+
ret = mlx5_vport_get_other_func_cap(dev, vport->vport, query_cap, MLX5_CAP_IPSEC);
135+
if (ret)
136+
goto free;
137+
138+
cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
139+
memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
140+
MLX5_UN_SZ_BYTES(hca_cap_union));
141+
142+
switch (type) {
143+
case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
144+
MLX5_SET(ipsec_cap, cap, ipsec_crypto_offload, enable);
145+
break;
146+
default:
147+
ret = -EOPNOTSUPP;
148+
goto free;
149+
}
150+
151+
MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
152+
MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
153+
MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport->vport);
154+
155+
MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
156+
MLX5_SET_HCA_CAP_OP_MOD_IPSEC << 1);
157+
ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
158+
free:
159+
kvfree(hca_cap);
160+
kvfree(query_cap);
161+
return ret;
162+
}
163+
164+
/* Toggle the auxiliary insert_trailer Ethernet-offload cap that IPsec
 * crypto offload requires on the function behind @vport_num, preserving
 * its other per-protocol networking offload caps.
 */
static int esw_ipsec_vf_crypto_aux_caps_set(struct mlx5_core_dev *dev, u16 vport_num, bool enable)
{
	int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *hca_cap, *query_cap, *cap;
	int ret;

	query_cap = kvzalloc(query_sz, GFP_KERNEL);
	hca_cap = kvzalloc(set_sz, GFP_KERNEL);
	if (!hca_cap || !query_cap) {
		ret = -ENOMEM;
		goto free;
	}

	/* Read-modify-write: keep all other caps, flip only insert_trailer. */
	ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_ETHERNET_OFFLOADS);
	if (ret)
		goto free;

	cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
	memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
	       MLX5_UN_SZ_BYTES(hca_cap_union));
	MLX5_SET(per_protocol_networking_offload_caps, cap, insert_trailer, enable);
	MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
	MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport_num);
	MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_ETHERNET_OFFLOADS << 1);
	/* Issue on @dev directly; the original fetched dev->priv.eswitch only
	 * to dereference esw->dev, which is this same device.
	 */
	ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
free:
	kvfree(hca_cap);
	kvfree(query_cap);
	return ret;
}
198+
199+
static int esw_ipsec_vf_offload_set_bytype(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
200+
bool enable, enum esw_vport_ipsec_offload type)
201+
{
202+
struct mlx5_core_dev *dev = esw->dev;
203+
int err;
204+
205+
if (vport->vport == MLX5_VPORT_PF)
206+
return -EOPNOTSUPP;
207+
208+
if (type == MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD) {
209+
err = esw_ipsec_vf_crypto_aux_caps_set(dev, vport->vport, enable);
210+
if (err)
211+
return err;
212+
}
213+
214+
if (enable) {
215+
err = esw_ipsec_vf_set_generic(dev, vport->vport, enable);
216+
if (err)
217+
return err;
218+
err = esw_ipsec_vf_set_bytype(dev, vport, enable, type);
219+
if (err)
220+
return err;
221+
} else {
222+
err = esw_ipsec_vf_set_bytype(dev, vport, enable, type);
223+
if (err)
224+
return err;
225+
err = esw_ipsec_vf_set_generic(dev, vport->vport, enable);
226+
if (err)
227+
return err;
228+
}
229+
230+
switch (type) {
231+
case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
232+
vport->info.ipsec_crypto_enabled = enable;
233+
break;
234+
default:
235+
return -EINVAL;
236+
}
237+
238+
return 0;
239+
}
240+
241+
/* Return 0 when the function behind @vport_num can hold DEK objects
 * (log_max_dek != 0), -EOPNOTSUPP when it cannot, or a query error.
 */
static int esw_ipsec_offload_supported(struct mlx5_core_dev *dev, u16 vport_num)
{
	int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *hca_cap, *query_cap;
	int ret;

	query_cap = kvzalloc(query_sz, GFP_KERNEL);
	if (!query_cap)
		return -ENOMEM;

	ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_GENERAL);
	if (ret)
		goto free;

	hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
	if (!MLX5_GET(cmd_hca_cap, hca_cap, log_max_dek))
		ret = -EOPNOTSUPP;
free:
	kvfree(query_cap);
	return ret;
}
262+
263+
bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev)
264+
{
265+
/* Old firmware doesn't support ipsec_offload capability for VFs. This
266+
* can be detected by checking reformat_add_esp_trasport capability -
267+
* when this cap isn't supported it means firmware cannot be trusted
268+
* about what it reports for ipsec_offload cap.
269+
*/
270+
return MLX5_CAP_FLOWTABLE_NIC_TX(dev, reformat_add_esp_trasport);
271+
}
272+
273+
int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
274+
u16 vport_num)
275+
{
276+
int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
277+
void *hca_cap, *query_cap;
278+
int err;
279+
280+
if (!mlx5_esw_ipsec_vf_offload_supported(dev))
281+
return -EOPNOTSUPP;
282+
283+
err = esw_ipsec_offload_supported(dev, vport_num);
284+
if (err)
285+
return err;
286+
287+
query_cap = kvzalloc(query_sz, GFP_KERNEL);
288+
if (!query_cap)
289+
return -ENOMEM;
290+
291+
err = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_ETHERNET_OFFLOADS);
292+
if (err)
293+
goto free;
294+
295+
hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
296+
if (!MLX5_GET(per_protocol_networking_offload_caps, hca_cap, swp))
297+
goto free;
298+
299+
free:
300+
kvfree(query_cap);
301+
return err;
302+
}
303+
304+
int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
305+
bool enable)
306+
{
307+
return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,
308+
MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD);
309+
}

drivers/net/ethernet/mellanox/mlx5/core/eswitch.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -832,6 +832,9 @@ static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport *
832832

833833
hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
834834
vport->info.mig_enabled = MLX5_GET(cmd_hca_cap_2, hca_caps, migratable);
835+
836+
err = mlx5_esw_ipsec_vf_offload_get(esw->dev, vport);
837+
835838
out_free:
836839
kfree(query_ctx);
837840
return err;
@@ -914,6 +917,8 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
914917
/* Sync with current vport context */
915918
vport->enabled_events = enabled_events;
916919
vport->enabled = true;
920+
if (vport->vport != MLX5_VPORT_PF && vport->info.ipsec_crypto_enabled)
921+
esw->enabled_ipsec_vf_count++;
917922

918923
/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
919924
* in smartNIC as it's a vport group manager.
@@ -970,6 +975,9 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
970975
MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
971976
mlx5_esw_vport_vhca_id_clear(esw, vport_num);
972977

978+
if (vport->vport != MLX5_VPORT_PF && vport->info.ipsec_crypto_enabled)
979+
esw->enabled_ipsec_vf_count--;
980+
973981
/* We don't assume VFs will cleanup after themselves.
974982
* Calling vport change handler while vport is disabled will cleanup
975983
* the vport resources.

0 commit comments

Comments
 (0)