Skip to content

Commit fcfb035

Browse files
gurmichaelrleon
authored and committed
RDMA/mlx5: Align mkc page size capability check to PRM
Align the capabilities checked when using the log_page_size 6th bit in the mkey context to the PRM definition. The upper and lower bounds are set by max/min caps, and modification of the 6th bit by UMR is allowed only when a specific UMR cap is set. Current implementation falsely assumes all page sizes up to 2^63 are supported when the UMR cap is set. In case the upper bound cap is lower than 63, this might result in a FW syndrome on mkey creation, e.g: mlx5_core 0000:c1:00.0: mlx5_cmd_out_err:832:(pid 0): CREATE_MKEY(0x200) op_mod(0x0) failed, status bad parameter(0x3), syndrome (0x38a711), err(-22) Previous cap enforcement is still correct for all current HW, FW and driver combinations. However, this patch aligns the code to be PRM compliant in the general case. Signed-off-by: Michael Guralnik <michaelgur@nvidia.com> Link: https://patch.msgid.link/eab4eeb4785105a4bb5eb362dc0b3662cd840412.1751979184.git.leon@kernel.org Signed-off-by: Leon Romanovsky <leon@kernel.org>
1 parent 9879bdd commit fcfb035

2 files changed

Lines changed: 52 additions & 9 deletions

File tree

drivers/infiniband/hw/mlx5/mlx5_ib.h

Lines changed: 46 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1750,18 +1750,59 @@ static inline u32 smi_to_native_portnum(struct mlx5_ib_dev *dev, u32 port)
17501750
return (port - 1) / dev->num_ports + 1;
17511751
}
17521752

1753+
static inline unsigned int get_max_log_entity_size_cap(struct mlx5_ib_dev *dev,
1754+
int access_mode)
1755+
{
1756+
int max_log_size = 0;
1757+
1758+
if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
1759+
max_log_size =
1760+
MLX5_CAP_GEN_2(dev->mdev, max_mkey_log_entity_size_mtt);
1761+
else if (access_mode == MLX5_MKC_ACCESS_MODE_KSM)
1762+
max_log_size = MLX5_CAP_GEN_2(
1763+
dev->mdev, max_mkey_log_entity_size_fixed_buffer);
1764+
1765+
if (!max_log_size ||
1766+
(max_log_size > 31 &&
1767+
!MLX5_CAP_GEN_2(dev->mdev, umr_log_entity_size_5)))
1768+
max_log_size = 31;
1769+
1770+
return max_log_size;
1771+
}
1772+
1773+
static inline unsigned int get_min_log_entity_size_cap(struct mlx5_ib_dev *dev,
1774+
int access_mode)
1775+
{
1776+
int min_log_size = 0;
1777+
1778+
if (access_mode == MLX5_MKC_ACCESS_MODE_KSM &&
1779+
MLX5_CAP_GEN_2(dev->mdev,
1780+
min_mkey_log_entity_size_fixed_buffer_valid))
1781+
min_log_size = MLX5_CAP_GEN_2(
1782+
dev->mdev, min_mkey_log_entity_size_fixed_buffer);
1783+
else
1784+
min_log_size =
1785+
MLX5_CAP_GEN_2(dev->mdev, log_min_mkey_entity_size);
1786+
1787+
min_log_size = max(min_log_size, MLX5_ADAPTER_PAGE_SHIFT);
1788+
return min_log_size;
1789+
}
1790+
17531791
/*
17541792
* For mkc users, instead of a page_offset the command has a start_iova which
17551793
* specifies both the page_offset and the on-the-wire IOVA
17561794
*/
17571795
static __always_inline unsigned long
17581796
mlx5_umem_mkc_find_best_pgsz(struct mlx5_ib_dev *dev, struct ib_umem *umem,
1759-
u64 iova)
1797+
u64 iova, int access_mode)
17601798
{
1761-
int page_size_bits =
1762-
MLX5_CAP_GEN_2(dev->mdev, umr_log_entity_size_5) ? 6 : 5;
1763-
unsigned long bitmap =
1764-
__mlx5_log_page_size_to_bitmap(page_size_bits, 0);
1799+
unsigned int max_log_entity_size_cap, min_log_entity_size_cap;
1800+
unsigned long bitmap;
1801+
1802+
max_log_entity_size_cap = get_max_log_entity_size_cap(dev, access_mode);
1803+
min_log_entity_size_cap = get_min_log_entity_size_cap(dev, access_mode);
1804+
1805+
bitmap = GENMASK_ULL(max_log_entity_size_cap, min_log_entity_size_cap);
17651806

17661807
return ib_umem_find_best_pgsz(umem, bitmap, iova);
17671808
}

drivers/infiniband/hw/mlx5/mr.c

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1130,7 +1130,8 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
11301130
if (umem->is_dmabuf)
11311131
page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
11321132
else
1133-
page_size = mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
1133+
page_size = mlx5_umem_mkc_find_best_pgsz(dev, umem, iova,
1134+
access_mode);
11341135
if (WARN_ON(!page_size))
11351136
return ERR_PTR(-EINVAL);
11361137

@@ -1435,8 +1436,8 @@ static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
14351436
mr = alloc_cacheable_mr(pd, umem, iova, access_flags,
14361437
MLX5_MKC_ACCESS_MODE_MTT);
14371438
} else {
1438-
unsigned long page_size =
1439-
mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
1439+
unsigned long page_size = mlx5_umem_mkc_find_best_pgsz(
1440+
dev, umem, iova, MLX5_MKC_ACCESS_MODE_MTT);
14401441

14411442
mutex_lock(&dev->slow_path_mutex);
14421443
mr = reg_create(pd, umem, iova, access_flags, page_size,
@@ -1754,7 +1755,8 @@ static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
17541755
if (!mlx5r_umr_can_load_pas(dev, new_umem->length))
17551756
return false;
17561757

1757-
*page_size = mlx5_umem_mkc_find_best_pgsz(dev, new_umem, iova);
1758+
*page_size = mlx5_umem_mkc_find_best_pgsz(
1759+
dev, new_umem, iova, mr->mmkey.cache_ent->rb_key.access_mode);
17581760
if (WARN_ON(!*page_size))
17591761
return false;
17601762
return (mr->mmkey.cache_ent->rb_key.ndescs) >=

0 commit comments

Comments
 (0)