@@ -94,6 +94,11 @@ static u16 in_to_opcode(void *in)
9494 return MLX5_GET (mbox_in , in , opcode );
9595}
9696
97+ static u16 in_to_uid (void * in )
98+ {
99+ return MLX5_GET (mbox_in , in , uid );
100+ }
101+
97102/* Returns true for opcodes that might be triggered very frequently and throttle
98103 * the command interface. Limit their command slots usage.
99104 */
@@ -823,7 +828,7 @@ static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
823828
824829 opcode = in_to_opcode (in );
825830 op_mod = MLX5_GET (mbox_in , in , op_mod );
826- uid = MLX5_GET ( mbox_in , in , uid );
831+ uid = in_to_uid ( in );
827832 status = MLX5_GET (mbox_out , out , status );
828833
829834 if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY &&
@@ -1871,6 +1876,17 @@ static int is_manage_pages(void *in)
18711876 return in_to_opcode (in ) == MLX5_CMD_OP_MANAGE_PAGES ;
18721877}
18731878
1879+ static bool mlx5_has_privileged_uid (struct mlx5_core_dev * dev )
1880+ {
1881+ return !xa_empty (& dev -> cmd .vars .privileged_uids );
1882+ }
1883+
1884+ static bool mlx5_cmd_is_privileged_uid (struct mlx5_core_dev * dev ,
1885+ u16 uid )
1886+ {
1887+ return !!xa_load (& dev -> cmd .vars .privileged_uids , uid );
1888+ }
1889+
18741890/* Notes:
18751891 * 1. Callback functions may not sleep
 * 2. Page queue commands do not support asynchronous completion
@@ -1881,7 +1897,9 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
18811897{
18821898 struct mlx5_cmd_msg * inb , * outb ;
18831899 u16 opcode = in_to_opcode (in );
1884- bool throttle_op ;
1900+ bool throttle_locked = false;
1901+ bool unpriv_locked = false;
1902+ u16 uid = in_to_uid (in );
18851903 int pages_queue ;
18861904 gfp_t gfp ;
18871905 u8 token ;
@@ -1890,12 +1908,17 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
18901908 if (mlx5_cmd_is_down (dev ) || !opcode_allowed (& dev -> cmd , opcode ))
18911909 return - ENXIO ;
18921910
1893- throttle_op = mlx5_cmd_is_throttle_opcode (opcode );
1894- if (throttle_op ) {
1895- if (callback ) {
1896- if (down_trylock (& dev -> cmd .vars .throttle_sem ))
1897- return - EBUSY ;
1898- } else {
1911+ if (!callback ) {
1912+ /* The semaphore is already held for callback commands. It was
1913+ * acquired in mlx5_cmd_exec_cb()
1914+ */
1915+ if (uid && mlx5_has_privileged_uid (dev )) {
1916+ if (!mlx5_cmd_is_privileged_uid (dev , uid )) {
1917+ unpriv_locked = true;
1918+ down (& dev -> cmd .vars .unprivileged_sem );
1919+ }
1920+ } else if (mlx5_cmd_is_throttle_opcode (opcode )) {
1921+ throttle_locked = true;
18991922 down (& dev -> cmd .vars .throttle_sem );
19001923 }
19011924 }
@@ -1941,8 +1964,11 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
19411964out_in :
19421965 free_msg (dev , inb );
19431966out_up :
1944- if (throttle_op )
1967+ if (throttle_locked )
19451968 up (& dev -> cmd .vars .throttle_sem );
1969+ if (unpriv_locked )
1970+ up (& dev -> cmd .vars .unprivileged_sem );
1971+
19461972 return err ;
19471973}
19481974
@@ -2104,18 +2130,22 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work)
21042130 struct mlx5_async_work * work = _work ;
21052131 struct mlx5_async_ctx * ctx ;
21062132 struct mlx5_core_dev * dev ;
2107- u16 opcode ;
2133+ bool throttle_locked ;
2134+ bool unpriv_locked ;
21082135
21092136 ctx = work -> ctx ;
21102137 dev = ctx -> dev ;
2111- opcode = work -> opcode ;
2138+ throttle_locked = work -> throttle_locked ;
2139+ unpriv_locked = work -> unpriv_locked ;
21122140 status = cmd_status_err (dev , status , work -> opcode , work -> op_mod , work -> out );
21132141 work -> user_callback (status , work );
21142142 /* Can't access "work" from this point on. It could have been freed in
21152143 * the callback.
21162144 */
2117- if (mlx5_cmd_is_throttle_opcode ( opcode ) )
2145+ if (throttle_locked )
21182146 up (& dev -> cmd .vars .throttle_sem );
2147+ if (unpriv_locked )
2148+ up (& dev -> cmd .vars .unprivileged_sem );
21192149 if (atomic_dec_and_test (& ctx -> num_inflight ))
21202150 complete (& ctx -> inflight_done );
21212151}
@@ -2124,18 +2154,52 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
21242154 void * out , int out_size , mlx5_async_cbk_t callback ,
21252155 struct mlx5_async_work * work )
21262156{
2157+ struct mlx5_core_dev * dev = ctx -> dev ;
2158+ u16 uid ;
21272159 int ret ;
21282160
21292161 work -> ctx = ctx ;
21302162 work -> user_callback = callback ;
21312163 work -> opcode = in_to_opcode (in );
21322164 work -> op_mod = MLX5_GET (mbox_in , in , op_mod );
21332165 work -> out = out ;
2166+ work -> throttle_locked = false;
2167+ work -> unpriv_locked = false;
2168+ uid = in_to_uid (in );
2169+
21342170 if (WARN_ON (!atomic_inc_not_zero (& ctx -> num_inflight )))
21352171 return - EIO ;
2136- ret = cmd_exec (ctx -> dev , in , in_size , out , out_size ,
2172+
2173+ if (uid && mlx5_has_privileged_uid (dev )) {
2174+ if (!mlx5_cmd_is_privileged_uid (dev , uid )) {
2175+ if (down_trylock (& dev -> cmd .vars .unprivileged_sem )) {
2176+ ret = - EBUSY ;
2177+ goto dec_num_inflight ;
2178+ }
2179+ work -> unpriv_locked = true;
2180+ }
2181+ } else if (mlx5_cmd_is_throttle_opcode (in_to_opcode (in ))) {
2182+ if (down_trylock (& dev -> cmd .vars .throttle_sem )) {
2183+ ret = - EBUSY ;
2184+ goto dec_num_inflight ;
2185+ }
2186+ work -> throttle_locked = true;
2187+ }
2188+
2189+ ret = cmd_exec (dev , in , in_size , out , out_size ,
21372190 mlx5_cmd_exec_cb_handler , work , false);
2138- if (ret && atomic_dec_and_test (& ctx -> num_inflight ))
2191+ if (ret )
2192+ goto sem_up ;
2193+
2194+ return 0 ;
2195+
2196+ sem_up :
2197+ if (work -> throttle_locked )
2198+ up (& dev -> cmd .vars .throttle_sem );
2199+ if (work -> unpriv_locked )
2200+ up (& dev -> cmd .vars .unprivileged_sem );
2201+ dec_num_inflight :
2202+ if (atomic_dec_and_test (& ctx -> num_inflight ))
21392203 complete (& ctx -> inflight_done );
21402204
21412205 return ret ;
@@ -2371,10 +2435,16 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)
23712435 sema_init (& cmd -> vars .sem , cmd -> vars .max_reg_cmds );
23722436 sema_init (& cmd -> vars .pages_sem , 1 );
23732437 sema_init (& cmd -> vars .throttle_sem , DIV_ROUND_UP (cmd -> vars .max_reg_cmds , 2 ));
2438+ sema_init (& cmd -> vars .unprivileged_sem ,
2439+ DIV_ROUND_UP (cmd -> vars .max_reg_cmds , 2 ));
2440+
2441+ xa_init (& cmd -> vars .privileged_uids );
23742442
23752443 cmd -> pool = dma_pool_create ("mlx5_cmd" , mlx5_core_dma_dev (dev ), size , align , 0 );
2376- if (!cmd -> pool )
2377- return - ENOMEM ;
2444+ if (!cmd -> pool ) {
2445+ err = - ENOMEM ;
2446+ goto err_destroy_xa ;
2447+ }
23782448
23792449 err = alloc_cmd_page (dev , cmd );
23802450 if (err )
@@ -2408,6 +2478,8 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)
24082478 free_cmd_page (dev , cmd );
24092479err_free_pool :
24102480 dma_pool_destroy (cmd -> pool );
2481+ err_destroy_xa :
2482+ xa_destroy (& dev -> cmd .vars .privileged_uids );
24112483 return err ;
24122484}
24132485
@@ -2420,10 +2492,26 @@ void mlx5_cmd_disable(struct mlx5_core_dev *dev)
24202492 destroy_msg_cache (dev );
24212493 free_cmd_page (dev , cmd );
24222494 dma_pool_destroy (cmd -> pool );
2495+ xa_destroy (& dev -> cmd .vars .privileged_uids );
24232496}
24242497
24252498void mlx5_cmd_set_state (struct mlx5_core_dev * dev ,
24262499 enum mlx5_cmdif_state cmdif_state )
24272500{
24282501 dev -> cmd .state = cmdif_state ;
24292502}
2503+
2504+ int mlx5_cmd_add_privileged_uid (struct mlx5_core_dev * dev , u16 uid )
2505+ {
2506+ return xa_insert (& dev -> cmd .vars .privileged_uids , uid ,
2507+ xa_mk_value (uid ), GFP_KERNEL );
2508+ }
2509+ EXPORT_SYMBOL (mlx5_cmd_add_privileged_uid );
2510+
2511+ void mlx5_cmd_remove_privileged_uid (struct mlx5_core_dev * dev , u16 uid )
2512+ {
2513+ void * data = xa_erase (& dev -> cmd .vars .privileged_uids , uid );
2514+
2515+ WARN (!data , "Privileged UID %u does not exist\n" , uid );
2516+ }
2517+ EXPORT_SYMBOL (mlx5_cmd_remove_privileged_uid );