@@ -1052,32 +1052,34 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
10521052
10531053#ifdef CONFIG_PAGE_POOL
10541054
/* Upper bounds on what a single SO_DEVMEM_DONTNEED syscall may process.
 *
 * MAX_DONTNEED_TOKENS caps how many dmabuf_token entries the user can pass
 * in one call, bounding the kernel memory allocated to copy them in.
 * MAX_DONTNEED_FRAGS caps the total number of frags released across all
 * tokens, so the loop over frags cannot run for too long under the lock.
 */
#define MAX_DONTNEED_TOKENS 128
#define MAX_DONTNEED_FRAGS 1024

10611063static noinline_for_stack int
10621064sock_devmem_dontneed (struct sock * sk , sockptr_t optval , unsigned int optlen )
10631065{
10641066 unsigned int num_tokens , i , j , k , netmem_num = 0 ;
10651067 struct dmabuf_token * tokens ;
1068+ int ret = 0 , num_frags = 0 ;
10661069 netmem_ref netmems [16 ];
1067- int ret = 0 ;
10681070
10691071 if (!sk_is_tcp (sk ))
10701072 return - EBADF ;
10711073
1072- if (optlen % sizeof (struct dmabuf_token ) ||
1074+ if (optlen % sizeof (* tokens ) ||
10731075 optlen > sizeof (* tokens ) * MAX_DONTNEED_TOKENS )
10741076 return - EINVAL ;
10751077
1076- tokens = kvmalloc_array (optlen , sizeof (* tokens ), GFP_KERNEL );
1078+ num_tokens = optlen / sizeof (* tokens );
1079+ tokens = kvmalloc_array (num_tokens , sizeof (* tokens ), GFP_KERNEL );
10771080 if (!tokens )
10781081 return - ENOMEM ;
10791082
1080- num_tokens = optlen / sizeof (struct dmabuf_token );
10811083 if (copy_from_sockptr (tokens , optval , optlen )) {
10821084 kvfree (tokens );
10831085 return - EFAULT ;
@@ -1086,24 +1088,28 @@ sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen)
10861088 xa_lock_bh (& sk -> sk_user_frags );
10871089 for (i = 0 ; i < num_tokens ; i ++ ) {
10881090 for (j = 0 ; j < tokens [i ].token_count ; j ++ ) {
1091+ if (++ num_frags > MAX_DONTNEED_FRAGS )
1092+ goto frag_limit_reached ;
1093+
10891094 netmem_ref netmem = (__force netmem_ref )__xa_erase (
10901095 & sk -> sk_user_frags , tokens [i ].token_start + j );
10911096
1092- if (netmem &&
1093- !WARN_ON_ONCE (!netmem_is_net_iov (netmem ))) {
1094- netmems [netmem_num ++ ] = netmem ;
1095- if (netmem_num == ARRAY_SIZE (netmems )) {
1096- xa_unlock_bh (& sk -> sk_user_frags );
1097- for (k = 0 ; k < netmem_num ; k ++ )
1098- WARN_ON_ONCE (!napi_pp_put_page (netmems [k ]));
1099- netmem_num = 0 ;
1100- xa_lock_bh (& sk -> sk_user_frags );
1101- }
1102- ret ++ ;
1097+ if (!netmem || WARN_ON_ONCE (!netmem_is_net_iov (netmem )))
1098+ continue ;
1099+
1100+ netmems [netmem_num ++ ] = netmem ;
1101+ if (netmem_num == ARRAY_SIZE (netmems )) {
1102+ xa_unlock_bh (& sk -> sk_user_frags );
1103+ for (k = 0 ; k < netmem_num ; k ++ )
1104+ WARN_ON_ONCE (!napi_pp_put_page (netmems [k ]));
1105+ netmem_num = 0 ;
1106+ xa_lock_bh (& sk -> sk_user_frags );
11031107 }
1108+ ret ++ ;
11041109 }
11051110 }
11061111
1112+ frag_limit_reached :
11071113 xa_unlock_bh (& sk -> sk_user_frags );
11081114 for (k = 0 ; k < netmem_num ; k ++ )
11091115 WARN_ON_ONCE (!napi_pp_put_page (netmems [k ]));
0 commit comments