Skip to content

Commit 4d9d722

Browse files
emusln authored and davem330 committed
ionic: count SGs in packet to minimize linearize
There are some cases where an skb carries more frags than the number of SGs that ionic can support per descriptor - this forces the driver to linearize the skb. However, if this is a TSO packet that is going to become multiple descriptors (one per MTU-sized packet) and spread the frags across them, this time-consuming linearization is likely not necessary. We scan the frag list and count up the number of SGs that would be created for each descriptor that would be generated, and only linearize if we hit the SG limit on a descriptor. In most cases, we won't even get to the frag list scan, so this doesn't affect typical traffic. Signed-off-by: Shannon Nelson <shannon.nelson@amd.com> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 4fa7011 commit 4d9d722

1 file changed

Lines changed: 68 additions & 9 deletions

File tree

drivers/net/ethernet/pensando/ionic/ionic_txrx.c

Lines changed: 68 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1239,25 +1239,84 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
12391239
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
12401240
{
12411241
struct ionic_tx_stats *stats = q_to_tx_stats(q);
1242+
bool too_many_frags = false;
1243+
skb_frag_t *frag;
1244+
int desc_bufs;
1245+
int chunk_len;
1246+
int frag_rem;
1247+
int tso_rem;
1248+
int seg_rem;
1249+
bool encap;
1250+
int hdrlen;
12421251
int ndescs;
12431252
int err;
12441253

12451254
/* Each desc is mss long max, so a descriptor for each gso_seg */
1246-
if (skb_is_gso(skb))
1255+
if (skb_is_gso(skb)) {
12471256
ndescs = skb_shinfo(skb)->gso_segs;
1248-
else
1257+
} else {
12491258
ndescs = 1;
1259+
if (skb_shinfo(skb)->nr_frags > q->max_sg_elems) {
1260+
too_many_frags = true;
1261+
goto linearize;
1262+
}
1263+
}
12501264

1251-
/* If non-TSO, just need 1 desc and nr_frags sg elems */
1252-
if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems)
1265+
/* If non-TSO, or no frags to check, we're done */
1266+
if (!skb_is_gso(skb) || !skb_shinfo(skb)->nr_frags)
12531267
return ndescs;
12541268

1255-
/* Too many frags, so linearize */
1256-
err = skb_linearize(skb);
1257-
if (err)
1258-
return err;
1269+
/* We need to scan the skb to be sure that none of the MTU sized
1270+
* packets in the TSO will require more sgs per descriptor than we
1271+
* can support. We loop through the frags, add up the lengths for
1272+
* a packet, and count the number of sgs used per packet.
1273+
*/
1274+
tso_rem = skb->len;
1275+
frag = skb_shinfo(skb)->frags;
1276+
encap = skb->encapsulation;
1277+
1278+
/* start with just hdr in first part of first descriptor */
1279+
if (encap)
1280+
hdrlen = skb_inner_tcp_all_headers(skb);
1281+
else
1282+
hdrlen = skb_tcp_all_headers(skb);
1283+
seg_rem = min_t(int, tso_rem, hdrlen + skb_shinfo(skb)->gso_size);
1284+
frag_rem = hdrlen;
1285+
1286+
while (tso_rem > 0) {
1287+
desc_bufs = 0;
1288+
while (seg_rem > 0) {
1289+
desc_bufs++;
1290+
1291+
/* We add the +1 because we can take buffers for one
1292+
* more than we have SGs: one for the initial desc data
1293+
* in addition to the SG segments that might follow.
1294+
*/
1295+
if (desc_bufs > q->max_sg_elems + 1) {
1296+
too_many_frags = true;
1297+
goto linearize;
1298+
}
1299+
1300+
if (frag_rem == 0) {
1301+
frag_rem = skb_frag_size(frag);
1302+
frag++;
1303+
}
1304+
chunk_len = min(frag_rem, seg_rem);
1305+
frag_rem -= chunk_len;
1306+
tso_rem -= chunk_len;
1307+
seg_rem -= chunk_len;
1308+
}
1309+
1310+
seg_rem = min_t(int, tso_rem, skb_shinfo(skb)->gso_size);
1311+
}
12591312

1260-
stats->linearize++;
1313+
linearize:
1314+
if (too_many_frags) {
1315+
err = skb_linearize(skb);
1316+
if (err)
1317+
return err;
1318+
stats->linearize++;
1319+
}
12611320

12621321
return ndescs;
12631322
}

0 commit comments

Comments
 (0)