@@ -334,6 +334,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 struct xenvif_tx_cb {
 	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
 	u8 copy_count;
+	u32 split_mask;
 };
 
 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
@@ -361,6 +362,8 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 	struct sk_buff *skb =
 		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
 			  GFP_ATOMIC | __GFP_NOWARN);
+
+	BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
 	if (unlikely(skb == NULL))
 		return NULL;
 
@@ -396,11 +399,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 	nr_slots = shinfo->nr_frags + 1;
 
 	copy_count(skb) = 0;
+	XENVIF_TX_CB(skb)->split_mask = 0;
 
 	/* Create copy ops for exactly data_len bytes into the skb head. */
 	__skb_put(skb, data_len);
 	while (data_len > 0) {
 		int amount = data_len > txp->size ? txp->size : data_len;
+		bool split = false;
 
 		cop->source.u.ref = txp->gref;
 		cop->source.domid = queue->vif->domid;
@@ -413,14 +418,22 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
 					       - data_len);
 
+		/* Don't cross local page boundary! */
+		if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
+			amount = XEN_PAGE_SIZE - cop->dest.offset;
+			XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
+			split = true;
+		}
+
 		cop->len = amount;
 		cop->flags = GNTCOPY_source_gref;
 
 		index = pending_index(queue->pending_cons);
 		pending_idx = queue->pending_ring[index];
 		callback_param(queue, pending_idx).ctx = NULL;
 		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
-		copy_count(skb)++;
+		if (!split)
+			copy_count(skb)++;
 
 		cop++;
 		data_len -= amount;
@@ -441,7 +454,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 			nr_slots--;
 		} else {
 			/* The copy op partially covered the tx_request.
-			 * The remainder will be mapped.
+			 * The remainder will be mapped or copied in the next
+			 * iteration.
 			 */
 			txp->offset += amount;
 			txp->size -= amount;
@@ -539,6 +553,13 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 		pending_idx = copy_pending_idx(skb, i);
 
 		newerr = (*gopp_copy)->status;
+
+		/* Split copies need to be handled together. */
+		if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
+			(*gopp_copy)++;
+			if (!newerr)
+				newerr = (*gopp_copy)->status;
+		}
 		if (likely(!newerr)) {
 			/* The first frag might still have this slot mapped */
 			if (i < copy_count(skb) - 1 || !sharedslot)
@@ -1061,10 +1082,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		__skb_queue_tail(&queue->tx_queue, skb);
 
 		queue->tx.req_cons = idx;
-
-		if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
-		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
-			break;
 	}
 
 	return;