Merge branch 'tcp-gso-settings-defer'

Eric Dumazet says:

====================
tcp: defer shinfo->gso_size|type settings

We put shinfo->gso_segs in TCP_SKB_CB(skb) a while back for performance
reasons.

This was in commit cd7d8498c9 ("tcp: change tcp_skb_pcount() location")

This patch series completes the job for gso_size and gso_type, so that
we do not bring two extra cache lines into the tcp write xmit fast path,
and makes tcp_init_tso_segs() simpler and faster.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2015-06-11 16:33:11 -07:00
commit c63264def3
4 changed files with 34 additions and 46 deletions
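The core idea behind the series is easiest to see outside the kernel: while an skb sits in the TCP write queue, the GSO metadata lives in TCP-private control-block storage, and skb_shinfo() (a separate cache line) is written exactly once, just before the skb is handed to the device layer. Below is a minimal userspace sketch of that flow; the struct and function names are illustrative stand-ins, not the kernel's.

```c
#include <stdio.h>

/* Illustrative stand-ins only -- not the kernel structures. */
struct shinfo {                 /* plays skb_shared_info: a separate cache line */
	unsigned int gso_segs;
	unsigned int gso_size;
	unsigned int gso_type;
};

struct tcp_cb {                 /* plays the GSO pair cached in the skb cb[] */
	unsigned short gso_segs;
	unsigned short gso_size;
};

/* The deferred fill: the write-queue fast path only ever touches the cb;
 * shinfo is written once, right before transmit. */
static void transmit(struct shinfo *sh, const struct tcp_cb *cb,
		     unsigned int gso_type)
{
	sh->gso_segs = cb->gso_segs;
	sh->gso_size = cb->gso_size;
	sh->gso_type = gso_type;   /* e.g. the socket's sk_gso_type */
}

int main(void)
{
	struct tcp_cb cb = { .gso_segs = 3, .gso_size = 1448 };
	struct shinfo sh = { 0, 0, 0 };

	transmit(&sh, &cb, 1);
	printf("gso_segs=%u gso_size=%u gso_type=%u\n",
	       sh.gso_segs, sh.gso_size, sh.gso_type);
	return 0;
}
```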

--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -730,11 +730,14 @@ struct tcp_skb_cb {
 		/* Note : tcp_tw_isn is used in input path only
 		 *	  (isn chosen by tcp_timewait_state_process())
 		 *
-		 * tcp_gso_segs is used in write queue only,
-		 * cf tcp_skb_pcount()
+		 * tcp_gso_segs/size are used in write queue only,
+		 * cf tcp_skb_pcount()/tcp_skb_mss()
 		 */
 		__u32		tcp_tw_isn;
-		__u32		tcp_gso_segs;
+		struct {
+			u16	tcp_gso_segs;
+			u16	tcp_gso_size;
+		};
 	};

 	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
@@ -790,10 +793,10 @@ static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
 	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
 }

-/* This is valid iff tcp_skb_pcount() > 1. */
+/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
 static inline int tcp_skb_mss(const struct sk_buff *skb)
 {
-	return skb_shinfo(skb)->gso_size;
+	return TCP_SKB_CB(skb)->tcp_gso_size;
 }

 /* Events passed to congestion control interface */
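Note that the new anonymous struct costs no cb[] space: the two u16 GSO fields exactly overlay the input-path-only tcp_tw_isn inside the existing union. A standalone C11 sketch of the layout, reproducing just the union for the size check (field names mirror the patch; the surrounding tcp_skb_cb is omitted):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the tcp_skb_cb union above: the write-queue-only GSO pair
 * aliases the input-path-only timewait ISN, so the cb does not grow. */
union cb_word {
	uint32_t tcp_tw_isn;            /* input path only */
	struct {
		uint16_t tcp_gso_segs;  /* write queue only */
		uint16_t tcp_gso_size;
	};
};

int main(void)
{
	static_assert(sizeof(union cb_word) == sizeof(uint32_t),
		      "GSO pair must fit in the existing 4 bytes");
	printf("union cb_word: %zu bytes\n", sizeof(union cb_word));
	return 0;
}
```

This overlay is also why tcp_skb_mss() is now documented as valid only for write-queue skbs, and why the offload paths in net/ipv4/tcp_offload.c below switch back to reading skb_shinfo(skb)->gso_size: by GSO/GRO time the cb no longer carries TCP's private fields.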

--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1316,16 +1316,12 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	 * code can come after this skb later on it's better to keep
 	 * setting gso_size to something.
 	 */
-	if (!skb_shinfo(prev)->gso_size) {
-		skb_shinfo(prev)->gso_size = mss;
-		skb_shinfo(prev)->gso_type = sk->sk_gso_type;
-	}
+	if (!TCP_SKB_CB(prev)->tcp_gso_size)
+		TCP_SKB_CB(prev)->tcp_gso_size = mss;

 	/* CHECKME: To clear or not to clear? Mimics normal skb currently */
-	if (tcp_skb_pcount(skb) <= 1) {
-		skb_shinfo(skb)->gso_size = 0;
-		skb_shinfo(skb)->gso_type = 0;
-	}
+	if (tcp_skb_pcount(skb) <= 1)
+		TCP_SKB_CB(skb)->tcp_gso_size = 0;

 	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
 	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
@@ -2252,7 +2248,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 			    (oldcnt >= packets))
 				break;

-			mss = skb_shinfo(skb)->gso_size;
+			mss = tcp_skb_mss(skb);
 			err = tcp_fragment(sk, skb, (packets - oldcnt) * mss,
 					   mss, GFP_ATOMIC);
 			if (err < 0)

--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -77,7 +77,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 	oldlen = (u16)~skb->len;
 	__skb_pull(skb, thlen);

-	mss = tcp_skb_mss(skb);
+	mss = skb_shinfo(skb)->gso_size;
 	if (unlikely(skb->len <= mss))
 		goto out;
@@ -242,7 +242,7 @@ found:
 		flush |= *(u32 *)((u8 *)th + i) ^
 			 *(u32 *)((u8 *)th2 + i);

-	mss = tcp_skb_mss(p);
+	mss = skb_shinfo(p)->gso_size;

 	flush |= (len - 1) >= mss;
 	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -402,8 +402,6 @@ static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
  */
 static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
 {
-	struct skb_shared_info *shinfo = skb_shinfo(skb);
-
 	skb->ip_summed = CHECKSUM_PARTIAL;
 	skb->csum = 0;
@@ -411,8 +409,6 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
 	TCP_SKB_CB(skb)->sacked = 0;

 	tcp_skb_pcount_set(skb, 1);
-	shinfo->gso_size = 0;
-	shinfo->gso_type = 0;

 	TCP_SKB_CB(skb)->seq = seq;
 	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
@@ -1003,6 +999,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	}

 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
+	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
 	if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
 		tcp_ecn_send(sk, skb, tcp_header_size);
@@ -1028,8 +1025,9 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 			      tcp_skb_pcount(skb));

 	tp->segs_out += tcp_skb_pcount(skb);
-	/* OK, its time to fill skb_shinfo(skb)->gso_segs */
+	/* OK, its time to fill skb_shinfo(skb)->gso_{segs|size} */
 	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
+	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);

 	/* Our usage of tstamp should remain private */
 	skb->tstamp.tv64 = 0;
@@ -1066,25 +1064,17 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 }

 /* Initialize TSO segments for a packet. */
-static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
-				 unsigned int mss_now)
+static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
 {
-	struct skb_shared_info *shinfo = skb_shinfo(skb);
-
-	/* Make sure we own this skb before messing gso_size/gso_segs */
-	WARN_ON_ONCE(skb_cloned(skb));
-
 	if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
 		/* Avoid the costly divide in the normal
 		 * non-TSO case.
 		 */
 		tcp_skb_pcount_set(skb, 1);
-		shinfo->gso_size = 0;
-		shinfo->gso_type = 0;
+		TCP_SKB_CB(skb)->tcp_gso_size = 0;
 	} else {
 		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
-		shinfo->gso_size = mss_now;
-		shinfo->gso_type = sk->sk_gso_type;
+		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
 	}
 }
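For reference, the segment-count logic that tcp_set_skb_tso_segs() keeps (only the storage location changed in this hunk): one segment, with no divide, when the payload fits in a single MSS, otherwise a round-up division. A tiny standalone sketch of that arithmetic, with the kernel's DIV_ROUND_UP macro inlined:

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Illustrative only: how the TSO segment count is derived;
 * the one-MSS case skips the divide entirely. */
static unsigned int pcount(unsigned int len, unsigned int mss)
{
	return len <= mss ? 1 : DIV_ROUND_UP(len, mss);
}

int main(void)
{
	printf("%u\n", pcount(1000, 1448));  /* 1: fits in one segment  */
	printf("%u\n", pcount(4000, 1448));  /* 3: 4000/1448 rounded up */
	return 0;
}
```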
@@ -1216,8 +1206,8 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 	old_factor = tcp_skb_pcount(skb);

 	/* Fix up tso_factor for both original and new SKB. */
-	tcp_set_skb_tso_segs(sk, skb, mss_now);
-	tcp_set_skb_tso_segs(sk, buff, mss_now);
+	tcp_set_skb_tso_segs(skb, mss_now);
+	tcp_set_skb_tso_segs(buff, mss_now);

 	/* If this packet has been sent out already, we must
 	 * adjust the various packet counters.
@@ -1297,7 +1287,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 	/* Any change of skb->len requires recalculation of tso factor. */
 	if (tcp_skb_pcount(skb) > 1)
-		tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
+		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));

 	return 0;
 }
@@ -1629,13 +1619,12 @@ static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
  * This must be invoked the first time we consider transmitting
  * SKB onto the wire.
  */
-static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
-			     unsigned int mss_now)
+static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
 {
 	int tso_segs = tcp_skb_pcount(skb);

 	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
-		tcp_set_skb_tso_segs(sk, skb, mss_now);
+		tcp_set_skb_tso_segs(skb, mss_now);
 		tso_segs = tcp_skb_pcount(skb);
 	}
 	return tso_segs;
@@ -1690,7 +1679,7 @@ static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
 	const struct tcp_sock *tp = tcp_sk(sk);
 	unsigned int cwnd_quota;

-	tcp_init_tso_segs(sk, skb, cur_mss);
+	tcp_init_tso_segs(skb, cur_mss);

 	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
 		return 0;
@@ -1759,8 +1748,8 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 	tcp_fragment_tstamp(skb, buff);

 	/* Fix up tso_factor for both original and new SKB. */
-	tcp_set_skb_tso_segs(sk, skb, mss_now);
-	tcp_set_skb_tso_segs(sk, buff, mss_now);
+	tcp_set_skb_tso_segs(skb, mss_now);
+	tcp_set_skb_tso_segs(buff, mss_now);

 	/* Link BUFF into the send queue. */
 	__skb_header_release(buff);
@@ -1994,7 +1983,7 @@ static int tcp_mtu_probe(struct sock *sk)
 						 skb->len, 0);
 			} else {
 				__pskb_trim_head(skb, copy);
-				tcp_set_skb_tso_segs(sk, skb, mss_now);
+				tcp_set_skb_tso_segs(skb, mss_now);
 			}
 			TCP_SKB_CB(skb)->seq += copy;
 		}
@@ -2004,7 +1993,7 @@ static int tcp_mtu_probe(struct sock *sk)
 		if (len >= probe_size)
 			break;
 	}
-	tcp_init_tso_segs(sk, nskb, nskb->len);
+	tcp_init_tso_segs(nskb, nskb->len);

 	/* We're ready to send. If this fails, the probe will
 	 * be resegmented into mss-sized pieces by tcp_write_xmit().
@@ -2066,7 +2055,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 	while ((skb = tcp_send_head(sk))) {
 		unsigned int limit;

-		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
+		tso_segs = tcp_init_tso_segs(skb, mss_now);
 		BUG_ON(!tso_segs);

 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
@@ -2620,7 +2609,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 		if (unlikely(oldpcount > 1)) {
 			if (skb_unclone(skb, GFP_ATOMIC))
 				return -ENOMEM;
-			tcp_init_tso_segs(sk, skb, cur_mss);
+			tcp_init_tso_segs(skb, cur_mss);
 			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
 		}
 	}
@@ -3457,7 +3446,7 @@ int tcp_write_wakeup(struct sock *sk, int mib)
 		if (tcp_fragment(sk, skb, seg_size, mss, GFP_ATOMIC))
 			return -1;
 	} else if (!tcp_skb_pcount(skb))
-		tcp_set_skb_tso_segs(sk, skb, mss);
+		tcp_set_skb_tso_segs(skb, mss);

 	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
 	err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);