mirror of
https://github.com/FEX-Emu/linux.git
synced 2024-12-29 13:00:35 +00:00
91a48a2e85
Currently the UFO fragmentation process does not correctly handle inner
UDP frames.
(The following tcpdumps are captured on the parent interface with ufo
disabled while tunnel has ufo enabled, 2000 bytes payload, mtu 1280,
both sit device):
IPv6:
16:39:10.031613 IP (tos 0x0, ttl 64, id 3208, offset 0, flags [DF], proto IPv6 (41), length 1300)
192.168.122.151 > 1.1.1.1: IP6 (hlim 64, next-header Fragment (44) payload length: 1240) 2001::1 > 2001::8: frag (0x00000001:0|1232) 44883 > distinct: UDP, length 2000
16:39:10.031709 IP (tos 0x0, ttl 64, id 3209, offset 0, flags [DF], proto IPv6 (41), length 844)
192.168.122.151 > 1.1.1.1: IP6 (hlim 64, next-header Fragment (44) payload length: 784) 2001::1 > 2001::8: frag (0x00000001:0|776) 58979 > 46366: UDP, length 5471
We can see that fragmentation header offset is not correctly updated.
(fragmentation id handling is corrected by 916e4cf46d
("ipv6: reuse
ip6_frag_id from ip6_ufo_append_data")).
IPv4:
16:39:57.737761 IP (tos 0x0, ttl 64, id 3209, offset 0, flags [DF], proto IPIP (4), length 1296)
192.168.122.151 > 1.1.1.1: IP (tos 0x0, ttl 64, id 57034, offset 0, flags [none], proto UDP (17), length 1276)
192.168.99.1.35961 > 192.168.99.2.distinct: UDP, length 2000
16:39:57.738028 IP (tos 0x0, ttl 64, id 3210, offset 0, flags [DF], proto IPIP (4), length 792)
192.168.122.151 > 1.1.1.1: IP (tos 0x0, ttl 64, id 57035, offset 0, flags [none], proto UDP (17), length 772)
192.168.99.1.13531 > 192.168.99.2.20653: UDP, length 51109
In this case fragmentation id is incremented and offset is not updated.
First, I aligned inet_gso_segment and ipv6_gso_segment:
* align naming of flags
* ipv6_gso_segment: setting skb->encapsulation is unnecessary, as we
always ensure that the state of this flag is left untouched when
returning from the upper gso segmentation function
* ipv6_gso_segment: move skb_reset_inner_headers below updating the
fragmentation header data, we don't care for updating fragmentation
header data
* remove currently unneeded comment indicating skb->encapsulation might
get changed by upper gso_segment callback (gre and udp-tunnel reset
encapsulation after segmentation on each fragment)
If we encounter an IPIP or SIT gso skb we now check for the protocol ==
IPPROTO_UDP and that we at least have already traversed another ip(6)
protocol header.
The reason why we have to special case GSO_IPIP and GSO_SIT is that
we reset skb->encapsulation to 0 while skb_mac_gso_segment the inner
protocol of GSO_UDP_TUNNEL or GSO_GRE packets.
Reported-by: Wolfgang Walter <linux@stwm.de>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Tom Herbert <therbert@google.com>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
340 lines
8.0 KiB
C
340 lines
8.0 KiB
C
/*
|
|
* IPV6 GSO/GRO offload support
|
|
* Linux INET6 implementation
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public License
|
|
* as published by the Free Software Foundation; either version
|
|
* 2 of the License, or (at your option) any later version.
|
|
*/
|
|
|
|
#include <linux/kernel.h>
|
|
#include <linux/socket.h>
|
|
#include <linux/netdevice.h>
|
|
#include <linux/skbuff.h>
|
|
#include <linux/printk.h>
|
|
|
|
#include <net/protocol.h>
|
|
#include <net/ipv6.h>
|
|
|
|
#include "ip6_offload.h"
|
|
|
|
static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
|
|
{
|
|
const struct net_offload *ops = NULL;
|
|
|
|
for (;;) {
|
|
struct ipv6_opt_hdr *opth;
|
|
int len;
|
|
|
|
if (proto != NEXTHDR_HOP) {
|
|
ops = rcu_dereference(inet6_offloads[proto]);
|
|
|
|
if (unlikely(!ops))
|
|
break;
|
|
|
|
if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
|
|
break;
|
|
}
|
|
|
|
if (unlikely(!pskb_may_pull(skb, 8)))
|
|
break;
|
|
|
|
opth = (void *)skb->data;
|
|
len = ipv6_optlen(opth);
|
|
|
|
if (unlikely(!pskb_may_pull(skb, len)))
|
|
break;
|
|
|
|
proto = opth->nexthdr;
|
|
__skb_pull(skb, len);
|
|
}
|
|
|
|
return proto;
|
|
}
|
|
|
|
static int ipv6_gso_send_check(struct sk_buff *skb)
|
|
{
|
|
const struct ipv6hdr *ipv6h;
|
|
const struct net_offload *ops;
|
|
int err = -EINVAL;
|
|
|
|
if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
|
|
goto out;
|
|
|
|
ipv6h = ipv6_hdr(skb);
|
|
__skb_pull(skb, sizeof(*ipv6h));
|
|
err = -EPROTONOSUPPORT;
|
|
|
|
ops = rcu_dereference(inet6_offloads[
|
|
ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
|
|
|
|
if (likely(ops && ops->callbacks.gso_send_check)) {
|
|
skb_reset_transport_header(skb);
|
|
err = ops->callbacks.gso_send_check(skb);
|
|
}
|
|
|
|
out:
|
|
return err;
|
|
}
|
|
|
|
/* Segment a GSO skb whose network header is IPv6.  Delegates the actual
 * segmentation to the upper protocol's gso_segment callback, then fixes
 * up the IPv6 header of every resulting segment (payload length and,
 * for UFO, the fragment-header offset and MF bit).
 *
 * Returns the list of segments, or an ERR_PTR on failure.
 */
static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct ipv6hdr *ipv6h;
	const struct net_offload *ops;
	int proto;
	struct frag_hdr *fptr;
	unsigned int unfrag_ip6hlen;
	u8 *prevhdr;
	int offset = 0;		/* running fragment offset for UFO fixup */
	bool encap, udpfrag;
	int nhoff;

	/* Reject any gso_type bit we do not know how to handle. */
	if (unlikely(skb_shinfo(skb)->gso_type &
		     ~(SKB_GSO_UDP |
		       SKB_GSO_DODGY |
		       SKB_GSO_TCP_ECN |
		       SKB_GSO_GRE |
		       SKB_GSO_IPIP |
		       SKB_GSO_SIT |
		       SKB_GSO_UDP_TUNNEL |
		       SKB_GSO_MPLS |
		       SKB_GSO_TCPV6 |
		       0)))
		goto out;

	skb_reset_network_header(skb);
	/* Offset of this IPv6 header from the MAC header; used below to
	 * relocate the header in each segment of the returned list.
	 */
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;

	/* encap_level > 0 means an outer header was already segmented
	 * through this path: use the tunnel device's features instead.
	 */
	encap = SKB_GSO_CB(skb)->encap_level > 0;
	if (encap)
		features = skb->dev->hw_enc_features & netif_skb_features(skb);
	SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	segs = ERR_PTR(-EPROTONOSUPPORT);

	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);

	/* Decide whether the per-segment fragment-header fixup below is
	 * needed (UFO).  IPIP/SIT inner protocols are segmented with
	 * skb->encapsulation still set, so for them the encap_level flag
	 * decides; for UDP_TUNNEL/GRE, skb->encapsulation is reset before
	 * the inner protocol is segmented, hence the second branch.
	 */
	if (skb->encapsulation &&
	    skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP))
		udpfrag = proto == IPPROTO_UDP && encap;
	else
		udpfrag = proto == IPPROTO_UDP && !skb->encapsulation;

	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		skb_reset_transport_header(skb);
		segs = ops->callbacks.gso_segment(skb, features);
	}

	if (IS_ERR(segs))
		goto out;

	/* Fix up every segment's IPv6 header. */
	for (skb = segs; skb; skb = skb->next) {
		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
		ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h));
		skb->network_header = (u8 *)ipv6h - skb->head;

		if (udpfrag) {
			/* Locate the fragment header past the
			 * unfragmentable part and rewrite its offset.
			 */
			unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
			fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
			fptr->frag_off = htons(offset);
			/* Every fragment but the last carries MF. */
			if (skb->next != NULL)
				fptr->frag_off |= htons(IP6_MF);
			offset += (ntohs(ipv6h->payload_len) -
				   sizeof(struct frag_hdr));
		}
		if (encap)
			skb_reset_inner_headers(skb);
	}

out:
	return segs;
}
|
|
|
|
/* Return the total length of all the extension hdrs, following the same
|
|
* logic in ipv6_gso_pull_exthdrs() when parsing ext-hdrs.
|
|
*/
|
|
static int ipv6_exthdrs_len(struct ipv6hdr *iph,
|
|
const struct net_offload **opps)
|
|
{
|
|
struct ipv6_opt_hdr *opth = (void *)iph;
|
|
int len = 0, proto, optlen = sizeof(*iph);
|
|
|
|
proto = iph->nexthdr;
|
|
for (;;) {
|
|
if (proto != NEXTHDR_HOP) {
|
|
*opps = rcu_dereference(inet6_offloads[proto]);
|
|
if (unlikely(!(*opps)))
|
|
break;
|
|
if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR))
|
|
break;
|
|
}
|
|
opth = (void *)opth + optlen;
|
|
optlen = ipv6_optlen(opth);
|
|
len += optlen;
|
|
proto = opth->nexthdr;
|
|
}
|
|
return len;
|
|
}
|
|
|
|
/* GRO receive handler for IPv6: parse the header, find the upper
 * protocol's gro_receive callback, and mark which held packets belong
 * to the same flow so they can be merged.  Returns the list position
 * from the upper handler, or NULL when the packet is held/flushed.
 */
static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct ipv6hdr *iph;
	unsigned int nlen;
	unsigned int hlen;
	unsigned int off;
	u16 flush = 1;		/* assume flush until a handler is found */
	int proto;
	__wsum csum;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	/* Fast path: header already linear; otherwise pull it in. */
	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		iph = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!iph))
			goto out;
	}

	skb_set_network_header(skb, off);
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	/* Flush if the advertised payload length disagrees with the
	 * actual GRO length.
	 */
	flush += ntohs(iph->payload_len) != skb_gro_len(skb);

	rcu_read_lock();
	proto = iph->nexthdr;
	ops = rcu_dereference(inet6_offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive) {
		/* No direct handler: skip extension headers and retry.
		 * The pull/push dance keeps skb->data consistent while
		 * ipv6_gso_pull_exthdrs() advances through the chain.
		 */
		__pskb_pull(skb, skb_gro_offset(skb));
		proto = ipv6_gso_pull_exthdrs(skb, proto);
		skb_gro_pull(skb, -skb_transport_offset(skb));
		skb_reset_transport_header(skb);
		__skb_push(skb, skb_gro_offset(skb));

		ops = rcu_dereference(inet6_offloads[proto]);
		if (!ops || !ops->callbacks.gro_receive)
			goto out_unlock;

		/* __pskb_pull may have moved skb->head; revalidate. */
		iph = ipv6_hdr(skb);
	}

	NAPI_GRO_CB(skb)->proto = proto;

	/* A handler exists: drop the default flush. */
	flush--;
	nlen = skb_network_header_len(skb);

	for (p = *head; p; p = p->next) {
		const struct ipv6hdr *iph2;
		__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = (struct ipv6hdr *)(p->data + off);
		first_word = *(__be32 *)iph ^ *(__be32 *)iph2;

		/* All fields must match except length and Traffic Class.
		 * XXX skbs on the gro_list have all been parsed and pulled
		 * already so we don't need to compare nlen
		 * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops)))
		 * memcmp() alone below is sufficient, right?
		 */
		if ((first_word & htonl(0xF00FFFFF)) ||
		    memcmp(&iph->nexthdr, &iph2->nexthdr,
			   nlen - offsetof(struct ipv6hdr, nexthdr))) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		/* flush if Traffic Class fields are different */
		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
		NAPI_GRO_CB(p)->flush |= flush;
	}

	NAPI_GRO_CB(skb)->flush |= flush;

	/* Preserve skb->csum across the upper handler's pull. */
	csum = skb->csum;
	skb_postpull_rcsum(skb, iph, skb_network_header_len(skb));

	pp = ops->callbacks.gro_receive(head, skb);

	skb->csum = csum;

out_unlock:
	rcu_read_unlock();

out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
|
|
|
|
static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
|
|
{
|
|
const struct net_offload *ops;
|
|
struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
|
|
int err = -ENOSYS;
|
|
|
|
iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
|
|
|
|
rcu_read_lock();
|
|
|
|
nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
|
|
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
|
|
goto out_unlock;
|
|
|
|
err = ops->callbacks.gro_complete(skb, nhoff);
|
|
|
|
out_unlock:
|
|
rcu_read_unlock();
|
|
|
|
return err;
|
|
}
|
|
|
|
/* Link-layer offload hooks for ETH_P_IPV6 packets; registered with
 * dev_add_offload() in ipv6_offload_init().
 */
static struct packet_offload ipv6_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IPV6),
	.callbacks = {
		.gso_send_check = ipv6_gso_send_check,
		.gso_segment = ipv6_gso_segment,
		.gro_receive = ipv6_gro_receive,
		.gro_complete = ipv6_gro_complete,
	},
};
|
|
|
|
/* Offload hooks used when IPv6 is the inner protocol of an IPv4 tunnel
 * (SIT); registered for IPPROTO_IPV6 in ipv6_offload_init().  Only
 * segmentation callbacks are provided here — no GRO callbacks.
 */
static const struct net_offload sit_offload = {
	.callbacks = {
		.gso_send_check = ipv6_gso_send_check,
		.gso_segment = ipv6_gso_segment,
	},
};
|
|
|
|
/* Register all IPv6 GSO/GRO offload handlers at boot.  Always returns
 * 0: a failed sub-registration is logged but not treated as fatal.
 */
static int __init ipv6_offload_init(void)
{

	if (tcpv6_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (udp_offload_init() < 0)
		pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
	if (ipv6_exthdrs_offload_init() < 0)
		pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);

	/* Hook IPv6 processing at the link layer... */
	dev_add_offload(&ipv6_packet_offload);

	/* ...and as an inner protocol of IPv4 tunnels (SIT). */
	inet_add_offload(&sit_offload, IPPROTO_IPV6);

	return 0;
}
|
|
|
|
fs_initcall(ipv6_offload_init);
|