Mirror of https://github.com/FEX-Emu/linux.git (synced 2024-12-14 21:01:29 +00:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (27 commits)
slcan: fix ldisc->open retval
net/usb: mark LG VL600 LTE modem ethernet interface as WWAN
xfrm: Don't allow esn with disabled anti replay detection
xfrm: Assign the inner mode output function to the dst entry
net: dev_close() should check IFF_UP
vlan: fix GVRP at dismantle time
netfilter: revert a2361c8735
netfilter: IPv6: fix DSCP mangle code
netfilter: IPv6: initialize TOS field in REJECT target module
IPVS: init and cleanup restructuring
IPVS: Change of socket usage to enable name space exit.
netfilter: ebtables: only call xt_compat_add_offset once per rule
netfilter: fix ebtables compat support
netfilter: ctnetlink: fix timestamp support for new conntracks
pch_gbe: support ML7223 IOH
PCH_GbE : Fixed the issue of checksum judgment
PCH_GbE : Fixed the issue of collision detection
NET: slip, fix ldisc->open retval
be2net: Fixed bugs related to PVID.
ehea: fix wrongly reported speed and port
...
This commit is contained in: commit 9f381a61f5
@@ -2536,7 +2536,7 @@ config S6GMAC
source "drivers/net/stmmac/Kconfig"

config PCH_GBE
tristate "PCH Gigabit Ethernet"
tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE"
depends on PCI
select MII
---help---
@@ -2548,6 +2548,12 @@ config PCH_GBE
to Gigabit Ethernet.
This driver enables Gigabit Ethernet function.

This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
Output Hub), ML7223.
ML7223 IOH is for MP(Media Phone) use.
ML7223 is companion chip for Intel Atom E6xx series.
ML7223 is completely compatible for Intel EG20T PCH.

endif # NETDEV_1000

#
@@ -213,7 +213,7 @@ struct be_rx_stats {

struct be_rx_compl_info {
u32 rss_hash;
u16 vid;
u16 vlan_tag;
u16 pkt_size;
u16 rxq_idx;
u16 mac_id;
@@ -132,7 +132,7 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
struct be_async_event_grp5_pvid_state *evt)
{
if (evt->enabled)
adapter->pvid = evt->tag;
adapter->pvid = le16_to_cpu(evt->tag);
else
adapter->pvid = 0;
}
@@ -1018,7 +1018,8 @@ static void be_rx_compl_process(struct be_adapter *adapter,
kfree_skb(skb);
return;
}
vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
rxcp->vlan_tag);
} else {
netif_receive_skb(skb);
}
@@ -1076,7 +1077,8 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
if (likely(!rxcp->vlanf))
napi_gro_frags(&eq_obj->napi);
else
vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
rxcp->vlan_tag);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
@@ -1102,7 +1104,8 @@ static void be_parse_rx_compl_v1(struct be_adapter *adapter,
rxcp->pkt_type =
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl);
rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl);
rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
@@ -1128,7 +1131,8 @@ static void be_parse_rx_compl_v0(struct be_adapter *adapter,
rxcp->pkt_type =
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl);
rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl);
rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
@@ -1155,9 +1159,11 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
rxcp->vlanf = 0;

if (!lancer_chip(adapter))
rxcp->vid = swab16(rxcp->vid);
rxcp->vlan_tag = swab16(rxcp->vlan_tag);

if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid])
if (((adapter->pvid & VLAN_VID_MASK) ==
(rxcp->vlan_tag & VLAN_VID_MASK)) &&
!adapter->vlan_tag[rxcp->vlan_tag])
rxcp->vlanf = 0;

/* As the compl has been parsed, reset it; we wont touch it again */
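A minimal userspace sketch of the comparison the be2net hunk above moves to: masking both the port VID (pvid) and the received tag with the 12-bit VLAN ID mask before comparing, so priority/CFI bits cannot defeat the match. The mask value and sample tags are assumptions for illustration, not driver code.

```c
#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK 0x0fff  /* low 12 bits of the TCI carry the VLAN ID */

/* Return 1 when the received tag names the same VLAN as the port VID,
 * ignoring the PCP/DEI bits in the upper nibble of the TCI. */
static int same_vlan(uint16_t pvid, uint16_t vlan_tag)
{
	return (pvid & VLAN_VID_MASK) == (vlan_tag & VLAN_VID_MASK);
}

int main(void)
{
	uint16_t pvid = 0x0064;  /* VLAN 100, no priority bits */
	uint16_t tag  = 0xa064;  /* VLAN 100 with priority bits set */

	printf("raw compare:    %d\n", pvid == tag);           /* 0: misses */
	printf("masked compare: %d\n", same_vlan(pvid, tag));  /* 1: matches */
	return 0;
}
```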
@@ -346,10 +346,10 @@ static void sja1000_rx(struct net_device *dev)
| (priv->read_reg(priv, REG_ID2) >> 5);
}

cf->can_dlc = get_can_dlc(fi & 0x0F);
if (fi & FI_RTR) {
id |= CAN_RTR_FLAG;
} else {
cf->can_dlc = get_can_dlc(fi & 0x0F);
for (i = 0; i < cf->can_dlc; i++)
cf->data[i] = priv->read_reg(priv, dreg++);
}
@@ -583,7 +583,9 @@ static int slcan_open(struct tty_struct *tty)
/* Done. We have linked the TTY line to a channel. */
rtnl_unlock();
tty->receive_room = 65536; /* We don't flow control */
return sl->dev->base_addr;

/* TTY layer expects 0 on success */
return 0;

err_free_chan:
sl->tty = NULL;
@@ -55,15 +55,20 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->duplex = -1;
}

cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full
| SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half
| SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half
| SUPPORTED_Autoneg | SUPPORTED_FIBRE);
if (cmd->speed == SPEED_10000) {
cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
cmd->port = PORT_FIBRE;
} else {
cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full
| SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full
| SUPPORTED_10baseT_Half | SUPPORTED_Autoneg
| SUPPORTED_TP);
cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg
| ADVERTISED_TP);
cmd->port = PORT_TP;
}

cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Autoneg
| ADVERTISED_FIBRE);

cmd->port = PORT_FIBRE;
cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE;

return 0;
@@ -34,6 +34,10 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_COPYBREAK_DEFAULT 256
#define PCH_GBE_PCI_BAR 1

/* Macros for ML7223 */
#define PCI_VENDOR_ID_ROHM 0x10db
#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013

#define PCH_GBE_TX_WEIGHT 64
#define PCH_GBE_RX_WEIGHT 64
#define PCH_GBE_RX_BUFFER_WRITE 16
@@ -43,8 +47,7 @@ const char pch_driver_version[] = DRV_VERSION;

#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
PCH_GBE_CHIP_TYPE_INTERNAL | \
PCH_GBE_RGMII_MODE_RGMII | \
PCH_GBE_CRS_SEL \
PCH_GBE_RGMII_MODE_RGMII \
)

/* Ethertype field values */
@@ -1494,12 +1497,11 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
/* Write meta date of skb */
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, netdev);
if ((tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) ==
PCH_GBE_RXD_ACC_STAT_TCPIPOK) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
skb->ip_summed = CHECKSUM_NONE;
}
else
skb->ip_summed = CHECKSUM_UNNECESSARY;

napi_gro_receive(&adapter->napi, skb);
(*work_done)++;
pr_debug("Receive skb->ip_summed: %d length: %d\n",
@@ -2420,6 +2422,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
.class = (PCI_CLASS_NETWORK_ETHERNET << 8),
.class_mask = (0xFFFF00)
},
{.vendor = PCI_VENDOR_ID_ROHM,
.device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = (PCI_CLASS_NETWORK_ETHERNET << 8),
.class_mask = (0xFFFF00)
},
/* required last entry */
{0}
};
@@ -853,7 +853,9 @@ static int slip_open(struct tty_struct *tty)
/* Done. We have linked the TTY line to a channel. */
rtnl_unlock();
tty->receive_room = 65536; /* We don't flow control */
return sl->dev->base_addr;

/* TTY layer expects 0 on success */
return 0;

err_free_bufs:
sl_free_bufs(sl);
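Both the slcan and slip hunks above switch the line-discipline open path to return 0 instead of the device base address. A standalone sketch with a hypothetical caller that treats any nonzero return as failure shows why the old value broke attachment; this is not the TTY layer itself.

```c
#include <stdio.h>

/* Hypothetical ldisc open callbacks: the old one leaks an internal
 * value, the fixed one follows the "0 on success" convention. */
static int open_old(void)   { return 0x1234; /* e.g. sl->dev->base_addr */ }
static int open_fixed(void) { return 0;      /* TTY layer expects 0 */ }

static const char *attach(int (*open_fn)(void))
{
	int err = open_fn();

	/* Callers commonly treat any nonzero return as an error code. */
	return err ? "attach failed" : "attached";
}

int main(void)
{
	printf("old:   %s\n", attach(open_old));
	printf("fixed: %s\n", attach(open_fixed));
	return 0;
}
```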
@@ -567,7 +567,7 @@ static const struct usb_device_id products [] = {
{
USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = 0,
.driver_info = (unsigned long)&wwan_info,
},

/*
@@ -65,6 +65,7 @@
#define IPHETH_USBINTF_PROTO 1

#define IPHETH_BUF_SIZE 1516
#define IPHETH_IP_ALIGN 2 /* padding at front of URB */
#define IPHETH_TX_TIMEOUT (5 * HZ)

#define IPHETH_INTFNUM 2
@@ -202,18 +203,21 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
return;
}

len = urb->actual_length;
buf = urb->transfer_buffer;
if (urb->actual_length <= IPHETH_IP_ALIGN) {
dev->net->stats.rx_length_errors++;
return;
}
len = urb->actual_length - IPHETH_IP_ALIGN;
buf = urb->transfer_buffer + IPHETH_IP_ALIGN;

skb = dev_alloc_skb(NET_IP_ALIGN + len);
skb = dev_alloc_skb(len);
if (!skb) {
err("%s: dev_alloc_skb: -ENOMEM", __func__);
dev->net->stats.rx_dropped++;
return;
}

skb_reserve(skb, NET_IP_ALIGN);
memcpy(skb_put(skb, len), buf + NET_IP_ALIGN, len - NET_IP_ALIGN);
memcpy(skb_put(skb, len), buf, len);
skb->dev = dev->net;
skb->protocol = eth_type_trans(skb, dev->net);

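A small sketch of the receive-copy change in the ipheth hunk above: the two IPHETH_IP_ALIGN padding bytes are skipped in the source buffer and the copy length is reduced accordingly, instead of copying from a NET_IP_ALIGN offset. The buffer contents are made up for the example.

```c
#include <stdio.h>
#include <string.h>

#define IPHETH_IP_ALIGN 2  /* padding at the front of the URB buffer */

int main(void)
{
	/* Pretend URB payload: 2 padding bytes, then the Ethernet frame. */
	unsigned char urb_buf[] = { 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef };
	size_t actual_length = sizeof(urb_buf);

	if (actual_length <= IPHETH_IP_ALIGN)
		return 1;                  /* runt: count it as a length error */

	size_t len = actual_length - IPHETH_IP_ALIGN;
	const unsigned char *buf = urb_buf + IPHETH_IP_ALIGN;

	unsigned char frame[sizeof(urb_buf)];
	memcpy(frame, buf, len);           /* copy only the real frame bytes */

	printf("copied %zu bytes, first byte 0x%02x\n", len, frame[0]);
	return 0;
}
```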
@@ -645,6 +645,7 @@ int usbnet_stop (struct net_device *net)
struct driver_info *info = dev->driver_info;
int retval;

clear_bit(EVENT_DEV_OPEN, &dev->flags);
netif_stop_queue (net);

netif_info(dev, ifdown, dev->net,
@@ -1524,9 +1525,12 @@ int usbnet_resume (struct usb_interface *intf)
smp_mb();
clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
spin_unlock_irq(&dev->txq.lock);
if (!(dev->txq.qlen >= TX_QLEN(dev)))
netif_start_queue(dev->net);
tasklet_schedule (&dev->bh);

if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
if (!(dev->txq.qlen >= TX_QLEN(dev)))
netif_start_queue(dev->net);
tasklet_schedule (&dev->bh);
}
}
return 0;
}
@@ -178,6 +178,7 @@ static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
int i;
unsigned long flags;
u32 events = le32_to_cpu(adapter->shared->ecr);
if (!events)
return;
@@ -190,10 +191,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)

/* Check if there is an error on xmit/recv queues */
if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
spin_lock(&adapter->cmd_lock);
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_QUEUE_STATUS);
spin_unlock(&adapter->cmd_lock);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);

for (i = 0; i < adapter->num_tx_queues; i++)
if (adapter->tqd_start[i].status.stopped)
@@ -2733,13 +2734,14 @@ static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
{
u32 cfg;
unsigned long flags;

/* intr settings */
spin_lock(&adapter->cmd_lock);
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_CONF_INTR);
cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock(&adapter->cmd_lock);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
adapter->intr.type = cfg & 0x3;
adapter->intr.mask_mode = (cfg >> 2) & 0x3;

@@ -791,6 +791,7 @@ struct ip_vs_app {

/* IPVS in network namespace */
struct netns_ipvs {
int gen; /* Generation */
int enable; /* enable like nf_hooks do */
/*
* Hash table: for real service lookups
*/
@@ -1089,6 +1090,22 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
atomic_inc(&ctl_cp->n_control);
}

/*
* IPVS netns init & cleanup functions
*/
extern int __ip_vs_estimator_init(struct net *net);
extern int __ip_vs_control_init(struct net *net);
extern int __ip_vs_protocol_init(struct net *net);
extern int __ip_vs_app_init(struct net *net);
extern int __ip_vs_conn_init(struct net *net);
extern int __ip_vs_sync_init(struct net *net);
extern void __ip_vs_conn_cleanup(struct net *net);
extern void __ip_vs_app_cleanup(struct net *net);
extern void __ip_vs_protocol_cleanup(struct net *net);
extern void __ip_vs_control_cleanup(struct net *net);
extern void __ip_vs_estimator_cleanup(struct net *net);
extern void __ip_vs_sync_cleanup(struct net *net);
extern void __ip_vs_service_cleanup(struct net *net);

/*
* IPVS application functions
@@ -324,6 +324,7 @@ struct xfrm_state_afinfo {
int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
int (*output)(struct sk_buff *skb);
int (*output_finish)(struct sk_buff *skb);
int (*extract_input)(struct xfrm_state *x,
struct sk_buff *skb);
int (*extract_output)(struct xfrm_state *x,
@@ -1454,6 +1455,7 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
extern int xfrm4_output(struct sk_buff *skb);
extern int xfrm4_output_finish(struct sk_buff *skb);
extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
extern int xfrm6_extract_header(struct sk_buff *skb);
@@ -1470,6 +1472,7 @@ extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr);
extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
extern int xfrm6_output(struct sk_buff *skb);
extern int xfrm6_output_finish(struct sk_buff *skb);
extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
u8 **prevhdr);

@@ -124,6 +124,9 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)

grp->nr_vlans--;

if (vlan->flags & VLAN_FLAG_GVRP)
vlan_gvrp_request_leave(dev);

vlan_group_set_device(grp, vlan_id, NULL);
if (!grp->killall)
synchronize_net();
@@ -487,9 +487,6 @@ static int vlan_dev_stop(struct net_device *dev)
struct vlan_dev_info *vlan = vlan_dev_info(dev);
struct net_device *real_dev = vlan->real_dev;

if (vlan->flags & VLAN_FLAG_GVRP)
vlan_gvrp_request_leave(dev);

dev_mc_unsync(real_dev, dev);
dev_uc_unsync(real_dev, dev);
if (dev->flags & IFF_ALLMULTI)
@@ -1766,7 +1766,7 @@ static int compat_table_info(const struct ebt_table_info *info,

newinfo->entries_size = size;

xt_compat_init_offsets(AF_INET, info->nentries);
xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
entries, newinfo);
}
@@ -1882,7 +1882,7 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
struct xt_match *match;
struct xt_target *wt;
void *dst = NULL;
int off, pad = 0, ret = 0;
int off, pad = 0;
unsigned int size_kern, entry_offset, match_size = mwt->match_size;

strlcpy(name, mwt->u.name, sizeof(name));
@@ -1935,13 +1935,6 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
break;
}

if (!dst) {
ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
off + ebt_compat_entry_padsize());
if (ret < 0)
return ret;
}

state->buf_kern_offset += match_size + off;
state->buf_user_offset += match_size;
pad = XT_ALIGN(size_kern) - size_kern;
@@ -2016,50 +2009,6 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
return growth;
}

#define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) \
({ \
unsigned int __i; \
int __ret = 0; \
struct compat_ebt_entry_mwt *__watcher; \
\
for (__i = e->watchers_offset; \
__i < (e)->target_offset; \
__i += __watcher->watcher_size + \
sizeof(struct compat_ebt_entry_mwt)) { \
__watcher = (void *)(e) + __i; \
__ret = fn(__watcher , ## args); \
if (__ret != 0) \
break; \
} \
if (__ret == 0) { \
if (__i != (e)->target_offset) \
__ret = -EINVAL; \
} \
__ret; \
})

#define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) \
({ \
unsigned int __i; \
int __ret = 0; \
struct compat_ebt_entry_mwt *__match; \
\
for (__i = sizeof(struct ebt_entry); \
__i < (e)->watchers_offset; \
__i += __match->match_size + \
sizeof(struct compat_ebt_entry_mwt)) { \
__match = (void *)(e) + __i; \
__ret = fn(__match , ## args); \
if (__ret != 0) \
break; \
} \
if (__ret == 0) { \
if (__i != (e)->watchers_offset) \
__ret = -EINVAL; \
} \
__ret; \
})

/* called for all ebt_entry structures. */
static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
unsigned int *total,
@@ -2132,6 +2081,14 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
}
}

if (state->buf_kern_start == NULL) {
unsigned int offset = buf_start - (char *) base;

ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
if (ret < 0)
return ret;
}

startoff = state->buf_user_offset - startoff;

BUG_ON(*total < startoff);
@@ -2240,6 +2197,7 @@ static int compat_do_replace(struct net *net, void __user *user,

xt_compat_lock(NFPROTO_BRIDGE);

xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
if (ret < 0)
goto out_unlock;
@@ -1284,11 +1284,13 @@ static int dev_close_many(struct list_head *head)
*/
int dev_close(struct net_device *dev)
{
LIST_HEAD(single);
if (dev->flags & IFF_UP) {
LIST_HEAD(single);

list_add(&dev->unreg_list, &single);
dev_close_many(&single);
list_del(&single);
list_add(&dev->unreg_list, &single);
dev_close_many(&single);
list_del(&single);
}
return 0;
}
EXPORT_SYMBOL(dev_close);
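A hedged sketch of the guard the dev_close() hunk above adds: only walk the close path when the interface is actually up, so closing an already-down device becomes a harmless no-op. The flag value and device struct here are illustrative, not the kernel's.

```c
#include <stdio.h>

#define IFF_UP 0x1  /* illustrative flag bit, mirroring the uapi value */

struct fake_dev {
	unsigned int flags;
	const char *name;
};

static int dev_close_sketch(struct fake_dev *dev)
{
	if (dev->flags & IFF_UP) {
		/* ...queue the device and run the real teardown here... */
		printf("%s: closing\n", dev->name);
		dev->flags &= ~IFF_UP;
	}
	return 0;  /* closing a down device is simply a no-op */
}

int main(void)
{
	struct fake_dev eth0 = { .flags = IFF_UP, .name = "eth0" };

	dev_close_sketch(&eth0);  /* closes */
	dev_close_sketch(&eth0);  /* already down: nothing happens */
	return 0;
}
```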
@@ -123,6 +123,8 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
case DCCPO_CHANGE_L ... DCCPO_CONFIRM_R:
if (pkt_type == DCCP_PKT_DATA) /* RFC 4340, 6 */
break;
if (len == 0)
goto out_invalid_option;
rc = dccp_feat_parse_options(sk, dreq, mandatory, opt,
*value, value + 1, len - 1);
if (rc)
@@ -223,31 +223,30 @@ static void ip_expire(unsigned long arg)

if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
struct sk_buff *head = qp->q.fragments;
const struct iphdr *iph;
int err;

rcu_read_lock();
head->dev = dev_get_by_index_rcu(net, qp->iif);
if (!head->dev)
goto out_rcu_unlock;

/* skb dst is stale, drop it, and perform route lookup again */
skb_dst_drop(head);
iph = ip_hdr(head);
err = ip_route_input_noref(head, iph->daddr, iph->saddr,
iph->tos, head->dev);
if (err)
goto out_rcu_unlock;

/*
* Only search router table for the head fragment,
* when defraging timeout at PRE_ROUTING HOOK.
* Only an end host needs to send an ICMP
* "Fragment Reassembly Timeout" message, per RFC792.
*/
if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) {
const struct iphdr *iph = ip_hdr(head);
int err = ip_route_input(head, iph->daddr, iph->saddr,
iph->tos, head->dev);
if (unlikely(err))
goto out_rcu_unlock;
if (qp->user == IP_DEFRAG_CONNTRACK_IN &&
skb_rtable(head)->rt_type != RTN_LOCAL)
goto out_rcu_unlock;

/*
* Only an end host needs to send an ICMP
* "Fragment Reassembly Timeout" message, per RFC792.
*/
if (skb_rtable(head)->rt_type != RTN_LOCAL)
goto out_rcu_unlock;

}

/* Send an ICMP "Fragment Reassembly Timeout" message. */
icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
@@ -93,6 +93,7 @@ struct bictcp {
u32 ack_cnt; /* number of acks */
u32 tcp_cwnd; /* estimated tcp cwnd */
#define ACK_RATIO_SHIFT 4
#define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT)
u16 delayed_ack; /* estimate the ratio of Packets/ACKs << 4 */
u8 sample_cnt; /* number of samples to decide curr_rtt */
u8 found; /* the exit point is found? */
@@ -398,8 +399,12 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
u32 delay;

if (icsk->icsk_ca_state == TCP_CA_Open) {
cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT;
ca->delayed_ack += cnt;
u32 ratio = ca->delayed_ack;

ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
ratio += cnt;

ca->delayed_ack = min(ratio, ACK_RATIO_LIMIT);
}

/* Some calls are for duplicates without timetamps */
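The tcp_cubic hunk above recomputes the delayed-ACK ratio in a local variable and clamps it to ACK_RATIO_LIMIT. A standalone rendering of that arithmetic, with a made-up sample count, shows the moving average staying bounded.

```c
#include <stdint.h>
#include <stdio.h>

#define ACK_RATIO_SHIFT 4
#define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT)

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

/* One update of the packets-per-ACK estimate, as in the fixed code:
 * subtract 1/16 of the old value, add the new sample, then clamp. */
static uint32_t update_delayed_ack(uint32_t delayed_ack, uint32_t cnt)
{
	uint32_t ratio = delayed_ack;

	ratio -= delayed_ack >> ACK_RATIO_SHIFT;
	ratio += cnt;

	return min_u32(ratio, ACK_RATIO_LIMIT);
}

int main(void)
{
	uint32_t delayed_ack = 16; /* arbitrary starting estimate */

	/* Feed an absurdly large sample: the clamp keeps it bounded. */
	for (int i = 0; i < 10; i++)
		delayed_ack = update_delayed_ack(delayed_ack, 1000);

	printf("delayed_ack = %u (limit %u)\n", delayed_ack, ACK_RATIO_LIMIT);
	return 0;
}
```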
@@ -69,7 +69,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
}
EXPORT_SYMBOL(xfrm4_prepare_output);

static int xfrm4_output_finish(struct sk_buff *skb)
int xfrm4_output_finish(struct sk_buff *skb)
{
#ifdef CONFIG_NETFILTER
if (!skb_dst(skb)->xfrm) {
@@ -86,7 +86,11 @@ static int xfrm4_output_finish(struct sk_buff *skb)

int xfrm4_output(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct xfrm_state *x = dst->xfrm;

return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
NULL, skb_dst(skb)->dev, xfrm4_output_finish,
NULL, dst->dev,
x->outer_mode->afinfo->output_finish,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
@@ -78,6 +78,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
.init_tempsel = __xfrm4_init_tempsel,
.init_temprop = xfrm4_init_temprop,
.output = xfrm4_output,
.output_finish = xfrm4_output_finish,
.extract_input = xfrm4_extract_input,
.extract_output = xfrm4_extract_output,
.transport_finish = xfrm4_transport_finish,
@@ -45,6 +45,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
int tcphoff, needs_ack;
const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
struct ipv6hdr *ip6h;
#define DEFAULT_TOS_VALUE 0x0U
const __u8 tclass = DEFAULT_TOS_VALUE;
struct dst_entry *dst = NULL;
u8 proto;
struct flowi6 fl6;
@@ -124,7 +126,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
skb_put(nskb, sizeof(struct ipv6hdr));
skb_reset_network_header(nskb);
ip6h = ipv6_hdr(nskb);
ip6h->version = 6;
*(__be32 *)ip6h = htonl(0x60000000 | (tclass << 20));
ip6h->hop_limit = ip6_dst_hoplimit(dst);
ip6h->nexthdr = IPPROTO_TCP;
ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr);
@@ -79,7 +79,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
}
EXPORT_SYMBOL(xfrm6_prepare_output);

static int xfrm6_output_finish(struct sk_buff *skb)
int xfrm6_output_finish(struct sk_buff *skb)
{
#ifdef CONFIG_NETFILTER
IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
@@ -97,9 +97,9 @@ static int __xfrm6_output(struct sk_buff *skb)
if ((x && x->props.mode == XFRM_MODE_TUNNEL) &&
((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
dst_allfrag(skb_dst(skb)))) {
return ip6_fragment(skb, xfrm6_output_finish);
return ip6_fragment(skb, x->outer_mode->afinfo->output_finish);
}
return xfrm6_output_finish(skb);
return x->outer_mode->afinfo->output_finish(skb);
}

int xfrm6_output(struct sk_buff *skb)
@@ -178,6 +178,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
.tmpl_sort = __xfrm6_tmpl_sort,
.state_sort = __xfrm6_state_sort,
.output = xfrm6_output,
.output_finish = xfrm6_output_finish,
.extract_input = xfrm6_extract_input,
.extract_output = xfrm6_extract_output,
.transport_finish = xfrm6_transport_finish,
@@ -576,7 +576,7 @@ static const struct file_operations ip_vs_app_fops = {
};
#endif

static int __net_init __ip_vs_app_init(struct net *net)
int __net_init __ip_vs_app_init(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);

@@ -585,26 +585,17 @@ static int __net_init __ip_vs_app_init(struct net *net)
return 0;
}

static void __net_exit __ip_vs_app_cleanup(struct net *net)
void __net_exit __ip_vs_app_cleanup(struct net *net)
{
proc_net_remove(net, "ip_vs_app");
}

static struct pernet_operations ip_vs_app_ops = {
.init = __ip_vs_app_init,
.exit = __ip_vs_app_cleanup,
};

int __init ip_vs_app_init(void)
{
int rv;

rv = register_pernet_subsys(&ip_vs_app_ops);
return rv;
return 0;
}

void ip_vs_app_cleanup(void)
{
unregister_pernet_subsys(&ip_vs_app_ops);
}
@@ -1258,22 +1258,17 @@ int __net_init __ip_vs_conn_init(struct net *net)
return 0;
}

static void __net_exit __ip_vs_conn_cleanup(struct net *net)
void __net_exit __ip_vs_conn_cleanup(struct net *net)
{
/* flush all the connection entries first */
ip_vs_conn_flush(net);
proc_net_remove(net, "ip_vs_conn");
proc_net_remove(net, "ip_vs_conn_sync");
}
static struct pernet_operations ipvs_conn_ops = {
.init = __ip_vs_conn_init,
.exit = __ip_vs_conn_cleanup,
};

int __init ip_vs_conn_init(void)
{
int idx;
int retc;

/* Compute size and mask */
ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
@@ -1309,17 +1304,14 @@ int __init ip_vs_conn_init(void)
rwlock_init(&__ip_vs_conntbl_lock_array[idx].l);
}

retc = register_pernet_subsys(&ipvs_conn_ops);

/* calculate the random value for connection hash */
get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd));

return retc;
return 0;
}

void ip_vs_conn_cleanup(void)
{
unregister_pernet_subsys(&ipvs_conn_ops);
/* Release the empty cache */
kmem_cache_destroy(ip_vs_conn_cachep);
vfree(ip_vs_conn_tab);
@@ -1113,6 +1113,9 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
return NF_ACCEPT;

net = skb_net(skb);
if (!net_ipvs(net)->enable)
return NF_ACCEPT;

ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
@@ -1343,6 +1346,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
return NF_ACCEPT; /* The packet looks wrong, ignore */

net = skb_net(skb);

pd = ip_vs_proto_data_get(net, cih->protocol);
if (!pd)
return NF_ACCEPT;
@@ -1529,6 +1533,11 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
IP_VS_DBG_ADDR(af, &iph.daddr), hooknum);
return NF_ACCEPT;
}
/* ipvs enabled in this netns ? */
net = skb_net(skb);
if (!net_ipvs(net)->enable)
return NF_ACCEPT;

ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);

/* Bad... Do not break raw sockets */
@@ -1562,7 +1571,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
}

net = skb_net(skb);
/* Protocol supported? */
pd = ip_vs_proto_data_get(net, iph.protocol);
if (unlikely(!pd))
@@ -1588,7 +1596,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
}

IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
net = skb_net(skb);
ipvs = net_ipvs(net);
/* Check the server status */
if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
@@ -1743,10 +1750,16 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
int (*okfn)(struct sk_buff *))
{
int r;
struct net *net;

if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
return NF_ACCEPT;

/* ipvs enabled in this netns ? */
net = skb_net(skb);
if (!net_ipvs(net)->enable)
return NF_ACCEPT;

return ip_vs_in_icmp(skb, &r, hooknum);
}

@@ -1757,10 +1770,16 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
int (*okfn)(struct sk_buff *))
{
int r;
struct net *net;

if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
return NF_ACCEPT;

/* ipvs enabled in this netns ? */
net = skb_net(skb);
if (!net_ipvs(net)->enable)
return NF_ACCEPT;

return ip_vs_in_icmp_v6(skb, &r, hooknum);
}
#endif
@@ -1884,19 +1903,70 @@ static int __net_init __ip_vs_init(struct net *net)
pr_err("%s(): no memory.\n", __func__);
return -ENOMEM;
}
/* Hold the beast until a service is registerd */
ipvs->enable = 0;
ipvs->net = net;
/* Counters used for creating unique names */
ipvs->gen = atomic_read(&ipvs_netns_cnt);
atomic_inc(&ipvs_netns_cnt);
net->ipvs = ipvs;

if (__ip_vs_estimator_init(net) < 0)
goto estimator_fail;

if (__ip_vs_control_init(net) < 0)
goto control_fail;

if (__ip_vs_protocol_init(net) < 0)
goto protocol_fail;

if (__ip_vs_app_init(net) < 0)
goto app_fail;

if (__ip_vs_conn_init(net) < 0)
goto conn_fail;

if (__ip_vs_sync_init(net) < 0)
goto sync_fail;

printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
sizeof(struct netns_ipvs), ipvs->gen);
return 0;
/*
* Error handling
*/

sync_fail:
__ip_vs_conn_cleanup(net);
conn_fail:
__ip_vs_app_cleanup(net);
app_fail:
__ip_vs_protocol_cleanup(net);
protocol_fail:
__ip_vs_control_cleanup(net);
control_fail:
__ip_vs_estimator_cleanup(net);
estimator_fail:
return -ENOMEM;
}

static void __net_exit __ip_vs_cleanup(struct net *net)
{
IP_VS_DBG(10, "ipvs netns %d released\n", net_ipvs(net)->gen);
__ip_vs_service_cleanup(net); /* ip_vs_flush() with locks */
__ip_vs_conn_cleanup(net);
__ip_vs_app_cleanup(net);
__ip_vs_protocol_cleanup(net);
__ip_vs_control_cleanup(net);
__ip_vs_estimator_cleanup(net);
IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
}

static void __net_exit __ip_vs_dev_cleanup(struct net *net)
{
EnterFunction(2);
net_ipvs(net)->enable = 0; /* Disable packet reception */
__ip_vs_sync_cleanup(net);
LeaveFunction(2);
}

static struct pernet_operations ipvs_core_ops = {
@@ -1906,6 +1976,10 @@ static struct pernet_operations ipvs_core_ops = {
.size = sizeof(struct netns_ipvs),
};

static struct pernet_operations ipvs_core_dev_ops = {
.exit = __ip_vs_dev_cleanup,
};

/*
* Initialize IP Virtual Server
*/
@@ -1913,10 +1987,6 @@ static int __init ip_vs_init(void)
{
int ret;

ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */
if (ret < 0)
return ret;

ip_vs_estimator_init();
ret = ip_vs_control_init();
if (ret < 0) {
@@ -1944,15 +2014,28 @@ static int __init ip_vs_init(void)
goto cleanup_conn;
}

ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */
if (ret < 0)
goto cleanup_sync;

ret = register_pernet_device(&ipvs_core_dev_ops);
if (ret < 0)
goto cleanup_sub;

ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
if (ret < 0) {
pr_err("can't register hooks.\n");
goto cleanup_sync;
goto cleanup_dev;
}

pr_info("ipvs loaded.\n");

return ret;

cleanup_dev:
unregister_pernet_device(&ipvs_core_dev_ops);
cleanup_sub:
unregister_pernet_subsys(&ipvs_core_ops);
cleanup_sync:
ip_vs_sync_cleanup();
cleanup_conn:
@@ -1964,20 +2047,20 @@ cleanup_sync:
ip_vs_control_cleanup();
cleanup_estimator:
ip_vs_estimator_cleanup();
unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
return ret;
}

static void __exit ip_vs_cleanup(void)
{
nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
unregister_pernet_device(&ipvs_core_dev_ops);
unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
ip_vs_sync_cleanup();
ip_vs_conn_cleanup();
ip_vs_app_cleanup();
ip_vs_protocol_cleanup();
ip_vs_control_cleanup();
ip_vs_estimator_cleanup();
unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
pr_info("ipvs unloaded.\n");
}

@@ -69,6 +69,11 @@ int ip_vs_get_debug_level(void)
}
#endif


/* Protos */
static void __ip_vs_del_service(struct ip_vs_service *svc);


#ifdef CONFIG_IP_VS_IPV6
/* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
static int __ip_vs_addr_is_local_v6(struct net *net,
@@ -1214,6 +1219,8 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
write_unlock_bh(&__ip_vs_svc_lock);

*svc_p = svc;
/* Now there is a service - full throttle */
ipvs->enable = 1;
return 0;


@@ -1472,6 +1479,84 @@ static int ip_vs_flush(struct net *net)
return 0;
}

/*
* Delete service by {netns} in the service table.
* Called by __ip_vs_cleanup()
*/
void __ip_vs_service_cleanup(struct net *net)
{
EnterFunction(2);
/* Check for "full" addressed entries */
mutex_lock(&__ip_vs_mutex);
ip_vs_flush(net);
mutex_unlock(&__ip_vs_mutex);
LeaveFunction(2);
}
/*
* Release dst hold by dst_cache
*/
static inline void
__ip_vs_dev_reset(struct ip_vs_dest *dest, struct net_device *dev)
{
spin_lock_bh(&dest->dst_lock);
if (dest->dst_cache && dest->dst_cache->dev == dev) {
IP_VS_DBG_BUF(3, "Reset dev:%s dest %s:%u ,dest->refcnt=%d\n",
dev->name,
IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port),
atomic_read(&dest->refcnt));
ip_vs_dst_reset(dest);
}
spin_unlock_bh(&dest->dst_lock);

}
/*
* Netdev event receiver
* Currently only NETDEV_UNREGISTER is handled, i.e. if we hold a reference to
* a device that is "unregister" it must be released.
*/
static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = ptr;
struct net *net = dev_net(dev);
struct ip_vs_service *svc;
struct ip_vs_dest *dest;
unsigned int idx;

if (event != NETDEV_UNREGISTER)
return NOTIFY_DONE;
IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
EnterFunction(2);
mutex_lock(&__ip_vs_mutex);
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
if (net_eq(svc->net, net)) {
list_for_each_entry(dest, &svc->destinations,
n_list) {
__ip_vs_dev_reset(dest, dev);
}
}
}

list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
if (net_eq(svc->net, net)) {
list_for_each_entry(dest, &svc->destinations,
n_list) {
__ip_vs_dev_reset(dest, dev);
}
}

}
}

list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) {
__ip_vs_dev_reset(dest, dev);
}
mutex_unlock(&__ip_vs_mutex);
LeaveFunction(2);
return NOTIFY_DONE;
}

/*
* Zero counters in a service or all services
@@ -3588,6 +3673,10 @@ void __net_init __ip_vs_control_cleanup_sysctl(struct net *net) { }

#endif

static struct notifier_block ip_vs_dst_notifier = {
.notifier_call = ip_vs_dst_event,
};

int __net_init __ip_vs_control_init(struct net *net)
{
int idx;
@@ -3626,7 +3715,7 @@ err:
return -ENOMEM;
}

static void __net_exit __ip_vs_control_cleanup(struct net *net)
void __net_exit __ip_vs_control_cleanup(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);

@@ -3639,11 +3728,6 @@ static void __net_exit __ip_vs_control_cleanup(struct net *net)
free_percpu(ipvs->tot_stats.cpustats);
}

static struct pernet_operations ipvs_control_ops = {
.init = __ip_vs_control_init,
.exit = __ip_vs_control_cleanup,
};

int __init ip_vs_control_init(void)
{
int idx;
@@ -3657,33 +3741,32 @@ int __init ip_vs_control_init(void)
INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
}

ret = register_pernet_subsys(&ipvs_control_ops);
if (ret) {
pr_err("cannot register namespace.\n");
goto err;
}

smp_wmb(); /* Do we really need it now ? */

ret = nf_register_sockopt(&ip_vs_sockopts);
if (ret) {
pr_err("cannot register sockopt.\n");
goto err_net;
goto err_sock;
}

ret = ip_vs_genl_register();
if (ret) {
pr_err("cannot register Generic Netlink interface.\n");
nf_unregister_sockopt(&ip_vs_sockopts);
goto err_net;
goto err_genl;
}

ret = register_netdevice_notifier(&ip_vs_dst_notifier);
if (ret < 0)
goto err_notf;

LeaveFunction(2);
return 0;

err_net:
unregister_pernet_subsys(&ipvs_control_ops);
err:
err_notf:
ip_vs_genl_unregister();
err_genl:
nf_unregister_sockopt(&ip_vs_sockopts);
err_sock:
return ret;
}

@@ -3691,7 +3774,6 @@ err:
void ip_vs_control_cleanup(void)
{
EnterFunction(2);
unregister_pernet_subsys(&ipvs_control_ops);
ip_vs_genl_unregister();
nf_unregister_sockopt(&ip_vs_sockopts);
LeaveFunction(2);
@@ -192,7 +192,7 @@ void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
dst->outbps = (e->outbps + 0xF) >> 5;
}

static int __net_init __ip_vs_estimator_init(struct net *net)
int __net_init __ip_vs_estimator_init(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);

@@ -203,24 +203,16 @@ static int __net_init __ip_vs_estimator_init(struct net *net)
return 0;
}

static void __net_exit __ip_vs_estimator_exit(struct net *net)
void __net_exit __ip_vs_estimator_cleanup(struct net *net)
{
del_timer_sync(&net_ipvs(net)->est_timer);
}
static struct pernet_operations ip_vs_app_ops = {
.init = __ip_vs_estimator_init,
.exit = __ip_vs_estimator_exit,
};

int __init ip_vs_estimator_init(void)
{
int rv;

rv = register_pernet_subsys(&ip_vs_app_ops);
return rv;
return 0;
}

void ip_vs_estimator_cleanup(void)
{
unregister_pernet_subsys(&ip_vs_app_ops);
}
@@ -316,7 +316,7 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
/*
* per network name-space init
*/
static int __net_init __ip_vs_protocol_init(struct net *net)
int __net_init __ip_vs_protocol_init(struct net *net)
{
#ifdef CONFIG_IP_VS_PROTO_TCP
register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp);
@@ -336,7 +336,7 @@ static int __net_init __ip_vs_protocol_init(struct net *net)
return 0;
}

static void __net_exit __ip_vs_protocol_cleanup(struct net *net)
void __net_exit __ip_vs_protocol_cleanup(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_proto_data *pd;
@@ -349,11 +349,6 @@ static void __net_exit __ip_vs_protocol_cleanup(struct net *net)
}
}

static struct pernet_operations ipvs_proto_ops = {
.init = __ip_vs_protocol_init,
.exit = __ip_vs_protocol_cleanup,
};

int __init ip_vs_protocol_init(void)
{
char protocols[64];
@@ -382,7 +377,6 @@ int __init ip_vs_protocol_init(void)
REGISTER_PROTOCOL(&ip_vs_protocol_esp);
#endif
pr_info("Registered protocols (%s)\n", &protocols[2]);
return register_pernet_subsys(&ipvs_proto_ops);

return 0;
}
@@ -393,7 +387,6 @@ void ip_vs_protocol_cleanup(void)
struct ip_vs_protocol *pp;
int i;

unregister_pernet_subsys(&ipvs_proto_ops);
/* unregister all the ipvs protocols */
for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
while ((pp = ip_vs_proto_table[i]) != NULL)
@@ -1303,13 +1303,18 @@ static struct socket *make_send_sock(struct net *net)
struct socket *sock;
int result;

/* First create a socket */
result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
/* First create a socket move it to right name space later */
result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
if (result < 0) {
pr_err("Error during creation of socket; terminating\n");
return ERR_PTR(result);
}

/*
* Kernel sockets that are a part of a namespace, should not
* hold a reference to a namespace in order to allow to stop it.
* After sk_change_net should be released using sk_release_kernel.
*/
sk_change_net(sock->sk, net);
result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn);
if (result < 0) {
pr_err("Error setting outbound mcast interface\n");
@@ -1334,8 +1339,8 @@ static struct socket *make_send_sock(struct net *net)

return sock;

error:
sock_release(sock);
error:
sk_release_kernel(sock->sk);
return ERR_PTR(result);
}

@@ -1350,12 +1355,17 @@ static struct socket *make_receive_sock(struct net *net)
int result;

/* First create a socket */
result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
if (result < 0) {
pr_err("Error during creation of socket; terminating\n");
return ERR_PTR(result);
}

/*
* Kernel sockets that are a part of a namespace, should not
* hold a reference to a namespace in order to allow to stop it.
* After sk_change_net should be released using sk_release_kernel.
*/
sk_change_net(sock->sk, net);
/* it is equivalent to the REUSEADDR option in user-space */
sock->sk->sk_reuse = 1;

@@ -1377,8 +1387,8 @@ static struct socket *make_receive_sock(struct net *net)

return sock;

error:
sock_release(sock);
error:
sk_release_kernel(sock->sk);
return ERR_PTR(result);
}

@@ -1473,7 +1483,7 @@ static int sync_thread_master(void *data)
ip_vs_sync_buff_release(sb);

/* release the sending multicast socket */
sock_release(tinfo->sock);
sk_release_kernel(tinfo->sock->sk);
kfree(tinfo);

return 0;
@@ -1513,7 +1523,7 @@ static int sync_thread_backup(void *data)
}

/* release the sending multicast socket */
sock_release(tinfo->sock);
sk_release_kernel(tinfo->sock->sk);
kfree(tinfo->buf);
kfree(tinfo);

@@ -1601,7 +1611,7 @@ outtinfo:
outbuf:
kfree(buf);
outsocket:
sock_release(sock);
sk_release_kernel(sock->sk);
out:
return result;
}
@@ -1610,6 +1620,7 @@ out:
int stop_sync_thread(struct net *net, int state)
{
struct netns_ipvs *ipvs = net_ipvs(net);
int retc = -EINVAL;

IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));

@@ -1629,7 +1640,7 @@ int stop_sync_thread(struct net *net, int state)
spin_lock_bh(&ipvs->sync_lock);
ipvs->sync_state &= ~IP_VS_STATE_MASTER;
spin_unlock_bh(&ipvs->sync_lock);
kthread_stop(ipvs->master_thread);
retc = kthread_stop(ipvs->master_thread);
ipvs->master_thread = NULL;
} else if (state == IP_VS_STATE_BACKUP) {
if (!ipvs->backup_thread)
@@ -1639,22 +1650,20 @@ int stop_sync_thread(struct net *net, int state)
task_pid_nr(ipvs->backup_thread));

ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
kthread_stop(ipvs->backup_thread);
retc = kthread_stop(ipvs->backup_thread);
ipvs->backup_thread = NULL;
} else {
return -EINVAL;
}

/* decrease the module use count */
ip_vs_use_count_dec();

return 0;
return retc;
}

/*
* Initialize data struct for each netns
*/
static int __net_init __ip_vs_sync_init(struct net *net)
int __net_init __ip_vs_sync_init(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);

@@ -1668,24 +1677,24 @@ static int __net_init __ip_vs_sync_init(struct net *net)
return 0;
}

static void __ip_vs_sync_cleanup(struct net *net)
void __ip_vs_sync_cleanup(struct net *net)
{
stop_sync_thread(net, IP_VS_STATE_MASTER);
stop_sync_thread(net, IP_VS_STATE_BACKUP);
int retc;

retc = stop_sync_thread(net, IP_VS_STATE_MASTER);
if (retc && retc != -ESRCH)
pr_err("Failed to stop Master Daemon\n");

retc = stop_sync_thread(net, IP_VS_STATE_BACKUP);
if (retc && retc != -ESRCH)
pr_err("Failed to stop Backup Daemon\n");
}

static struct pernet_operations ipvs_sync_ops = {
.init = __ip_vs_sync_init,
.exit = __ip_vs_sync_cleanup,
};


int __init ip_vs_sync_init(void)
{
return register_pernet_subsys(&ipvs_sync_ops);
return 0;
}

void ip_vs_sync_cleanup(void)
{
unregister_pernet_subsys(&ipvs_sync_ops);
}
@@ -1334,6 +1334,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
struct nf_conn *ct;
int err = -EINVAL;
struct nf_conntrack_helper *helper;
struct nf_conn_tstamp *tstamp;

ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
if (IS_ERR(ct))
@@ -1451,6 +1452,9 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
__set_bit(IPS_EXPECTED_BIT, &ct->status);
ct->master = master_ct;
}
tstamp = nf_conn_tstamp_find(ct);
if (tstamp)
tstamp->start = ktime_to_ns(ktime_get_real());

add_timer(&ct->timeout);
nf_conntrack_hash_insert(ct);
@@ -455,6 +455,7 @@ void xt_compat_flush_offsets(u_int8_t af)
vfree(xt[af].compat_tab);
xt[af].compat_tab = NULL;
xt[af].number = 0;
xt[af].cur = 0;
}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
@@ -473,8 +474,7 @@ int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
else
return mid ? tmp[mid - 1].delta : 0;
}
WARN_ON_ONCE(1);
return 0;
return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);

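The x_tables hunk above changes what xt_compat_calc_jump() yields when the binary search over the sorted offset table does not land on the requested offset exactly, falling back to the delta of the closest smaller entry (`left ? tmp[left - 1].delta : 0`). Below is a simplified, self-contained lower-bound lookup in that spirit; the table contents and the exact-match handling are invented for the example.

```c
#include <stdio.h>

/* Sorted table of (offset, cumulative delta) pairs; values are made up. */
struct compat_delta { unsigned int offset; int delta; };

static const struct compat_delta tab[] = {
	{ 0, 0 }, { 40, 8 }, { 96, 24 }, { 160, 40 },
};

/* Binary search for `offset`; if it is not present, fall back to the
 * cumulative delta of the closest smaller entry instead of warning. */
static int calc_jump(unsigned int offset)
{
	int left = 0, right = (int)(sizeof(tab) / sizeof(tab[0]));

	while (left < right) {
		int mid = (left + right) / 2;

		if (offset > tab[mid].offset)
			left = mid + 1;
		else if (offset < tab[mid].offset)
			right = mid;
		else
			return tab[mid].delta;       /* exact hit */
	}
	return left ? tab[left - 1].delta : 0;       /* nearest lower entry */
}

int main(void)
{
	printf("delta(96)  = %d\n", calc_jump(96));   /* exact: 24 */
	printf("delta(100) = %d\n", calc_jump(100));  /* falls back to 24 */
	return 0;
}
```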
@@ -99,7 +99,7 @@ tos_tg6(struct sk_buff *skb, const struct xt_action_param *par)
u_int8_t orig, nv;

orig = ipv6_get_dsfield(iph);
nv = (orig & info->tos_mask) ^ info->tos_value;
nv = (orig & ~info->tos_mask) ^ info->tos_value;

if (orig != nv) {
if (!skb_make_writable(skb, sizeof(struct iphdr)))
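The one-line DSCP fix above inverts the mask before ANDing (`orig & ~tos_mask` rather than `orig & tos_mask`), so the bits preserved from the original field are the ones outside the mask. A tiny standalone check with made-up values makes the difference visible.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t orig      = 0xb8; /* current traffic-class byte (example) */
	uint8_t tos_mask  = 0xfc; /* bits the rule is allowed to touch */
	uint8_t tos_value = 0x28; /* value to write into those bits */

	uint8_t buggy = (uint8_t)((orig & tos_mask) ^ tos_value);
	uint8_t fixed = (uint8_t)((orig & ~tos_mask) ^ tos_value);

	/* The fixed form keeps only the bits outside the mask (here the
	 * two ECN bits) and overlays the new value on the rest. */
	printf("buggy: 0x%02x  fixed: 0x%02x\n", buggy, fixed);
	return 0;
}
```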
@@ -272,11 +272,6 @@ static int conntrack_mt_check(const struct xt_mtchk_param *par)
{
int ret;

if (strcmp(par->table, "raw") == 0) {
pr_info("state is undetermined at the time of raw table\n");
return -EINVAL;
}

ret = nf_ct_l3proto_try_module_get(par->family);
if (ret < 0)
pr_info("cannot load conntrack support for proto=%u\n",
@@ -1406,6 +1406,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
struct net *net = xp_net(policy);
unsigned long now = jiffies;
struct net_device *dev;
struct xfrm_mode *inner_mode;
struct dst_entry *dst_prev = NULL;
struct dst_entry *dst0 = NULL;
int i = 0;
@@ -1436,6 +1437,17 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
goto put_states;
}

if (xfrm[i]->sel.family == AF_UNSPEC) {
inner_mode = xfrm_ip2inner_mode(xfrm[i],
xfrm_af2proto(family));
if (!inner_mode) {
err = -EAFNOSUPPORT;
dst_release(dst);
goto put_states;
}
} else
inner_mode = xfrm[i]->inner_mode;

if (!dst_prev)
dst0 = dst1;
else {
@@ -1464,7 +1476,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
dst1->lastuse = now;

dst1->input = dst_discard;
dst1->output = xfrm[i]->outer_mode->afinfo->output;
dst1->output = inner_mode->afinfo->output;

dst1->next = dst_prev;
dst_prev = dst1;
@@ -535,6 +535,9 @@ int xfrm_init_replay(struct xfrm_state *x)
replay_esn->bmp_len * sizeof(__u32) * 8)
return -EINVAL;

if ((x->props.flags & XFRM_STATE_ESN) && replay_esn->replay_window == 0)
return -EINVAL;

if ((x->props.flags & XFRM_STATE_ESN) && x->replay_esn)
x->repl = &xfrm_replay_esn;
else