Mirror of https://github.com/joel16/android_kernel_sony_msm8994.git (synced 2024-12-11 14:25:51 +00:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "A moderately sized pile of fixes, some specifically for merge window
  introduced regressions although others are for longer standing items
  and have been queued up for -stable.

  I'm kind of tired of all the RDS protocol bugs over the years, to be
  honest, it's way out of proportion to the number of people who
  actually use it.

   1) Fix missing range initialization in netfilter IPSET, from Jozsef
      Kadlecsik.

   2) ieee80211_local->tim_lock needs to use BH disabling, from
      Johannes Berg.

   3) Fix DMA syncing in SFC driver, from Ben Hutchings.

   4) Fix regression in BOND device MAC address setting, from Jiri
      Pirko.

   5) Missing usb_free_urb in ISDN Hisax driver, from Marina Makienko.

   6) Fix UDP checksumming in bnx2x driver for 57710 and 57711 chips,
      fix from Dmitry Kravkov.

   7) Missing cfgspace_lock initialization in BCMA driver.

   8) Validate parameter size for SCTP assoc stats getsockopt(), from
      Guenter Roeck.

   9) Fix SCTP association hangs, from Lee A Roberts.

  10) Fix jumbo frame handling in r8169, from Francois Romieu.

  11) Fix phy_device memory leak, from Petr Malat.

  12) Omit trailing FCS from frames received in BGMAC driver, from
      Hauke Mehrtens.

  13) Missing socket refcount release in L2TP, from Guillaume Nault.

  14) sctp_endpoint_init should respect passed in gfp_t, rather than
      use GFP_KERNEL unconditionally.  From Dan Carpenter.

  15) Add AISX AX88179 USB driver, from Freddy Xin.

  16) Remove MAINTAINERS entries for drivers deleted during the merge
      window, from Cesar Eduardo Barros.

  17) RDS protocol can try to allocate huge amounts of memory, check
      that the user's request length makes sense, from Cong Wang.

  18) SCTP should use the provided KMALLOC_MAX_SIZE instead of it's
      own, bogus, definition.  From Cong Wang.

  19) Fix deadlocks in FEC driver by moving TX reclaim into NAPI poll,
      from Frank Li.  Also, fix a build error introduced in the merge
      window.

  20) Fix bogus purging of default routes in ipv6, from Lorenzo
      Colitti.

  21) Don't double count RTT measurements when we leave the TCP
      receive fast path, from Neal Cardwell."

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (61 commits)
  tcp: fix double-counted receiver RTT when leaving receiver fast path
  CAIF: fix sparse warning for caif_usb
  rds: simplify a warning message
  net: fec: fix build error in no MXC platform
  net: ipv6: Don't purge default router if accept_ra=2
  net: fec: put tx to napi poll function to fix dead lock
  sctp: use KMALLOC_MAX_SIZE instead of its own MAX_KMALLOC_SIZE
  rds: limit the size allocated by rds_message_alloc()
  MAINTAINERS: remove eexpress
  MAINTAINERS: remove drivers/net/wan/cycx*
  MAINTAINERS: remove 3c505
  caif_dev: fix sparse warnings for caif_flow_cb
  ax88179_178a: ASIX AX88179_178A USB 3.0/2.0 to gigabit ethernet adapter driver
  sctp: use the passed in gfp flags instead GFP_KERNEL
  ipv[4|6]: correct dropwatch false positive in local_deliver_finish
  l2tp: Restore socket refcount when sendmsg succeeds
  net/phy: micrel: Disable asymmetric pause for KSZ9021
  bgmac: omit the fcs
  phy: Fix phy_device_free memory leak
  bnx2x: Fix KR2 work-around condition
  ...
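Item 17 above is an instance of a very common pattern: a user-controlled length must be sanity-checked before it is allowed to drive a kernel allocation. A minimal, hypothetical sketch of that shape (demo_msg_alloc and DEMO_MAX_PAYLOAD are illustrative names, not the actual RDS code):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Illustrative ceiling; the real limit is protocol-specific. */
#define DEMO_MAX_PAYLOAD	(128 * 1024)

static void *demo_msg_alloc(size_t user_len)
{
	/* Refuse user-supplied lengths before they reach the allocator,
	 * so a hostile request cannot pin down huge amounts of memory.
	 */
	if (user_len == 0 || user_len > DEMO_MAX_PAYLOAD)
		return ERR_PTR(-EMSGSIZE);

	return kzalloc(user_len, GFP_KERNEL);
}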
This commit is contained in: commit 9da060d0ed
 MAINTAINERS | 18 ------------------
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -114,12 +114,6 @@ Maintainers List (try to look for most precise areas first)
 
 		-----------------------------------
 
-3C505 NETWORK DRIVER
-M:	Philip Blundell <philb@gnu.org>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/ethernet/i825xx/3c505*
-
 3C59X NETWORK DRIVER
 M:	Steffen Klassert <klassert@mathematik.tu-chemnitz.de>
 L:	netdev@vger.kernel.org
@@ -2361,12 +2355,6 @@ W:	http://www.arm.linux.org.uk/
 S:	Maintained
 F:	drivers/video/cyber2000fb.*
 
-CYCLADES 2X SYNC CARD DRIVER
-M:	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
-W:	http://oops.ghostprotocols.net:81/blog
-S:	Maintained
-F:	drivers/net/wan/cycx*
-
 CYCLADES ASYNC MUX DRIVER
 W:	http://www.cyclades.com/
 S:	Orphan
@@ -3067,12 +3055,6 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git
 F:	drivers/video/s1d13xxxfb.c
 F:	include/video/s1d13xxxfb.h
 
-ETHEREXPRESS-16 NETWORK DRIVER
-M:	Philip Blundell <philb@gnu.org>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/ethernet/i825xx/eexpress.*
-
 ETHERNET BRIDGE
 M:	Stephen Hemminger <stephen@networkplumber.org>
 L:	bridge@lists.linux-foundation.org
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
@@ -404,6 +404,8 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
 		return;
 	}
 
+	spin_lock_init(&pc_host->cfgspace_lock);
+
 	pc->host_controller = pc_host;
 	pc_host->pci_controller.io_resource = &pc_host->io_resource;
 	pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
@@ -313,6 +313,12 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
 	    (task_active_pid_ns(current) != &init_pid_ns))
 		return;
 
+	/* Can only change if privileged. */
+	if (!capable(CAP_NET_ADMIN)) {
+		err = EPERM;
+		goto out;
+	}
+
 	mc_op = (enum proc_cn_mcast_op *)msg->data;
 	switch (*mc_op) {
 	case PROC_CN_MCAST_LISTEN:
@@ -325,6 +331,8 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
 		err = EINVAL;
 		break;
 	}
+
+out:
 	cn_proc_ack(err, msg->seq, msg->ack);
 }
 
diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c
@@ -294,13 +294,13 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
 	// Allocate URBs and buffers for interrupt endpoint
 	urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (!urb) {
-		return -ENOMEM;
+		goto err1;
 	}
 	intr->urb = urb;
 
 	buf = kmalloc(INT_PKT_SIZE, GFP_KERNEL);
 	if (!buf) {
-		return -ENOMEM;
+		goto err2;
 	}
 
 	endpoint = &altsetting->endpoint[EP_INT-1];
@@ -313,6 +313,14 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
 			 endpoint->desc.bInterval);
 
 	return 0;
+err2:
+	usb_free_urb(intr->urb);
+	intr->urb = NULL;
+err1:
+	usb_free_urb(ctrl->urb);
+	ctrl->urb = NULL;
+
+	return -ENOMEM;
 }
 
 /*
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
@@ -1629,7 +1629,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
 	/* If this is the first slave, then we need to set the master's hardware
 	 * address to be the same as the slave's. */
-	if (bond->dev_addr_from_first)
+	if (bond->slave_cnt == 0 && bond->dev_addr_from_first)
 		bond_set_dev_addr(bond->dev, slave_dev);
 
 	new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
@@ -301,12 +301,16 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
 			bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
 				  ring->start);
 		} else {
+			/* Omit CRC. */
+			len -= ETH_FCS_LEN;
+
 			new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
 			if (new_skb) {
 				skb_put(new_skb, len);
 				skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
 								 new_skb->data,
 								 len);
+				skb_checksum_none_assert(skb);
 				new_skb->protocol =
 					eth_type_trans(new_skb, bgmac->net_dev);
 				netif_receive_skb(new_skb);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -3142,7 +3142,7 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
 	tsum = ~csum_fold(csum_add((__force __wsum) csum,
 				   csum_partial(t_header, -fix, 0)));
 
-	return bswab16(csum);
+	return bswab16(tsum);
 }
 
 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -281,6 +281,8 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 			cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
 		if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE)
 			cmd->lp_advertising |= ADVERTISED_10000baseT_Full;
+		if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
+			cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
 	}
 
 	cmd->maxtxpkt = 0;
@@ -463,6 +465,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 					     ADVERTISED_10000baseKR_Full))
 				bp->link_params.speed_cap_mask[cfg_idx] |=
 					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
+
+			if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
+				bp->link_params.speed_cap_mask[cfg_idx] |=
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
 		}
 	} else { /* forced speed */
 		/* advertise the requested speed and duplex if supported */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -10422,6 +10422,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
 					 MDIO_PMA_DEVAD,
 					 MDIO_PMA_REG_8481_LED1_MASK,
 					 0x0);
+			if (phy->type ==
+			    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+				/* Disable MI_INT interrupt before setting LED4
+				 * source to constant off.
+				 */
+				if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+					   params->port*4) &
+				    NIG_MASK_MI_INT) {
+					params->link_flags |=
+						LINK_FLAGS_INT_DISABLED;
+
+					bnx2x_bits_dis(
+						bp,
+						NIG_REG_MASK_INTERRUPT_PORT0 +
+						params->port*4,
+						NIG_MASK_MI_INT);
+				}
+				bnx2x_cl45_write(bp, phy,
+						 MDIO_PMA_DEVAD,
+						 MDIO_PMA_REG_8481_SIGNAL_MASK,
+						 0x0);
+			}
 		}
 		break;
 	case LED_MODE_ON:
@@ -10468,6 +10490,28 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
 					 MDIO_PMA_DEVAD,
 					 MDIO_PMA_REG_8481_LED1_MASK,
 					 0x20);
+			if (phy->type ==
+			    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+				/* Disable MI_INT interrupt before setting LED4
+				 * source to constant on.
+				 */
+				if (REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+					   params->port*4) &
+				    NIG_MASK_MI_INT) {
+					params->link_flags |=
+						LINK_FLAGS_INT_DISABLED;
+
+					bnx2x_bits_dis(
+						bp,
+						NIG_REG_MASK_INTERRUPT_PORT0 +
+						params->port*4,
+						NIG_MASK_MI_INT);
+				}
+				bnx2x_cl45_write(bp, phy,
+						 MDIO_PMA_DEVAD,
+						 MDIO_PMA_REG_8481_SIGNAL_MASK,
+						 0x20);
+			}
 		}
 		break;
 
@@ -10532,6 +10576,22 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
 					 MDIO_PMA_DEVAD,
 					 MDIO_PMA_REG_8481_LINK_SIGNAL,
 					 val);
+			if (phy->type ==
+			    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
+				/* Restore LED4 source to external link,
+				 * and re-enable interrupts.
+				 */
+				bnx2x_cl45_write(bp, phy,
+						 MDIO_PMA_DEVAD,
+						 MDIO_PMA_REG_8481_SIGNAL_MASK,
+						 0x40);
+				if (params->link_flags &
+				    LINK_FLAGS_INT_DISABLED) {
+					bnx2x_link_int_enable(params);
+					params->link_flags &=
+						~LINK_FLAGS_INT_DISABLED;
+				}
+			}
 		}
 		break;
 	}
@@ -11791,6 +11851,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
+			phy->media_type = ETH_PHY_KR;
+			phy->flags |= FLAGS_WC_DUAL_MODE;
 			phy->supported &= (SUPPORTED_20000baseKR2_Full |
 					   SUPPORTED_10000baseT_Full |
 					   SUPPORTED_1000baseT_Full |
 					   SUPPORTED_Autoneg |
 					   SUPPORTED_FIBRE |
 					   SUPPORTED_Pause |
@@ -13437,7 +13499,7 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
 		struct bnx2x_phy *phy = &params->phy[INT_PHY];
 		bnx2x_set_aer_mmd(params, phy);
 		if ((phy->supported & SUPPORTED_20000baseKR2_Full) &&
-		    (phy->speed_cap_mask & SPEED_20000))
+		    (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
 			bnx2x_check_kr2_wa(params, vars, phy);
 		bnx2x_check_over_curr(params, vars);
 		if (vars->rx_tx_asic_rst)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -307,7 +307,8 @@ struct link_params {
 	struct bnx2x *bp;
 	u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
 				req_flow_ctrl is set to AUTO */
-	u16 rsrv1;
+	u16 link_flags;
+#define LINK_FLAGS_INT_DISABLED		(1<<0)
 	u32 lfa_base;
 };
 
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
@@ -246,14 +246,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	struct bufdesc *bdp;
 	void *bufaddr;
 	unsigned short status;
-	unsigned long flags;
+	unsigned int index;
 
 	if (!fep->link) {
 		/* Link is down or autonegotiation is in progress. */
 		return NETDEV_TX_BUSY;
 	}
 
-	spin_lock_irqsave(&fep->hw_lock, flags);
 	/* Fill in a Tx ring entry */
 	bdp = fep->cur_tx;
 
@@ -264,7 +263,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	 * This should not happen, since ndev->tbusy should be set.
 	 */
 		printk("%s: tx queue full!.\n", ndev->name);
-		spin_unlock_irqrestore(&fep->hw_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -280,13 +278,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	 * 4-byte boundaries. Use bounce buffers to copy data
 	 * and get it aligned. Ugh.
 	 */
+	if (fep->bufdesc_ex)
+		index = (struct bufdesc_ex *)bdp -
+			(struct bufdesc_ex *)fep->tx_bd_base;
+	else
+		index = bdp - fep->tx_bd_base;
+
 	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
-		unsigned int index;
-		if (fep->bufdesc_ex)
-			index = (struct bufdesc_ex *)bdp -
-				(struct bufdesc_ex *)fep->tx_bd_base;
-		else
-			index = bdp - fep->tx_bd_base;
 		memcpy(fep->tx_bounce[index], skb->data, skb->len);
 		bufaddr = fep->tx_bounce[index];
 	}
@@ -300,10 +298,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		swap_buffer(bufaddr, skb->len);
 
 	/* Save skb pointer */
-	fep->tx_skbuff[fep->skb_cur] = skb;
-
-	ndev->stats.tx_bytes += skb->len;
-	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
+	fep->tx_skbuff[index] = skb;
 
 	/* Push the data cache so the CPM does not get stale memory
 	 * data.
@@ -331,25 +326,21 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			ebdp->cbd_esc = BD_ENET_TX_INT;
 		}
 	}
-	/* Trigger transmission start */
-	writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 
 	/* If this was the last BD in the ring, start at the beginning again. */
 	if (status & BD_ENET_TX_WRAP)
 		bdp = fep->tx_bd_base;
 	else
 		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
 
-	if (bdp == fep->dirty_tx) {
-		fep->tx_full = 1;
-		netif_stop_queue(ndev);
-	}
-
 	fep->cur_tx = bdp;
 
-	skb_tx_timestamp(skb);
+	if (fep->cur_tx == fep->dirty_tx)
+		netif_stop_queue(ndev);
 
-	spin_unlock_irqrestore(&fep->hw_lock, flags);
+	/* Trigger transmission start */
+	writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+
+	skb_tx_timestamp(skb);
 
 	return NETDEV_TX_OK;
 }
@@ -406,11 +397,8 @@ fec_restart(struct net_device *ndev, int duplex)
 	writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
 			* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
 
+	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
 	fep->cur_rx = fep->rx_bd_base;
 
-	/* Reset SKB transmit buffers. */
-	fep->skb_cur = fep->skb_dirty = 0;
-
 	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
 		if (fep->tx_skbuff[i]) {
 			dev_kfree_skb_any(fep->tx_skbuff[i]);
@@ -573,20 +561,35 @@ fec_enet_tx(struct net_device *ndev)
 	struct bufdesc *bdp;
 	unsigned short status;
 	struct sk_buff *skb;
+	int index = 0;
 
 	fep = netdev_priv(ndev);
-	spin_lock(&fep->hw_lock);
 	bdp = fep->dirty_tx;
 
+	/* get next bdp of dirty_tx */
+	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+		bdp = fep->tx_bd_base;
+	else
+		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+
 	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
-		if (bdp == fep->cur_tx && fep->tx_full == 0)
+
+		/* current queue is empty */
+		if (bdp == fep->cur_tx)
 			break;
 
+		if (fep->bufdesc_ex)
+			index = (struct bufdesc_ex *)bdp -
+				(struct bufdesc_ex *)fep->tx_bd_base;
+		else
+			index = bdp - fep->tx_bd_base;
+
 		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
 				FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
 		bdp->cbd_bufaddr = 0;
 
-		skb = fep->tx_skbuff[fep->skb_dirty];
+		skb = fep->tx_skbuff[index];
 
 		/* Check for errors. */
 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			      BD_ENET_TX_RL | BD_ENET_TX_UN |
@@ -631,8 +634,9 @@ fec_enet_tx(struct net_device *ndev)
 
 		/* Free the sk buffer associated with this last transmit */
 		dev_kfree_skb_any(skb);
-		fep->tx_skbuff[fep->skb_dirty] = NULL;
-		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
+		fep->tx_skbuff[index] = NULL;
+
+		fep->dirty_tx = bdp;
 
 		/* Update pointer to next buffer descriptor to be transmitted */
 		if (status & BD_ENET_TX_WRAP)
@@ -642,14 +646,12 @@ fec_enet_tx(struct net_device *ndev)
 
 		/* Since we have freed up a buffer, the ring is no longer full
 		 */
-		if (fep->tx_full) {
-			fep->tx_full = 0;
+		if (fep->dirty_tx != fep->cur_tx) {
 			if (netif_queue_stopped(ndev))
 				netif_wake_queue(ndev);
 		}
 	}
-	fep->dirty_tx = bdp;
-	spin_unlock(&fep->hw_lock);
 	return;
 }
 
@@ -816,7 +818,7 @@ fec_enet_interrupt(int irq, void *dev_id)
 		int_events = readl(fep->hwp + FEC_IEVENT);
 		writel(int_events, fep->hwp + FEC_IEVENT);
 
-		if (int_events & FEC_ENET_RXF) {
+		if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
 			ret = IRQ_HANDLED;
 
 			/* Disable the RX interrupt */
@@ -827,15 +829,6 @@ fec_enet_interrupt(int irq, void *dev_id)
 			}
 		}
 
-		/* Transmit OK, or non-fatal error. Update the buffer
-		 * descriptors. FEC handles all errors, we just discover
-		 * them as part of the transmit process.
-		 */
-		if (int_events & FEC_ENET_TXF) {
-			ret = IRQ_HANDLED;
-			fec_enet_tx(ndev);
-		}
-
 		if (int_events & FEC_ENET_MII) {
 			ret = IRQ_HANDLED;
 			complete(&fep->mdio_done);
@@ -851,6 +844,8 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
 	int pkts = fec_enet_rx(ndev, budget);
 	struct fec_enet_private *fep = netdev_priv(ndev);
 
+	fec_enet_tx(ndev);
+
 	if (pkts < budget) {
 		napi_complete(napi);
 		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1646,6 +1641,7 @@ static int fec_enet_init(struct net_device *ndev)
 
 	/* ...and the same for transmit */
 	bdp = fep->tx_bd_base;
+	fep->cur_tx = bdp;
 	for (i = 0; i < TX_RING_SIZE; i++) {
 
 		/* Initialize the BD for every fragment in the page. */
@@ -1657,6 +1653,7 @@ static int fec_enet_init(struct net_device *ndev)
 	/* Set the last buffer to wrap */
 	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
 	bdp->cbd_sc |= BD_SC_WRAP;
+	fep->dirty_tx = bdp;
 
 	fec_restart(ndev, 0);
 
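Item 19 in the log above corresponds to the fec hunks just shown: TX completion moves out of the hard interrupt handler and into the NAPI poll routine, which removes the lock contention between the xmit path and the reclaim path. A minimal sketch of that driver shape, assuming hypothetical foo_* helpers rather than the fec driver's real symbols:

#include <linux/netdevice.h>

struct foo_priv {
	struct napi_struct napi;
	/* ring state elided */
};

static int foo_clean_rx_ring(struct foo_priv *priv, int budget);
static void foo_clean_tx_ring(struct foo_priv *priv);
static void foo_enable_interrupts(struct foo_priv *priv);

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int done = foo_clean_rx_ring(priv, budget);

	/* TX reclaim runs here, in softirq context, instead of in the
	 * hard IRQ handler -- the shape the fec deadlock fix moves to.
	 */
	foo_clean_tx_ring(priv);

	if (done < budget) {
		napi_complete(napi);
		foo_enable_interrupts(priv);
	}
	return done;
}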
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
@@ -97,6 +97,13 @@ struct bufdesc {
 	unsigned short cbd_sc;	/* Control and status info */
 	unsigned long cbd_bufaddr;	/* Buffer address */
 };
+#else
+struct bufdesc {
+	unsigned short	cbd_sc;	/* Control and status info */
+	unsigned short	cbd_datlen;	/* Data length */
+	unsigned long	cbd_bufaddr;	/* Buffer address */
+};
+#endif
 
 struct bufdesc_ex {
 	struct bufdesc desc;
@@ -107,14 +114,6 @@ struct bufdesc_ex {
 	unsigned short res0[4];
 };
 
-#else
-struct bufdesc {
-	unsigned short	cbd_sc;	/* Control and status info */
-	unsigned short	cbd_datlen;	/* Data length */
-	unsigned long	cbd_bufaddr;	/* Buffer address */
-};
-#endif
-
 /*
  * The following definitions courtesy of commproc.h, which where
  * Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
@@ -214,8 +213,6 @@ struct fec_enet_private {
 	unsigned char *tx_bounce[TX_RING_SIZE];
 	struct	sk_buff *tx_skbuff[TX_RING_SIZE];
 	struct	sk_buff *rx_skbuff[RX_RING_SIZE];
-	ushort	skb_cur;
-	ushort	skb_dirty;
 
 	/* CPM dual port RAM relative addresses */
 	dma_addr_t	bd_dma;
@@ -227,7 +224,6 @@ struct fec_enet_private {
 	/* The ring entries to be free()ed */
 	struct bufdesc	*dirty_tx;
 
-	uint	tx_full;
 	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
 	spinlock_t hw_lock;
 
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
@@ -4765,8 +4765,10 @@ static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
 
 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 
-	rtl_tx_performance_tweak(pdev,
-		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
+	if (tp->dev->mtu <= ETH_DATA_LEN) {
+		rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) |
+					 PCI_EXP_DEVCTL_NOSNOOP_EN);
+	}
 }
 
 static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
@@ -4789,7 +4791,8 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
 
 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 
-	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+	if (tp->dev->mtu <= ETH_DATA_LEN)
+		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
 	rtl_disable_clock_request(pdev);
 
@@ -4822,7 +4825,8 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
 
 	RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
 
-	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+	if (tp->dev->mtu <= ETH_DATA_LEN)
+		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }
@@ -4841,7 +4845,8 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
 
 	RTL_W8(MaxTxPacketSize, TxPacketMax);
 
-	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+	if (tp->dev->mtu <= ETH_DATA_LEN)
+		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }
@@ -4901,7 +4906,8 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp)
 
 	RTL_W8(MaxTxPacketSize, TxPacketMax);
 
-	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+	if (tp->dev->mtu <= ETH_DATA_LEN)
+		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
 	RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
 }
@@ -4913,7 +4919,8 @@ static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
 
 	rtl_csi_access_enable_1(tp);
 
-	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+	if (tp->dev->mtu <= ETH_DATA_LEN)
+		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
 	RTL_W8(MaxTxPacketSize, TxPacketMax);
 
@@ -4972,7 +4979,8 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
 
 	rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
 
-	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+	if (tp->dev->mtu <= ETH_DATA_LEN)
+		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
 	RTL_W8(MaxTxPacketSize, TxPacketMax);
 
@@ -4998,7 +5006,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
 
 	rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
 
-	rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+	if (tp->dev->mtu <= ETH_DATA_LEN)
+		rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
 
 	rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
 	rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
@@ -779,6 +779,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 					tx_queue->txd.entries);
 	}
 
+	efx_device_detach_sync(efx);
 	efx_stop_all(efx);
 	efx_stop_interrupts(efx, true);
 
@@ -832,6 +833,7 @@ out:
 
 	efx_start_interrupts(efx, true);
 	efx_start_all(efx);
+	netif_device_attach(efx->net_dev);
 	return rc;
 
 rollback:
@@ -1641,8 +1643,12 @@ static void efx_stop_all(struct efx_nic *efx)
 	/* Flush efx_mac_work(), refill_workqueue, monitor_work */
 	efx_flush_all(efx);
 
-	/* Stop the kernel transmit interface late, so the watchdog
-	 * timer isn't ticking over the flush */
+	/* Stop the kernel transmit interface.  This is only valid if
+	 * the device is stopped or detached; otherwise the watchdog
+	 * may fire immediately.
+	 */
+	WARN_ON(netif_running(efx->net_dev) &&
+		netif_device_present(efx->net_dev));
 	netif_tx_disable(efx->net_dev);
 
 	efx_stop_datapath(efx);
@@ -1963,16 +1969,18 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
 	if (new_mtu > EFX_MAX_MTU)
 		return -EINVAL;
 
-	efx_stop_all(efx);
-
 	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
 
+	efx_device_detach_sync(efx);
+	efx_stop_all(efx);
+
 	mutex_lock(&efx->mac_lock);
 	net_dev->mtu = new_mtu;
 	efx->type->reconfigure_mac(efx);
 	mutex_unlock(&efx->mac_lock);
 
 	efx_start_all(efx);
+	netif_device_attach(efx->net_dev);
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
@@ -210,6 +210,7 @@ struct efx_tx_queue {
  *	Will be %NULL if the buffer slot is currently free.
  * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE.
  *	Will be %NULL if the buffer slot is currently free.
+ * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
  * @len: Buffer length, in bytes.
  * @flags: Flags for buffer and packet state.
  */
@@ -219,7 +220,8 @@ struct efx_rx_buffer {
 		struct sk_buff *skb;
 		struct page *page;
 	} u;
-	unsigned int len;
+	u16 page_offset;
+	u16 len;
 	u16 flags;
 };
 #define EFX_RX_BUF_PAGE		0x0001
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
@@ -90,11 +90,7 @@ static unsigned int rx_refill_threshold;
 static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
 					     struct efx_rx_buffer *buf)
 {
-	/* Offset is always within one page, so we don't need to consider
-	 * the page order.
-	 */
-	return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
-		efx->type->rx_buffer_hash_size;
+	return buf->page_offset + efx->type->rx_buffer_hash_size;
 }
 
 static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
 {
@@ -187,6 +183,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 	struct efx_nic *efx = rx_queue->efx;
 	struct efx_rx_buffer *rx_buf;
 	struct page *page;
+	unsigned int page_offset;
 	struct efx_rx_page_state *state;
 	dma_addr_t dma_addr;
 	unsigned index, count;
@@ -211,12 +208,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 		state->dma_addr = dma_addr;
 
 		dma_addr += sizeof(struct efx_rx_page_state);
+		page_offset = sizeof(struct efx_rx_page_state);
 
 	split:
 		index = rx_queue->added_count & rx_queue->ptr_mask;
 		rx_buf = efx_rx_buffer(rx_queue, index);
 		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
 		rx_buf->u.page = page;
+		rx_buf->page_offset = page_offset;
 		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
 		rx_buf->flags = EFX_RX_BUF_PAGE;
 		++rx_queue->added_count;
@@ -227,6 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 			/* Use the second half of the page */
 			get_page(page);
 			dma_addr += (PAGE_SIZE >> 1);
+			page_offset += (PAGE_SIZE >> 1);
 			++count;
 			goto split;
 		}
@@ -236,7 +236,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 }
 
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
-				struct efx_rx_buffer *rx_buf)
+				struct efx_rx_buffer *rx_buf,
+				unsigned int used_len)
 {
 	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
 		struct efx_rx_page_state *state;
@@ -247,6 +248,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 				       state->dma_addr,
 				       efx_rx_buf_size(efx),
 				       DMA_FROM_DEVICE);
+		} else if (used_len) {
+			dma_sync_single_for_cpu(&efx->pci_dev->dev,
+						rx_buf->dma_addr, used_len,
+						DMA_FROM_DEVICE);
 		}
 	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
 		dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
@@ -269,7 +274,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
 			       struct efx_rx_buffer *rx_buf)
 {
-	efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+	efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
 	efx_free_rx_buffer(rx_queue->efx, rx_buf);
 }
 
@@ -535,10 +540,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 		goto out;
 	}
 
-	/* Release card resources - assumes all RX buffers consumed in-order
-	 * per RX queue
+	/* Release and/or sync DMA mapping - assumes all RX buffers
+	 * consumed in-order per RX queue
 	 */
-	efx_unmap_rx_buffer(efx, rx_buf);
+	efx_unmap_rx_buffer(efx, rx_buf, len);
 
 	/* Prefetch nice and early so data will (hopefully) be in cache by
 	 * the time we look at it.
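The sfc hunks above pass the received length down so the driver can sync only the bytes the NIC actually wrote before the CPU reads them. A hedged sketch of that DMA rule in isolation (demo_sync_rx is an illustrative name, not an sfc symbol):

#include <linux/dma-mapping.h>

/* Illustrative only: before the CPU reads a DMA_FROM_DEVICE buffer that
 * stays mapped (as the recycled RX buffers above do), ownership of the
 * written bytes must be handed back to the CPU.  Syncing just
 * 'used_len' keeps the cache maintenance proportional to packet size.
 */
static void demo_sync_rx(struct device *dev, dma_addr_t addr,
			 unsigned int used_len)
{
	if (used_len)
		dma_sync_single_for_cpu(dev, addr, used_len,
					DMA_FROM_DEVICE);
}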
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
@@ -731,7 +731,7 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
 
 	writel(vlan, &priv->host_port_regs->port_vlan);
 
-	for (i = 0; i < 2; i++)
+	for (i = 0; i < priv->data.slaves; i++)
 		slave_write(priv->slaves + i, vlan, reg);
 
 	cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
@@ -257,8 +257,7 @@ static struct phy_driver ksphy_driver[] = {
 	.phy_id		= PHY_ID_KSZ9021,
 	.phy_id_mask	= 0x000ffffe,
 	.name		= "Micrel KSZ9021 Gigabit PHY",
-	.features	= (PHY_GBIT_FEATURES | SUPPORTED_Pause
-				| SUPPORTED_Asym_Pause),
+	.features	= (PHY_GBIT_FEATURES | SUPPORTED_Pause),
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
 	.config_init	= kszphy_config_init,
 	.config_aneg	= genphy_config_aneg,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
@@ -44,13 +44,13 @@ MODULE_LICENSE("GPL");
 
 void phy_device_free(struct phy_device *phydev)
 {
-	kfree(phydev);
+	put_device(&phydev->dev);
 }
 EXPORT_SYMBOL(phy_device_free);
 
 static void phy_device_release(struct device *dev)
 {
-	phy_device_free(to_phy_device(dev));
+	kfree(to_phy_device(dev));
 }
 
 static struct phy_driver genphy_driver;
@@ -201,6 +201,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
 	   there's no driver _already_ loaded. */
 	request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
 
+	device_initialize(&dev->dev);
+
 	return dev;
 }
 EXPORT_SYMBOL(phy_device_create);
@@ -363,9 +365,9 @@ int phy_device_register(struct phy_device *phydev)
 	/* Run all of the fixups for this PHY */
 	phy_scan_fixups(phydev);
 
-	err = device_register(&phydev->dev);
+	err = device_add(&phydev->dev);
 	if (err) {
-		pr_err("phy %d failed to register\n", phydev->addr);
+		pr_err("PHY %d failed to add\n", phydev->addr);
 		goto out;
 	}
 
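The phy_device hunks above are the canonical driver-model lifetime fix: initialize the embedded struct device as soon as the object exists, register it with device_add(), and free the containing object only from the release callback via put_device(). A small self-contained sketch of that pattern, with demo_* placeholder names:

#include <linux/device.h>
#include <linux/slab.h>

struct demo_dev {
	struct device dev;
};

static void demo_release(struct device *dev)
{
	kfree(container_of(dev, struct demo_dev, dev));
}

static struct demo_dev *demo_create(void)
{
	struct demo_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;
	device_initialize(&d->dev);	/* refcount valid from here on */
	d->dev.release = demo_release;
	/* ... a later device_add(&d->dev) would make it visible ... */
	return d;
}

static void demo_destroy(struct demo_dev *d)
{
	put_device(&d->dev);	/* frees via demo_release() at refcount 0 */
}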
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
@@ -156,6 +156,24 @@ config USB_NET_AX8817X
 	  This driver creates an interface named "ethX", where X depends on
 	  what other networking devices you have in use.
 
+config USB_NET_AX88179_178A
+	tristate "ASIX AX88179/178A USB 3.0/2.0 to Gigabit Ethernet"
+	depends on USB_USBNET
+	select CRC32
+	select PHYLIB
+	default y
+	help
+	  This option adds support for ASIX AX88179 based USB 3.0/2.0
+	  to Gigabit Ethernet adapters.
+
+	  This driver should work with at least the following devices:
+	    * ASIX AX88179
+	    * ASIX AX88178A
+	    * Sitcomm LN-032
+
+	  This driver creates an interface named "ethX", where X depends on
+	  what other networking devices you have in use.
+
 config USB_NET_CDCETHER
 	tristate "CDC Ethernet support (smart devices such as cable modems)"
 	depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_USB_RTL8150)	+= rtl8150.o
 obj-$(CONFIG_USB_HSO)		+= hso.o
 obj-$(CONFIG_USB_NET_AX8817X)	+= asix.o
 asix-y := asix_devices.o asix_common.o ax88172a.o
+obj-$(CONFIG_USB_NET_AX88179_178A)	+= ax88179_178a.o
 obj-$(CONFIG_USB_NET_CDCETHER)	+= cdc_ether.o
 obj-$(CONFIG_USB_NET_CDC_EEM)	+= cdc_eem.o
 obj-$(CONFIG_USB_NET_DM9601)	+= dm9601.o
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
@@ -924,6 +924,29 @@ static const struct driver_info ax88178_info = {
 	.tx_fixup = asix_tx_fixup,
 };
 
+/*
+ * USBLINK 20F9 "USB 2.0 LAN" USB ethernet adapter, typically found in
+ * no-name packaging.
+ * USB device strings are:
+ *   1: Manufacturer: USBLINK
+ *   2: Product: HG20F9 USB2.0
+ *   3: Serial: 000003
+ * Appears to be compatible with Asix 88772B.
+ */
+static const struct driver_info hg20f9_info = {
+	.description = "HG20F9 USB 2.0 Ethernet",
+	.bind = ax88772_bind,
+	.unbind = ax88772_unbind,
+	.status = asix_status,
+	.link_reset = ax88772_link_reset,
+	.reset = ax88772_reset,
+	.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+		 FLAG_MULTI_PACKET,
+	.rx_fixup = asix_rx_fixup_common,
+	.tx_fixup = asix_tx_fixup,
+	.data = FLAG_EEPROM_MAC,
+};
+
 extern const struct driver_info ax88172a_info;
 
 static const struct usb_device_id products [] = {
@@ -1063,6 +1086,14 @@ static const struct usb_device_id products [] = {
 	/* ASIX 88172a demo board */
 	USB_DEVICE(0x0b95, 0x172a),
 	.driver_info = (unsigned long) &ax88172a_info,
+}, {
+	/*
+	 * USBLINK HG20F9 "USB 2.0 LAN"
+	 * Appears to have gazumped Linksys's manufacturer ID but
+	 * doesn't (yet) conflict with any known Linksys product.
+	 */
+	USB_DEVICE(0x066b, 0x20f9),
+	.driver_info = (unsigned long) &hg20f9_info,
 },
 	{ },		// END
 };
 drivers/net/usb/ax88179_178a.c | 1448 ++++++++++++++++++++++++++++++++++++++++ (new file)
 (diff suppressed because it is too large)
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
@@ -1213,6 +1213,14 @@ static const struct usb_device_id cdc_devs[] = {
 	  .driver_info = (unsigned long) &wwan_info,
 	},
 
+	/* tag Huawei devices as wwan */
+	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1,
+					USB_CLASS_COMM,
+					USB_CDC_SUBCLASS_NCM,
+					USB_CDC_PROTO_NONE),
+	  .driver_info = (unsigned long)&wwan_info,
+	},
+
 	/* Huawei NCM devices disguised as vendor specific */
 	{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
 	  .driver_info = (unsigned long)&wwan_info,
@@ -27,7 +27,7 @@
 #define WME_MAX_BA              WME_BA_BMP_SIZE
 #define ATH_TID_MAX_BUFS        (2 * WME_MAX_BA)
 
-#define ATH_RSSI_DUMMY_MARKER   0x127
+#define ATH_RSSI_DUMMY_MARKER   127
 #define ATH_RSSI_LPF_LEN        10
 #define RSSI_LPF_THRESHOLD      -20
 #define ATH_RSSI_EP_MULTIPLIER  (1<<7)
@@ -22,6 +22,7 @@
 #include <linux/firmware.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
+#include <linux/etherdevice.h>
 #include <linux/leds.h>
 #include <linux/slab.h>
 #include <net/mac80211.h>
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -1067,15 +1067,19 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
 
 	last_rssi = priv->rx.last_rssi;
 
-	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
-		rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
-						     ATH_RSSI_EP_MULTIPLIER);
+	if (ieee80211_is_beacon(hdr->frame_control) &&
+	    !is_zero_ether_addr(common->curbssid) &&
+	    ether_addr_equal(hdr->addr3, common->curbssid)) {
+		s8 rssi = rxbuf->rxstatus.rs_rssi;
 
-	if (rxbuf->rxstatus.rs_rssi < 0)
-		rxbuf->rxstatus.rs_rssi = 0;
+		if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+			rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
 
-	if (ieee80211_is_beacon(fc))
-		priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
+		if (rssi < 0)
+			rssi = 0;
+
+		priv->ah->stats.avgbrssi = rssi;
+	}
 
 	rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
 	rx_status->band = hw->conf.channel->band;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1463,7 +1463,9 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
 			reset_type = ATH9K_RESET_POWER_ON;
 		else
 			reset_type = ATH9K_RESET_COLD;
-	}
+	} else if (ah->chip_fullsleep || REG_READ(ah, AR_Q_TXE) ||
+		   (REG_READ(ah, AR_CR) & AR_CR_RXE))
+		reset_type = ATH9K_RESET_COLD;
 
 	if (!ath9k_hw_set_reset_reg(ah, reset_type))
 		return false;
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -349,25 +349,23 @@ TRACE_EVENT(iwlwifi_dev_rx_data,
 TRACE_EVENT(iwlwifi_dev_hcmd,
 	TP_PROTO(const struct device *dev,
 		 struct iwl_host_cmd *cmd, u16 total_size,
-		 const void *hdr, size_t hdr_len),
-	TP_ARGS(dev, cmd, total_size, hdr, hdr_len),
+		 struct iwl_cmd_header *hdr),
+	TP_ARGS(dev, cmd, total_size, hdr),
 	TP_STRUCT__entry(
 		DEV_ENTRY
 		__dynamic_array(u8, hcmd, total_size)
 		__field(u32, flags)
 	),
 	TP_fast_assign(
-		int i, offset = hdr_len;
+		int i, offset = sizeof(*hdr);
 
 		DEV_ASSIGN;
 		__entry->flags = cmd->flags;
-		memcpy(__get_dynamic_array(hcmd), hdr, hdr_len);
+		memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
 
 		for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
 			if (!cmd->len[i])
 				continue;
 			if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
 				continue;
 			memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
 			       cmd->data[i], cmd->len[i]);
 			offset += cmd->len[i];
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -136,12 +136,6 @@ struct iwl_calib_res_notif_phy_db {
 	u8 data[];
 } __packed;
 
-#define IWL_PHY_DB_STATIC_PIC cpu_to_le32(0x21436587)
-static inline void iwl_phy_db_test_pic(__le32 pic)
-{
-	WARN_ON(IWL_PHY_DB_STATIC_PIC != pic);
-}
-
 struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans)
 {
 	struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
@@ -260,11 +254,6 @@ int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
 			(size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
 	}
 
-	/* Test PIC */
-	if (type != IWL_PHY_DB_CFG)
-		iwl_phy_db_test_pic(*(((__le32 *)phy_db_notif->data) +
-				      (size / sizeof(__le32)) - 1));
-
 	IWL_DEBUG_INFO(phy_db->trans,
 		       "%s(%d): [PHYDB]SET: Type %d , Size: %d\n",
 		       __func__, __LINE__, type, size);
@@ -372,11 +361,6 @@ int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
 		*size = entry->size;
 	}
 
-	/* Test PIC */
-	if (type != IWL_PHY_DB_CFG)
-		iwl_phy_db_test_pic(*(((__le32 *)*data) +
-				      (*size / sizeof(__le32)) - 1));
-
 	IWL_DEBUG_INFO(phy_db->trans,
 		       "%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
 		       __func__, __LINE__, type, *size);
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -61,6 +61,7 @@
  *
  *****************************************************************************/
 
+#include <linux/etherdevice.h>
 #include <net/cfg80211.h>
 #include <net/ipv6.h>
 #include "iwl-modparams.h"
@@ -192,6 +193,11 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
 					   sizeof(wkc), &wkc);
 		data->error = ret != 0;
 
+		mvm->ptk_ivlen = key->iv_len;
+		mvm->ptk_icvlen = key->icv_len;
+		mvm->gtk_ivlen = key->iv_len;
+		mvm->gtk_icvlen = key->icv_len;
+
 		/* don't upload key again */
 		goto out_unlock;
 	}
@@ -304,9 +310,13 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
 		 */
 		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
 			key->hw_key_idx = 0;
+			mvm->ptk_ivlen = key->iv_len;
+			mvm->ptk_icvlen = key->icv_len;
 		} else {
 			data->gtk_key_idx++;
 			key->hw_key_idx = data->gtk_key_idx;
+			mvm->gtk_ivlen = key->iv_len;
+			mvm->gtk_icvlen = key->icv_len;
 		}
 
 		ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true);
@@ -649,6 +659,11 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 	/* We reprogram keys and shouldn't allocate new key indices */
 	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
 
+	mvm->ptk_ivlen = 0;
+	mvm->ptk_icvlen = 0;
+	mvm->ptk_ivlen = 0;
+	mvm->ptk_icvlen = 0;
+
 	/*
 	 * The D3 firmware still hardcodes the AP station ID for the
 	 * BSS we're associated with as 0. As a result, we have to move
@@ -783,7 +798,6 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
 	struct iwl_wowlan_status *status;
 	u32 reasons;
 	int ret, len;
-	bool pkt8023 = false;
 	struct sk_buff *pkt = NULL;
 
 	iwl_trans_read_mem_bytes(mvm->trans, base,
@@ -824,7 +838,8 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
 	status = (void *)cmd.resp_pkt->data;
 
 	if (len - sizeof(struct iwl_cmd_header) !=
-	    sizeof(*status) + le32_to_cpu(status->wake_packet_bufsize)) {
+	    sizeof(*status) +
+	    ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
 		IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
 		goto out;
 	}
@@ -836,61 +851,96 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
 		goto report;
 	}
 
-	if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) {
+	if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
 		wakeup.magic_pkt = true;
-		pkt8023 = true;
-	}
 
-	if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) {
+	if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
 		wakeup.pattern_idx =
 			le16_to_cpu(status->pattern_number);
-		pkt8023 = true;
-	}
 
 	if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
 		       IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
 		wakeup.disconnect = true;
 
-	if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) {
+	if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
 		wakeup.gtk_rekey_failure = true;
-		pkt8023 = true;
-	}
 
-	if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) {
+	if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
 		wakeup.rfkill_release = true;
-		pkt8023 = true;
-	}
 
-	if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) {
+	if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
 		wakeup.eap_identity_req = true;
-		pkt8023 = true;
-	}
 
-	if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) {
+	if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
 		wakeup.four_way_handshake = true;
-		pkt8023 = true;
-	}
 
 	if (status->wake_packet_bufsize) {
-		u32 pktsize = le32_to_cpu(status->wake_packet_bufsize);
-		u32 pktlen = le32_to_cpu(status->wake_packet_length);
+		int pktsize = le32_to_cpu(status->wake_packet_bufsize);
+		int pktlen = le32_to_cpu(status->wake_packet_length);
+		const u8 *pktdata = status->wake_packet;
+		struct ieee80211_hdr *hdr = (void *)pktdata;
+		int truncated = pktlen - pktsize;
+
+		/* this would be a firmware bug */
+		if (WARN_ON_ONCE(truncated < 0))
+			truncated = 0;
+
+		if (ieee80211_is_data(hdr->frame_control)) {
+			int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+			int ivlen = 0, icvlen = 4; /* also FCS */
 
-		if (pkt8023) {
 			pkt = alloc_skb(pktsize, GFP_KERNEL);
 			if (!pkt)
 				goto report;
-			memcpy(skb_put(pkt, pktsize), status->wake_packet,
-			       pktsize);
+
+			memcpy(skb_put(pkt, hdrlen), pktdata, hdrlen);
+			pktdata += hdrlen;
+			pktsize -= hdrlen;
+
+			if (ieee80211_has_protected(hdr->frame_control)) {
+				if (is_multicast_ether_addr(hdr->addr1)) {
+					ivlen = mvm->gtk_ivlen;
+					icvlen += mvm->gtk_icvlen;
+				} else {
+					ivlen = mvm->ptk_ivlen;
+					icvlen += mvm->ptk_icvlen;
+				}
+			}
+
+			/* if truncated, FCS/ICV is (partially) gone */
+			if (truncated >= icvlen) {
+				icvlen = 0;
+				truncated -= icvlen;
+			} else {
+				icvlen -= truncated;
+				truncated = 0;
+			}
+
+			pktsize -= ivlen + icvlen;
+			pktdata += ivlen;
+
+			memcpy(skb_put(pkt, pktsize), pktdata, pktsize);
+
 			if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
 				goto report;
 			wakeup.packet = pkt->data;
 			wakeup.packet_present_len = pkt->len;
-			wakeup.packet_len = pkt->len - (pktlen - pktsize);
+			wakeup.packet_len = pkt->len - truncated;
 			wakeup.packet_80211 = false;
 		} else {
+			int fcslen = 4;
+
+			if (truncated >= 4) {
+				truncated -= 4;
+				fcslen = 0;
+			} else {
+				fcslen -= truncated;
+				truncated = 0;
+			}
+			pktsize -= fcslen;
 			wakeup.packet = status->wake_packet;
 			wakeup.packet_present_len = pktsize;
-			wakeup.packet_len = pktlen;
+			wakeup.packet_len = pktlen - truncated;
 			wakeup.packet_80211 = true;
 		}
 	}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -557,11 +557,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
 	return ret;
 }
 
-static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
-					 struct ieee80211_vif *vif)
+static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
+					struct ieee80211_vif *vif)
 {
-	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	u32 tfd_msk = 0, ac;
 
 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
@@ -594,12 +592,21 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
 		 */
 		flush_work(&mvm->sta_drained_wk);
 	}
+}
+
+static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
+					 struct ieee80211_vif *vif)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	iwl_mvm_prepare_mac_removal(mvm, vif);
 
 	mutex_lock(&mvm->mutex);
 
 	/*
 	 * For AP/GO interface, the tear down of the resources allocated to the
-	 * interface should be handled as part of the bss_info_changed flow.
+	 * interface is be handled as part of the stop_ap flow.
 	 */
 	if (vif->type == NL80211_IFTYPE_AP) {
 		iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
@@ -763,6 +770,8 @@ static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
+	iwl_mvm_prepare_mac_removal(mvm, vif);
+
 	mutex_lock(&mvm->mutex);
 
 	mvmvif->ap_active = false;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -327,6 +327,10 @@ struct iwl_mvm {
 	struct led_classdev led;
 
 	struct ieee80211_vif *p2p_device_vif;
+
+#ifdef CONFIG_PM_SLEEP
+	int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
+#endif
 };
 
 /* Extract MVM priv from op_mode and _hw */
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -182,6 +182,15 @@ struct iwl_queue {
 #define TFD_TX_CMD_SLOTS 256
 #define TFD_CMD_SLOTS 32
 
+/*
+ * The FH will write back to the first TB only, so we need
+ * to copy some data into the buffer regardless of whether
+ * it should be mapped or not. This indicates how much to
+ * copy, even for HCMDs it must be big enough to fit the
+ * DRAM scratch from the TX cmd, at least 16 bytes.
+ */
+#define IWL_HCMD_MIN_COPY_SIZE	16
+
 struct iwl_pcie_txq_entry {
 	struct iwl_device_cmd *cmd;
 	struct iwl_device_cmd *copy_cmd;
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1152,10 +1152,12 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	void *dup_buf = NULL;
 	dma_addr_t phys_addr;
 	int idx;
-	u16 copy_size, cmd_size;
+	u16 copy_size, cmd_size, dma_size;
 	bool had_nocopy = false;
 	int i;
 	u32 cmd_pos;
+	const u8 *cmddata[IWL_MAX_CMD_TFDS];
+	u16 cmdlen[IWL_MAX_CMD_TFDS];
 
 	copy_size = sizeof(out_cmd->hdr);
 	cmd_size = sizeof(out_cmd->hdr);
@@ -1164,8 +1166,23 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
 
 	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+		cmddata[i] = cmd->data[i];
+		cmdlen[i] = cmd->len[i];
+
 		if (!cmd->len[i])
 			continue;
+
+		/* need at least IWL_HCMD_MIN_COPY_SIZE copied */
+		if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
+			int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
+
+			if (copy > cmdlen[i])
+				copy = cmdlen[i];
+			cmdlen[i] -= copy;
+			cmddata[i] += copy;
+			copy_size += copy;
+		}
+
 		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
 			had_nocopy = true;
 			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
@@ -1185,7 +1202,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 				goto free_dup_buf;
 			}
 
-			dup_buf = kmemdup(cmd->data[i], cmd->len[i],
+			dup_buf = kmemdup(cmddata[i], cmdlen[i],
 					  GFP_ATOMIC);
 			if (!dup_buf)
 				return -ENOMEM;
@@ -1195,7 +1212,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 				idx = -EINVAL;
 				goto free_dup_buf;
 			}
-			copy_size += cmd->len[i];
+			copy_size += cmdlen[i];
 		}
 		cmd_size += cmd->len[i];
 	}
@@ -1242,14 +1259,31 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 
 	/* and copy the data that needs to be copied */
 	cmd_pos = offsetof(struct iwl_device_cmd, payload);
+	copy_size = sizeof(out_cmd->hdr);
 	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
-		if (!cmd->len[i])
+		int copy = 0;
+
+		if (!cmd->len)
 			continue;
-		if (cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
-					 IWL_HCMD_DFL_DUP))
-			break;
-		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
-		cmd_pos += cmd->len[i];
+
+		/* need at least IWL_HCMD_MIN_COPY_SIZE copied */
+		if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
+			copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
+
+			if (copy > cmd->len[i])
+				copy = cmd->len[i];
+		}
+
+		/* copy everything if not nocopy/dup */
+		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+					   IWL_HCMD_DFL_DUP)))
+			copy = cmd->len[i];
+
+		if (copy) {
+			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+			cmd_pos += copy;
+			copy_size += copy;
+		}
 	}
 
 	WARN_ON_ONCE(txq->entries[idx].copy_cmd);
@@ -1275,7 +1309,14 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
 		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
 
-	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
+	/*
+	 * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must
+	 * still map at least that many bytes for the hardware to write back to.
+	 * We have enough space, so that's not a problem.
+	 */
+	dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE);
+
+	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size,
 				   DMA_BIDIRECTIONAL);
 	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
 		idx = -ENOMEM;
@@ -1283,14 +1324,15 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	}
 
 	dma_unmap_addr_set(out_meta, mapping, phys_addr);
-	dma_unmap_len_set(out_meta, len, copy_size);
+	dma_unmap_len_set(out_meta, len, dma_size);
 
 	iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
 
 	/* map the remaining (adjusted) nocopy/dup fragments */
 	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
-		const void *data = cmd->data[i];
+		const void *data = cmddata[i];
 
-		if (!cmd->len[i])
+		if (!cmdlen[i])
 			continue;
 		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
 					   IWL_HCMD_DFL_DUP)))
@@ -1298,7 +1340,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
 			data = dup_buf;
 		phys_addr = dma_map_single(trans->dev, (void *)data,
-					   cmd->len[i], DMA_BIDIRECTIONAL);
+					   cmdlen[i], DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(trans->dev, phys_addr)) {
 			iwl_pcie_tfd_unmap(trans, out_meta,
 					   &txq->tfds[q->write_ptr],
@@ -1307,7 +1349,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 			goto out;
 		}
 
-		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmd->len[i], 0);
+		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], 0);
 	}
 
 	out_meta->flags = cmd->flags;
@@ -1317,8 +1359,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 
 	txq->need_update = 1;
 
-	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size,
-			       &out_cmd->hdr, copy_size);
+	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
 
 	/* start timer if queue currently empty */
 	if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
@@ -825,6 +825,11 @@ static void if_sdio_finish_power_on(struct if_sdio_card *card)
 
 	sdio_release_host(func);
 
+	/* Set fw_ready before queuing any commands so that
+	 * lbs_thread won't block from sending them to firmware.
+	 */
+	priv->fw_ready = 1;
+
 	/*
 	 * FUNC_INIT is required for SD8688 WLAN/BT multiple functions
 	 */
@@ -839,7 +844,6 @@ static void if_sdio_finish_power_on(struct if_sdio_card *card)
 		netdev_alert(priv->dev, "CMD_FUNC_INIT cmd failed\n");
 	}
 
-	priv->fw_ready = 1;
 	wake_up(&card->pwron_waitq);
 
 	if (!card->started) {
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
@@ -302,7 +302,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
 		i++;
 		usleep_range(10, 20);
 		/* 50ms max wait */
-		if (i == 50000)
+		if (i == 5000)
 			break;
 	}
 
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1236,8 +1236,10 @@ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
 	 */
 	if_limit = &rt2x00dev->if_limits_ap;
 	if_limit->max = rt2x00dev->ops->max_ap_intf;
-	if_limit->types = BIT(NL80211_IFTYPE_AP) |
-			  BIT(NL80211_IFTYPE_MESH_POINT);
+	if_limit->types = BIT(NL80211_IFTYPE_AP);
+#ifdef CONFIG_MAC80211_MESH
+	if_limit->types |= BIT(NL80211_IFTYPE_MESH_POINT);
+#endif
 
 	/*
 	 * Build up AP interface combinations structure.
@@ -1309,7 +1311,9 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
 		rt2x00dev->hw->wiphy->interface_modes |=
 		    BIT(NL80211_IFTYPE_ADHOC) |
 		    BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
 		    BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
 		    BIT(NL80211_IFTYPE_WDS);
 
 	rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
diff --git a/include/net/netfilter/ipset/ip_set_ahash.h b/include/net/netfilter/ipset/ip_set_ahash.h
@@ -854,6 +854,8 @@ type_pf_tresize(struct ip_set *set, bool retried)
 retry:
 	ret = 0;
 	htable_bits++;
+	pr_debug("attempt to resize set %s from %u to %u, t %p\n",
+		 set->name, orig->htable_bits, htable_bits, orig);
 	if (!htable_bits) {
 		/* In case we have plenty of memory :-) */
 		pr_warning("Cannot increase the hashsize of set %s further\n",
@@ -873,7 +875,7 @@ retry:
 			data = ahash_tdata(n, j);
 			m = hbucket(t, HKEY(data, h->initval, htable_bits));
 			ret = type_pf_elem_tadd(m, data, AHASH_MAX(h), 0,
-						type_pf_data_timeout(data));
+					ip_set_timeout_get(type_pf_data_timeout(data)));
 			if (ret < 0) {
 				read_unlock_bh(&set->lock);
 				ahash_destroy(t);
@@ -1045,6 +1045,10 @@ static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 	if (sysctl_tcp_low_latency || !tp->ucopy.task)
 		return false;

+	if (skb->len <= tcp_hdrlen(skb) &&
+	    skb_queue_len(&tp->ucopy.prequeue) == 0)
+		return false;
+
 	__skb_queue_tail(&tp->ucopy.prequeue, skb);
 	tp->ucopy.memory += skb->truesize;
 	if (tp->ucopy.memory > sk->sk_rcvbuf) {
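As this hunk reads, a segment carrying no payload (skb->len <= tcp_hdrlen(skb), i.e. a pure ACK) that arrives while the prequeue is empty now bypasses the prequeue and is processed immediately instead of sitting in the queue waiting for the user task. A simplified standalone sketch of that predicate (hypothetical types, not the kernel structures):

    #include <stdbool.h>
    #include <stddef.h>

    struct seg {
            size_t total_len;       /* skb->len        */
            size_t hdr_len;         /* tcp_hdrlen(skb) */
    };

    /* Queue only segments that carry data, or any segment once the
     * prequeue is already non-empty. */
    static bool should_prequeue(const struct seg *s, size_t queued)
    {
            if (s->total_len <= s->hdr_len && queued == 0)
                    return false;   /* pure ACK on an empty queue */
            return true;
    }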
@@ -118,7 +118,7 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
 	return NULL;
 }

-void caif_flow_cb(struct sk_buff *skb)
+static void caif_flow_cb(struct sk_buff *skb)
 {
 	struct caif_device_entry *caifd;
 	void (*dtor)(struct sk_buff *skb) = NULL;
@@ -81,8 +81,8 @@ static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 		layr->up->ctrlcmd(layr->up, ctrl, layr->id);
 }

-struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
-			      u8 braddr[ETH_ALEN])
+static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
+				     u8 braddr[ETH_ALEN])
 {
 	struct cfusbl *this = kmalloc(sizeof(struct cfusbl), GFP_ATOMIC);

@@ -228,9 +228,11 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
 				icmp_send(skb, ICMP_DEST_UNREACH,
 					  ICMP_PROT_UNREACH, 0);
 			}
-		} else
+			kfree_skb(skb);
+		} else {
 			IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
-		kfree_skb(skb);
+			consume_skb(skb);
+		}
 	}
 }
 out:
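The point of this hunk, the dropwatch false-positive fix for ip_local_deliver_finish(), is which free helper runs on each path: both release the buffer, but kfree_skb() is the path that drop-monitoring tools count as a packet drop, while consume_skb() signals normal completion. Successfully delivered packets are therefore freed with consume_skb(), and only genuinely undeliverable ones still go through kfree_skb(). The two helpers (real kernel APIs):

    void kfree_skb(struct sk_buff *skb);    /* free + report as a drop   */
    void consume_skb(struct sk_buff *skb);  /* free as normal completion */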
@@ -5485,6 +5485,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			if (tcp_checksum_complete_user(sk, skb))
 				goto csum_error;

+			if ((int)skb->truesize > sk->sk_forward_alloc)
+				goto step5;
+
 			/* Predicted packet is in window by definition.
 			 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
 			 * Hence, check seq<=rcv_wup reduces to:
@@ -5496,9 +5499,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,

 			tcp_rcv_rtt_measure_ts(sk, skb);

-			if ((int)skb->truesize > sk->sk_forward_alloc)
-				goto step5;
-
 			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);

 			/* Bulk data transfer: receiver */
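Why the reordering matters: previously the fast path called tcp_rcv_rtt_measure_ts() and could still bail out to step5 (the slow path) on the sk_forward_alloc check, where the same segment's RTT was measured a second time, skewing receive-buffer autotuning. With the memory check hoisted above the measurement, each segment contributes at most one sample. Ordering sketch (illustrative comments only):

    /* before: measure_rtt(); if (low_mem) goto slow_path;  -> slow path measures again */
    /* after:  if (low_mem) goto slow_path; measure_rtt();  -> exactly one sample       */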
@@ -241,9 +241,11 @@ resubmit:
 				icmpv6_send(skb, ICMPV6_PARAMPROB,
 					    ICMPV6_UNK_NEXTHDR, nhoff);
 			}
-		} else
+			kfree_skb(skb);
+		} else {
 			IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
-		kfree_skb(skb);
+			consume_skb(skb);
+		}
 	}
 	rcu_read_unlock();
 	return 0;
@@ -1915,7 +1915,8 @@ void rt6_purge_dflt_routers(struct net *net)
 restart:
 	read_lock_bh(&table->tb6_lock);
 	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
-		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
+		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
+		    (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
 			dst_hold(&rt->dst);
 			read_unlock_bh(&table->tb6_lock);
 			ip6_del_rt(rt);
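Background: accept_ra=2 tells an interface to accept Router Advertisements even with forwarding enabled, so an RA-learned default route on such an interface is intentional and must survive the purge. A reading of the new keep/purge decision as a standalone sketch (kernel types, illustrative only):

    static bool should_purge(unsigned int rt_flags, const struct inet6_dev *idev)
    {
            if (!(rt_flags & (RTF_DEFAULT | RTF_ADDRCONF)))
                    return false;                   /* not an RA/default route */
            return !idev || idev->cnf.accept_ra != 2; /* spare accept_ra == 2  */
    }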
@@ -495,8 +495,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
 	/* case CS_ISO_8859_9: */
 	/* case CS_UNICODE: */
 	default:
-		IRDA_DEBUG(0, "%s(), charset %s, not supported\n",
-			   __func__, ias_charset_types[charset]);
+		IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n",
+			   __func__, charset,
+			   charset < ARRAY_SIZE(ias_charset_types) ?
+					ias_charset_types[charset] :
+					"(unknown)");

 		/* Aborting, close connection! */
 		iriap_disconnect_request(self);
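The old message indexed ias_charset_types[] with a charset value received from the peer, so an out-of-range value read past the end of the table; the rewritten message prints the raw number and only dereferences the table after a bounds check. The guarded-lookup pattern in isolation, as a userspace sketch with an illustrative table:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *const charset_names[] = { "ASCII", "ISO-8859-1", "UNICODE" };

    /* Never index a table with an untrusted value without a range check. */
    static const char *charset_name(unsigned int cs)
    {
            return cs < ARRAY_SIZE(charset_names) ? charset_names[cs] : "(unknown)";
    }

    int main(void)
    {
            printf("%s\n", charset_name(7)); /* prints "(unknown)" */
            return 0;
    }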
@@ -355,6 +355,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
 	l2tp_xmit_skb(session, skb, session->hdr_len);

 	sock_put(ps->tunnel_sock);
+	sock_put(sk);

 	return error;

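The one-line fix above restores refcount balance: pppol2tp_sendmsg() takes references on both the tunnel socket and sk on entry, and the error paths released both, but the success path released only the tunnel socket, leaking one sk reference per successful send. The invariant in miniature (self-contained sketch, hypothetical object type):

    #include <stdatomic.h>

    struct obj { atomic_int refs; };

    static void obj_hold(struct obj *o) { atomic_fetch_add(&o->refs, 1); }
    static void obj_put(struct obj *o)  { atomic_fetch_sub(&o->refs, 1); }

    /* A reference taken on entry must be dropped on every exit path,
     * success included; the bug dropped it only on errors. */
    static int do_send(struct obj *sk, int (*transmit)(struct obj *))
    {
            int err;

            obj_hold(sk);
            err = transmit(sk);
            obj_put(sk);
            return err;
    }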
@@ -3285,13 +3285,19 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
 				     struct cfg80211_chan_def *chandef)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+	struct ieee80211_local *local = wiphy_priv(wiphy);
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	int ret = -ENODATA;

 	rcu_read_lock();
-	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-	if (chanctx_conf) {
-		*chandef = chanctx_conf->def;
+	if (local->use_chanctx) {
+		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+		if (chanctx_conf) {
+			*chandef = chanctx_conf->def;
+			ret = 0;
+		}
+	} else if (local->open_count == local->monitors) {
+		*chandef = local->monitor_chandef;
 		ret = 0;
 	}
 	rcu_read_unlock();
@@ -107,7 +107,7 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)

 	lockdep_assert_held(&local->mtx);

-	active = !list_empty(&local->chanctx_list);
+	active = !list_empty(&local->chanctx_list) || local->monitors;

 	if (!local->ops->remain_on_channel) {
 		list_for_each_entry(roc, &local->roc_list, list) {
@@ -1231,34 +1231,40 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
 		if (local->queue_stop_reasons[q] ||
 		    (!txpending && !skb_queue_empty(&local->pending[q]))) {
 			if (unlikely(info->flags &
-				     IEEE80211_TX_INTFL_OFFCHAN_TX_OK &&
-				     local->queue_stop_reasons[q] &
-				     ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL))) {
+				     IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) {
+				if (local->queue_stop_reasons[q] &
+				    ~BIT(IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL)) {
+					/*
+					 * Drop off-channel frames if queues
+					 * are stopped for any reason other
+					 * than off-channel operation. Never
+					 * queue them.
+					 */
+					spin_unlock_irqrestore(
+						&local->queue_stop_reason_lock,
+						flags);
+					ieee80211_purge_tx_queue(&local->hw,
+								 skbs);
+					return true;
+				}
+			} else {

 				/*
-				 * Drop off-channel frames if queues are stopped
-				 * for any reason other than off-channel
-				 * operation. Never queue them.
+				 * Since queue is stopped, queue up frames for
+				 * later transmission from the tx-pending
+				 * tasklet when the queue is woken again.
 				 */
-				spin_unlock_irqrestore(
-					&local->queue_stop_reason_lock, flags);
-				ieee80211_purge_tx_queue(&local->hw, skbs);
-				return true;
+				if (txpending)
+					skb_queue_splice_init(skbs,
+							      &local->pending[q]);
+				else
+					skb_queue_splice_tail_init(skbs,
+								   &local->pending[q]);
+
+				spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+						       flags);
+				return false;
 			}
-
-			/*
-			 * Since queue is stopped, queue up frames for later
-			 * transmission from the tx-pending tasklet when the
-			 * queue is woken again.
-			 */
-			if (txpending)
-				skb_queue_splice_init(skbs, &local->pending[q]);
-			else
-				skb_queue_splice_tail_init(skbs,
-							   &local->pending[q]);
-
-			spin_unlock_irqrestore(&local->queue_stop_reason_lock,
-					       flags);
-			return false;
 		}
 		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
@@ -1844,9 +1850,24 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
 		}

 		if (!is_multicast_ether_addr(skb->data)) {
+			struct sta_info *next_hop;
+			bool mpp_lookup = true;
+
 			mpath = mesh_path_lookup(sdata, skb->data);
-			if (!mpath)
+			if (mpath) {
+				mpp_lookup = false;
+				next_hop = rcu_dereference(mpath->next_hop);
+				if (!next_hop ||
+				    !(mpath->flags & (MESH_PATH_ACTIVE |
+						      MESH_PATH_RESOLVING)))
+					mpp_lookup = true;
+			}
+
+			if (mpp_lookup)
 				mppath = mpp_path_lookup(sdata, skb->data);
+
+			if (mppath && mpath)
+				mesh_path_del(mpath->sdata, mpath->dst);
 		}

 		/*
@@ -2350,9 +2371,9 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
 	if (local->tim_in_locked_section) {
 		__ieee80211_beacon_add_tim(sdata, ps, skb);
 	} else {
-		spin_lock(&local->tim_lock);
+		spin_lock_bh(&local->tim_lock);
 		__ieee80211_beacon_add_tim(sdata, ps, skb);
-		spin_unlock(&local->tim_lock);
+		spin_unlock_bh(&local->tim_lock);
 	}

 	return 0;
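Why _bh is needed here: local->tim_lock is also taken from softirq context, so process-context users must disable bottom halves while holding it; otherwise a softirq arriving on the same CPU while the lock is held deadlocks on it. The general pattern (illustrative fragment, not the mac80211 code):

    spin_lock_bh(&lock);    /* disable bottom halves on this CPU, then lock */
    /* ... touch state shared with softirq handlers ... */
    spin_unlock_bh(&lock);  /* unlock, then re-enable bottom halves */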
@@ -1470,7 +1470,8 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
 	if (ret == -EAGAIN)
 		ret = 1;

-	return ret < 0 ? ret : ret > 0 ? 0 : -IPSET_ERR_EXIST;
+	return (ret < 0 && ret != -ENOTEMPTY) ? ret :
+		ret > 0 ? 0 : -IPSET_ERR_EXIST;
 }

 /* Get headed data of a set */
@@ -82,10 +82,7 @@ static void rds_message_purge(struct rds_message *rm)
 void rds_message_put(struct rds_message *rm)
 {
 	rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
-	if (atomic_read(&rm->m_refcount) == 0) {
-		printk(KERN_CRIT "danger refcount zero on %p\n", rm);
-		WARN_ON(1);
-	}
+	WARN(!atomic_read(&rm->m_refcount), "danger refcount zero on %p\n", rm);
 	if (atomic_dec_and_test(&rm->m_refcount)) {
 		BUG_ON(!list_empty(&rm->m_sock_item));
 		BUG_ON(!list_empty(&rm->m_conn_item));
@@ -197,6 +194,9 @@ struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
 {
 	struct rds_message *rm;

+	if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message))
+		return NULL;
+
 	rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
 	if (!rm)
 		goto out;
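The added check matters twice over: rds_message_alloc() adds a user-controlled extra_len to the struct size, so without it a huge extra_len could wrap the addition or simply ask kzalloc() for an absurd amount of memory. Writing the comparison with the subtraction on the constant side avoids the wrap entirely. The same idiom as a self-contained userspace sketch (LIMIT stands in for KMALLOC_MAX_SIZE):

    #include <stddef.h>
    #include <stdlib.h>

    #define LIMIT (1UL << 22)   /* illustrative cap */

    struct msg { size_t len; unsigned char payload[]; };

    static struct msg *msg_alloc(size_t extra_len)
    {
            /* "sizeof(struct msg) + extra_len" can wrap;
             * "LIMIT - sizeof(struct msg)" cannot. */
            if (extra_len > LIMIT - sizeof(struct msg))
                    return NULL;
            return calloc(1, sizeof(struct msg) + extra_len);
    }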
@@ -155,7 +155,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,

 	/* SCTP-AUTH extensions*/
 	INIT_LIST_HEAD(&ep->endpoint_shared_keys);
-	null_key = sctp_auth_shkey_create(0, GFP_KERNEL);
+	null_key = sctp_auth_shkey_create(0, gfp);
 	if (!null_key)
 		goto nomem;

@@ -5653,6 +5653,9 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
 	if (len < sizeof(sctp_assoc_t))
 		return -EINVAL;

+	/* Allow the struct to grow and fill in as much as possible */
+	len = min_t(size_t, len, sizeof(sas));
+
 	if (copy_from_user(&sas, optval, len))
 		return -EFAULT;

@@ -5686,9 +5689,6 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
 	/* Mark beginning of a new observation period */
 	asoc->stats.max_obs_rto = asoc->rto_min;

-	/* Allow the struct to grow and fill in as much as possible */
-	len = min_t(size_t, len, sizeof(sas));
-
 	if (put_user(len, optlen))
 		return -EFAULT;

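The bug these two hunks fix is ordering: the clamp of len to sizeof(sas) used to happen only after copy_from_user(), so a larger user-supplied len could overrun the on-stack sas buffer. The rule, distilled from the patch:

    /* Clamp first, then copy; never the other way around. */
    len = min_t(size_t, len, sizeof(sas));
    if (copy_from_user(&sas, optval, len))
            return -EFAULT;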
@@ -41,8 +41,6 @@
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>

-#define MAX_KMALLOC_SIZE 131072
-
 static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in,
 					    __u16 out);

@@ -65,7 +63,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
 	int size;

 	size = sctp_ssnmap_size(in, out);
-	if (size <= MAX_KMALLOC_SIZE)
+	if (size <= KMALLOC_MAX_SIZE)
 		retval = kmalloc(size, gfp);
 	else
 		retval = (struct sctp_ssnmap *)
@@ -82,7 +80,7 @@ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
 	return retval;

 fail_map:
-	if (size <= MAX_KMALLOC_SIZE)
+	if (size <= KMALLOC_MAX_SIZE)
 		kfree(retval);
 	else
 		free_pages((unsigned long)retval, get_order(size));
@@ -124,7 +122,7 @@ void sctp_ssnmap_free(struct sctp_ssnmap *map)
 	int size;

 	size = sctp_ssnmap_size(map->in.len, map->out.len);
-	if (size <= MAX_KMALLOC_SIZE)
+	if (size <= KMALLOC_MAX_SIZE)
 		kfree(map);
 	else
 		free_pages((unsigned long)map, get_order(size));
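Besides replacing the private (and bogus) MAX_KMALLOC_SIZE with the allocator-provided KMALLOC_MAX_SIZE, these hunks illustrate the paired-allocator pattern: small maps come from the slab allocator, large ones straight from the page allocator, and every free site must repeat the same size test so each buffer returns to the allocator it came from. A sketch of the pattern (real kernel APIs, hypothetical wrapper names, simplified flow):

    void *map_alloc(size_t size, gfp_t gfp)
    {
            if (size <= KMALLOC_MAX_SIZE)
                    return kmalloc(size, gfp);
            return (void *)__get_free_pages(gfp, get_order(size));
    }

    void map_free(void *p, size_t size)
    {
            if (size <= KMALLOC_MAX_SIZE)
                    kfree(p);
            else
                    free_pages((unsigned long)p, get_order(size));
    }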
@@ -51,7 +51,7 @@
 static void sctp_tsnmap_update(struct sctp_tsnmap *map);
 static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off,
 				     __u16 len, __u16 *start, __u16 *end);
-static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap);
+static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size);

 /* Initialize a block of memory as a tsnmap.  */
 struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len,
@@ -124,7 +124,7 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,

 	gap = tsn - map->base_tsn;

-	if (gap >= map->len && !sctp_tsnmap_grow(map, gap))
+	if (gap >= map->len && !sctp_tsnmap_grow(map, gap + 1))
 		return -ENOMEM;

 	if (!sctp_tsnmap_has_gap(map) && gap == 0) {
@@ -360,23 +360,24 @@ __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map,
 	return ngaps;
 }

-static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 gap)
+static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size)
 {
 	unsigned long *new;
 	unsigned long inc;
 	u16 len;

-	if (gap >= SCTP_TSN_MAP_SIZE)
+	if (size > SCTP_TSN_MAP_SIZE)
 		return 0;

-	inc = ALIGN((gap - map->len),BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
+	inc = ALIGN((size - map->len), BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT;
 	len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE);

 	new = kzalloc(len>>3, GFP_ATOMIC);
 	if (!new)
 		return 0;

-	bitmap_copy(new, map->tsn_map, map->max_tsn_seen - map->base_tsn);
+	bitmap_copy(new, map->tsn_map,
+		    map->max_tsn_seen - map->cumulative_tsn_ack_point);
 	kfree(map->tsn_map);
 	map->tsn_map = new;
 	map->len = len;
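The renamed parameter documents the actual fix across the two files above: gap is a zero-based bit index, so storing TSN number gap needs a map of at least gap + 1 bits. The caller now passes the required size, and the limit test becomes size > SCTP_TSN_MAP_SIZE, closing the off-by-one at the map boundary. In miniature (grow() is a hypothetical stand-in):

    /* To set bit number `gap`, the bitmap needs gap + 1 bits. */
    u16 needed = gap + 1;
    if (needed > map_len && !grow(map, needed))
            return -ENOMEM;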
@@ -106,6 +106,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 {
 	struct sk_buff_head temp;
 	struct sctp_ulpevent *event;
+	int event_eor = 0;

 	/* Create an event from the incoming chunk. */
 	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
@@ -127,10 +128,12 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	/* Send event to the ULP.  'event' is the sctp_ulpevent for
 	 * very first SKB on the 'temp' list.
 	 */
-	if (event)
+	if (event) {
+		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
 		sctp_ulpq_tail_event(ulpq, event);
+	}

-	return 0;
+	return event_eor;
 }

 /* Add a new event for propagation to the ULP.  */
@@ -540,14 +543,19 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
 		ctsn = cevent->tsn;

 		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+		case SCTP_DATA_FIRST_FRAG:
+			if (!first_frag)
+				return NULL;
+			goto done;
 		case SCTP_DATA_MIDDLE_FRAG:
 			if (!first_frag) {
 				first_frag = pos;
 				next_tsn = ctsn + 1;
 				last_frag = pos;
-			} else if (next_tsn == ctsn)
+			} else if (next_tsn == ctsn) {
 				next_tsn++;
-			else
+				last_frag = pos;
+			} else
 				goto done;
 			break;
 		case SCTP_DATA_LAST_FRAG:
@@ -651,6 +659,14 @@ static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 		} else
 			goto done;
 		break;
+
+	case SCTP_DATA_LAST_FRAG:
+		if (!first_frag)
+			return NULL;
+		else
+			goto done;
+		break;
+
 	default:
 		return NULL;
 	}
@@ -962,20 +978,43 @@ static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
 		struct sk_buff_head *list, __u16 needed)
 {
 	__u16 freed = 0;
-	__u32 tsn;
-	struct sk_buff *skb;
+	__u32 tsn, last_tsn;
+	struct sk_buff *skb, *flist, *last;
 	struct sctp_ulpevent *event;
 	struct sctp_tsnmap *tsnmap;

 	tsnmap = &ulpq->asoc->peer.tsn_map;

-	while ((skb = __skb_dequeue_tail(list)) != NULL) {
-		freed += skb_headlen(skb);
+	while ((skb = skb_peek_tail(list)) != NULL) {
 		event = sctp_skb2event(skb);
 		tsn = event->tsn;

+		/* Don't renege below the Cumulative TSN ACK Point. */
+		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
+			break;
+
+		/* Events in ordering queue may have multiple fragments
+		 * corresponding to additional TSNs.  Sum the total
+		 * freed space; find the last TSN.
+		 */
+		freed += skb_headlen(skb);
+		flist = skb_shinfo(skb)->frag_list;
+		for (last = flist; flist; flist = flist->next) {
+			last = flist;
+			freed += skb_headlen(last);
+		}
+		if (last)
+			last_tsn = sctp_skb2event(last)->tsn;
+		else
+			last_tsn = tsn;
+
+		/* Unlink the event, then renege all applicable TSNs. */
+		__skb_unlink(skb, list);
 		sctp_ulpevent_free(event);
-		sctp_tsnmap_renege(tsnmap, tsn);
+		while (TSN_lte(tsn, last_tsn)) {
+			sctp_tsnmap_renege(tsnmap, tsn);
+			tsn++;
+		}
 		if (freed >= needed)
 			return freed;
 	}
@@ -1002,16 +1041,28 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 	struct sctp_ulpevent *event;
 	struct sctp_association *asoc;
 	struct sctp_sock *sp;
+	__u32 ctsn;
+	struct sk_buff *skb;

 	asoc = ulpq->asoc;
 	sp = sctp_sk(asoc->base.sk);

 	/* If the association is already in Partial Delivery mode
-	 * we have noting to do.
+	 * we have nothing to do.
 	 */
 	if (ulpq->pd_mode)
 		return;

+	/* Data must be at or below the Cumulative TSN ACK Point to
+	 * start partial delivery.
+	 */
+	skb = skb_peek(&asoc->ulpq.reasm);
+	if (skb != NULL) {
+		ctsn = sctp_skb2event(skb)->tsn;
+		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
+			return;
+	}
+
 	/* If the user enabled fragment interleave socket option,
 	 * multiple associations can enter partial delivery.
 	 * Otherwise, we can only enter partial delivery if the
@@ -1054,12 +1105,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 	}
 	/* If able to free enough room, accept this chunk. */
 	if (chunk && (freed >= needed)) {
-		__u32 tsn;
-		tsn = ntohl(chunk->subh.data_hdr->tsn);
-		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
-		sctp_ulpq_tail_data(ulpq, chunk, gfp);
-
-		sctp_ulpq_partial_delivery(ulpq, gfp);
+		int retval;
+		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+		/*
+		 * Enter partial delivery if chunk has not been
+		 * delivered; otherwise, drain the reassembly queue.
+		 */
+		if (retval <= 0)
+			sctp_ulpq_partial_delivery(ulpq, gfp);
+		else if (retval == 1)
+			sctp_ulpq_reasm_drain(ulpq);
 	}

 	sk_mem_reclaim(asoc->base.sk);
@@ -554,16 +554,9 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
 	if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
 	    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS))
 		goto nla_put_failure;
-	if (chan->flags & IEEE80211_CHAN_RADAR) {
-		u32 time = elapsed_jiffies_msecs(chan->dfs_state_entered);
-		if (nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
-			goto nla_put_failure;
-		if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_STATE,
-				chan->dfs_state))
-			goto nla_put_failure;
-		if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_TIME, time))
-			goto nla_put_failure;
-	}
+	if ((chan->flags & IEEE80211_CHAN_RADAR) &&
+	    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
+		goto nla_put_failure;
 	if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) &&
 	    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS))
 		goto nla_put_failure;
@@ -900,9 +893,6 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
 		    nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM,
 				c->max_interfaces))
 			goto nla_put_failure;
-		if (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
-				c->radar_detect_widths))
-			goto nla_put_failure;

 		nla_nest_end(msg, nl_combi);
 	}
@@ -914,48 +904,6 @@ nla_put_failure:
 	return -ENOBUFS;
 }

-#ifdef CONFIG_PM
-static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev,
-					struct sk_buff *msg)
-{
-	const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan.tcp;
-	struct nlattr *nl_tcp;
-
-	if (!tcp)
-		return 0;
-
-	nl_tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION);
-	if (!nl_tcp)
-		return -ENOBUFS;
-
-	if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
-			tcp->data_payload_max))
-		return -ENOBUFS;
-
-	if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
-			tcp->data_payload_max))
-		return -ENOBUFS;
-
-	if (tcp->seq && nla_put_flag(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ))
-		return -ENOBUFS;
-
-	if (tcp->tok && nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN,
-				sizeof(*tcp->tok), tcp->tok))
-		return -ENOBUFS;
-
-	if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL,
-			tcp->data_interval_max))
-		return -ENOBUFS;
-
-	if (nla_put_u32(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD,
-			tcp->wake_payload_max))
-		return -ENOBUFS;
-
-	nla_nest_end(msg, nl_tcp);
-	return 0;
-}
-#endif
-
 static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags,
 			      struct cfg80211_registered_device *dev)
 {
@@ -1330,9 +1278,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
 			goto nla_put_failure;
 		}

-		if (nl80211_send_wowlan_tcp_caps(dev, msg))
-			goto nla_put_failure;
-
 		nla_nest_end(msg, nl_wowlan);
 	}
 #endif