Mirror of https://github.com/joel16/android_kernel_sony_msm8994.git (synced 2024-11-30 15:41:27 +00:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  Phonet: keep TX queue disabled when the device is off
  SCHED: netem: Correct documentation comment in code.
  netfilter: update rwlock initialization for nat_table
  netlabel: Compiler warning and NULL pointer dereference fix
  e1000e: fix double release of mutex
  IA64: HP_SIMETH needs to depend upon NET
  netpoll: fix race on poll_list resulting in garbage entry
  ipv6: silence log messages for locally generated multicast
  sungem: improve ethtool output with internal pcs and serdes
  tcp: tcp_vegas cong avoid fix
  sungem: Make PCS PHY support partially work again.

This commit is contained in:

commit 7004405cb8
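
One of the merged fixes, "netpoll: fix race on poll_list resulting in garbage entry", adds a NAPI_STATE_NPSVC flag: poll_one_napi() sets it around the napi->poll() call, and netif_rx_complete() bails out while it is set, so a completion racing on another cpu cannot unlink the entry from the per-cpu poll_list mid-service (see the netdevice.h enum and poll_one_napi() hunks below). A rough stand-alone sketch of that guard pattern follows, as a plain user-space C model; everything here except the idea of the NPSVC bit is illustrative, and it is not kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Mask form of the NPSVC bit.  The kernel enum stores a bit number
 * (NAPI_STATE_NPSVC) and flips it with set_bit()/clear_bit()/test_bit();
 * this model just uses a mask on one atomic word.
 */
#define NPSVC_BIT (1u << 2)

static _Atomic unsigned int napi_state;

/* Plays the role of poll_one_napi(): mark the context as being serviced
 * by netpoll for the duration of the ->poll() call.
 */
static void netpoll_service(void)
{
	atomic_fetch_or(&napi_state, NPSVC_BIT);
	/* ... napi->poll(napi, budget) would run here ... */
	atomic_fetch_and(&napi_state, ~NPSVC_BIT);
}

/* Plays the role of netif_rx_complete(): refuse to unlink the entry
 * from the per-cpu poll_list while netpoll is servicing it.
 */
static bool rx_complete_allowed(void)
{
	if (atomic_load(&napi_state) & NPSVC_BIT)
		return false;	/* leave poll_list alone */
	/* ... __netif_rx_complete() would unlink the entry here ... */
	return true;
}

int main(void)
{
	printf("complete while idle: %d\n", rx_complete_allowed());
	netpoll_service();
	printf("complete after service: %d\n", rx_complete_allowed());
	return 0;
}

In the actual patch the bit lives in napi->state next to NAPI_STATE_SCHED and NAPI_STATE_DISABLE and is manipulated with set_bit()/clear_bit()/test_bit(), as the hunks below show.
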
@@ -4,6 +4,7 @@ menu "HP Simulator drivers"
 
 config HP_SIMETH
 	bool "Simulated Ethernet "
+	depends on NET
 
 config HP_SIMSERIAL
 	bool "Simulated serial driver support"

@@ -1893,12 +1893,17 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 		ctrl |= E1000_CTRL_PHY_RST;
 	}
 	ret_val = e1000_acquire_swflag_ich8lan(hw);
+	/* Whether or not the swflag was acquired, we need to reset the part */
 	hw_dbg(hw, "Issuing a global reset to ich8lan");
 	ew32(CTRL, (ctrl | E1000_CTRL_RST));
 	msleep(20);
 
-	/* release the swflag because it is not reset by hardware reset */
-	e1000_release_swflag_ich8lan(hw);
+	if (!ret_val) {
+		/* release the swflag because it is not reset by
+		 * hardware reset
+		 */
+		e1000_release_swflag_ich8lan(hw);
+	}
 
 	ret_val = e1000e_get_auto_rd_done(hw);
 	if (ret_val) {

@@ -1142,6 +1142,70 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
+static void gem_pcs_reset(struct gem *gp)
+{
+	int limit;
+	u32 val;
+
+	/* Reset PCS unit. */
+	val = readl(gp->regs + PCS_MIICTRL);
+	val |= PCS_MIICTRL_RST;
+	writel(val, gp->regs + PCS_MIICTRL);
+
+	limit = 32;
+	while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
+		udelay(100);
+		if (limit-- <= 0)
+			break;
+	}
+	if (limit <= 0)
+		printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
+		       gp->dev->name);
+}
+
+static void gem_pcs_reinit_adv(struct gem *gp)
+{
+	u32 val;
+
+	/* Make sure PCS is disabled while changing advertisement
+	 * configuration.
+	 */
+	val = readl(gp->regs + PCS_CFG);
+	val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
+	writel(val, gp->regs + PCS_CFG);
+
+	/* Advertise all capabilities except assymetric
+	 * pause.
+	 */
+	val = readl(gp->regs + PCS_MIIADV);
+	val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
+		PCS_MIIADV_SP | PCS_MIIADV_AP);
+	writel(val, gp->regs + PCS_MIIADV);
+
+	/* Enable and restart auto-negotiation, disable wrapback/loopback,
+	 * and re-enable PCS.
+	 */
+	val = readl(gp->regs + PCS_MIICTRL);
+	val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
+	val &= ~PCS_MIICTRL_WB;
+	writel(val, gp->regs + PCS_MIICTRL);
+
+	val = readl(gp->regs + PCS_CFG);
+	val |= PCS_CFG_ENABLE;
+	writel(val, gp->regs + PCS_CFG);
+
+	/* Make sure serialink loopback is off. The meaning
+	 * of this bit is logically inverted based upon whether
+	 * you are in Serialink or SERDES mode.
+	 */
+	val = readl(gp->regs + PCS_SCTRL);
+	if (gp->phy_type == phy_serialink)
+		val &= ~PCS_SCTRL_LOOP;
+	else
+		val |= PCS_SCTRL_LOOP;
+	writel(val, gp->regs + PCS_SCTRL);
+}
+
 #define STOP_TRIES 32
 
 /* Must be invoked under gp->lock and gp->tx_lock. */
@@ -1168,6 +1232,9 @@ static void gem_reset(struct gem *gp)
 
 	if (limit <= 0)
 		printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
+
+	if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
+		gem_pcs_reinit_adv(gp);
 }
 
 /* Must be invoked under gp->lock and gp->tx_lock. */
@@ -1324,7 +1391,7 @@ static int gem_set_link_modes(struct gem *gp)
 		   gp->phy_type == phy_serdes) {
 		u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
 
-		if (pcs_lpa & PCS_MIIADV_FD)
+		if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
 			full_duplex = 1;
 		speed = SPEED_1000;
 	}
@@ -1488,6 +1555,9 @@ static void gem_link_timer(unsigned long data)
 		val = readl(gp->regs + PCS_MIISTAT);
 
 		if ((val & PCS_MIISTAT_LS) != 0) {
+			if (gp->lstate == link_up)
+				goto restart;
+
 			gp->lstate = link_up;
 			netif_carrier_on(gp->dev);
 			(void)gem_set_link_modes(gp);
@@ -1708,61 +1778,8 @@ static void gem_init_phy(struct gem *gp)
 		if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
 			gp->phy_mii.def->ops->init(&gp->phy_mii);
 	} else {
-		u32 val;
-		int limit;
-
-		/* Reset PCS unit. */
-		val = readl(gp->regs + PCS_MIICTRL);
-		val |= PCS_MIICTRL_RST;
-		writel(val, gp->regs + PCS_MIICTRL);
-
-		limit = 32;
-		while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
-			udelay(100);
-			if (limit-- <= 0)
-				break;
-		}
-		if (limit <= 0)
-			printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
-			       gp->dev->name);
-
-		/* Make sure PCS is disabled while changing advertisement
-		 * configuration.
-		 */
-		val = readl(gp->regs + PCS_CFG);
-		val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
-		writel(val, gp->regs + PCS_CFG);
-
-		/* Advertise all capabilities except assymetric
-		 * pause.
-		 */
-		val = readl(gp->regs + PCS_MIIADV);
-		val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
-			PCS_MIIADV_SP | PCS_MIIADV_AP);
-		writel(val, gp->regs + PCS_MIIADV);
-
-		/* Enable and restart auto-negotiation, disable wrapback/loopback,
-		 * and re-enable PCS.
-		 */
-		val = readl(gp->regs + PCS_MIICTRL);
-		val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
-		val &= ~PCS_MIICTRL_WB;
-		writel(val, gp->regs + PCS_MIICTRL);
-
-		val = readl(gp->regs + PCS_CFG);
-		val |= PCS_CFG_ENABLE;
-		writel(val, gp->regs + PCS_CFG);
-
-		/* Make sure serialink loopback is off. The meaning
-		 * of this bit is logically inverted based upon whether
-		 * you are in Serialink or SERDES mode.
-		 */
-		val = readl(gp->regs + PCS_SCTRL);
-		if (gp->phy_type == phy_serialink)
-			val &= ~PCS_SCTRL_LOOP;
-		else
-			val |= PCS_SCTRL_LOOP;
-		writel(val, gp->regs + PCS_SCTRL);
+		gem_pcs_reset(gp);
+		gem_pcs_reinit_adv(gp);
 	}
 
 	/* Default aneg parameters */
@@ -2680,6 +2697,21 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		cmd->speed = 0;
 		cmd->duplex = cmd->port = cmd->phy_address =
 			cmd->transceiver = cmd->autoneg = 0;
+
+		/* serdes means usually a Fibre connector, with most fixed */
+		if (gp->phy_type == phy_serdes) {
+			cmd->port = PORT_FIBRE;
+			cmd->supported = (SUPPORTED_1000baseT_Half |
+				SUPPORTED_1000baseT_Full |
+				SUPPORTED_FIBRE | SUPPORTED_Autoneg |
+				SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+			cmd->advertising = cmd->supported;
+			cmd->transceiver = XCVR_INTERNAL;
+			if (gp->lstate == link_up)
+				cmd->speed = SPEED_1000;
+			cmd->duplex = DUPLEX_FULL;
+			cmd->autoneg = 1;
+		}
 	}
 	cmd->maxtxpkt = cmd->maxrxpkt = 0;
 

@@ -319,6 +319,7 @@ enum
 {
 	NAPI_STATE_SCHED,	/* Poll is scheduled */
 	NAPI_STATE_DISABLE,	/* Disable pending */
+	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
 };
 
 extern void __napi_schedule(struct napi_struct *n);
@@ -1497,6 +1498,12 @@ static inline void netif_rx_complete(struct net_device *dev,
 {
 	unsigned long flags;
 
+	/*
+	 * don't let napi dequeue from the cpu poll list
+	 * just in case its running on a different cpu
+	 */
+	if (unlikely(test_bit(NAPI_STATE_NPSVC, &napi->state)))
+		return;
 	local_irq_save(flags);
 	__netif_rx_complete(dev, napi);
 	local_irq_restore(flags);

@@ -133,9 +133,11 @@ static int poll_one_napi(struct netpoll_info *npinfo,
 
 	npinfo->rx_flags |= NETPOLL_RX_DROP;
 	atomic_inc(&trapped);
+	set_bit(NAPI_STATE_NPSVC, &napi->state);
 
 	work = napi->poll(napi, budget);
 
+	clear_bit(NAPI_STATE_NPSVC, &napi->state);
 	atomic_dec(&trapped);
 	npinfo->rx_flags &= ~NETPOLL_RX_DROP;
 

@@ -61,7 +61,7 @@ static struct
 static struct xt_table nat_table = {
 	.name		= "nat",
 	.valid_hooks	= NAT_VALID_HOOKS,
-	.lock		= __RW_LOCK_UNLOCKED(__nat_table.lock),
+	.lock		= __RW_LOCK_UNLOCKED(nat_table.lock),
 	.me		= THIS_MODULE,
 	.af		= AF_INET,
 };

@@ -40,18 +40,14 @@
 
 #include "tcp_vegas.h"
 
-/* Default values of the Vegas variables, in fixed-point representation
- * with V_PARAM_SHIFT bits to the right of the binary point.
- */
-#define V_PARAM_SHIFT 1
-static int alpha = 2<<V_PARAM_SHIFT;
-static int beta  = 4<<V_PARAM_SHIFT;
-static int gamma = 1<<V_PARAM_SHIFT;
+static int alpha = 2;
+static int beta  = 4;
+static int gamma = 1;
 
 module_param(alpha, int, 0644);
-MODULE_PARM_DESC(alpha, "lower bound of packets in network (scale by 2)");
+MODULE_PARM_DESC(alpha, "lower bound of packets in network");
 module_param(beta, int, 0644);
-MODULE_PARM_DESC(beta, "upper bound of packets in network (scale by 2)");
+MODULE_PARM_DESC(beta, "upper bound of packets in network");
 module_param(gamma, int, 0644);
 MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)");
 
@@ -172,49 +168,13 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 		return;
 	}
 
-	/* The key players are v_beg_snd_una and v_beg_snd_nxt.
-	 *
-	 * These are so named because they represent the approximate values
-	 * of snd_una and snd_nxt at the beginning of the current RTT. More
-	 * precisely, they represent the amount of data sent during the RTT.
-	 * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
-	 * we will calculate that (v_beg_snd_nxt - v_beg_snd_una) outstanding
-	 * bytes of data have been ACKed during the course of the RTT, giving
-	 * an "actual" rate of:
-	 *
-	 *     (v_beg_snd_nxt - v_beg_snd_una) / (rtt duration)
-	 *
-	 * Unfortunately, v_beg_snd_una is not exactly equal to snd_una,
-	 * because delayed ACKs can cover more than one segment, so they
-	 * don't line up nicely with the boundaries of RTTs.
-	 *
-	 * Another unfortunate fact of life is that delayed ACKs delay the
-	 * advance of the left edge of our send window, so that the number
-	 * of bytes we send in an RTT is often less than our cwnd will allow.
-	 * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
-	 */
-
 	if (after(ack, vegas->beg_snd_nxt)) {
 		/* Do the Vegas once-per-RTT cwnd adjustment. */
-		u32 old_wnd, old_snd_cwnd;
-
-
-		/* Here old_wnd is essentially the window of data that was
-		 * sent during the previous RTT, and has all
-		 * been acknowledged in the course of the RTT that ended
-		 * with the ACK we just received. Likewise, old_snd_cwnd
-		 * is the cwnd during the previous RTT.
-		 */
-		old_wnd = (vegas->beg_snd_nxt - vegas->beg_snd_una) /
-			tp->mss_cache;
-		old_snd_cwnd = vegas->beg_snd_cwnd;
-
 		/* Save the extent of the current window so we can use this
 		 * at the end of the next RTT.
 		 */
 		vegas->beg_snd_una = vegas->beg_snd_nxt;
 		vegas->beg_snd_nxt = tp->snd_nxt;
-		vegas->beg_snd_cwnd = tp->snd_cwnd;
 
 		/* We do the Vegas calculations only if we got enough RTT
 		 * samples that we can be reasonably sure that we got
@@ -252,22 +212,14 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 			 *
 			 * This is:
 			 *     (actual rate in segments) * baseRTT
-			 * We keep it as a fixed point number with
-			 * V_PARAM_SHIFT bits to the right of the binary point.
 			 */
-			target_cwnd = ((u64)old_wnd * vegas->baseRTT);
-			target_cwnd <<= V_PARAM_SHIFT;
-			do_div(target_cwnd, rtt);
+			target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt;
 
 			/* Calculate the difference between the window we had,
 			 * and the window we would like to have. This quantity
 			 * is the "Diff" from the Arizona Vegas papers.
-			 *
-			 * Again, this is a fixed point number with
-			 * V_PARAM_SHIFT bits to the right of the binary
-			 * point.
 			 */
-			diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;
+			diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
 
 			if (diff > gamma && tp->snd_ssthresh > 2 ) {
 				/* Going too fast. Time to slow down
@@ -282,16 +234,13 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 				 * truncation robs us of full link
 				 * utilization.
 				 */
-				tp->snd_cwnd = min(tp->snd_cwnd,
-						   ((u32)target_cwnd >>
-						    V_PARAM_SHIFT)+1);
+				tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
 
 			} else if (tp->snd_cwnd <= tp->snd_ssthresh) {
 				/* Slow start. */
 				tcp_slow_start(tp);
 			} else {
 				/* Congestion avoidance. */
-				u32 next_snd_cwnd;
 
 				/* Figure out where we would like cwnd
 				 * to be.
@@ -300,26 +249,17 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 					/* The old window was too fast, so
 					 * we slow down.
 					 */
-					next_snd_cwnd = old_snd_cwnd - 1;
+					tp->snd_cwnd--;
 				} else if (diff < alpha) {
 					/* We don't have enough extra packets
 					 * in the network, so speed up.
 					 */
-					next_snd_cwnd = old_snd_cwnd + 1;
+					tp->snd_cwnd++;
 				} else {
 					/* Sending just as fast as we
 					 * should be.
 					 */
-					next_snd_cwnd = old_snd_cwnd;
 				}
-
-				/* Adjust cwnd upward or downward, toward the
-				 * desired value.
-				 */
-				if (next_snd_cwnd > tp->snd_cwnd)
-					tp->snd_cwnd++;
-				else if (next_snd_cwnd < tp->snd_cwnd)
-					tp->snd_cwnd--;
 			}
 
 		if (tp->snd_cwnd < 2)

@@ -912,8 +912,13 @@ static void ndisc_recv_na(struct sk_buff *skb)
 		   is invalid, but ndisc specs say nothing
 		   about it. It could be misconfiguration, or
 		   an smart proxy agent tries to help us :-)
+
+		   We should not print the error if NA has been
+		   received from loopback - it is just our own
+		   unsolicited advertisement.
 		 */
-		ND_PRINTK1(KERN_WARNING
+		if (skb->pkt_type != PACKET_LOOPBACK)
+			ND_PRINTK1(KERN_WARNING
 			   "ICMPv6 NA: someone advertises our address on %s!\n",
 			   ifp->idev->dev->name);
 		in6_ifa_put(ifp);

@@ -562,7 +562,6 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
 				       const struct in_addr *mask,
 				       struct netlbl_audit *audit_info)
 {
-	int ret_val = 0;
 	struct netlbl_af4list *list_entry;
 	struct netlbl_unlhsh_addr4 *entry;
 	struct audit_buffer *audit_buf;
@@ -577,7 +576,7 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
 	if (list_entry != NULL)
 		entry = netlbl_unlhsh_addr4_entry(list_entry);
 	else
-		ret_val = -ENOENT;
+		entry = NULL;
 
 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL,
 					      audit_info);
@@ -588,19 +587,21 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
 				  addr->s_addr, mask->s_addr);
 		if (dev != NULL)
 			dev_put(dev);
-		if (entry && security_secid_to_secctx(entry->secid,
-				&secctx,
-				&secctx_len) == 0) {
+		if (entry != NULL &&
+		    security_secid_to_secctx(entry->secid,
+					     &secctx, &secctx_len) == 0) {
 			audit_log_format(audit_buf, " sec_obj=%s", secctx);
 			security_release_secctx(secctx, secctx_len);
 		}
-		audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
+		audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0);
 		audit_log_end(audit_buf);
 	}
 
-	if (ret_val == 0)
-		call_rcu(&entry->rcu, netlbl_unlhsh_free_addr4);
-	return ret_val;
+	if (entry == NULL)
+		return -ENOENT;
+
+	call_rcu(&entry->rcu, netlbl_unlhsh_free_addr4);
+	return 0;
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -624,7 +625,6 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
 				       const struct in6_addr *mask,
 				       struct netlbl_audit *audit_info)
 {
-	int ret_val = 0;
 	struct netlbl_af6list *list_entry;
 	struct netlbl_unlhsh_addr6 *entry;
 	struct audit_buffer *audit_buf;
@@ -638,7 +638,7 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
 	if (list_entry != NULL)
 		entry = netlbl_unlhsh_addr6_entry(list_entry);
 	else
-		ret_val = -ENOENT;
+		entry = NULL;
 
 	audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL,
 					      audit_info);
@@ -649,19 +649,21 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
 				  addr, mask);
 		if (dev != NULL)
 			dev_put(dev);
-		if (entry && security_secid_to_secctx(entry->secid,
-				&secctx,
-				&secctx_len) == 0) {
+		if (entry != NULL &&
+		    security_secid_to_secctx(entry->secid,
+					     &secctx, &secctx_len) == 0) {
 			audit_log_format(audit_buf, " sec_obj=%s", secctx);
 			security_release_secctx(secctx, secctx_len);
 		}
-		audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
+		audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0);
 		audit_log_end(audit_buf);
 	}
 
-	if (ret_val == 0)
-		call_rcu(&entry->rcu, netlbl_unlhsh_free_addr6);
-	return ret_val;
+	if (entry == NULL)
+		return -ENOENT;
+
+	call_rcu(&entry->rcu, netlbl_unlhsh_free_addr6);
+	return 0;
 }
 #endif /* IPv6 */
 

@@ -155,12 +155,13 @@ static void gprs_data_ready(struct sock *sk, int len)
 static void gprs_write_space(struct sock *sk)
 {
 	struct gprs_dev *dev = sk->sk_user_data;
+	struct net_device *net = dev->net;
 	unsigned credits = pep_writeable(sk);
 
 	spin_lock_bh(&dev->tx_lock);
 	dev->tx_max = credits;
-	if (credits > skb_queue_len(&dev->tx_queue))
-		netif_wake_queue(dev->net);
+	if (credits > skb_queue_len(&dev->tx_queue) && netif_running(net))
+		netif_wake_queue(net);
 	spin_unlock_bh(&dev->tx_lock);
 }
 
@@ -168,6 +169,23 @@ static void gprs_write_space(struct sock *sk)
  * Network device callbacks
  */
 
+static int gprs_open(struct net_device *dev)
+{
+	struct gprs_dev *gp = netdev_priv(dev);
+
+	gprs_write_space(gp->sk);
+	return 0;
+}
+
+static int gprs_close(struct net_device *dev)
+{
+	struct gprs_dev *gp = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	flush_work(&gp->tx_work);
+	return 0;
+}
+
 static int gprs_xmit(struct sk_buff *skb, struct net_device *net)
 {
 	struct gprs_dev *dev = netdev_priv(net);
@@ -254,6 +272,8 @@ static void gprs_setup(struct net_device *net)
 	net->tx_queue_len	= 10;
 
 	net->destructor		= free_netdev;
+	net->open		= gprs_open;
+	net->stop		= gprs_close;
 	net->hard_start_xmit	= gprs_xmit; /* mandatory */
 	net->change_mtu		= gprs_set_mtu;
 	net->get_stats		= gprs_get_stats;
@@ -318,7 +338,6 @@ int gprs_attach(struct sock *sk)
 	dev->sk = sk;
 
 	printk(KERN_DEBUG"%s: attached\n", net->name);
-	gprs_write_space(sk); /* kick off TX */
 	return net->ifindex;
 
 out_rel:
@@ -341,7 +360,5 @@ void gprs_detach(struct sock *sk)
 
 	printk(KERN_DEBUG"%s: detached\n", net->name);
 	unregister_netdev(net);
-	flush_scheduled_work();
 	sock_put(sk);
-	skb_queue_purge(&dev->tx_queue);
 }

@@ -46,9 +46,6 @@
 	 layering other disciplines. It does not need to do bandwidth
 	 control either since that can be handled by using token
 	 bucket or other rate control.
-
-	 The simulator is limited by the Linux timer resolution
-	 and will create packet bursts on the HZ boundary (1ms).
 */
 
 struct netem_sched_data {
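
A note on the tcp_vegas hunks above: with V_PARAM_SHIFT = 1 and integer truncation ignored, the removed fixed-point code and the new integer code work out to the following (this is algebra read off the lines shown, not text from the patch):

\[
\mathit{target\_cwnd}_{\mathrm{old}} = \frac{2\,\mathit{old\_wnd}\cdot \mathit{baseRTT}}{\mathit{rtt}},
\qquad
\mathit{diff}_{\mathrm{old}} = 2\,\mathit{old\_wnd} - \mathit{target\_cwnd}_{\mathrm{old}}
 = \frac{2\,\mathit{old\_wnd}\,(\mathit{rtt}-\mathit{baseRTT})}{\mathit{rtt}}
\]
\[
\mathit{target\_cwnd}_{\mathrm{new}} = \frac{\mathit{snd\_cwnd}\cdot \mathit{baseRTT}}{\mathit{rtt}},
\qquad
\mathit{diff}_{\mathrm{new}} = \frac{\mathit{snd\_cwnd}\,(\mathit{rtt}-\mathit{baseRTT})}{\mathit{baseRTT}}
\]

The dropped factor of two is why alpha, beta and gamma go from 2<<1, 4<<1, 1<<1 to plain 2, 4, 1 and why "(scale by 2)" disappears from the alpha/beta parameter descriptions; in addition, the new diff is measured against baseRTT using the current snd_cwnd rather than against rtt using the previously sent window old_wnd.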