Mirror of https://github.com/FEX-Emu/linux.git
Merge tag 'mac80211-for-davem-2019-04-09' of git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211

Johannes Berg says:

====================
Various fixes:
 * iTXQ fixes from Felix
 * tracing fix - increase message length
 * fix SW_CRYPTO_CONTROL enforcement
 * WMM rule handling for regdomain intersection
 * max_interfaces in hwsim - reported by syzbot
 * clear private data in some more commands
 * a clang compiler warning fix

I added a patch with two new (unused) macros for rate-limited
printing to simplify getting the users into the tree.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
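The driver-facing effect of the iTXQ rework in this pull is that ieee80211_txq_schedule_start()/ieee80211_txq_schedule_end() no longer hold a lock across the whole scheduling round, and ieee80211_return_txq() grows a third argument, force, so a driver can keep a queue scheduled even when mac80211 itself has nothing buffered for it. A minimal sketch of the resulting driver-side pattern, modelled on the ath10k hunks below (example_push_frames() is a hypothetical placeholder, not a mac80211 call):

static void example_schedule_ac(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_txq *txq;

	ieee80211_txq_schedule_start(hw, ac);

	while ((txq = ieee80211_next_txq(hw, ac))) {
		/* hand frames for this txq to the hardware */
		int ret = example_push_frames(hw, txq);

		/* force=false: re-schedule only if mac80211 still has
		 * packets queued; a driver with internal per-txq buffers
		 * would pass true instead
		 */
		ieee80211_return_txq(hw, txq, false);

		if (ret < 0)
			break;
	}

	ieee80211_txq_schedule_end(hw, ac);
}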
This commit is contained in: c03fd0171b
@@ -2728,7 +2728,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
 			num_msdus++;
 			num_bytes += ret;
 		}
-		ieee80211_return_txq(hw, txq);
+		ieee80211_return_txq(hw, txq, false);
 		ieee80211_txq_schedule_end(hw, txq->ac);
 
 		record->num_msdus = cpu_to_le16(num_msdus);
@@ -4089,7 +4089,7 @@ static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac)
 			if (ret < 0)
 				break;
 		}
-		ieee80211_return_txq(hw, txq);
+		ieee80211_return_txq(hw, txq, false);
 		ath10k_htt_tx_txq_update(hw, txq);
 		if (ret == -EBUSY)
 			break;
@@ -4374,7 +4374,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
 		if (ret < 0)
 			break;
 	}
-	ieee80211_return_txq(hw, txq);
+	ieee80211_return_txq(hw, txq, false);
 	ath10k_htt_tx_txq_update(hw, txq);
 out:
 	ieee80211_txq_schedule_end(hw, ac);
@@ -1938,12 +1938,15 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 		goto out;
 
 	while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) {
+		bool force;
+
 		tid = (struct ath_atx_tid *)queue->drv_priv;
 
 		ret = ath_tx_sched_aggr(sc, txq, tid);
 		ath_dbg(common, QUEUE, "ath_tx_sched_aggr returned %d\n", ret);
 
-		ieee80211_return_txq(hw, queue);
+		force = !skb_queue_empty(&tid->retry_q);
+		ieee80211_return_txq(hw, queue, force);
 	}
 
 out:
@@ -2644,7 +2644,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
 	enum nl80211_band band;
 	const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
 	struct net *net;
-	int idx;
+	int idx, i;
 	int n_limits = 0;
 
 	if (WARN_ON(param->channels > 1 && !param->use_chanctx))
@@ -2768,12 +2768,23 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
 			goto failed_hw;
 	}
 
+	data->if_combination.max_interfaces = 0;
+	for (i = 0; i < n_limits; i++)
+		data->if_combination.max_interfaces +=
+			data->if_limits[i].max;
+
 	data->if_combination.n_limits = n_limits;
-	data->if_combination.max_interfaces = 2048;
 	data->if_combination.limits = data->if_limits;
 
-	hw->wiphy->iface_combinations = &data->if_combination;
-	hw->wiphy->n_iface_combinations = 1;
+	/*
+	 * If we actually were asked to support combinations,
+	 * advertise them - if there's only a single thing like
+	 * only IBSS then don't advertise it as combinations.
+	 */
+	if (data->if_combination.max_interfaces > 1) {
+		hw->wiphy->iface_combinations = &data->if_combination;
+		hw->wiphy->n_iface_combinations = 1;
+	}
 
 	if (param->ciphers) {
 		memcpy(data->ciphers, param->ciphers,
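The hwsim change above stops advertising a fixed max_interfaces of 2048 and instead sums the per-type limits, so the advertised combination can never allow more interfaces than the limits themselves permit (the syzbot-reported problem). As a hedged illustration with made-up limits (not hwsim's actual table), the computation is just a sum:

/* Illustration only: hypothetical interface limits. */
static const struct ieee80211_iface_limit example_limits[] = {
	{ .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
	{ .max = 2, .types = BIT(NL80211_IFTYPE_STATION) |
			     BIT(NL80211_IFTYPE_AP) },
};

static u32 example_max_interfaces(void)
{
	u32 sum = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(example_limits); i++)
		sum += example_limits[i].max;	/* 1 + 2 = 3 here */

	return sum;
}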
@@ -7183,6 +7183,11 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
 #define wiphy_info(wiphy, format, args...)			\
 	dev_info(&(wiphy)->dev, format, ##args)
 
+#define wiphy_err_ratelimited(wiphy, format, args...)		\
+	dev_err_ratelimited(&(wiphy)->dev, format, ##args)
+#define wiphy_warn_ratelimited(wiphy, format, args...)		\
+	dev_warn_ratelimited(&(wiphy)->dev, format, ##args)
+
 #define wiphy_debug(wiphy, format, args...)			\
 	wiphy_printk(KERN_DEBUG, wiphy, format, ##args)
 
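The two macros added above follow the existing wiphy_err()/wiphy_warn() pattern but route through dev_err_ratelimited()/dev_warn_ratelimited(), so noisy paths cannot flood the log. They are intentionally unused in this pull; a hedged sketch of a future call site (the helper and its arguments are hypothetical):

/* Illustration only: a driver helper reporting a repeated TX problem. */
static void example_report_tx_error(struct wiphy *wiphy, int status)
{
	if (status)
		wiphy_err_ratelimited(wiphy,
				      "TX completion failed: %d\n", status);
}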
@@ -6231,8 +6231,6 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @ac: AC number to return packets from.
  *
- * Should only be called between calls to ieee80211_txq_schedule_start()
- * and ieee80211_txq_schedule_end().
  * Returns the next txq if successful, %NULL if no queue is eligible. If a txq
  * is returned, it should be returned with ieee80211_return_txq() after the
  * driver has finished scheduling it.
@@ -6240,38 +6238,23 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac);
 
 /**
- * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
- *
- * @hw: pointer as obtained from ieee80211_alloc_hw()
- * @txq: pointer obtained from station or virtual interface
- *
- * Should only be called between calls to ieee80211_txq_schedule_start()
- * and ieee80211_txq_schedule_end().
- */
-void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
-
-/**
- * ieee80211_txq_schedule_start - acquire locks for safe scheduling of an AC
+ * ieee80211_txq_schedule_start - start new scheduling round for TXQs
  *
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @ac: AC number to acquire locks for
  *
- * Acquire locks needed to schedule TXQs from the given AC. Should be called
- * before ieee80211_next_txq() or ieee80211_return_txq().
+ * Should be called before ieee80211_next_txq() or ieee80211_return_txq().
+ * The driver must not call multiple TXQ scheduling rounds concurrently.
  */
-void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
-	__acquires(txq_lock);
+void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac);
 
-/**
- * ieee80211_txq_schedule_end - release locks for safe scheduling of an AC
- *
- * @hw: pointer as obtained from ieee80211_alloc_hw()
- * @ac: AC number to acquire locks for
- *
- * Release locks previously acquired by ieee80211_txq_schedule_end().
- */
-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
-	__releases(txq_lock);
+/* (deprecated) */
+static inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
+{
+}
+
+void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
+			      struct ieee80211_txq *txq, bool force);
 
 /**
  * ieee80211_schedule_txq - schedule a TXQ for transmission
@@ -6279,12 +6262,34 @@ void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
  * @hw: pointer as obtained from ieee80211_alloc_hw()
  * @txq: pointer obtained from station or virtual interface
  *
- * Schedules a TXQ for transmission if it is not already scheduled. Takes a
- * lock, which means it must *not* be called between
- * ieee80211_txq_schedule_start() and ieee80211_txq_schedule_end()
+ * Schedules a TXQ for transmission if it is not already scheduled,
+ * even if mac80211 does not have any packets buffered.
+ *
+ * The driver may call this function if it has buffered packets for
+ * this TXQ internally.
  */
-void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
-	__acquires(txq_lock) __releases(txq_lock);
+static inline void
+ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+	__ieee80211_schedule_txq(hw, txq, true);
+}
+
+/**
+ * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @txq: pointer obtained from station or virtual interface
+ * @force: schedule txq even if mac80211 does not have any buffered packets.
+ *
+ * The driver may set force=true if it has buffered packets for this TXQ
+ * internally.
+ */
+static inline void
+ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
+		     bool force)
+{
+	__ieee80211_schedule_txq(hw, txq, force);
+}
 
 /**
  * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit
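The inline wrappers above make ieee80211_schedule_txq() the force=true case of __ieee80211_schedule_txq(), while ieee80211_return_txq() lets the driver choose. The force flag matters for drivers that keep frames in their own per-TID queues: mac80211 may see the txq as empty even though the driver still has something to retry. A sketch of that pattern, mirroring the ath9k hunk earlier (struct example_tid and its retry_q are hypothetical driver state):

struct example_tid {
	struct sk_buff_head retry_q;	/* driver-internal retry queue */
};

static void example_done_with_txq(struct ieee80211_hw *hw,
				  struct ieee80211_txq *txq,
				  struct example_tid *tid)
{
	/* keep the txq scheduled if the driver still holds frames for it */
	bool force = !skb_queue_empty(&tid->retry_q);

	ieee80211_return_txq(hw, txq, force);
}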
@@ -1195,6 +1195,9 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
 
+	if (local->in_reconfig)
+		return;
+
 	if (!check_sdata_in_driver(sdata))
 		return;
 
@@ -167,8 +167,10 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 		 * The driver doesn't know anything about VLAN interfaces.
 		 * Hence, don't send GTKs for VLAN interfaces to the driver.
 		 */
-		if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE))
+		if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+			ret = 1;
 			goto out_unsupported;
+		}
 	}
 
 	ret = drv_set_key(key->local, SET_KEY, sdata,
@@ -213,11 +215,8 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 		/* all of these we can do in software - if driver can */
 		if (ret == 1)
 			return 0;
-		if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL)) {
-			if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-				return 0;
+		if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL))
 			return -EINVAL;
-		}
 		return 0;
 	default:
 		return -EINVAL;
@@ -23,7 +23,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);
 static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
 {
 	/* Use last four bytes of hw addr as hash index */
-	return jhash_1word(*(u32 *)(addr+2), seed);
+	return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
 }
 
 static const struct rhashtable_params mesh_rht_params = {
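The mesh fix above avoids casting addr+2 to a u32 pointer: a MAC address is 6 bytes and usually only 2-byte aligned, so reading its last four bytes through a plain u32 dereference is an unaligned load that can trap on strict-alignment CPUs; the patch switches to the kernel's unaligned-access helper instead. A hedged userspace illustration of the same idea, using memcpy rather than the in-kernel helper:

#include <stdint.h>
#include <string.h>

/* Read the last four bytes of a 6-byte MAC address without assuming
 * 4-byte alignment (illustration of the fix above, not kernel code).
 */
static uint32_t mac_hash_word(const uint8_t addr[6])
{
	uint32_t w;

	memcpy(&w, addr + 2, sizeof(w));	/* alignment-safe load */
	return w;	/* this value would then be hashed */
}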
@@ -1568,7 +1568,15 @@ static void sta_ps_start(struct sta_info *sta)
 		return;
 
 	for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
-		if (txq_has_queue(sta->sta.txq[tid]))
+		struct ieee80211_txq *txq = sta->sta.txq[tid];
+		struct txq_info *txqi = to_txq_info(txq);
+
+		spin_lock(&local->active_txq_lock[txq->ac]);
+		if (!list_empty(&txqi->schedule_order))
+			list_del_init(&txqi->schedule_order);
+		spin_unlock(&local->active_txq_lock[txq->ac]);
+
+		if (txq_has_queue(txq))
 			set_bit(tid, &sta->txq_buffered_tids);
 		else
 			clear_bit(tid, &sta->txq_buffered_tids);
@@ -1,4 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions of this file
+ * Copyright (C) 2019 Intel Corporation
+ */
+
 #ifdef CONFIG_MAC80211_MESSAGE_TRACING
 
 #if !defined(__MAC80211_MSG_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
@@ -11,7 +16,7 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM mac80211_msg
 
-#define MAX_MSG_LEN	100
+#define MAX_MSG_LEN	120
 
 DECLARE_EVENT_CLASS(mac80211_msg_event,
 	TP_PROTO(struct va_format *vaf),
@@ -3221,6 +3221,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
 	u8 max_subframes = sta->sta.max_amsdu_subframes;
 	int max_frags = local->hw.max_tx_fragments;
 	int max_amsdu_len = sta->sta.max_amsdu_len;
+	int orig_truesize;
 	__be16 len;
 	void *data;
 	bool ret = false;
@@ -3261,6 +3262,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
 	if (!head || skb_is_gso(head))
 		goto out;
 
+	orig_truesize = head->truesize;
 	orig_len = head->len;
 
 	if (skb->len + head->len > max_amsdu_len)
@@ -3318,6 +3320,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
 	*frag_tail = skb;
 
 out_recalc:
+	fq->memory_usage += head->truesize - orig_truesize;
 	if (head->len != orig_len) {
 		flow->backlog += head->len - orig_len;
 		tin->backlog_bytes += head->len - orig_len;
@@ -3646,16 +3649,17 @@ EXPORT_SYMBOL(ieee80211_tx_dequeue);
 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
+	struct ieee80211_txq *ret = NULL;
 	struct txq_info *txqi = NULL;
 
-	lockdep_assert_held(&local->active_txq_lock[ac]);
+	spin_lock_bh(&local->active_txq_lock[ac]);
 
 begin:
 	txqi = list_first_entry_or_null(&local->active_txqs[ac],
 					struct txq_info,
 					schedule_order);
 	if (!txqi)
-		return NULL;
+		goto out;
 
 	if (txqi->txq.sta) {
 		struct sta_info *sta = container_of(txqi->txq.sta,
@@ -3672,24 +3676,30 @@ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
 	}
 
 	if (txqi->schedule_round == local->schedule_round[ac])
-		return NULL;
+		goto out;
 
 	list_del_init(&txqi->schedule_order);
 	txqi->schedule_round = local->schedule_round[ac];
-	return &txqi->txq;
+	ret = &txqi->txq;
+
+out:
+	spin_unlock_bh(&local->active_txq_lock[ac]);
+	return ret;
 }
 EXPORT_SYMBOL(ieee80211_next_txq);
 
-void ieee80211_return_txq(struct ieee80211_hw *hw,
-			  struct ieee80211_txq *txq)
+void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq,
+			      bool force)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct txq_info *txqi = to_txq_info(txq);
 
-	lockdep_assert_held(&local->active_txq_lock[txq->ac]);
+	spin_lock_bh(&local->active_txq_lock[txq->ac]);
 
 	if (list_empty(&txqi->schedule_order) &&
-	    (!skb_queue_empty(&txqi->frags) || txqi->tin.backlog_packets)) {
+	    (force || !skb_queue_empty(&txqi->frags) ||
+	     txqi->tin.backlog_packets)) {
 		/* If airtime accounting is active, always enqueue STAs at the
 		 * head of the list to ensure that they only get moved to the
 		 * back by the airtime DRR scheduler once they have a negative
@@ -3729,7 +3729,7 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
 	struct sta_info *sta;
 	u8 ac = txq->ac;
 
-	lockdep_assert_held(&local->active_txq_lock[ac]);
+	spin_lock_bh(&local->active_txq_lock[ac]);
 
 	if (!txqi->txq.sta)
 		goto out;
@@ -3759,34 +3759,27 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
 
 	sta->airtime[ac].deficit += sta->airtime_weight;
 	list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
+	spin_unlock_bh(&local->active_txq_lock[ac]);
 
 	return false;
 out:
 	if (!list_empty(&txqi->schedule_order))
 		list_del_init(&txqi->schedule_order);
+	spin_unlock_bh(&local->active_txq_lock[ac]);
 
 	return true;
 }
 EXPORT_SYMBOL(ieee80211_txq_may_transmit);
 
 void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
-	__acquires(txq_lock)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 
 	spin_lock_bh(&local->active_txq_lock[ac]);
 	local->schedule_round[ac]++;
-}
-EXPORT_SYMBOL(ieee80211_txq_schedule_start);
-
-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
-	__releases(txq_lock)
-{
-	struct ieee80211_local *local = hw_to_local(hw);
-
 	spin_unlock_bh(&local->active_txq_lock[ac]);
 }
-EXPORT_SYMBOL(ieee80211_txq_schedule_end);
+EXPORT_SYMBOL(ieee80211_txq_schedule_start);
 
 void __ieee80211_subif_start_xmit(struct sk_buff *skb,
 				  struct net_device *dev,
@@ -13650,7 +13650,8 @@ static const struct genl_ops nl80211_ops[] = {
 		.policy = nl80211_policy,
 		.flags = GENL_UNS_ADMIN_PERM,
 		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-				  NL80211_FLAG_NEED_RTNL,
+				  NL80211_FLAG_NEED_RTNL |
+				  NL80211_FLAG_CLEAR_SKB,
 	},
 	{
 		.cmd = NL80211_CMD_DEAUTHENTICATE,
@@ -13701,7 +13702,8 @@ static const struct genl_ops nl80211_ops[] = {
 		.policy = nl80211_policy,
 		.flags = GENL_UNS_ADMIN_PERM,
 		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-				  NL80211_FLAG_NEED_RTNL,
+				  NL80211_FLAG_NEED_RTNL |
+				  NL80211_FLAG_CLEAR_SKB,
 	},
 	{
 		.cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS,
@@ -13709,7 +13711,8 @@ static const struct genl_ops nl80211_ops[] = {
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
 		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-				  NL80211_FLAG_NEED_RTNL,
+				  NL80211_FLAG_NEED_RTNL |
+				  NL80211_FLAG_CLEAR_SKB,
 	},
 	{
 		.cmd = NL80211_CMD_DISCONNECT,
@@ -13738,7 +13741,8 @@ static const struct genl_ops nl80211_ops[] = {
 		.policy = nl80211_policy,
 		.flags = GENL_UNS_ADMIN_PERM,
 		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-				  NL80211_FLAG_NEED_RTNL,
+				  NL80211_FLAG_NEED_RTNL |
+				  NL80211_FLAG_CLEAR_SKB,
 	},
 	{
 		.cmd = NL80211_CMD_DEL_PMKSA,
@@ -14090,7 +14094,8 @@ static const struct genl_ops nl80211_ops[] = {
 		.policy = nl80211_policy,
 		.flags = GENL_UNS_ADMIN_PERM,
 		.internal_flags = NL80211_FLAG_NEED_WIPHY |
-				  NL80211_FLAG_NEED_RTNL,
+				  NL80211_FLAG_NEED_RTNL |
+				  NL80211_FLAG_CLEAR_SKB,
 	},
 	{
 		.cmd = NL80211_CMD_SET_QOS_MAP,
@@ -14145,7 +14150,8 @@ static const struct genl_ops nl80211_ops[] = {
 		.doit = nl80211_set_pmk,
 		.policy = nl80211_policy,
 		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
-				  NL80211_FLAG_NEED_RTNL,
+				  NL80211_FLAG_NEED_RTNL |
+				  NL80211_FLAG_CLEAR_SKB,
 	},
 	{
 		.cmd = NL80211_CMD_DEL_PMK,
@@ -1309,6 +1309,16 @@ reg_intersect_dfs_region(const enum nl80211_dfs_regions dfs_region1,
 	return dfs_region1;
 }
 
+static void reg_wmm_rules_intersect(const struct ieee80211_wmm_ac *wmm_ac1,
+				    const struct ieee80211_wmm_ac *wmm_ac2,
+				    struct ieee80211_wmm_ac *intersect)
+{
+	intersect->cw_min = max_t(u16, wmm_ac1->cw_min, wmm_ac2->cw_min);
+	intersect->cw_max = max_t(u16, wmm_ac1->cw_max, wmm_ac2->cw_max);
+	intersect->cot = min_t(u16, wmm_ac1->cot, wmm_ac2->cot);
+	intersect->aifsn = max_t(u8, wmm_ac1->aifsn, wmm_ac2->aifsn);
+}
+
 /*
  * Helper for regdom_intersect(), this does the real
  * mathematical intersection fun
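reg_wmm_rules_intersect() above keeps the more conservative value of each WMM parameter: the larger cw_min, cw_max and aifsn, and the smaller cot. With hypothetical inputs cw_min 15/31, cw_max 63/1023, cot 2000/4000 and aifsn 2/3, the intersection is cw_min 31, cw_max 1023, cot 2000, aifsn 3. A standalone sketch of the same rule with simplified types (not the cfg80211 structures):

#include <stdint.h>

struct wmm_ac_example {		/* simplified stand-in for ieee80211_wmm_ac */
	uint16_t cw_min, cw_max, cot;
	uint8_t aifsn;
};

static struct wmm_ac_example wmm_intersect(struct wmm_ac_example a,
					   struct wmm_ac_example b)
{
	struct wmm_ac_example r;

	r.cw_min = a.cw_min > b.cw_min ? a.cw_min : b.cw_min;	/* max */
	r.cw_max = a.cw_max > b.cw_max ? a.cw_max : b.cw_max;	/* max */
	r.cot    = a.cot < b.cot ? a.cot : b.cot;		/* min */
	r.aifsn  = a.aifsn > b.aifsn ? a.aifsn : b.aifsn;	/* max */
	return r;
}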
@@ -1323,6 +1333,8 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
 	struct ieee80211_freq_range *freq_range;
 	const struct ieee80211_power_rule *power_rule1, *power_rule2;
 	struct ieee80211_power_rule *power_rule;
+	const struct ieee80211_wmm_rule *wmm_rule1, *wmm_rule2;
+	struct ieee80211_wmm_rule *wmm_rule;
 	u32 freq_diff, max_bandwidth1, max_bandwidth2;
 
 	freq_range1 = &rule1->freq_range;
@@ -1333,6 +1345,10 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
 	power_rule2 = &rule2->power_rule;
 	power_rule = &intersected_rule->power_rule;
 
+	wmm_rule1 = &rule1->wmm_rule;
+	wmm_rule2 = &rule2->wmm_rule;
+	wmm_rule = &intersected_rule->wmm_rule;
+
 	freq_range->start_freq_khz = max(freq_range1->start_freq_khz,
 					 freq_range2->start_freq_khz);
 	freq_range->end_freq_khz = min(freq_range1->end_freq_khz,
@@ -1376,6 +1392,29 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
 	intersected_rule->dfs_cac_ms = max(rule1->dfs_cac_ms,
 					   rule2->dfs_cac_ms);
 
+	if (rule1->has_wmm && rule2->has_wmm) {
+		u8 ac;
+
+		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+			reg_wmm_rules_intersect(&wmm_rule1->client[ac],
+						&wmm_rule2->client[ac],
+						&wmm_rule->client[ac]);
+			reg_wmm_rules_intersect(&wmm_rule1->ap[ac],
+						&wmm_rule2->ap[ac],
+						&wmm_rule->ap[ac]);
+		}
+
+		intersected_rule->has_wmm = true;
+	} else if (rule1->has_wmm) {
+		*wmm_rule = *wmm_rule1;
+		intersected_rule->has_wmm = true;
+	} else if (rule2->has_wmm) {
+		*wmm_rule = *wmm_rule2;
+		intersected_rule->has_wmm = true;
+	} else {
+		intersected_rule->has_wmm = false;
+	}
+
 	if (!is_valid_reg_rule(intersected_rule))
 		return -EINVAL;
 
@@ -190,10 +190,9 @@ static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
 	/* copy subelement as we need to change its content to
 	 * mark an ie after it is processed.
 	 */
-	sub_copy = kmalloc(subie_len, gfp);
+	sub_copy = kmemdup(subelement, subie_len, gfp);
 	if (!sub_copy)
 		return 0;
-	memcpy(sub_copy, subelement, subie_len);
 
 	pos = &new_ie[0];
 
@@ -1220,9 +1220,11 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
 	else if (rate->bw == RATE_INFO_BW_HE_RU &&
 		 rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_26)
 		result = rates_26[rate->he_gi];
-	else if (WARN(1, "invalid HE MCS: bw:%d, ru:%d\n",
-		      rate->bw, rate->he_ru_alloc))
+	else {
+		WARN(1, "invalid HE MCS: bw:%d, ru:%d\n",
+		     rate->bw, rate->he_ru_alloc);
 		return 0;
+	}
 
 	/* now scale to the appropriate MCS */
 	tmp = result;