Merge branch 'for-linville' of git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx

John W. Linville 2011-07-08 11:05:20 -04:00
commit e441a5eab9
17 changed files with 414 additions and 144 deletions

View File: drivers/net/wireless/wl12xx/acx.c

@ -90,7 +90,7 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
struct acx_current_tx_power *acx;
int ret;
wl1271_debug(DEBUG_ACX, "acx dot11_cur_tx_pwr");
wl1271_debug(DEBUG_ACX, "acx dot11_cur_tx_pwr %d", power);
if (power < 0 || power > 25)
return -EINVAL;
@ -1624,22 +1624,22 @@ out:
return ret;
}
int wl1271_acx_max_tx_retry(struct wl1271 *wl)
int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl)
{
struct wl1271_acx_max_tx_retry *acx = NULL;
struct wl1271_acx_ap_max_tx_retry *acx = NULL;
int ret;
wl1271_debug(DEBUG_ACX, "acx max tx retry");
wl1271_debug(DEBUG_ACX, "acx ap max tx retry");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx)
return -ENOMEM;
acx->max_tx_retry = cpu_to_le16(wl->conf.tx.ap_max_tx_retries);
acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries);
ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx max tx retry failed: %d", ret);
wl1271_warning("acx ap max tx retry failed: %d", ret);
goto out;
}

View File: drivers/net/wireless/wl12xx/acx.h

@ -1168,7 +1168,7 @@ struct wl1271_acx_ps_rx_streaming {
u8 timeout;
} __packed;
struct wl1271_acx_max_tx_retry {
struct wl1271_acx_ap_max_tx_retry {
struct acx_header header;
/*
@ -1400,7 +1400,7 @@ int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
bool enable);
int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable);
int wl1271_acx_max_tx_retry(struct wl1271 *wl);
int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl);
int wl1271_acx_config_ps(struct wl1271 *wl);
int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
int wl1271_acx_set_ap_beacon_filter(struct wl1271 *wl, bool enable);

View File: drivers/net/wireless/wl12xx/boot.c

@ -513,7 +513,9 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
PERIODIC_SCAN_COMPLETE_EVENT_ID;
if (wl->bss_type == BSS_TYPE_AP_BSS)
wl->event_mask |= STA_REMOVE_COMPLETE_EVENT_ID;
wl->event_mask |= STA_REMOVE_COMPLETE_EVENT_ID |
INACTIVE_STA_EVENT_ID |
MAX_TX_RETRY_EVENT_ID;
else
wl->event_mask |= DUMMY_PACKET_EVENT_ID |
BA_SESSION_RX_CONSTRAINT_EVENT_ID;

View File: drivers/net/wireless/wl12xx/cmd.c

@ -400,10 +400,6 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
join->ctrl |= wl->session_counter << WL1271_JOIN_CMD_TX_SESSION_OFFSET;
/* reset TX security counters */
wl->tx_security_last_seq = 0;
wl->tx_security_seq = 0;
wl1271_debug(DEBUG_CMD, "cmd join: basic_rate_set=0x%x, rate_set=0x%x",
join->basic_rate_set, join->supported_rate_set);
@ -1084,7 +1080,7 @@ int wl1271_cmd_start_bss(struct wl1271 *wl)
memcpy(cmd->bssid, bss_conf->bssid, ETH_ALEN);
cmd->aging_period = cpu_to_le16(WL1271_AP_DEF_INACTIV_SEC);
cmd->aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period);
cmd->bss_index = WL1271_AP_BSS_INDEX;
cmd->global_hlid = WL1271_AP_GLOBAL_HLID;
cmd->broadcast_hlid = WL1271_AP_BROADCAST_HLID;

View File: drivers/net/wireless/wl12xx/conf.h

@ -713,8 +713,16 @@ struct conf_tx_settings {
/*
* AP-mode - allow this number of TX retries to a station before an
* event is triggered from FW.
* In AP-mode the hlids of unreachable stations are given in the
* "sta_tx_retry_exceeded" member in the event mailbox.
*/
u16 ap_max_tx_retries;
u8 max_tx_retries;
/*
* AP-mode - after this number of seconds a connected station is
* considered inactive.
*/
u16 ap_aging_period;
/*
* Configuration for TID parameters.

View File: drivers/net/wireless/wl12xx/debugfs.c

@ -30,6 +30,7 @@
#include "acx.h"
#include "ps.h"
#include "io.h"
#include "tx.h"
/* ms */
#define WL1271_DEBUGFS_STATS_LIFETIME 1000
@ -233,7 +234,7 @@ static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf,
char buf[20];
int res;
queue_len = wl->tx_queue_count;
queue_len = wl1271_tx_total_queue_count(wl);
res = scnprintf(buf, sizeof(buf), "%u\n", queue_len);
return simple_read_from_buffer(userbuf, count, ppos, buf, res);
@ -338,10 +339,16 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
#define DRIVER_STATE_PRINT_HEX(x) DRIVER_STATE_PRINT(x, "0x%x")
DRIVER_STATE_PRINT_INT(tx_blocks_available);
DRIVER_STATE_PRINT_INT(tx_allocated_blocks);
DRIVER_STATE_PRINT_INT(tx_allocated_blocks[0]);
DRIVER_STATE_PRINT_INT(tx_allocated_blocks[1]);
DRIVER_STATE_PRINT_INT(tx_allocated_blocks[2]);
DRIVER_STATE_PRINT_INT(tx_allocated_blocks[3]);
DRIVER_STATE_PRINT_INT(tx_frames_cnt);
DRIVER_STATE_PRINT_LHEX(tx_frames_map[0]);
DRIVER_STATE_PRINT_INT(tx_queue_count);
DRIVER_STATE_PRINT_INT(tx_queue_count[0]);
DRIVER_STATE_PRINT_INT(tx_queue_count[1]);
DRIVER_STATE_PRINT_INT(tx_queue_count[2]);
DRIVER_STATE_PRINT_INT(tx_queue_count[3]);
DRIVER_STATE_PRINT_INT(tx_packets_count);
DRIVER_STATE_PRINT_INT(tx_results_count);
DRIVER_STATE_PRINT_LHEX(flags);
@ -349,7 +356,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
DRIVER_STATE_PRINT_INT(tx_blocks_freed[1]);
DRIVER_STATE_PRINT_INT(tx_blocks_freed[2]);
DRIVER_STATE_PRINT_INT(tx_blocks_freed[3]);
DRIVER_STATE_PRINT_INT(tx_security_last_seq);
DRIVER_STATE_PRINT_INT(tx_security_last_seq_lsb);
DRIVER_STATE_PRINT_INT(rx_counter);
DRIVER_STATE_PRINT_INT(session_counter);
DRIVER_STATE_PRINT_INT(state);

View File: drivers/net/wireless/wl12xx/event.c

@ -214,6 +214,8 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
u32 vector;
bool beacon_loss = false;
bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
bool disconnect_sta = false;
unsigned long sta_bitmap = 0;
wl1271_event_mbox_dump(mbox);
@ -295,6 +297,46 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
wl1271_tx_dummy_packet(wl);
}
/*
* "TX retries exceeded" has a different meaning according to mode.
* In AP mode the offending station is disconnected.
*/
if ((vector & MAX_TX_RETRY_EVENT_ID) && is_ap) {
wl1271_debug(DEBUG_EVENT, "MAX_TX_RETRY_EVENT_ID");
sta_bitmap |= le16_to_cpu(mbox->sta_tx_retry_exceeded);
disconnect_sta = true;
}
if ((vector & INACTIVE_STA_EVENT_ID) && is_ap) {
wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
sta_bitmap |= le16_to_cpu(mbox->sta_aging_status);
disconnect_sta = true;
}
if (is_ap && disconnect_sta) {
u32 num_packets = wl->conf.tx.max_tx_retries;
struct ieee80211_sta *sta;
const u8 *addr;
int h;
for (h = find_first_bit(&sta_bitmap, AP_MAX_LINKS);
h < AP_MAX_LINKS;
h = find_next_bit(&sta_bitmap, AP_MAX_LINKS, h+1)) {
if (!wl1271_is_active_sta(wl, h))
continue;
addr = wl->links[h].addr;
rcu_read_lock();
sta = ieee80211_find_sta(wl->vif, addr);
if (sta) {
wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
ieee80211_report_low_ack(sta, num_packets);
}
rcu_read_unlock();
}
}
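
For readers following the new AP-mode disconnect path: the two mailbox bitmaps are OR-ed into sta_bitmap and walked bit by bit, each set bit naming an HLID to report. Below is a minimal user-space sketch of that walk, with made-up mailbox words and an assumed AP_MAX_LINKS of 16; the real handler uses find_first_bit()/find_next_bit() and also checks wl1271_is_active_sta() before calling ieee80211_report_low_ack().

#include <stdio.h>

#define AP_MAX_LINKS 16	/* assumed cap, for illustration only */

int main(void)
{
	/* hypothetical mailbox words: hlids 1, 2 exceeded retries; 4 aged out */
	unsigned long sta_tx_retry_exceeded = 0x0006;
	unsigned long sta_aging_status = 0x0010;
	unsigned long sta_bitmap = 0;
	int h;

	sta_bitmap |= sta_tx_retry_exceeded;
	sta_bitmap |= sta_aging_status;

	for (h = 0; h < AP_MAX_LINKS; h++)
		if (sta_bitmap & (1UL << h))
			printf("disconnect station on hlid %d\n", h);

	return 0;
}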
if (wl->vif && beacon_loss)
ieee80211_connection_loss(wl->vif);

View File: drivers/net/wireless/wl12xx/event.h

@ -58,13 +58,16 @@ enum {
CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(17),
BSS_LOSE_EVENT_ID = BIT(18),
REGAINED_BSS_EVENT_ID = BIT(19),
ROAMING_TRIGGER_MAX_TX_RETRY_EVENT_ID = BIT(20),
MAX_TX_RETRY_EVENT_ID = BIT(20),
/* STA: dummy packet for dynamic mem blocks */
DUMMY_PACKET_EVENT_ID = BIT(21),
/* AP: STA remove complete */
STA_REMOVE_COMPLETE_EVENT_ID = BIT(21),
SOFT_GEMINI_SENSE_EVENT_ID = BIT(22),
/* STA: SG prediction */
SOFT_GEMINI_PREDICTION_EVENT_ID = BIT(23),
/* AP: Inactive STA */
INACTIVE_STA_EVENT_ID = BIT(23),
SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24),
PLT_RX_CALIBRATION_COMPLETE_EVENT_ID = BIT(25),
DBG_EVENT_ID = BIT(26),
@ -119,7 +122,11 @@ struct event_mailbox {
/* AP FW only */
u8 hlid_removed;
/* a bitmap of hlids for stations that have been inactive too long */
__le16 sta_aging_status;
/* a bitmap of hlids for stations which didn't respond to TX */
__le16 sta_tx_retry_exceeded;
/*
@ -143,4 +150,7 @@ void wl1271_event_mbox_config(struct wl1271 *wl);
int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
void wl1271_pspoll_work(struct work_struct *work);
/* Functions from main.c */
bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid);
#endif

View File: drivers/net/wireless/wl12xx/init.c

@ -447,7 +447,7 @@ static int wl1271_ap_hw_init(struct wl1271 *wl)
if (ret < 0)
return ret;
ret = wl1271_acx_max_tx_retry(wl);
ret = wl1271_acx_ap_max_tx_retry(wl);
if (ret < 0)
return ret;
@ -455,6 +455,11 @@ static int wl1271_ap_hw_init(struct wl1271 *wl)
if (ret < 0)
return ret;
/* initialize Tx power */
ret = wl1271_acx_tx_power(wl, wl->power_level);
if (ret < 0)
return ret;
return 0;
}

View File: drivers/net/wireless/wl12xx/main.c

@ -210,7 +210,8 @@ static struct conf_drv_settings default_conf = {
.tx_op_limit = 1504,
},
},
.ap_max_tx_retries = 100,
.max_tx_retries = 100,
.ap_aging_period = 300,
.tid_conf_count = 4,
.tid_conf = {
[CONF_TX_AC_BE] = {
@ -823,13 +824,24 @@ static void wl1271_irq_update_links_status(struct wl1271 *wl,
}
}
static u32 wl1271_tx_allocated_blocks(struct wl1271 *wl)
{
int i;
u32 total_alloc_blocks = 0;
for (i = 0; i < NUM_TX_QUEUES; i++)
total_alloc_blocks += wl->tx_allocated_blocks[i];
return total_alloc_blocks;
}
static void wl1271_fw_status(struct wl1271 *wl,
struct wl1271_fw_full_status *full_status)
{
struct wl1271_fw_common_status *status = &full_status->common;
struct timespec ts;
u32 old_tx_blk_count = wl->tx_blocks_available;
u32 freed_blocks = 0;
u32 freed_blocks = 0, ac_freed_blocks;
int i;
if (wl->bss_type == BSS_TYPE_AP_BSS) {
@ -849,21 +861,23 @@ static void wl1271_fw_status(struct wl1271 *wl,
/* update number of available TX blocks */
for (i = 0; i < NUM_TX_QUEUES; i++) {
freed_blocks += le32_to_cpu(status->tx_released_blks[i]) -
wl->tx_blocks_freed[i];
ac_freed_blocks = le32_to_cpu(status->tx_released_blks[i]) -
wl->tx_blocks_freed[i];
freed_blocks += ac_freed_blocks;
wl->tx_allocated_blocks[i] -= ac_freed_blocks;
wl->tx_blocks_freed[i] =
le32_to_cpu(status->tx_released_blks[i]);
}
wl->tx_allocated_blocks -= freed_blocks;
if (wl->bss_type == BSS_TYPE_AP_BSS) {
/* Update num of allocated TX blocks per link and ps status */
wl1271_irq_update_links_status(wl, &full_status->ap);
wl->tx_blocks_available += freed_blocks;
} else {
int avail = full_status->sta.tx_total - wl->tx_allocated_blocks;
int avail = full_status->sta.tx_total -
wl1271_tx_allocated_blocks(wl);
/*
* The FW might change the total number of TX memblocks before
@ -978,7 +992,7 @@ irqreturn_t wl1271_irq(int irq, void *cookie)
/* Check if any tx blocks were freed */
spin_lock_irqsave(&wl->wl_lock, flags);
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
wl->tx_queue_count) {
wl1271_tx_total_queue_count(wl) > 0) {
spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
* In order to avoid starvation of the TX path,
@ -1026,7 +1040,7 @@ out:
/* In case TX was not handled here, queue TX work */
clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
wl->tx_queue_count)
wl1271_tx_total_queue_count(wl) > 0)
ieee80211_queue_work(wl->hw, &wl->tx_work);
spin_unlock_irqrestore(&wl->wl_lock, flags);
@ -1227,6 +1241,15 @@ static void wl1271_recovery_work(struct work_struct *work)
wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
wl->chip.fw_ver_str, wl1271_read32(wl, SCR_PAD4));
/*
* Advance security sequence number to overcome potential progress
* in the firmware during recovery. This doesn't hurt if the network is
* not encrypted.
*/
if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
wl->tx_security_seq += WL1271_TX_SQN_POST_RECOVERY_PADDING;
if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
ieee80211_connection_loss(wl->vif);
@ -1474,26 +1497,27 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct wl1271 *wl = hw->priv;
unsigned long flags;
int q;
int q, mapping;
u8 hlid = 0;
q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
mapping = skb_get_queue_mapping(skb);
q = wl1271_tx_get_queue(mapping);
if (wl->bss_type == BSS_TYPE_AP_BSS)
hlid = wl1271_tx_get_hlid(skb);
spin_lock_irqsave(&wl->wl_lock, flags);
wl->tx_queue_count++;
wl->tx_queue_count[q]++;
/*
* The workqueue is slow to process the tx_queue and we need to stop
* the queue here, otherwise the queue will get too long.
*/
if (wl->tx_queue_count >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
ieee80211_stop_queues(wl->hw);
set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
ieee80211_stop_queue(wl->hw, mapping);
set_bit(q, &wl->stopped_queues_map);
}
/* queue the packet */
@ -1519,10 +1543,11 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
int wl1271_tx_dummy_packet(struct wl1271 *wl)
{
unsigned long flags;
int q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
spin_lock_irqsave(&wl->wl_lock, flags);
set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
wl->tx_queue_count++;
wl->tx_queue_count[q]++;
spin_unlock_irqrestore(&wl->wl_lock, flags);
/* The FW is low on RX memory blocks, so send the dummy packet asap */
@ -1586,10 +1611,13 @@ static struct notifier_block wl1271_dev_notifier = {
#ifdef CONFIG_PM
static int wl1271_configure_suspend_sta(struct wl1271 *wl)
{
int ret;
int ret = 0;
mutex_lock(&wl->mutex);
if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
goto out_unlock;
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_unlock;
@ -1634,10 +1662,13 @@ out:
static int wl1271_configure_suspend_ap(struct wl1271 *wl)
{
int ret;
int ret = 0;
mutex_lock(&wl->mutex);
if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
goto out_unlock;
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_unlock;
@ -1705,7 +1736,6 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
}
/* flush any remaining work */
wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
flush_delayed_work(&wl->scan_complete_work);
/*
* disable and re-enable interrupts in order to flush
@ -1977,11 +2007,8 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
wl->psm_entry_retry = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->tx_blocks_available = 0;
wl->tx_allocated_blocks = 0;
wl->tx_results_count = 0;
wl->tx_packets_count = 0;
wl->tx_security_last_seq = 0;
wl->tx_security_seq = 0;
wl->time_offset = 0;
wl->session_counter = 0;
wl->rate_set = CONF_TX_RATE_MASK_BASIC;
@ -2000,8 +2027,10 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
*/
wl->flags = 0;
for (i = 0; i < NUM_TX_QUEUES; i++)
for (i = 0; i < NUM_TX_QUEUES; i++) {
wl->tx_blocks_freed[i] = 0;
wl->tx_allocated_blocks[i] = 0;
}
wl1271_debugfs_reset(wl);
@ -2154,6 +2183,10 @@ static int wl1271_unjoin(struct wl1271 *wl)
clear_bit(WL1271_FLAG_JOINED, &wl->flags);
memset(wl->bssid, 0, ETH_ALEN);
/* reset TX security counters on a clean disconnect */
wl->tx_security_last_seq_lsb = 0;
wl->tx_security_seq = 0;
/* stop filtering packets based on bssid */
wl1271_configure_filters(wl, FIF_OTHER_BSS);
@ -2246,6 +2279,9 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
wl->channel = channel;
}
if ((changed & IEEE80211_CONF_CHANGE_POWER))
wl->power_level = conf->power_level;
goto out;
}
@ -2753,6 +2789,44 @@ out:
return ret;
}
static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
mutex_lock(&wl->mutex);
if (wl->state == WL1271_STATE_OFF)
goto out;
if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
goto out;
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
ret = wl1271_scan_stop(wl);
if (ret < 0)
goto out_sleep;
}
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan.req = NULL;
ieee80211_scan_completed(wl->hw, true);
out_sleep:
wl1271_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
cancel_delayed_work_sync(&wl->scan_complete_work);
}
static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
@ -3515,6 +3589,12 @@ static void wl1271_free_sta(struct wl1271 *wl, u8 hlid)
__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
}
bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid)
{
int id = hlid - WL1271_AP_STA_HLID_START;
return test_bit(id, wl->ap_hlid_map);
}
static int wl1271_op_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@ -3673,7 +3753,7 @@ static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
goto out;
/* packets are considered pending if in the TX queue or the FW */
ret = (wl->tx_queue_count > 0) || (wl->tx_frames_cnt > 0);
ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
/* the above is appropriate for STA mode for PS purposes */
WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
@ -3836,40 +3916,40 @@ static struct ieee80211_rate wl1271_rates_5ghz[] = {
/* 5 GHz band channels for WL1273 */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
{ .hw_value = 7, .center_freq = 5035},
{ .hw_value = 8, .center_freq = 5040},
{ .hw_value = 9, .center_freq = 5045},
{ .hw_value = 11, .center_freq = 5055},
{ .hw_value = 12, .center_freq = 5060},
{ .hw_value = 16, .center_freq = 5080},
{ .hw_value = 34, .center_freq = 5170},
{ .hw_value = 36, .center_freq = 5180},
{ .hw_value = 38, .center_freq = 5190},
{ .hw_value = 40, .center_freq = 5200},
{ .hw_value = 42, .center_freq = 5210},
{ .hw_value = 44, .center_freq = 5220},
{ .hw_value = 46, .center_freq = 5230},
{ .hw_value = 48, .center_freq = 5240},
{ .hw_value = 52, .center_freq = 5260},
{ .hw_value = 56, .center_freq = 5280},
{ .hw_value = 60, .center_freq = 5300},
{ .hw_value = 64, .center_freq = 5320},
{ .hw_value = 100, .center_freq = 5500},
{ .hw_value = 104, .center_freq = 5520},
{ .hw_value = 108, .center_freq = 5540},
{ .hw_value = 112, .center_freq = 5560},
{ .hw_value = 116, .center_freq = 5580},
{ .hw_value = 120, .center_freq = 5600},
{ .hw_value = 124, .center_freq = 5620},
{ .hw_value = 128, .center_freq = 5640},
{ .hw_value = 132, .center_freq = 5660},
{ .hw_value = 136, .center_freq = 5680},
{ .hw_value = 140, .center_freq = 5700},
{ .hw_value = 149, .center_freq = 5745},
{ .hw_value = 153, .center_freq = 5765},
{ .hw_value = 157, .center_freq = 5785},
{ .hw_value = 161, .center_freq = 5805},
{ .hw_value = 165, .center_freq = 5825},
{ .hw_value = 7, .center_freq = 5035, .max_power = 25 },
{ .hw_value = 8, .center_freq = 5040, .max_power = 25 },
{ .hw_value = 9, .center_freq = 5045, .max_power = 25 },
{ .hw_value = 11, .center_freq = 5055, .max_power = 25 },
{ .hw_value = 12, .center_freq = 5060, .max_power = 25 },
{ .hw_value = 16, .center_freq = 5080, .max_power = 25 },
{ .hw_value = 34, .center_freq = 5170, .max_power = 25 },
{ .hw_value = 36, .center_freq = 5180, .max_power = 25 },
{ .hw_value = 38, .center_freq = 5190, .max_power = 25 },
{ .hw_value = 40, .center_freq = 5200, .max_power = 25 },
{ .hw_value = 42, .center_freq = 5210, .max_power = 25 },
{ .hw_value = 44, .center_freq = 5220, .max_power = 25 },
{ .hw_value = 46, .center_freq = 5230, .max_power = 25 },
{ .hw_value = 48, .center_freq = 5240, .max_power = 25 },
{ .hw_value = 52, .center_freq = 5260, .max_power = 25 },
{ .hw_value = 56, .center_freq = 5280, .max_power = 25 },
{ .hw_value = 60, .center_freq = 5300, .max_power = 25 },
{ .hw_value = 64, .center_freq = 5320, .max_power = 25 },
{ .hw_value = 100, .center_freq = 5500, .max_power = 25 },
{ .hw_value = 104, .center_freq = 5520, .max_power = 25 },
{ .hw_value = 108, .center_freq = 5540, .max_power = 25 },
{ .hw_value = 112, .center_freq = 5560, .max_power = 25 },
{ .hw_value = 116, .center_freq = 5580, .max_power = 25 },
{ .hw_value = 120, .center_freq = 5600, .max_power = 25 },
{ .hw_value = 124, .center_freq = 5620, .max_power = 25 },
{ .hw_value = 128, .center_freq = 5640, .max_power = 25 },
{ .hw_value = 132, .center_freq = 5660, .max_power = 25 },
{ .hw_value = 136, .center_freq = 5680, .max_power = 25 },
{ .hw_value = 140, .center_freq = 5700, .max_power = 25 },
{ .hw_value = 149, .center_freq = 5745, .max_power = 25 },
{ .hw_value = 153, .center_freq = 5765, .max_power = 25 },
{ .hw_value = 157, .center_freq = 5785, .max_power = 25 },
{ .hw_value = 161, .center_freq = 5805, .max_power = 25 },
{ .hw_value = 165, .center_freq = 5825, .max_power = 25 },
};
/* mapping to indexes for wl1271_rates_5ghz */
@ -3930,6 +4010,7 @@ static const struct ieee80211_ops wl1271_ops = {
.tx = wl1271_op_tx,
.set_key = wl1271_op_set_key,
.hw_scan = wl1271_op_hw_scan,
.cancel_hw_scan = wl1271_op_cancel_hw_scan,
.sched_scan_start = wl1271_op_sched_scan_start,
.sched_scan_stop = wl1271_op_sched_scan_stop,
.bss_info_changed = wl1271_op_bss_info_changed,
@ -4327,6 +4408,9 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
wl->quirks = 0;
wl->platform_quirks = 0;
wl->sched_scanning = false;
wl->tx_security_seq = 0;
wl->tx_security_last_seq_lsb = 0;
setup_timer(&wl->rx_streaming_timer, wl1271_rx_streaming_timer,
(unsigned long) wl);
wl->fwlog_size = 0;

View File: drivers/net/wireless/wl12xx/ps.c

@ -193,24 +193,27 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
{
int i, filtered = 0;
int i;
struct sk_buff *skb;
struct ieee80211_tx_info *info;
unsigned long flags;
int filtered[NUM_TX_QUEUES];
/* filter all frames currently in the low-level queues for this hlid */
for (i = 0; i < NUM_TX_QUEUES; i++) {
filtered[i] = 0;
while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
info = IEEE80211_SKB_CB(skb);
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
info->status.rates[0].idx = -1;
ieee80211_tx_status_ni(wl->hw, skb);
filtered++;
filtered[i]++;
}
}
spin_lock_irqsave(&wl->wl_lock, flags);
wl->tx_queue_count -= filtered;
for (i = 0; i < NUM_TX_QUEUES; i++)
wl->tx_queue_count[i] -= filtered[i];
spin_unlock_irqrestore(&wl->wl_lock, flags);
wl1271_handle_tx_low_watermark(wl);

View File: drivers/net/wireless/wl12xx/scan.c

@ -321,6 +321,33 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
return 0;
}
int wl1271_scan_stop(struct wl1271 *wl)
{
struct wl1271_cmd_header *cmd = NULL;
int ret = 0;
if (WARN_ON(wl->scan.state == WL1271_SCAN_STATE_IDLE))
return -EINVAL;
wl1271_debug(DEBUG_CMD, "cmd scan stop");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
ret = wl1271_cmd_send(wl, CMD_STOP_SCAN, cmd,
sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("cmd stop_scan failed");
goto out;
}
out:
kfree(cmd);
return ret;
}
static int
wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
struct cfg80211_sched_scan_request *req,

View File: drivers/net/wireless/wl12xx/scan.h

@ -28,6 +28,7 @@
int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
struct cfg80211_scan_request *req);
int wl1271_scan_stop(struct wl1271 *wl);
int wl1271_scan_build_probe_req(struct wl1271 *wl,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len, u8 band);

View File: drivers/net/wireless/wl12xx/sdio.c

@ -166,13 +166,13 @@ static int wl1271_sdio_power_on(struct wl1271 *wl)
ret = pm_runtime_get_sync(&func->dev);
if (ret)
goto out;
} else {
/* Runtime PM is disabled: power up the card manually */
ret = mmc_power_restore_host(func->card->host);
if (ret < 0)
goto out;
}
/* Runtime PM might be disabled, so power up the card manually */
ret = mmc_power_restore_host(func->card->host);
if (ret < 0)
goto out;
sdio_claim_host(func);
sdio_enable_func(func);
@ -188,7 +188,7 @@ static int wl1271_sdio_power_off(struct wl1271 *wl)
sdio_disable_func(func);
sdio_release_host(func);
/* Runtime PM might be disabled, so power off the card manually */
/* Power off the card manually, even if runtime PM is enabled. */
ret = mmc_power_save_host(func->card->host);
if (ret < 0)
return ret;

View File: drivers/net/wireless/wl12xx/tx.c

@ -168,7 +168,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
u32 len;
u32 total_blocks;
int id, ret = -EBUSY;
int id, ret = -EBUSY, ac;
u32 spare_blocks;
if (unlikely(wl->quirks & WL12XX_QUIRK_USE_2_SPARE_BLOCKS))
@ -206,7 +206,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
desc->id = id;
wl->tx_blocks_available -= total_blocks;
wl->tx_allocated_blocks += total_blocks;
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
wl->tx_allocated_blocks[ac] += total_blocks;
if (wl->bss_type == BSS_TYPE_AP_BSS)
wl->links[hlid].allocated_blks += total_blocks;
@ -383,6 +385,8 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
if (ret < 0)
return ret;
wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
if (wl->bss_type == BSS_TYPE_AP_BSS) {
wl1271_tx_ap_update_inconnection_sta(wl, skb);
wl1271_tx_regulate_link(wl, hlid);
@ -390,8 +394,6 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
wl1271_tx_update_filters(wl, skb);
}
wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
/*
* The length of each packet is stored in terms of
* words. Thus, we must pad the skb data to make sure its
@ -442,37 +444,62 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
unsigned long flags;
int i;
if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
wl->tx_queue_count <= WL1271_TX_QUEUE_LOW_WATERMARK) {
/* firmware buffer has space, restart queues */
spin_lock_irqsave(&wl->wl_lock, flags);
ieee80211_wake_queues(wl->hw);
clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
spin_unlock_irqrestore(&wl->wl_lock, flags);
for (i = 0; i < NUM_TX_QUEUES; i++) {
if (test_bit(i, &wl->stopped_queues_map) &&
wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) {
/* firmware buffer has space, restart queues */
spin_lock_irqsave(&wl->wl_lock, flags);
ieee80211_wake_queue(wl->hw,
wl1271_tx_get_mac80211_queue(i));
clear_bit(i, &wl->stopped_queues_map);
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
}
}
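
The stop/wake pair in wl1271_op_tx and this function forms a per-AC hysteresis: a queue is stopped once it reaches the new high watermark (256) and only woken after draining to the low watermark (32), values taken from the wl12xx.h hunk at the end of this diff. A toy user-space model of that behavior:

#include <stdio.h>
#include <stdbool.h>

#define LOW_WATERMARK  32
#define HIGH_WATERMARK 256

int main(void)
{
	int queue_count = 0;
	bool stopped = false;
	int i;

	for (i = 0; i < 300; i++) {		/* enqueue 300 packets */
		queue_count++;
		if (!stopped && queue_count >= HIGH_WATERMARK) {
			stopped = true;
			printf("stop queue at %d\n", queue_count);
		}
	}
	while (queue_count > 0) {		/* drain the queue */
		queue_count--;
		if (stopped && queue_count <= LOW_WATERMARK) {
			stopped = false;
			printf("wake queue at %d\n", queue_count);
		}
	}
	return 0;
}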
static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
struct sk_buff_head *queues)
{
int i, q = -1;
u32 min_blks = 0xffffffff;
/*
* Find a non-empty ac where:
* 1. There are packets to transmit
* 2. The FW has the least allocated blocks
*/
for (i = 0; i < NUM_TX_QUEUES; i++)
if (!skb_queue_empty(&queues[i]) &&
(wl->tx_allocated_blocks[i] < min_blks)) {
q = i;
min_blks = wl->tx_allocated_blocks[q];
}
if (q == -1)
return NULL;
return &queues[q];
}
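
A self-contained sketch of the selection policy above, with made-up queue lengths and block counts: among ACs that still have packets queued, the one with the fewest blocks already allocated in the FW wins, which keeps one busy AC from starving the others.

#include <stdio.h>

#define NUM_TX_QUEUES 4

/* mirrors wl1271_select_queue(): returns the chosen ac, or -1 if all empty */
int select_queue(const int queue_len[], const unsigned int alloc_blocks[])
{
	unsigned int min_blks = 0xffffffff;
	int i, q = -1;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		if (queue_len[i] > 0 && alloc_blocks[i] < min_blks) {
			q = i;
			min_blks = alloc_blocks[i];
		}
	return q;
}

int main(void)
{
	int len[NUM_TX_QUEUES] = { 0, 3, 5, 1 };	/* illustrative */
	unsigned int blks[NUM_TX_QUEUES] = { 2, 7, 4, 9 };

	printf("selected ac: %d\n", select_queue(len, blks)); /* prints 2 */
	return 0;
}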
static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
{
struct sk_buff *skb = NULL;
unsigned long flags;
struct sk_buff_head *queue;
skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_VO]);
if (skb)
queue = wl1271_select_queue(wl, wl->tx_queue);
if (!queue)
goto out;
skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_VI]);
if (skb)
goto out;
skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_BE]);
if (skb)
goto out;
skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_BK]);
skb = skb_dequeue(queue);
out:
if (skb) {
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
spin_lock_irqsave(&wl->wl_lock, flags);
wl->tx_queue_count--;
wl->tx_queue_count[q]--;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
@ -484,6 +511,7 @@ static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
struct sk_buff *skb = NULL;
unsigned long flags;
int i, h, start_hlid;
struct sk_buff_head *queue;
/* start from the link after the last one */
start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;
@ -492,25 +520,25 @@ static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
for (i = 0; i < AP_MAX_LINKS; i++) {
h = (start_hlid + i) % AP_MAX_LINKS;
skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VO]);
/* only consider connected stations */
if (h >= WL1271_AP_STA_HLID_START &&
!test_bit(h - WL1271_AP_STA_HLID_START, wl->ap_hlid_map))
continue;
queue = wl1271_select_queue(wl, wl->links[h].tx_queue);
if (!queue)
continue;
skb = skb_dequeue(queue);
if (skb)
goto out;
skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VI]);
if (skb)
goto out;
skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BE]);
if (skb)
goto out;
skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BK]);
if (skb)
goto out;
break;
}
out:
if (skb) {
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
wl->last_tx_hlid = h;
spin_lock_irqsave(&wl->wl_lock, flags);
wl->tx_queue_count--;
wl->tx_queue_count[q]--;
spin_unlock_irqrestore(&wl->wl_lock, flags);
} else {
wl->last_tx_hlid = 0;
@ -531,9 +559,12 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
if (!skb &&
test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
int q;
skb = wl->dummy_packet;
q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
spin_lock_irqsave(&wl->wl_lock, flags);
wl->tx_queue_count--;
wl->tx_queue_count[q]--;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
@ -558,7 +589,7 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
}
spin_lock_irqsave(&wl->wl_lock, flags);
wl->tx_queue_count++;
wl->tx_queue_count[q]++;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
@ -704,10 +735,24 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
wl->stats.retry_count += result->ack_failures;
/* update security sequence number */
wl->tx_security_seq += (result->lsb_security_sequence_number -
wl->tx_security_last_seq);
wl->tx_security_last_seq = result->lsb_security_sequence_number;
/*
* update sequence number only when relevant, i.e. only in
* sessions of TKIP, AES and GEM (not in open or WEP sessions)
*/
if (info->control.hw_key &&
(info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP ||
info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
u8 fw_lsb = result->tx_security_sequence_number_lsb;
u8 cur_lsb = wl->tx_security_last_seq_lsb;
/*
* update security sequence number, taking care of potential
* wrap-around
*/
wl->tx_security_seq += (fw_lsb - cur_lsb + 256) % 256;
wl->tx_security_last_seq_lsb = fw_lsb;
}
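
The modular arithmetic above handles the 8-bit counter wrapping: the FW reports only the LSB of the security sequence number, so the driver adds the delta modulo 256 to its wider shadow counter. A worked example with hypothetical values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tx_security_seq = 0x12fa;	/* driver-side counter */
	uint8_t cur_lsb = 0xfa;			/* last LSB seen by the driver */
	uint8_t fw_lsb = 0x04;			/* LSB reported in the TX result */

	/* (0x04 - 0xfa + 256) % 256 = 10: the counter advanced by 10 */
	tx_security_seq += (fw_lsb - cur_lsb + 256) % 256;

	printf("new seq: 0x%llx\n", (unsigned long long)tx_security_seq);
	/* prints: new seq: 0x1304 */
	return 0;
}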
/* remove private header from packet */
skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
@ -772,23 +817,26 @@ void wl1271_tx_complete(struct wl1271 *wl)
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
struct sk_buff *skb;
int i, total = 0;
int i;
unsigned long flags;
struct ieee80211_tx_info *info;
int total[NUM_TX_QUEUES];
for (i = 0; i < NUM_TX_QUEUES; i++) {
total[i] = 0;
while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
info = IEEE80211_SKB_CB(skb);
info->status.rates[0].idx = -1;
info->status.rates[0].count = 0;
ieee80211_tx_status_ni(wl->hw, skb);
total++;
total[i]++;
}
}
spin_lock_irqsave(&wl->wl_lock, flags);
wl->tx_queue_count -= total;
for (i = 0; i < NUM_TX_QUEUES; i++)
wl->tx_queue_count[i] -= total[i];
spin_unlock_irqrestore(&wl->wl_lock, flags);
wl1271_handle_tx_low_watermark(wl);
@ -823,10 +871,11 @@ void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
ieee80211_tx_status_ni(wl->hw, skb);
}
}
wl->tx_queue_count[i] = 0;
}
}
wl->tx_queue_count = 0;
wl->stopped_queues_map = 0;
/*
* Make sure the driver is at a consistent state, in case this
@ -879,8 +928,10 @@ void wl1271_tx_flush(struct wl1271 *wl)
while (!time_after(jiffies, timeout)) {
mutex_lock(&wl->mutex);
wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
wl->tx_frames_cnt, wl->tx_queue_count);
if ((wl->tx_frames_cnt == 0) && (wl->tx_queue_count == 0)) {
wl->tx_frames_cnt,
wl1271_tx_total_queue_count(wl));
if ((wl->tx_frames_cnt == 0) &&
(wl1271_tx_total_queue_count(wl) == 0)) {
mutex_unlock(&wl->mutex);
return;
}

View File: drivers/net/wireless/wl12xx/tx.h

@ -150,7 +150,7 @@ struct wl1271_tx_hw_res_descr {
(from 1st EDCA AIFS counter until TX Complete). */
__le32 medium_delay;
/* LS-byte of last TKIP seq-num (saved per AC for recovery). */
u8 lsb_security_sequence_number;
u8 tx_security_sequence_number_lsb;
/* Retry count - number of transmissions without successful ACK.*/
u8 ack_failures;
/* The rate that succeeded getting ACK
@ -182,6 +182,32 @@ static inline int wl1271_tx_get_queue(int queue)
}
}
static inline int wl1271_tx_get_mac80211_queue(int queue)
{
switch (queue) {
case CONF_TX_AC_VO:
return 0;
case CONF_TX_AC_VI:
return 1;
case CONF_TX_AC_BE:
return 2;
case CONF_TX_AC_BK:
return 3;
default:
return 2;
}
}
static inline int wl1271_tx_total_queue_count(struct wl1271 *wl)
{
int i, count = 0;
for (i = 0; i < NUM_TX_QUEUES; i++)
count += wl->tx_queue_count[i];
return count;
}
void wl1271_tx_work(struct work_struct *work);
void wl1271_tx_work_locked(struct wl1271 *wl);
void wl1271_tx_complete(struct wl1271 *wl);

View File: drivers/net/wireless/wl12xx/wl12xx.h

@ -144,6 +144,7 @@ extern u32 wl12xx_debug_level;
#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
#define WL1271_TX_SQN_POST_RECOVERY_PADDING 0xff
#define WL1271_CIPHER_SUITE_GEM 0x00147201
@ -172,7 +173,6 @@ extern u32 wl12xx_debug_level;
#define WL1271_PS_STA_MAX_BLOCKS (2 * 9)
#define WL1271_AP_BSS_INDEX 0
#define WL1271_AP_DEF_INACTIV_SEC 300
#define WL1271_AP_DEF_BEACON_EXP 20
#define ACX_TX_DESCRIPTORS 32
@ -424,7 +424,7 @@ struct wl1271 {
/* Accounting for allocated / available TX blocks on HW */
u32 tx_blocks_freed[NUM_TX_QUEUES];
u32 tx_blocks_available;
u32 tx_allocated_blocks;
u32 tx_allocated_blocks[NUM_TX_QUEUES];
u32 tx_results_count;
/* Transmitted TX packets counter for chipset interface */
@ -438,7 +438,8 @@ struct wl1271 {
/* Frames scheduled for transmission, not handled yet */
struct sk_buff_head tx_queue[NUM_TX_QUEUES];
int tx_queue_count;
int tx_queue_count[NUM_TX_QUEUES];
long stopped_queues_map;
/* Frames received, not handled yet by mac80211 */
struct sk_buff_head deferred_rx_queue;
@ -454,9 +455,16 @@ struct wl1271 {
struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
int tx_frames_cnt;
/* Security sequence number counters */
u8 tx_security_last_seq;
s64 tx_security_seq;
/*
* Security sequence number
* bits 0-15: lower 16 bits of the sequence number
* bits 16-47: upper 32 bits of the sequence number
* bits 48-63: not in use
*/
u64 tx_security_seq;
/* 8 bits of the last sequence number in use */
u8 tx_security_last_seq_lsb;
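
To see how the widened counter feeds back to the FW, here is a sketch of the 48-bit split using the WL1271_TX_SECURITY_LO16/HI32 macros from the hunk above, with the kernel u16/u32 types swapped for stdint equivalents so it builds standalone:

#include <stdio.h>
#include <stdint.h>

#define WL1271_TX_SECURITY_LO16(s) ((uint16_t)((s) & 0xffff))
#define WL1271_TX_SECURITY_HI32(s) ((uint32_t)(((s) >> 16) & 0xffffffff))

int main(void)
{
	uint64_t tx_security_seq = 0x123456789abcULL;	/* sample 48-bit value */

	printf("lo16: 0x%04x\n", WL1271_TX_SECURITY_LO16(tx_security_seq));
	/* prints: lo16: 0x9abc */
	printf("hi32: 0x%08x\n", WL1271_TX_SECURITY_HI32(tx_security_seq));
	/* prints: hi32: 0x12345678 */
	return 0;
}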
/* FW Rx counter */
u32 rx_counter;
@ -632,8 +640,8 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
#define WL1271_DEFAULT_POWER_LEVEL 0
#define WL1271_TX_QUEUE_LOW_WATERMARK 10
#define WL1271_TX_QUEUE_HIGH_WATERMARK 25
#define WL1271_TX_QUEUE_LOW_WATERMARK 32
#define WL1271_TX_QUEUE_HIGH_WATERMARK 256
#define WL1271_DEFERRED_QUEUE_LIMIT 64