enum mwifiex_ba_status ba_status)
{
struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
- unsigned long flags;
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
if (tx_ba_tsr_tbl->ba_status == ba_status) {
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
- flags);
+ spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
return tx_ba_tsr_tbl;
}
}
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
return NULL;
}
{
int i;
struct mwifiex_tx_ba_stream_tbl *del_tbl_ptr, *tmp_node;
- unsigned long flags;
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
list_for_each_entry_safe(del_tbl_ptr, tmp_node,
&priv->tx_ba_stream_tbl_ptr, list)
mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, del_tbl_ptr);
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
mwifiex_get_ba_tbl(struct mwifiex_private *priv, int tid, u8 *ra)
{
struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
- unsigned long flags;
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
if (ether_addr_equal_unaligned(tx_ba_tsr_tbl->ra, ra) &&
tx_ba_tsr_tbl->tid == tid) {
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock,
- flags);
+ spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
return tx_ba_tsr_tbl;
}
}
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
return NULL;
}
{
struct mwifiex_tx_ba_stream_tbl *new_node;
struct mwifiex_ra_list_tbl *ra_list;
- unsigned long flags;
int tid_down;
if (!mwifiex_get_ba_tbl(priv, tid, ra)) {
new_node->ba_status = ba_status;
memcpy(new_node->ra, ra, ETH_ALEN);
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
list_add_tail(&new_node->list, &priv->tx_ba_stream_tbl_ptr);
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
}
}
u32 tx_win_size = priv->add_ba_param.tx_win_size;
static u8 dialog_tok;
int ret;
- unsigned long flags;
u16 block_ack_param_set;
mwifiex_dbg(priv->adapter, CMD, "cmd: %s: tid %d\n", __func__, tid);
memcmp(priv->cfg_bssid, peer_mac, ETH_ALEN)) {
struct mwifiex_sta_node *sta_ptr;
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
sta_ptr = mwifiex_get_sta_entry(priv, peer_mac);
if (!sta_ptr) {
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
mwifiex_dbg(priv->adapter, ERROR,
"BA setup with unknown TDLS peer %pM!\n",
peer_mac);
}
if (sta_ptr->is_11ac_enabled)
tx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE;
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
}
block_ack_param_set = (u16)((tid << BLOCKACKPARAM_TID_POS) |
void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
{
struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
- unsigned long flags;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
list_for_each_entry(rx_reor_tbl_ptr, &priv->rx_reorder_tbl_ptr, list) {
if (rx_reor_tbl_ptr->tid == tid) {
dev_dbg(priv->adapter->dev,
}
}
exit:
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
}
/*
struct mwifiex_ds_rx_reorder_tbl *rx_reo_tbl = buf;
struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr;
int count = 0;
- unsigned long flags;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
list_for_each_entry(rx_reorder_tbl_ptr, &priv->rx_reorder_tbl_ptr,
list) {
rx_reo_tbl->tid = (u16) rx_reorder_tbl_ptr->tid;
if (count >= MWIFIEX_MAX_RX_BASTREAM_SUPPORTED)
break;
}
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
return count;
}
struct mwifiex_tx_ba_stream_tbl *tx_ba_tsr_tbl;
struct mwifiex_ds_tx_ba_stream_tbl *rx_reo_tbl = buf;
int count = 0;
- unsigned long flags;
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
rx_reo_tbl->tid = (u16) tx_ba_tsr_tbl->tid;
mwifiex_dbg(priv->adapter, DATA, "data: %s tid=%d\n",
if (count >= MWIFIEX_MAX_TX_BASTREAM_SUPPORTED)
break;
}
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
return count;
}
void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra)
{
struct mwifiex_tx_ba_stream_tbl *tbl, *tmp;
- unsigned long flags;
if (!ra)
return;
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
list_for_each_entry_safe(tbl, tmp, &priv->tx_ba_stream_tbl_ptr, list)
if (!memcmp(tbl->ra, ra, ETH_ALEN))
mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, tbl);
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
return;
}
int tid;
u8 ret = false;
struct mwifiex_tx_ba_stream_tbl *tx_tbl;
- unsigned long flags;
tid = priv->aggr_prio_tbl[ptr_tid].ampdu_user;
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
list_for_each_entry(tx_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
if (tid > priv->aggr_prio_tbl[tx_tbl->tid].ampdu_user) {
tid = priv->aggr_prio_tbl[tx_tbl->tid].ampdu_user;
ret = true;
}
}
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
return ret;
}
int
mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *pra_list,
- int ptrindex, unsigned long ra_list_flags)
+ int ptrindex)
__releases(&priv->wmm.ra_list_spinlock)
{
struct mwifiex_adapter *adapter = priv->adapter;
skb_src = skb_peek(&pra_list->skb_head);
if (!skb_src) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
return 0;
}
skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size,
GFP_ATOMIC);
if (!skb_aggr) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
return -1;
}
pra_list->total_pkt_count--;
atomic_dec(&priv->wmm.tx_pkts_queued);
aggr_num++;
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad);
mwifiex_write_data_complete(adapter, skb_src, 0, 0);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
return -1;
}
} while (skb_src);
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
/* Last AMSDU packet does not need padding */
skb_trim(skb_aggr, skb_aggr->len - pad);
}
switch (ret) {
case -EBUSY:
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_write_data_complete(adapter, skb_aggr, 1, -1);
return -1;
}
atomic_inc(&priv->wmm.tx_pkts_queued);
tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
break;
case -1:
struct sk_buff *skb);
int mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *ptr,
- int ptr_index, unsigned long flags)
+ int ptr_index)
__releases(&priv->wmm.ra_list_spinlock);
#endif /* !_MWIFIEX_11N_AGGR_H_ */
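
/*
 * Illustrative sketch (not part of the patch): the lock hand-off convention
 * behind mwifiex_11n_aggregate_pkt() and the send helpers, now that the saved
 * flags argument is gone. The caller takes the ra_list lock with
 * spin_lock_bh(); the callee drops it on every exit path and carries a
 * __releases() annotation so sparse can check the imbalance. The struct and
 * function names below are generic placeholders, not driver symbols.
 */
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct example_ctx {
	spinlock_t lock;		/* protects @queue */
	struct sk_buff_head queue;
};

static int example_consume_locked(struct example_ctx *ctx)
	__releases(&ctx->lock)
{
	struct sk_buff *skb = __skb_dequeue(&ctx->queue);

	/* every return path must unlock, since the caller no longer does */
	spin_unlock_bh(&ctx->lock);

	if (!skb)
		return 0;

	dev_kfree_skb_any(skb);		/* stand-in for the real transmit path */
	return 1;
}

static void example_caller(struct example_ctx *ctx)
{
	spin_lock_bh(&ctx->lock);
	example_consume_locked(ctx);	/* returns with ctx->lock dropped */
}
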
struct sk_buff_head list;
struct sk_buff *skb;
int pkt_to_send, i;
- unsigned long flags;
__skb_queue_head_init(&list);
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
pkt_to_send = (start_win > tbl->start_win) ?
min((start_win - tbl->start_win), tbl->win_size) :
}
tbl->start_win = start_win;
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
while ((skb = __skb_dequeue(&list)))
mwifiex_11n_dispatch_pkt(priv, skb);
struct sk_buff_head list;
struct sk_buff *skb;
int i, j, xchg;
- unsigned long flags;
__skb_queue_head_init(&list);
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
for (i = 0; i < tbl->win_size; ++i) {
if (!tbl->rx_reorder_ptr[i])
}
tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
while ((skb = __skb_dequeue(&list)))
mwifiex_11n_dispatch_pkt(priv, skb);
mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
struct mwifiex_rx_reorder_tbl *tbl)
{
- unsigned long flags;
int start_win;
if (!tbl)
return;
- spin_lock_irqsave(&priv->adapter->rx_proc_lock, flags);
+ spin_lock_bh(&priv->adapter->rx_proc_lock);
priv->adapter->rx_locked = true;
if (priv->adapter->rx_processing) {
- spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&priv->adapter->rx_proc_lock);
flush_workqueue(priv->adapter->rx_workqueue);
} else {
- spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&priv->adapter->rx_proc_lock);
}
start_win = (tbl->start_win + tbl->win_size) & (MAX_TID_VALUE - 1);
del_timer_sync(&tbl->timer_context.timer);
tbl->timer_context.timer_is_set = false;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
list_del(&tbl->list);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
kfree(tbl->rx_reorder_ptr);
kfree(tbl);
- spin_lock_irqsave(&priv->adapter->rx_proc_lock, flags);
+ spin_lock_bh(&priv->adapter->rx_proc_lock);
priv->adapter->rx_locked = false;
- spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&priv->adapter->rx_proc_lock);
}
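
/*
 * Illustrative sketch (not part of the patch): the quiesce pattern used in
 * mwifiex_del_rx_reorder_entry() above. A flag is set under the BH lock so no
 * new RX work is queued; if the worker is currently running, the lock is
 * dropped before flush_workqueue() so the worker can complete. Field names
 * here are placeholders.
 */
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_rx {
	spinlock_t lock;
	bool locked;			/* new RX work must not be queued */
	bool running;			/* worker currently processing */
	struct workqueue_struct *wq;
};

static void example_rx_quiesce(struct example_rx *rx)
{
	spin_lock_bh(&rx->lock);
	rx->locked = true;
	if (rx->running) {
		/* never call flush_workqueue() with a spinlock held */
		spin_unlock_bh(&rx->lock);
		flush_workqueue(rx->wq);
	} else {
		spin_unlock_bh(&rx->lock);
	}
}
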
mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
{
struct mwifiex_rx_reorder_tbl *tbl;
- unsigned long flags;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
return tbl;
}
}
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
return NULL;
}
void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
{
struct mwifiex_rx_reorder_tbl *tbl, *tmp;
- unsigned long flags;
if (!ta)
return;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
mwifiex_del_rx_reorder_entry(priv, tbl);
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
}
}
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
return;
}
{
struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
struct mwifiex_private *priv = ctx->priv;
- unsigned long flags;
int i;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
return i;
}
}
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
return -1;
}
int i;
struct mwifiex_rx_reorder_tbl *tbl, *new_node;
u16 last_seq = 0;
- unsigned long flags;
struct mwifiex_sta_node *node;
/*
new_node->init_win = seq_num;
new_node->flags = 0;
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
if (mwifiex_queuing_ra_based(priv)) {
if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) {
node = mwifiex_get_sta_entry(priv, ta);
else
last_seq = priv->rx_seq[tid];
}
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
mwifiex_dbg(priv->adapter, INFO,
"info: last_seq=%d start_win=%d\n",
for (i = 0; i < win_size; ++i)
new_node->rx_reorder_ptr[i] = NULL;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
list_add_tail(&new_node->list, &priv->rx_reorder_tbl_ptr);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
}
static void
u32 rx_win_size = priv->add_ba_param.rx_win_size;
u8 tid;
int win_size;
- unsigned long flags;
uint16_t block_ack_param_set;
if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
priv->adapter->is_hw_11ac_capable &&
memcmp(priv->cfg_bssid, cmd_addba_req->peer_mac_addr, ETH_ALEN)) {
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
sta_ptr = mwifiex_get_sta_entry(priv,
cmd_addba_req->peer_mac_addr);
if (!sta_ptr) {
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
mwifiex_dbg(priv->adapter, ERROR,
"BA setup with unknown TDLS peer %pM!\n",
cmd_addba_req->peer_mac_addr);
}
if (sta_ptr->is_11ac_enabled)
rx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE;
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
}
cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
struct mwifiex_tx_ba_stream_tbl *ptx_tbl;
struct mwifiex_ra_list_tbl *ra_list;
u8 cleanup_rx_reorder_tbl;
- unsigned long flags;
int tid_down;
if (type == TYPE_DELBA_RECEIVE)
ra_list->amsdu_in_ampdu = false;
ra_list->ba_status = BA_SETUP_NONE;
}
- spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_lock_bh(&priv->tx_ba_stream_tbl_lock);
mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, ptx_tbl);
- spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+ spin_unlock_bh(&priv->tx_ba_stream_tbl_lock);
}
}
void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
{
struct mwifiex_rx_reorder_tbl *del_tbl_ptr, *tmp_node;
- unsigned long flags;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
list_for_each_entry_safe(del_tbl_ptr, tmp_node,
&priv->rx_reorder_tbl_ptr, list) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
}
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
mwifiex_reset_11n_rx_seq_num(priv);
}
{
struct mwifiex_private *priv;
struct mwifiex_rx_reorder_tbl *tbl;
- unsigned long lock_flags;
int i;
for (i = 0; i < adapter->priv_num; i++) {
if (!priv)
continue;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, lock_flags);
+ spin_lock_bh(&priv->rx_reorder_tbl_lock);
list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
tbl->flags = flags;
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, lock_flags);
+ spin_unlock_bh(&priv->rx_reorder_tbl_lock);
}
return;
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
}
- spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+ spin_lock_bh(&adapter->rx_proc_lock);
adapter->rx_locked = true;
if (adapter->rx_processing) {
- spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&adapter->rx_proc_lock);
flush_workqueue(adapter->rx_workqueue);
} else {
- spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&adapter->rx_proc_lock);
}
mwifiex_free_priv(priv);
adapter->main_locked = false;
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
- spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+ spin_lock_bh(&adapter->rx_proc_lock);
adapter->rx_locked = false;
- spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&adapter->rx_proc_lock);
mwifiex_set_mac_address(priv, dev, false, NULL);
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
struct mwifiex_sta_node *sta_node;
u8 deauth_mac[ETH_ALEN];
- unsigned long flags;
if (!priv->bss_started && priv->wdev.cac_started) {
mwifiex_dbg(priv->adapter, INFO, "%s: abort CAC!\n", __func__);
eth_zero_addr(deauth_mac);
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
sta_node = mwifiex_get_sta_entry(priv, params->mac);
if (sta_node)
ether_addr_copy(deauth_mac, params->mac);
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
if (is_valid_ether_addr(deauth_mac)) {
if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_STA_DEAUTH,
struct cfg80211_chan_def *chandef)
{
struct mwifiex_sta_node *sta_ptr;
- unsigned long flags;
u16 chan;
u8 second_chan_offset, band;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
sta_ptr = mwifiex_get_sta_entry(priv, addr);
if (!sta_ptr) {
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n",
__func__, addr);
return -ENOENT;
if (!(sta_ptr->tdls_cap.extcap.ext_capab[3] &
WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH)) {
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
wiphy_err(wiphy, "%pM do not support tdls cs\n", addr);
return -ENOENT;
}
if (sta_ptr->tdls_status == TDLS_CHAN_SWITCHING ||
sta_ptr->tdls_status == TDLS_IN_OFF_CHAN) {
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
wiphy_err(wiphy, "channel switch is running, abort request\n");
return -EALREADY;
}
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
chan = chandef->chan->hw_value;
second_chan_offset = mwifiex_get_sec_chan_offset(chan);
const u8 *addr)
{
struct mwifiex_sta_node *sta_ptr;
- unsigned long flags;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
sta_ptr = mwifiex_get_sta_entry(priv, addr);
if (!sta_ptr) {
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n",
__func__, addr);
} else if (!(sta_ptr->tdls_status == TDLS_CHAN_SWITCHING ||
sta_ptr->tdls_status == TDLS_IN_BASE_CHAN ||
sta_ptr->tdls_status == TDLS_IN_OFF_CHAN)) {
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
wiphy_err(wiphy, "tdls chan switch not initialize by %pM\n",
addr);
} else {
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
mwifiex_stop_tdls_cs(priv, addr);
}
}
mwifiex_get_cmd_node(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_node;
- unsigned long flags;
- spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
+ spin_lock_bh(&adapter->cmd_free_q_lock);
if (list_empty(&adapter->cmd_free_q)) {
mwifiex_dbg(adapter, ERROR,
"GET_CMD_NODE: cmd node not available\n");
- spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
+ spin_unlock_bh(&adapter->cmd_free_q_lock);
return NULL;
}
cmd_node = list_first_entry(&adapter->cmd_free_q,
struct cmd_ctrl_node, list);
list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
+ spin_unlock_bh(&adapter->cmd_free_q_lock);
return cmd_node;
}
mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node)
{
- unsigned long flags;
-
if (!cmd_node)
return;
mwifiex_clean_cmd_node(adapter, cmd_node);
/* Insert node into cmd_free_q */
- spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
+ spin_lock_bh(&adapter->cmd_free_q_lock);
list_add_tail(&cmd_node->list, &adapter->cmd_free_q);
- spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
+ spin_unlock_bh(&adapter->cmd_free_q_lock);
}
/* This function reuses a command node. */
struct host_cmd_ds_command *host_cmd;
uint16_t cmd_code;
uint16_t cmd_size;
- unsigned long flags;
if (!adapter || !cmd_node)
return -1;
cmd_node->priv->bss_num,
cmd_node->priv->bss_type));
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->curr_cmd = cmd_node;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
/* Adjust skb length */
if (cmd_node->cmd_skb->len > cmd_size)
adapter->cmd_wait_q.status = -1;
mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->curr_cmd = NULL;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
adapter->dbg.num_cmd_host_to_card_failure++;
return -1;
{
struct host_cmd_ds_command *host_cmd = NULL;
u16 command;
- unsigned long flags;
bool add_tail = true;
host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
}
}
- spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
+ spin_lock_bh(&adapter->cmd_pending_q_lock);
if (add_tail)
list_add_tail(&cmd_node->list, &adapter->cmd_pending_q);
else
list_add(&cmd_node->list, &adapter->cmd_pending_q);
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
+ spin_unlock_bh(&adapter->cmd_pending_q_lock);
atomic_inc(&adapter->cmd_pending);
mwifiex_dbg(adapter, CMD,
struct cmd_ctrl_node *cmd_node;
int ret = 0;
struct host_cmd_ds_command *host_cmd;
- unsigned long cmd_flags;
- unsigned long cmd_pending_q_flags;
/* Check if already in processing */
if (adapter->curr_cmd) {
return -1;
}
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
/* Check if any command is pending */
- spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_pending_q_flags);
+ spin_lock_bh(&adapter->cmd_pending_q_lock);
if (list_empty(&adapter->cmd_pending_q)) {
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
- cmd_pending_q_flags);
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_unlock_bh(&adapter->cmd_pending_q_lock);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
return 0;
}
cmd_node = list_first_entry(&adapter->cmd_pending_q,
mwifiex_dbg(adapter, ERROR,
"%s: cannot send cmd in sleep state,\t"
"this should not happen\n", __func__);
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
- cmd_pending_q_flags);
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_unlock_bh(&adapter->cmd_pending_q_lock);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
return ret;
}
list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
- cmd_pending_q_flags);
+ spin_unlock_bh(&adapter->cmd_pending_q_lock);
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
ret = mwifiex_dnld_cmd_to_fw(priv, cmd_node);
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
/* Any command sent to the firmware when host is in sleep
uint16_t orig_cmdresp_no;
uint16_t cmdresp_no;
uint16_t cmdresp_result;
- unsigned long flags;
if (!adapter->curr_cmd || !adapter->curr_cmd->resp_skb) {
resp = (struct host_cmd_ds_command *) adapter->upld_buf;
adapter->cmd_wait_q.status = -1;
mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->curr_cmd = NULL;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
return -1;
}
mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->curr_cmd = NULL;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
}
return ret;
mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
- unsigned long flags;
/* Cancel all pending scan command */
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ spin_lock_bh(&adapter->scan_pending_q_lock);
list_for_each_entry_safe(cmd_node, tmp_node,
&adapter->scan_pending_q, list) {
list_del(&cmd_node->list);
cmd_node->wait_q_enabled = false;
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
}
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
}
/*
mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
- unsigned long flags, cmd_flags;
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
/* Cancel current cmd */
if ((adapter->curr_cmd) && (adapter->curr_cmd->wait_q_enabled)) {
adapter->cmd_wait_q.status = -1;
/* no recycle probably wait for response */
}
/* Cancel all pending command */
- spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
+ spin_lock_bh(&adapter->cmd_pending_q_lock);
list_for_each_entry_safe(cmd_node, tmp_node,
&adapter->cmd_pending_q, list) {
list_del(&cmd_node->list);
adapter->cmd_wait_q.status = -1;
mwifiex_recycle_cmd_node(adapter, cmd_node);
}
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_unlock_bh(&adapter->cmd_pending_q_lock);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
mwifiex_cancel_scan(adapter);
}
mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_node = NULL;
- unsigned long cmd_flags;
if ((adapter->curr_cmd) &&
(adapter->curr_cmd->wait_q_enabled)) {
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
cmd_node = adapter->curr_cmd;
/* setting curr_cmd to NULL is quite dangerous, because
* mwifiex_process_cmdresp checks curr_cmd to be != NULL
* at that point
*/
adapter->curr_cmd = NULL;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
mwifiex_recycle_cmd_node(adapter, cmd_node);
}
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_bss_prio_node *bss_prio;
struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
- unsigned long flags;
bss_prio = kzalloc(sizeof(struct mwifiex_bss_prio_node), GFP_KERNEL);
if (!bss_prio)
bss_prio->priv = priv;
INIT_LIST_HEAD(&bss_prio->list);
- spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags);
+ spin_lock_bh(&tbl[priv->bss_priority].bss_prio_lock);
list_add_tail(&bss_prio->list, &tbl[priv->bss_priority].bss_prio_head);
- spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags);
+ spin_unlock_bh(&tbl[priv->bss_priority].bss_prio_lock);
return 0;
}
void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
struct mwifiex_adapter *adapter)
{
- unsigned long dev_queue_flags;
-
- spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
+ spin_lock_bh(&adapter->queue_lock);
netif_tx_wake_all_queues(netdev);
- spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
+ spin_unlock_bh(&adapter->queue_lock);
}
/*
void mwifiex_stop_net_dev_queue(struct net_device *netdev,
struct mwifiex_adapter *adapter)
{
- unsigned long dev_queue_flags;
-
- spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
+ spin_lock_bh(&adapter->queue_lock);
netif_tx_stop_all_queues(netdev);
- spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
+ spin_unlock_bh(&adapter->queue_lock);
}
/*
struct mwifiex_private *priv;
u8 i, first_sta = true;
int is_cmd_pend_q_empty;
- unsigned long flags;
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
}
}
- spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
+ spin_lock_bh(&adapter->cmd_pending_q_lock);
is_cmd_pend_q_empty = list_empty(&adapter->cmd_pending_q);
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
+ spin_unlock_bh(&adapter->cmd_pending_q_lock);
if (!is_cmd_pend_q_empty) {
/* Send the first command in queue and return */
if (mwifiex_main_process(adapter) != -1)
struct mwifiex_bss_prio_node *bssprio_node, *tmp_node;
struct list_head *head;
spinlock_t *lock; /* bss priority lock */
- unsigned long flags;
for (i = 0; i < adapter->priv_num; ++i) {
head = &adapter->bss_prio_tbl[i].bss_prio_head;
priv->bss_type, priv->bss_num, i, head);
{
- spin_lock_irqsave(lock, flags);
+ spin_lock_bh(lock);
list_for_each_entry_safe(bssprio_node, tmp_node, head,
list) {
if (bssprio_node->priv == priv) {
kfree(bssprio_node);
}
}
- spin_unlock_irqrestore(lock, flags);
+ spin_unlock_bh(lock);
}
}
}
{
struct mwifiex_private *priv;
s32 i;
- unsigned long flags;
struct sk_buff *skb;
/* mwifiex already shutdown */
while ((skb = skb_dequeue(&adapter->tx_data_q)))
mwifiex_write_data_complete(adapter, skb, 0, 0);
- spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+ spin_lock_bh(&adapter->rx_proc_lock);
while ((skb = skb_dequeue(&adapter->rx_data_q))) {
struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
dev_kfree_skb_any(skb);
}
- spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&adapter->rx_proc_lock);
mwifiex_adapter_cleanup(adapter);
static void mwifiex_queue_rx_work(struct mwifiex_adapter *adapter)
{
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+ spin_lock_bh(&adapter->rx_proc_lock);
if (adapter->rx_processing) {
- spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&adapter->rx_proc_lock);
} else {
- spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&adapter->rx_proc_lock);
queue_work(adapter->rx_workqueue, &adapter->rx_work);
}
}
static int mwifiex_process_rx(struct mwifiex_adapter *adapter)
{
- unsigned long flags;
struct sk_buff *skb;
struct mwifiex_rxinfo *rx_info;
- spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+ spin_lock_bh(&adapter->rx_proc_lock);
if (adapter->rx_processing || adapter->rx_locked) {
- spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&adapter->rx_proc_lock);
goto exit_rx_proc;
} else {
adapter->rx_processing = true;
- spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&adapter->rx_proc_lock);
}
/* Check for Rx data */
mwifiex_handle_rx_packet(adapter, skb);
}
}
- spin_lock_irqsave(&adapter->rx_proc_lock, flags);
+ spin_lock_bh(&adapter->rx_proc_lock);
adapter->rx_processing = false;
- spin_unlock_irqrestore(&adapter->rx_proc_lock, flags);
+ spin_unlock_bh(&adapter->rx_proc_lock);
exit_rx_proc:
return 0;
skb = skb_clone(skb, GFP_ATOMIC);
if (skb) {
- unsigned long flags;
int id;
- spin_lock_irqsave(&priv->ack_status_lock, flags);
+ spin_lock_bh(&priv->ack_status_lock);
id = idr_alloc(&priv->ack_status_frames, orig_skb,
1, 0x10, GFP_ATOMIC);
- spin_unlock_irqrestore(&priv->ack_status_lock, flags);
+ spin_unlock_bh(&priv->ack_status_lock);
if (id >= 0) {
tx_info = MWIFIEX_SKB_TXCB(skb);
*/
int is_command_pending(struct mwifiex_adapter *adapter)
{
- unsigned long flags;
int is_cmd_pend_q_empty;
- spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
+ spin_lock_bh(&adapter->cmd_pending_q_lock);
is_cmd_pend_q_empty = list_empty(&adapter->cmd_pending_q);
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
+ spin_unlock_bh(&adapter->cmd_pending_q_lock);
return !is_cmd_pend_q_empty;
}
u8 filtered_scan;
u8 scan_current_chan_only;
u8 max_chan_per_scan;
- unsigned long flags;
if (adapter->scan_processing) {
mwifiex_dbg(adapter, WARN,
return -EFAULT;
}
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->scan_processing = true;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
scan_cfg_out = kzalloc(sizeof(union mwifiex_scan_cmd_config_tlv),
GFP_KERNEL);
/* Get scan command from scan_pending_q and put to cmd_pending_q */
if (!ret) {
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ spin_lock_bh(&adapter->scan_pending_q_lock);
if (!list_empty(&adapter->scan_pending_q)) {
cmd_node = list_first_entry(&adapter->scan_pending_q,
struct cmd_ctrl_node, list);
list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node);
queue_work(adapter->workqueue, &adapter->main_work);
mwifiex_wait_queue_complete(adapter, cmd_node);
}
} else {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
}
}
kfree(scan_chan_list);
done:
if (ret) {
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
}
return ret;
}
{
struct mwifiex_bssdescriptor *bss_desc;
int ret;
- unsigned long flags;
/* Allocate and fill new bss descriptor */
bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), GFP_KERNEL);
if (ret)
goto done;
- spin_lock_irqsave(&priv->curr_bcn_buf_lock, flags);
+ spin_lock_bh(&priv->curr_bcn_buf_lock);
/* Make a copy of current BSSID descriptor */
memcpy(&priv->curr_bss_params.bss_descriptor, bss_desc,
sizeof(priv->curr_bss_params.bss_descriptor));
* in mwifiex_save_curr_bcn()
*/
mwifiex_save_curr_bcn(priv);
- spin_unlock_irqrestore(&priv->curr_bcn_buf_lock, flags);
+ spin_unlock_bh(&priv->curr_bcn_buf_lock);
done:
/* beacon_ie buffer was allocated in function
{
struct mwifiex_adapter *adapter = priv->adapter;
struct cmd_ctrl_node *cmd_node;
- unsigned long flags;
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ spin_lock_bh(&adapter->scan_pending_q_lock);
if (list_empty(&adapter->scan_pending_q)) {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
mwifiex_active_scan_req_for_passive_chan(priv);
}
} else if ((priv->scan_aborting && !priv->scan_request) ||
priv->scan_block) {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
mwifiex_cancel_pending_scan_cmd(adapter);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
if (!adapter->active_scan_triggered) {
if (priv->scan_request) {
cmd_node = list_first_entry(&adapter->scan_pending_q,
struct cmd_ctrl_node, list);
list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node);
}
void mwifiex_cancel_scan(struct mwifiex_adapter *adapter)
{
struct mwifiex_private *priv;
- unsigned long cmd_flags;
int i;
mwifiex_cancel_pending_scan_cmd(adapter);
if (adapter->scan_processing) {
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
if (!priv)
struct host_cmd_ds_command *cmd_ptr;
struct cmd_ctrl_node *cmd_node;
- unsigned long cmd_flags, scan_flags;
bool complete_scan = false;
mwifiex_dbg(adapter, INFO, "info: EXT scan returns successfully\n");
sizeof(struct mwifiex_ie_types_header));
}
- spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_flags);
- spin_lock_irqsave(&adapter->scan_pending_q_lock, scan_flags);
+ spin_lock_bh(&adapter->cmd_pending_q_lock);
+ spin_lock_bh(&adapter->scan_pending_q_lock);
if (list_empty(&adapter->scan_pending_q)) {
complete_scan = true;
list_for_each_entry(cmd_node, &adapter->cmd_pending_q, list) {
}
}
}
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, scan_flags);
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, cmd_flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
+ spin_unlock_bh(&adapter->cmd_pending_q_lock);
if (complete_scan)
mwifiex_complete_scan(priv);
struct cmd_ctrl_node *cmd_node)
{
struct mwifiex_adapter *adapter = priv->adapter;
- unsigned long flags;
cmd_node->wait_q_enabled = true;
cmd_node->condition = &adapter->scan_wait_q_woken;
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ spin_lock_bh(&adapter->scan_pending_q_lock);
list_add_tail(&cmd_node->list, &adapter->scan_pending_q);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
}
/*
{
struct mwifiex_adapter *adapter = priv->adapter;
struct host_cmd_ds_802_11_ps_mode_enh *pm;
- unsigned long flags;
mwifiex_dbg(adapter, ERROR,
"CMD_RESP: cmd %#x error, result=%#x\n",
/* Handling errors here */
mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->curr_cmd = NULL;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
}
/*
{
struct mwifiex_tx_pause_tlv *tp;
struct mwifiex_sta_node *sta_ptr;
- unsigned long flags;
tp = (void *)tlv;
mwifiex_dbg(priv->adapter, EVENT,
} else if (is_multicast_ether_addr(tp->peermac)) {
mwifiex_update_ralist_tx_pause(priv, tp->peermac, tp->tx_pause);
} else {
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac);
if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) {
sta_ptr->tx_pause = tp->tx_pause;
mwifiex_update_ralist_tx_pause(priv, tp->peermac,
tp->tx_pause);
}
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
}
}
struct mwifiex_tx_pause_tlv *tp;
struct mwifiex_sta_node *sta_ptr;
int status;
- unsigned long flags;
tp = (void *)tlv;
mwifiex_dbg(priv->adapter, EVENT,
status = mwifiex_get_tdls_link_status(priv, tp->peermac);
if (mwifiex_is_tdls_link_setup(status)) {
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac);
if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) {
sta_ptr->tx_pause = tp->tx_pause;
tp->peermac,
tp->tx_pause);
}
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
}
}
}
struct list_head *tid_list;
struct sk_buff *skb, *tmp;
struct mwifiex_txinfo *tx_info;
- unsigned long flags;
u32 tid;
u8 tid_down;
mwifiex_dbg(priv->adapter, DATA, "%s: %pM\n", __func__, mac);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
if (!ether_addr_equal(mac, skb->data))
atomic_inc(&priv->wmm.tx_pkts_queued);
}
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
return;
}
struct mwifiex_ra_list_tbl *ra_list;
struct list_head *ra_list_head;
struct sk_buff *skb, *tmp;
- unsigned long flags;
int i;
mwifiex_dbg(priv->adapter, DATA, "%s: %pM\n", __func__, mac);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
for (i = 0; i < MAX_NUM_TID; i++) {
if (!list_empty(&priv->wmm.tid_tbl_ptr[i].ra_list)) {
}
}
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
return;
}
{
struct mwifiex_sta_node *sta_ptr;
struct mwifiex_ds_tdls_oper tdls_oper;
- unsigned long flags;
memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
sta_ptr = mwifiex_get_sta_entry(priv, peer);
if (sta_ptr) {
if (sta_ptr->is_11n_enabled) {
mwifiex_11n_cleanup_reorder_tbl(priv);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
- flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
mwifiex_del_sta_entry(priv, peer);
}
{
struct mwifiex_sta_node *sta_ptr;
struct ieee80211_mcs_info mcs;
- unsigned long flags;
int i;
sta_ptr = mwifiex_get_sta_entry(priv, peer);
"tdls: enable link %pM failed\n", peer);
if (sta_ptr) {
mwifiex_11n_cleanup_reorder_tbl(priv);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
- flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_del_sta_entry(priv, peer);
}
mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
struct mwifiex_sta_node *sta_ptr;
struct tdls_peer_info *peer = buf;
int count = 0;
- unsigned long flags;
if (!ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
return 0;
if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
return 0;
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
list_for_each_entry(sta_ptr, &priv->sta_list, list) {
if (mwifiex_is_tdls_link_setup(sta_ptr->tdls_status)) {
ether_addr_copy(peer->peer_addr, sta_ptr->mac_addr);
break;
}
}
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
return count;
}
{
struct mwifiex_sta_node *sta_ptr;
struct mwifiex_ds_tdls_oper tdls_oper;
- unsigned long flags;
if (list_empty(&priv->sta_list))
return;
if (sta_ptr->is_11n_enabled) {
mwifiex_11n_cleanup_reorder_tbl(priv);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
- flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
mwifiex_restore_tdls_packets(priv, sta_ptr->mac_addr,
int mwifiex_tdls_check_tx(struct mwifiex_private *priv, struct sk_buff *skb)
{
struct mwifiex_auto_tdls_peer *peer;
- unsigned long flags;
u8 mac[ETH_ALEN];
ether_addr_copy(mac, skb->data);
- spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ spin_lock_bh(&priv->auto_tdls_lock);
list_for_each_entry(peer, &priv->auto_tdls_list, list) {
if (!memcmp(mac, peer->mac_addr, ETH_ALEN)) {
if (peer->rssi <= MWIFIEX_TDLS_RSSI_HIGH &&
}
}
}
- spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+ spin_unlock_bh(&priv->auto_tdls_lock);
return 0;
}
void mwifiex_flush_auto_tdls_list(struct mwifiex_private *priv)
{
struct mwifiex_auto_tdls_peer *peer, *tmp_node;
- unsigned long flags;
- spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ spin_lock_bh(&priv->auto_tdls_lock);
list_for_each_entry_safe(peer, tmp_node, &priv->auto_tdls_list, list) {
list_del(&peer->list);
kfree(peer);
}
INIT_LIST_HEAD(&priv->auto_tdls_list);
- spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+ spin_unlock_bh(&priv->auto_tdls_lock);
priv->check_tdls_tx = false;
}
void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac)
{
struct mwifiex_auto_tdls_peer *tdls_peer;
- unsigned long flags;
if (!priv->adapter->auto_tdls)
return;
- spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ spin_lock_bh(&priv->auto_tdls_lock);
list_for_each_entry(tdls_peer, &priv->auto_tdls_list, list) {
if (!memcmp(tdls_peer->mac_addr, mac, ETH_ALEN)) {
tdls_peer->tdls_status = TDLS_SETUP_INPROGRESS;
tdls_peer->rssi_jiffies = jiffies;
- spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+ spin_unlock_bh(&priv->auto_tdls_lock);
return;
}
}
"Add auto TDLS peer= %pM to list\n", mac);
}
- spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+ spin_unlock_bh(&priv->auto_tdls_lock);
}
void mwifiex_auto_tdls_update_peer_status(struct mwifiex_private *priv,
const u8 *mac, u8 link_status)
{
struct mwifiex_auto_tdls_peer *peer;
- unsigned long flags;
if (!priv->adapter->auto_tdls)
return;
- spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ spin_lock_bh(&priv->auto_tdls_lock);
list_for_each_entry(peer, &priv->auto_tdls_list, list) {
if (!memcmp(peer->mac_addr, mac, ETH_ALEN)) {
if ((link_status == TDLS_NOT_SETUP) &&
break;
}
}
- spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+ spin_unlock_bh(&priv->auto_tdls_lock);
}
void mwifiex_auto_tdls_update_peer_signal(struct mwifiex_private *priv,
u8 *mac, s8 snr, s8 nflr)
{
struct mwifiex_auto_tdls_peer *peer;
- unsigned long flags;
if (!priv->adapter->auto_tdls)
return;
- spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ spin_lock_bh(&priv->auto_tdls_lock);
list_for_each_entry(peer, &priv->auto_tdls_list, list) {
if (!memcmp(peer->mac_addr, mac, ETH_ALEN)) {
peer->rssi = nflr - snr;
break;
}
}
- spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+ spin_unlock_bh(&priv->auto_tdls_lock);
}
void mwifiex_check_auto_tdls(struct timer_list *t)
{
struct mwifiex_private *priv = from_timer(priv, t, auto_tdls_timer);
struct mwifiex_auto_tdls_peer *tdls_peer;
- unsigned long flags;
u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
if (WARN_ON_ONCE(!priv || !priv->adapter)) {
priv->check_tdls_tx = false;
- spin_lock_irqsave(&priv->auto_tdls_lock, flags);
+ spin_lock_bh(&priv->auto_tdls_lock);
list_for_each_entry(tdls_peer, &priv->auto_tdls_list, list) {
if ((jiffies - tdls_peer->rssi_jiffies) >
(MWIFIEX_AUTO_TDLS_IDLE_TIME * HZ)) {
tdls_peer->rssi);
}
}
- spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
+ spin_unlock_bh(&priv->auto_tdls_lock);
mod_timer(&priv->auto_tdls_timer,
jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
{
struct tx_status_event *tx_status = (void *)priv->adapter->event_body;
struct sk_buff *ack_skb;
- unsigned long flags;
struct mwifiex_txinfo *tx_info;
if (!tx_status->tx_token_id)
return;
- spin_lock_irqsave(&priv->ack_status_lock, flags);
+ spin_lock_bh(&priv->ack_status_lock);
ack_skb = idr_remove(&priv->ack_status_frames, tx_status->tx_token_id);
- spin_unlock_irqrestore(&priv->ack_status_lock, flags);
+ spin_unlock_bh(&priv->ack_status_lock);
if (ack_skb) {
tx_info = MWIFIEX_SKB_TXCB(ack_skb);
*/
static void mwifiex_uap_cleanup_tx_queues(struct mwifiex_private *priv)
{
- unsigned long flags;
struct list_head *ra_list;
int i;
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
for (i = 0; i < MAX_NUM_TID; i++, priv->del_list_idx++) {
if (priv->del_list_idx == MAX_NUM_TID)
}
}
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
struct rx_packet_hdr *rx_pkt_hdr;
u16 rx_pkt_type;
u8 ta[ETH_ALEN], pkt_type;
- unsigned long flags;
struct mwifiex_sta_node *node;
uap_rx_pd = (struct uap_rxpd *)(skb->data);
if (rx_pkt_type != PKT_TYPE_BAR && uap_rx_pd->priority < MAX_NUM_TID) {
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
node = mwifiex_get_sta_entry(priv, ta);
if (node)
node->rx_seq[uap_rx_pd->priority] =
le16_to_cpu(uap_rx_pd->seq_num);
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
}
if (!priv->ap_11n_enabled ||
from_timer(timer_context, t, hold_timer);
struct mwifiex_adapter *adapter = timer_context->adapter;
struct usb_tx_data_port *port = timer_context->port;
- unsigned long flags;
int err = 0;
- spin_lock_irqsave(&port->tx_aggr_lock, flags);
+ spin_lock_bh(&port->tx_aggr_lock);
err = mwifiex_usb_prepare_tx_aggr_skb(adapter, port, &skb_send);
if (err) {
mwifiex_dbg(adapter, ERROR,
if (err == -1)
mwifiex_write_data_complete(adapter, skb_send, 0, -1);
unlock:
- spin_unlock_irqrestore(&port->tx_aggr_lock, flags);
+ spin_unlock_bh(&port->tx_aggr_lock);
}
/* This function write a command/data packet to card. */
struct usb_card_rec *card = adapter->card;
struct urb_context *context = NULL;
struct usb_tx_data_port *port = NULL;
- unsigned long flags;
int idx, ret;
if (test_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags)) {
}
if (adapter->bus_aggr.enable) {
- spin_lock_irqsave(&port->tx_aggr_lock, flags);
+ spin_lock_bh(&port->tx_aggr_lock);
ret = mwifiex_usb_aggr_tx_data(adapter, ep, skb,
tx_param, port);
- spin_unlock_irqrestore(&port->tx_aggr_lock, flags);
+ spin_unlock_bh(&port->tx_aggr_lock);
return ret;
}
mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac)
{
struct mwifiex_sta_node *node;
- unsigned long flags;
if (!mac)
return NULL;
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
node = mwifiex_get_sta_entry(priv, mac);
if (node)
goto done;
list_add_tail(&node->list, &priv->sta_list);
done:
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
return node;
}
void mwifiex_del_sta_entry(struct mwifiex_private *priv, const u8 *mac)
{
struct mwifiex_sta_node *node;
- unsigned long flags;
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
node = mwifiex_get_sta_entry(priv, mac);
if (node) {
kfree(node);
}
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
return;
}
void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
{
struct mwifiex_sta_node *node, *tmp;
- unsigned long flags;
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
list_del(&node->list);
}
INIT_LIST_HEAD(&priv->sta_list);
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
return;
}
struct mwifiex_ra_list_tbl *ra_list;
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_sta_node *node;
- unsigned long flags;
for (i = 0; i < MAX_NUM_TID; ++i) {
ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
}
} else {
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
node = mwifiex_get_sta_entry(priv, ra);
if (node)
ra_list->tx_paused = node->tx_pause;
mwifiex_is_sta_11n_enabled(priv, node);
if (ra_list->is_11n_enabled)
ra_list->max_amsdu = node->max_amsdu;
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
}
mwifiex_dbg(adapter, DATA, "data: ralist %p: is_11n_enabled=%d\n",
void
mwifiex_clean_txrx(struct mwifiex_private *priv)
{
- unsigned long flags;
struct sk_buff *skb, *tmp;
mwifiex_11n_cleanup_reorder_tbl(priv);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_wmm_cleanup_queues(priv);
mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
if (priv->adapter->if_ops.clean_pcie_ring &&
!test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags))
priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
skb_unlink(skb, &priv->tdls_txq);
{
struct mwifiex_ra_list_tbl *ra_list;
u32 pkt_cnt = 0, tx_pkts_queued;
- unsigned long flags;
int i;
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
for (i = 0; i < MAX_NUM_TID; ++i) {
ra_list = mwifiex_wmm_get_ralist_node(priv, i, mac);
atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
}
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
/* This function updates non-tdls peer ralist tx_pause while
{
struct mwifiex_ra_list_tbl *ra_list;
u32 pkt_cnt = 0, tx_pkts_queued;
- unsigned long flags;
int i;
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
for (i = 0; i < MAX_NUM_TID; ++i) {
list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[i].ra_list,
atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
}
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
/*
mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr)
{
struct mwifiex_ra_list_tbl *ra_list;
- unsigned long flags;
int i;
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
for (i = 0; i < MAX_NUM_TID; ++i) {
ra_list = mwifiex_wmm_get_ralist_node(priv, i, ra_addr);
list_del(&ra_list->list);
kfree(ra_list);
}
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
/*
u32 tid;
struct mwifiex_ra_list_tbl *ra_list;
u8 ra[ETH_ALEN], tid_down;
- unsigned long flags;
struct list_head list_head;
int tdls_status = TDLS_NOT_SETUP;
struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
tid = skb->priority;
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
break;
case TDLS_SETUP_INPROGRESS:
skb_queue_tail(&priv->tdls_txq, skb);
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
return;
default:
list_head = priv->wmm.tid_tbl_ptr[tid_down].ra_list;
}
if (!ra_list) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_write_data_complete(adapter, skb, 0, -1);
return;
}
else
atomic_inc(&priv->wmm.tx_pkts_queued);
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
/*
struct mwifiex_ra_list_tbl *ptr;
struct mwifiex_tid_tbl *tid_ptr;
atomic_t *hqp;
- unsigned long flags_ra;
int i, j;
/* check the BSS with highest priority first */
hqp = &priv_tmp->wmm.highest_queued_prio;
for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {
- spin_lock_irqsave(&priv_tmp->wmm.
- ra_list_spinlock, flags_ra);
+ spin_lock_bh(&priv_tmp->wmm.ra_list_spinlock);
tid_ptr = &(priv_tmp)->wmm.
tid_tbl_ptr[tos_to_tid[i]];
goto found;
}
- spin_unlock_irqrestore(&priv_tmp->wmm.
- ra_list_spinlock,
- flags_ra);
+ spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock);
}
if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) != 0) {
/* holds ra_list_spinlock */
if (atomic_read(hqp) > i)
atomic_set(hqp, i);
- spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra);
+ spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock);
*priv = priv_tmp;
*tid = tos_to_tid[i];
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];
- unsigned long flags;
- spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags);
+ spin_lock_bh(&tbl[priv->bss_priority].bss_prio_lock);
/*
* dirty trick: we remove 'head' temporarily and reinsert it after
* curr bss node. imagine list to stay fixed while head is moved
*/
list_move(&tbl[priv->bss_priority].bss_prio_head,
&tbl[priv->bss_priority].bss_prio_cur->list);
- spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags);
+ spin_unlock_bh(&tbl[priv->bss_priority].bss_prio_lock);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
if (mwifiex_is_ralist_valid(priv, ra, tid)) {
priv->wmm.packets_out[tid]++;
/* same as above */
list_move(&tid_ptr->ra_list, &ra->list);
}
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
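
/*
 * Illustrative sketch (not part of the patch): the "move the head" rotation
 * trick used in mwifiex_rotate_priolists(). Re-inserting the list_head anchor
 * right after the node that was just served makes that node the logical tail,
 * so the next list_for_each_entry() starts with its successor. Generic names
 * only; the locking mirrors the BH variant used after this patch.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct example_node {
	struct list_head list;
	int id;
};

static void example_rotate(spinlock_t *lock, struct list_head *head,
			   struct example_node *served)
{
	spin_lock_bh(lock);
	/* head is unlinked and re-inserted after @served; the nodes stay put */
	list_move(head, &served->list);
	spin_unlock_bh(lock);
}
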
/*
*/
static void
mwifiex_send_single_packet(struct mwifiex_private *priv,
- struct mwifiex_ra_list_tbl *ptr, int ptr_index,
- unsigned long ra_list_flags)
+ struct mwifiex_ra_list_tbl *ptr, int ptr_index)
__releases(&priv->wmm.ra_list_spinlock)
{
struct sk_buff *skb, *skb_next;
struct mwifiex_txinfo *tx_info;
if (skb_queue_empty(&ptr->skb_head)) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_dbg(adapter, DATA, "data: nothing to send\n");
return;
}
else
skb_next = NULL;
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
tx_param.next_pkt_len = ((skb_next) ? skb_next->len +
sizeof(struct txpd) : 0);
if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
/* Queue the packet back at the head */
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_write_data_complete(adapter, skb, 0, -1);
return;
}
ptr->total_pkt_count++;
ptr->ba_pkt_count++;
tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
} else {
mwifiex_rotate_priolists(priv, ptr, ptr_index);
atomic_dec(&priv->wmm.tx_pkts_queued);
*/
static void
mwifiex_send_processed_packet(struct mwifiex_private *priv,
- struct mwifiex_ra_list_tbl *ptr, int ptr_index,
- unsigned long ra_list_flags)
+ struct mwifiex_ra_list_tbl *ptr, int ptr_index)
__releases(&priv->wmm.ra_list_spinlock)
{
struct mwifiex_tx_param tx_param;
struct mwifiex_txinfo *tx_info;
if (skb_queue_empty(&ptr->skb_head)) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
return;
}
if (adapter->data_sent || adapter->tx_lock_flag) {
ptr->total_pkt_count--;
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
skb_queue_tail(&adapter->tx_data_q, skb);
atomic_dec(&priv->wmm.tx_pkts_queued);
atomic_inc(&adapter->tx_queued);
tx_info = MWIFIEX_SKB_TXCB(skb);
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
tx_param.next_pkt_len =
((skb_next) ? skb_next->len +
switch (ret) {
case -EBUSY:
mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_write_data_complete(adapter, skb, 0, -1);
return;
}
skb_queue_tail(&ptr->skb_head, skb);
tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
break;
case -1:
mwifiex_dbg(adapter, ERROR, "host_to_card failed: %#x\n", ret);
if (ret != -EBUSY) {
mwifiex_rotate_priolists(priv, ptr, ptr_index);
atomic_dec(&priv->wmm.tx_pkts_queued);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
ptr->total_pkt_count--;
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
}
int ptr_index = 0;
u8 ra[ETH_ALEN];
int tid_del = 0, tid = 0;
- unsigned long flags;
ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index);
if (!ptr)
mwifiex_dbg(adapter, DATA, "data: tid=%d\n", tid);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
return -1;
}
if (mwifiex_is_ptr_processed(priv, ptr)) {
- mwifiex_send_processed_packet(priv, ptr, ptr_index, flags);
+ mwifiex_send_processed_packet(priv, ptr, ptr_index);
/* ra_list_spinlock has been freed in
mwifiex_send_processed_packet() */
return 0;
mwifiex_is_amsdu_allowed(priv, tid) &&
mwifiex_is_11n_aggragation_possible(priv, ptr,
adapter->tx_buf_size))
- mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
+ mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
/* ra_list_spinlock has been freed in
* mwifiex_11n_aggregate_pkt()
*/
else
- mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
+ mwifiex_send_single_packet(priv, ptr, ptr_index);
/* ra_list_spinlock has been freed in
* mwifiex_send_single_packet()
*/
if (mwifiex_is_amsdu_allowed(priv, tid) &&
mwifiex_is_11n_aggragation_possible(priv, ptr,
adapter->tx_buf_size))
- mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
+ mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
/* ra_list_spinlock has been freed in
mwifiex_11n_aggregate_pkt() */
else
- mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
+ mwifiex_send_single_packet(priv, ptr, ptr_index);
/* ra_list_spinlock has been freed in
mwifiex_send_single_packet() */
}