From fd89017061fc9e11c32ba2bada6fb175d5894417 Mon Sep 17 00:00:00 2001
From: Eugene Syromiatnikov <esyr@redhat.com>
Date: Wed, 31 Jul 2019 18:48:56 +0200
Subject: [PATCH 7/7] Revert "iwlwifi: mvm: support mac80211 TXQs model"

This reverts commit cfbc6c4c5b91c7725ef14465b98ac347d31f2334.
---
 drivers/net/wireless/intel/iwlwifi/mvm/d3.c       |   8 +-
 drivers/net/wireless/intel/iwlwifi/mvm/fw.c       |   5 +-
 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c |  72 ++++-
 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 143 ++-------
 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h      |  53 +---
 drivers/net/wireless/intel/iwlwifi/mvm/ops.c      |  87 +++---
 drivers/net/wireless/intel/iwlwifi/mvm/sta.c      | 360 +++++++++++++---------
 drivers/net/wireless/intel/iwlwifi/mvm/sta.h      |   4 +
 drivers/net/wireless/intel/iwlwifi/mvm/tx.c       |  69 ++++-
 9 files changed, 442 insertions(+), 359 deletions(-)

Index: src/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
===================================================================
--- src.orig/drivers/net/wireless/intel/iwlwifi/mvm/d3.c	2019-06-27 14:54:04.132678349 +0200
+++ src/drivers/net/wireless/intel/iwlwifi/mvm/d3.c	2019-07-31 18:51:11.751815280 +0200
@@ -2231,6 +2231,7 @@
 
 	file->private_data = inode->i_private;
 
+	ieee80211_stop_queues(mvm->hw);
 	synchronize_net();
 
 	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
@@ -2245,9 +2246,10 @@
 	rtnl_unlock();
 	if (err > 0)
 		err = -EINVAL;
-	if (err)
+	if (err) {
+		ieee80211_wake_queues(mvm->hw);
 		return err;
-
+	}
 	mvm->d3_test_active = true;
 	mvm->keep_vif = NULL;
 	return 0;
@@ -2327,6 +2329,8 @@
 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
 		iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
 
+	ieee80211_wake_queues(mvm->hw);
+
 	return 0;
 }
 
Index: src/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
===================================================================
--- src.orig/drivers/net/wireless/intel/iwlwifi/mvm/fw.c	2019-06-27 14:54:04.134678325 +0200
+++ src/drivers/net/wireless/intel/iwlwifi/mvm/fw.c	2019-07-31 18:51:11.752815267 +0200
@@ -308,7 +308,7 @@
 	struct iwl_notification_wait alive_wait;
 	struct iwl_mvm_alive_data alive_data = {};
 	const struct fw_img *fw;
-	int ret;
+	int ret, i;
 	enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
 	static const u16 alive_cmd[] = { MVM_ALIVE };
 
@@ -390,6 +390,9 @@
 	mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
 		BIT(IWL_MAX_TID_COUNT + 2);
 
+	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
+		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
+
 	set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	iwl_fw_set_dbg_rec_on(&mvm->fwrt);
Index: src/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
===================================================================
--- src.orig/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c	2019-07-31 18:51:06.045891463 +0200
+++ src/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c	2019-07-31 18:51:11.752815267 +0200
@@ -97,6 +97,11 @@
 	bool found_vif;
 };
 
+struct iwl_mvm_hw_queues_iface_iterator_data {
+	struct ieee80211_vif *exclude_vif;
+	unsigned long used_hw_queues;
+};
+
 static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
 				    struct ieee80211_vif *vif)
 {
@@ -203,6 +208,61 @@
 		data->preferred_tsf = NUM_TSF_IDS;
 }
 
+/*
+ * Get the mask of the queues used by the vif
+ */
+u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
+{
+	u32 qmask = 0, ac;
+
+	if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+		return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
+
+	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+		if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+			qmask |= BIT(vif->hw_queue[ac]);
+	}
+
+	if (vif->type == NL80211_IFTYPE_AP ||
+	    vif->type == NL80211_IFTYPE_ADHOC)
+		qmask |= BIT(vif->cab_queue);
+
+	return qmask;
+}
+
+static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac,
+					 struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
+
+	/* exclude the given vif */
+	if (vif == data->exclude_vif)
+		return;
+
+	data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif);
+}
+
+unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
+					 struct ieee80211_vif *exclude_vif)
+{
+	struct iwl_mvm_hw_queues_iface_iterator_data data = {
+		.exclude_vif = exclude_vif,
+		.used_hw_queues =
+			BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
+			BIT(mvm->aux_queue) |
+			BIT(IWL_MVM_DQA_GCAST_QUEUE),
+	};
+
+	lockdep_assert_held(&mvm->mutex);
+
+	/* mark all VIF used hw queues */
+	ieee80211_iterate_active_interfaces_atomic(
+		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+		iwl_mvm_iface_hw_queues_iter, &data);
+
+	return data.used_hw_queues;
+}
+
 static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
 				       struct ieee80211_vif *vif)
 {
@@ -300,6 +360,8 @@
 		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
 		iwl_mvm_mac_iface_iterator, &data);
 
+	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, vif);
+
 	/*
 	 * In the case we're getting here during resume, it's similar to
 	 * firmware restart, and with RESUME_ALL the iterator will find
@@ -354,6 +416,9 @@
 	 * the ones here - no real limit
 	 */
 	queue_limit = IEEE80211_MAX_QUEUES;
+	BUILD_BUG_ON(IEEE80211_MAX_QUEUES >
+		     BITS_PER_BYTE *
+		     sizeof(mvm->hw_queue_to_mac80211[0]));
 
 	/*
 	 * Find available queues, and allocate them to the ACs. When in
@@ -381,6 +446,9 @@
 		 * queue value (when queue is enabled).
 		 */
 		mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+		vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+	} else {
+		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
 	}
 
 	mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA;
@@ -394,6 +462,8 @@
 
 exit_fail:
 	memset(mvmvif, 0, sizeof(struct iwl_mvm_vif));
+	memset(vif->hw_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(vif->hw_queue));
+	vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
 	return ret;
 }
 
@@ -1120,7 +1190,7 @@
 
 	if (!fw_has_api(&mvm->fw->ucode_capa,
 			IWL_UCODE_TLV_API_STA_TYPE))
-		ctxt_ap->mcast_qid = cpu_to_le32(mvmvif->cab_queue);
+		ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
 
 	/*
 	 * Only set the beacon time when the MAC is being added, when we
Index: src/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
===================================================================
--- src.orig/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c	2019-07-31 18:50:40.636230724 +0200
+++ src/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c	2019-07-31 18:51:11.753815253 +0200
@@ -414,6 +414,7 @@
 	ieee80211_hw_set(hw, SIGNAL_DBM);
 	ieee80211_hw_set(hw, SPECTRUM_MGMT);
 	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(hw, QUEUE_CONTROL);
 	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
 	ieee80211_hw_set(hw, SUPPORTS_PS);
 	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
@@ -427,8 +428,6 @@
 	ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
 	ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP);
 	ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
-	ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
-	ieee80211_hw_set(hw, STA_MMPDU_TXQ);
 
 	if (iwl_mvm_has_tlc_offload(mvm)) {
 		ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
@@ -539,7 +538,6 @@
 	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
 	hw->vif_data_size = sizeof(struct iwl_mvm_vif);
 	hw->chanctx_data_size = sizeof(u16);
-	hw->txq_data_size = sizeof(struct iwl_mvm_txq);
 
 	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -785,6 +783,7 @@
 		goto out;
 
 	__skb_queue_tail(&mvm->d0i3_tx, skb);
+	ieee80211_stop_queues(mvm->hw);
 
 	/* trigger wakeup */
 	iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
@@ -804,15 +803,13 @@
 	struct ieee80211_sta *sta = control->sta;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (void *)skb->data;
-	bool offchannel = IEEE80211_SKB_CB(skb)->flags &
-		IEEE80211_TX_CTL_TX_OFFCHAN;
 
 	if (iwl_mvm_is_radio_killed(mvm)) {
 		IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
 		goto drop;
 	}
 
-	if (offchannel &&
+	if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
 	    !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
 	    !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
 		goto drop;
@@ -825,8 +822,8 @@
 		sta = NULL;
 
 	/* If there is no sta, and it's not offchannel - send through AP */
-	if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION &&
-	    !offchannel) {
+	if (info->control.vif->type == NL80211_IFTYPE_STATION &&
+	    info->hw_queue != IWL_MVM_OFFCHANNEL_QUEUE && !sta) {
 		struct iwl_mvm_vif *mvmvif =
 			iwl_mvm_vif_from_mac80211(info->control.vif);
 		u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
@@ -854,107 +851,6 @@
 	ieee80211_free_txskb(hw, skb);
 }
 
-void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
-{
-	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
-	struct sk_buff *skb = NULL;
-
-	/*
-	 * No need for threads to be pending here, they can leave the first
-	 * taker all the work.
-	 *
-	 * mvmtxq->tx_request logic:
-	 *
-	 * If 0, no one is currently TXing, set to 1 to indicate current thread
-	 * will now start TX and other threads should quit.
-	 *
-	 * If 1, another thread is currently TXing, set to 2 to indicate to
-	 * that thread that there was another request. Since that request may
-	 * have raced with the check whether the queue is empty, the TXing
-	 * thread should check the queue's status one more time before leaving.
-	 * This check is done in order to not leave any TX hanging in the queue
-	 * until the next TX invocation (which may not even happen).
-	 *
-	 * If 2, another thread is currently TXing, and it will already double
-	 * check the queue, so do nothing.
-	 */
-#if 0 /* Not in RHEL */
-	if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2))
-#else
-	if (__atomic_add_unless(&mvmtxq->tx_request, 1, 2))
-#endif
-		return;
-
-
-	rcu_read_lock();
-	do {
-		while (likely(!mvmtxq->stopped &&
-			      (mvm->trans->system_pm_mode ==
-				IWL_PLAT_PM_MODE_DISABLED))) {
-			skb = ieee80211_tx_dequeue(hw, txq);
-
-			if (!skb) {
-				if (txq->sta)
-					IWL_DEBUG_TX(mvm,
-						     "TXQ of sta %pM tid %d is now empty\n",
-						     txq->sta->addr,
-						     txq->tid);
-				break;
-			}
-
-			if (!txq->sta)
-				iwl_mvm_tx_skb_non_sta(mvm, skb);
-			else
-				iwl_mvm_tx_skb(mvm, skb, txq->sta);
-		}
-	} while (atomic_dec_return(&mvmtxq->tx_request));
-	rcu_read_unlock();
-}
-
-static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
-				      struct ieee80211_txq *txq)
-{
-	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
-
-	/*
-	 * Please note that racing is handled very carefully here:
-	 * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
-	 * deleted afterwards.
-	 * This means that if:
-	 * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
-	 *	queue is allocated and we can TX.
-	 * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
-	 *	a race, should defer the frame.
-	 * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
-	 *	need to allocate the queue and defer the frame.
-	 * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
-	 *	queue is already scheduled for allocation, no need to allocate,
-	 *	should defer the frame.
-	 */
-
-	/* If the queue is allocated TX and return. */
-	if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
-		/*
-		 * Check that list is empty to avoid a race where txq_id is
-		 * already updated, but the queue allocation work wasn't
-		 * finished
-		 */
-		if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
-			return;
-
-		iwl_mvm_mac_itxq_xmit(hw, txq);
-		return;
-	}
-
-	/* The list is being deleted only after the queue is fully allocated. */
-	if (!list_empty(&mvmtxq->list))
-		return;
-
-	list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
-	schedule_work(&mvm->add_stream_wk);
-}
 
 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)	\
 	do {							\
@@ -1172,6 +1068,7 @@
 
 	iwl_mvm_reset_phy_ctxts(mvm);
 	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
+	memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
 	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
 	memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
 
@@ -3086,6 +2983,32 @@
 			   peer_addr, action);
 }
 
+static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
+					     struct iwl_mvm_sta *mvm_sta)
+{
+	struct iwl_mvm_tid_data *tid_data;
+	struct sk_buff *skb;
+	int i;
+
+	spin_lock_bh(&mvm_sta->lock);
+	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+		tid_data = &mvm_sta->tid_data[i];
+
+		while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) {
+			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+			/*
+			 * The first deferred frame should've stopped the MAC
+			 * queues, so we should never get a second deferred
+			 * frame for the RA/TID.
+			 */
+			iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue));
+			ieee80211_free_txskb(mvm->hw, skb);
+		}
+	}
+	spin_unlock_bh(&mvm_sta->lock);
+}
+
 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
 				 struct ieee80211_vif *vif,
 				 struct ieee80211_sta *sta,
@@ -3119,6 +3042,7 @@
 	 */
 	if (old_state == IEEE80211_STA_NONE &&
 	    new_state == IEEE80211_STA_NOTEXIST) {
+		iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
 		flush_work(&mvm->add_stream_wk);
 
 		/*
@@ -4914,7 +4838,6 @@
 
 const struct ieee80211_ops iwl_mvm_hw_ops = {
 	.tx = iwl_mvm_mac_tx,
-	.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
 	.ampdu_action = iwl_mvm_mac_ampdu_action,
 	.start = iwl_mvm_mac_start,
 	.reconfig_complete = iwl_mvm_mac_reconfig_complete,
Index: src/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
===================================================================
--- src.orig/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h	2019-07-31 18:50:40.637230710 +0200
+++ src/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h	2019-07-31 18:51:11.753815253 +0200
@@ -805,39 +805,6 @@
 	u8 values[ACPI_GEO_TABLE_SIZE];
 };
 
-struct iwl_mvm_txq {
-	struct list_head list;
-	u16 txq_id;
-	atomic_t tx_request;
-	bool stopped;
-};
-
-static inline struct iwl_mvm_txq *
-iwl_mvm_txq_from_mac80211(struct ieee80211_txq *txq)
-{
-	return (void *)txq->drv_priv;
-}
-
-static inline struct iwl_mvm_txq *
-iwl_mvm_txq_from_tid(struct ieee80211_sta *sta, u8 tid)
-{
-	if (tid == IWL_MAX_TID_COUNT)
-		tid = IEEE80211_NUM_TIDS;
-
-	return (void *)sta->txq[tid]->drv_priv;
-}
-
-/**
- * struct iwl_mvm_tvqm_txq_info - maps TVQM hw queue to tid
- *
- * @sta_id: sta id
- * @txq_tid: txq tid
- */
-struct iwl_mvm_tvqm_txq_info {
-	u8 sta_id;
-	u8 txq_tid;
-};
-
 struct iwl_mvm_dqa_txq_info {
 	u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
 	bool reserved; /* Is this the TXQ reserved for a STA */
@@ -900,13 +867,13 @@
 		u64 on_time_scan;
 	} radio_stats, accu_radio_stats;
 
-	struct list_head add_stream_txqs;
-	union {
-		struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
-		struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
-	};
+	u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
+
+	struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
 	struct work_struct add_stream_wk; /* To add streams to queues */
 
+	atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
+
 	const char *nvm_file_name;
 	struct iwl_nvm_data *nvm_data;
 	/* NVM sections */
@@ -920,6 +887,7 @@
 	/* data related to data path */
 	struct iwl_rx_phy_info last_phy_info;
 	struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
+	unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
 	u8 rx_ba_sessions;
 
 	/* configured by mac80211 */
@@ -1552,8 +1520,6 @@
 void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
 			    struct ieee80211_tx_info *info,
 			    struct ieee80211_sta *sta, __le16 fc);
-void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
-
 #ifdef CONFIG_IWLWIFI_DEBUG
 const char *iwl_mvm_get_tx_fail_reason(u32 status);
 #else
@@ -1684,6 +1650,7 @@
 int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			     bool force_assoc_off, const u8 *bssid_override);
 int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
 int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
 				    struct ieee80211_vif *vif);
 int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
@@ -1710,6 +1677,8 @@
 				   struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
 				    struct ieee80211_vif *vif);
+unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
+					 struct ieee80211_vif *exclude_vif);
 void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
 				   struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
@@ -2020,6 +1989,10 @@
 	iwl_fw_dump_conf_clear(&mvm->fwrt);
 }
 
+/* Stop/start all mac queues in a given bitmap */
+void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
+void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
+
 /* Re-configure the SCD for a queue that has already been configured */
 int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
 			 int tid, int frame_limit, u16 ssn);
Index: src/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
===================================================================
--- src.orig/drivers/net/wireless/intel/iwlwifi/mvm/ops.c	2019-07-31 18:50:59.172983228 +0200
+++ src/drivers/net/wireless/intel/iwlwifi/mvm/ops.c	2019-07-31 18:51:11.754815240 +0200
@@ -707,7 +707,6 @@
 	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
 	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
 	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
-	INIT_LIST_HEAD(&mvm->add_stream_txqs);
 
 	spin_lock_init(&mvm->d0i3_tx_lock);
 	spin_lock_init(&mvm->refs_lock);
@@ -1091,6 +1090,24 @@
 		iwl_mvm_rx_common(mvm, rxb, pkt);
 }
 
+void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
+{
+	int q;
+
+	if (WARN_ON_ONCE(!mq))
+		return;
+
+	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
+		if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
+			IWL_DEBUG_TX_QUEUES(mvm,
+					    "mac80211 %d already stopped\n", q);
+			continue;
+		}
+
+		ieee80211_stop_queue(mvm->hw, q);
+	}
+}
+
 static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
 			     const struct iwl_device_cmd *cmd)
 {
@@ -1103,66 +1120,38 @@
 	iwl_trans_block_txq_ptrs(mvm->trans, false);
 }
 
-static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
-				       int hw_queue, bool start)
+static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
 {
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	struct ieee80211_sta *sta;
-	struct ieee80211_txq *txq;
-	struct iwl_mvm_txq *mvmtxq;
-	int i;
-	unsigned long tid_bitmap;
-	struct iwl_mvm_sta *mvmsta;
-	u8 sta_id;
-
-	sta_id = iwl_mvm_has_new_tx_api(mvm) ?
-		mvm->tvqm_info[hw_queue].sta_id :
-		mvm->queue_info[hw_queue].ra_sta_id;
-
-	if (WARN_ON_ONCE(sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
-		return;
+	unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
 
-	rcu_read_lock();
-
-	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
-	if (IS_ERR_OR_NULL(sta))
-		goto out;
-	mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
-	if (iwl_mvm_has_new_tx_api(mvm)) {
-		int tid = mvm->tvqm_info[hw_queue].txq_tid;
-
-		tid_bitmap = BIT(tid);
-	} else {
-		tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap;
-	}
+	iwl_mvm_stop_mac_queues(mvm, mq);
+}
 
-	for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
-		int tid = i;
+void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
+{
+	int q;
 
-		if (tid == IWL_MAX_TID_COUNT)
-			tid = IEEE80211_NUM_TIDS;
+	if (WARN_ON_ONCE(!mq))
+		return;
 
-		txq = sta->txq[tid];
-		mvmtxq = iwl_mvm_txq_from_mac80211(txq);
-		mvmtxq->stopped = !start;
+	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
+		if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
+			IWL_DEBUG_TX_QUEUES(mvm,
+					    "mac80211 %d still stopped\n", q);
+			continue;
+		}
 
-		if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
-			iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+		ieee80211_wake_queue(mvm->hw, q);
 	}
-
-out:
-	rcu_read_unlock();
-}
-
-static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
-{
-	iwl_mvm_queue_state_change(op_mode, hw_queue, false);
 }
 
 static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
 {
-	iwl_mvm_queue_state_change(op_mode, hw_queue, true);
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+	unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
+
+	iwl_mvm_start_mac_queues(mvm, mq);
 }
 
 static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
Index: src/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
===================================================================
--- src.orig/drivers/net/wireless/intel/iwlwifi/mvm/sta.c	2019-07-31 18:50:53.312061481 +0200
+++ src/drivers/net/wireless/intel/iwlwifi/mvm/sta.c	2019-07-31 18:51:11.755815227 +0200
@@ -353,16 +353,24 @@
 					   &cmd, &status);
 }
 
-static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			       int queue, u8 tid, u8 flags)
+static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
+			       int mac80211_queue, u8 tid, u8 flags)
 {
 	struct iwl_scd_txq_cfg_cmd cmd = {
 		.scd_queue = queue,
 		.action = SCD_CFG_DISABLE_QUEUE,
 	};
+	bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
 	int ret;
 
+	if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
+		return -EINVAL;
+
 	if (iwl_mvm_has_new_tx_api(mvm)) {
+		if (remove_mac_queue)
+			mvm->hw_queue_to_mac80211[queue] &=
+				~BIT(mac80211_queue);
+
 		iwl_trans_txq_free(mvm->trans, queue);
 
 		return 0;
@@ -373,15 +381,36 @@
 
 	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
 
+	/*
+	 * If there is another TID with the same AC - don't remove the MAC queue
+	 * from the mapping
+	 */
+	if (tid < IWL_MAX_TID_COUNT) {
+		unsigned long tid_bitmap =
+			mvm->queue_info[queue].tid_bitmap;
+		int ac = tid_to_mac80211_ac[tid];
+		int i;
+
+		for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
+			if (tid_to_mac80211_ac[i] == ac)
+				remove_mac_queue = false;
+		}
+	}
+
+	if (remove_mac_queue)
+		mvm->hw_queue_to_mac80211[queue] &=
+			~BIT(mac80211_queue);
+
 	cmd.action = mvm->queue_info[queue].tid_bitmap ?
 		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
 	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
 		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
 
 	IWL_DEBUG_TX_QUEUES(mvm,
-			    "Disabling TXQ #%d tids=0x%x\n",
+			    "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
 			    queue,
-			    mvm->queue_info[queue].tid_bitmap);
+			    mvm->queue_info[queue].tid_bitmap,
+			    mvm->hw_queue_to_mac80211[queue]);
 
 	/* If the queue is still enabled - nothing left to do in this func */
 	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
@@ -391,19 +420,15 @@
 	cmd.tid = mvm->queue_info[queue].txq_tid;
 
 	/* Make sure queue info is correct even though we overwrite it */
-	WARN(mvm->queue_info[queue].tid_bitmap,
-	     "TXQ #%d info out-of-sync - tids=0x%x\n",
-	     queue, mvm->queue_info[queue].tid_bitmap);
+	WARN(mvm->queue_info[queue].tid_bitmap ||
+	     mvm->hw_queue_to_mac80211[queue],
+	     "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n",
+	     queue, mvm->hw_queue_to_mac80211[queue],
+	     mvm->queue_info[queue].tid_bitmap);
 
 	/* If we are here - the queue is freed and we can zero out these vals */
 	mvm->queue_info[queue].tid_bitmap = 0;
-
-	if (sta) {
-		struct iwl_mvm_txq *mvmtxq =
-			iwl_mvm_txq_from_tid(sta, tid);
-
-		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
-	}
+	mvm->hw_queue_to_mac80211[queue] = 0;
 
 	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
 	mvm->queue_info[queue].reserved = false;
@@ -489,14 +514,9 @@
 	spin_lock_bh(&mvmsta->lock);
 	/* Unmap MAC queues and TIDs from this queue */
 	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
-		struct iwl_mvm_txq *mvmtxq =
-			iwl_mvm_txq_from_tid(sta, tid);
-
 		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
 			disable_agg_tids |= BIT(tid);
 		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
-
-		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
 	}
 
 	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
@@ -518,11 +538,10 @@
 }
 
 static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
-				       struct ieee80211_sta *old_sta,
 				       u8 new_sta_id)
 {
 	struct iwl_mvm_sta *mvmsta;
-	u8 sta_id, tid;
+	u8 txq_curr_ac, sta_id, tid;
 	unsigned long disable_agg_tids = 0;
 	bool same_sta;
 	int ret;
@@ -532,6 +551,7 @@
 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 		return -EINVAL;
 
+	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
 	sta_id = mvm->queue_info[queue].ra_sta_id;
 	tid = mvm->queue_info[queue].txq_tid;
 
@@ -547,7 +567,9 @@
 	iwl_mvm_invalidate_sta_queue(mvm, queue,
 				     disable_agg_tids, false);
 
-	ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
+	ret = iwl_mvm_disable_txq(mvm, queue,
+				  mvmsta->vif->hw_queue[txq_curr_ac],
+				  tid, 0);
 	if (ret) {
 		IWL_ERR(mvm,
 			"Failed to free inactive queue %d (ret=%d)\n",
@@ -637,15 +659,16 @@
  * in such a case, otherwise - if no redirection required - it does nothing,
  * unless the %force param is true.
  */
-static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
-				  int ac, int ssn, unsigned int wdg_timeout,
-				  bool force, struct iwl_mvm_txq *txq)
+static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+				      int ac, int ssn, unsigned int wdg_timeout,
+				      bool force)
 {
 	struct iwl_scd_txq_cfg_cmd cmd = {
 		.scd_queue = queue,
 		.action = SCD_CFG_DISABLE_QUEUE,
 	};
 	bool shared_queue;
+	unsigned long mq;
 	int ret;
 
 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
@@ -669,14 +692,14 @@
 	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
 	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
 	cmd.tid = mvm->queue_info[queue].txq_tid;
+	mq = mvm->hw_queue_to_mac80211[queue];
 	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
 
 	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
 			    queue, iwl_mvm_ac_to_tx_fifo[ac]);
 
-	/* Stop the queue and wait for it to empty */
-	txq->stopped = true;
-
+	/* Stop MAC queues and wait for this queue to empty */
+	iwl_mvm_stop_mac_queues(mvm, mq);
 	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
 	if (ret) {
 		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
@@ -717,8 +740,8 @@
 		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
 
 out:
-	/* Continue using the queue */
-	txq->stopped = false;
+	/* Continue using the MAC queues */
+	iwl_mvm_start_mac_queues(mvm, mq);
 
 	return ret;
 }
@@ -743,7 +766,7 @@
 	return -ENOSPC;
 }
 
-static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
+static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
 				   u8 sta_id, u8 tid, unsigned int timeout)
 {
 	int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
@@ -768,7 +791,10 @@
 	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
 			    queue, sta_id, tid);
 
-	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue);
+	mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+	IWL_DEBUG_TX_QUEUES(mvm,
+			    "Enabling TXQ #%d (mac80211 map:0x%x)\n",
+			    queue, mvm->hw_queue_to_mac80211[queue]);
 
 	return queue;
 }
@@ -778,10 +804,9 @@
 					int tid)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-	struct iwl_mvm_txq *mvmtxq =
-		iwl_mvm_txq_from_tid(sta, tid);
 	unsigned int wdg_timeout =
 		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+	u8 mac_queue = mvmsta->vif->hw_queue[ac];
 	int queue = -1;
 
 	lockdep_assert_held(&mvm->mutex);
@@ -789,16 +814,11 @@
 	IWL_DEBUG_TX_QUEUES(mvm,
 			    "Allocating queue for sta %d on tid %d\n",
 			    mvmsta->sta_id, tid);
-	queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
+	queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
+					wdg_timeout);
 	if (queue < 0)
 		return queue;
 
-	if (sta) {
-		mvmtxq->txq_id = queue;
-		mvm->tvqm_info[queue].txq_tid = tid;
-		mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;
-	}
-
 	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
 
 	spin_lock_bh(&mvmsta->lock);
@@ -808,9 +828,8 @@
 	return 0;
 }
 
-static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
-				       struct ieee80211_sta *sta,
-				       int queue, u8 sta_id, u8 tid)
+static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
+				       int mac80211_queue, u8 sta_id, u8 tid)
 {
 	bool enable_queue = true;
 
@@ -825,6 +844,14 @@
 	if (mvm->queue_info[queue].tid_bitmap)
 		enable_queue = false;
 
+	if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
+		WARN(mac80211_queue >=
+		     BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
+		     "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
+		     mac80211_queue, queue, sta_id, tid);
+		mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+	}
+
 	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
 	mvm->queue_info[queue].ra_sta_id = sta_id;
 
@@ -838,22 +865,16 @@
 		mvm->queue_info[queue].txq_tid = tid;
 	}
 
-	if (sta) {
-		struct iwl_mvm_txq *mvmtxq =
-			iwl_mvm_txq_from_tid(sta, tid);
-
-		mvmtxq->txq_id = queue;
-	}
-
 	IWL_DEBUG_TX_QUEUES(mvm,
-			    "Enabling TXQ #%d tids=0x%x\n",
-			    queue, mvm->queue_info[queue].tid_bitmap);
+			    "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
+			    queue, mvm->queue_info[queue].tid_bitmap,
+			    mvm->hw_queue_to_mac80211[queue]);
 
 	return enable_queue;
 }
 
-static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			       int queue, u16 ssn,
+static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
+			       int mac80211_queue, u16 ssn,
 			       const struct iwl_trans_txq_scd_cfg *cfg,
 			       unsigned int wdg_timeout)
 {
@@ -873,7 +894,8 @@
 		return false;
 
 	/* Send the enabling command if we need to */
-	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
+	if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
+					cfg->sta_id, cfg->tid))
 		return false;
 
 	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
@@ -966,10 +988,9 @@
 
 	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
 
-	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
-				     tid_to_mac80211_ac[tid], ssn,
-				     wdg_timeout, true,
-				     iwl_mvm_txq_from_tid(sta, tid));
+	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
+					 tid_to_mac80211_ac[tid], ssn,
+					 wdg_timeout, true);
 	if (ret) {
 		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
 		return;
@@ -1046,9 +1067,11 @@
 	 * Remove the ones that did.
 	 */
 	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+		int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
 		u16 tid_bitmap;
 
 		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+		mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
 		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
 
 		tid_bitmap = mvm->queue_info[queue].tid_bitmap;
@@ -1081,6 +1104,10 @@
 	 * sure all TIDs have existing corresponding mac queues enabled
 	 */
 	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+		mvm->hw_queue_to_mac80211[queue] |=
+			BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
+	}
 
 	/* If the queue is marked as shared - "unshare" it */
 	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
@@ -1108,7 +1135,6 @@
 	unsigned long unshare_queues = 0;
 	unsigned long changetid_queues = 0;
 	int i, ret, free_queue = -ENOSPC;
-	struct ieee80211_sta *queue_owner = NULL;
 
 	lockdep_assert_held(&mvm->mutex);
 
@@ -1174,14 +1200,13 @@
 						   inactive_tid_bitmap,
 						   &unshare_queues,
 						   &changetid_queues);
-		if (ret >= 0 && free_queue < 0) {
-			queue_owner = sta;
+		if (ret >= 0 && free_queue < 0)
 			free_queue = ret;
-		}
 		/* only unlock sta lock - we still need the queue info lock */
 		spin_unlock_bh(&mvmsta->lock);
 	}
 
+	rcu_read_unlock();
 
 	/* Reconfigure queues requiring reconfiguation */
 	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
@@ -1190,21 +1215,18 @@
 		iwl_mvm_change_queue_tid(mvm, i);
 
 	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
-		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
+		ret = iwl_mvm_free_inactive_queue(mvm, free_queue,
 						  alloc_for_sta);
-		if (ret) {
-			rcu_read_unlock();
+		if (ret)
 			return ret;
-		}
 	}
 
-	rcu_read_unlock();
-
 	return free_queue;
 }
 
 static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
-				   struct ieee80211_sta *sta, u8 ac, int tid)
+				   struct ieee80211_sta *sta, u8 ac, int tid,
+				   struct ieee80211_hdr *hdr)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_trans_txq_scd_cfg cfg = {
@@ -1215,6 +1237,7 @@
 	};
 	unsigned int wdg_timeout =
 		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+	u8 mac_queue = mvmsta->vif->hw_queue[ac];
 	int queue = -1;
 	unsigned long disable_agg_tids = 0;
 	enum iwl_mvm_agg_state queue_state;
@@ -1233,7 +1256,12 @@
 	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
 	spin_unlock_bh(&mvmsta->lock);
 
-	if (tid == IWL_MAX_TID_COUNT) {
+	/*
+	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
+	 * exists
+	 */
+	if (!ieee80211_is_data_qos(hdr->frame_control) ||
+	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
 		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
 						IWL_MVM_DQA_MIN_MGMT_QUEUE,
 						IWL_MVM_DQA_MAX_MGMT_QUEUE);
@@ -1312,7 +1340,8 @@
 		}
 	}
 
-	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);
+	inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
+				     ssn, &cfg, wdg_timeout);
 
 	/*
 	 * Mark queue as shared in transport if shared
@@ -1354,9 +1383,8 @@
 		}
 	} else {
 		/* Redirect queue, if needed */
-		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
-					     wdg_timeout, false,
-					     iwl_mvm_txq_from_tid(sta, tid));
+		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
+						 wdg_timeout, false);
 		if (ret)
 			goto out_err;
 	}
@@ -1364,7 +1392,7 @@
 	return 0;
 
 out_err:
-	iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);
+	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
 
 	return ret;
 }
@@ -1377,34 +1405,87 @@
|
|
|
deb259 |
return tid_to_mac80211_ac[tid];
|
|
|
deb259 |
}
|
|
|
deb259 |
|
|
|
deb259 |
+static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
|
|
|
deb259 |
+ struct ieee80211_sta *sta, int tid)
|
|
|
deb259 |
+{
|
|
|
deb259 |
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
|
|
deb259 |
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
|
|
|
deb259 |
+ struct sk_buff *skb;
|
|
|
deb259 |
+ struct ieee80211_hdr *hdr;
|
|
|
deb259 |
+ struct sk_buff_head deferred_tx;
|
|
|
deb259 |
+ u8 mac_queue;
|
|
|
deb259 |
+ bool no_queue = false; /* Marks if there is a problem with the queue */
|
|
|
deb259 |
+ u8 ac;
|
|
|
deb259 |
+
|
|
|
deb259 |
+ lockdep_assert_held(&mvm->mutex);
|
|
|
deb259 |
+
|
|
|
deb259 |
+ skb = skb_peek(&tid_data->deferred_tx_frames);
|
|
|
deb259 |
+ if (!skb)
|
|
|
deb259 |
+ return;
|
|
|
deb259 |
+ hdr = (void *)skb->data;
|
|
|
deb259 |
+
|
|
|
deb259 |
+ ac = iwl_mvm_tid_to_ac_queue(tid);
|
|
|
deb259 |
+ mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
|
|
|
deb259 |
+
|
|
|
deb259 |
+ if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
|
|
|
deb259 |
+ iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
|
|
|
deb259 |
+ IWL_ERR(mvm,
|
|
|
deb259 |
+ "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
|
|
|
deb259 |
+ mvmsta->sta_id, tid);
|
|
|
deb259 |
+
|
|
|
deb259 |
+ /*
|
|
|
deb259 |
+ * Mark queue as problematic so later the deferred traffic is
|
|
|
deb259 |
+ * freed, as we can do nothing with it
|
|
|
deb259 |
+ */
|
|
|
deb259 |
+ no_queue = true;
|
|
|
deb259 |
+ }
|
|
|
deb259 |
+
|
|
|
deb259 |
+ __skb_queue_head_init(&deferred_tx);
|
|
|
deb259 |
+
|
|
|
deb259 |
+ /* Disable bottom-halves when entering TX path */
|
|
|
deb259 |
+ local_bh_disable();
|
|
|
deb259 |
+ spin_lock(&mvmsta->lock);
|
|
|
deb259 |
+ skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
|
|
|
deb259 |
+ mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
|
|
|
deb259 |
+ spin_unlock(&mvmsta->lock);
|
|
|
deb259 |
+
|
|
|
deb259 |
+ while ((skb = __skb_dequeue(&deferred_tx)))
|
|
|
deb259 |
+ if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
|
|
|
deb259 |
+ ieee80211_free_txskb(mvm->hw, skb);
|
|
|
deb259 |
+ local_bh_enable();
|
|
|
deb259 |
+
|
|
|
deb259 |
+ /* Wake queue */
|
|
|
deb259 |
+ iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
|
|
|
deb259 |
+}
|
|
|
deb259 |
+
|
|
|
deb259 |
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
|
|
|
deb259 |
{
|
|
|
deb259 |
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
|
|
|
deb259 |
add_stream_wk);
|
|
|
deb259 |
+ struct ieee80211_sta *sta;
|
|
|
deb259 |
+ struct iwl_mvm_sta *mvmsta;
|
|
|
deb259 |
+ unsigned long deferred_tid_traffic;
|
|
|
deb259 |
+ int sta_id, tid;
|
|
|
deb259 |
|
|
|
deb259 |
mutex_lock(&mvm->mutex);
|
|
|
deb259 |
|
|
|
deb259 |
iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
|
|
|
deb259 |
|
|
|
deb259 |
- while (!list_empty(&mvm->add_stream_txqs)) {
|
|
|
deb259 |
- struct iwl_mvm_txq *mvmtxq;
|
|
|
deb259 |
- struct ieee80211_txq *txq;
|
|
|
deb259 |
- u8 tid;
|
|
|
deb259 |
-
|
|
|
deb259 |
- mvmtxq = list_first_entry(&mvm->add_stream_txqs,
|
|
|
deb259 |
- struct iwl_mvm_txq, list);
|
|
|
deb259 |
-
|
|
|
deb259 |
- txq = container_of((void *)mvmtxq, struct ieee80211_txq,
|
|
|
deb259 |
- drv_priv);
|
|
|
deb259 |
- tid = txq->tid;
|
|
|
deb259 |
- if (tid == IEEE80211_NUM_TIDS)
|
|
|
deb259 |
- tid = IWL_MAX_TID_COUNT;
|
|
|
deb259 |
-
|
|
|
deb259 |
- iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
|
|
|
deb259 |
- list_del_init(&mvmtxq->list);
|
|
|
deb259 |
- local_bh_disable();
|
|
|
deb259 |
-		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
-		local_bh_enable();
+	/* Go over all stations with deferred traffic */
+	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
+			 IWL_MVM_STATION_COUNT) {
+		clear_bit(sta_id, mvm->sta_deferred_frames);
+		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+						lockdep_is_held(&mvm->mutex));
+		if (IS_ERR_OR_NULL(sta))
+			continue;
+
+		mvmsta = iwl_mvm_sta_from_mac80211(sta);
+		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
+
+		for_each_set_bit(tid, &deferred_tid_traffic,
+				 IWL_MAX_TID_COUNT + 1)
+			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
 	}

 	mutex_unlock(&mvm->mutex);
@@ -1460,11 +1541,10 @@
  * Note that re-enabling aggregations isn't done in this function.
  */
 static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
-						 struct ieee80211_sta *sta)
+						 struct iwl_mvm_sta *mvm_sta)
 {
-	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-	unsigned int wdg =
-		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
+	unsigned int wdg_timeout =
+			iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
 	int i;
 	struct iwl_trans_txq_scd_cfg cfg = {
 		.sta_id = mvm_sta->sta_id,
@@ -1480,18 +1560,23 @@
 		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
 		int txq_id = tid_data->txq_id;
 		int ac;
+		u8 mac_queue;

 		if (txq_id == IWL_MVM_INVALID_QUEUE)
 			continue;

+		skb_queue_head_init(&tid_data->deferred_tx_frames);
+
 		ac = tid_to_mac80211_ac[i];
+		mac_queue = mvm_sta->vif->hw_queue[ac];

 		if (iwl_mvm_has_new_tx_api(mvm)) {
 			IWL_DEBUG_TX_QUEUES(mvm,
 					    "Re-mapping sta %d tid %d\n",
 					    mvm_sta->sta_id, i);
-			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
-							 i, wdg);
+			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
+							 mvm_sta->sta_id,
+							 i, wdg_timeout);
 			tid_data->txq_id = txq_id;

 			/*
@@ -1514,7 +1599,8 @@
 				    "Re-mapping sta %d tid %d to queue %d\n",
 				    mvm_sta->sta_id, i, txq_id);

-		iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
+		iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
+				   wdg_timeout);
 		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
 	}
 }
@@ -1604,7 +1690,7 @@
 		if (ret)
 			goto err;

-		iwl_mvm_realloc_queues_after_restart(mvm, sta);
+		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
 		sta_update = true;
 		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
 		goto update_fw;
@@ -1637,17 +1723,9 @@
 		 * frames until the queue is allocated
 		 */
 		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
+		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
 	}
-
-	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
-		struct iwl_mvm_txq *mvmtxq =
-			iwl_mvm_txq_from_mac80211(sta->txq[i]);
-
-		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
-		INIT_LIST_HEAD(&mvmtxq->list);
-		atomic_set(&mvmtxq->tx_request, 0);
-	}
-
+	mvm_sta->deferred_traffic_tid_map = 0;
 	mvm_sta->agg_tids = 0;

 	if (iwl_mvm_has_new_rx_api(mvm) &&
@@ -1782,9 +1860,9 @@

 static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
 				       struct ieee80211_vif *vif,
-				       struct ieee80211_sta *sta)
+				       struct iwl_mvm_sta *mvm_sta)
 {
-	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+	int ac;
 	int i;

 	lockdep_assert_held(&mvm->mutex);
@@ -1793,17 +1871,11 @@
 		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
 			continue;

-		iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
-				    0);
+		ac = iwl_mvm_tid_to_ac_queue(i);
+		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
+				    vif->hw_queue[ac], i, 0);
 		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
 	}
-
-	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
-		struct iwl_mvm_txq *mvmtxq =
-			iwl_mvm_txq_from_mac80211(sta->txq[i]);
-
-		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
-	}
 }

 int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
@@ -1865,7 +1937,7 @@

 	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

-	iwl_mvm_disable_sta_queues(mvm, vif, sta);
+	iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);

 	/* If there is a TXQ still marked as reserved - free it */
 	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
@@ -1971,7 +2043,7 @@

 	if (iwl_mvm_has_new_tx_api(mvm)) {
 		int tvqm_queue =
-			iwl_mvm_tvqm_enable_txq(mvm, sta_id,
+			iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
 						IWL_MAX_TID_COUNT,
 						wdg_timeout);
 		*queue = tvqm_queue;
@@ -1984,7 +2056,7 @@
 			.frame_limit = IWL_FRAME_LIMIT,
 		};

-		iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout);
+		iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
 	}
 }

@@ -2062,7 +2134,8 @@

 	lockdep_assert_held(&mvm->mutex);

-	iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
+	iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
+			    IWL_MAX_TID_COUNT, 0);
 	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
 	if (ret)
 		IWL_WARN(mvm, "Failed sending remove station\n");
@@ -2123,7 +2196,8 @@

 		bsta->tfd_queue_msk |= BIT(queue);

-		iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
+		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
+				   &cfg, wdg_timeout);
 	}

 	if (vif->type == NL80211_IFTYPE_ADHOC)
@@ -2142,7 +2216,8 @@
 	 * to firmware so enable queue here - after the station was added
 	 */
 	if (iwl_mvm_has_new_tx_api(mvm)) {
-		queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
+		queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
+						bsta->sta_id,
 						IWL_MAX_TID_COUNT,
 						wdg_timeout);

@@ -2180,7 +2255,7 @@
 		return;
 	}

-	iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
+	iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
 	if (iwl_mvm_has_new_tx_api(mvm))
 		return;

@@ -2304,8 +2379,10 @@
 	 * Note that this is done here as we want to avoid making DQA
 	 * changes in mac80211 layer.
 	 */
-	if (vif->type == NL80211_IFTYPE_ADHOC)
-		mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+	if (vif->type == NL80211_IFTYPE_ADHOC) {
+		vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+		mvmvif->cab_queue = vif->cab_queue;
+	}

 	/*
 	 * While in previous FWs we had to exclude cab queue from TFD queue
@@ -2313,9 +2390,9 @@
 	 */
 	if (!iwl_mvm_has_new_tx_api(mvm) &&
 	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
-		iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
-				   timeout);
-		msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
+		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
+				   &cfg, timeout);
+		msta->tfd_queue_msk |= BIT(vif->cab_queue);
 	}
 	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
 					 mvmvif->id, mvmvif->color);
@@ -2332,14 +2409,15 @@
 	 * tfd_queue_mask.
 	 */
 	if (iwl_mvm_has_new_tx_api(mvm)) {
-		int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
+		int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
+						    msta->sta_id,
 						    0,
 						    timeout);
 		mvmvif->cab_queue = queue;
 	} else if (!fw_has_api(&mvm->fw->ucode_capa,
 			       IWL_UCODE_TLV_API_STA_TYPE))
-		iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
-				   timeout);
+		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
+				   &cfg, timeout);

 	return 0;
 }
@@ -2410,7 +2488,8 @@

 	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);

-	iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
+	iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
+			    0, 0);

 	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
 	if (ret)
@@ -2946,7 +3025,8 @@
 	}

 	if (alloc_queue)
-		iwl_mvm_enable_txq(mvm, sta, queue, ssn,
+		iwl_mvm_enable_txq(mvm, queue,
+				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
 				   &cfg, wdg_timeout);

 	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
Index: src/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
===================================================================
--- src.orig/drivers/net/wireless/intel/iwlwifi/mvm/sta.h	2019-06-27 14:54:04.140678253 +0200
+++ src/drivers/net/wireless/intel/iwlwifi/mvm/sta.h	2019-07-31 18:51:11.756815213 +0200
@@ -297,6 +297,7 @@

 /**
  * struct iwl_mvm_tid_data - holds the states for each RA / TID
+ * @deferred_tx_frames: deferred TX frames for this RA/TID
  * @seq_number: the next WiFi sequence number to use
  * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
  *	This is basically (last acked packet++).
@@ -317,6 +318,7 @@
  *	tpt_meas_start
  */
 struct iwl_mvm_tid_data {
+	struct sk_buff_head deferred_tx_frames;
 	u16 seq_number;
 	u16 next_reclaimed;
 	/* The rest is Tx AGG related */
@@ -425,6 +427,8 @@
 	struct iwl_mvm_key_pn __rcu *ptk_pn[4];
 	struct iwl_mvm_rxq_dup_data *dup_data;

+	u16 deferred_traffic_tid_map;
+
 	u8 reserved_queue;

 	/* Temporary, until the new TLC will control the Tx protection */
Index: src/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
===================================================================
--- src.orig/drivers/net/wireless/intel/iwlwifi/mvm/tx.c	2019-07-31 18:50:40.639230684 +0200
+++ src/drivers/net/wireless/intel/iwlwifi/mvm/tx.c	2019-07-31 18:51:11.756815213 +0200
@@ -605,12 +605,11 @@
 }

 static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
-				      struct ieee80211_tx_info *info,
-				      struct ieee80211_hdr *hdr)
+				      struct ieee80211_tx_info *info, __le16 fc)
 {
-	struct iwl_mvm_vif *mvmvif =
-		iwl_mvm_vif_from_mac80211(info->control.vif);
-	__le16 fc = hdr->frame_control;
+	struct iwl_mvm_vif *mvmvif;
+
+	mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif);

 	switch (info->control.vif->type) {
 	case NL80211_IFTYPE_AP:
@@ -629,9 +628,7 @@
 		    (!ieee80211_is_bufferable_mmpdu(fc) ||
 		     ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
 			return mvm->probe_queue;
-
-		if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) &&
-		    is_multicast_ether_addr(hdr->addr1))
+		if (info->hw_queue == info->control.vif->cab_queue)
 			return mvmvif->cab_queue;

 		WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
@@ -640,6 +637,8 @@
 	case NL80211_IFTYPE_P2P_DEVICE:
 		if (ieee80211_is_mgmt(fc))
 			return mvm->p2p_dev_queue;
+		if (info->hw_queue == info->control.vif->cab_queue)
+			return mvmvif->cab_queue;

 		WARN_ON_ONCE(1);
 		return mvm->p2p_dev_queue;
@@ -717,8 +716,6 @@
 	u8 sta_id;
 	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
 	__le16 fc = hdr->frame_control;
-	bool offchannel = IEEE80211_SKB_CB(skb)->flags &
-		IEEE80211_TX_CTL_TX_OFFCHAN;
 	int queue = -1;

 	if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
@@ -729,6 +726,11 @@
 	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
 		return -1;

+	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
+			 (!info.control.vif ||
+			  info.hw_queue != info.control.vif->cab_queue)))
+		return -1;
+
 	if (info.control.vif) {
 		struct iwl_mvm_vif *mvmvif =
 			iwl_mvm_vif_from_mac80211(info.control.vif);
@@ -741,12 +743,14 @@
 			else
 				sta_id = mvmvif->mcast_sta.sta_id;

-			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr);
+			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
+							   hdr->frame_control);
+
 		} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
 			queue = mvm->snif_queue;
 			sta_id = mvm->snif_sta.sta_id;
 		} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
-			   offchannel) {
+			   info.hw_queue == IWL_MVM_OFFCHANNEL_QUEUE) {
 			/*
 			 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
 			 * that can be used in 2 different types of vifs, P2P &
@@ -760,10 +764,8 @@
 		}
 	}

-	if (queue < 0) {
-		IWL_ERR(mvm, "No queue was found. Dropping TX\n");
+	if (queue < 0)
 		return -1;
-	}

 	if (unlikely(ieee80211_is_probe_resp(fc)))
 		iwl_mvm_probe_resp_set_noa(mvm, skb);
@@ -1006,6 +1008,34 @@
 }
 #endif

+static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
+				  struct iwl_mvm_sta *mvm_sta, u8 tid,
+				  struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	u8 mac_queue = info->hw_queue;
+	struct sk_buff_head *deferred_tx_frames;
+
+	lockdep_assert_held(&mvm_sta->lock);
+
+	mvm_sta->deferred_traffic_tid_map |= BIT(tid);
+	set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);
+
+	deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;
+
+	skb_queue_tail(deferred_tx_frames, skb);
+
+	/*
+	 * The first deferred frame should've stopped the MAC queues, so we
+	 * should never get a second deferred frame for the RA/TID.
+	 * In case of GSO the first packet may have been split, so don't warn.
+	 */
+	if (skb_queue_len(deferred_tx_frames) == 1) {
+		iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
+		schedule_work(&mvm->add_stream_wk);
+	}
+}
+
 /* Check if there are any timed-out TIDs on a given shared TXQ */
 static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
 {
@@ -1076,7 +1106,7 @@
 	__le16 fc;
 	u16 seq_number = 0;
 	u8 tid = IWL_MAX_TID_COUNT;
-	u16 txq_id;
+	u16 txq_id = info->hw_queue;
 	bool is_ampdu = false;
 	int hdrlen;

@@ -1145,7 +1175,14 @@

 	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);

+	/* Check if TXQ needs to be allocated or re-activated */
 	if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
+		iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
+
+		/*
+		 * The frame is now deferred, and the worker scheduled
+		 * will re-allocate it, so we can free it for now.
+		 */
 		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
 		spin_unlock(&mvmsta->lock);
 		return 0;