From fd89017061fc9e11c32ba2bada6fb175d5894417 Mon Sep 17 00:00:00 2001
From: Eugene Syromiatnikov <esyr@redhat.com>
Date: Wed, 31 Jul 2019 18:48:56 +0200
Subject: [PATCH 7/7] Revert "iwlwifi: mvm: support mac80211 TXQs model"

This reverts commit cfbc6c4c5b91c7725ef14465b98ac347d31f2334.
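
This takes the driver back from the mac80211 TXQ (iTXQ) model to
driver-managed queue control: the .wake_tx_queue op, struct iwl_mvm_txq
and the per-TXQ tx_request serialization are removed. In their place
the pre-iTXQ machinery is restored: per-RA/TID deferred_tx_frames lists
with the sta_deferred_frames / deferred_traffic_tid_map bookkeeping
consumed by add_stream_wk, the hw_queue_to_mac80211[] queue map, and
the refcounted mac80211_queue_stop_count[] stop/wake accounting.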
---
 drivers/net/wireless/intel/iwlwifi/mvm/d3.c       |   8 +-
 drivers/net/wireless/intel/iwlwifi/mvm/fw.c       |   5 +-
 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c |  72 ++++-
 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 143 ++-------
 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h      |  53 +---
 drivers/net/wireless/intel/iwlwifi/mvm/ops.c      |  87 +++---
 drivers/net/wireless/intel/iwlwifi/mvm/sta.c      | 360 +++++++++++++---------
 drivers/net/wireless/intel/iwlwifi/mvm/sta.h      |   4 +
 drivers/net/wireless/intel/iwlwifi/mvm/tx.c       |  69 ++++-
 9 files changed, 442 insertions(+), 359 deletions(-)

Index: src/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
===================================================================
--- src.orig/drivers/net/wireless/intel/iwlwifi/mvm/d3.c	2019-06-27 14:54:04.132678349 +0200
+++ src/drivers/net/wireless/intel/iwlwifi/mvm/d3.c	2019-07-31 18:51:11.751815280 +0200
@@ -2231,6 +2231,7 @@
 
 	file->private_data = inode->i_private;
 
+	ieee80211_stop_queues(mvm->hw);
 	synchronize_net();
 
 	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
@@ -2245,9 +2246,10 @@
 	rtnl_unlock();
 	if (err > 0)
 		err = -EINVAL;
-	if (err)
+	if (err) {
+		ieee80211_wake_queues(mvm->hw);
 		return err;
-
+	}
 	mvm->d3_test_active = true;
 	mvm->keep_vif = NULL;
 	return 0;
@@ -2327,6 +2329,8 @@
 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
 		iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
 
+	ieee80211_wake_queues(mvm->hw);
+
 	return 0;
 }
 
Index: src/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
===================================================================
--- src.orig/drivers/net/wireless/intel/iwlwifi/mvm/fw.c	2019-06-27 14:54:04.134678325 +0200
+++ src/drivers/net/wireless/intel/iwlwifi/mvm/fw.c	2019-07-31 18:51:11.752815267 +0200
@@ -308,7 +308,7 @@
 	struct iwl_notification_wait alive_wait;
 	struct iwl_mvm_alive_data alive_data = {};
 	const struct fw_img *fw;
-	int ret;
+	int ret, i;
 	enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
 	static const u16 alive_cmd[] = { MVM_ALIVE };
 
@@ -390,6 +390,9 @@
 	mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
 		BIT(IWL_MAX_TID_COUNT + 2);
 
+	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
+		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
+
 	set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	iwl_fw_set_dbg_rec_on(&mvm->fwrt);
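
Note: queue flow control becomes the driver's job again, so the d3 test
path above brackets suspend with ieee80211_stop_queues() and
ieee80211_wake_queues(), and the alive path here zeroes every entry of
the restored mac80211_queue_stop_count[] refcount array (see mvm.h
below) so that a firmware restart cannot leave queues stopped by stale
counts.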
Index: src/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
===================================================================
--- src.orig/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c	2019-07-31 18:51:06.045891463 +0200
+++ src/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c	2019-07-31 18:51:11.752815267 +0200
@@ -97,6 +97,11 @@
 	bool found_vif;
 };
 
+struct iwl_mvm_hw_queues_iface_iterator_data {
+	struct ieee80211_vif *exclude_vif;
+	unsigned long used_hw_queues;
+};
+
 static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
 				    struct ieee80211_vif *vif)
 {
@@ -203,6 +208,61 @@
 		data->preferred_tsf = NUM_TSF_IDS;
 }
 
+/*
+ * Get the mask of the queues used by the vif
+ */
+u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
+{
+	u32 qmask = 0, ac;
+
+	if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+		return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
+
+	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+		if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+			qmask |= BIT(vif->hw_queue[ac]);
+	}
+
+	if (vif->type == NL80211_IFTYPE_AP ||
+	    vif->type == NL80211_IFTYPE_ADHOC)
+		qmask |= BIT(vif->cab_queue);
+
+	return qmask;
+}
+
+static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac,
+					 struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
+
+	/* exclude the given vif */
+	if (vif == data->exclude_vif)
+		return;
+
+	data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif);
+}
+
+unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
+					 struct ieee80211_vif *exclude_vif)
+{
+	struct iwl_mvm_hw_queues_iface_iterator_data data = {
+		.exclude_vif = exclude_vif,
+		.used_hw_queues =
+			BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
+			BIT(mvm->aux_queue) |
+			BIT(IWL_MVM_DQA_GCAST_QUEUE),
+	};
+
+	lockdep_assert_held(&mvm->mutex);
+
+	/* mark all VIF used hw queues */
+	ieee80211_iterate_active_interfaces_atomic(
+		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+		iwl_mvm_iface_hw_queues_iter, &data);
+
+	return data.used_hw_queues;
+}
+
 static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
 				       struct ieee80211_vif *vif)
 {
@@ -300,6 +360,8 @@
 		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
 		iwl_mvm_mac_iface_iterator, &data);
 
+	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, vif);
+
 	/*
 	 * In the case we're getting here during resume, it's similar to
 	 * firmware restart, and with RESUME_ALL the iterator will find
@@ -354,6 +416,9 @@
 	 * the ones here - no real limit
 	 */
 	queue_limit = IEEE80211_MAX_QUEUES;
+	BUILD_BUG_ON(IEEE80211_MAX_QUEUES >
+		     BITS_PER_BYTE *
+		     sizeof(mvm->hw_queue_to_mac80211[0]));
 
 	/*
 	 * Find available queues, and allocate them to the ACs. When in
@@ -381,6 +446,9 @@
 		 * queue value (when queue is enabled).
 		 */
 		mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+		vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+	} else {
+		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
 	}
 
 	mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA;
@@ -394,6 +462,8 @@
 
 exit_fail:
 	memset(mvmvif, 0, sizeof(struct iwl_mvm_vif));
+	memset(vif->hw_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(vif->hw_queue));
+	vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
 	return ret;
 }
 
@@ -1120,7 +1190,7 @@
 
 	if (!fw_has_api(&mvm->fw->ucode_capa,
 			IWL_UCODE_TLV_API_STA_TYPE))
-		ctxt_ap->mcast_qid = cpu_to_le32(mvmvif->cab_queue);
+		ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
 
 	/*
 	 * Only set the beacon time when the MAC is being added, when we
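
Note: iwl_mvm_mac_get_queues_mask() returns a plain bitmap over
hardware queue numbers. A worked example with hypothetical values (not
values taken from this patch):

	/*
	 * AP vif with vif->hw_queue[] = {0, 1, 2, 3} and cab_queue = 8:
	 *   qmask = BIT(0)|BIT(1)|BIT(2)|BIT(3)|BIT(8) = 0x10f
	 * A P2P_DEVICE vif short-circuits to BIT(IWL_MVM_OFFCHANNEL_QUEUE),
	 * and ACs left at IEEE80211_INVAL_HW_QUEUE contribute no bits.
	 */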
Index: src/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
===================================================================
--- src.orig/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c	2019-07-31 18:50:40.636230724 +0200
+++ src/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c	2019-07-31 18:51:11.753815253 +0200
@@ -414,6 +414,7 @@
 	ieee80211_hw_set(hw, SIGNAL_DBM);
 	ieee80211_hw_set(hw, SPECTRUM_MGMT);
 	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(hw, QUEUE_CONTROL);
 	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
 	ieee80211_hw_set(hw, SUPPORTS_PS);
 	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
@@ -427,8 +428,6 @@
 	ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
 	ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP);
 	ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
-	ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
-	ieee80211_hw_set(hw, STA_MMPDU_TXQ);
 
 	if (iwl_mvm_has_tlc_offload(mvm)) {
 		ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
@@ -539,7 +538,6 @@
 	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
 	hw->vif_data_size = sizeof(struct iwl_mvm_vif);
 	hw->chanctx_data_size = sizeof(u16);
-	hw->txq_data_size = sizeof(struct iwl_mvm_txq);
 
 	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -785,6 +783,7 @@
 		goto out;
 
 	__skb_queue_tail(&mvm->d0i3_tx, skb);
+	ieee80211_stop_queues(mvm->hw);
 
 	/* trigger wakeup */
 	iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
@@ -804,15 +803,13 @@
 	struct ieee80211_sta *sta = control->sta;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (void *)skb->data;
-	bool offchannel = IEEE80211_SKB_CB(skb)->flags &
-		IEEE80211_TX_CTL_TX_OFFCHAN;
 
 	if (iwl_mvm_is_radio_killed(mvm)) {
 		IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
 		goto drop;
 	}
 
-	if (offchannel &&
+	if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
 	    !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
 	    !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
 		goto drop;
@@ -825,8 +822,8 @@
 		sta = NULL;
 
 	/* If there is no sta, and it's not offchannel - send through AP */
-	if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION &&
-	    !offchannel) {
+	if (info->control.vif->type == NL80211_IFTYPE_STATION &&
+	    info->hw_queue != IWL_MVM_OFFCHANNEL_QUEUE && !sta) {
 		struct iwl_mvm_vif *mvmvif =
 			iwl_mvm_vif_from_mac80211(info->control.vif);
 		u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
@@ -854,107 +851,6 @@
 	ieee80211_free_txskb(hw, skb);
 }
 
-void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
-{
-	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
-	struct sk_buff *skb = NULL;
-
-	/*
-	 * No need for threads to be pending here, they can leave the first
-	 * taker all the work.
-	 *
-	 * mvmtxq->tx_request logic:
-	 *
-	 * If 0, no one is currently TXing, set to 1 to indicate current thread
-	 * will now start TX and other threads should quit.
-	 *
-	 * If 1, another thread is currently TXing, set to 2 to indicate to
-	 * that thread that there was another request. Since that request may
-	 * have raced with the check whether the queue is empty, the TXing
-	 * thread should check the queue's status one more time before leaving.
-	 * This check is done in order to not leave any TX hanging in the queue
-	 * until the next TX invocation (which may not even happen).
-	 *
-	 * If 2, another thread is currently TXing, and it will already double
-	 * check the queue, so do nothing.
-	 */
-#if 0 /* Not in RHEL */
-	if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2))
-#else
-	if (__atomic_add_unless(&mvmtxq->tx_request, 1, 2))
-#endif
-		return;
-
-
-	rcu_read_lock();
-	do {
-		while (likely(!mvmtxq->stopped &&
-			      (mvm->trans->system_pm_mode ==
-			       IWL_PLAT_PM_MODE_DISABLED))) {
-			skb = ieee80211_tx_dequeue(hw, txq);
-
-			if (!skb) {
-				if (txq->sta)
-					IWL_DEBUG_TX(mvm,
-						     "TXQ of sta %pM tid %d is now empty\n",
-						     txq->sta->addr,
-						     txq->tid);
-				break;
-			}
-
-			if (!txq->sta)
-				iwl_mvm_tx_skb_non_sta(mvm, skb);
-			else
-				iwl_mvm_tx_skb(mvm, skb, txq->sta);
-		}
-	} while (atomic_dec_return(&mvmtxq->tx_request));
-	rcu_read_unlock();
-}
-
-static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
-				      struct ieee80211_txq *txq)
-{
-	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
-
-	/*
-	 * Please note that racing is handled very carefully here:
-	 * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
-	 * deleted afterwards.
-	 * This means that if:
-	 * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
-	 *	queue is allocated and we can TX.
-	 * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
-	 *	a race, should defer the frame.
-	 * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
-	 *	need to allocate the queue and defer the frame.
-	 * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
-	 *	queue is already scheduled for allocation, no need to allocate,
-	 *	should defer the frame.
-	 */
-
-	/* If the queue is allocated TX and return. */
-	if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
-		/*
-		 * Check that list is empty to avoid a race where txq_id is
-		 * already updated, but the queue allocation work wasn't
-		 * finished
-		 */
-		if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
-			return;
-
-		iwl_mvm_mac_itxq_xmit(hw, txq);
-		return;
-	}
-
-	/* The list is being deleted only after the queue is fully allocated. */
-	if (!list_empty(&mvmtxq->list))
-		return;
-
-	list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
-	schedule_work(&mvm->add_stream_wk);
-}
 
 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)		\
 	do {								\
@@ -1172,6 +1068,7 @@
 
 	iwl_mvm_reset_phy_ctxts(mvm);
 	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
+	memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
 	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
 	memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
 
@@ -3086,6 +2983,32 @@
 				peer_addr, action);
 }
 
+static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
+					     struct iwl_mvm_sta *mvm_sta)
+{
+	struct iwl_mvm_tid_data *tid_data;
+	struct sk_buff *skb;
+	int i;
+
+	spin_lock_bh(&mvm_sta->lock);
+	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+		tid_data = &mvm_sta->tid_data[i];
+
+		while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) {
+			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+			/*
+			 * The first deferred frame should've stopped the MAC
+			 * queues, so we should never get a second deferred
+			 * frame for the RA/TID.
+			 */
+			iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue));
+			ieee80211_free_txskb(mvm->hw, skb);
+		}
+	}
+	spin_unlock_bh(&mvm_sta->lock);
+}
+
 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
 				 struct ieee80211_vif *vif,
 				 struct ieee80211_sta *sta,
@@ -3119,6 +3042,7 @@
 	 */
 	if (old_state == IEEE80211_STA_NONE &&
 	    new_state == IEEE80211_STA_NOTEXIST) {
+		iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
 		flush_work(&mvm->add_stream_wk);
 
 		/*
@@ -4914,7 +4838,6 @@
 
 const struct ieee80211_ops iwl_mvm_hw_ops = {
 	.tx = iwl_mvm_mac_tx,
-	.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
 	.ampdu_action = iwl_mvm_mac_ampdu_action,
 	.start = iwl_mvm_mac_start,
 	.reconfig_complete = iwl_mvm_mac_reconfig_complete,
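
Note: with .wake_tx_queue gone, every frame again enters through the
.tx op. mac80211 picks info->hw_queue, iwl_mvm_mac_tx() hands the skb
to iwl_mvm_tx_skb(), and frames for a station whose hardware queue is
not yet allocated are parked by iwl_mvm_tx_add_stream() (tx.c below)
behind a stopped MAC queue until add_stream_wk services them.
iwl_mvm_purge_deferred_tx_frames() is the matching cleanup for a
station that is removed before that happens.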
Index: src/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
===================================================================
--- src.orig/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h	2019-07-31 18:50:40.637230710 +0200
+++ src/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h	2019-07-31 18:51:11.753815253 +0200
@@ -805,39 +805,6 @@
 	u8 values[ACPI_GEO_TABLE_SIZE];
 };
 
-struct iwl_mvm_txq {
-	struct list_head list;
-	u16 txq_id;
-	atomic_t tx_request;
-	bool stopped;
-};
-
-static inline struct iwl_mvm_txq *
-iwl_mvm_txq_from_mac80211(struct ieee80211_txq *txq)
-{
-	return (void *)txq->drv_priv;
-}
-
-static inline struct iwl_mvm_txq *
-iwl_mvm_txq_from_tid(struct ieee80211_sta *sta, u8 tid)
-{
-	if (tid == IWL_MAX_TID_COUNT)
-		tid = IEEE80211_NUM_TIDS;
-
-	return (void *)sta->txq[tid]->drv_priv;
-}
-
-/**
- * struct iwl_mvm_tvqm_txq_info - maps TVQM hw queue to tid
- *
- * @sta_id: sta id
- * @txq_tid: txq tid
- */
-struct iwl_mvm_tvqm_txq_info {
-	u8 sta_id;
-	u8 txq_tid;
-};
-
 struct iwl_mvm_dqa_txq_info {
 	u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
 	bool reserved; /* Is this the TXQ reserved for a STA */
@@ -900,13 +867,13 @@
 		u64 on_time_scan;
 	} radio_stats, accu_radio_stats;
 
-	struct list_head add_stream_txqs;
-	union {
-		struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
-		struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
-	};
+	u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
+
+	struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
 	struct work_struct add_stream_wk; /* To add streams to queues */
 
+	atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
+
 	const char *nvm_file_name;
 	struct iwl_nvm_data *nvm_data;
 	/* NVM sections */
@@ -920,6 +887,7 @@
 	/* data related to data path */
 	struct iwl_rx_phy_info last_phy_info;
 	struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
+	unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
 	u8 rx_ba_sessions;
 
 	/* configured by mac80211 */
@@ -1552,8 +1520,6 @@
 void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
 			    struct ieee80211_tx_info *info,
 			    struct ieee80211_sta *sta, __le16 fc);
-void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
-
 #ifdef CONFIG_IWLWIFI_DEBUG
 const char *iwl_mvm_get_tx_fail_reason(u32 status);
 #else
@@ -1684,6 +1650,7 @@
 int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			     bool force_assoc_off, const u8 *bssid_override);
 int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
 int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
 				    struct ieee80211_vif *vif);
 int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
@@ -1710,6 +1677,8 @@
 				 struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
 				    struct ieee80211_vif *vif);
+unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
+					 struct ieee80211_vif *exclude_vif);
 void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
 				   struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
@@ -2020,6 +1989,10 @@
 	iwl_fw_dump_conf_clear(&mvm->fwrt);
 }
 
+/* Stop/start all mac queues in a given bitmap */
+void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
+void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
+
 /* Re-configure the SCD for a queue that has already been configured */
 int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
 			 int tid, int frame_limit, u16 ssn);
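
Note: the queue_info/tvqm_info union collapses back to a plain
queue_info[] array (struct iwl_mvm_tvqm_txq_info is gone), and each
entry of the new hw_queue_to_mac80211[] is a u16 bitmask of the
mac80211 queues mapped onto that hardware queue; the BUILD_BUG_ON added
in mac-ctxt.c checks that IEEE80211_MAX_QUEUES fits in one such entry.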
Index: src/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
===================================================================
--- src.orig/drivers/net/wireless/intel/iwlwifi/mvm/ops.c	2019-07-31 18:50:59.172983228 +0200
+++ src/drivers/net/wireless/intel/iwlwifi/mvm/ops.c	2019-07-31 18:51:11.754815240 +0200
@@ -707,7 +707,6 @@
 	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
 	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
 	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
-	INIT_LIST_HEAD(&mvm->add_stream_txqs);
 
 	spin_lock_init(&mvm->d0i3_tx_lock);
 	spin_lock_init(&mvm->refs_lock);
@@ -1091,6 +1090,24 @@
 		iwl_mvm_rx_common(mvm, rxb, pkt);
 }
 
+void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
+{
+	int q;
+
+	if (WARN_ON_ONCE(!mq))
+		return;
+
+	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
+		if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
+			IWL_DEBUG_TX_QUEUES(mvm,
+					    "mac80211 %d already stopped\n", q);
+			continue;
+		}
+
+		ieee80211_stop_queue(mvm->hw, q);
+	}
+}
+
 static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
 			     const struct iwl_device_cmd *cmd)
 {
@@ -1103,66 +1120,38 @@
 	iwl_trans_block_txq_ptrs(mvm->trans, false);
 }
 
-static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
-				       int hw_queue, bool start)
+static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
 {
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-	struct ieee80211_sta *sta;
-	struct ieee80211_txq *txq;
-	struct iwl_mvm_txq *mvmtxq;
-	int i;
-	unsigned long tid_bitmap;
-	struct iwl_mvm_sta *mvmsta;
-	u8 sta_id;
-
-	sta_id = iwl_mvm_has_new_tx_api(mvm) ?
-		mvm->tvqm_info[hw_queue].sta_id :
-		mvm->queue_info[hw_queue].ra_sta_id;
-
-	if (WARN_ON_ONCE(sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
-		return;
+	unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
 
-	rcu_read_lock();
-
-	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
-	if (IS_ERR_OR_NULL(sta))
-		goto out;
-	mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
-	if (iwl_mvm_has_new_tx_api(mvm)) {
-		int tid = mvm->tvqm_info[hw_queue].txq_tid;
-
-		tid_bitmap = BIT(tid);
-	} else {
-		tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap;
-	}
+	iwl_mvm_stop_mac_queues(mvm, mq);
+}
 
-	for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
-		int tid = i;
+void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
+{
+	int q;
 
-		if (tid == IWL_MAX_TID_COUNT)
-			tid = IEEE80211_NUM_TIDS;
+	if (WARN_ON_ONCE(!mq))
+		return;
 
-		txq = sta->txq[tid];
-		mvmtxq = iwl_mvm_txq_from_mac80211(txq);
-		mvmtxq->stopped = !start;
+	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
+		if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
+			IWL_DEBUG_TX_QUEUES(mvm,
+					    "mac80211 %d still stopped\n", q);
+			continue;
+		}
 
-		if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
-			iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+		ieee80211_wake_queue(mvm->hw, q);
 	}
-
-out:
-	rcu_read_unlock();
-}
-
-static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
-{
-	iwl_mvm_queue_state_change(op_mode, hw_queue, false);
 }
 
 static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
 {
-	iwl_mvm_queue_state_change(op_mode, hw_queue, true);
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+	unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
+
+	iwl_mvm_start_mac_queues(mvm, mq);
 }
 
 static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
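
Note: the restored stop/wake pair is a per-queue refcount: only the
0 -> 1 transition stops a mac80211 queue and only the 1 -> 0 transition
wakes it, so the transport's stop_sw_queue/wake_sw_queue notifications,
queue redirection and the deferred-TX path can nest freely. A minimal
standalone model of that contract (illustrative names, not driver
code):

	#include <stdatomic.h>

	#define N_QUEUES 16
	static atomic_int stop_count[N_QUEUES];	/* one refcount per queue */

	static void model_stop_queue(int q)
	{
		if (atomic_fetch_add(&stop_count[q], 1) > 0)
			return;	/* already stopped by another holder */
		/* here the driver calls ieee80211_stop_queue(hw, q) */
	}

	static void model_wake_queue(int q)
	{
		if (atomic_fetch_sub(&stop_count[q], 1) > 1)
			return;	/* still held stopped by another holder */
		/* here the driver calls ieee80211_wake_queue(hw, q) */
	}

	int main(void)
	{
		model_stop_queue(3);	/* 0 -> 1: queue stops */
		model_stop_queue(3);	/* 1 -> 2: no-op */
		model_wake_queue(3);	/* 2 -> 1: queue stays stopped */
		model_wake_queue(3);	/* 1 -> 0: queue wakes */
		return 0;
	}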
Index: src/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
===================================================================
--- src.orig/drivers/net/wireless/intel/iwlwifi/mvm/sta.c	2019-07-31 18:50:53.312061481 +0200
+++ src/drivers/net/wireless/intel/iwlwifi/mvm/sta.c	2019-07-31 18:51:11.755815227 +0200
@@ -353,16 +353,24 @@
 					   &cmd, &status);
 }
 
-static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			       int queue, u8 tid, u8 flags)
+static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
+			       int mac80211_queue, u8 tid, u8 flags)
 {
 	struct iwl_scd_txq_cfg_cmd cmd = {
 		.scd_queue = queue,
 		.action = SCD_CFG_DISABLE_QUEUE,
 	};
+	bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
 	int ret;
 
+	if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
+		return -EINVAL;
+
 	if (iwl_mvm_has_new_tx_api(mvm)) {
+		if (remove_mac_queue)
+			mvm->hw_queue_to_mac80211[queue] &=
+				~BIT(mac80211_queue);
+
 		iwl_trans_txq_free(mvm->trans, queue);
 
 		return 0;
@@ -373,15 +381,36 @@
 
 	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
 
+	/*
+	 * If there is another TID with the same AC - don't remove the MAC queue
+	 * from the mapping
+	 */
+	if (tid < IWL_MAX_TID_COUNT) {
+		unsigned long tid_bitmap =
+			mvm->queue_info[queue].tid_bitmap;
+		int ac = tid_to_mac80211_ac[tid];
+		int i;
+
+		for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
+			if (tid_to_mac80211_ac[i] == ac)
+				remove_mac_queue = false;
+		}
+	}
+
+	if (remove_mac_queue)
+		mvm->hw_queue_to_mac80211[queue] &=
+			~BIT(mac80211_queue);
+
 	cmd.action = mvm->queue_info[queue].tid_bitmap ?
 		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
 	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
 		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
 
 	IWL_DEBUG_TX_QUEUES(mvm,
-			    "Disabling TXQ #%d tids=0x%x\n",
+			    "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
 			    queue,
-			    mvm->queue_info[queue].tid_bitmap);
+			    mvm->queue_info[queue].tid_bitmap,
+			    mvm->hw_queue_to_mac80211[queue]);
 
 	/* If the queue is still enabled - nothing left to do in this func */
 	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
@@ -391,19 +420,15 @@
 	cmd.tid = mvm->queue_info[queue].txq_tid;
 
 	/* Make sure queue info is correct even though we overwrite it */
-	WARN(mvm->queue_info[queue].tid_bitmap,
-	     "TXQ #%d info out-of-sync - tids=0x%x\n",
-	     queue, mvm->queue_info[queue].tid_bitmap);
+	WARN(mvm->queue_info[queue].tid_bitmap ||
+	     mvm->hw_queue_to_mac80211[queue],
+	     "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n",
+	     queue, mvm->hw_queue_to_mac80211[queue],
+	     mvm->queue_info[queue].tid_bitmap);
 
 	/* If we are here - the queue is freed and we can zero out these vals */
 	mvm->queue_info[queue].tid_bitmap = 0;
-
-	if (sta) {
-		struct iwl_mvm_txq *mvmtxq =
-			iwl_mvm_txq_from_tid(sta, tid);
-
-		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
-	}
+	mvm->hw_queue_to_mac80211[queue] = 0;
 
 	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
 	mvm->queue_info[queue].reserved = false;
@@ -489,14 +514,9 @@
 	spin_lock_bh(&mvmsta->lock);
 	/* Unmap MAC queues and TIDs from this queue */
 	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
-		struct iwl_mvm_txq *mvmtxq =
-			iwl_mvm_txq_from_tid(sta, tid);
-
 		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
 			disable_agg_tids |= BIT(tid);
 		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
-
-		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
 	}
 
 	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
@@ -518,11 +538,10 @@
 }
 
 static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
-				       struct ieee80211_sta *old_sta,
 				       u8 new_sta_id)
 {
 	struct iwl_mvm_sta *mvmsta;
-	u8 sta_id, tid;
+	u8 txq_curr_ac, sta_id, tid;
 	unsigned long disable_agg_tids = 0;
 	bool same_sta;
 	int ret;
@@ -532,6 +551,7 @@
 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 		return -EINVAL;
 
+	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
 	sta_id = mvm->queue_info[queue].ra_sta_id;
 	tid = mvm->queue_info[queue].txq_tid;
 
@@ -547,7 +567,9 @@
 		iwl_mvm_invalidate_sta_queue(mvm, queue,
 					     disable_agg_tids, false);
 
-	ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
+	ret = iwl_mvm_disable_txq(mvm, queue,
+				  mvmsta->vif->hw_queue[txq_curr_ac],
+				  tid, 0);
 	if (ret) {
 		IWL_ERR(mvm,
 			"Failed to free inactive queue %d (ret=%d)\n",
@@ -637,15 +659,16 @@
  * in such a case, otherwise - if no redirection required - it does nothing,
  * unless the %force param is true.
  */
-static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
-				  int ac, int ssn, unsigned int wdg_timeout,
-				  bool force, struct iwl_mvm_txq *txq)
+static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+				      int ac, int ssn, unsigned int wdg_timeout,
+				      bool force)
 {
 	struct iwl_scd_txq_cfg_cmd cmd = {
 		.scd_queue = queue,
 		.action = SCD_CFG_DISABLE_QUEUE,
 	};
 	bool shared_queue;
+	unsigned long mq;
 	int ret;
 
 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
@@ -669,14 +692,14 @@
 	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
 	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
 	cmd.tid = mvm->queue_info[queue].txq_tid;
+	mq = mvm->hw_queue_to_mac80211[queue];
 	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
 
 	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
 			    queue, iwl_mvm_ac_to_tx_fifo[ac]);
 
-	/* Stop the queue and wait for it to empty */
-	txq->stopped = true;
-
+	/* Stop MAC queues and wait for this queue to empty */
+	iwl_mvm_stop_mac_queues(mvm, mq);
 	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
 	if (ret) {
 		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
@@ -717,8 +740,8 @@
 		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
 
 out:
-	/* Continue using the queue */
-	txq->stopped = false;
+	/* Continue using the MAC queues */
+	iwl_mvm_start_mac_queues(mvm, mq);
 
 	return ret;
 }
@@ -743,7 +766,7 @@
 	return -ENOSPC;
 }
 
-static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
+static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
 				   u8 sta_id, u8 tid, unsigned int timeout)
 {
 	int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
@@ -768,7 +791,10 @@
 	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
 			    queue, sta_id, tid);
 
-	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue);
+	mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+	IWL_DEBUG_TX_QUEUES(mvm,
+			    "Enabling TXQ #%d (mac80211 map:0x%x)\n",
+			    queue, mvm->hw_queue_to_mac80211[queue]);
 
 	return queue;
 }
@@ -778,10 +804,9 @@
 					int tid)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-	struct iwl_mvm_txq *mvmtxq =
-		iwl_mvm_txq_from_tid(sta, tid);
 	unsigned int wdg_timeout =
 		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+	u8 mac_queue = mvmsta->vif->hw_queue[ac];
 	int queue = -1;
 
 	lockdep_assert_held(&mvm->mutex);
@@ -789,16 +814,11 @@
 	IWL_DEBUG_TX_QUEUES(mvm,
 			    "Allocating queue for sta %d on tid %d\n",
 			    mvmsta->sta_id, tid);
-	queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
+	queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
+					wdg_timeout);
 	if (queue < 0)
 		return queue;
 
-	if (sta) {
-		mvmtxq->txq_id = queue;
-		mvm->tvqm_info[queue].txq_tid = tid;
-		mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;
-	}
-
 	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
 
 	spin_lock_bh(&mvmsta->lock);
@@ -808,9 +828,8 @@
 	return 0;
 }
 
-static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
-				       struct ieee80211_sta *sta,
-				       int queue, u8 sta_id, u8 tid)
+static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
+				       int mac80211_queue, u8 sta_id, u8 tid)
 {
 	bool enable_queue = true;
 
@@ -825,6 +844,14 @@
 	if (mvm->queue_info[queue].tid_bitmap)
 		enable_queue = false;
 
+	if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
+		WARN(mac80211_queue >=
+		     BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
+		     "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
+		     mac80211_queue, queue, sta_id, tid);
+		mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+	}
+
 	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
 	mvm->queue_info[queue].ra_sta_id = sta_id;
 
@@ -838,22 +865,16 @@
 		mvm->queue_info[queue].txq_tid = tid;
 	}
 
-	if (sta) {
-		struct iwl_mvm_txq *mvmtxq =
-			iwl_mvm_txq_from_tid(sta, tid);
-
-		mvmtxq->txq_id = queue;
-	}
-
 	IWL_DEBUG_TX_QUEUES(mvm,
-			    "Enabling TXQ #%d tids=0x%x\n",
-			    queue, mvm->queue_info[queue].tid_bitmap);
+			    "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
+			    queue, mvm->queue_info[queue].tid_bitmap,
+			    mvm->hw_queue_to_mac80211[queue]);
 
 	return enable_queue;
 }
 
-static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			       int queue, u16 ssn,
+static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
+			       int mac80211_queue, u16 ssn,
 			       const struct iwl_trans_txq_scd_cfg *cfg,
 			       unsigned int wdg_timeout)
 {
@@ -873,7 +894,8 @@
 		return false;
 
 	/* Send the enabling command if we need to */
-	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
+	if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
+					cfg->sta_id, cfg->tid))
 		return false;
 
 	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
@@ -966,10 +988,9 @@
 
 	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
 
-	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
-				     tid_to_mac80211_ac[tid], ssn,
-				     wdg_timeout, true,
-				     iwl_mvm_txq_from_tid(sta, tid));
+	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
+					 tid_to_mac80211_ac[tid], ssn,
+					 wdg_timeout, true);
 	if (ret) {
 		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
 		return;
@@ -1046,9 +1067,11 @@
 	 * Remove the ones that did.
 	 */
 	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+		int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
 		u16 tid_bitmap;
 
 		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+		mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
 		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
 
 		tid_bitmap = mvm->queue_info[queue].tid_bitmap;
@@ -1081,6 +1104,10 @@
 	 * sure all TIDs have existing corresponding mac queues enabled
 	 */
 	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+		mvm->hw_queue_to_mac80211[queue] |=
+			BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
+	}
 
 	/* If the queue is marked as shared - "unshare" it */
 	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
@@ -1108,7 +1135,6 @@
 	unsigned long unshare_queues = 0;
 	unsigned long changetid_queues = 0;
 	int i, ret, free_queue = -ENOSPC;
-	struct ieee80211_sta *queue_owner  = NULL;
 
 	lockdep_assert_held(&mvm->mutex);
 
@@ -1174,14 +1200,13 @@
 						   inactive_tid_bitmap,
 						   &unshare_queues,
 						   &changetid_queues);
-		if (ret >= 0 && free_queue < 0) {
-			queue_owner = sta;
+		if (ret >= 0 && free_queue < 0)
 			free_queue = ret;
-		}
 		/* only unlock sta lock - we still need the queue info lock */
 		spin_unlock_bh(&mvmsta->lock);
 	}
 
+	rcu_read_unlock();
 
 	/* Reconfigure queues requiring reconfiguation */
 	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
@@ -1190,21 +1215,18 @@
 		iwl_mvm_change_queue_tid(mvm, i);
 
 	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
-		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
+		ret = iwl_mvm_free_inactive_queue(mvm, free_queue,
 						  alloc_for_sta);
-		if (ret) {
-			rcu_read_unlock();
+		if (ret)
 			return ret;
-		}
 	}
 
-	rcu_read_unlock();
-
 	return free_queue;
 }
 
 static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
-				   struct ieee80211_sta *sta, u8 ac, int tid)
+				   struct ieee80211_sta *sta, u8 ac, int tid,
+				   struct ieee80211_hdr *hdr)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_trans_txq_scd_cfg cfg = {
@@ -1215,6 +1237,7 @@
 	};
 	unsigned int wdg_timeout =
 		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+	u8 mac_queue = mvmsta->vif->hw_queue[ac];
 	int queue = -1;
 	unsigned long disable_agg_tids = 0;
 	enum iwl_mvm_agg_state queue_state;
@@ -1233,7 +1256,12 @@
 	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
 	spin_unlock_bh(&mvmsta->lock);
 
-	if (tid == IWL_MAX_TID_COUNT) {
+	/*
+	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
+	 * exists
+	 */
+	if (!ieee80211_is_data_qos(hdr->frame_control) ||
+	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
 		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
 						IWL_MVM_DQA_MIN_MGMT_QUEUE,
 						IWL_MVM_DQA_MAX_MGMT_QUEUE);
@@ -1312,7 +1340,8 @@
 		}
 	}
 
-	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);
+	inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
+				     ssn, &cfg, wdg_timeout);
 
 	/*
 	 * Mark queue as shared in transport if shared
@@ -1354,9 +1383,8 @@
 		}
 	} else {
 		/* Redirect queue, if needed */
-		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
-					     wdg_timeout, false,
-					     iwl_mvm_txq_from_tid(sta, tid));
+		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
+						 wdg_timeout, false);
 		if (ret)
 			goto out_err;
 	}
@@ -1364,7 +1392,7 @@
 	return 0;
 
 out_err:
-	iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);
+	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
 
 	return ret;
 }
@@ -1377,34 +1405,87 @@
 	return tid_to_mac80211_ac[tid];
 }
 
+static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
+				       struct ieee80211_sta *sta, int tid)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+	struct sk_buff *skb;
+	struct ieee80211_hdr *hdr;
+	struct sk_buff_head deferred_tx;
+	u8 mac_queue;
+	bool no_queue = false; /* Marks if there is a problem with the queue */
+	u8 ac;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	skb = skb_peek(&tid_data->deferred_tx_frames);
+	if (!skb)
+		return;
+	hdr = (void *)skb->data;
+
+	ac = iwl_mvm_tid_to_ac_queue(tid);
+	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
+
+	if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
+	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
+		IWL_ERR(mvm,
+			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
+			mvmsta->sta_id, tid);
+
+		/*
+		 * Mark queue as problematic so later the deferred traffic is
+		 * freed, as we can do nothing with it
+		 */
+		no_queue = true;
+	}
+
+	__skb_queue_head_init(&deferred_tx);
+
+	/* Disable bottom-halves when entering TX path */
+	local_bh_disable();
+	spin_lock(&mvmsta->lock);
+	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
+	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
+	spin_unlock(&mvmsta->lock);
+
+	while ((skb = __skb_dequeue(&deferred_tx)))
+		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
+			ieee80211_free_txskb(mvm->hw, skb);
+	local_bh_enable();
+
+	/* Wake queue */
+	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
+}
+
 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
 {
 	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
 					   add_stream_wk);
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	unsigned long deferred_tid_traffic;
+	int sta_id, tid;
 
 	mutex_lock(&mvm->mutex);
 
 	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
 
-	while (!list_empty(&mvm->add_stream_txqs)) {
-		struct iwl_mvm_txq *mvmtxq;
-		struct ieee80211_txq *txq;
-		u8 tid;
-
-		mvmtxq = list_first_entry(&mvm->add_stream_txqs,
-					  struct iwl_mvm_txq, list);
-
-		txq = container_of((void *)mvmtxq, struct ieee80211_txq,
-				   drv_priv);
-		tid = txq->tid;
-		if (tid == IEEE80211_NUM_TIDS)
-			tid = IWL_MAX_TID_COUNT;
-
-		iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
-		list_del_init(&mvmtxq->list);
-		local_bh_disable();
-		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
-		local_bh_enable();
+	/* Go over all stations with deferred traffic */
+	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
+			 IWL_MVM_STATION_COUNT) {
+		clear_bit(sta_id, mvm->sta_deferred_frames);
+		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+						lockdep_is_held(&mvm->mutex));
+		if (IS_ERR_OR_NULL(sta))
+			continue;
+
+		mvmsta = iwl_mvm_sta_from_mac80211(sta);
+		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
+
+		for_each_set_bit(tid, &deferred_tid_traffic,
+				 IWL_MAX_TID_COUNT + 1)
+			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
 	}
 
 	mutex_unlock(&mvm->mutex);
@@ -1460,11 +1541,10 @@
  * Note that re-enabling aggregations isn't done in this function.
  */
 static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
-						 struct ieee80211_sta *sta)
+						 struct iwl_mvm_sta *mvm_sta)
 {
-	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-	unsigned int wdg =
-		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
+	unsigned int wdg_timeout =
+			iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
 	int i;
 	struct iwl_trans_txq_scd_cfg cfg = {
 		.sta_id = mvm_sta->sta_id,
@@ -1480,18 +1560,23 @@
 		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
 		int txq_id = tid_data->txq_id;
 		int ac;
+		u8 mac_queue;
 
 		if (txq_id == IWL_MVM_INVALID_QUEUE)
 			continue;
 
+		skb_queue_head_init(&tid_data->deferred_tx_frames);
+
 		ac = tid_to_mac80211_ac[i];
+		mac_queue = mvm_sta->vif->hw_queue[ac];
 
 		if (iwl_mvm_has_new_tx_api(mvm)) {
 			IWL_DEBUG_TX_QUEUES(mvm,
 					    "Re-mapping sta %d tid %d\n",
 					    mvm_sta->sta_id, i);
-			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
-							 i, wdg);
+			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
+							 mvm_sta->sta_id,
+							 i, wdg_timeout);
 			tid_data->txq_id = txq_id;
 
 			/*
@@ -1514,7 +1599,8 @@
 					    "Re-mapping sta %d tid %d to queue %d\n",
 					    mvm_sta->sta_id, i, txq_id);
 
-			iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
+			iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
+					   wdg_timeout);
 			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
 		}
 	}
@@ -1604,7 +1690,7 @@
 		if (ret)
 			goto err;
 
-		iwl_mvm_realloc_queues_after_restart(mvm, sta);
+		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
 		sta_update = true;
 		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
 		goto update_fw;
@@ -1637,17 +1723,9 @@
 		 * frames until the queue is allocated
 		 */
 		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
+		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
 	}
-
-	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
-		struct iwl_mvm_txq *mvmtxq =
-			iwl_mvm_txq_from_mac80211(sta->txq[i]);
-
-		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
-		INIT_LIST_HEAD(&mvmtxq->list);
-		atomic_set(&mvmtxq->tx_request, 0);
-	}
-
+	mvm_sta->deferred_traffic_tid_map = 0;
 	mvm_sta->agg_tids = 0;
 
 	if (iwl_mvm_has_new_rx_api(mvm) &&
@@ -1782,9 +1860,9 @@
 
 static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
 				       struct ieee80211_vif *vif,
-				       struct ieee80211_sta *sta)
+				       struct iwl_mvm_sta *mvm_sta)
 {
-	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+	int ac;
 	int i;
 
 	lockdep_assert_held(&mvm->mutex);
@@ -1793,17 +1871,11 @@
 		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
 			continue;
 
-		iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
-				    0);
+		ac = iwl_mvm_tid_to_ac_queue(i);
+		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
+				    vif->hw_queue[ac], i, 0);
 		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
 	}
-
-	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
-		struct iwl_mvm_txq *mvmtxq =
-			iwl_mvm_txq_from_mac80211(sta->txq[i]);
-
-		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
-	}
 }
 
 int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
@@ -1865,7 +1937,7 @@
 
 	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
 
-	iwl_mvm_disable_sta_queues(mvm, vif, sta);
+	iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
 
 	/* If there is a TXQ still marked as reserved - free it */
 	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
@@ -1971,7 +2043,7 @@
 
 	if (iwl_mvm_has_new_tx_api(mvm)) {
 		int tvqm_queue =
-			iwl_mvm_tvqm_enable_txq(mvm, sta_id,
+			iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
 						IWL_MAX_TID_COUNT,
 						wdg_timeout);
 		*queue = tvqm_queue;
@@ -1984,7 +2056,7 @@
 			.frame_limit = IWL_FRAME_LIMIT,
 		};
 
-		iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout);
+		iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
 	}
 }
 
@@ -2062,7 +2134,8 @@
 
 	lockdep_assert_held(&mvm->mutex);
 
-	iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
+	iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
+			    IWL_MAX_TID_COUNT, 0);
 	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
 	if (ret)
 		IWL_WARN(mvm, "Failed sending remove station\n");
@@ -2123,7 +2196,8 @@
 
 		bsta->tfd_queue_msk |= BIT(queue);
 
-		iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
+		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
+				   &cfg, wdg_timeout);
 	}
 
 	if (vif->type == NL80211_IFTYPE_ADHOC)
@@ -2142,7 +2216,8 @@
 	 * to firmware so enable queue here - after the station was added
 	 */
 	if (iwl_mvm_has_new_tx_api(mvm)) {
-		queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
+		queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
+						bsta->sta_id,
 						IWL_MAX_TID_COUNT,
 						wdg_timeout);
 
@@ -2180,7 +2255,7 @@
 		return;
 	}
 
-	iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
+	iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
 	if (iwl_mvm_has_new_tx_api(mvm))
 		return;
 
@@ -2304,8 +2379,10 @@
 	 * Note that this is done here as we want to avoid making DQA
 	 * changes in mac80211 layer.
 	 */
-	if (vif->type == NL80211_IFTYPE_ADHOC)
-		mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+	if (vif->type == NL80211_IFTYPE_ADHOC) {
+		vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+		mvmvif->cab_queue = vif->cab_queue;
+	}
 
 	/*
 	 * While in previous FWs we had to exclude cab queue from TFD queue
@@ -2313,9 +2390,9 @@
 	 */
 	if (!iwl_mvm_has_new_tx_api(mvm) &&
 	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
-		iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
-				   timeout);
-		msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
+		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
+				   &cfg, timeout);
+		msta->tfd_queue_msk |= BIT(vif->cab_queue);
 	}
 	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
 					 mvmvif->id, mvmvif->color);
@@ -2332,14 +2409,15 @@
 	 * tfd_queue_mask.
 	 */
 	if (iwl_mvm_has_new_tx_api(mvm)) {
-		int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
+		int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
+						    msta->sta_id,
 						    0,
 						    timeout);
 		mvmvif->cab_queue = queue;
 	} else if (!fw_has_api(&mvm->fw->ucode_capa,
 			       IWL_UCODE_TLV_API_STA_TYPE))
-		iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
-				   timeout);
+		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
+				   &cfg, timeout);
 
 	return 0;
 }
@@ -2410,7 +2488,8 @@
 
 	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
 
-	iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
+	iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
+			    0, 0);
 
 	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
 	if (ret)
@@ -2946,7 +3025,8 @@
 	}
 
 	if (alloc_queue)
-		iwl_mvm_enable_txq(mvm, sta, queue, ssn,
+		iwl_mvm_enable_txq(mvm, queue,
+				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
 				   &cfg, wdg_timeout);
 
 	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
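
Note: the deferral handshake restored in this file pairs with
iwl_mvm_tx_add_stream() in tx.c below. In outline (a summary of the
control flow, not compilable driver code):

	/*
	 * TX path (tx.c), first frame for an RA/TID without a queue:
	 *   skb_queue_tail(&tid_data->deferred_tx_frames, skb);
	 *   iwl_mvm_stop_mac_queues(mvm, BIT(info->hw_queue));
	 *   schedule_work(&mvm->add_stream_wk);
	 *
	 * add_stream_wk (sta.c), under mvm->mutex:
	 *   iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr);
	 *   skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
	 *   ...transmit each deferred skb, or free them all on failure...
	 *   iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
	 */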
Index: src/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
===================================================================
--- src.orig/drivers/net/wireless/intel/iwlwifi/mvm/sta.h	2019-06-27 14:54:04.140678253 +0200
+++ src/drivers/net/wireless/intel/iwlwifi/mvm/sta.h	2019-07-31 18:51:11.756815213 +0200
@@ -297,6 +297,7 @@
 
 /**
  * struct iwl_mvm_tid_data - holds the states for each RA / TID
+ * @deferred_tx_frames: deferred TX frames for this RA/TID
  * @seq_number: the next WiFi sequence number to use
  * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
  *	This is basically (last acked packet++).
@@ -317,6 +318,7 @@
  *	 tpt_meas_start
  */
 struct iwl_mvm_tid_data {
+	struct sk_buff_head deferred_tx_frames;
 	u16 seq_number;
 	u16 next_reclaimed;
 	/* The rest is Tx AGG related */
@@ -425,6 +427,8 @@
 	struct iwl_mvm_key_pn __rcu *ptk_pn[4];
 	struct iwl_mvm_rxq_dup_data *dup_data;
 
+	u16 deferred_traffic_tid_map;
+
 	u8 reserved_queue;
 
 	/* Temporary, until the new TLC will control the Tx protection */
Index: src/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
===================================================================
--- src.orig/drivers/net/wireless/intel/iwlwifi/mvm/tx.c	2019-07-31 18:50:40.639230684 +0200
+++ src/drivers/net/wireless/intel/iwlwifi/mvm/tx.c	2019-07-31 18:51:11.756815213 +0200
@@ -605,12 +605,11 @@
 }
 
 static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
-				      struct ieee80211_tx_info *info,
-				      struct ieee80211_hdr *hdr)
+				      struct ieee80211_tx_info *info, __le16 fc)
 {
-	struct iwl_mvm_vif *mvmvif =
-		iwl_mvm_vif_from_mac80211(info->control.vif);
-	__le16 fc = hdr->frame_control;
+	struct iwl_mvm_vif *mvmvif;
+
+	mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif);
 
 	switch (info->control.vif->type) {
 	case NL80211_IFTYPE_AP:
@@ -629,9 +628,7 @@
 		    (!ieee80211_is_bufferable_mmpdu(fc) ||
 		     ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
 			return mvm->probe_queue;
-
-		if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) &&
-		    is_multicast_ether_addr(hdr->addr1))
+		if (info->hw_queue == info->control.vif->cab_queue)
 			return mvmvif->cab_queue;
 
 		WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
@@ -640,6 +637,8 @@
 	case NL80211_IFTYPE_P2P_DEVICE:
 		if (ieee80211_is_mgmt(fc))
 			return mvm->p2p_dev_queue;
+		if (info->hw_queue == info->control.vif->cab_queue)
+			return mvmvif->cab_queue;
 
 		WARN_ON_ONCE(1);
 		return mvm->p2p_dev_queue;
@@ -717,8 +716,6 @@
 	u8 sta_id;
 	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
 	__le16 fc = hdr->frame_control;
-	bool offchannel = IEEE80211_SKB_CB(skb)->flags &
-		IEEE80211_TX_CTL_TX_OFFCHAN;
 	int queue = -1;
 
 	if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
@@ -729,6 +726,11 @@
 	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
 		return -1;
 
+	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
+			 (!info.control.vif ||
+			  info.hw_queue != info.control.vif->cab_queue)))
+		return -1;
+
 	if (info.control.vif) {
 		struct iwl_mvm_vif *mvmvif =
 			iwl_mvm_vif_from_mac80211(info.control.vif);
@@ -741,12 +743,14 @@
 			else
 				sta_id = mvmvif->mcast_sta.sta_id;
 
-			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr);
+			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
+							   hdr->frame_control);
+
 		} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
 			queue = mvm->snif_queue;
 			sta_id = mvm->snif_sta.sta_id;
 		} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
-			   offchannel) {
+			   info.hw_queue == IWL_MVM_OFFCHANNEL_QUEUE) {
 			/*
 			 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
 			 * that can be used in 2 different types of vifs, P2P &
@@ -760,10 +764,8 @@
 		}
 	}
 
-	if (queue < 0) {
-		IWL_ERR(mvm, "No queue was found. Dropping TX\n");
+	if (queue < 0)
 		return -1;
-	}
 
 	if (unlikely(ieee80211_is_probe_resp(fc)))
 		iwl_mvm_probe_resp_set_noa(mvm, skb);
@@ -1006,6 +1008,34 @@
 }
 #endif
 
+static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
+				  struct iwl_mvm_sta *mvm_sta, u8 tid,
+				  struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	u8 mac_queue = info->hw_queue;
+	struct sk_buff_head *deferred_tx_frames;
+
+	lockdep_assert_held(&mvm_sta->lock);
+
+	mvm_sta->deferred_traffic_tid_map |= BIT(tid);
+	set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);
+
+	deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;
+
+	skb_queue_tail(deferred_tx_frames, skb);
+
+	/*
+	 * The first deferred frame should've stopped the MAC queues, so we
+	 * should never get a second deferred frame for the RA/TID.
+	 * In case of GSO the first packet may have been split, so don't warn.
+	 */
+	if (skb_queue_len(deferred_tx_frames) == 1) {
+		iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
+		schedule_work(&mvm->add_stream_wk);
+	}
+}
+
 /* Check if there are any timed-out TIDs on a given shared TXQ */
 static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
 {
@@ -1076,7 +1106,7 @@
 	__le16 fc;
 	u16 seq_number = 0;
 	u8 tid = IWL_MAX_TID_COUNT;
-	u16 txq_id;
+	u16 txq_id = info->hw_queue;
 	bool is_ampdu = false;
 	int hdrlen;
 
@@ -1145,7 +1175,14 @@
 
 	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
 
+	/* Check if TXQ needs to be allocated or re-activated */
 	if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
+		iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
+
+		/*
+		 * The frame is now deferred, and the worker scheduled
+		 * will re-allocate it, so we can free it for now.
+		 */
 		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
 		spin_unlock(&mvmsta->lock);
 		return 0;
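
Note: with queue control back in the driver, off-channel frames are
again recognized by info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE rather
than by the IEEE80211_TX_CTL_TX_OFFCHAN flag, and txq_id is seeded from
info->hw_queue. In iwl_mvm_tx_add_stream(), the MAC queue is stopped
only when the deferred list grows from empty to a single entry; GSO may
already have split the first frame into several segments, so a longer
list there is expected rather than warned about.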