From: Matthias Schiffer <mschiffer@universe-factory.net>
Date: Fri, 30 Sep 2016 16:58:01 +0200
Subject: ath9k: remove patch causing stability issues with powersave devices (FS#176)

Signed-off-by: Felix Fietkau <nbd@nbd.name>

Backport of LEDE fc88eb3fdfce6d39b4c62158cf6f42605a360a1e

diff --git a/package/kernel/mac80211/patches/341-ath9k-release-PS-buffered-frames-as-A-MPDU-if-enable.patch b/package/kernel/mac80211/patches/341-ath9k-release-PS-buffered-frames-as-A-MPDU-if-enable.patch
deleted file mode 100644
index 1cc1667..0000000
--- a/package/kernel/mac80211/patches/341-ath9k-release-PS-buffered-frames-as-A-MPDU-if-enable.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-From: Felix Fietkau <nbd@nbd.name>
-Date: Sun, 28 Aug 2016 13:15:10 +0200
-Subject: [PATCH] ath9k: release PS buffered frames as A-MPDU if enabled
-
-Signed-off-by: Felix Fietkau <nbd@nbd.name>
----
-
---- a/drivers/net/wireless/ath/ath9k/xmit.c
-+++ b/drivers/net/wireless/ath/ath9k/xmit.c
-@@ -1660,10 +1660,11 @@ void ath9k_release_buffered_frames(struc
- struct ath_node *an = (struct ath_node *)sta->drv_priv;
- struct ath_txq *txq = sc->tx.uapsdq;
- struct ieee80211_tx_info *info;
-+ struct ath_frame_info *fi;
- struct list_head bf_q;
- struct ath_buf *bf_tail = NULL, *bf;
- int sent = 0;
-- int i;
-+ int n, i;
-
- INIT_LIST_HEAD(&bf_q);
- for (i = 0; tids && nframes; i++, tids >>= 1) {
-@@ -1683,10 +1684,15 @@ void ath9k_release_buffered_frames(struc
- ath9k_set_moredata(sc, bf, true);
- list_add_tail(&bf->list, &bf_q);
- ath_set_rates(tid->an->vif, tid->an->sta, bf, true);
-- if (bf_isampdu(bf)) {
-+ if (bf_isampdu(bf))
- ath_tx_addto_baw(sc, tid, bf);
-- bf->bf_state.bf_type &= ~BUF_AGGR;
-+ if (bf_isaggr(bf)) {
-+ fi = get_frame_info(bf->bf_mpdu);
-+ n = ath_compute_num_delims(sc, tid, bf,
-+ fi->framelen, true);
-+ bf->bf_state.ndelim = n;
- }
-+
- if (bf_tail)
- bf_tail->bf_next = bf;
-
diff --git a/package/kernel/mac80211/patches/341-ath9k-report-tx-status-on-EOSP.patch b/package/kernel/mac80211/patches/341-ath9k-report-tx-status-on-EOSP.patch
new file mode 100644
index 0000000..80a3074
--- /dev/null
+++ b/package/kernel/mac80211/patches/341-ath9k-report-tx-status-on-EOSP.patch
@@ -0,0 +1,19 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sun, 28 Aug 2016 13:23:27 +0200
+Subject: [PATCH] ath9k: report tx status on EOSP
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -86,7 +86,8 @@ static void ath_tx_status(struct ieee802
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = info->status.status_driver_data[0];
+
+- if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
++ if (info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
++ IEEE80211_TX_STATUS_EOSP)) {
+ ieee80211_tx_status(hw, skb);
+ return;
+ }
diff --git a/package/kernel/mac80211/patches/342-ath9k-fix-block-ack-window-tracking-issues.patch b/package/kernel/mac80211/patches/342-ath9k-fix-block-ack-window-tracking-issues.patch
|
|
new file mode 100644
|
|
index 0000000..fea147b
|
|
--- /dev/null
|
|
+++ b/package/kernel/mac80211/patches/342-ath9k-fix-block-ack-window-tracking-issues.patch
|
|
@@ -0,0 +1,114 @@
|
|
+From: Felix Fietkau <nbd@nbd.name>
|
|
+Date: Tue, 30 Aug 2016 12:44:08 +0200
|
|
+Subject: [PATCH] ath9k: fix block-ack window tracking issues
|
|
+
|
|
+Ensure that a buffer gets tracked as part of the block-ack window as
|
|
+soon as it's dequeued from the tid for the first time. Ensure that
|
|
+double calls to ath_tx_addto_baw (e.g. on retransmission) don't cause
|
|
+any issues.
|
|
+
|
|
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
|
|
+---
|
|
+
|
|
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
|
|
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
|
|
+@@ -62,7 +62,7 @@ static void ath_tx_rc_status(struct ath_
|
|
+ struct ath_tx_status *ts, int nframes, int nbad,
|
|
+ int txok);
|
|
+ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
|
|
+- int seqno);
|
|
++ struct ath_buf *bf);
|
|
+ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
|
|
+ struct ath_txq *txq,
|
|
+ struct ath_atx_tid *tid,
|
|
+@@ -300,7 +300,7 @@ static void ath_tx_flush_tid(struct ath_
|
|
+ }
|
|
+
|
|
+ if (fi->baw_tracked) {
|
|
+- ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
|
|
++ ath_tx_update_baw(sc, tid, bf);
|
|
+ sendbar = true;
|
|
+ }
|
|
+
|
|
+@@ -316,10 +316,15 @@ static void ath_tx_flush_tid(struct ath_
|
|
+ }
|
|
+
|
|
+ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
|
|
+- int seqno)
|
|
++ struct ath_buf *bf)
|
|
+ {
|
|
++ struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
|
|
++ u16 seqno = bf->bf_state.seqno;
|
|
+ int index, cindex;
|
|
+
|
|
++ if (!fi->baw_tracked)
|
|
++ return;
|
|
++
|
|
+ index = ATH_BA_INDEX(tid->seq_start, seqno);
|
|
+ cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
|
|
+
|
|
+@@ -340,6 +345,9 @@ static void ath_tx_addto_baw(struct ath_
|
|
+ u16 seqno = bf->bf_state.seqno;
|
|
+ int index, cindex;
|
|
+
|
|
++ if (fi->baw_tracked)
|
|
++ return;
|
|
++
|
|
+ index = ATH_BA_INDEX(tid->seq_start, seqno);
|
|
+ cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
|
|
+ __set_bit(cindex, tid->tx_buf);
|
|
+@@ -616,7 +624,7 @@ static void ath_tx_complete_aggr(struct
|
|
+ * complete the acked-ones/xretried ones; update
|
|
+ * block-ack window
|
|
+ */
|
|
+- ath_tx_update_baw(sc, tid, seqno);
|
|
++ ath_tx_update_baw(sc, tid, bf);
|
|
+
|
|
+ if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
|
|
+ memcpy(tx_info->control.rates, rates, sizeof(rates));
|
|
+@@ -646,7 +654,7 @@ static void ath_tx_complete_aggr(struct
|
|
+ * run out of tx buf.
|
|
+ */
|
|
+ if (!tbf) {
|
|
+- ath_tx_update_baw(sc, tid, seqno);
|
|
++ ath_tx_update_baw(sc, tid, bf);
|
|
+
|
|
+ ath_tx_complete_buf(sc, bf, txq,
|
|
+ &bf_head, NULL, ts,
|
|
+@@ -986,11 +994,14 @@ ath_tx_get_tid_subframe(struct ath_softc
|
|
+
|
|
+ INIT_LIST_HEAD(&bf_head);
|
|
+ list_add(&bf->list, &bf_head);
|
|
+- ath_tx_update_baw(sc, tid, seqno);
|
|
++ ath_tx_update_baw(sc, tid, bf);
|
|
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
++ if (bf_isampdu(bf))
|
|
++ ath_tx_addto_baw(sc, tid, bf);
|
|
++
|
|
+ return bf;
|
|
+ }
|
|
+
|
|
+@@ -1048,8 +1059,6 @@ ath_tx_form_aggr(struct ath_softc *sc, s
|
|
+ bf->bf_next = NULL;
|
|
+
|
|
+ /* link buffers of this frame to the aggregate */
|
|
+- if (!fi->baw_tracked)
|
|
+- ath_tx_addto_baw(sc, tid, bf);
|
|
+ bf->bf_state.ndelim = ndelim;
|
|
+
|
|
+ list_add_tail(&bf->list, bf_q);
|
|
+@@ -1684,10 +1693,8 @@ void ath9k_release_buffered_frames(struc
|
|
+ ath9k_set_moredata(sc, bf, true);
|
|
+ list_add_tail(&bf->list, &bf_q);
|
|
+ ath_set_rates(tid->an->vif, tid->an->sta, bf, true);
|
|
+- if (bf_isampdu(bf)) {
|
|
+- ath_tx_addto_baw(sc, tid, bf);
|
|
++ if (bf_isampdu(bf))
|
|
+ bf->bf_state.bf_type &= ~BUF_AGGR;
|
|
+- }
|
|
+ if (bf_tail)
|
|
+ bf_tail->bf_next = bf;
|
|
+
|
|
diff --git a/package/kernel/mac80211/patches/342-ath9k-report-tx-status-on-EOSP.patch b/package/kernel/mac80211/patches/342-ath9k-report-tx-status-on-EOSP.patch
|
|
deleted file mode 100644
|
|
index 80a3074..0000000
|
|
--- a/package/kernel/mac80211/patches/342-ath9k-report-tx-status-on-EOSP.patch
|
|
+++ /dev/null
|
|
@@ -1,19 +0,0 @@
|
|
-From: Felix Fietkau <nbd@nbd.name>
|
|
-Date: Sun, 28 Aug 2016 13:23:27 +0200
|
|
-Subject: [PATCH] ath9k: report tx status on EOSP
|
|
-
|
|
-Signed-off-by: Felix Fietkau <nbd@nbd.name>
|
|
----
|
|
-
|
|
---- a/drivers/net/wireless/ath/ath9k/xmit.c
|
|
-+++ b/drivers/net/wireless/ath/ath9k/xmit.c
|
|
-@@ -86,7 +86,8 @@ static void ath_tx_status(struct ieee802
|
|
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
|
- struct ieee80211_sta *sta = info->status.status_driver_data[0];
|
|
-
|
|
-- if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
|
|
-+ if (info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
|
|
-+ IEEE80211_TX_STATUS_EOSP)) {
|
|
- ieee80211_tx_status(hw, skb);
|
|
- return;
|
|
- }
|
|
diff --git a/package/kernel/mac80211/patches/343-ath9k-fix-block-ack-window-tracking-issues.patch b/package/kernel/mac80211/patches/343-ath9k-fix-block-ack-window-tracking-issues.patch
|
|
deleted file mode 100644
|
|
index 007a8d7d..0000000
|
|
--- a/package/kernel/mac80211/patches/343-ath9k-fix-block-ack-window-tracking-issues.patch
|
|
+++ /dev/null
|
|
@@ -1,111 +0,0 @@
|
|
-From: Felix Fietkau <nbd@nbd.name>
|
|
-Date: Tue, 30 Aug 2016 12:44:08 +0200
|
|
-Subject: [PATCH] ath9k: fix block-ack window tracking issues
|
|
-
|
|
-Ensure that a buffer gets tracked as part of the block-ack window as
|
|
-soon as it's dequeued from the tid for the first time. Ensure that
|
|
-double calls to ath_tx_addto_baw (e.g. on retransmission) don't cause
|
|
-any issues.
|
|
-
|
|
-Signed-off-by: Felix Fietkau <nbd@nbd.name>
|
|
----
|
|
-
|
|
---- a/drivers/net/wireless/ath/ath9k/xmit.c
|
|
-+++ b/drivers/net/wireless/ath/ath9k/xmit.c
|
|
-@@ -62,7 +62,7 @@ static void ath_tx_rc_status(struct ath_
|
|
- struct ath_tx_status *ts, int nframes, int nbad,
|
|
- int txok);
|
|
- static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
|
|
-- int seqno);
|
|
-+ struct ath_buf *bf);
|
|
- static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
|
|
- struct ath_txq *txq,
|
|
- struct ath_atx_tid *tid,
|
|
-@@ -300,7 +300,7 @@ static void ath_tx_flush_tid(struct ath_
|
|
- }
|
|
-
|
|
- if (fi->baw_tracked) {
|
|
-- ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
|
|
-+ ath_tx_update_baw(sc, tid, bf);
|
|
- sendbar = true;
|
|
- }
|
|
-
|
|
-@@ -316,10 +316,15 @@ static void ath_tx_flush_tid(struct ath_
|
|
- }
|
|
-
|
|
- static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
|
|
-- int seqno)
|
|
-+ struct ath_buf *bf)
|
|
- {
|
|
-+ struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
|
|
-+ u16 seqno = bf->bf_state.seqno;
|
|
- int index, cindex;
|
|
-
|
|
-+ if (!fi->baw_tracked)
|
|
-+ return;
|
|
-+
|
|
- index = ATH_BA_INDEX(tid->seq_start, seqno);
|
|
- cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
|
|
-
|
|
-@@ -340,6 +345,9 @@ static void ath_tx_addto_baw(struct ath_
|
|
- u16 seqno = bf->bf_state.seqno;
|
|
- int index, cindex;
|
|
-
|
|
-+ if (fi->baw_tracked)
|
|
-+ return;
|
|
-+
|
|
- index = ATH_BA_INDEX(tid->seq_start, seqno);
|
|
- cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
|
|
- __set_bit(cindex, tid->tx_buf);
|
|
-@@ -616,7 +624,7 @@ static void ath_tx_complete_aggr(struct
|
|
- * complete the acked-ones/xretried ones; update
|
|
- * block-ack window
|
|
- */
|
|
-- ath_tx_update_baw(sc, tid, seqno);
|
|
-+ ath_tx_update_baw(sc, tid, bf);
|
|
-
|
|
- if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
|
|
- memcpy(tx_info->control.rates, rates, sizeof(rates));
|
|
-@@ -646,7 +654,7 @@ static void ath_tx_complete_aggr(struct
|
|
- * run out of tx buf.
|
|
- */
|
|
- if (!tbf) {
|
|
-- ath_tx_update_baw(sc, tid, seqno);
|
|
-+ ath_tx_update_baw(sc, tid, bf);
|
|
-
|
|
- ath_tx_complete_buf(sc, bf, txq,
|
|
- &bf_head, NULL, ts,
|
|
-@@ -986,11 +994,14 @@ ath_tx_get_tid_subframe(struct ath_softc
|
|
-
|
|
- INIT_LIST_HEAD(&bf_head);
|
|
- list_add(&bf->list, &bf_head);
|
|
-- ath_tx_update_baw(sc, tid, seqno);
|
|
-+ ath_tx_update_baw(sc, tid, bf);
|
|
- ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
|
|
- continue;
|
|
- }
|
|
-
|
|
-+ if (bf_isampdu(bf))
|
|
-+ ath_tx_addto_baw(sc, tid, bf);
|
|
-+
|
|
- return bf;
|
|
- }
|
|
-
|
|
-@@ -1048,8 +1059,6 @@ ath_tx_form_aggr(struct ath_softc *sc, s
|
|
- bf->bf_next = NULL;
|
|
-
|
|
- /* link buffers of this frame to the aggregate */
|
|
-- if (!fi->baw_tracked)
|
|
-- ath_tx_addto_baw(sc, tid, bf);
|
|
- bf->bf_state.ndelim = ndelim;
|
|
-
|
|
- list_add_tail(&bf->list, bf_q);
|
|
-@@ -1685,8 +1694,6 @@ void ath9k_release_buffered_frames(struc
|
|
- ath9k_set_moredata(sc, bf, true);
|
|
- list_add_tail(&bf->list, &bf_q);
|
|
- ath_set_rates(tid->an->vif, tid->an->sta, bf, true);
|
|
-- if (bf_isampdu(bf))
|
|
-- ath_tx_addto_baw(sc, tid, bf);
|
|
- if (bf_isaggr(bf)) {
|
|
- fi = get_frame_info(bf->bf_mpdu);
|
|
- n = ath_compute_num_delims(sc, tid, bf,
|
|
diff --git a/package/kernel/mac80211/patches/343-mac80211-send-delBA-on-unexpected-BlockAck-data-fram.patch b/package/kernel/mac80211/patches/343-mac80211-send-delBA-on-unexpected-BlockAck-data-fram.patch
|
|
new file mode 100644
|
|
index 0000000..3bbca22
|
|
--- /dev/null
|
|
+++ b/package/kernel/mac80211/patches/343-mac80211-send-delBA-on-unexpected-BlockAck-data-fram.patch
|
|
@@ -0,0 +1,64 @@
|
|
+From: Johannes Berg <johannes.berg@intel.com>
|
|
+Date: Mon, 29 Aug 2016 23:25:18 +0300
|
|
+Subject: [PATCH] mac80211: send delBA on unexpected BlockAck data frames
|
|
+
|
|
+When we receive data frames with ACK policy BlockAck, send
|
|
+delBA as requested by the 802.11 spec. Since this would be
|
|
+happening for every frame inside an A-MPDU if it's really
|
|
+received outside a session, limit it to a single attempt.
|
|
+
|
|
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
|
|
+---
|
|
+
|
|
+--- a/net/mac80211/agg-rx.c
|
|
++++ b/net/mac80211/agg-rx.c
|
|
+@@ -388,8 +388,10 @@ void __ieee80211_start_rx_ba_session(str
|
|
+ }
|
|
+
|
|
+ end:
|
|
+- if (status == WLAN_STATUS_SUCCESS)
|
|
++ if (status == WLAN_STATUS_SUCCESS) {
|
|
+ __set_bit(tid, sta->ampdu_mlme.agg_session_valid);
|
|
++ __clear_bit(tid, sta->ampdu_mlme.unexpected_agg);
|
|
++ }
|
|
+ mutex_unlock(&sta->ampdu_mlme.mtx);
|
|
+
|
|
+ end_no_lock:
|
|
+--- a/net/mac80211/rx.c
|
|
++++ b/net/mac80211/rx.c
|
|
+@@ -1072,8 +1072,15 @@ static void ieee80211_rx_reorder_ampdu(s
|
|
+ tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
|
|
+
|
|
+ tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
|
|
+- if (!tid_agg_rx)
|
|
++ if (!tid_agg_rx) {
|
|
++ if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
|
|
++ !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
|
|
++ !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
|
|
++ ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
|
|
++ WLAN_BACK_RECIPIENT,
|
|
++ WLAN_REASON_QSTA_REQUIRE_SETUP);
|
|
+ goto dont_reorder;
|
|
++ }
|
|
+
|
|
+ /* qos null data frames are excluded */
|
|
+ if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
|
|
+--- a/net/mac80211/sta_info.h
|
|
++++ b/net/mac80211/sta_info.h
|
|
+@@ -230,6 +230,8 @@ struct tid_ampdu_rx {
|
|
+ * @tid_rx_stop_requested: bitmap indicating which BA sessions per TID the
|
|
+ * driver requested to close until the work for it runs
|
|
+ * @agg_session_valid: bitmap indicating which TID has a rx BA session open on
|
|
++ * @unexpected_agg: bitmap indicating which TID already sent a delBA due to
|
|
++ * unexpected aggregation related frames outside a session
|
|
+ * @work: work struct for starting/stopping aggregation
|
|
+ * @tid_tx: aggregation info for Tx per TID
|
|
+ * @tid_start_tx: sessions where start was requested
|
|
+@@ -244,6 +246,7 @@ struct sta_ampdu_mlme {
|
|
+ unsigned long tid_rx_timer_expired[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
|
|
+ unsigned long tid_rx_stop_requested[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
|
|
+ unsigned long agg_session_valid[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
|
|
++ unsigned long unexpected_agg[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
|
|
+ /* tx */
|
|
+ struct work_struct work;
|
|
+ struct tid_ampdu_tx __rcu *tid_tx[IEEE80211_NUM_TIDS];
|
|
diff --git a/package/kernel/mac80211/patches/344-mac80211-send-delBA-on-unexpected-BlockAck-Request.patch b/package/kernel/mac80211/patches/344-mac80211-send-delBA-on-unexpected-BlockAck-Request.patch
|
|
new file mode 100644
|
|
index 0000000..c3d3118
|
|
--- /dev/null
|
|
+++ b/package/kernel/mac80211/patches/344-mac80211-send-delBA-on-unexpected-BlockAck-Request.patch
|
|
@@ -0,0 +1,26 @@
|
|
+From: Johannes Berg <johannes.berg@intel.com>
|
|
+Date: Mon, 29 Aug 2016 23:25:19 +0300
|
|
+Subject: [PATCH] mac80211: send delBA on unexpected BlockAck Request
|
|
+
|
|
+If we don't have a BA session, send delBA, as requested by the
|
|
+IEEE 802.11 spec. Apply the same limit of sending such a delBA
|
|
+only once as in the previous patch.
|
|
+
|
|
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
|
|
+---
|
|
+
|
|
+--- a/net/mac80211/rx.c
|
|
++++ b/net/mac80211/rx.c
|
|
+@@ -2537,6 +2537,12 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_
|
|
+
|
|
+ tid = le16_to_cpu(bar_data.control) >> 12;
|
|
+
|
|
++ if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
|
|
++ !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
|
|
++ ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
|
|
++ WLAN_BACK_RECIPIENT,
|
|
++ WLAN_REASON_QSTA_REQUIRE_SETUP);
|
|
++
|
|
+ tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
|
|
+ if (!tid_agg_rx)
|
|
+ return RX_DROP_MONITOR;
|
|
diff --git a/package/kernel/mac80211/patches/344-mac80211-send-delBA-on-unexpected-BlockAck-data-fram.patch b/package/kernel/mac80211/patches/344-mac80211-send-delBA-on-unexpected-BlockAck-data-fram.patch
|
|
deleted file mode 100644
|
|
index 3bbca22..0000000
|
|
--- a/package/kernel/mac80211/patches/344-mac80211-send-delBA-on-unexpected-BlockAck-data-fram.patch
|
|
+++ /dev/null
|
|
@@ -1,64 +0,0 @@
|
|
-From: Johannes Berg <johannes.berg@intel.com>
|
|
-Date: Mon, 29 Aug 2016 23:25:18 +0300
|
|
-Subject: [PATCH] mac80211: send delBA on unexpected BlockAck data frames
|
|
-
|
|
-When we receive data frames with ACK policy BlockAck, send
|
|
-delBA as requested by the 802.11 spec. Since this would be
|
|
-happening for every frame inside an A-MPDU if it's really
|
|
-received outside a session, limit it to a single attempt.
|
|
-
|
|
-Signed-off-by: Johannes Berg <johannes.berg@intel.com>
|
|
----
|
|
-
|
|
---- a/net/mac80211/agg-rx.c
|
|
-+++ b/net/mac80211/agg-rx.c
|
|
-@@ -388,8 +388,10 @@ void __ieee80211_start_rx_ba_session(str
|
|
- }
|
|
-
|
|
- end:
|
|
-- if (status == WLAN_STATUS_SUCCESS)
|
|
-+ if (status == WLAN_STATUS_SUCCESS) {
|
|
- __set_bit(tid, sta->ampdu_mlme.agg_session_valid);
|
|
-+ __clear_bit(tid, sta->ampdu_mlme.unexpected_agg);
|
|
-+ }
|
|
- mutex_unlock(&sta->ampdu_mlme.mtx);
|
|
-
|
|
- end_no_lock:
|
|
---- a/net/mac80211/rx.c
|
|
-+++ b/net/mac80211/rx.c
|
|
-@@ -1072,8 +1072,15 @@ static void ieee80211_rx_reorder_ampdu(s
|
|
- tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
|
|
-
|
|
- tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
|
|
-- if (!tid_agg_rx)
|
|
-+ if (!tid_agg_rx) {
|
|
-+ if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
|
|
-+ !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
|
|
-+ !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
|
|
-+ ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
|
|
-+ WLAN_BACK_RECIPIENT,
|
|
-+ WLAN_REASON_QSTA_REQUIRE_SETUP);
|
|
- goto dont_reorder;
|
|
-+ }
|
|
-
|
|
- /* qos null data frames are excluded */
|
|
- if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
|
|
---- a/net/mac80211/sta_info.h
|
|
-+++ b/net/mac80211/sta_info.h
|
|
-@@ -230,6 +230,8 @@ struct tid_ampdu_rx {
|
|
- * @tid_rx_stop_requested: bitmap indicating which BA sessions per TID the
|
|
- * driver requested to close until the work for it runs
|
|
- * @agg_session_valid: bitmap indicating which TID has a rx BA session open on
|
|
-+ * @unexpected_agg: bitmap indicating which TID already sent a delBA due to
|
|
-+ * unexpected aggregation related frames outside a session
|
|
- * @work: work struct for starting/stopping aggregation
|
|
- * @tid_tx: aggregation info for Tx per TID
|
|
- * @tid_start_tx: sessions where start was requested
|
|
-@@ -244,6 +246,7 @@ struct sta_ampdu_mlme {
|
|
- unsigned long tid_rx_timer_expired[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
|
|
- unsigned long tid_rx_stop_requested[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
|
|
- unsigned long agg_session_valid[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
|
|
-+ unsigned long unexpected_agg[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
|
|
- /* tx */
|
|
- struct work_struct work;
|
|
- struct tid_ampdu_tx __rcu *tid_tx[IEEE80211_NUM_TIDS];
|
|
diff --git a/package/kernel/mac80211/patches/345-mac80211-Move-reorder-sensitive-TX-handlers-to-after.patch b/package/kernel/mac80211/patches/345-mac80211-Move-reorder-sensitive-TX-handlers-to-after.patch
|
|
new file mode 100644
|
|
index 0000000..aba1ff4
|
|
--- /dev/null
|
|
+++ b/package/kernel/mac80211/patches/345-mac80211-Move-reorder-sensitive-TX-handlers-to-after.patch
|
|
@@ -0,0 +1,478 @@
|
|
+From: Felix Fietkau <nbd@nbd.name>
|
|
+Date: Sun, 4 Sep 2016 17:46:24 +0200
|
|
+Subject: [PATCH] mac80211: fix sequence number assignment for PS response
|
|
+ frames
|
|
+
|
|
+When using intermediate queues, sequence number allocation is deferred
|
|
+until dequeue. This doesn't work for PS response frames, which bypass
|
|
+those queues.
|
|
+
|
|
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
|
|
+---
|
|
+
|
|
+--- a/net/mac80211/tx.c
|
|
++++ b/net/mac80211/tx.c
|
|
+@@ -38,6 +38,12 @@
|
|
+ #include "wme.h"
|
|
+ #include "rate.h"
|
|
+
|
|
++static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx);
|
|
++static bool ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
|
|
++ struct sta_info *sta, u8 pn_offs,
|
|
++ struct ieee80211_key_conf *key_conf,
|
|
++ struct sk_buff *skb);
|
|
++
|
|
+ /* misc utils */
|
|
+
|
|
+ static inline void ieee80211_tx_stats(struct net_device *dev, u32 len)
|
|
+@@ -849,8 +855,7 @@ ieee80211_tx_h_sequence(struct ieee80211
|
|
+ tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
|
|
+ tx->sta->tx_stats.msdu[tid]++;
|
|
+
|
|
+- if (!tx->sta->sta.txq[0])
|
|
+- hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
|
|
++ hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
|
|
+
|
|
+ return TX_CONTINUE;
|
|
+ }
|
|
+@@ -1398,6 +1403,7 @@ void ieee80211_txq_init(struct ieee80211
|
|
+ fq_tin_init(&txqi->tin);
|
|
+ fq_flow_init(&txqi->def_flow);
|
|
+ codel_vars_init(&txqi->def_cvars);
|
|
++ __skb_queue_head_init(&txqi->frags);
|
|
+
|
|
+ txqi->txq.vif = &sdata->vif;
|
|
+
|
|
+@@ -1420,6 +1426,7 @@ void ieee80211_txq_purge(struct ieee8021
|
|
+ struct fq_tin *tin = &txqi->tin;
|
|
+
|
|
+ fq_tin_reset(fq, tin, fq_skb_free_func);
|
|
++ ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
|
|
+ }
|
|
+
|
|
+ int ieee80211_txq_setup_flows(struct ieee80211_local *local)
|
|
+@@ -1476,12 +1483,19 @@ struct sk_buff *ieee80211_tx_dequeue(str
|
|
+ struct sk_buff *skb = NULL;
|
|
+ struct fq *fq = &local->fq;
|
|
+ struct fq_tin *tin = &txqi->tin;
|
|
++ struct ieee80211_tx_info *info;
|
|
+
|
|
+ spin_lock_bh(&fq->lock);
|
|
+
|
|
+ if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags))
|
|
+ goto out;
|
|
+
|
|
++ /* Make sure fragments stay together. */
|
|
++ skb = __skb_dequeue(&txqi->frags);
|
|
++ if (skb)
|
|
++ goto out;
|
|
++
|
|
++begin:
|
|
+ skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
|
|
+ if (!skb)
|
|
+ goto out;
|
|
+@@ -1489,16 +1503,38 @@ struct sk_buff *ieee80211_tx_dequeue(str
|
|
+ ieee80211_set_skb_vif(skb, txqi);
|
|
+
|
|
+ hdr = (struct ieee80211_hdr *)skb->data;
|
|
+- if (txq->sta && ieee80211_is_data_qos(hdr->frame_control)) {
|
|
++ info = IEEE80211_SKB_CB(skb);
|
|
++ if (txq->sta && info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
|
|
+ struct sta_info *sta = container_of(txq->sta, struct sta_info,
|
|
+ sta);
|
|
+- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
|
++ u8 pn_offs = 0;
|
|
+
|
|
+- hdr->seq_ctrl = ieee80211_tx_next_seq(sta, txq->tid);
|
|
+- if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
|
|
+- info->flags |= IEEE80211_TX_CTL_AMPDU;
|
|
+- else
|
|
+- info->flags &= ~IEEE80211_TX_CTL_AMPDU;
|
|
++ if (info->control.hw_key)
|
|
++ pn_offs = ieee80211_padded_hdrlen(hw, hdr->frame_control);
|
|
++
|
|
++ ieee80211_xmit_fast_finish(sta->sdata, sta, pn_offs,
|
|
++ info->control.hw_key, skb);
|
|
++ } else {
|
|
++ struct ieee80211_tx_data tx = { };
|
|
++
|
|
++ __skb_queue_head_init(&tx.skbs);
|
|
++ tx.local = local;
|
|
++ tx.skb = skb;
|
|
++ tx.hdrlen = ieee80211_padded_hdrlen(hw, hdr->frame_control);
|
|
++ if (txq->sta) {
|
|
++ tx.sta = container_of(txq->sta, struct sta_info, sta);
|
|
++ tx.sdata = tx.sta->sdata;
|
|
++ } else {
|
|
++ tx.sdata = vif_to_sdata(info->control.vif);
|
|
++ }
|
|
++
|
|
++ if (invoke_tx_handlers_late(&tx))
|
|
++ goto begin;
|
|
++
|
|
++ skb = __skb_dequeue(&tx.skbs);
|
|
++
|
|
++ if (!skb_queue_empty(&tx.skbs))
|
|
++ skb_queue_splice_tail(&tx.skbs, &txqi->frags);
|
|
+ }
|
|
+
|
|
+ out:
|
|
+@@ -1512,6 +1548,47 @@ out:
|
|
+ }
|
|
+ EXPORT_SYMBOL(ieee80211_tx_dequeue);
|
|
+
|
|
++static bool ieee80211_queue_skb(struct ieee80211_local *local,
|
|
++ struct ieee80211_sub_if_data *sdata,
|
|
++ struct sta_info *sta,
|
|
++ struct sk_buff *skb)
|
|
++{
|
|
++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
|
++ struct fq *fq = &local->fq;
|
|
++ struct ieee80211_vif *vif;
|
|
++ struct txq_info *txqi;
|
|
++ struct ieee80211_sta *pubsta;
|
|
++
|
|
++ if (!local->ops->wake_tx_queue ||
|
|
++ sdata->vif.type == NL80211_IFTYPE_MONITOR)
|
|
++ return false;
|
|
++
|
|
++ if (sta && sta->uploaded)
|
|
++ pubsta = &sta->sta;
|
|
++ else
|
|
++ pubsta = NULL;
|
|
++
|
|
++ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
|
|
++ sdata = container_of(sdata->bss,
|
|
++ struct ieee80211_sub_if_data, u.ap);
|
|
++
|
|
++ vif = &sdata->vif;
|
|
++ txqi = ieee80211_get_txq(local, vif, pubsta, skb);
|
|
++
|
|
++ if (!txqi)
|
|
++ return false;
|
|
++
|
|
++ info->control.vif = vif;
|
|
++
|
|
++ spin_lock_bh(&fq->lock);
|
|
++ ieee80211_txq_enqueue(local, txqi, skb);
|
|
++ spin_unlock_bh(&fq->lock);
|
|
++
|
|
++ drv_wake_tx_queue(local, txqi);
|
|
++
|
|
++ return true;
|
|
++}
|
|
++
|
|
+ static bool ieee80211_tx_frags(struct ieee80211_local *local,
|
|
+ struct ieee80211_vif *vif,
|
|
+ struct ieee80211_sta *sta,
|
|
+@@ -1519,9 +1596,7 @@ static bool ieee80211_tx_frags(struct ie
|
|
+ bool txpending)
|
|
+ {
|
|
+ struct ieee80211_tx_control control = {};
|
|
+- struct fq *fq = &local->fq;
|
|
+ struct sk_buff *skb, *tmp;
|
|
+- struct txq_info *txqi;
|
|
+ unsigned long flags;
|
|
+
|
|
+ skb_queue_walk_safe(skbs, skb, tmp) {
|
|
+@@ -1536,21 +1611,6 @@ static bool ieee80211_tx_frags(struct ie
|
|
+ }
|
|
+ #endif
|
|
+
|
|
+- txqi = ieee80211_get_txq(local, vif, sta, skb);
|
|
+- if (txqi) {
|
|
+- info->control.vif = vif;
|
|
+-
|
|
+- __skb_unlink(skb, skbs);
|
|
+-
|
|
+- spin_lock_bh(&fq->lock);
|
|
+- ieee80211_txq_enqueue(local, txqi, skb);
|
|
+- spin_unlock_bh(&fq->lock);
|
|
+-
|
|
+- drv_wake_tx_queue(local, txqi);
|
|
+-
|
|
+- continue;
|
|
+- }
|
|
+-
|
|
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
|
|
+ if (local->queue_stop_reasons[q] ||
|
|
+ (!txpending && !skb_queue_empty(&local->pending[q]))) {
|
|
+@@ -1671,10 +1731,13 @@ static bool __ieee80211_tx(struct ieee80
|
|
+ /*
|
|
+ * Invoke TX handlers, return 0 on success and non-zero if the
|
|
+ * frame was dropped or queued.
|
|
++ *
|
|
++ * The handlers are split into an early and late part. The latter is everything
|
|
++ * that can be sensitive to reordering, and will be deferred to after packets
|
|
++ * are dequeued from the intermediate queues (when they are enabled).
|
|
+ */
|
|
+-static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
|
|
++static int invoke_tx_handlers_early(struct ieee80211_tx_data *tx)
|
|
+ {
|
|
+- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
|
|
+ ieee80211_tx_result res = TX_DROP;
|
|
+
|
|
+ #define CALL_TXH(txh) \
|
|
+@@ -1688,16 +1751,42 @@ static int invoke_tx_handlers(struct iee
|
|
+ CALL_TXH(ieee80211_tx_h_check_assoc);
|
|
+ CALL_TXH(ieee80211_tx_h_ps_buf);
|
|
+ CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
|
|
+- CALL_TXH(ieee80211_tx_h_select_key);
|
|
++
|
|
+ if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
|
|
+ CALL_TXH(ieee80211_tx_h_rate_ctrl);
|
|
+
|
|
++ txh_done:
|
|
++ if (unlikely(res == TX_DROP)) {
|
|
++ I802_DEBUG_INC(tx->local->tx_handlers_drop);
|
|
++ if (tx->skb)
|
|
++ ieee80211_free_txskb(&tx->local->hw, tx->skb);
|
|
++ else
|
|
++ ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
|
|
++ return -1;
|
|
++ } else if (unlikely(res == TX_QUEUED)) {
|
|
++ I802_DEBUG_INC(tx->local->tx_handlers_queued);
|
|
++ return -1;
|
|
++ }
|
|
++
|
|
++ return 0;
|
|
++}
|
|
++
|
|
++/*
|
|
++ * Late handlers can be called while the sta lock is held. Handlers that can
|
|
++ * cause packets to be generated will cause deadlock!
|
|
++ */
|
|
++static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx)
|
|
++{
|
|
++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
|
|
++ ieee80211_tx_result res = TX_CONTINUE;
|
|
++
|
|
+ if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
|
|
+ __skb_queue_tail(&tx->skbs, tx->skb);
|
|
+ tx->skb = NULL;
|
|
+ goto txh_done;
|
|
+ }
|
|
+
|
|
++ CALL_TXH(ieee80211_tx_h_select_key);
|
|
+ CALL_TXH(ieee80211_tx_h_michael_mic_add);
|
|
+ CALL_TXH(ieee80211_tx_h_sequence);
|
|
+ CALL_TXH(ieee80211_tx_h_fragment);
|
|
+@@ -1724,6 +1813,15 @@ static int invoke_tx_handlers(struct iee
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
++static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
|
|
++{
|
|
++ int r = invoke_tx_handlers_early(tx);
|
|
++ if (r)
|
|
++ return r;
|
|
++
|
|
++ return invoke_tx_handlers_late(tx);
|
|
++}
|
|
++
|
|
+ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
|
|
+ struct ieee80211_vif *vif, struct sk_buff *skb,
|
|
+ int band, struct ieee80211_sta **sta)
|
|
+@@ -1798,7 +1896,13 @@ static bool ieee80211_tx(struct ieee8021
|
|
+ info->hw_queue =
|
|
+ sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
|
|
+
|
|
+- if (!invoke_tx_handlers(&tx))
|
|
++ if (invoke_tx_handlers_early(&tx))
|
|
++ return false;
|
|
++
|
|
++ if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
|
|
++ return true;
|
|
++
|
|
++ if (!invoke_tx_handlers_late(&tx))
|
|
+ result = __ieee80211_tx(local, &tx.skbs, led_len,
|
|
+ tx.sta, txpending);
|
|
+
|
|
+@@ -3181,7 +3285,7 @@ out:
|
|
+ }
|
|
+
|
|
+ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
|
|
+- struct net_device *dev, struct sta_info *sta,
|
|
++ struct sta_info *sta,
|
|
+ struct ieee80211_fast_tx *fast_tx,
|
|
+ struct sk_buff *skb)
|
|
+ {
|
|
+@@ -3192,9 +3296,9 @@ static bool ieee80211_xmit_fast(struct i
|
|
+ struct ethhdr eth;
|
|
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
|
+ struct ieee80211_hdr *hdr = (void *)fast_tx->hdr;
|
|
+- struct ieee80211_tx_data tx;
|
|
+- ieee80211_tx_result r;
|
|
+ struct tid_ampdu_tx *tid_tx = NULL;
|
|
++ ieee80211_tx_result r;
|
|
++ struct ieee80211_tx_data tx;
|
|
+ u8 tid = IEEE80211_NUM_TIDS;
|
|
+
|
|
+ /* control port protocol needs a lot of special handling */
|
|
+@@ -3232,8 +3336,6 @@ static bool ieee80211_xmit_fast(struct i
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+- ieee80211_tx_stats(dev, skb->len + extra_head);
|
|
+-
|
|
+ if ((hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) &&
|
|
+ ieee80211_amsdu_aggregate(sdata, sta, fast_tx, skb))
|
|
+ return true;
|
|
+@@ -3262,24 +3364,7 @@ static bool ieee80211_xmit_fast(struct i
|
|
+ info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT |
|
|
+ IEEE80211_TX_CTL_DONTFRAG |
|
|
+ (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0);
|
|
+-
|
|
+- if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
|
|
+- *ieee80211_get_qos_ctl(hdr) = tid;
|
|
+- if (!sta->sta.txq[0])
|
|
+- hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
|
|
+- } else {
|
|
+- info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
|
|
+- hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
|
|
+- sdata->sequence_number += 0x10;
|
|
+- }
|
|
+-
|
|
+- if (skb_shinfo(skb)->gso_size)
|
|
+- sta->tx_stats.msdu[tid] +=
|
|
+- DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
|
|
+- else
|
|
+- sta->tx_stats.msdu[tid]++;
|
|
+-
|
|
+- info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
|
|
++ info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT;
|
|
+
|
|
+ __skb_queue_head_init(&tx.skbs);
|
|
+
|
|
+@@ -3305,22 +3390,71 @@ static bool ieee80211_xmit_fast(struct i
|
|
+ }
|
|
+ }
|
|
+
|
|
++ if (ieee80211_queue_skb(local, sdata, sta, skb))
|
|
++ return true;
|
|
++
|
|
++ ieee80211_xmit_fast_finish(sdata, sta, fast_tx->pn_offs,
|
|
++ &fast_tx->key->conf, skb);
|
|
++
|
|
++ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
|
|
++ sdata = container_of(sdata->bss,
|
|
++ struct ieee80211_sub_if_data, u.ap);
|
|
++
|
|
++ __skb_queue_tail(&tx.skbs, skb);
|
|
++ ieee80211_tx_frags(local, &sdata->vif, &sta->sta, &tx.skbs, false);
|
|
++
|
|
++ return true;
|
|
++}
|
|
++
|
|
++/*
|
|
++ * Can be called while the sta lock is held. Anything that can cause packets to
|
|
++ * be generated will cause deadlock!
|
|
++ */
|
|
++static bool ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
|
|
++ struct sta_info *sta, u8 pn_offs,
|
|
++ struct ieee80211_key_conf *key_conf,
|
|
++ struct sk_buff *skb)
|
|
++{
|
|
++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
|
++ struct ieee80211_hdr *hdr = (void *)skb->data;
|
|
++ u8 tid = IEEE80211_NUM_TIDS;
|
|
++
|
|
++ ieee80211_tx_stats(skb->dev, skb->len);
|
|
++
|
|
++ if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
|
|
++ tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
|
|
++ *ieee80211_get_qos_ctl(hdr) = tid;
|
|
++ hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
|
|
++ } else {
|
|
++ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
|
|
++ hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
|
|
++ sdata->sequence_number += 0x10;
|
|
++ }
|
|
++
|
|
++ if (skb_shinfo(skb)->gso_size)
|
|
++ sta->tx_stats.msdu[tid] +=
|
|
++ DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
|
|
++ else
|
|
++ sta->tx_stats.msdu[tid]++;
|
|
++
|
|
++ info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
|
|
++
|
|
+ /* statistics normally done by ieee80211_tx_h_stats (but that
|
|
+ * has to consider fragmentation, so is more complex)
|
|
+ */
|
|
+ sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
|
|
+ sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
|
|
+
|
|
+- if (fast_tx->pn_offs) {
|
|
++ if (pn_offs && (key_conf->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
|
|
+ u64 pn;
|
|
+- u8 *crypto_hdr = skb->data + fast_tx->pn_offs;
|
|
++ u8 *crypto_hdr = skb->data + pn_offs;
|
|
+
|
|
+- switch (fast_tx->key->conf.cipher) {
|
|
++ switch (key_conf->cipher) {
|
|
+ case WLAN_CIPHER_SUITE_CCMP:
|
|
+ case WLAN_CIPHER_SUITE_CCMP_256:
|
|
+ case WLAN_CIPHER_SUITE_GCMP:
|
|
+ case WLAN_CIPHER_SUITE_GCMP_256:
|
|
+- pn = atomic64_inc_return(&fast_tx->key->conf.tx_pn);
|
|
++ pn = atomic64_inc_return(&key_conf->tx_pn);
|
|
+ crypto_hdr[0] = pn;
|
|
+ crypto_hdr[1] = pn >> 8;
|
|
+ crypto_hdr[4] = pn >> 16;
|
|
+@@ -3331,12 +3465,6 @@ static bool ieee80211_xmit_fast(struct i
|
|
+ }
|
|
+ }
|
|
+
|
|
+- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
|
|
+- sdata = container_of(sdata->bss,
|
|
+- struct ieee80211_sub_if_data, u.ap);
|
|
+-
|
|
+- __skb_queue_tail(&tx.skbs, skb);
|
|
+- ieee80211_tx_frags(local, &sdata->vif, &sta->sta, &tx.skbs, false);
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+@@ -3364,7 +3492,7 @@ void __ieee80211_subif_start_xmit(struct
|
|
+ fast_tx = rcu_dereference(sta->fast_tx);
|
|
+
|
|
+ if (fast_tx &&
|
|
+- ieee80211_xmit_fast(sdata, dev, sta, fast_tx, skb))
|
|
++ ieee80211_xmit_fast(sdata, sta, fast_tx, skb))
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+--- a/include/net/mac80211.h
|
|
++++ b/include/net/mac80211.h
|
|
+@@ -715,6 +715,7 @@ enum mac80211_tx_info_flags {
|
|
+ * frame (PS-Poll or uAPSD).
|
|
+ * @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information
|
|
+ * @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame
|
|
++ * @IEEE80211_TX_CTRL_FAST_XMIT: This frame is going through the fast_xmit path
|
|
+ *
|
|
+ * These flags are used in tx_info->control.flags.
|
|
+ */
|
|
+@@ -723,6 +724,7 @@ enum mac80211_tx_control_flags {
|
|
+ IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1),
|
|
+ IEEE80211_TX_CTRL_RATE_INJECT = BIT(2),
|
|
+ IEEE80211_TX_CTRL_AMSDU = BIT(3),
|
|
++ IEEE80211_TX_CTRL_FAST_XMIT = BIT(4),
|
|
+ };
|
|
+
|
|
+ /*
|
|
+--- a/net/mac80211/ieee80211_i.h
|
|
++++ b/net/mac80211/ieee80211_i.h
|
|
+@@ -814,11 +814,13 @@ enum txq_info_flags {
|
|
+ * @def_flow: used as a fallback flow when a packet destined to @tin hashes to
|
|
+ * a fq_flow which is already owned by a different tin
|
|
+ * @def_cvars: codel vars for @def_flow
|
|
++ * @frags: used to keep fragments created after dequeue
|
|
+ */
|
|
+ struct txq_info {
|
|
+ struct fq_tin tin;
|
|
+ struct fq_flow def_flow;
|
|
+ struct codel_vars def_cvars;
|
|
++ struct sk_buff_head frags;
|
|
+ unsigned long flags;
|
|
+
|
|
+ /* keep last! */
|
|
diff --git a/package/kernel/mac80211/patches/345-mac80211-send-delBA-on-unexpected-BlockAck-Request.patch b/package/kernel/mac80211/patches/345-mac80211-send-delBA-on-unexpected-BlockAck-Request.patch
|
|
deleted file mode 100644
|
|
index c3d3118..0000000
|
|
--- a/package/kernel/mac80211/patches/345-mac80211-send-delBA-on-unexpected-BlockAck-Request.patch
|
|
+++ /dev/null
|
|
@@ -1,26 +0,0 @@
|
|
-From: Johannes Berg <johannes.berg@intel.com>
|
|
-Date: Mon, 29 Aug 2016 23:25:19 +0300
|
|
-Subject: [PATCH] mac80211: send delBA on unexpected BlockAck Request
|
|
-
|
|
-If we don't have a BA session, send delBA, as requested by the
|
|
-IEEE 802.11 spec. Apply the same limit of sending such a delBA
|
|
-only once as in the previous patch.
|
|
-
|
|
-Signed-off-by: Johannes Berg <johannes.berg@intel.com>
|
|
----
|
|
-
|
|
---- a/net/mac80211/rx.c
|
|
-+++ b/net/mac80211/rx.c
|
|
-@@ -2537,6 +2537,12 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_
|
|
-
|
|
- tid = le16_to_cpu(bar_data.control) >> 12;
|
|
-
|
|
-+ if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
|
|
-+ !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
|
|
-+ ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
|
|
-+ WLAN_BACK_RECIPIENT,
|
|
-+ WLAN_REASON_QSTA_REQUIRE_SETUP);
|
|
-+
|
|
- tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
|
|
- if (!tid_agg_rx)
|
|
- return RX_DROP_MONITOR;
|
|
diff --git a/package/kernel/mac80211/patches/346-mac80211-Move-reorder-sensitive-TX-handlers-to-after.patch b/package/kernel/mac80211/patches/346-mac80211-Move-reorder-sensitive-TX-handlers-to-after.patch
|
|
deleted file mode 100644
|
|
index aba1ff4..0000000
|
|
--- a/package/kernel/mac80211/patches/346-mac80211-Move-reorder-sensitive-TX-handlers-to-after.patch
|
|
+++ /dev/null
|
|
@@ -1,478 +0,0 @@
|
|
-From: Felix Fietkau <nbd@nbd.name>
|
|
-Date: Sun, 4 Sep 2016 17:46:24 +0200
|
|
-Subject: [PATCH] mac80211: fix sequence number assignment for PS response
|
|
- frames
|
|
-
|
|
-When using intermediate queues, sequence number allocation is deferred
|
|
-until dequeue. This doesn't work for PS response frames, which bypass
|
|
-those queues.
|
|
-
|
|
-Signed-off-by: Felix Fietkau <nbd@nbd.name>
|
|
----
|
|
-
|
|
---- a/net/mac80211/tx.c
|
|
-+++ b/net/mac80211/tx.c
|
|
-@@ -38,6 +38,12 @@
|
|
- #include "wme.h"
|
|
- #include "rate.h"
|
|
-
|
|
-+static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx);
|
|
-+static bool ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
|
|
-+ struct sta_info *sta, u8 pn_offs,
|
|
-+ struct ieee80211_key_conf *key_conf,
|
|
-+ struct sk_buff *skb);
|
|
-+
|
|
- /* misc utils */
|
|
-
|
|
- static inline void ieee80211_tx_stats(struct net_device *dev, u32 len)
|
|
-@@ -849,8 +855,7 @@ ieee80211_tx_h_sequence(struct ieee80211
|
|
- tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
|
|
- tx->sta->tx_stats.msdu[tid]++;
|
|
-
|
|
-- if (!tx->sta->sta.txq[0])
|
|
-- hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
|
|
-+ hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
|
|
-
|
|
- return TX_CONTINUE;
|
|
- }
|
|
-@@ -1398,6 +1403,7 @@ void ieee80211_txq_init(struct ieee80211
|
|
- fq_tin_init(&txqi->tin);
|
|
- fq_flow_init(&txqi->def_flow);
|
|
- codel_vars_init(&txqi->def_cvars);
|
|
-+ __skb_queue_head_init(&txqi->frags);
|
|
-
|
|
- txqi->txq.vif = &sdata->vif;
|
|
-
|
|
-@@ -1420,6 +1426,7 @@ void ieee80211_txq_purge(struct ieee8021
|
|
- struct fq_tin *tin = &txqi->tin;
|
|
-
|
|
- fq_tin_reset(fq, tin, fq_skb_free_func);
|
|
-+ ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
|
|
- }
|
|
-
|
|
- int ieee80211_txq_setup_flows(struct ieee80211_local *local)
|
|
-@@ -1476,12 +1483,19 @@ struct sk_buff *ieee80211_tx_dequeue(str
|
|
- struct sk_buff *skb = NULL;
|
|
- struct fq *fq = &local->fq;
|
|
- struct fq_tin *tin = &txqi->tin;
|
|
-+ struct ieee80211_tx_info *info;
|
|
-
|
|
- spin_lock_bh(&fq->lock);
|
|
-
|
|
- if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags))
|
|
- goto out;
|
|
-
|
|
-+ /* Make sure fragments stay together. */
|
|
-+ skb = __skb_dequeue(&txqi->frags);
|
|
-+ if (skb)
|
|
-+ goto out;
|
|
-+
|
|
-+begin:
|
|
- skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
|
|
- if (!skb)
|
|
- goto out;
|
|
-@@ -1489,16 +1503,38 @@ struct sk_buff *ieee80211_tx_dequeue(str
|
|
- ieee80211_set_skb_vif(skb, txqi);
|
|
-
|
|
- hdr = (struct ieee80211_hdr *)skb->data;
|
|
-- if (txq->sta && ieee80211_is_data_qos(hdr->frame_control)) {
|
|
-+ info = IEEE80211_SKB_CB(skb);
|
|
-+ if (txq->sta && info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
|
|
- struct sta_info *sta = container_of(txq->sta, struct sta_info,
|
|
- sta);
|
|
-- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
|
-+ u8 pn_offs = 0;
|
|
-
|
|
-- hdr->seq_ctrl = ieee80211_tx_next_seq(sta, txq->tid);
|
|
-- if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
|
|
-- info->flags |= IEEE80211_TX_CTL_AMPDU;
|
|
-- else
|
|
-- info->flags &= ~IEEE80211_TX_CTL_AMPDU;
|
|
-+ if (info->control.hw_key)
|
|
-+ pn_offs = ieee80211_padded_hdrlen(hw, hdr->frame_control);
|
|
-+
|
|
-+ ieee80211_xmit_fast_finish(sta->sdata, sta, pn_offs,
|
|
-+ info->control.hw_key, skb);
|
|
-+ } else {
|
|
-+ struct ieee80211_tx_data tx = { };
|
|
-+
|
|
-+ __skb_queue_head_init(&tx.skbs);
|
|
-+ tx.local = local;
|
|
-+ tx.skb = skb;
|
|
-+ tx.hdrlen = ieee80211_padded_hdrlen(hw, hdr->frame_control);
|
|
-+ if (txq->sta) {
|
|
-+ tx.sta = container_of(txq->sta, struct sta_info, sta);
|
|
-+ tx.sdata = tx.sta->sdata;
|
|
-+ } else {
|
|
-+ tx.sdata = vif_to_sdata(info->control.vif);
|
|
-+ }
|
|
-+
|
|
-+ if (invoke_tx_handlers_late(&tx))
|
|
-+ goto begin;
|
|
-+
|
|
-+ skb = __skb_dequeue(&tx.skbs);
|
|
-+
|
|
-+ if (!skb_queue_empty(&tx.skbs))
|
|
-+ skb_queue_splice_tail(&tx.skbs, &txqi->frags);
|
|
- }
|
|
-
|
|
- out:
|
|
-@@ -1512,6 +1548,47 @@ out:
|
|
- }
|
|
- EXPORT_SYMBOL(ieee80211_tx_dequeue);
|
|
-
|
|
-+static bool ieee80211_queue_skb(struct ieee80211_local *local,
|
|
-+ struct ieee80211_sub_if_data *sdata,
|
|
-+ struct sta_info *sta,
|
|
-+ struct sk_buff *skb)
|
|
-+{
|
|
-+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
|
-+ struct fq *fq = &local->fq;
|
|
-+ struct ieee80211_vif *vif;
|
|
-+ struct txq_info *txqi;
|
|
-+ struct ieee80211_sta *pubsta;
|
|
-+
|
|
-+ if (!local->ops->wake_tx_queue ||
|
|
-+ sdata->vif.type == NL80211_IFTYPE_MONITOR)
|
|
-+ return false;
|
|
-+
|
|
-+ if (sta && sta->uploaded)
|
|
-+ pubsta = &sta->sta;
|
|
-+ else
|
|
-+ pubsta = NULL;
|
|
-+
|
|
-+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
|
|
-+ sdata = container_of(sdata->bss,
|
|
-+ struct ieee80211_sub_if_data, u.ap);
|
|
-+
|
|
-+ vif = &sdata->vif;
|
|
-+ txqi = ieee80211_get_txq(local, vif, pubsta, skb);
|
|
-+
|
|
-+ if (!txqi)
|
|
-+ return false;
|
|
-+
|
|
-+ info->control.vif = vif;
|
|
-+
|
|
-+ spin_lock_bh(&fq->lock);
|
|
-+ ieee80211_txq_enqueue(local, txqi, skb);
|
|
-+ spin_unlock_bh(&fq->lock);
|
|
-+
|
|
-+ drv_wake_tx_queue(local, txqi);
|
|
-+
|
|
-+ return true;
|
|
-+}
|
|
-+
|
|
- static bool ieee80211_tx_frags(struct ieee80211_local *local,
|
|
- struct ieee80211_vif *vif,
|
|
- struct ieee80211_sta *sta,
|
|
-@@ -1519,9 +1596,7 @@ static bool ieee80211_tx_frags(struct ie
|
|
- bool txpending)
|
|
- {
|
|
- struct ieee80211_tx_control control = {};
|
|
-- struct fq *fq = &local->fq;
|
|
- struct sk_buff *skb, *tmp;
|
|
-- struct txq_info *txqi;
|
|
- unsigned long flags;
|
|
-
|
|
- skb_queue_walk_safe(skbs, skb, tmp) {
|
|
-@@ -1536,21 +1611,6 @@ static bool ieee80211_tx_frags(struct ie
|
|
- }
|
|
- #endif
|
|
-
|
|
-- txqi = ieee80211_get_txq(local, vif, sta, skb);
|
|
-- if (txqi) {
|
|
-- info->control.vif = vif;
|
|
--
|
|
-- __skb_unlink(skb, skbs);
|
|
--
|
|
-- spin_lock_bh(&fq->lock);
|
|
-- ieee80211_txq_enqueue(local, txqi, skb);
|
|
-- spin_unlock_bh(&fq->lock);
|
|
--
|
|
-- drv_wake_tx_queue(local, txqi);
|
|
--
|
|
-- continue;
|
|
-- }
|
|
--
|
|
- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
|
|
- if (local->queue_stop_reasons[q] ||
|
|
- (!txpending && !skb_queue_empty(&local->pending[q]))) {
|
|
-@@ -1671,10 +1731,13 @@ static bool __ieee80211_tx(struct ieee80
|
|
- /*
|
|
- * Invoke TX handlers, return 0 on success and non-zero if the
|
|
- * frame was dropped or queued.
|
|
-+ *
|
|
-+ * The handlers are split into an early and late part. The latter is everything
|
|
-+ * that can be sensitive to reordering, and will be deferred to after packets
|
|
-+ * are dequeued from the intermediate queues (when they are enabled).
|
|
- */
|
|
--static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
|
|
-+static int invoke_tx_handlers_early(struct ieee80211_tx_data *tx)
|
|
- {
|
|
-- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
|
|
- ieee80211_tx_result res = TX_DROP;
|
|
-
|
|
- #define CALL_TXH(txh) \
|
|
-@@ -1688,16 +1751,42 @@ static int invoke_tx_handlers(struct iee
|
|
- CALL_TXH(ieee80211_tx_h_check_assoc);
|
|
- CALL_TXH(ieee80211_tx_h_ps_buf);
|
|
- CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
|
|
-- CALL_TXH(ieee80211_tx_h_select_key);
|
|
-+
|
|
- if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
|
|
- CALL_TXH(ieee80211_tx_h_rate_ctrl);
|
|
-
|
|
-+ txh_done:
|
|
-+ if (unlikely(res == TX_DROP)) {
|
|
-+ I802_DEBUG_INC(tx->local->tx_handlers_drop);
|
|
-+ if (tx->skb)
|
|
-+ ieee80211_free_txskb(&tx->local->hw, tx->skb);
|
|
-+ else
|
|
-+ ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
|
|
-+ return -1;
|
|
-+ } else if (unlikely(res == TX_QUEUED)) {
|
|
-+ I802_DEBUG_INC(tx->local->tx_handlers_queued);
|
|
-+ return -1;
|
|
-+ }
|
|
-+
|
|
-+ return 0;
|
|
-+}
|
|
-+
|
|
-+/*
|
|
-+ * Late handlers can be called while the sta lock is held. Handlers that can
|
|
-+ * cause packets to be generated will cause deadlock!
|
|
-+ */
|
|
-+static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx)
|
|
-+{
|
|
-+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
|
|
-+ ieee80211_tx_result res = TX_CONTINUE;
|
|
-+
|
|
- if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
|
|
- __skb_queue_tail(&tx->skbs, tx->skb);
|
|
- tx->skb = NULL;
|
|
- goto txh_done;
|
|
- }
|
|
-
|
|
-+ CALL_TXH(ieee80211_tx_h_select_key);
|
|
- CALL_TXH(ieee80211_tx_h_michael_mic_add);
|
|
- CALL_TXH(ieee80211_tx_h_sequence);
|
|
- CALL_TXH(ieee80211_tx_h_fragment);
|
|
-@@ -1724,6 +1813,15 @@ static int invoke_tx_handlers(struct iee
|
|
- return 0;
|
|
- }
|
|
-
|
|
-+static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
|
|
-+{
|
|
-+ int r = invoke_tx_handlers_early(tx);
|
|
-+ if (r)
|
|
-+ return r;
|
|
-+
|
|
-+ return invoke_tx_handlers_late(tx);
|
|
-+}
|
|
-+
|
|
- bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
|
|
- struct ieee80211_vif *vif, struct sk_buff *skb,
|
|
- int band, struct ieee80211_sta **sta)
|
|
-@@ -1798,7 +1896,13 @@ static bool ieee80211_tx(struct ieee8021
|
|
- info->hw_queue =
|
|
- sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
|
|
-
|
|
-- if (!invoke_tx_handlers(&tx))
|
|
-+ if (invoke_tx_handlers_early(&tx))
|
|
-+ return false;
|
|
-+
|
|
-+ if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
|
|
-+ return true;
|
|
-+
|
|
-+ if (!invoke_tx_handlers_late(&tx))
|
|
- result = __ieee80211_tx(local, &tx.skbs, led_len,
|
|
- tx.sta, txpending);
|
|
-
|
|
-@@ -3181,7 +3285,7 @@ out:
|
|
- }
|
|
-
|
|
- static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
|
|
-- struct net_device *dev, struct sta_info *sta,
|
|
-+ struct sta_info *sta,
|
|
- struct ieee80211_fast_tx *fast_tx,
|
|
- struct sk_buff *skb)
|
|
- {
|
|
-@@ -3192,9 +3296,9 @@ static bool ieee80211_xmit_fast(struct i
|
|
- struct ethhdr eth;
|
|
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
|
- struct ieee80211_hdr *hdr = (void *)fast_tx->hdr;
|
|
-- struct ieee80211_tx_data tx;
|
|
-- ieee80211_tx_result r;
|
|
- struct tid_ampdu_tx *tid_tx = NULL;
|
|
-+ ieee80211_tx_result r;
|
|
-+ struct ieee80211_tx_data tx;
|
|
- u8 tid = IEEE80211_NUM_TIDS;
|
|
-
|
|
- /* control port protocol needs a lot of special handling */
|
|
-@@ -3232,8 +3336,6 @@ static bool ieee80211_xmit_fast(struct i
|
|
- return true;
|
|
- }
|
|
-
|
|
-- ieee80211_tx_stats(dev, skb->len + extra_head);
|
|
--
|
|
- if ((hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) &&
|
|
- ieee80211_amsdu_aggregate(sdata, sta, fast_tx, skb))
|
|
- return true;
|
|
-@@ -3262,24 +3364,7 @@ static bool ieee80211_xmit_fast(struct i
|
|
- info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT |
|
|
- IEEE80211_TX_CTL_DONTFRAG |
|
|
- (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0);
|
|
--
|
|
-- if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
|
|
-- *ieee80211_get_qos_ctl(hdr) = tid;
|
|
-- if (!sta->sta.txq[0])
|
|
-- hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
|
|
-- } else {
|
|
-- info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
|
|
-- hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
|
|
-- sdata->sequence_number += 0x10;
|
|
-- }
|
|
--
|
|
-- if (skb_shinfo(skb)->gso_size)
|
|
-- sta->tx_stats.msdu[tid] +=
|
|
-- DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
|
|
-- else
|
|
-- sta->tx_stats.msdu[tid]++;
|
|
--
|
|
-- info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
|
|
-+ info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT;
|
|
-
|
|
- __skb_queue_head_init(&tx.skbs);
|
|
-
|
|
-@@ -3305,22 +3390,71 @@ static bool ieee80211_xmit_fast(struct i
|
|
- }
|
|
- }
|
|
-
|
|
-+ if (ieee80211_queue_skb(local, sdata, sta, skb))
|
|
-+ return true;
|
|
-+
|
|
-+ ieee80211_xmit_fast_finish(sdata, sta, fast_tx->pn_offs,
|
|
-+ &fast_tx->key->conf, skb);
|
|
-+
|
|
-+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
|
|
-+ sdata = container_of(sdata->bss,
|
|
-+ struct ieee80211_sub_if_data, u.ap);
|
|
-+
|
|
-+ __skb_queue_tail(&tx.skbs, skb);
|
|
-+ ieee80211_tx_frags(local, &sdata->vif, &sta->sta, &tx.skbs, false);
|
|
-+
|
|
-+ return true;
|
|
-+}
|
|
-+
|
|
-+/*
|
|
-+ * Can be called while the sta lock is held. Anything that can cause packets to
|
|
-+ * be generated will cause deadlock!
|
|
-+ */
|
|
-+static bool ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
|
|
-+ struct sta_info *sta, u8 pn_offs,
|
|
-+ struct ieee80211_key_conf *key_conf,
|
|
-+ struct sk_buff *skb)
|
|
-+{
|
|
-+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
|
-+ struct ieee80211_hdr *hdr = (void *)skb->data;
|
|
-+ u8 tid = IEEE80211_NUM_TIDS;
|
|
-+
|
|
-+ ieee80211_tx_stats(skb->dev, skb->len);
|
|
-+
|
|
-+ if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
|
|
-+ tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
|
|
-+ *ieee80211_get_qos_ctl(hdr) = tid;
|
|
-+ hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
|
|
-+ } else {
|
|
-+ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
|
|
-+ hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
|
|
-+ sdata->sequence_number += 0x10;
|
|
-+ }
|
|
-+
|
|
-+ if (skb_shinfo(skb)->gso_size)
|
|
-+ sta->tx_stats.msdu[tid] +=
|
|
-+ DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
|
|
-+ else
|
|
-+ sta->tx_stats.msdu[tid]++;
|
|
-+
|
|
-+ info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
|
|
-+
|
|
- /* statistics normally done by ieee80211_tx_h_stats (but that
|
|
- * has to consider fragmentation, so is more complex)
|
|
- */
|
|
- sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
|
|
- sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
|
|
-
|
|
-- if (fast_tx->pn_offs) {
|
|
-+ if (pn_offs && (key_conf->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
|
|
- u64 pn;
|
|
-- u8 *crypto_hdr = skb->data + fast_tx->pn_offs;
|
|
-+ u8 *crypto_hdr = skb->data + pn_offs;
|
|
-
|
|
-- switch (fast_tx->key->conf.cipher) {
|
|
-+ switch (key_conf->cipher) {
|
|
- case WLAN_CIPHER_SUITE_CCMP:
|
|
- case WLAN_CIPHER_SUITE_CCMP_256:
|
|
- case WLAN_CIPHER_SUITE_GCMP:
|
|
- case WLAN_CIPHER_SUITE_GCMP_256:
|
|
-- pn = atomic64_inc_return(&fast_tx->key->conf.tx_pn);
|
|
-+ pn = atomic64_inc_return(&key_conf->tx_pn);
|
|
- crypto_hdr[0] = pn;
|
|
- crypto_hdr[1] = pn >> 8;
|
|
- crypto_hdr[4] = pn >> 16;
|
|
-@@ -3331,12 +3465,6 @@ static bool ieee80211_xmit_fast(struct i
|
|
- }
|
|
- }
|
|
-
|
|
-- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
|
|
-- sdata = container_of(sdata->bss,
|
|
-- struct ieee80211_sub_if_data, u.ap);
|
|
--
|
|
-- __skb_queue_tail(&tx.skbs, skb);
|
|
-- ieee80211_tx_frags(local, &sdata->vif, &sta->sta, &tx.skbs, false);
|
|
- return true;
|
|
- }
|
|
-
|
|
-@@ -3364,7 +3492,7 @@ void __ieee80211_subif_start_xmit(struct
|
|
- fast_tx = rcu_dereference(sta->fast_tx);
|
|
-
|
|
- if (fast_tx &&
|
|
-- ieee80211_xmit_fast(sdata, dev, sta, fast_tx, skb))
|
|
-+ ieee80211_xmit_fast(sdata, sta, fast_tx, skb))
|
|
- goto out;
|
|
- }
|
|
-
|
|
---- a/include/net/mac80211.h
|
|
-+++ b/include/net/mac80211.h
|
|
-@@ -715,6 +715,7 @@ enum mac80211_tx_info_flags {
|
|
- * frame (PS-Poll or uAPSD).
|
|
- * @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information
|
|
- * @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame
|
|
-+ * @IEEE80211_TX_CTRL_FAST_XMIT: This frame is going through the fast_xmit path
|
|
- *
|
|
- * These flags are used in tx_info->control.flags.
|
|
- */
|
|
-@@ -723,6 +724,7 @@ enum mac80211_tx_control_flags {
|
|
- IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1),
|
|
- IEEE80211_TX_CTRL_RATE_INJECT = BIT(2),
|
|
- IEEE80211_TX_CTRL_AMSDU = BIT(3),
|
|
-+ IEEE80211_TX_CTRL_FAST_XMIT = BIT(4),
|
|
- };
|
|
-
|
|
- /*
|
|
---- a/net/mac80211/ieee80211_i.h
|
|
-+++ b/net/mac80211/ieee80211_i.h
|
|
-@@ -814,11 +814,13 @@ enum txq_info_flags {
|
|
- * @def_flow: used as a fallback flow when a packet destined to @tin hashes to
|
|
- * a fq_flow which is already owned by a different tin
|
|
- * @def_cvars: codel vars for @def_flow
|
|
-+ * @frags: used to keep fragments created after dequeue
|
|
- */
|
|
- struct txq_info {
|
|
- struct fq_tin tin;
|
|
- struct fq_flow def_flow;
|
|
- struct codel_vars def_cvars;
|
|
-+ struct sk_buff_head frags;
|
|
- unsigned long flags;
|
|
-
|
|
- /* keep last! */