8aa65f1c2c
Also add some patches from OpenWrt trunk and use the CT ath10k firmware.
From: Matthias Schiffer <mschiffer@universe-factory.net>
Date: Tue, 10 Mar 2015 12:40:53 +0100
Subject: mac80211: update ath10k to compat-wireless-2015-03-05

Taken from http://openwrt.reigndropsfall.net/
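
For reference, a minimal driver-side sketch (hypothetical handler, not part of this commit) of how the two entry points exported by 917-mac80211-rx-reordering.patch are meant to be driven once a firmware-offload driver such as ath10k learns that its firmware completed an AddBa/DelBa negotiation:

    /* Hypothetical example: a firmware event handler in an offload
     * driver reacting to a BA session the firmware negotiated itself.
     */
    static void example_fw_ampdu_event(struct ieee80211_vif *vif,
                                       const u8 *peer_addr, u16 tid,
                                       bool started)
    {
            if (started)
                    /* firmware finished AddBa; have mac80211 set up
                     * the Rx reorder buffer for this TID */
                    ieee80211_start_rx_ba_session_offl(vif, peer_addr, tid);
            else
                    /* firmware finished DelBa; tear the reorder
                     * structures back down */
                    ieee80211_stop_rx_ba_session_offl(vif, peer_addr, tid);
    }
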
diff --git a/package/kernel/mac80211/patches/917-mac80211-rx-reordering.patch b/package/kernel/mac80211/patches/917-mac80211-rx-reordering.patch
new file mode 100644
index 0000000..1d0c559
--- /dev/null
+++ b/package/kernel/mac80211/patches/917-mac80211-rx-reordering.patch
@@ -0,0 +1,271 @@
+commit 08cf42e843f9a7e253502011c81677f61f7e5c42
+Author: Michal Kazior <michal.kazior@tieto.com>
+Date: Wed Jul 16 12:12:15 2014 +0200
+
+ mac80211: add support for Rx reordering offloading
+
+ Some drivers may be performing most of Tx/Rx
+ aggregation on their own (e.g. in firmware)
+ including AddBa/DelBa negotiations but may
+ otherwise require Rx reordering assistance.
+
+ The patch exports 2 new functions for establishing
+ Rx aggregation sessions in assumption device
+ driver has taken care of the necessary
+ negotiations.
+
+ Signed-off-by: Michal Kazior <michal.kazior@tieto.com>
+ [fix endian bug]
+ Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -4481,6 +4481,40 @@ void ieee80211_stop_rx_ba_session(struct
+ */
+ void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn);
+
++/**
++ * ieee80211_start_rx_ba_session_offl - start a Rx BA session
++ *
++ * Some device drivers may offload part of the Rx aggregation flow including
++ * AddBa/DelBa negotiation but may otherwise be incapable of full Rx
++ * reordering.
++ *
++ * Create structures responsible for reordering so device drivers may call here
++ * when they complete AddBa negotiation.
++ *
++ * @vif: &struct ieee80211_vif pointer from the add_interface callback
++ * @addr: station mac address
++ * @tid: the rx tid
++ */
++void ieee80211_start_rx_ba_session_offl(struct ieee80211_vif *vif,
++ const u8 *addr, u16 tid);
++
++/**
++ * ieee80211_stop_rx_ba_session_offl - stop a Rx BA session
++ *
++ * Some device drivers may offload part of the Rx aggregation flow including
++ * AddBa/DelBa negotiation but may otherwise be incapable of full Rx
++ * reordering.
++ *
++ * Destroy structures responsible for reordering so device drivers may call here
++ * when they complete DelBa negotiation.
++ *
++ * @vif: &struct ieee80211_vif pointer from the add_interface callback
++ * @addr: station mac address
++ * @tid: the rx tid
++ */
++void ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif,
++ const u8 *addr, u16 tid);
++
+ /* Rate control API */
+
+ /**
+--- a/net/mac80211/agg-rx.c
++++ b/net/mac80211/agg-rx.c
+@@ -224,28 +224,15 @@ static void ieee80211_send_addba_resp(st
+ ieee80211_tx_skb(sdata, skb);
+ }
+
+-void ieee80211_process_addba_request(struct ieee80211_local *local,
+- struct sta_info *sta,
+- struct ieee80211_mgmt *mgmt,
+- size_t len)
++void __ieee80211_start_rx_ba_session(struct sta_info *sta,
++ u8 dialog_token, u16 timeout,
++ u16 start_seq_num, u16 ba_policy, u16 tid,
++ u16 buf_size, bool tx)
+ {
++ struct ieee80211_local *local = sta->sdata->local;
+ struct tid_ampdu_rx *tid_agg_rx;
+- u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status;
+- u8 dialog_token;
+ int ret = -EOPNOTSUPP;
+-
+- /* extract session parameters from addba request frame */
+- dialog_token = mgmt->u.action.u.addba_req.dialog_token;
+- timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout);
+- start_seq_num =
+- le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4;
+-
+- capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+- ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1;
+- tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
+- buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
+-
+- status = WLAN_STATUS_REQUEST_DECLINED;
++ u16 status = WLAN_STATUS_REQUEST_DECLINED;
+
+ if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
+ ht_dbg(sta->sdata,
+@@ -264,7 +251,7 @@ void ieee80211_process_addba_request(str
+ status = WLAN_STATUS_INVALID_QOS_PARAM;
+ ht_dbg_ratelimited(sta->sdata,
+ "AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n",
+- mgmt->sa, tid, ba_policy, buf_size);
++ sta->sta.addr, tid, ba_policy, buf_size);
+ goto end_no_lock;
+ }
+ /* determine default buffer size */
+@@ -281,7 +268,7 @@ void ieee80211_process_addba_request(str
+ if (sta->ampdu_mlme.tid_rx[tid]) {
+ ht_dbg_ratelimited(sta->sdata,
+ "unexpected AddBA Req from %pM on tid %u\n",
+- mgmt->sa, tid);
++ sta->sta.addr, tid);
+
+ /* delete existing Rx BA session on the same tid */
+ ___ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
+@@ -350,6 +337,74 @@ end:
+ mutex_unlock(&sta->ampdu_mlme.mtx);
+
+ end_no_lock:
+- ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid,
+- dialog_token, status, 1, buf_size, timeout);
++ if (tx)
++ ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid,
++ dialog_token, status, 1, buf_size,
++ timeout);
++}
++
++void ieee80211_process_addba_request(struct ieee80211_local *local,
++ struct sta_info *sta,
++ struct ieee80211_mgmt *mgmt,
++ size_t len)
++{
++ u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num;
++ u8 dialog_token;
++
++ /* extract session parameters from addba request frame */
++ dialog_token = mgmt->u.action.u.addba_req.dialog_token;
++ timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout);
++ start_seq_num =
++ le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4;
++
++ capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
++ ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1;
++ tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
++ buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
++
++ __ieee80211_start_rx_ba_session(sta, dialog_token, timeout,
++ start_seq_num, ba_policy, tid,
++ buf_size, true);
++}
++
++void ieee80211_start_rx_ba_session_offl(struct ieee80211_vif *vif,
++ const u8 *addr, u16 tid)
++{
++ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
++ struct ieee80211_local *local = sdata->local;
++ struct ieee80211_rx_agg *rx_agg;
++ struct sk_buff *skb = dev_alloc_skb(0);
++
++ if (unlikely(!skb))
++ return;
++
++ rx_agg = (struct ieee80211_rx_agg *) &skb->cb;
++ memcpy(&rx_agg->addr, addr, ETH_ALEN);
++ rx_agg->tid = tid;
++
++ skb->pkt_type = IEEE80211_SDATA_QUEUE_RX_AGG_START;
++ skb_queue_tail(&sdata->skb_queue, skb);
++ ieee80211_queue_work(&local->hw, &sdata->work);
++}
++EXPORT_SYMBOL(ieee80211_start_rx_ba_session_offl);
++
++void ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif,
++ const u8 *addr, u16 tid)
++{
++ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
++ struct ieee80211_local *local = sdata->local;
++ struct ieee80211_rx_agg *rx_agg;
++ struct sk_buff *skb = dev_alloc_skb(0);
++
++ if (unlikely(!skb))
++ return;
++
++ rx_agg = (struct ieee80211_rx_agg *) &skb->cb;
++ memcpy(&rx_agg->addr, addr, ETH_ALEN);
++ rx_agg->tid = tid;
++
++ skb->pkt_type = IEEE80211_SDATA_QUEUE_RX_AGG_STOP;
++ skb_queue_tail(&sdata->skb_queue, skb);
++ ieee80211_queue_work(&local->hw, &sdata->work);
+ }
++EXPORT_SYMBOL(ieee80211_stop_rx_ba_session_offl);
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -902,10 +902,17 @@ ieee80211_vif_get_shift(struct ieee80211
+ return shift;
+ }
+
++struct ieee80211_rx_agg {
++ u8 addr[ETH_ALEN];
++ u16 tid;
++};
++
+ enum sdata_queue_type {
+ IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0,
+ IEEE80211_SDATA_QUEUE_AGG_START = 1,
+ IEEE80211_SDATA_QUEUE_AGG_STOP = 2,
++ IEEE80211_SDATA_QUEUE_RX_AGG_START = 3,
++ IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4,
+ };
+
+ enum {
+@@ -1554,6 +1561,10 @@ void ___ieee80211_stop_rx_ba_session(str
+ u16 initiator, u16 reason, bool stop);
+ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
+ u16 initiator, u16 reason, bool stop);
++void __ieee80211_start_rx_ba_session(struct sta_info *sta,
++ u8 dialog_token, u16 timeout,
++ u16 start_seq_num, u16 ba_policy, u16 tid,
++ u16 buf_size, bool tx);
+ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
+ enum ieee80211_agg_stop_reason reason);
+ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -1154,6 +1154,7 @@ static void ieee80211_iface_work(struct
+ struct sk_buff *skb;
+ struct sta_info *sta;
+ struct ieee80211_ra_tid *ra_tid;
++ struct ieee80211_rx_agg *rx_agg;
+
+ if (!ieee80211_sdata_running(sdata))
+ return;
+@@ -1181,6 +1182,34 @@ static void ieee80211_iface_work(struct
+ ra_tid = (void *)&skb->cb;
+ ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
+ ra_tid->tid);
++ } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
++ rx_agg = (void *)&skb->cb;
++ mutex_lock(&local->sta_mtx);
++ sta = sta_info_get_bss(sdata, rx_agg->addr);
++ if (sta) {
++ u16 last_seq;
++
++ last_seq = le16_to_cpu(
++ sta->last_seq_ctrl[rx_agg->tid]);
++
++ __ieee80211_start_rx_ba_session(sta,
++ 0, 0,
++ ieee80211_sn_inc(last_seq),
++ 1, rx_agg->tid,
++ IEEE80211_MAX_AMPDU_BUF,
++ false);
++ }
++ mutex_unlock(&local->sta_mtx);
++ } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_STOP) {
++ rx_agg = (void *)&skb->cb;
++ mutex_lock(&local->sta_mtx);
++ sta = sta_info_get_bss(sdata, rx_agg->addr);
++ if (sta)
++ __ieee80211_stop_rx_ba_session(sta,
++ rx_agg->tid,
++ WLAN_BACK_RECIPIENT, 0,
++ false);
++ mutex_unlock(&local->sta_mtx);
+ } else if (ieee80211_is_action(mgmt->frame_control) &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK) {
+ int len = skb->len;
diff --git a/package/kernel/mac80211/patches/918-ath-spectral-debugfs.patch b/package/kernel/mac80211/patches/918-ath-spectral-debugfs.patch
new file mode 100644
index 0000000..d0c1bbd
--- /dev/null
+++ b/package/kernel/mac80211/patches/918-ath-spectral-debugfs.patch
@@ -0,0 +1,192 @@
+--- a/drivers/net/wireless/ath/ath9k/spectral.h
++++ b/drivers/net/wireless/ath/ath9k/spectral.h
+@@ -17,6 +17,8 @@
+ #ifndef SPECTRAL_H
+ #define SPECTRAL_H
+
++#include "../spectral_common.h"
++
+ /* enum spectral_mode:
+ *
+ * @SPECTRAL_DISABLED: spectral mode is disabled
+@@ -54,8 +56,6 @@ struct ath_ht20_mag_info {
+ u8 max_exp;
+ } __packed;
+
+-#define SPECTRAL_HT20_NUM_BINS 56
+-
+ /* WARNING: don't actually use this struct! MAC may vary the amount of
+ * data by -1/+2. This struct is for reference only.
+ */
+@@ -83,8 +83,6 @@ struct ath_ht20_40_mag_info {
+ u8 max_exp;
+ } __packed;
+
+-#define SPECTRAL_HT20_40_NUM_BINS 128
+-
+ /* WARNING: don't actually use this struct! MAC may vary the amount of
+ * data. This struct is for reference only.
+ */
+@@ -125,71 +123,6 @@ static inline u8 spectral_bitmap_weight(
+ return bins[0] & 0x3f;
+ }
+
+-/* FFT sample format given to userspace via debugfs.
+- *
+- * Please keep the type/length at the front position and change
+- * other fields after adding another sample type
+- *
+- * TODO: this might need rework when switching to nl80211-based
+- * interface.
+- */
+-enum ath_fft_sample_type {
+- ATH_FFT_SAMPLE_HT20 = 1,
+- ATH_FFT_SAMPLE_HT20_40,
+-};
+-
+-struct fft_sample_tlv {
+- u8 type; /* see ath_fft_sample */
+- __be16 length;
+- /* type dependent data follows */
+-} __packed;
+-
+-struct fft_sample_ht20 {
+- struct fft_sample_tlv tlv;
+-
+- u8 max_exp;
+-
+- __be16 freq;
+- s8 rssi;
+- s8 noise;
+-
+- __be16 max_magnitude;
+- u8 max_index;
+- u8 bitmap_weight;
+-
+- __be64 tsf;
+-
+- u8 data[SPECTRAL_HT20_NUM_BINS];
+-} __packed;
+-
+-struct fft_sample_ht20_40 {
+- struct fft_sample_tlv tlv;
+-
+- u8 channel_type;
+- __be16 freq;
+-
+- s8 lower_rssi;
+- s8 upper_rssi;
+-
+- __be64 tsf;
+-
+- s8 lower_noise;
+- s8 upper_noise;
+-
+- __be16 lower_max_magnitude;
+- __be16 upper_max_magnitude;
+-
+- u8 lower_max_index;
+- u8 upper_max_index;
+-
+- u8 lower_bitmap_weight;
+- u8 upper_bitmap_weight;
+-
+- u8 max_exp;
+-
+- u8 data[SPECTRAL_HT20_40_NUM_BINS];
+-} __packed;
+-
+ void ath9k_spectral_init_debug(struct ath_softc *sc);
+ void ath9k_spectral_deinit_debug(struct ath_softc *sc);
+
+--- /dev/null
++++ b/drivers/net/wireless/ath/spectral_common.h
+@@ -0,0 +1,88 @@
++/*
++ * Copyright (c) 2013 Qualcomm Atheros, Inc.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#ifndef SPECTRAL_COMMON_H
++#define SPECTRAL_COMMON_H
++
++#define SPECTRAL_HT20_NUM_BINS 56
++#define SPECTRAL_HT20_40_NUM_BINS 128
++
++/* FFT sample format given to userspace via debugfs.
++ *
++ * Please keep the type/length at the front position and change
++ * other fields after adding another sample type
++ *
++ * TODO: this might need rework when switching to nl80211-based
++ * interface.
++ */
++enum ath_fft_sample_type {
++ ATH_FFT_SAMPLE_HT20 = 1,
++ ATH_FFT_SAMPLE_HT20_40,
++};
++
++struct fft_sample_tlv {
++ u8 type; /* see ath_fft_sample */
++ __be16 length;
++ /* type dependent data follows */
++} __packed;
++
++struct fft_sample_ht20 {
++ struct fft_sample_tlv tlv;
++
++ u8 max_exp;
++
++ __be16 freq;
++ s8 rssi;
++ s8 noise;
++
++ __be16 max_magnitude;
++ u8 max_index;
++ u8 bitmap_weight;
++
++ __be64 tsf;
++
++ u8 data[SPECTRAL_HT20_NUM_BINS];
++} __packed;
++
++struct fft_sample_ht20_40 {
++ struct fft_sample_tlv tlv;
++
++ u8 channel_type;
++ __be16 freq;
++
++ s8 lower_rssi;
++ s8 upper_rssi;
++
++ __be64 tsf;
++
++ s8 lower_noise;
++ s8 upper_noise;
++
++ __be16 lower_max_magnitude;
++ __be16 upper_max_magnitude;
++
++ u8 lower_max_index;
++ u8 upper_max_index;
++
++ u8 lower_bitmap_weight;
++ u8 upper_bitmap_weight;
++
++ u8 max_exp;
++
++ u8 data[SPECTRAL_HT20_40_NUM_BINS];
++} __packed;
++
++#endif /* SPECTRAL_COMMON_H */
diff --git a/package/kernel/mac80211/patches/919-update-ath10k.patch b/package/kernel/mac80211/patches/919-update-ath10k.patch
new file mode 100644
index 0000000..0f5d87a
--- /dev/null
+++ b/package/kernel/mac80211/patches/919-update-ath10k.patch
@@ -0,0 +1,32869 @@
+--- a/drivers/net/wireless/ath/ath10k/Kconfig
++++ b/drivers/net/wireless/ath/ath10k/Kconfig
+@@ -26,13 +26,15 @@ config ATH10K_DEBUG
+
+ config ATH10K_DEBUGFS
+ bool "Atheros ath10k debugfs support"
+- depends on ATH10K
++ depends on ATH10K && DEBUG_FS
++ depends on RELAY
+ ---help---
+ Enabled debugfs support
+
+ If unsure, say Y to make it easier to debug problems.
+
+ config ATH10K_TRACING
++ depends on !KERNEL_3_4
+ bool "Atheros ath10k tracing support"
+ depends on ATH10K
+ depends on EVENT_TRACING
+--- a/drivers/net/wireless/ath/ath10k/Makefile
++++ b/drivers/net/wireless/ath/ath10k/Makefile
+@@ -8,9 +8,15 @@ ath10k_core-y += mac.o \
+ htt_tx.o \
+ txrx.o \
+ wmi.o \
+- bmi.o
++ wmi-tlv.o \
++ bmi.o \
++ hw.o
+
++ath10k_core-$(CPTCFG_ATH10K_DEBUGFS) += spectral.o
++ath10k_core-$(CPTCFG_NL80211_TESTMODE) += testmode.o
+ ath10k_core-$(CPTCFG_ATH10K_TRACING) += trace.o
++ath10k_core-$(CONFIG_THERMAL) += thermal.o
++ath10k_core-$(CPTCFG_MAC80211_DEBUGFS) += debugfs_sta.o
+
+ obj-$(CPTCFG_ATH10K_PCI) += ath10k_pci.o
+ ath10k_pci-y += pci.o \
+--- a/drivers/net/wireless/ath/ath10k/bmi.c
++++ b/drivers/net/wireless/ath/ath10k/bmi.c
+@@ -22,7 +22,7 @@
+
+ void ath10k_bmi_start(struct ath10k *ar)
+ {
+- ath10k_dbg(ATH10K_DBG_BMI, "bmi start\n");
++ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");
+
+ ar->bmi.done_sent = false;
+ }
+@@ -33,10 +33,10 @@ int ath10k_bmi_done(struct ath10k *ar)
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
+ int ret;
+
+- ath10k_dbg(ATH10K_DBG_BMI, "bmi done\n");
++ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");
+
+ if (ar->bmi.done_sent) {
+- ath10k_dbg(ATH10K_DBG_BMI, "bmi skipped\n");
++ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
+ return 0;
+ }
+
+@@ -45,7 +45,7 @@ int ath10k_bmi_done(struct ath10k *ar)
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+ if (ret) {
+- ath10k_warn("unable to write to the device: %d\n", ret);
++ ath10k_warn(ar, "unable to write to the device: %d\n", ret);
+ return ret;
+ }
+
+@@ -61,10 +61,10 @@ int ath10k_bmi_get_target_info(struct at
+ u32 resplen = sizeof(resp.get_target_info);
+ int ret;
+
+- ath10k_dbg(ATH10K_DBG_BMI, "bmi get target info\n");
++ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");
+
+ if (ar->bmi.done_sent) {
+- ath10k_warn("BMI Get Target Info Command disallowed\n");
++ ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
+ return -EBUSY;
+ }
+
+@@ -72,12 +72,12 @@ int ath10k_bmi_get_target_info(struct at
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
+ if (ret) {
+- ath10k_warn("unable to get target info from device\n");
++ ath10k_warn(ar, "unable to get target info from device\n");
+ return ret;
+ }
+
+ if (resplen < sizeof(resp.get_target_info)) {
+- ath10k_warn("invalid get_target_info response length (%d)\n",
++ ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
+ resplen);
+ return -EIO;
+ }
+@@ -97,11 +97,11 @@ int ath10k_bmi_read_memory(struct ath10k
+ u32 rxlen;
+ int ret;
+
+- ath10k_dbg(ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
++ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
+ address, length);
+
+ if (ar->bmi.done_sent) {
+- ath10k_warn("command disallowed\n");
++ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+@@ -115,7 +115,7 @@ int ath10k_bmi_read_memory(struct ath10k
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
+ &resp, &rxlen);
+ if (ret) {
+- ath10k_warn("unable to read from the device (%d)\n",
++ ath10k_warn(ar, "unable to read from the device (%d)\n",
+ ret);
+ return ret;
+ }
+@@ -137,11 +137,11 @@ int ath10k_bmi_write_memory(struct ath10
+ u32 txlen;
+ int ret;
+
+- ath10k_dbg(ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
++ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
+ address, length);
+
+ if (ar->bmi.done_sent) {
+- ath10k_warn("command disallowed\n");
++ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+@@ -159,7 +159,7 @@ int ath10k_bmi_write_memory(struct ath10
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
+ NULL, NULL);
+ if (ret) {
+- ath10k_warn("unable to write to the device (%d)\n",
++ ath10k_warn(ar, "unable to write to the device (%d)\n",
+ ret);
+ return ret;
+ }
+@@ -183,11 +183,11 @@ int ath10k_bmi_execute(struct ath10k *ar
+ u32 resplen = sizeof(resp.execute);
+ int ret;
+
+- ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
++ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
+ address, param);
+
+ if (ar->bmi.done_sent) {
+- ath10k_warn("command disallowed\n");
++ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+@@ -197,19 +197,19 @@ int ath10k_bmi_execute(struct ath10k *ar
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
+ if (ret) {
+- ath10k_warn("unable to read from the device\n");
++ ath10k_warn(ar, "unable to read from the device\n");
+ return ret;
+ }
+
+ if (resplen < sizeof(resp.execute)) {
+- ath10k_warn("invalid execute response length (%d)\n",
++ ath10k_warn(ar, "invalid execute response length (%d)\n",
+ resplen);
+ return -EIO;
+ }
+
+ *result = __le32_to_cpu(resp.execute.result);
+
+- ath10k_dbg(ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
++ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
+
+ return 0;
+ }
+@@ -221,11 +221,11 @@ int ath10k_bmi_lz_data(struct ath10k *ar
+ u32 txlen;
+ int ret;
+
+- ath10k_dbg(ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
++ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
+ buffer, length);
+
+ if (ar->bmi.done_sent) {
+- ath10k_warn("command disallowed\n");
++ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+@@ -241,7 +241,7 @@ int ath10k_bmi_lz_data(struct ath10k *ar
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
+ NULL, NULL);
+ if (ret) {
+- ath10k_warn("unable to write to the device\n");
++ ath10k_warn(ar, "unable to write to the device\n");
+ return ret;
+ }
+
+@@ -258,11 +258,11 @@ int ath10k_bmi_lz_stream_start(struct at
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
+ int ret;
+
+- ath10k_dbg(ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
++ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
+ address);
+
+ if (ar->bmi.done_sent) {
+- ath10k_warn("command disallowed\n");
++ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+@@ -271,7 +271,7 @@ int ath10k_bmi_lz_stream_start(struct at
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+ if (ret) {
+- ath10k_warn("unable to Start LZ Stream to the device\n");
++ ath10k_warn(ar, "unable to Start LZ Stream to the device\n");
+ return ret;
+ }
+
+@@ -286,7 +286,7 @@ int ath10k_bmi_fast_download(struct ath1
+ u32 trailer_len = length - head_len;
+ int ret;
+
+- ath10k_dbg(ATH10K_DBG_BMI,
++ ath10k_dbg(ar, ATH10K_DBG_BMI,
+ "bmi fast download address 0x%x buffer 0x%p length %d\n",
+ address, buffer, length);
+
+--- a/drivers/net/wireless/ath/ath10k/bmi.h
++++ b/drivers/net/wireless/ath/ath10k/bmi.h
+@@ -177,7 +177,6 @@ struct bmi_target_info {
+ u32 type;
+ };
+
+-
+ /* in msec */
+ #define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ)
+
+@@ -201,7 +200,8 @@ int ath10k_bmi_write_memory(struct ath10
+ \
+ addr = host_interest_item_address(HI_ITEM(item)); \
+ ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \
+- *val = __le32_to_cpu(tmp); \
++ if (!ret) \
++ *val = __le32_to_cpu(tmp); \
+ ret; \
+ })
+
+--- a/drivers/net/wireless/ath/ath10k/ce.c
++++ b/drivers/net/wireless/ath/ath10k/ce.c
+@@ -260,7 +260,6 @@ static inline void ath10k_ce_engine_int_
+ ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
+ }
+
+-
+ /*
+ * Guts of ath10k_ce_send, used by both ath10k_ce_send and
+ * ath10k_ce_sendlist_send.
+@@ -284,13 +283,9 @@ int ath10k_ce_send_nolock(struct ath10k_
+ int ret = 0;
+
+ if (nbytes > ce_state->src_sz_max)
+- ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
++ ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
+ __func__, nbytes, ce_state->src_sz_max);
+
+- ret = ath10k_pci_wake(ar);
+- if (ret)
+- return ret;
+-
+ if (unlikely(CE_RING_DELTA(nentries_mask,
+ write_index, sw_index - 1) <= 0)) {
+ ret = -ENOSR;
+@@ -325,10 +320,36 @@ int ath10k_ce_send_nolock(struct ath10k_
+
+ src_ring->write_index = write_index;
+ exit:
+- ath10k_pci_sleep(ar);
+ return ret;
+ }
+
++void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
++{
++ struct ath10k *ar = pipe->ar;
++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
++ struct ath10k_ce_ring *src_ring = pipe->src_ring;
++ u32 ctrl_addr = pipe->ctrl_addr;
++
++ lockdep_assert_held(&ar_pci->ce_lock);
++
++ /*
++ * This function must be called only if there is an incomplete
++ * scatter-gather transfer (before index register is updated)
++ * that needs to be cleaned up.
++ */
++ if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
++ return;
++
++ if (WARN_ON_ONCE(src_ring->write_index ==
++ ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
++ return;
++
++ src_ring->write_index--;
++ src_ring->write_index &= src_ring->nentries_mask;
++
++ src_ring->per_transfer_context[src_ring->write_index] = NULL;
++}
++
+ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ u32 buffer,
+@@ -363,49 +384,56 @@ int ath10k_ce_num_free_src_entries(struc
+ return delta;
+ }
+
+-int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
+- void *per_recv_context,
+- u32 buffer)
++int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
+ {
+- struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+- u32 ctrl_addr = ce_state->ctrl_addr;
+- struct ath10k *ar = ce_state->ar;
++ struct ath10k *ar = pipe->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
++ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+- unsigned int write_index;
+- unsigned int sw_index;
+- int ret;
++ unsigned int write_index = dest_ring->write_index;
++ unsigned int sw_index = dest_ring->sw_index;
+
+- spin_lock_bh(&ar_pci->ce_lock);
+- write_index = dest_ring->write_index;
+- sw_index = dest_ring->sw_index;
++ lockdep_assert_held(&ar_pci->ce_lock);
+
+- ret = ath10k_pci_wake(ar);
+- if (ret)
+- goto out;
++ return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
++}
+
+- if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
+- struct ce_desc *base = dest_ring->base_addr_owner_space;
+- struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
++int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
++{
++ struct ath10k *ar = pipe->ar;
++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
++ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
++ unsigned int nentries_mask = dest_ring->nentries_mask;
++ unsigned int write_index = dest_ring->write_index;
++ unsigned int sw_index = dest_ring->sw_index;
++ struct ce_desc *base = dest_ring->base_addr_owner_space;
++ struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
++ u32 ctrl_addr = pipe->ctrl_addr;
+
+- /* Update destination descriptor */
+- desc->addr = __cpu_to_le32(buffer);
+- desc->nbytes = 0;
++ lockdep_assert_held(&ar_pci->ce_lock);
+
+- dest_ring->per_transfer_context[write_index] =
+- per_recv_context;
++ if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
++ return -EIO;
+
+- /* Update Destination Ring Write Index */
+- write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+- ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+- dest_ring->write_index = write_index;
+- ret = 0;
+- } else {
+- ret = -EIO;
+- }
+- ath10k_pci_sleep(ar);
++ desc->addr = __cpu_to_le32(paddr);
++ desc->nbytes = 0;
++
++ dest_ring->per_transfer_context[write_index] = ctx;
++ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
++ ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
++ dest_ring->write_index = write_index;
++
++ return 0;
++}
+
+-out:
++int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
++{
++ struct ath10k *ar = pipe->ar;
++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
++ int ret;
++
++ spin_lock_bh(&ar_pci->ce_lock);
++ ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
+ spin_unlock_bh(&ar_pci->ce_lock);
+
+ return ret;
+@@ -415,12 +443,12 @@ out:
+ * Guts of ath10k_ce_completed_recv_next.
+ * The caller takes responsibility for any necessary locking.
+ */
+-static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+- void **per_transfer_contextp,
+- u32 *bufferp,
+- unsigned int *nbytesp,
+- unsigned int *transfer_idp,
+- unsigned int *flagsp)
++int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
++ void **per_transfer_contextp,
++ u32 *bufferp,
++ unsigned int *nbytesp,
++ unsigned int *transfer_idp,
++ unsigned int *flagsp)
+ {
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+@@ -530,6 +558,7 @@ int ath10k_ce_revoke_recv_next(struct at
+
+ /* sanity */
+ dest_ring->per_transfer_context[sw_index] = NULL;
++ desc->nbytes = 0;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+@@ -548,11 +577,11 @@ int ath10k_ce_revoke_recv_next(struct at
+ * Guts of ath10k_ce_completed_send_next.
+ * The caller takes responsibility for any necessary locking.
+ */
+-static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
+- void **per_transfer_contextp,
+- u32 *bufferp,
+- unsigned int *nbytesp,
+- unsigned int *transfer_idp)
++int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
++ void **per_transfer_contextp,
++ u32 *bufferp,
++ unsigned int *nbytesp,
++ unsigned int *transfer_idp)
+ {
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ u32 ctrl_addr = ce_state->ctrl_addr;
+@@ -561,7 +590,6 @@ static int ath10k_ce_completed_send_next
+ unsigned int sw_index = src_ring->sw_index;
+ struct ce_desc *sdesc, *sbase;
+ unsigned int read_index;
+- int ret;
+
+ if (src_ring->hw_index == sw_index) {
+ /*
+@@ -572,20 +600,17 @@ static int ath10k_ce_completed_send_next
+ * value of the HW index has become stale.
+ */
+
+- ret = ath10k_pci_wake(ar);
+- if (ret)
+- return ret;
+-
+- src_ring->hw_index =
+- ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+- src_ring->hw_index &= nentries_mask;
++ read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
++ if (read_index == 0xffffffff)
++ return -ENODEV;
+
+- ath10k_pci_sleep(ar);
++ read_index &= nentries_mask;
++ src_ring->hw_index = read_index;
+ }
+
+ read_index = src_ring->hw_index;
+
+- if ((read_index == sw_index) || (read_index == 0xffffffff))
++ if (read_index == sw_index)
+ return -EIO;
+
+ sbase = src_ring->shadow_base;
+@@ -701,11 +726,6 @@ void ath10k_ce_per_engine_service(struct
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ u32 ctrl_addr = ce_state->ctrl_addr;
+- int ret;
+-
+- ret = ath10k_pci_wake(ar);
+- if (ret)
+- return;
+
+ spin_lock_bh(&ar_pci->ce_lock);
+
+@@ -730,7 +750,6 @@ void ath10k_ce_per_engine_service(struct
+ ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);
+
+ spin_unlock_bh(&ar_pci->ce_lock);
+- ath10k_pci_sleep(ar);
+ }
+
+ /*
+@@ -741,13 +760,9 @@ void ath10k_ce_per_engine_service(struct
+
+ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
+ {
+- int ce_id, ret;
++ int ce_id;
+ u32 intr_summary;
+
+- ret = ath10k_pci_wake(ar);
+- if (ret)
+- return;
+-
+ intr_summary = CE_INTERRUPT_SUMMARY(ar);
+
+ for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
+@@ -759,8 +774,6 @@ void ath10k_ce_per_engine_service_any(st
+
+ ath10k_ce_per_engine_service(ar, ce_id);
+ }
+-
+- ath10k_pci_sleep(ar);
+ }
+
+ /*
+@@ -770,16 +783,11 @@ void ath10k_ce_per_engine_service_any(st
+ *
+ * Called with ce_lock held.
+ */
+-static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
+- int disable_copy_compl_intr)
++static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
+ {
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ struct ath10k *ar = ce_state->ar;
+- int ret;
+-
+- ret = ath10k_pci_wake(ar);
+- if (ret)
+- return;
++ bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;
+
+ if ((!disable_copy_compl_intr) &&
+ (ce_state->send_cb || ce_state->recv_cb))
+@@ -788,54 +796,33 @@ static void ath10k_ce_per_engine_handler
+ ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+
+ ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
+-
+- ath10k_pci_sleep(ar);
+ }
+
+ int ath10k_ce_disable_interrupts(struct ath10k *ar)
+ {
+- int ce_id, ret;
+-
+- ret = ath10k_pci_wake(ar);
+- if (ret)
+- return ret;
++ int ce_id;
+
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
+- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
++ u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+ ath10k_ce_error_intr_disable(ar, ctrl_addr);
+ ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
+ }
+
+- ath10k_pci_sleep(ar);
+-
+ return 0;
+ }
+
+-void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
+- void (*send_cb)(struct ath10k_ce_pipe *),
+- int disable_interrupts)
++void ath10k_ce_enable_interrupts(struct ath10k *ar)
+ {
+- struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
++ int ce_id;
+
+- spin_lock_bh(&ar_pci->ce_lock);
+- ce_state->send_cb = send_cb;
+- ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts);
+- spin_unlock_bh(&ar_pci->ce_lock);
+-}
+-
+-void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
+- void (*recv_cb)(struct ath10k_ce_pipe *))
+-{
+- struct ath10k *ar = ce_state->ar;
+- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+-
+- spin_lock_bh(&ar_pci->ce_lock);
+- ce_state->recv_cb = recv_cb;
+- ath10k_ce_per_engine_handler_adjust(ce_state, 0);
+- spin_unlock_bh(&ar_pci->ce_lock);
++ /* Skip the last copy engine, CE7 the diagnostic window, as that
++ * uses polling and isn't initialized for interrupts.
++ */
++ for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++)
++ ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]);
+ }
+
+ static int ath10k_ce_init_src_ring(struct ath10k *ar,
+@@ -845,12 +832,12 @@ static int ath10k_ce_init_src_ring(struc
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+- u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
++ u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ nentries = roundup_pow_of_two(attr->src_nentries);
+
+- memset(src_ring->per_transfer_context, 0,
+- nentries * sizeof(*src_ring->per_transfer_context));
++ memset(src_ring->base_addr_owner_space, 0,
++ nentries * sizeof(struct ce_desc));
+
+ src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ src_ring->sw_index &= src_ring->nentries_mask;
+@@ -868,7 +855,7 @@ static int ath10k_ce_init_src_ring(struc
+ ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
+
+- ath10k_dbg(ATH10K_DBG_BOOT,
++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot init ce src ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, src_ring->base_addr_owner_space);
+
+@@ -882,12 +869,12 @@ static int ath10k_ce_init_dest_ring(stru
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+- u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
++ u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+
+- memset(dest_ring->per_transfer_context, 0,
+- nentries * sizeof(*dest_ring->per_transfer_context));
++ memset(dest_ring->base_addr_owner_space, 0,
++ nentries * sizeof(struct ce_desc));
+
+ dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+ dest_ring->sw_index &= dest_ring->nentries_mask;
+@@ -902,7 +889,7 @@ static int ath10k_ce_init_dest_ring(stru
+ ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
+
+- ath10k_dbg(ATH10K_DBG_BOOT,
++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot ce dest ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, dest_ring->base_addr_owner_space);
+
+@@ -1039,59 +1026,32 @@ ath10k_ce_alloc_dest_ring(struct ath10k
+ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+ {
+- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+- struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ int ret;
+
+- /*
+- * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
+- * additional TX locking checks.
+- *
+- * For the lack of a better place do the check here.
+- */
+- BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
+- (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+- BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
+- (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+-
+- ret = ath10k_pci_wake(ar);
+- if (ret)
+- return ret;
+-
+- spin_lock_bh(&ar_pci->ce_lock);
+- ce_state->ar = ar;
+- ce_state->id = ce_id;
+- ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
+- ce_state->attr_flags = attr->flags;
+- ce_state->src_sz_max = attr->src_sz_max;
+- spin_unlock_bh(&ar_pci->ce_lock);
+-
+ if (attr->src_nentries) {
+ ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
+ if (ret) {
+- ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
++ ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
+ ce_id, ret);
+- goto out;
++ return ret;
+ }
+ }
+
+ if (attr->dest_nentries) {
+ ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
+ if (ret) {
+- ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
++ ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
+ ce_id, ret);
+- goto out;
++ return ret;
+ }
+ }
+
+-out:
+- ath10k_pci_sleep(ar);
+- return ret;
++ return 0;
+ }
+
+ static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
+ {
+- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
++ u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
+@@ -1101,7 +1061,7 @@ static void ath10k_ce_deinit_src_ring(st
+
+ static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
+ {
+- u32 ctrl_addr = ath10k_ce_base_address(ce_id);
++ u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
+@@ -1110,30 +1070,49 @@ static void ath10k_ce_deinit_dest_ring(s
+
+ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
+ {
+- int ret;
+-
+- ret = ath10k_pci_wake(ar);
+- if (ret)
+- return;
+-
+ ath10k_ce_deinit_src_ring(ar, ce_id);
+ ath10k_ce_deinit_dest_ring(ar, ce_id);
+-
+- ath10k_pci_sleep(ar);
+ }
+
+ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+- const struct ce_attr *attr)
++ const struct ce_attr *attr,
++ void (*send_cb)(struct ath10k_ce_pipe *),
++ void (*recv_cb)(struct ath10k_ce_pipe *))
+ {
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+ int ret;
+
++ /*
++ * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
++ * additional TX locking checks.
++ *
++ * For the lack of a better place do the check here.
++ */
++ BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
++ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
++ BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
++ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
++ BUILD_BUG_ON(2*TARGET_TLV_NUM_MSDU_DESC >
++ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
++
++ ce_state->ar = ar;
++ ce_state->id = ce_id;
++ ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
++ ce_state->attr_flags = attr->flags;
++ ce_state->src_sz_max = attr->src_sz_max;
++
++ if (attr->src_nentries)
++ ce_state->send_cb = send_cb;
++
++ if (attr->dest_nentries)
++ ce_state->recv_cb = recv_cb;
++
+ if (attr->src_nentries) {
+ ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
+ if (IS_ERR(ce_state->src_ring)) {
+ ret = PTR_ERR(ce_state->src_ring);
+- ath10k_err("failed to allocate copy engine source ring %d: %d\n",
++ ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n",
+ ce_id, ret);
+ ce_state->src_ring = NULL;
+ return ret;
+@@ -1145,7 +1124,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *
+ attr);
+ if (IS_ERR(ce_state->dest_ring)) {
+ ret = PTR_ERR(ce_state->dest_ring);
+- ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
++ ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n",
+ ce_id, ret);
+ ce_state->dest_ring = NULL;
+ return ret;
+--- a/drivers/net/wireless/ath/ath10k/ce.h
++++ b/drivers/net/wireless/ath/ath10k/ce.h
+@@ -20,7 +20,6 @@
+
+ #include "hif.h"
+
+-
+ /* Maximum number of Copy Engine's supported */
+ #define CE_COUNT_MAX 8
+ #define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
+@@ -37,11 +36,10 @@
+
+ struct ath10k_ce_pipe;
+
+-
+ #define CE_DESC_FLAGS_GATHER (1 << 0)
+ #define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
+ #define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
+-#define CE_DESC_FLAGS_META_DATA_LSB 3
++#define CE_DESC_FLAGS_META_DATA_LSB 2
+
+ struct ce_desc {
+ __le32 addr;
+@@ -160,30 +158,15 @@ int ath10k_ce_send_nolock(struct ath10k_
+ unsigned int transfer_id,
+ unsigned int flags);
+
+-void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
+- void (*send_cb)(struct ath10k_ce_pipe *),
+- int disable_interrupts);
++void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe);
+
+ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
+
+ /*==================Recv=======================*/
+
+-/*
+- * Make a buffer available to receive. The buffer must be at least of a
+- * minimal size appropriate for this copy engine (src_sz_max attribute).
+- * ce - which copy engine to use
+- * per_transfer_recv_context - context passed back to caller's recv_cb
+- * buffer - address of buffer in CE space
+- * Returns 0 on success; otherwise an error status.
+- *
+- * Implemenation note: Pushes a buffer to Dest ring.
+- */
+-int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
+- void *per_transfer_recv_context,
+- u32 buffer);
+-
+-void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
+- void (*recv_cb)(struct ath10k_ce_pipe *));
++int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
++int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
++int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
+
+ /* recv flags */
+ /* Data is byte-swapped */
+@@ -204,10 +187,16 @@ int ath10k_ce_completed_recv_next(struct
+ * Pops 1 completed send buffer from Source ring.
+ */
+ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
+- void **per_transfer_contextp,
+- u32 *bufferp,
+- unsigned int *nbytesp,
+- unsigned int *transfer_idp);
++ void **per_transfer_contextp,
++ u32 *bufferp,
++ unsigned int *nbytesp,
++ unsigned int *transfer_idp);
++
++int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
++ void **per_transfer_contextp,
++ u32 *bufferp,
++ unsigned int *nbytesp,
++ unsigned int *transfer_idp);
+
+ /*==================CE Engine Initialization=======================*/
+
+@@ -215,7 +204,9 @@ int ath10k_ce_init_pipe(struct ath10k *a
+ const struct ce_attr *attr);
+ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
+ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+- const struct ce_attr *attr);
++ const struct ce_attr *attr,
++ void (*send_cb)(struct ath10k_ce_pipe *),
++ void (*recv_cb)(struct ath10k_ce_pipe *));
+ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
+
+ /*==================CE Engine Shutdown=======================*/
+@@ -228,6 +219,13 @@ int ath10k_ce_revoke_recv_next(struct at
+ void **per_transfer_contextp,
+ u32 *bufferp);
+
++int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
++ void **per_transfer_contextp,
++ u32 *bufferp,
++ unsigned int *nbytesp,
++ unsigned int *transfer_idp,
++ unsigned int *flagsp);
++
+ /*
+ * Support clean shutdown by allowing the caller to cancel
+ * pending sends. Target DMA must be stopped before using
+@@ -243,6 +241,7 @@ int ath10k_ce_cancel_send_next(struct at
+ void ath10k_ce_per_engine_service_any(struct ath10k *ar);
+ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
+ int ath10k_ce_disable_interrupts(struct ath10k *ar);
++void ath10k_ce_enable_interrupts(struct ath10k *ar);
+
+ /* ce_attr.flags values */
+ /* Use NonSnooping PCIe accesses? */
+@@ -395,8 +394,7 @@ struct ce_attr {
+ #define DST_WATERMARK_HIGH_RESET 0
+ #define DST_WATERMARK_ADDRESS 0x0050
+
+-
+-static inline u32 ath10k_ce_base_address(unsigned int ce_id)
++static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
+ {
+ return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
+ }
+--- a/drivers/net/wireless/ath/ath10k/core.c
++++ b/drivers/net/wireless/ath/ath10k/core.c
+@@ -17,6 +17,7 @@
+
+ #include <linux/module.h>
+ #include <linux/firmware.h>
++#include <linux/of.h>
+
+ #include "core.h"
+ #include "mac.h"
+@@ -26,68 +27,88 @@
+ #include "bmi.h"
+ #include "debug.h"
+ #include "htt.h"
++#include "testmode.h"
++#include "wmi-ops.h"
+
+ unsigned int ath10k_debug_mask;
+ static bool uart_print;
+-static unsigned int ath10k_p2p;
++static bool skip_otp;
++
+ module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
+ module_param(uart_print, bool, 0644);
+-module_param_named(p2p, ath10k_p2p, uint, 0644);
++module_param(skip_otp, bool, 0644);
++
+ MODULE_PARM_DESC(debug_mask, "Debugging mask");
+ MODULE_PARM_DESC(uart_print, "Uart target debugging");
+-MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");
++MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
+
+ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ {
+ .id = QCA988X_HW_2_0_VERSION,
+ .name = "qca988x hw2.0",
+ .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
++ .uart_pin = 7,
+ .fw = {
+ .dir = QCA988X_HW_2_0_FW_DIR,
+ .fw = QCA988X_HW_2_0_FW_FILE,
+ .otp = QCA988X_HW_2_0_OTP_FILE,
+ .board = QCA988X_HW_2_0_BOARD_DATA_FILE,
++ .board_size = QCA988X_BOARD_DATA_SZ,
++ .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
++ },
++ },
++ {
++ .id = QCA6174_HW_2_1_VERSION,
++ .name = "qca6174 hw2.1",
++ .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
++ .uart_pin = 6,
++ .fw = {
++ .dir = QCA6174_HW_2_1_FW_DIR,
++ .fw = QCA6174_HW_2_1_FW_FILE,
++ .otp = QCA6174_HW_2_1_OTP_FILE,
++ .board = QCA6174_HW_2_1_BOARD_DATA_FILE,
++ .board_size = QCA6174_BOARD_DATA_SZ,
++ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
++ },
++ },
++ {
++ .id = QCA6174_HW_3_0_VERSION,
++ .name = "qca6174 hw3.0",
++ .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
++ .uart_pin = 6,
++ .fw = {
++ .dir = QCA6174_HW_3_0_FW_DIR,
++ .fw = QCA6174_HW_3_0_FW_FILE,
++ .otp = QCA6174_HW_3_0_OTP_FILE,
++ .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
++ .board_size = QCA6174_BOARD_DATA_SZ,
++ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
++ },
++ },
++ {
++ .id = QCA6174_HW_3_2_VERSION,
++ .name = "qca6174 hw3.2",
++ .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
++ .uart_pin = 6,
++ .fw = {
++ /* uses same binaries as hw3.0 */
++ .dir = QCA6174_HW_3_0_FW_DIR,
++ .fw = QCA6174_HW_3_0_FW_FILE,
++ .otp = QCA6174_HW_3_0_OTP_FILE,
++ .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
++ .board_size = QCA6174_BOARD_DATA_SZ,
++ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+ },
+ },
+ };
+
+ static void ath10k_send_suspend_complete(struct ath10k *ar)
+ {
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n");
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n");
+
+ complete(&ar->target_suspend);
+ }
+
+-static int ath10k_init_connect_htc(struct ath10k *ar)
+-{
+- int status;
+-
+- status = ath10k_wmi_connect_htc_service(ar);
+- if (status)
+- goto conn_fail;
+-
+- /* Start HTC */
+- status = ath10k_htc_start(&ar->htc);
+- if (status)
+- goto conn_fail;
+-
+- /* Wait for WMI event to be ready */
+- status = ath10k_wmi_wait_for_service_ready(ar);
+- if (status <= 0) {
+- ath10k_warn("wmi service ready event not received");
+- status = -ETIMEDOUT;
+- goto timeout;
+- }
+-
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot wmi ready\n");
+- return 0;
+-
+-timeout:
+- ath10k_htc_stop(&ar->htc);
+-conn_fail:
+- return status;
+-}
+-
+ static int ath10k_init_configure_target(struct ath10k *ar)
+ {
+ u32 param_host;
+@@ -97,14 +118,14 @@ static int ath10k_init_configure_target(
+ ret = ath10k_bmi_write32(ar, hi_app_host_interest,
+ HTC_PROTOCOL_VERSION);
+ if (ret) {
+- ath10k_err("settings HTC version failed\n");
++ ath10k_err(ar, "settings HTC version failed\n");
+ return ret;
+ }
+
+ /* set the firmware mode to STA/IBSS/AP */
+ ret = ath10k_bmi_read32(ar, hi_option_flag, &param_host);
+ if (ret) {
+- ath10k_err("setting firmware mode (1/2) failed\n");
++ ath10k_err(ar, "setting firmware mode (1/2) failed\n");
+ return ret;
+ }
+
+@@ -123,14 +144,14 @@ static int ath10k_init_configure_target(
+
+ ret = ath10k_bmi_write32(ar, hi_option_flag, param_host);
+ if (ret) {
+- ath10k_err("setting firmware mode (2/2) failed\n");
++ ath10k_err(ar, "setting firmware mode (2/2) failed\n");
+ return ret;
+ }
+
+ /* We do all byte-swapping on the host */
+ ret = ath10k_bmi_write32(ar, hi_be, 0);
+ if (ret) {
+- ath10k_err("setting host CPU BE mode failed\n");
++ ath10k_err(ar, "setting host CPU BE mode failed\n");
+ return ret;
+ }
+
+@@ -138,7 +159,7 @@ static int ath10k_init_configure_target(
+ ret = ath10k_bmi_write32(ar, hi_fw_swap, 0);
+
+ if (ret) {
+- ath10k_err("setting FW data/desc swap flags failed\n");
++ ath10k_err(ar, "setting FW data/desc swap flags failed\n");
+ return ret;
+ }
+
+@@ -167,79 +188,83 @@ static const struct firmware *ath10k_fet
+ return fw;
+ }
+
+-static int ath10k_push_board_ext_data(struct ath10k *ar)
++static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data,
++ size_t data_len)
+ {
+- u32 board_data_size = QCA988X_BOARD_DATA_SZ;
+- u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ;
++ u32 board_data_size = ar->hw_params.fw.board_size;
++ u32 board_ext_data_size = ar->hw_params.fw.board_ext_size;
+ u32 board_ext_data_addr;
+ int ret;
+
+ ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr);
+ if (ret) {
+- ath10k_err("could not read board ext data addr (%d)\n", ret);
++ ath10k_err(ar, "could not read board ext data addr (%d)\n",
++ ret);
+ return ret;
+ }
+
+- ath10k_dbg(ATH10K_DBG_BOOT,
++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot push board extended data addr 0x%x\n",
+ board_ext_data_addr);
+
+ if (board_ext_data_addr == 0)
+ return 0;
+
+- if (ar->board_len != (board_data_size + board_ext_data_size)) {
+- ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n",
+- ar->board_len, board_data_size, board_ext_data_size);
++ if (data_len != (board_data_size + board_ext_data_size)) {
++ ath10k_err(ar, "invalid board (ext) data sizes %zu != %d+%d\n",
++ data_len, board_data_size, board_ext_data_size);
+ return -EINVAL;
+ }
+
+ ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
+- ar->board_data + board_data_size,
++ data + board_data_size,
+ board_ext_data_size);
+ if (ret) {
+- ath10k_err("could not write board ext data (%d)\n", ret);
++ ath10k_err(ar, "could not write board ext data (%d)\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_bmi_write32(ar, hi_board_ext_data_config,
+ (board_ext_data_size << 16) | 1);
+ if (ret) {
+- ath10k_err("could not write board ext data bit (%d)\n", ret);
++ ath10k_err(ar, "could not write board ext data bit (%d)\n",
++ ret);
+ return ret;
+ }
+
+ return 0;
+ }
+
+-static int ath10k_download_board_data(struct ath10k *ar)
++static int ath10k_download_board_data(struct ath10k *ar, const void *data,
++ size_t data_len)
+ {
+- u32 board_data_size = QCA988X_BOARD_DATA_SZ;
++ u32 board_data_size = ar->hw_params.fw.board_size;
+ u32 address;
+ int ret;
+
+- ret = ath10k_push_board_ext_data(ar);
++ ret = ath10k_push_board_ext_data(ar, data, data_len);
+ if (ret) {
+- ath10k_err("could not push board ext data (%d)\n", ret);
++ ath10k_err(ar, "could not push board ext data (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_bmi_read32(ar, hi_board_data, &address);
+ if (ret) {
+- ath10k_err("could not read board data addr (%d)\n", ret);
++ ath10k_err(ar, "could not read board data addr (%d)\n", ret);
+ goto exit;
+ }
+
+- ret = ath10k_bmi_write_memory(ar, address, ar->board_data,
++ ret = ath10k_bmi_write_memory(ar, address, data,
+ min_t(u32, board_data_size,
+- ar->board_len));
++ data_len));
+ if (ret) {
+- ath10k_err("could not write board data (%d)\n", ret);
++ ath10k_err(ar, "could not write board data (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1);
+ if (ret) {
+- ath10k_err("could not write board data bit (%d)\n", ret);
++ ath10k_err(ar, "could not write board data bit (%d)\n", ret);
+ goto exit;
+ }
+
+@@ -247,73 +272,182 @@ exit:
+ return ret;
+ }
+
++static int ath10k_download_cal_file(struct ath10k *ar)
++{
++ int ret;
++
++ if (!ar->cal_file)
++ return -ENOENT;
++
++ if (IS_ERR(ar->cal_file))
++ return PTR_ERR(ar->cal_file);
++
++ ret = ath10k_download_board_data(ar, ar->cal_file->data,
++ ar->cal_file->size);
++ if (ret) {
++ ath10k_err(ar, "failed to download cal_file data: %d\n", ret);
++ return ret;
++ }
++
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal file downloaded\n");
++
++ return 0;
++}
++
++static int ath10k_download_cal_dt(struct ath10k *ar)
++{
++ struct device_node *node;
++ int data_len;
++ void *data;
++ int ret;
++
++ node = ar->dev->of_node;
++ if (!node)
++ /* Device Tree is optional, don't print any warnings if
++ * there's no node for ath10k.
++ */
++ return -ENOENT;
++
++ if (!of_get_property(node, "qcom,ath10k-calibration-data",
++ &data_len)) {
++ /* The calibration data node is optional */
++ return -ENOENT;
++ }
++
++ if (data_len != QCA988X_CAL_DATA_LEN) {
++ ath10k_warn(ar, "invalid calibration data length in DT: %d\n",
++ data_len);
++ ret = -EMSGSIZE;
++ goto out;
++ }
++
++ data = kmalloc(data_len, GFP_KERNEL);
++ if (!data) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ ret = of_property_read_u8_array(node, "qcom,ath10k-calibration-data",
++ data, data_len);
++ if (ret) {
++ ath10k_warn(ar, "failed to read calibration data from DT: %d\n",
++ ret);
++ goto out_free;
++ }
++
++ ret = ath10k_download_board_data(ar, data, data_len);
|
|
++ if (ret) {
|
|
++ ath10k_warn(ar, "failed to download calibration data from Device Tree: %d\n",
|
|
++ ret);
|
|
++ goto out_free;
|
|
++ }
|
|
++
|
|
++ ret = 0;
|
|
++
|
|
++out_free:
|
|
++ kfree(data);
|
|
++
|
|
++out:
|
|
++ return ret;
|
|
++}
|
|
++
|
|
+ static int ath10k_download_and_run_otp(struct ath10k *ar)
|
|
+ {
|
|
+ u32 result, address = ar->hw_params.patch_load_addr;
|
|
+ int ret;
|
|
+
|
|
++ ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len);
|
|
++ if (ret) {
|
|
++ ath10k_err(ar, "failed to download board data: %d\n", ret);
|
|
++ return ret;
|
|
++ }
|
|
++
|
|
+ /* OTP is optional */
|
|
+
|
|
+ if (!ar->otp_data || !ar->otp_len) {
|
|
+- ath10k_warn("Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
|
|
++ ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
|
|
+ ar->otp_data, ar->otp_len);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
|
|
+ address, ar->otp_len);
|
|
+
|
|
+ ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
|
|
+ if (ret) {
|
|
+- ath10k_err("could not write otp (%d)\n", ret);
|
|
++ ath10k_err(ar, "could not write otp (%d)\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = ath10k_bmi_execute(ar, address, 0, &result);
|
|
+ if (ret) {
|
|
+- ath10k_err("could not execute otp (%d)\n", ret);
|
|
++ ath10k_err(ar, "could not execute otp (%d)\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
|
|
+
|
|
+- if (result != 0) {
|
|
+- ath10k_err("otp calibration failed: %d", result);
|
|
++ if (!skip_otp && result != 0) {
|
|
++ ath10k_err(ar, "otp calibration failed: %d", result);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+-static int ath10k_download_fw(struct ath10k *ar)
|
|
++static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
|
|
+ {
|
|
+- u32 address;
|
|
++ u32 address, data_len;
|
|
++ const char *mode_name;
|
|
++ const void *data;
|
|
+ int ret;
|
|
+
|
|
+ address = ar->hw_params.patch_load_addr;
|
|
+
|
|
+- ret = ath10k_bmi_fast_download(ar, address, ar->firmware_data,
|
|
+- ar->firmware_len);
|
|
++ switch (mode) {
|
|
++ case ATH10K_FIRMWARE_MODE_NORMAL:
|
|
++ data = ar->firmware_data;
|
|
++ data_len = ar->firmware_len;
|
|
++ mode_name = "normal";
|
|
++ break;
|
|
++ case ATH10K_FIRMWARE_MODE_UTF:
|
|
++ data = ar->testmode.utf->data;
|
|
++ data_len = ar->testmode.utf->size;
|
|
++ mode_name = "utf";
|
|
++ break;
|
|
++ default:
|
|
++ ath10k_err(ar, "unknown firmware mode: %d\n", mode);
|
|
++ return -EINVAL;
|
|
++ }
|
|
++
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
|
|
++ "boot uploading firmware image %p len %d mode %s\n",
|
|
++ data, data_len, mode_name);
|
|
++
|
|
++ ret = ath10k_bmi_fast_download(ar, address, data, data_len);
|
|
+ if (ret) {
|
|
+- ath10k_err("could not write fw (%d)\n", ret);
|
|
+- goto exit;
|
|
++ ath10k_err(ar, "failed to download %s firmware: %d\n",
|
|
++ mode_name, ret);
|
|
++ return ret;
|
|
+ }
|
|
+
|
|
+-exit:
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ static void ath10k_core_free_firmware_files(struct ath10k *ar)
|
|
+ {
|
|
+- if (ar->board && !IS_ERR(ar->board))
|
|
++ if (!IS_ERR(ar->board))
|
|
+ release_firmware(ar->board);
|
|
+
|
|
+- if (ar->otp && !IS_ERR(ar->otp))
|
|
++ if (!IS_ERR(ar->otp))
|
|
+ release_firmware(ar->otp);
|
|
+
|
|
+- if (ar->firmware && !IS_ERR(ar->firmware))
|
|
++ if (!IS_ERR(ar->firmware))
|
|
+ release_firmware(ar->firmware);
|
|
+
|
|
++ if (!IS_ERR(ar->cal_file))
|
|
++ release_firmware(ar->cal_file);
|
|
++
|
|
+ ar->board = NULL;
|
|
+ ar->board_data = NULL;
|
|
+ ar->board_len = 0;
|
|
+@@ -325,6 +459,27 @@ static void ath10k_core_free_firmware_fi
|
|
+ ar->firmware = NULL;
|
|
+ ar->firmware_data = NULL;
|
|
+ ar->firmware_len = 0;
|
|
++
|
|
++ ar->cal_file = NULL;
|
|
++}
|
|
++
|
|
++static int ath10k_fetch_cal_file(struct ath10k *ar)
|
|
++{
|
|
++ char filename[100];
|
|
++
|
|
++ /* cal-<bus>-<id>.bin */
|
|
++ scnprintf(filename, sizeof(filename), "cal-%s-%s.bin",
|
|
++ ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
|
|
++
|
|
++ ar->cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename);
|
|
++ if (IS_ERR(ar->cal_file))
|
|
++ /* calibration file is optional, don't print any warnings */
|
|
++ return PTR_ERR(ar->cal_file);
|
|
++
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n",
|
|
++ ATH10K_FW_DIR, filename);
|
|
++
|
|
++ return 0;
|
|
+ }
|
|
+
|
|
+ static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
|
|
+@@ -332,12 +487,12 @@ static int ath10k_core_fetch_firmware_ap
|
|
+ int ret = 0;
|
|
+
|
|
+ if (ar->hw_params.fw.fw == NULL) {
|
|
+- ath10k_err("firmware file not defined\n");
|
|
++ ath10k_err(ar, "firmware file not defined\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (ar->hw_params.fw.board == NULL) {
|
|
+- ath10k_err("board data file not defined");
|
|
++ ath10k_err(ar, "board data file not defined");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+@@ -346,7 +501,7 @@ static int ath10k_core_fetch_firmware_ap
|
|
+ ar->hw_params.fw.board);
|
|
+ if (IS_ERR(ar->board)) {
|
|
+ ret = PTR_ERR(ar->board);
|
|
+- ath10k_err("could not fetch board data (%d)\n", ret);
|
|
++ ath10k_err(ar, "could not fetch board data (%d)\n", ret);
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+@@ -358,7 +513,7 @@ static int ath10k_core_fetch_firmware_ap
|
|
+ ar->hw_params.fw.fw);
|
|
+ if (IS_ERR(ar->firmware)) {
|
|
+ ret = PTR_ERR(ar->firmware);
|
|
+- ath10k_err("could not fetch firmware (%d)\n", ret);
|
|
++ ath10k_err(ar, "could not fetch firmware (%d)\n", ret);
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+@@ -374,7 +529,7 @@ static int ath10k_core_fetch_firmware_ap
|
|
+ ar->hw_params.fw.otp);
|
|
+ if (IS_ERR(ar->otp)) {
|
|
+ ret = PTR_ERR(ar->otp);
|
|
+- ath10k_err("could not fetch otp (%d)\n", ret);
|
|
++ ath10k_err(ar, "could not fetch otp (%d)\n", ret);
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+@@ -394,12 +549,12 @@ static int ath10k_core_fetch_firmware_ap
|
|
+ int ie_id, i, index, bit, ret;
|
|
+ struct ath10k_fw_ie *hdr;
|
|
+ const u8 *data;
|
|
+- __le32 *timestamp;
|
|
++ __le32 *timestamp, *version;
|
|
+
|
|
+ /* first fetch the firmware file (firmware-*.bin) */
|
|
+ ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
|
|
+ if (IS_ERR(ar->firmware)) {
|
|
+- ath10k_err("could not fetch firmware file '%s/%s': %ld\n",
|
|
++ ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n",
|
|
+ ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
|
|
+ return PTR_ERR(ar->firmware);
|
|
+ }
|
|
+@@ -411,14 +566,14 @@ static int ath10k_core_fetch_firmware_ap
|
|
+ magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
|
|
+
|
|
+ if (len < magic_len) {
|
|
+- ath10k_err("firmware file '%s/%s' too small to contain magic: %zu\n",
|
|
++ ath10k_err(ar, "firmware file '%s/%s' too small to contain magic: %zu\n",
|
|
+ ar->hw_params.fw.dir, name, len);
|
|
+ ret = -EINVAL;
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+ if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
|
|
+- ath10k_err("invalid firmware magic\n");
|
|
++ ath10k_err(ar, "invalid firmware magic\n");
|
|
+ ret = -EINVAL;
|
|
+ goto err;
|
|
+ }
|
|
+@@ -440,7 +595,7 @@ static int ath10k_core_fetch_firmware_ap
|
|
+ data += sizeof(*hdr);
|
|
+
|
|
+ if (len < ie_len) {
|
|
+- ath10k_err("invalid length for FW IE %d (%zu < %zu)\n",
|
|
++ ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
|
|
+ ie_id, len, ie_len);
|
|
+ ret = -EINVAL;
|
|
+ goto err;
|
|
+@@ -454,7 +609,7 @@ static int ath10k_core_fetch_firmware_ap
|
|
+ memcpy(ar->hw->wiphy->fw_version, data, ie_len);
|
|
+ ar->hw->wiphy->fw_version[ie_len] = '\0';
|
|
+
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT,
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
|
|
+ "found fw version %s\n",
|
|
+ ar->hw->wiphy->fw_version);
|
|
+ break;
|
|
+@@ -464,11 +619,11 @@ static int ath10k_core_fetch_firmware_ap
|
|
+
|
|
+ timestamp = (__le32 *)data;
|
|
+
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT, "found fw timestamp %d\n",
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw timestamp %d\n",
|
|
+ le32_to_cpup(timestamp));
|
|
+ break;
|
|
+ case ATH10K_FW_IE_FEATURES:
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT,
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
|
|
+ "found firmware features ie (%zd B)\n",
|
|
+ ie_len);
|
|
+
|
|
+@@ -480,19 +635,19 @@ static int ath10k_core_fetch_firmware_ap
|
|
+ break;
|
|
+
|
|
+ if (data[index] & (1 << bit)) {
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT,
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
|
|
+ "Enabling feature bit: %i\n",
|
|
+ i);
|
|
+ __set_bit(i, ar->fw_features);
|
|
+ }
|
|
+ }
|
|
+
|
|
+- ath10k_dbg_dump(ATH10K_DBG_BOOT, "features", "",
|
|
++ ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
|
|
+ ar->fw_features,
|
|
+ sizeof(ar->fw_features));
|
|
+ break;
|
|
+ case ATH10K_FW_IE_FW_IMAGE:
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT,
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
|
|
+ "found fw image ie (%zd B)\n",
|
|
+ ie_len);
|
|
+
|
|
+@@ -501,7 +656,7 @@ static int ath10k_core_fetch_firmware_ap
|
|
+
|
|
+ break;
|
|
+ case ATH10K_FW_IE_OTP_IMAGE:
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT,
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
|
|
+ "found otp image ie (%zd B)\n",
|
|
+ ie_len);
|
|
+
|
|
+@@ -509,8 +664,19 @@ static int ath10k_core_fetch_firmware_ap
|
|
+ ar->otp_len = ie_len;
|
|
+
|
|
+ break;
|
|
++ case ATH10K_FW_IE_WMI_OP_VERSION:
|
|
++ if (ie_len != sizeof(u32))
|
|
++ break;
|
|
++
|
|
++ version = (__le32 *)data;
|
|
++
|
|
++ ar->wmi.op_version = le32_to_cpup(version);
|
|
++
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
|
|
++ ar->wmi.op_version);
|
|
++ break;
|
|
+ default:
|
|
+- ath10k_warn("Unknown FW IE: %u\n",
|
|
++ ath10k_warn(ar, "Unknown FW IE: %u\n",
|
|
+ le32_to_cpu(hdr->id));
|
|
+ break;
|
|
+ }
|
|
+@@ -523,7 +689,7 @@ static int ath10k_core_fetch_firmware_ap
|
|
+ }
|
|
+
|
|
+ if (!ar->firmware_data || !ar->firmware_len) {
|
|
+- ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
|
|
++ ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
|
|
+ ar->hw_params.fw.dir, name);
|
|
+ ret = -ENOMEDIUM;
|
|
+ goto err;
|
|
+@@ -531,7 +697,7 @@ static int ath10k_core_fetch_firmware_ap
|
|
+
|
|
+ /* now fetch the board file */
|
|
+ if (ar->hw_params.fw.board == NULL) {
|
|
+- ath10k_err("board data file not defined");
|
|
++ ath10k_err(ar, "board data file not defined");
|
|
+ ret = -EINVAL;
|
|
+ goto err;
|
|
+ }
|
|
+@@ -541,7 +707,7 @@ static int ath10k_core_fetch_firmware_ap
|
|
+ ar->hw_params.fw.board);
|
|
+ if (IS_ERR(ar->board)) {
|
|
+ ret = PTR_ERR(ar->board);
|
|
+- ath10k_err("could not fetch board data '%s/%s' (%d)\n",
|
|
++ ath10k_err(ar, "could not fetch board data '%s/%s' (%d)\n",
|
|
+ ar->hw_params.fw.dir, ar->hw_params.fw.board,
|
|
+ ret);
|
|
+ goto err;
|
|
+@@ -561,49 +727,79 @@ static int ath10k_core_fetch_firmware_fi
|
|
+ {
|
|
+ int ret;
|
|
+
|
|
++ /* calibration file is optional, don't check for any errors */
|
|
++ ath10k_fetch_cal_file(ar);
|
|
++
|
|
++ ar->fw_api = 4;
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
|
|
++
|
|
++ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE);
|
|
++ if (ret == 0)
|
|
++ goto success;
|
|
++
|
|
++ ar->fw_api = 3;
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
|
|
++
|
|
++ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE);
|
|
++ if (ret == 0)
|
|
++ goto success;
|
|
++
|
|
+ ar->fw_api = 2;
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
|
|
+
|
|
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
|
|
+ if (ret == 0)
|
|
+ goto success;
|
|
+
|
|
+ ar->fw_api = 1;
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
|
|
+
|
|
+ ret = ath10k_core_fetch_firmware_api_1(ar);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ success:
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
|
|
+
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+-static int ath10k_init_download_firmware(struct ath10k *ar)
|
|
++static int ath10k_download_cal_data(struct ath10k *ar)
|
|
+ {
|
|
+ int ret;
|
|
+
|
|
+- ret = ath10k_download_board_data(ar);
|
|
+- if (ret) {
|
|
+- ath10k_err("failed to download board data: %d\n", ret);
|
|
+- return ret;
|
|
++ ret = ath10k_download_cal_file(ar);
|
|
++ if (ret == 0) {
|
|
++ ar->cal_mode = ATH10K_CAL_MODE_FILE;
|
|
++ goto done;
|
|
+ }
|
|
+
|
|
+- ret = ath10k_download_and_run_otp(ar);
|
|
+- if (ret) {
|
|
+- ath10k_err("failed to run otp: %d\n", ret);
|
|
+- return ret;
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
|
|
++ "boot did not find a calibration file, try DT next: %d\n",
|
|
++ ret);
|
|
++
|
|
++ ret = ath10k_download_cal_dt(ar);
|
|
++ if (ret == 0) {
|
|
++ ar->cal_mode = ATH10K_CAL_MODE_DT;
|
|
++ goto done;
|
|
+ }
|
|
+
|
|
+- ret = ath10k_download_fw(ar);
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
|
|
++ "boot did not find DT entry, try OTP next: %d\n",
|
|
++ ret);
|
|
++
|
|
++ ret = ath10k_download_and_run_otp(ar);
|
|
+ if (ret) {
|
|
+- ath10k_err("failed to download firmware: %d\n", ret);
|
|
++ ath10k_err(ar, "failed to run otp: %d\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+- return ret;
|
|
++ ar->cal_mode = ATH10K_CAL_MODE_OTP;
|
|
++
|
|
++done:
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n",
|
|
++ ath10k_cal_mode_str(ar->cal_mode));
|
|
++ return 0;
|
|
+ }
|
|
+
|
|
+ static int ath10k_init_uart(struct ath10k *ar)
|
|
+@@ -616,33 +812,33 @@ static int ath10k_init_uart(struct ath10
|
|
+ */
|
|
+ ret = ath10k_bmi_write32(ar, hi_serial_enable, 0);
|
|
+ if (ret) {
|
|
+- ath10k_warn("could not disable UART prints (%d)\n", ret);
|
|
++ ath10k_warn(ar, "could not disable UART prints (%d)\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ if (!uart_print)
|
|
+ return 0;
|
|
+
|
|
+- ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7);
|
|
++ ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, ar->hw_params.uart_pin);
|
|
+ if (ret) {
|
|
+- ath10k_warn("could not enable UART prints (%d)\n", ret);
|
|
++ ath10k_warn(ar, "could not enable UART prints (%d)\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = ath10k_bmi_write32(ar, hi_serial_enable, 1);
|
|
+ if (ret) {
|
|
+- ath10k_warn("could not enable UART prints (%d)\n", ret);
|
|
++ ath10k_warn(ar, "could not enable UART prints (%d)\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ /* Set the UART baud rate to 19200. */
|
|
+ ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200);
|
|
+ if (ret) {
|
|
+- ath10k_warn("could not set the baud rate (%d)\n", ret);
|
|
++ ath10k_warn(ar, "could not set the baud rate (%d)\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+- ath10k_info("UART prints enabled\n");
|
|
++ ath10k_info(ar, "UART prints enabled\n");
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+@@ -659,14 +855,14 @@ static int ath10k_init_hw_params(struct
|
|
+ }
|
|
+
|
|
+ if (i == ARRAY_SIZE(ath10k_hw_params_list)) {
|
|
+- ath10k_err("Unsupported hardware version: 0x%x\n",
|
|
++ ath10k_err(ar, "Unsupported hardware version: 0x%x\n",
|
|
+ ar->target_version);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ ar->hw_params = *hw_params;
|
|
+
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n",
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n",
|
|
+ ar->hw_params.name, ar->target_version);
|
|
+
|
|
+ return 0;
|
|
+@@ -676,101 +872,124 @@ static void ath10k_core_restart(struct w
|
|
+ {
|
|
+ struct ath10k *ar = container_of(work, struct ath10k, restart_work);
|
|
+
|
|
++ set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
|
|
++
|
|
++ /* Place a barrier to make sure the compiler doesn't reorder
|
|
++ * CRASH_FLUSH and calling other functions.
|
|
++ */
|
|
++ barrier();
|
|
++
|
|
++ ieee80211_stop_queues(ar->hw);
|
|
++ ath10k_drain_tx(ar);
|
|
++ complete_all(&ar->scan.started);
|
|
++ complete_all(&ar->scan.completed);
|
|
++ complete_all(&ar->scan.on_channel);
|
|
++ complete_all(&ar->offchan_tx_completed);
|
|
++ complete_all(&ar->install_key_done);
|
|
++ complete_all(&ar->vdev_setup_done);
|
|
++ complete_all(&ar->thermal.wmi_sync);
|
|
++ wake_up(&ar->htt.empty_tx_wq);
|
|
++ wake_up(&ar->wmi.tx_credits_wq);
|
|
++ wake_up(&ar->peer_mapping_wq);
|
|
++
|
|
+ mutex_lock(&ar->conf_mutex);
|
|
+
|
|
+ switch (ar->state) {
|
|
+ case ATH10K_STATE_ON:
|
|
+ ar->state = ATH10K_STATE_RESTARTING;
|
|
+- ath10k_halt(ar);
|
|
++ ath10k_hif_stop(ar);
|
|
++ ath10k_scan_finish(ar);
|
|
+ ieee80211_restart_hw(ar->hw);
|
|
+ break;
|
|
+ case ATH10K_STATE_OFF:
|
|
+ /* this can happen if driver is being unloaded
|
|
+ * or if the crash happens during FW probing */
|
|
+- ath10k_warn("cannot restart a device that hasn't been started\n");
|
|
++ ath10k_warn(ar, "cannot restart a device that hasn't been started\n");
|
|
+ break;
|
|
+ case ATH10K_STATE_RESTARTING:
|
|
++ /* hw restart might be requested from multiple places */
|
|
++ break;
|
|
+ case ATH10K_STATE_RESTARTED:
|
|
+ ar->state = ATH10K_STATE_WEDGED;
|
|
+ /* fall through */
|
|
+ case ATH10K_STATE_WEDGED:
|
|
+- ath10k_warn("device is wedged, will not restart\n");
|
|
++ ath10k_warn(ar, "device is wedged, will not restart\n");
|
|
++ break;
|
|
++ case ATH10K_STATE_UTF:
|
|
++ ath10k_warn(ar, "firmware restart in UTF mode not supported\n");
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&ar->conf_mutex);
|
|
+ }
|
|
+
|
|
+-struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
|
|
+- const struct ath10k_hif_ops *hif_ops)
|
|
++static int ath10k_core_init_firmware_features(struct ath10k *ar)
|
|
+ {
|
|
+- struct ath10k *ar;
|
|
+-
|
|
+- ar = ath10k_mac_create();
|
|
+- if (!ar)
|
|
+- return NULL;
|
|
+-
|
|
+- ar->ath_common.priv = ar;
|
|
+- ar->ath_common.hw = ar->hw;
|
|
+-
|
|
+- ar->p2p = !!ath10k_p2p;
|
|
+- ar->dev = dev;
|
|
+-
|
|
+- ar->hif.priv = hif_priv;
|
|
+- ar->hif.ops = hif_ops;
|
|
+-
|
|
+- init_completion(&ar->scan.started);
|
|
+- init_completion(&ar->scan.completed);
|
|
+- init_completion(&ar->scan.on_channel);
|
|
+- init_completion(&ar->target_suspend);
|
|
+-
|
|
+- init_completion(&ar->install_key_done);
|
|
+- init_completion(&ar->vdev_setup_done);
|
|
+-
|
|
+- setup_timer(&ar->scan.timeout, ath10k_reset_scan, (unsigned long)ar);
|
|
+-
|
|
+- ar->workqueue = create_singlethread_workqueue("ath10k_wq");
|
|
+- if (!ar->workqueue)
|
|
+- goto err_wq;
|
|
+-
|
|
+- mutex_init(&ar->conf_mutex);
|
|
+- spin_lock_init(&ar->data_lock);
|
|
+-
|
|
+- INIT_LIST_HEAD(&ar->peers);
|
|
+- init_waitqueue_head(&ar->peer_mapping_wq);
|
|
+-
|
|
+- init_completion(&ar->offchan_tx_completed);
|
|
+- INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
|
|
+- skb_queue_head_init(&ar->offchan_tx_queue);
|
|
+-
|
|
+- INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
|
|
+- skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
|
|
+-
|
|
+- INIT_WORK(&ar->restart_work, ath10k_core_restart);
|
|
++ if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) &&
|
|
++ !test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
|
|
++ ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well");
|
|
++ return -EINVAL;
|
|
++ }
|
|
+
|
|
+- return ar;
|
|
++ if (ar->wmi.op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) {
|
|
++ ath10k_err(ar, "unsupported WMI OP version (max %d): %d\n",
|
|
++ ATH10K_FW_WMI_OP_VERSION_MAX, ar->wmi.op_version);
|
|
++ return -EINVAL;
|
|
++ }
|
|
+
|
|
+-err_wq:
|
|
+- ath10k_mac_destroy(ar);
|
|
+- return NULL;
|
|
+-}
|
|
+-EXPORT_SYMBOL(ath10k_core_create);
|
|
++ /* Backwards compatibility for firmwares without
|
|
++ * ATH10K_FW_IE_WMI_OP_VERSION.
|
|
++ */
|
|
++ if (ar->wmi.op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) {
|
|
++ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
|
|
++ if (test_bit(ATH10K_FW_FEATURE_WMI_10_2,
|
|
++ ar->fw_features))
|
|
++ ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_2;
|
|
++ else
|
|
++ ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
|
|
++ } else {
|
|
++ ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_MAIN;
|
|
++ }
|
|
++ }
|
|
+
|
|
+-void ath10k_core_destroy(struct ath10k *ar)
|
|
+-{
|
|
+- flush_workqueue(ar->workqueue);
|
|
+- destroy_workqueue(ar->workqueue);
|
|
++ switch (ar->wmi.op_version) {
|
|
++ case ATH10K_FW_WMI_OP_VERSION_MAIN:
|
|
++ ar->max_num_peers = TARGET_NUM_PEERS;
|
|
++ ar->max_num_stations = TARGET_NUM_STATIONS;
|
|
++ ar->max_num_vdevs = TARGET_NUM_VDEVS;
|
|
++ ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
|
|
++ break;
|
|
++ case ATH10K_FW_WMI_OP_VERSION_10_1:
|
|
++ case ATH10K_FW_WMI_OP_VERSION_10_2:
|
|
++ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
|
|
++ ar->max_num_peers = TARGET_10X_NUM_PEERS;
|
|
++ ar->max_num_stations = TARGET_10X_NUM_STATIONS;
|
|
++ ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
|
|
++ ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
|
|
++ break;
|
|
++ case ATH10K_FW_WMI_OP_VERSION_TLV:
|
|
++ ar->max_num_peers = TARGET_TLV_NUM_PEERS;
|
|
++ ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
|
|
++ ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
|
|
++ ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
|
|
++ break;
|
|
++ case ATH10K_FW_WMI_OP_VERSION_UNSET:
|
|
++ case ATH10K_FW_WMI_OP_VERSION_MAX:
|
|
++ WARN_ON(1);
|
|
++ return -EINVAL;
|
|
++ }
|
|
+
|
|
+- ath10k_mac_destroy(ar);
|
|
++ return 0;
|
|
+ }
|
|
+-EXPORT_SYMBOL(ath10k_core_destroy);
|
|
+
|
|
+-int ath10k_core_start(struct ath10k *ar)
|
|
++int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
|
|
+ {
|
|
+ int status;
|
|
+
|
|
+ lockdep_assert_held(&ar->conf_mutex);
|
|
+
|
|
++ clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
|
|
++
|
|
+ ath10k_bmi_start(ar);
|
|
+
|
|
+ if (ath10k_init_configure_target(ar)) {
|
|
+@@ -778,7 +997,11 @@ int ath10k_core_start(struct ath10k *ar)
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+- status = ath10k_init_download_firmware(ar);
|
|
++ status = ath10k_download_cal_data(ar);
|
|
++ if (status)
|
|
++ goto err;
|
|
++
|
|
++ status = ath10k_download_fw(ar, mode);
|
|
+ if (status)
|
|
+ goto err;
|
|
+
|
|
+@@ -791,7 +1014,7 @@ int ath10k_core_start(struct ath10k *ar)
|
|
+
|
|
+ status = ath10k_htc_init(ar);
|
|
+ if (status) {
|
|
+- ath10k_err("could not init HTC (%d)\n", status);
|
|
++ ath10k_err(ar, "could not init HTC (%d)\n", status);
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+@@ -801,79 +1024,123 @@ int ath10k_core_start(struct ath10k *ar)
|
|
+
|
|
+ status = ath10k_wmi_attach(ar);
|
|
+ if (status) {
|
|
+- ath10k_err("WMI attach failed: %d\n", status);
|
|
++ ath10k_err(ar, "WMI attach failed: %d\n", status);
|
|
+ goto err;
|
|
+ }
|
|
+
|
|
+- status = ath10k_hif_start(ar);
|
|
++ status = ath10k_htt_init(ar);
|
|
++ if (status) {
|
|
++ ath10k_err(ar, "failed to init htt: %d\n", status);
|
|
++ goto err_wmi_detach;
|
|
++ }
|
|
++
|
|
++ status = ath10k_htt_tx_alloc(&ar->htt);
|
|
+ if (status) {
|
|
+- ath10k_err("could not start HIF: %d\n", status);
|
|
++ ath10k_err(ar, "failed to alloc htt tx: %d\n", status);
|
|
+ goto err_wmi_detach;
|
|
+ }
|
|
+
|
|
++ status = ath10k_htt_rx_alloc(&ar->htt);
|
|
++ if (status) {
|
|
++ ath10k_err(ar, "failed to alloc htt rx: %d\n", status);
|
|
++ goto err_htt_tx_detach;
|
|
++ }
|
|
++
|
|
++ status = ath10k_hif_start(ar);
|
|
++ if (status) {
|
|
++ ath10k_err(ar, "could not start HIF: %d\n", status);
|
|
++ goto err_htt_rx_detach;
|
|
++ }
|
|
++
|
|
+ status = ath10k_htc_wait_target(&ar->htc);
|
|
+ if (status) {
|
|
+- ath10k_err("failed to connect to HTC: %d\n", status);
|
|
++ ath10k_err(ar, "failed to connect to HTC: %d\n", status);
|
|
+ goto err_hif_stop;
|
|
+ }
|
|
+
|
|
+- status = ath10k_htt_attach(ar);
|
|
++ if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
|
|
++ status = ath10k_htt_connect(&ar->htt);
|
|
++ if (status) {
|
|
++ ath10k_err(ar, "failed to connect htt (%d)\n", status);
|
|
++ goto err_hif_stop;
|
|
++ }
|
|
++ }
|
|
++
|
|
++ status = ath10k_wmi_connect(ar);
|
|
+ if (status) {
|
|
+- ath10k_err("could not attach htt (%d)\n", status);
|
|
++ ath10k_err(ar, "could not connect wmi: %d\n", status);
|
|
+ goto err_hif_stop;
|
|
+ }
|
|
+
|
|
+- status = ath10k_init_connect_htc(ar);
|
|
+- if (status)
|
|
+- goto err_htt_detach;
|
|
++ status = ath10k_htc_start(&ar->htc);
|
|
++ if (status) {
|
|
++ ath10k_err(ar, "failed to start htc: %d\n", status);
|
|
++ goto err_hif_stop;
|
|
++ }
|
|
+
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT, "firmware %s booted\n",
|
|
++ if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
|
|
++ status = ath10k_wmi_wait_for_service_ready(ar);
|
|
++ if (status <= 0) {
|
|
++ ath10k_warn(ar, "wmi service ready event not received");
|
|
++ status = -ETIMEDOUT;
|
|
++ goto err_hif_stop;
|
|
++ }
|
|
++ }
|
|
++
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
|
|
+ ar->hw->wiphy->fw_version);
|
|
+
|
|
+ status = ath10k_wmi_cmd_init(ar);
|
|
+ if (status) {
|
|
+- ath10k_err("could not send WMI init command (%d)\n", status);
|
|
+- goto err_disconnect_htc;
|
|
++ ath10k_err(ar, "could not send WMI init command (%d)\n",
|
|
++ status);
|
|
++ goto err_hif_stop;
|
|
+ }
|
|
+
|
|
+ status = ath10k_wmi_wait_for_unified_ready(ar);
|
|
+ if (status <= 0) {
|
|
+- ath10k_err("wmi unified ready event not received\n");
|
|
++ ath10k_err(ar, "wmi unified ready event not received\n");
|
|
+ status = -ETIMEDOUT;
|
|
+- goto err_disconnect_htc;
|
|
++ goto err_hif_stop;
|
|
+ }
|
|
+
|
|
+- status = ath10k_htt_attach_target(&ar->htt);
|
|
+- if (status)
|
|
+- goto err_disconnect_htc;
|
|
++ /* If firmware indicates Full Rx Reorder support it must be used in a
|
|
++ * slightly different manner. Let HTT code know.
|
|
++ */
|
|
++ ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
|
|
++ ar->wmi.svc_map));
|
|
++
|
|
++ status = ath10k_htt_rx_ring_refill(ar);
|
|
++ if (status) {
|
|
++ ath10k_err(ar, "failed to refill htt rx ring: %d\n", status);
|
|
++ goto err_hif_stop;
|
|
++ }
|
|
++
|
|
++ /* we don't care about HTT in UTF mode */
|
|
++ if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
|
|
++ status = ath10k_htt_setup(&ar->htt);
|
|
++ if (status) {
|
|
++ ath10k_err(ar, "failed to setup htt: %d\n", status);
|
|
++ goto err_hif_stop;
|
|
++ }
|
|
++ }
|
|
+
|
|
+ status = ath10k_debug_start(ar);
|
|
+ if (status)
|
|
+- goto err_disconnect_htc;
|
|
+-
|
|
+- ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
|
|
+- INIT_LIST_HEAD(&ar->arvifs);
|
|
++ goto err_hif_stop;
|
|
+
|
|
+- if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
|
|
+- ath10k_info("%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n",
|
|
+- ar->hw_params.name,
|
|
+- ar->target_version,
|
|
+- ar->chip_id,
|
|
+- ar->hw->wiphy->fw_version,
|
|
+- ar->fw_api,
|
|
+- ar->htt.target_version_major,
|
|
+- ar->htt.target_version_minor);
|
|
++ ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
|
|
+
|
|
+- __set_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags);
|
|
++ INIT_LIST_HEAD(&ar->arvifs);
|
|
+
|
|
+ return 0;
|
|
+
|
|
+-err_disconnect_htc:
|
|
+- ath10k_htc_stop(&ar->htc);
|
|
+-err_htt_detach:
|
|
+- ath10k_htt_detach(&ar->htt);
|
|
+ err_hif_stop:
|
|
+ ath10k_hif_stop(ar);
|
|
++err_htt_rx_detach:
|
|
++ ath10k_htt_rx_free(&ar->htt);
|
|
++err_htt_tx_detach:
|
|
++ ath10k_htt_tx_free(&ar->htt);
|
|
+ err_wmi_detach:
|
|
+ ath10k_wmi_detach(ar);
|
|
+ err:
|
|
+@@ -889,14 +1156,14 @@ int ath10k_wait_for_suspend(struct ath10
|
|
+
|
|
+ ret = ath10k_wmi_pdev_suspend_target(ar, suspend_opt);
|
|
+ if (ret) {
|
|
+- ath10k_warn("could not suspend target (%d)\n", ret);
|
|
++ ath10k_warn(ar, "could not suspend target (%d)\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
|
|
+
|
|
+ if (ret == 0) {
|
|
+- ath10k_warn("suspend timed out - target pause event never came\n");
|
|
++ ath10k_warn(ar, "suspend timed out - target pause event never came\n");
|
|
+ return -ETIMEDOUT;
|
|
+ }
|
|
+
|
|
+@@ -908,12 +1175,14 @@ void ath10k_core_stop(struct ath10k *ar)
|
|
+ lockdep_assert_held(&ar->conf_mutex);
|
|
+
|
|
+ /* try to suspend target */
|
|
+- if (ar->state != ATH10K_STATE_RESTARTING)
|
|
++ if (ar->state != ATH10K_STATE_RESTARTING &&
|
|
++ ar->state != ATH10K_STATE_UTF)
|
|
+ ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
|
|
+
|
|
+ ath10k_debug_stop(ar);
|
|
+- ath10k_htc_stop(&ar->htc);
|
|
+- ath10k_htt_detach(&ar->htt);
|
|
++ ath10k_hif_stop(ar);
|
|
++ ath10k_htt_tx_free(&ar->htt);
|
|
++ ath10k_htt_rx_free(&ar->htt);
|
|
+ ath10k_wmi_detach(ar);
|
|
+ }
|
|
+ EXPORT_SYMBOL(ath10k_core_stop);
|
|
+@@ -929,16 +1198,15 @@ static int ath10k_core_probe_fw(struct a
|
|
+
|
|
+ ret = ath10k_hif_power_up(ar);
|
|
+ if (ret) {
|
|
+- ath10k_err("could not start pci hif (%d)\n", ret);
|
|
++ ath10k_err(ar, "could not start pci hif (%d)\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ memset(&target_info, 0, sizeof(target_info));
|
|
+ ret = ath10k_bmi_get_target_info(ar, &target_info);
|
|
+ if (ret) {
|
|
+- ath10k_err("could not get target info (%d)\n", ret);
|
|
+- ath10k_hif_power_down(ar);
|
|
+- return ret;
|
|
++ ath10k_err(ar, "could not get target info (%d)\n", ret);
|
|
++ goto err_power_down;
|
|
+ }
|
|
+
|
|
+ ar->target_version = target_info.version;
|
|
+@@ -946,118 +1214,233 @@ static int ath10k_core_probe_fw(struct a
|
|
+
|
|
+ ret = ath10k_init_hw_params(ar);
|
|
+ if (ret) {
|
|
+- ath10k_err("could not get hw params (%d)\n", ret);
|
|
+- ath10k_hif_power_down(ar);
|
|
+- return ret;
|
|
++ ath10k_err(ar, "could not get hw params (%d)\n", ret);
|
|
++ goto err_power_down;
|
|
+ }
|
|
+
|
|
+ ret = ath10k_core_fetch_firmware_files(ar);
|
|
+ if (ret) {
|
|
+- ath10k_err("could not fetch firmware files (%d)\n", ret);
|
|
+- ath10k_hif_power_down(ar);
|
|
+- return ret;
|
|
++ ath10k_err(ar, "could not fetch firmware files (%d)\n", ret);
|
|
++ goto err_power_down;
|
|
++ }
|
|
++
|
|
++ ret = ath10k_core_init_firmware_features(ar);
|
|
++ if (ret) {
|
|
++ ath10k_err(ar, "fatal problem with firmware features: %d\n",
|
|
++ ret);
|
|
++ goto err_free_firmware_files;
|
|
+ }
|
|
+
|
|
+ mutex_lock(&ar->conf_mutex);
|
|
+
|
|
+- ret = ath10k_core_start(ar);
|
|
++ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
|
|
+ if (ret) {
|
|
+- ath10k_err("could not init core (%d)\n", ret);
|
|
+- ath10k_core_free_firmware_files(ar);
|
|
+- ath10k_hif_power_down(ar);
|
|
+- mutex_unlock(&ar->conf_mutex);
|
|
+- return ret;
|
|
++ ath10k_err(ar, "could not init core (%d)\n", ret);
|
|
++ goto err_unlock;
|
|
+ }
|
|
+
|
|
++ ath10k_print_driver_info(ar);
|
|
+ ath10k_core_stop(ar);
|
|
+
|
|
+ mutex_unlock(&ar->conf_mutex);
|
|
+
|
|
+ ath10k_hif_power_down(ar);
|
|
+ return 0;
|
|
+-}
|
|
+-
|
|
+-static int ath10k_core_check_chip_id(struct ath10k *ar)
|
|
+-{
|
|
+- u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV);
|
|
+-
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n",
|
|
+- ar->chip_id, hw_revision);
|
|
+
|
|
+- /* Check that we are not using hw1.0 (some of them have same pci id
|
|
+- * as hw2.0) before doing anything else as ath10k crashes horribly
|
|
+- * due to missing hw1.0 workarounds. */
|
|
+- switch (hw_revision) {
|
|
+- case QCA988X_HW_1_0_CHIP_ID_REV:
|
|
+- ath10k_err("ERROR: qca988x hw1.0 is not supported\n");
|
|
+- return -EOPNOTSUPP;
|
|
++err_unlock:
|
|
++ mutex_unlock(&ar->conf_mutex);
|
|
+
|
|
+- case QCA988X_HW_2_0_CHIP_ID_REV:
|
|
+- /* known hardware revision, continue normally */
|
|
+- return 0;
|
|
++err_free_firmware_files:
|
|
++ ath10k_core_free_firmware_files(ar);
|
|
+
|
|
+- default:
|
|
+- ath10k_warn("Warning: hardware revision unknown (0x%x), expect problems\n",
|
|
+- ar->chip_id);
|
|
+- return 0;
|
|
+- }
|
|
++err_power_down:
|
|
++ ath10k_hif_power_down(ar);
|
|
+
|
|
+- return 0;
|
|
++ return ret;
|
|
+ }
|
|
+
|
|
+-int ath10k_core_register(struct ath10k *ar, u32 chip_id)
|
|
++static void ath10k_core_register_work(struct work_struct *work)
|
|
+ {
|
|
++ struct ath10k *ar = container_of(work, struct ath10k, register_work);
|
|
+ int status;
|
|
+
|
|
+- ar->chip_id = chip_id;
|
|
+-
|
|
+- status = ath10k_core_check_chip_id(ar);
|
|
+- if (status) {
|
|
+- ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
|
|
+- return status;
|
|
+- }
|
|
+-
|
|
+ status = ath10k_core_probe_fw(ar);
|
|
+ if (status) {
|
|
+- ath10k_err("could not probe fw (%d)\n", status);
|
|
+- return status;
|
|
++ ath10k_err(ar, "could not probe fw (%d)\n", status);
|
|
++ goto err;
|
|
+ }
|
|
+
|
|
+ status = ath10k_mac_register(ar);
|
|
+ if (status) {
|
|
+- ath10k_err("could not register to mac80211 (%d)\n", status);
|
|
++ ath10k_err(ar, "could not register to mac80211 (%d)\n", status);
|
|
+ goto err_release_fw;
|
|
+ }
|
|
+
|
|
+- status = ath10k_debug_create(ar);
|
|
++ status = ath10k_debug_register(ar);
|
|
+ if (status) {
|
|
+- ath10k_err("unable to initialize debugfs\n");
|
|
++ ath10k_err(ar, "unable to initialize debugfs\n");
|
|
+ goto err_unregister_mac;
|
|
+ }
|
|
+
|
|
+- return 0;
|
|
++ status = ath10k_spectral_create(ar);
|
|
++ if (status) {
|
|
++ ath10k_err(ar, "failed to initialize spectral\n");
|
|
++ goto err_debug_destroy;
|
|
++ }
|
|
+
|
|
++ status = ath10k_thermal_register(ar);
|
|
++ if (status) {
|
|
++ ath10k_err(ar, "could not register thermal device: %d\n",
|
|
++ status);
|
|
++ goto err_spectral_destroy;
|
|
++ }
|
|
++
|
|
++ set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags);
|
|
++ return;
|
|
++
|
|
++err_spectral_destroy:
|
|
++ ath10k_spectral_destroy(ar);
|
|
++err_debug_destroy:
|
|
++ ath10k_debug_destroy(ar);
|
|
+ err_unregister_mac:
|
|
+ ath10k_mac_unregister(ar);
|
|
+ err_release_fw:
|
|
+ ath10k_core_free_firmware_files(ar);
|
|
+- return status;
|
|
++err:
|
|
++ /* TODO: It's probably a good idea to release device from the driver
|
|
++ * but calling device_release_driver() here will cause a deadlock.
|
|
++ */
|
|
++ return;
|
|
++}
|
|
++
|
|
++int ath10k_core_register(struct ath10k *ar, u32 chip_id)
|
|
++{
|
|
++ ar->chip_id = chip_id;
|
|
++ queue_work(ar->workqueue, &ar->register_work);
|
|
++
|
|
++ return 0;
|
|
+ }
|
|
+ EXPORT_SYMBOL(ath10k_core_register);
|
|
+
|
|
+ void ath10k_core_unregister(struct ath10k *ar)
|
|
+ {
|
|
++ cancel_work_sync(&ar->register_work);
|
|
++
|
|
++ if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
|
|
++ return;
|
|
++
|
|
++ ath10k_thermal_unregister(ar);
|
|
++ /* Stop spectral before unregistering from mac80211 to remove the
|
|
++ * relayfs debugfs file cleanly. Otherwise the parent debugfs tree
|
|
++ * would be already be free'd recursively, leading to a double free.
|
|
++ */
|
|
++ ath10k_spectral_destroy(ar);
|
|
++
|
|
+ /* We must unregister from mac80211 before we stop HTC and HIF.
|
|
+ * Otherwise we will fail to submit commands to FW and mac80211 will be
|
|
+ * unhappy about callback failures. */
|
|
+ ath10k_mac_unregister(ar);
|
|
+
|
|
++ ath10k_testmode_destroy(ar);
|
|
++
|
|
+ ath10k_core_free_firmware_files(ar);
|
|
+
|
|
+- ath10k_debug_destroy(ar);
|
|
++ ath10k_debug_unregister(ar);
|
|
+ }
|
|
+ EXPORT_SYMBOL(ath10k_core_unregister);
|
|
+
|
|
++struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
|
|
++ enum ath10k_bus bus,
|
|
++ enum ath10k_hw_rev hw_rev,
|
|
++ const struct ath10k_hif_ops *hif_ops)
|
|
++{
|
|
++ struct ath10k *ar;
|
|
++ int ret;
|
|
++
|
|
++ ar = ath10k_mac_create(priv_size);
|
|
++ if (!ar)
|
|
++ return NULL;
|
|
++
|
|
++ ar->ath_common.priv = ar;
|
|
++ ar->ath_common.hw = ar->hw;
|
|
++ ar->dev = dev;
|
|
++ ar->hw_rev = hw_rev;
|
|
++ ar->hif.ops = hif_ops;
|
|
++ ar->hif.bus = bus;
|
|
++
|
|
++ switch (hw_rev) {
|
|
++ case ATH10K_HW_QCA988X:
|
|
++ ar->regs = &qca988x_regs;
|
|
++ break;
|
|
++ case ATH10K_HW_QCA6174:
|
|
++ ar->regs = &qca6174_regs;
|
|
++ break;
|
|
++ default:
|
|
++ ath10k_err(ar, "unsupported core hardware revision %d\n",
|
|
++ hw_rev);
|
|
++ ret = -ENOTSUPP;
|
|
++ goto err_free_mac;
|
|
++ }
|
|
++
|
|
++ init_completion(&ar->scan.started);
|
|
++ init_completion(&ar->scan.completed);
|
|
++ init_completion(&ar->scan.on_channel);
|
|
++ init_completion(&ar->target_suspend);
|
|
++
|
|
++ init_completion(&ar->install_key_done);
|
|
++ init_completion(&ar->vdev_setup_done);
|
|
++ init_completion(&ar->thermal.wmi_sync);
|
|
++
|
|
++ INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work);
|
|
++
|
|
++ ar->workqueue = create_singlethread_workqueue("ath10k_wq");
|
|
++ if (!ar->workqueue)
|
|
++ goto err_free_mac;
|
|
++
|
|
++ mutex_init(&ar->conf_mutex);
|
|
++ spin_lock_init(&ar->data_lock);
|
|
++
|
|
++ INIT_LIST_HEAD(&ar->peers);
|
|
++ init_waitqueue_head(&ar->peer_mapping_wq);
|
|
++ init_waitqueue_head(&ar->htt.empty_tx_wq);
|
|
++ init_waitqueue_head(&ar->wmi.tx_credits_wq);
|
|
++
|
|
++ init_completion(&ar->offchan_tx_completed);
|
|
++ INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
|
|
++ skb_queue_head_init(&ar->offchan_tx_queue);
|
|
++
|
|
++ INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
|
|
++ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
|
|
++
|
|
++ INIT_WORK(&ar->register_work, ath10k_core_register_work);
|
|
++ INIT_WORK(&ar->restart_work, ath10k_core_restart);
|
|
++
|
|
++ ret = ath10k_debug_create(ar);
|
|
++ if (ret)
|
|
++ goto err_free_wq;
|
|
++
|
|
++ return ar;
|
|
++
|
|
++err_free_wq:
|
|
++ destroy_workqueue(ar->workqueue);
|
|
++
|
|
++err_free_mac:
|
|
++ ath10k_mac_destroy(ar);
|
|
++
|
|
++ return NULL;
|
|
++}
|
|
++EXPORT_SYMBOL(ath10k_core_create);
|
|
++
|
|
++void ath10k_core_destroy(struct ath10k *ar)
|
|
++{
|
|
++ flush_workqueue(ar->workqueue);
|
|
++ destroy_workqueue(ar->workqueue);
|
|
++
|
|
++ ath10k_debug_destroy(ar);
|
|
++ ath10k_mac_destroy(ar);
|
|
++}
|
|
++EXPORT_SYMBOL(ath10k_core_destroy);
|
|
++
|
|
+ MODULE_AUTHOR("Qualcomm Atheros");
|
|
+ MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
|
|
+ MODULE_LICENSE("Dual BSD/GPL");
|
|
+--- a/drivers/net/wireless/ath/ath10k/core.h
|
|
++++ b/drivers/net/wireless/ath/ath10k/core.h
|
|
+@@ -22,6 +22,8 @@
|
|
+ #include <linux/if_ether.h>
|
|
+ #include <linux/types.h>
|
|
+ #include <linux/pci.h>
|
|
++#include <linux/uuid.h>
|
|
++#include <linux/time.h>
|
|
+
|
|
+ #include "htt.h"
|
|
+ #include "htc.h"
|
|
+@@ -31,6 +33,8 @@
|
|
+ #include "../ath.h"
|
|
+ #include "../regd.h"
|
|
+ #include "../dfs_pattern_detector.h"
|
|
++#include "spectral.h"
|
|
++#include "thermal.h"
|
|
+
|
|
+ #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
|
|
+ #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
|
|
+@@ -60,12 +64,28 @@
|
|
+
|
|
+ struct ath10k;
|
|
+
|
|
++enum ath10k_bus {
|
|
++ ATH10K_BUS_PCI,
|
|
++};
|
|
++
|
|
++static inline const char *ath10k_bus_str(enum ath10k_bus bus)
|
|
++{
|
|
++ switch (bus) {
|
|
++ case ATH10K_BUS_PCI:
|
|
++ return "pci";
|
|
++ }
|
|
++
|
|
++ return "unknown";
|
|
++}
|
|
++
|
|
+ struct ath10k_skb_cb {
|
|
+ dma_addr_t paddr;
|
|
++ u8 eid;
|
|
+ u8 vdev_id;
|
|
+
|
|
+ struct {
|
|
+ u8 tid;
|
|
++ u16 freq;
|
|
+ bool is_offchan;
|
|
+ struct ath10k_htt_txbuf *txbuf;
|
|
+ u32 txbuf_paddr;
|
|
+@@ -77,6 +97,11 @@ struct ath10k_skb_cb {
|
|
+ } bcn;
|
|
+ } __packed;
|
|
+
|
|
++struct ath10k_skb_rxcb {
|
|
++ dma_addr_t paddr;
|
|
++ struct hlist_node hlist;
|
|
++};
|
|
++
|
|
+ static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
|
|
+ {
|
|
+ BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) >
|
|
+@@ -84,6 +109,15 @@ static inline struct ath10k_skb_cb *ATH1
|
|
+ return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
|
|
+ }
|
|
+
|
|
++static inline struct ath10k_skb_rxcb *ATH10K_SKB_RXCB(struct sk_buff *skb)
|
|
++{
|
|
++ BUILD_BUG_ON(sizeof(struct ath10k_skb_rxcb) > sizeof(skb->cb));
|
|
++ return (struct ath10k_skb_rxcb *)skb->cb;
|
|
++}
|
|
++
|
|
++#define ATH10K_RXCB_SKB(rxcb) \
|
|
++ container_of((void *)rxcb, struct sk_buff, cb)
|
|
++
|
|
+ static inline u32 host_interest_item_address(u32 item_offset)
|
|
+ {
|
|
+ return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
|
|
+@@ -93,8 +127,6 @@ struct ath10k_bmi {
|
|
+ bool done_sent;
|
|
+ };
|
|
+
|
|
+-#define ATH10K_MAX_MEM_REQS 16
|
|
+-
|
|
+ struct ath10k_mem_chunk {
|
|
+ void *vaddr;
|
|
+ dma_addr_t paddr;
|
|
+@@ -103,26 +135,52 @@ struct ath10k_mem_chunk {
|
|
+ };
|
|
+
|
|
+ struct ath10k_wmi {
|
|
++ enum ath10k_fw_wmi_op_version op_version;
|
|
+ enum ath10k_htc_ep_id eid;
|
|
+ struct completion service_ready;
|
|
+ struct completion unified_ready;
|
|
+ wait_queue_head_t tx_credits_wq;
|
|
++ DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX);
|
|
+ struct wmi_cmd_map *cmd;
|
|
+ struct wmi_vdev_param_map *vdev_param;
|
|
+ struct wmi_pdev_param_map *pdev_param;
|
|
++ const struct wmi_ops *ops;
|
|
+
|
|
+ u32 num_mem_chunks;
|
|
+- struct ath10k_mem_chunk mem_chunks[ATH10K_MAX_MEM_REQS];
|
|
++ struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
|
|
+ };
|
|
+
|
|
+-struct ath10k_peer_stat {
|
|
++struct ath10k_fw_stats_peer {
|
|
++ struct list_head list;
|
|
++
|
|
+ u8 peer_macaddr[ETH_ALEN];
|
|
+ u32 peer_rssi;
|
|
+ u32 peer_tx_rate;
|
|
+ u32 peer_rx_rate; /* 10x only */
|
|
+ };
|
|
+
|
|
+-struct ath10k_target_stats {
|
|
++struct ath10k_fw_stats_vdev {
|
|
++ struct list_head list;
|
|
++
|
|
++ u32 vdev_id;
|
|
++ u32 beacon_snr;
|
|
++ u32 data_snr;
|
|
++ u32 num_tx_frames[4];
|
|
++ u32 num_rx_frames;
|
|
++ u32 num_tx_frames_retries[4];
|
|
++ u32 num_tx_frames_failures[4];
|
|
++ u32 num_rts_fail;
|
|
++ u32 num_rts_success;
|
|
++ u32 num_rx_err;
|
|
++ u32 num_rx_discard;
|
|
++ u32 num_tx_not_acked;
|
|
++ u32 tx_rate_history[10];
|
|
++ u32 beacon_rssi_history[10];
|
|
++};
|
|
++
|
|
++struct ath10k_fw_stats_pdev {
|
|
++ struct list_head list;
|
|
++
|
|
+ /* PDEV stats */
|
|
+ s32 ch_noise_floor;
|
|
+ u32 tx_frame_count;
|
|
+@@ -177,15 +235,12 @@ struct ath10k_target_stats {
|
|
+ s32 phy_errs;
|
|
+ s32 phy_err_drop;
|
|
+ s32 mpdu_errs;
|
|
++};
|
|
+
|
|
+- /* VDEV STATS */
|
|
+-
|
|
+- /* PEER STATS */
|
|
+- u8 peers;
|
|
+- struct ath10k_peer_stat peer_stat[TARGET_NUM_PEERS];
|
|
+-
|
|
+- /* TODO: Beacon filter stats */
|
|
+-
|
|
++struct ath10k_fw_stats {
|
|
++ struct list_head pdevs;
|
|
++ struct list_head vdevs;
|
|
++ struct list_head peers;
|
|
+ };
|
|
+
|
|
+ struct ath10k_dfs_stats {
|
|
+@@ -203,6 +258,8 @@ struct ath10k_peer {
|
|
+ int vdev_id;
|
|
+ u8 addr[ETH_ALEN];
|
|
+ DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
|
|
++
|
|
++ /* protected by ar->data_lock */
|
|
+ struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
|
|
+ };
|
|
+
|
|
+@@ -216,10 +273,21 @@ struct ath10k_sta {
|
|
+ u32 smps;
|
|
+
|
|
+ struct work_struct update_wk;
|
|
++
|
|
++#ifdef CPTCFG_MAC80211_DEBUGFS
|
|
++ /* protected by conf_mutex */
|
|
++ bool aggr_mode;
|
|
++#endif
|
|
+ };
|
|
+
|
|
+ #define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
|
|
+
|
|
++enum ath10k_beacon_state {
|
|
++ ATH10K_BEACON_SCHEDULED = 0,
|
|
++ ATH10K_BEACON_SENDING,
|
|
++ ATH10K_BEACON_SENT,
|
|
++};
|
|
++
|
|
+ struct ath10k_vif {
|
|
+ struct list_head list;
|
|
+
|
|
+@@ -230,20 +298,22 @@ struct ath10k_vif {
|
|
+ u32 dtim_period;
|
|
+ struct sk_buff *beacon;
|
|
+ /* protected by data_lock */
|
|
+- bool beacon_sent;
|
|
++ enum ath10k_beacon_state beacon_state;
|
|
++ void *beacon_buf;
|
|
++ dma_addr_t beacon_paddr;
|
|
+
|
|
+ struct ath10k *ar;
|
|
+ struct ieee80211_vif *vif;
|
|
+
|
|
+ bool is_started;
|
|
+ bool is_up;
|
|
++ bool spectral_enabled;
|
|
++ bool ps;
|
|
+ u32 aid;
|
|
+ u8 bssid[ETH_ALEN];
|
|
+
|
|
+- struct work_struct wep_key_work;
|
|
+ struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
|
|
+- u8 def_wep_key_idx;
|
|
+- u8 def_wep_key_newidx;
|
|
++ s8 def_wep_key_idx;
|
|
+
|
|
+ u16 tx_seq_no;
|
|
+
|
|
+@@ -269,6 +339,8 @@ struct ath10k_vif {
|
|
+ u8 force_sgi;
|
|
+ bool use_cts_prot;
|
|
+ int num_legacy_stations;
|
|
++ int txpower;
|
|
++ struct wmi_wmm_params_all_arg wmm_params;
|
|
+ };
|
|
+
|
|
+ struct ath10k_vif_iter {
|
|
+@@ -276,20 +348,38 @@ struct ath10k_vif_iter {
|
|
+ struct ath10k_vif *arvif;
|
|
+ };
|
|
+
|
|
++/* used for crash-dump storage, protected by data-lock */
|
|
++struct ath10k_fw_crash_data {
|
|
++ bool crashed_since_read;
|
|
++
|
|
++ uuid_le uuid;
|
|
++ struct timespec timestamp;
|
|
++ __le32 registers[REG_DUMP_COUNT_QCA988X];
|
|
++};
|
|
++
|
|
+ struct ath10k_debug {
|
|
+ struct dentry *debugfs_phy;
|
|
+
|
|
+- struct ath10k_target_stats target_stats;
|
|
+- u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
|
|
+-
|
|
+- struct completion event_stats_compl;
|
|
++ struct ath10k_fw_stats fw_stats;
|
|
++ struct completion fw_stats_complete;
|
|
++ bool fw_stats_done;
|
|
+
|
|
+ unsigned long htt_stats_mask;
|
|
+ struct delayed_work htt_stats_dwork;
|
|
+ struct ath10k_dfs_stats dfs_stats;
|
|
+ struct ath_dfs_pool_stats dfs_pool_stats;
|
|
+
|
|
++ /* protected by conf_mutex */
|
|
+ u32 fw_dbglog_mask;
|
|
++ u32 fw_dbglog_level;
|
|
++ u32 pktlog_filter;
|
|
++ u32 reg_addr;
|
|
++ u32 nf_cal_period;
|
|
++
|
|
++ u8 htt_max_amsdu;
|
|
++ u8 htt_max_ampdu;
|
|
++
|
|
++ struct ath10k_fw_crash_data *fw_crash_data;
|
|
+ };
|
|
+
|
|
+ enum ath10k_state {
|
|
+@@ -312,13 +402,24 @@ enum ath10k_state {
|
|
+ * prevents completion timeouts and makes the driver more responsive to
|
|
+ * userspace commands. This is also prevents recursive recovery. */
|
|
+ ATH10K_STATE_WEDGED,
|
|
++
|
|
++ /* factory tests */
|
|
++ ATH10K_STATE_UTF,
|
|
++};
|
|
++
|
|
++enum ath10k_firmware_mode {
|
|
++ /* the default mode, standard 802.11 functionality */
|
|
++ ATH10K_FIRMWARE_MODE_NORMAL,
|
|
++
|
|
++ /* factory tests etc */
|
|
++ ATH10K_FIRMWARE_MODE_UTF,
|
|
+ };
|
|
+
|
|
+ enum ath10k_fw_features {
|
|
+ /* wmi_mgmt_rx_hdr contains extra RSSI information */
|
|
+ ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
|
|
+
|
|
+- /* firmware from 10X branch */
|
|
++ /* Firmware from 10X branch. Deprecated, don't use in new code. */
|
|
+ ATH10K_FW_FEATURE_WMI_10X = 1,
|
|
+
|
|
+ /* firmware support tx frame management over WMI, otherwise it's HTT */
|
|
+@@ -327,6 +428,18 @@ enum ath10k_fw_features {
|
|
+ /* Firmware does not support P2P */
|
|
+ ATH10K_FW_FEATURE_NO_P2P = 3,
|
|
+
|
|
++ /* Firmware 10.2 feature bit. The ATH10K_FW_FEATURE_WMI_10X feature
|
|
++ * bit is required to be set as well. Deprecated, don't use in new
|
|
++ * code.
|
|
++ */
|
|
++ ATH10K_FW_FEATURE_WMI_10_2 = 4,
|
|
++
|
|
++ /* Some firmware revisions lack proper multi-interface client powersave
|
|
++ * implementation. Enabling PS could result in connection drops,
|
|
++ * traffic stalls, etc.
|
|
++ */
|
|
++ ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5,
|
|
++
|
|
+ /* keep last */
|
|
+ ATH10K_FW_FEATURE_COUNT,
|
|
+ };
|
|
+@@ -334,15 +447,64 @@ enum ath10k_fw_features {
|
|
+ enum ath10k_dev_flags {
|
|
+ /* Indicates that ath10k device is during CAC phase of DFS */
|
|
+ ATH10K_CAC_RUNNING,
|
|
+- ATH10K_FLAG_FIRST_BOOT_DONE,
|
|
++ ATH10K_FLAG_CORE_REGISTERED,
|
|
++
|
|
++ /* Device has crashed and needs to restart. This indicates any pending
|
|
++ * waiters should immediately cancel instead of waiting for a time out.
|
|
++ */
|
|
++ ATH10K_FLAG_CRASH_FLUSH,
|
|
++};
|
|
++
|
|
++enum ath10k_cal_mode {
|
|
++ ATH10K_CAL_MODE_FILE,
|
|
++ ATH10K_CAL_MODE_OTP,
|
|
++ ATH10K_CAL_MODE_DT,
|
|
++};
|
|
++
|
|
++static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
|
|
++{
|
|
++ switch (mode) {
|
|
++ case ATH10K_CAL_MODE_FILE:
|
|
++ return "file";
|
|
++ case ATH10K_CAL_MODE_OTP:
|
|
++ return "otp";
|
|
++ case ATH10K_CAL_MODE_DT:
|
|
++ return "dt";
|
|
++ }
|
|
++
|
|
++ return "unknown";
|
|
++}
|
|
++
|
|
++enum ath10k_scan_state {
|
|
++ ATH10K_SCAN_IDLE,
|
|
++ ATH10K_SCAN_STARTING,
|
|
++ ATH10K_SCAN_RUNNING,
|
|
++ ATH10K_SCAN_ABORTING,
|
|
+ };
|
|
+
|
|
++static inline const char *ath10k_scan_state_str(enum ath10k_scan_state state)
|
|
++{
|
|
++ switch (state) {
|
|
++ case ATH10K_SCAN_IDLE:
|
|
++ return "idle";
|
|
++ case ATH10K_SCAN_STARTING:
|
|
++ return "starting";
|
|
++ case ATH10K_SCAN_RUNNING:
|
|
++ return "running";
|
|
++ case ATH10K_SCAN_ABORTING:
|
|
++ return "aborting";
|
|
++ }
|
|
++
|
|
++ return "unknown";
|
|
++}
|
|
++
|
|
+ struct ath10k {
|
|
+ struct ath_common ath_common;
|
|
+ struct ieee80211_hw *hw;
|
|
+ struct device *dev;
|
|
+ u8 mac_addr[ETH_ALEN];
|
|
+
|
|
++ enum ath10k_hw_rev hw_rev;
|
|
+ u32 chip_id;
|
|
+ u32 target_version;
|
|
+ u8 fw_version_major;
|
|
+@@ -358,18 +520,16 @@ struct ath10k {
|
|
+
|
|
+ DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
|
|
+
|
|
+- struct targetdef *targetdef;
|
|
+- struct hostdef *hostdef;
|
|
+-
|
|
+ bool p2p;
|
|
+
|
|
+ struct {
|
|
+- void *priv;
|
|
++ enum ath10k_bus bus;
|
|
+ const struct ath10k_hif_ops *ops;
|
|
+ } hif;
|
|
+
|
|
+ struct completion target_suspend;
|
|
+
|
|
++ const struct ath10k_hw_regs *regs;
|
|
+ struct ath10k_bmi bmi;
|
|
+ struct ath10k_wmi wmi;
|
|
+ struct ath10k_htc htc;
|
|
+@@ -379,12 +539,15 @@ struct ath10k {
|
|
+ u32 id;
|
|
+ const char *name;
|
|
+ u32 patch_load_addr;
|
|
++ int uart_pin;
|
|
+
|
|
+ struct ath10k_hw_params_fw {
|
|
+ const char *dir;
|
|
+ const char *fw;
|
|
+ const char *otp;
|
|
+ const char *board;
|
|
++ size_t board_size;
|
|
++ size_t board_ext_size;
|
|
+ } fw;
|
|
+ } hw_params;
|
|
+
|
|
+@@ -400,16 +563,18 @@ struct ath10k {
|
|
+ const void *firmware_data;
|
|
+ size_t firmware_len;
|
|
+
|
|
++ const struct firmware *cal_file;
|
|
++
|
|
+ int fw_api;
|
|
++ enum ath10k_cal_mode cal_mode;
|
|
+
|
|
+ struct {
|
|
+ struct completion started;
|
|
+ struct completion completed;
|
|
+ struct completion on_channel;
|
|
+- struct timer_list timeout;
|
|
++ struct delayed_work timeout;
|
|
++ enum ath10k_scan_state state;
|
|
+ bool is_roc;
|
|
+- bool in_progress;
|
|
+- bool aborting;
|
|
+ int vdev_id;
|
|
+ int roc_freq;
|
|
+ } scan;
|
|
+@@ -427,8 +592,7 @@ struct ath10k {
|
|
+ /* current operating channel definition */
|
|
+ struct cfg80211_chan_def chandef;
|
|
+
|
|
+- int free_vdev_map;
|
|
+- bool promisc;
|
|
++ unsigned long long free_vdev_map;
|
|
+ bool monitor;
|
|
+ int monitor_vdev_id;
|
|
+ bool monitor_started;
|
|
+@@ -440,7 +604,12 @@ struct ath10k {
|
|
+ bool radar_enabled;
|
|
+ int num_started_vdevs;
|
|
+
|
|
+- struct wmi_pdev_set_wmm_params_arg wmm_params;
|
|
++ /* Protected by conf-mutex */
|
|
++ u8 supp_tx_chainmask;
|
|
++ u8 supp_rx_chainmask;
|
|
++ u8 cfg_tx_chainmask;
|
|
++ u8 cfg_rx_chainmask;
|
|
++
|
|
+ struct completion install_key_done;
|
|
+
|
|
+ struct completion vdev_setup_done;
|
|
+@@ -457,8 +626,13 @@ struct ath10k {
|
|
+ struct list_head peers;
|
|
+ wait_queue_head_t peer_mapping_wq;
|
|
+
|
|
+- /* number of created peers; protected by data_lock */
|
|
++ /* protected by conf_mutex */
|
|
+ int num_peers;
|
|
++ int num_stations;
|
|
++
|
|
++ int max_num_peers;
|
|
++ int max_num_stations;
|
|
++ int max_num_vdevs;
|
|
+
|
|
+ struct work_struct offchan_tx_work;
|
|
+ struct sk_buff_head offchan_tx_queue;
|
|
+@@ -470,6 +644,7 @@ struct ath10k {
|
|
+
|
|
+ enum ath10k_state state;
|
|
+
|
|
++ struct work_struct register_work;
|
|
+ struct work_struct restart_work;
|
|
+
|
|
+ /* cycle count is reported twice for each visited channel during scan.
|
|
+@@ -483,13 +658,46 @@ struct ath10k {
|
|
+ #ifdef CPTCFG_ATH10K_DEBUGFS
|
|
+ struct ath10k_debug debug;
|
|
+ #endif
|
|
++
|
|
++ struct {
|
|
++ /* relay(fs) channel for spectral scan */
|
|
++ struct rchan *rfs_chan_spec_scan;
|
|
++
|
|
++ /* spectral_mode and spec_config are protected by conf_mutex */
|
|
++ enum ath10k_spectral_mode mode;
|
|
++ struct ath10k_spec_scan config;
|
|
++ } spectral;
|
|
++
|
|
++ struct {
|
|
++ /* protected by conf_mutex */
|
|
++ const struct firmware *utf;
|
|
++ DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT);
|
|
++ enum ath10k_fw_wmi_op_version orig_wmi_op_version;
|
|
++
|
|
++ /* protected by data_lock */
|
|
++ bool utf_monitor;
|
|
++ } testmode;
|
|
++
|
|
++ struct {
|
|
++ /* protected by data_lock */
|
|
++ u32 fw_crash_counter;
|
|
++ u32 fw_warm_reset_counter;
|
|
++ u32 fw_cold_reset_counter;
|
|
++ } stats;
|
|
++
|
|
++ struct ath10k_thermal thermal;
|
|
++
|
|
++ /* must be last */
|
|
++ u8 drv_priv[0] __aligned(sizeof(void *));
|
|
+ };
|
|
+
|
|
+-struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
|
|
++struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
|
|
++ enum ath10k_bus bus,
|
|
++ enum ath10k_hw_rev hw_rev,
|
|
+ const struct ath10k_hif_ops *hif_ops);
|
|
+ void ath10k_core_destroy(struct ath10k *ar);
|
|
+
|
|
+-int ath10k_core_start(struct ath10k *ar);
|
|
++int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode);
|
|
+ int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
|
|
+ void ath10k_core_stop(struct ath10k *ar);
|
|
+ int ath10k_core_register(struct ath10k *ar, u32 chip_id);
|
|
+--- a/drivers/net/wireless/ath/ath10k/debug.c
|
|
++++ b/drivers/net/wireless/ath/ath10k/debug.c
|
|
+@@ -17,107 +17,176 @@
|
|
+
|
|
+ #include <linux/module.h>
|
|
+ #include <linux/debugfs.h>
|
|
++#include <linux/vmalloc.h>
|
|
++#include <linux/utsname.h>
|
|
+
|
|
+ #include "core.h"
|
|
+ #include "debug.h"
|
|
++#include "hif.h"
|
|
++#include "wmi-ops.h"
|
|
+
|
|
+ /* ms */
|
|
+ #define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
|
|
+
|
|
+-static int ath10k_printk(const char *level, const char *fmt, ...)
|
|
+-{
|
|
+- struct va_format vaf;
|
|
+- va_list args;
|
|
+- int rtn;
|
|
++#define ATH10K_FW_CRASH_DUMP_VERSION 1
|
|
+
|
|
+- va_start(args, fmt);
|
|
++/**
|
|
++ * enum ath10k_fw_crash_dump_type - types of data in the dump file
|
|
++ * @ATH10K_FW_CRASH_DUMP_REGISTERS: Register crash dump in binary format
|
|
++ */
|
|
++enum ath10k_fw_crash_dump_type {
|
|
++ ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
|
|
+
|
|
+- vaf.fmt = fmt;
|
|
+- vaf.va = &args;
|
|
++ ATH10K_FW_CRASH_DUMP_MAX,
|
|
++};
|
|
+
|
|
+- rtn = printk("%sath10k: %pV", level, &vaf);
|
|
++struct ath10k_tlv_dump_data {
|
|
++ /* see ath10k_fw_crash_dump_type above */
|
|
++ __le32 type;
|
|
+
|
|
+- va_end(args);
|
|
++ /* in bytes */
|
|
++ __le32 tlv_len;
|
|
+
|
|
+- return rtn;
|
|
+-}
|
|
++ /* pad to 32-bit boundaries as needed */
|
|
++ u8 tlv_data[];
|
|
++} __packed;
|
|
++
|
|
++struct ath10k_dump_file_data {
|
|
++ /* dump file information */
|
|
++
|
|
++ /* "ATH10K-FW-DUMP" */
|
|
++ char df_magic[16];
|
|
++
|
|
++ __le32 len;
|
|
++
|
|
++ /* file dump version */
|
|
++ __le32 version;
|
|
++
|
|
++ /* some info we can get from ath10k struct that might help */
|
|
++
|
|
++ u8 uuid[16];
|
|
++
|
|
++ __le32 chip_id;
|
|
++
|
|
++ /* 0 for now, in place for later hardware */
|
|
++ __le32 bus_type;
|
|
++
|
|
++ __le32 target_version;
|
|
++ __le32 fw_version_major;
|
|
++ __le32 fw_version_minor;
|
|
++ __le32 fw_version_release;
|
|
++ __le32 fw_version_build;
|
|
++ __le32 phy_capability;
|
|
++ __le32 hw_min_tx_power;
|
|
++ __le32 hw_max_tx_power;
|
|
++ __le32 ht_cap_info;
|
|
++ __le32 vht_cap_info;
|
|
++ __le32 num_rf_chains;
|
|
++
|
|
++ /* firmware version string */
|
|
++ char fw_ver[ETHTOOL_FWVERS_LEN];
|
|
++
|
|
++ /* Kernel related information */
|
|
++
|
|
++ /* time-of-day stamp */
|
|
++ __le64 tv_sec;
|
|
++
|
|
++ /* time-of-day stamp, nano-seconds */
|
|
++ __le64 tv_nsec;
|
|
++
|
|
++ /* LINUX_VERSION_CODE */
|
|
++ __le32 kernel_ver_code;
|
|
++
|
|
++ /* VERMAGIC_STRING */
|
|
++ char kernel_ver[64];
|
|
+
|
|
+-int ath10k_info(const char *fmt, ...)
|
|
++ /* room for growth w/out changing binary format */
|
|
++ u8 unused[128];
|
|
++
|
|
++ /* struct ath10k_tlv_dump_data + more */
|
|
++ u8 data[0];
|
|
++} __packed;
|
|
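The dump file format above is a fixed header followed by a run of type/length records, so a consumer only needs the two struct layouts to walk it. For reference, a minimal userspace sketch (not part of this patch) that walks the TLV area; the struct name, the bounds handling, and the little-endian host assumption are all inventions of this illustration:

#include <stdint.h>
#include <stdio.h>

/* mirrors struct ath10k_tlv_dump_data from the patch above,
 * assuming a little-endian host so the __le32 fields read directly */
struct dump_tlv {
	uint32_t type;      /* see ath10k_fw_crash_dump_type */
	uint32_t tlv_len;   /* payload length in bytes */
	uint8_t  tlv_data[];
};

/* walk the TLV records that start right after the fixed header */
static void walk_tlvs(const uint8_t *buf, size_t len, size_t hdr_len)
{
	size_t off = hdr_len;

	while (off + sizeof(struct dump_tlv) <= len) {
		const struct dump_tlv *tlv =
			(const struct dump_tlv *)(buf + off);

		/* reject a record whose payload overruns the buffer */
		if (tlv->tlv_len > len - off - sizeof(*tlv))
			break;

		printf("tlv type %u, %u bytes\n",
		       (unsigned int)tlv->type,
		       (unsigned int)tlv->tlv_len);
		off += sizeof(*tlv) + tlv->tlv_len;
	}
}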
++
|
|
++void ath10k_info(struct ath10k *ar, const char *fmt, ...)
|
|
+ {
|
|
+ struct va_format vaf = {
|
|
+ .fmt = fmt,
|
|
+ };
|
|
+ va_list args;
|
|
+- int ret;
|
|
+
|
|
+ va_start(args, fmt);
|
|
+ vaf.va = &args;
|
|
+- ret = ath10k_printk(KERN_INFO, "%pV", &vaf);
|
|
+- trace_ath10k_log_info(&vaf);
|
|
++ dev_info(ar->dev, "%pV", &vaf);
|
|
++ trace_ath10k_log_info(ar, &vaf);
|
|
+ va_end(args);
|
|
+-
|
|
+- return ret;
|
|
+ }
|
|
+ EXPORT_SYMBOL(ath10k_info);
|
|
+
|
|
+-int ath10k_err(const char *fmt, ...)
|
|
++void ath10k_print_driver_info(struct ath10k *ar)
|
|
++{
|
|
++ ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
|
|
++ ar->hw_params.name,
|
|
++ ar->target_version,
|
|
++ ar->chip_id,
|
|
++ ar->hw->wiphy->fw_version,
|
|
++ ar->fw_api,
|
|
++ ar->htt.target_version_major,
|
|
++ ar->htt.target_version_minor,
|
|
++ ar->wmi.op_version,
|
|
++ ath10k_cal_mode_str(ar->cal_mode),
|
|
++ ar->max_num_stations);
|
|
++ ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
|
|
++ config_enabled(CPTCFG_ATH10K_DEBUG),
|
|
++ config_enabled(CPTCFG_ATH10K_DEBUGFS),
|
|
++ config_enabled(CPTCFG_ATH10K_TRACING),
|
|
++ config_enabled(CPTCFG_ATH10K_DFS_CERTIFIED),
|
|
++ config_enabled(CPTCFG_NL80211_TESTMODE));
|
|
++}
|
|
++EXPORT_SYMBOL(ath10k_print_driver_info);
|
|
++
|
|
++void ath10k_err(struct ath10k *ar, const char *fmt, ...)
|
|
+ {
|
|
+ struct va_format vaf = {
|
|
+ .fmt = fmt,
|
|
+ };
|
|
+ va_list args;
|
|
+- int ret;
|
|
+
|
|
+ va_start(args, fmt);
|
|
+ vaf.va = &args;
|
|
+- ret = ath10k_printk(KERN_ERR, "%pV", &vaf);
|
|
+- trace_ath10k_log_err(&vaf);
|
|
++ dev_err(ar->dev, "%pV", &vaf);
|
|
++ trace_ath10k_log_err(ar, &vaf);
|
|
+ va_end(args);
|
|
+-
|
|
+- return ret;
|
|
+ }
|
|
+ EXPORT_SYMBOL(ath10k_err);
|
|
+
|
|
+-int ath10k_warn(const char *fmt, ...)
|
|
++void ath10k_warn(struct ath10k *ar, const char *fmt, ...)
|
|
+ {
|
|
+ struct va_format vaf = {
|
|
+ .fmt = fmt,
|
|
+ };
|
|
+ va_list args;
|
|
+- int ret = 0;
|
|
+
|
|
+ va_start(args, fmt);
|
|
+ vaf.va = &args;
|
|
+-
|
|
+- if (net_ratelimit())
|
|
+- ret = ath10k_printk(KERN_WARNING, "%pV", &vaf);
|
|
+-
|
|
+- trace_ath10k_log_warn(&vaf);
|
|
++ dev_warn_ratelimited(ar->dev, "%pV", &vaf);
|
|
++ trace_ath10k_log_warn(ar, &vaf);
|
|
+
|
|
+ va_end(args);
|
|
+-
|
|
+- return ret;
|
|
+ }
|
|
+ EXPORT_SYMBOL(ath10k_warn);
|
|
+
|
|
+ #ifdef CPTCFG_ATH10K_DEBUGFS
|
|
+
|
|
+-void ath10k_debug_read_service_map(struct ath10k *ar,
|
|
+- void *service_map,
|
|
+- size_t map_size)
|
|
+-{
|
|
+- memcpy(ar->debug.wmi_service_bitmap, service_map, map_size);
|
|
+-}
|
|
+-
|
|
+ static ssize_t ath10k_read_wmi_services(struct file *file,
|
|
+ char __user *user_buf,
|
|
+ size_t count, loff_t *ppos)
|
|
+ {
|
|
+ struct ath10k *ar = file->private_data;
|
|
+ char *buf;
|
|
+- unsigned int len = 0, buf_len = 1500;
|
|
+- const char *status;
|
|
++ unsigned int len = 0, buf_len = 4096;
|
|
++ const char *name;
|
|
+ ssize_t ret_cnt;
|
|
++ bool enabled;
|
|
+ int i;
|
|
+
|
|
+ buf = kzalloc(buf_len, GFP_KERNEL);
|
|
+@@ -129,16 +198,25 @@ static ssize_t ath10k_read_wmi_services(
|
|
+ if (len > buf_len)
|
|
+ len = buf_len;
|
|
+
|
|
+- for (i = 0; i < WMI_SERVICE_LAST; i++) {
|
|
+- if (WMI_SERVICE_IS_ENABLED(ar->debug.wmi_service_bitmap, i))
|
|
+- status = "enabled";
|
|
+- else
|
|
+- status = "disabled";
|
|
++ spin_lock_bh(&ar->data_lock);
|
|
++ for (i = 0; i < WMI_SERVICE_MAX; i++) {
|
|
++ enabled = test_bit(i, ar->wmi.svc_map);
|
|
++ name = wmi_service_name(i);
|
|
++
|
|
++ if (!name) {
|
|
++ if (enabled)
|
|
++ len += scnprintf(buf + len, buf_len - len,
|
|
++ "%-40s %s (bit %d)\n",
|
|
++ "unknown", "enabled", i);
|
|
++
|
|
++ continue;
|
|
++ }
|
|
+
|
|
+ len += scnprintf(buf + len, buf_len - len,
|
|
+- "0x%02x - %20s - %s\n",
|
|
+- i, wmi_service_name(i), status);
|
|
++ "%-40s %s\n",
|
|
++ name, enabled ? "enabled" : "-");
|
|
+ }
|
|
++ spin_unlock_bh(&ar->data_lock);
|
|
+
|
|
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
|
+
|
|
+@@ -155,169 +233,221 @@ static const struct file_operations fops
|
|
+ .llseek = default_llseek,
|
|
+ };
|
|
+
|
|
+-void ath10k_debug_read_target_stats(struct ath10k *ar,
|
|
+- struct wmi_stats_event *ev)
|
|
++static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head)
|
|
+ {
|
|
+- u8 *tmp = ev->data;
|
|
+- struct ath10k_target_stats *stats;
|
|
+- int num_pdev_stats, num_vdev_stats, num_peer_stats;
|
|
+- struct wmi_pdev_stats_10x *ps;
|
|
+- int i;
|
|
++ struct ath10k_fw_stats_pdev *i, *tmp;
|
|
+
|
|
++ list_for_each_entry_safe(i, tmp, head, list) {
|
|
++ list_del(&i->list);
|
|
++ kfree(i);
|
|
++ }
|
|
++}
|
|
++
|
|
++static void ath10k_debug_fw_stats_vdevs_free(struct list_head *head)
|
|
++{
|
|
++ struct ath10k_fw_stats_vdev *i, *tmp;
|
|
++
|
|
++ list_for_each_entry_safe(i, tmp, head, list) {
|
|
++ list_del(&i->list);
|
|
++ kfree(i);
|
|
++ }
|
|
++}
|
|
++
|
|
++static void ath10k_debug_fw_stats_peers_free(struct list_head *head)
|
|
++{
|
|
++ struct ath10k_fw_stats_peer *i, *tmp;
|
|
++
|
|
++ list_for_each_entry_safe(i, tmp, head, list) {
|
|
++ list_del(&i->list);
|
|
++ kfree(i);
|
|
++ }
|
|
++}
|
|
++
|
|
++static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
|
|
++{
|
|
+ spin_lock_bh(&ar->data_lock);
|
|
++ ar->debug.fw_stats_done = false;
|
|
++ ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
|
|
++ ath10k_debug_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
|
|
++ ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers);
|
|
++ spin_unlock_bh(&ar->data_lock);
|
|
++}
|
|
+
|
|
+- stats = &ar->debug.target_stats;
|
|
++static size_t ath10k_debug_fw_stats_num_peers(struct list_head *head)
|
|
++{
|
|
++ struct ath10k_fw_stats_peer *i;
|
|
++ size_t num = 0;
|
|
+
|
|
+- num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); /* 0 or 1 */
|
|
+- num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); /* 0 or max vdevs */
|
|
+- num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */
|
|
+-
|
|
+- if (num_pdev_stats) {
|
|
+- ps = (struct wmi_pdev_stats_10x *)tmp;
|
|
+-
|
|
+- stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
|
|
+- stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
|
|
+- stats->rx_frame_count = __le32_to_cpu(ps->rx_frame_count);
|
|
+- stats->rx_clear_count = __le32_to_cpu(ps->rx_clear_count);
|
|
+- stats->cycle_count = __le32_to_cpu(ps->cycle_count);
|
|
+- stats->phy_err_count = __le32_to_cpu(ps->phy_err_count);
|
|
+- stats->chan_tx_power = __le32_to_cpu(ps->chan_tx_pwr);
|
|
+-
|
|
+- stats->comp_queued = __le32_to_cpu(ps->wal.tx.comp_queued);
|
|
+- stats->comp_delivered =
|
|
+- __le32_to_cpu(ps->wal.tx.comp_delivered);
|
|
+- stats->msdu_enqued = __le32_to_cpu(ps->wal.tx.msdu_enqued);
|
|
+- stats->mpdu_enqued = __le32_to_cpu(ps->wal.tx.mpdu_enqued);
|
|
+- stats->wmm_drop = __le32_to_cpu(ps->wal.tx.wmm_drop);
|
|
+- stats->local_enqued = __le32_to_cpu(ps->wal.tx.local_enqued);
|
|
+- stats->local_freed = __le32_to_cpu(ps->wal.tx.local_freed);
|
|
+- stats->hw_queued = __le32_to_cpu(ps->wal.tx.hw_queued);
|
|
+- stats->hw_reaped = __le32_to_cpu(ps->wal.tx.hw_reaped);
|
|
+- stats->underrun = __le32_to_cpu(ps->wal.tx.underrun);
|
|
+- stats->tx_abort = __le32_to_cpu(ps->wal.tx.tx_abort);
|
|
+- stats->mpdus_requed = __le32_to_cpu(ps->wal.tx.mpdus_requed);
|
|
+- stats->tx_ko = __le32_to_cpu(ps->wal.tx.tx_ko);
|
|
+- stats->data_rc = __le32_to_cpu(ps->wal.tx.data_rc);
|
|
+- stats->self_triggers = __le32_to_cpu(ps->wal.tx.self_triggers);
|
|
+- stats->sw_retry_failure =
|
|
+- __le32_to_cpu(ps->wal.tx.sw_retry_failure);
|
|
+- stats->illgl_rate_phy_err =
|
|
+- __le32_to_cpu(ps->wal.tx.illgl_rate_phy_err);
|
|
+- stats->pdev_cont_xretry =
|
|
+- __le32_to_cpu(ps->wal.tx.pdev_cont_xretry);
|
|
+- stats->pdev_tx_timeout =
|
|
+- __le32_to_cpu(ps->wal.tx.pdev_tx_timeout);
|
|
+- stats->pdev_resets = __le32_to_cpu(ps->wal.tx.pdev_resets);
|
|
+- stats->phy_underrun = __le32_to_cpu(ps->wal.tx.phy_underrun);
|
|
+- stats->txop_ovf = __le32_to_cpu(ps->wal.tx.txop_ovf);
|
|
+-
|
|
+- stats->mid_ppdu_route_change =
|
|
+- __le32_to_cpu(ps->wal.rx.mid_ppdu_route_change);
|
|
+- stats->status_rcvd = __le32_to_cpu(ps->wal.rx.status_rcvd);
|
|
+- stats->r0_frags = __le32_to_cpu(ps->wal.rx.r0_frags);
|
|
+- stats->r1_frags = __le32_to_cpu(ps->wal.rx.r1_frags);
|
|
+- stats->r2_frags = __le32_to_cpu(ps->wal.rx.r2_frags);
|
|
+- stats->r3_frags = __le32_to_cpu(ps->wal.rx.r3_frags);
|
|
+- stats->htt_msdus = __le32_to_cpu(ps->wal.rx.htt_msdus);
|
|
+- stats->htt_mpdus = __le32_to_cpu(ps->wal.rx.htt_mpdus);
|
|
+- stats->loc_msdus = __le32_to_cpu(ps->wal.rx.loc_msdus);
|
|
+- stats->loc_mpdus = __le32_to_cpu(ps->wal.rx.loc_mpdus);
|
|
+- stats->oversize_amsdu =
|
|
+- __le32_to_cpu(ps->wal.rx.oversize_amsdu);
|
|
+- stats->phy_errs = __le32_to_cpu(ps->wal.rx.phy_errs);
|
|
+- stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
|
|
+- stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);
|
|
+-
|
|
+- if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
|
|
+- ar->fw_features)) {
|
|
+- stats->ack_rx_bad = __le32_to_cpu(ps->ack_rx_bad);
|
|
+- stats->rts_bad = __le32_to_cpu(ps->rts_bad);
|
|
+- stats->rts_good = __le32_to_cpu(ps->rts_good);
|
|
+- stats->fcs_bad = __le32_to_cpu(ps->fcs_bad);
|
|
+- stats->no_beacons = __le32_to_cpu(ps->no_beacons);
|
|
+- stats->mib_int_count = __le32_to_cpu(ps->mib_int_count);
|
|
+- tmp += sizeof(struct wmi_pdev_stats_10x);
|
|
+- } else {
|
|
+- tmp += sizeof(struct wmi_pdev_stats_old);
|
|
+- }
|
|
++ list_for_each_entry(i, head, list)
|
|
++ ++num;
|
|
++
|
|
++ return num;
|
|
++}
|
|
++
|
|
++static size_t ath10k_debug_fw_stats_num_vdevs(struct list_head *head)
|
|
++{
|
|
++ struct ath10k_fw_stats_vdev *i;
|
|
++ size_t num = 0;
|
|
++
|
|
++ list_for_each_entry(i, head, list)
|
|
++ ++num;
|
|
++
|
|
++ return num;
|
|
++}
|
|
++
|
|
++void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
|
|
++{
|
|
++ struct ath10k_fw_stats stats = {};
|
|
++ bool is_start, is_started, is_end;
|
|
++ size_t num_peers;
|
|
++ size_t num_vdevs;
|
|
++ int ret;
|
|
++
|
|
++ INIT_LIST_HEAD(&stats.pdevs);
|
|
++ INIT_LIST_HEAD(&stats.vdevs);
|
|
++ INIT_LIST_HEAD(&stats.peers);
|
|
++
|
|
++ spin_lock_bh(&ar->data_lock);
|
|
++ ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats);
|
|
++ if (ret) {
|
|
++ ath10k_warn(ar, "failed to pull fw stats: %d\n", ret);
|
|
++ goto unlock;
|
|
+ }
|
|
+
|
|
+- /* 0 or max vdevs */
|
|
+- /* Currently firmware does not support VDEV stats */
|
|
+- if (num_vdev_stats) {
|
|
+- struct wmi_vdev_stats *vdev_stats;
|
|
+-
|
|
+- for (i = 0; i < num_vdev_stats; i++) {
|
|
+- vdev_stats = (struct wmi_vdev_stats *)tmp;
|
|
+- tmp += sizeof(struct wmi_vdev_stats);
|
|
+- }
|
|
++ /* Stat data may exceed htc-wmi buffer limit. In such a case firmware
|
|
++ * splits the stats data and delivers it in a ping-pong fashion of
|
|
++ * request cmd-update event.
|
|
++ *
|
|
++ * However there is no explicit end-of-data. Instead start-of-data is
|
|
++ * used as an implicit one. This works as follows:
|
|
++ * a) discard stat update events until one with pdev stats is
|
|
++ * delivered - this skips session started at end of (b)
|
|
++ * b) consume stat update events until another one with pdev stats is
|
|
++ * delivered which is treated as end-of-data and is itself discarded
|
|
++ */
|
|
++
|
|
++ if (ar->debug.fw_stats_done) {
|
|
++ ath10k_warn(ar, "received unsolicited stats update event\n");
|
|
++ goto free;
|
|
+ }
|
|
+
|
|
+- if (num_peer_stats) {
|
|
+- struct wmi_peer_stats_10x *peer_stats;
|
|
+- struct ath10k_peer_stat *s;
|
|
+-
|
|
+- stats->peers = num_peer_stats;
|
|
+-
|
|
+- for (i = 0; i < num_peer_stats; i++) {
|
|
+- peer_stats = (struct wmi_peer_stats_10x *)tmp;
|
|
+- s = &stats->peer_stat[i];
|
|
+-
|
|
+- memcpy(s->peer_macaddr, &peer_stats->peer_macaddr.addr,
|
|
+- ETH_ALEN);
|
|
+- s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
|
|
+- s->peer_tx_rate =
|
|
+- __le32_to_cpu(peer_stats->peer_tx_rate);
|
|
+- if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
|
|
+- ar->fw_features)) {
|
|
+- s->peer_rx_rate =
|
|
+- __le32_to_cpu(peer_stats->peer_rx_rate);
|
|
+- tmp += sizeof(struct wmi_peer_stats_10x);
|
|
+-
|
|
+- } else {
|
|
+- tmp += sizeof(struct wmi_peer_stats_old);
|
|
+- }
|
|
++ num_peers = ath10k_debug_fw_stats_num_peers(&ar->debug.fw_stats.peers);
|
|
++ num_vdevs = ath10k_debug_fw_stats_num_vdevs(&ar->debug.fw_stats.vdevs);
|
|
++ is_start = (list_empty(&ar->debug.fw_stats.pdevs) &&
|
|
++ !list_empty(&stats.pdevs));
|
|
++ is_end = (!list_empty(&ar->debug.fw_stats.pdevs) &&
|
|
++ !list_empty(&stats.pdevs));
|
|
++
|
|
++ if (is_start)
|
|
++ list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
|
|
++
|
|
++ if (is_end)
|
|
++ ar->debug.fw_stats_done = true;
|
|
++
|
|
++ is_started = !list_empty(&ar->debug.fw_stats.pdevs);
|
|
++
|
|
++ if (is_started && !is_end) {
|
|
++ if (num_peers >= ATH10K_MAX_NUM_PEER_IDS) {
|
|
++ /* Although this is unlikely, impose a sane limit to
|
|
++ * prevent firmware from DoS-ing the host.
|
|
++ */
|
|
++ ath10k_warn(ar, "dropping fw peer stats\n");
|
|
++ goto free;
|
|
+ }
|
|
++
|
|
++ if (num_vdevs >= BITS_PER_LONG) {
|
|
++ ath10k_warn(ar, "dropping fw vdev stats\n");
|
|
++ goto free;
|
|
++ }
|
|
++
|
|
++ list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers);
|
|
++ list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs);
|
|
+ }
|
|
+
|
|
++ complete(&ar->debug.fw_stats_complete);
|
|
++
|
|
++free:
|
|
++ /* In some cases lists have been spliced and cleared. Free up
|
|
++ * resources if that is not the case.
|
|
++ */
|
|
++ ath10k_debug_fw_stats_pdevs_free(&stats.pdevs);
|
|
++ ath10k_debug_fw_stats_vdevs_free(&stats.vdevs);
|
|
++ ath10k_debug_fw_stats_peers_free(&stats.peers);
|
|
++
|
|
++unlock:
|
|
+ spin_unlock_bh(&ar->data_lock);
|
|
+- complete(&ar->debug.event_stats_compl);
|
|
+ }
|
|
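The comment inside the function above is the only session framing the firmware provides: a stats event carrying pdev records opens a session when none is buffered and closes one otherwise. A small standalone model of just that rule, with names invented for illustration:

#include <stdbool.h>
#include <stdio.h>

struct stats_session {
	bool started;   /* pdev records already buffered */
	bool done;      /* implicit end-of-data seen */
};

/* feed one stats update event into the session state machine */
static void stats_event(struct stats_session *s, bool has_pdev)
{
	if (!has_pdev)
		return;             /* mid-session chunk, just consume */
	if (!s->started)
		s->started = true;  /* start-of-data */
	else
		s->done = true;     /* second pdev event ends the run */
}

int main(void)
{
	struct stats_session s = { false, false };

	stats_event(&s, true);        /* opens the session */
	stats_event(&s, false);       /* peer/vdev chunk */
	stats_event(&s, true);        /* implicit end-of-data */
	printf("done=%d\n", s.done);  /* prints done=1 */
	return 0;
}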
+
|
|
+-static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
|
|
+- size_t count, loff_t *ppos)
|
|
++static int ath10k_debug_fw_stats_request(struct ath10k *ar)
|
|
+ {
|
|
+- struct ath10k *ar = file->private_data;
|
|
+- struct ath10k_target_stats *fw_stats;
|
|
+- char *buf = NULL;
|
|
+- unsigned int len = 0, buf_len = 8000;
|
|
+- ssize_t ret_cnt = 0;
|
|
+- long left;
|
|
+- int i;
|
|
++ unsigned long timeout;
|
|
+ int ret;
|
|
+
|
|
+- fw_stats = &ar->debug.target_stats;
|
|
++ lockdep_assert_held(&ar->conf_mutex);
|
|
+
|
|
+- mutex_lock(&ar->conf_mutex);
|
|
++ timeout = jiffies + msecs_to_jiffies(1*HZ);
|
|
+
|
|
+- if (ar->state != ATH10K_STATE_ON)
|
|
+- goto exit;
|
|
++ ath10k_debug_fw_stats_reset(ar);
|
|
+
|
|
+- buf = kzalloc(buf_len, GFP_KERNEL);
|
|
+- if (!buf)
|
|
+- goto exit;
|
|
++ for (;;) {
|
|
++ if (time_after(jiffies, timeout))
|
|
++ return -ETIMEDOUT;
|
|
+
|
|
+- ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
|
|
+- if (ret) {
|
|
+- ath10k_warn("could not request stats (%d)\n", ret);
|
|
+- goto exit;
|
|
++ reinit_completion(&ar->debug.fw_stats_complete);
|
|
++
|
|
++ ret = ath10k_wmi_request_stats(ar,
|
|
++ WMI_STAT_PDEV |
|
|
++ WMI_STAT_VDEV |
|
|
++ WMI_STAT_PEER);
|
|
++ if (ret) {
|
|
++ ath10k_warn(ar, "could not request stats (%d)\n", ret);
|
|
++ return ret;
|
|
++ }
|
|
++
|
|
++ ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete,
|
|
++ 1*HZ);
|
|
++ if (ret == 0)
|
|
++ return -ETIMEDOUT;
|
|
++
|
|
++ spin_lock_bh(&ar->data_lock);
|
|
++ if (ar->debug.fw_stats_done) {
|
|
++ spin_unlock_bh(&ar->data_lock);
|
|
++ break;
|
|
++ }
|
|
++ spin_unlock_bh(&ar->data_lock);
|
|
+ }
|
|
+
|
|
+- left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ);
|
|
+- if (left <= 0)
|
|
+- goto exit;
|
|
++ return 0;
|
|
++}
|
|
++
|
|
++/* FIXME: How to calculate the buffer size sanely? */
|
|
++#define ATH10K_FW_STATS_BUF_SIZE (1024*1024)
|
|
++
|
|
++static void ath10k_fw_stats_fill(struct ath10k *ar,
|
|
++ struct ath10k_fw_stats *fw_stats,
|
|
++ char *buf)
|
|
++{
|
|
++ unsigned int len = 0;
|
|
++ unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
|
|
++ const struct ath10k_fw_stats_pdev *pdev;
|
|
++ const struct ath10k_fw_stats_vdev *vdev;
|
|
++ const struct ath10k_fw_stats_peer *peer;
|
|
++ size_t num_peers;
|
|
++ size_t num_vdevs;
|
|
++ int i;
|
|
+
|
|
+ spin_lock_bh(&ar->data_lock);
|
|
++
|
|
++ pdev = list_first_entry_or_null(&fw_stats->pdevs,
|
|
++ struct ath10k_fw_stats_pdev, list);
|
|
++ if (!pdev) {
|
|
++ ath10k_warn(ar, "failed to get pdev stats\n");
|
|
++ goto unlock;
|
|
++ }
|
|
++
|
|
++ num_peers = ath10k_debug_fw_stats_num_peers(&fw_stats->peers);
|
|
++ num_vdevs = ath10k_debug_fw_stats_num_vdevs(&fw_stats->vdevs);
|
|
++
|
|
+ len += scnprintf(buf + len, buf_len - len, "\n");
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
|
|
+ "ath10k PDEV stats");
|
|
+@@ -325,29 +455,29 @@ static ssize_t ath10k_read_fw_stats(stru
|
|
+ "=================");
|
|
+
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "Channel noise floor", fw_stats->ch_noise_floor);
|
|
++ "Channel noise floor", pdev->ch_noise_floor);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
|
|
+- "Channel TX power", fw_stats->chan_tx_power);
|
|
++ "Channel TX power", pdev->chan_tx_power);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
|
|
+- "TX frame count", fw_stats->tx_frame_count);
|
|
++ "TX frame count", pdev->tx_frame_count);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
|
|
+- "RX frame count", fw_stats->rx_frame_count);
|
|
++ "RX frame count", pdev->rx_frame_count);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
|
|
+- "RX clear count", fw_stats->rx_clear_count);
|
|
++ "RX clear count", pdev->rx_clear_count);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
|
|
+- "Cycle count", fw_stats->cycle_count);
|
|
++ "Cycle count", pdev->cycle_count);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
|
|
+- "PHY error count", fw_stats->phy_err_count);
|
|
++ "PHY error count", pdev->phy_err_count);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
|
|
+- "RTS bad count", fw_stats->rts_bad);
|
|
++ "RTS bad count", pdev->rts_bad);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
|
|
+- "RTS good count", fw_stats->rts_good);
|
|
++ "RTS good count", pdev->rts_good);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
|
|
+- "FCS bad count", fw_stats->fcs_bad);
|
|
++ "FCS bad count", pdev->fcs_bad);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
|
|
+- "No beacon count", fw_stats->no_beacons);
|
|
++ "No beacon count", pdev->no_beacons);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
|
|
+- "MIB int count", fw_stats->mib_int_count);
|
|
++ "MIB int count", pdev->mib_int_count);
|
|
+
|
|
+ len += scnprintf(buf + len, buf_len - len, "\n");
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
|
|
+@@ -356,51 +486,51 @@ static ssize_t ath10k_read_fw_stats(stru
|
|
+ "=================");
|
|
+
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "HTT cookies queued", fw_stats->comp_queued);
|
|
++ "HTT cookies queued", pdev->comp_queued);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "HTT cookies disp.", fw_stats->comp_delivered);
|
|
++ "HTT cookies disp.", pdev->comp_delivered);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "MSDU queued", fw_stats->msdu_enqued);
|
|
++ "MSDU queued", pdev->msdu_enqued);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "MPDU queued", fw_stats->mpdu_enqued);
|
|
++ "MPDU queued", pdev->mpdu_enqued);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "MSDUs dropped", fw_stats->wmm_drop);
|
|
++ "MSDUs dropped", pdev->wmm_drop);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "Local enqued", fw_stats->local_enqued);
|
|
++ "Local enqued", pdev->local_enqued);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "Local freed", fw_stats->local_freed);
|
|
++ "Local freed", pdev->local_freed);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "HW queued", fw_stats->hw_queued);
|
|
++ "HW queued", pdev->hw_queued);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "PPDUs reaped", fw_stats->hw_reaped);
|
|
++ "PPDUs reaped", pdev->hw_reaped);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "Num underruns", fw_stats->underrun);
|
|
++ "Num underruns", pdev->underrun);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "PPDUs cleaned", fw_stats->tx_abort);
|
|
++ "PPDUs cleaned", pdev->tx_abort);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "MPDUs requed", fw_stats->mpdus_requed);
|
|
++ "MPDUs requed", pdev->mpdus_requed);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "Excessive retries", fw_stats->tx_ko);
|
|
++ "Excessive retries", pdev->tx_ko);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "HW rate", fw_stats->data_rc);
|
|
++ "HW rate", pdev->data_rc);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "Sched self tiggers", fw_stats->self_triggers);
|
|
++ "Sched self tiggers", pdev->self_triggers);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+ "Dropped due to SW retries",
|
|
+- fw_stats->sw_retry_failure);
|
|
++ pdev->sw_retry_failure);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+ "Illegal rate phy errors",
|
|
+- fw_stats->illgl_rate_phy_err);
|
|
++ pdev->illgl_rate_phy_err);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "Pdev continous xretry", fw_stats->pdev_cont_xretry);
|
|
++ "Pdev continous xretry", pdev->pdev_cont_xretry);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "TX timeout", fw_stats->pdev_tx_timeout);
|
|
++ "TX timeout", pdev->pdev_tx_timeout);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "PDEV resets", fw_stats->pdev_resets);
|
|
++ "PDEV resets", pdev->pdev_resets);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "PHY underrun", fw_stats->phy_underrun);
|
|
++ "PHY underrun", pdev->phy_underrun);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "MPDU is more than txop limit", fw_stats->txop_ovf);
|
|
++ "MPDU is more than txop limit", pdev->txop_ovf);
|
|
+
|
|
+ len += scnprintf(buf + len, buf_len - len, "\n");
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
|
|
+@@ -410,84 +540,254 @@ static ssize_t ath10k_read_fw_stats(stru
|
|
+
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+ "Mid PPDU route change",
|
|
+- fw_stats->mid_ppdu_route_change);
|
|
++ pdev->mid_ppdu_route_change);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "Tot. number of statuses", fw_stats->status_rcvd);
|
|
++ "Tot. number of statuses", pdev->status_rcvd);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "Extra frags on rings 0", fw_stats->r0_frags);
|
|
++ "Extra frags on rings 0", pdev->r0_frags);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "Extra frags on rings 1", fw_stats->r1_frags);
|
|
++ "Extra frags on rings 1", pdev->r1_frags);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "Extra frags on rings 2", fw_stats->r2_frags);
|
|
++ "Extra frags on rings 2", pdev->r2_frags);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "Extra frags on rings 3", fw_stats->r3_frags);
|
|
++ "Extra frags on rings 3", pdev->r3_frags);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "MSDUs delivered to HTT", fw_stats->htt_msdus);
|
|
++ "MSDUs delivered to HTT", pdev->htt_msdus);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "MPDUs delivered to HTT", fw_stats->htt_mpdus);
|
|
++ "MPDUs delivered to HTT", pdev->htt_mpdus);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "MSDUs delivered to stack", fw_stats->loc_msdus);
|
|
++ "MSDUs delivered to stack", pdev->loc_msdus);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "MPDUs delivered to stack", fw_stats->loc_mpdus);
|
|
++ "MPDUs delivered to stack", pdev->loc_mpdus);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "Oversized AMSUs", fw_stats->oversize_amsdu);
|
|
++ "Oversized AMSUs", pdev->oversize_amsdu);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "PHY errors", fw_stats->phy_errs);
|
|
++ "PHY errors", pdev->phy_errs);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "PHY errors drops", fw_stats->phy_err_drop);
|
|
++ "PHY errors drops", pdev->phy_err_drop);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
|
|
+- "MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);
|
|
++ "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
|
|
++
|
|
++ len += scnprintf(buf + len, buf_len - len, "\n");
|
|
++ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
|
|
++ "ath10k VDEV stats", num_vdevs);
|
|
++ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
|
|
++ "=================");
|
|
++
|
|
++ list_for_each_entry(vdev, &fw_stats->vdevs, list) {
|
|
++ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
|
|
++ "vdev id", vdev->vdev_id);
|
|
++ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
|
|
++ "beacon snr", vdev->beacon_snr);
|
|
++ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
|
|
++ "data snr", vdev->data_snr);
|
|
++ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
|
|
++ "num rx frames", vdev->num_rx_frames);
|
|
++ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
|
|
++ "num rts fail", vdev->num_rts_fail);
|
|
++ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
|
|
++ "num rts success", vdev->num_rts_success);
|
|
++ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
|
|
++ "num rx err", vdev->num_rx_err);
|
|
++ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
|
|
++ "num rx discard", vdev->num_rx_discard);
|
|
++ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
|
|
++ "num tx not acked", vdev->num_tx_not_acked);
|
|
++
|
|
++ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
|
|
++ len += scnprintf(buf + len, buf_len - len,
|
|
++ "%25s [%02d] %u\n",
|
|
++ "num tx frames", i,
|
|
++ vdev->num_tx_frames[i]);
|
|
++
|
|
++ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
|
|
++ len += scnprintf(buf + len, buf_len - len,
|
|
++ "%25s [%02d] %u\n",
|
|
++ "num tx frames retries", i,
|
|
++ vdev->num_tx_frames_retries[i]);
|
|
++
|
|
++ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
|
|
++ len += scnprintf(buf + len, buf_len - len,
|
|
++ "%25s [%02d] %u\n",
|
|
++ "num tx frames failures", i,
|
|
++ vdev->num_tx_frames_failures[i]);
|
|
++
|
|
++ for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
|
|
++ len += scnprintf(buf + len, buf_len - len,
|
|
++ "%25s [%02d] 0x%08x\n",
|
|
++ "tx rate history", i,
|
|
++ vdev->tx_rate_history[i]);
|
|
++
|
|
++ for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
|
|
++ len += scnprintf(buf + len, buf_len - len,
|
|
++ "%25s [%02d] %u\n",
|
|
++ "beacon rssi history", i,
|
|
++ vdev->beacon_rssi_history[i]);
|
|
++
|
|
++ len += scnprintf(buf + len, buf_len - len, "\n");
|
|
++ }
|
|
+
|
|
+ len += scnprintf(buf + len, buf_len - len, "\n");
|
|
+- len += scnprintf(buf + len, buf_len - len, "%30s (%d)\n",
|
|
+- "ath10k PEER stats", fw_stats->peers);
|
|
++ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
|
|
++ "ath10k PEER stats", num_peers);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
|
|
+ "=================");
|
|
+
|
|
+- for (i = 0; i < fw_stats->peers; i++) {
|
|
++ list_for_each_entry(peer, &fw_stats->peers, list) {
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
|
|
+- "Peer MAC address",
|
|
+- fw_stats->peer_stat[i].peer_macaddr);
|
|
++ "Peer MAC address", peer->peer_macaddr);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
|
|
+- "Peer RSSI", fw_stats->peer_stat[i].peer_rssi);
|
|
++ "Peer RSSI", peer->peer_rssi);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
|
|
+- "Peer TX rate",
|
|
+- fw_stats->peer_stat[i].peer_tx_rate);
|
|
++ "Peer TX rate", peer->peer_tx_rate);
|
|
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
|
|
+- "Peer RX rate",
|
|
+- fw_stats->peer_stat[i].peer_rx_rate);
|
|
++ "Peer RX rate", peer->peer_rx_rate);
|
|
+ len += scnprintf(buf + len, buf_len - len, "\n");
|
|
+ }
|
|
++
|
|
++unlock:
|
|
+ spin_unlock_bh(&ar->data_lock);
|
|
+
|
|
+- if (len > buf_len)
|
|
+- len = buf_len;
|
|
++ if (len >= buf_len)
|
|
++ buf[len - 1] = 0;
|
|
++ else
|
|
++ buf[len] = 0;
|
|
++}
|
|
+
|
|
+- ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
|
++static int ath10k_fw_stats_open(struct inode *inode, struct file *file)
|
|
++{
|
|
++ struct ath10k *ar = inode->i_private;
|
|
++ void *buf = NULL;
|
|
++ int ret;
|
|
++
|
|
++ mutex_lock(&ar->conf_mutex);
|
|
++
|
|
++ if (ar->state != ATH10K_STATE_ON) {
|
|
++ ret = -ENETDOWN;
|
|
++ goto err_unlock;
|
|
++ }
|
|
++
|
|
++ buf = vmalloc(ATH10K_FW_STATS_BUF_SIZE);
|
|
++ if (!buf) {
|
|
++ ret = -ENOMEM;
|
|
++ goto err_unlock;
|
|
++ }
|
|
++
|
|
++ ret = ath10k_debug_fw_stats_request(ar);
|
|
++ if (ret) {
|
|
++ ath10k_warn(ar, "failed to request fw stats: %d\n", ret);
|
|
++ goto err_free;
|
|
++ }
|
|
++
|
|
++ ath10k_fw_stats_fill(ar, &ar->debug.fw_stats, buf);
|
|
++ file->private_data = buf;
|
|
+
|
|
+-exit:
|
|
+ mutex_unlock(&ar->conf_mutex);
|
|
+- kfree(buf);
|
|
+- return ret_cnt;
|
|
++ return 0;
|
|
++
|
|
++err_free:
|
|
++ vfree(buf);
|
|
++
|
|
++err_unlock:
|
|
++ mutex_unlock(&ar->conf_mutex);
|
|
++ return ret;
|
|
++}
|
|
++
|
|
++static int ath10k_fw_stats_release(struct inode *inode, struct file *file)
|
|
++{
|
|
++ vfree(file->private_data);
|
|
++
|
|
++ return 0;
|
|
++}
|
|
++
|
|
++static ssize_t ath10k_fw_stats_read(struct file *file, char __user *user_buf,
|
|
++ size_t count, loff_t *ppos)
|
|
++{
|
|
++ const char *buf = file->private_data;
|
|
++ unsigned int len = strlen(buf);
|
|
++
|
|
++ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
|
+ }
|
|
+
|
|
+ static const struct file_operations fops_fw_stats = {
|
|
+- .read = ath10k_read_fw_stats,
|
|
++ .open = ath10k_fw_stats_open,
|
|
++ .release = ath10k_fw_stats_release,
|
|
++ .read = ath10k_fw_stats_read,
|
|
++ .owner = THIS_MODULE,
|
|
++ .llseek = default_llseek,
|
|
++};
|
|
++
|
|
++static ssize_t ath10k_debug_fw_reset_stats_read(struct file *file,
|
|
++ char __user *user_buf,
|
|
++ size_t count, loff_t *ppos)
|
|
++{
|
|
++ struct ath10k *ar = file->private_data;
|
|
++ int ret, len, buf_len;
|
|
++ char *buf;
|
|
++
|
|
++ buf_len = 500;
|
|
++ buf = kmalloc(buf_len, GFP_KERNEL);
|
|
++ if (!buf)
|
|
++ return -ENOMEM;
|
|
++
|
|
++ spin_lock_bh(&ar->data_lock);
|
|
++
|
|
++ len = 0;
|
|
++ len += scnprintf(buf + len, buf_len - len,
|
|
++ "fw_crash_counter\t\t%d\n", ar->stats.fw_crash_counter);
|
|
++ len += scnprintf(buf + len, buf_len - len,
|
|
++ "fw_warm_reset_counter\t\t%d\n",
|
|
++ ar->stats.fw_warm_reset_counter);
|
|
++ len += scnprintf(buf + len, buf_len - len,
|
|
++ "fw_cold_reset_counter\t\t%d\n",
|
|
++ ar->stats.fw_cold_reset_counter);
|
|
++
|
|
++ spin_unlock_bh(&ar->data_lock);
|
|
++
|
|
++ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
|
++
|
|
++ kfree(buf);
|
|
++
|
|
++ return ret;
|
|
++}
|
|
++
|
|
++static const struct file_operations fops_fw_reset_stats = {
|
|
+ .open = simple_open,
|
|
++ .read = ath10k_debug_fw_reset_stats_read,
|
|
+ .owner = THIS_MODULE,
|
|
+ .llseek = default_llseek,
|
|
+ };
|
|
+
|
|
++/* This is a clean assert crash in firmware. */
|
|
++static int ath10k_debug_fw_assert(struct ath10k *ar)
|
|
++{
|
|
++ struct wmi_vdev_install_key_cmd *cmd;
|
|
++ struct sk_buff *skb;
|
|
++
|
|
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + 16);
|
|
++ if (!skb)
|
|
++ return -ENOMEM;
|
|
++
|
|
++ cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
|
|
++ memset(cmd, 0, sizeof(*cmd));
|
|
++
|
|
++ /* big enough number so that firmware asserts */
|
|
++ cmd->vdev_id = __cpu_to_le32(0x7ffe);
|
|
++
|
|
++ return ath10k_wmi_cmd_send(ar, skb,
|
|
++ ar->wmi.cmd->vdev_install_key_cmdid);
|
|
++}
|
|
++
|
|
+ static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
|
|
+ char __user *user_buf,
|
|
+ size_t count, loff_t *ppos)
|
|
+ {
|
|
+- const char buf[] = "To simulate firmware crash write one of the"
|
|
+- " keywords to this file:\n `soft` - this will send"
|
|
+- " WMI_FORCE_FW_HANG_ASSERT to firmware if FW"
|
|
+- " supports that command.\n `hard` - this will send"
|
|
+- " to firmware command with illegal parameters"
|
|
+- " causing firmware crash.\n";
|
|
++ const char buf[] =
|
|
++ "To simulate firmware crash write one of the keywords to this file:\n"
|
|
++ "`soft` - this will send WMI_FORCE_FW_HANG_ASSERT to firmware if FW supports that command.\n"
|
|
++ "`hard` - this will send to firmware command with illegal parameters causing firmware crash.\n"
|
|
++ "`assert` - this will send special illegal parameter to firmware to cause assert failure and crash.\n"
|
|
++ "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";
|
|
+
|
|
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
|
|
+ }
|
|
+@@ -527,19 +827,30 @@ static ssize_t ath10k_write_simulate_fw_
|
|
+ }
|
|
+
|
|
+ if (!strcmp(buf, "soft")) {
|
|
+- ath10k_info("simulating soft firmware crash\n");
|
|
++ ath10k_info(ar, "simulating soft firmware crash\n");
|
|
+ ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
|
|
+ } else if (!strcmp(buf, "hard")) {
|
|
+- ath10k_info("simulating hard firmware crash\n");
|
|
+- ret = ath10k_wmi_vdev_set_param(ar, TARGET_NUM_VDEVS + 1,
|
|
+- ar->wmi.vdev_param->rts_threshold, 0);
|
|
++ ath10k_info(ar, "simulating hard firmware crash\n");
|
|
++ /* 0x7fff is vdev id, and it is always out of range for all
|
|
++ * firmware variants in order to force a firmware crash.
|
|
++ */
|
|
++ ret = ath10k_wmi_vdev_set_param(ar, 0x7fff,
|
|
++ ar->wmi.vdev_param->rts_threshold,
|
|
++ 0);
|
|
++ } else if (!strcmp(buf, "assert")) {
|
|
++ ath10k_info(ar, "simulating firmware assert crash\n");
|
|
++ ret = ath10k_debug_fw_assert(ar);
|
|
++ } else if (!strcmp(buf, "hw-restart")) {
|
|
++ ath10k_info(ar, "user requested hw restart\n");
|
|
++ queue_work(ar->workqueue, &ar->restart_work);
|
|
++ ret = 0;
|
|
+ } else {
|
|
+ ret = -EINVAL;
|
|
+ goto exit;
|
|
+ }
|
|
+
|
|
+ if (ret) {
|
|
+- ath10k_warn("failed to simulate firmware crash: %d\n", ret);
|
|
++ ath10k_warn(ar, "failed to simulate firmware crash: %d\n", ret);
|
|
+ goto exit;
|
|
+ }
|
|
+
|
|
+@@ -565,13 +876,375 @@ static ssize_t ath10k_read_chip_id(struc
|
|
+ unsigned int len;
|
|
+ char buf[50];
|
|
+
|
|
+- len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
|
|
++ len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
|
|
++
|
|
++ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
|
++}
|
|
++
|
|
++static const struct file_operations fops_chip_id = {
|
|
++ .read = ath10k_read_chip_id,
|
|
++ .open = simple_open,
|
|
++ .owner = THIS_MODULE,
|
|
++ .llseek = default_llseek,
|
|
++};
|
|
++
|
|
++struct ath10k_fw_crash_data *
|
|
++ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
|
|
++{
|
|
++ struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
|
|
++
|
|
++ lockdep_assert_held(&ar->data_lock);
|
|
++
|
|
++ crash_data->crashed_since_read = true;
|
|
++ uuid_le_gen(&crash_data->uuid);
|
|
++ getnstimeofday(&crash_data->timestamp);
|
|
++
|
|
++ return crash_data;
|
|
++}
|
|
++EXPORT_SYMBOL(ath10k_debug_get_new_fw_crash_data);
|
|
++
|
|
++static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar)
|
|
++{
|
|
++ struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
|
|
++ struct ath10k_dump_file_data *dump_data;
|
|
++ struct ath10k_tlv_dump_data *dump_tlv;
|
|
++ int hdr_len = sizeof(*dump_data);
|
|
++ unsigned int len, sofar = 0;
|
|
++ unsigned char *buf;
|
|
++
|
|
++ len = hdr_len;
|
|
++ len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
|
|
++
|
|
++ sofar += hdr_len;
|
|
++
|
|
++ /* This is going to get big when we start dumping FW RAM and such,
|
|
++ * so go ahead and use vmalloc.
|
|
++ */
|
|
++ buf = vzalloc(len);
|
|
++ if (!buf)
|
|
++ return NULL;
|
|
++
|
|
++ spin_lock_bh(&ar->data_lock);
|
|
++
|
|
++ if (!crash_data->crashed_since_read) {
|
|
++ spin_unlock_bh(&ar->data_lock);
|
|
++ vfree(buf);
|
|
++ return NULL;
|
|
++ }
|
|
++
|
|
++ dump_data = (struct ath10k_dump_file_data *)(buf);
|
|
++ strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
|
|
++ sizeof(dump_data->df_magic));
|
|
++ dump_data->len = cpu_to_le32(len);
|
|
++
|
|
++ dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
|
|
++
|
|
++ memcpy(dump_data->uuid, &crash_data->uuid, sizeof(dump_data->uuid));
|
|
++ dump_data->chip_id = cpu_to_le32(ar->chip_id);
|
|
++ dump_data->bus_type = cpu_to_le32(0);
|
|
++ dump_data->target_version = cpu_to_le32(ar->target_version);
|
|
++ dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
|
|
++ dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
|
|
++ dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
|
|
++ dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
|
|
++ dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
|
|
++ dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
|
|
++ dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
|
|
++ dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
|
|
++ dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
|
|
++ dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
|
|
++
|
|
++ strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
|
|
++ sizeof(dump_data->fw_ver));
|
|
++
|
|
++ dump_data->kernel_ver_code = 0;
|
|
++ strlcpy(dump_data->kernel_ver, init_utsname()->release,
|
|
++ sizeof(dump_data->kernel_ver));
|
|
++
|
|
++ dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
|
|
++ dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
|
|
++
|
|
++ /* Gather crash-dump */
|
|
++ dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
|
|
++ dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
|
|
++ dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
|
|
++ memcpy(dump_tlv->tlv_data, &crash_data->registers,
|
|
++ sizeof(crash_data->registers));
|
|
++ sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
|
|
++
|
|
++ ar->debug.fw_crash_data->crashed_since_read = false;
|
|
++
|
|
++ spin_unlock_bh(&ar->data_lock);
|
|
++
|
|
++ return dump_data;
|
|
++}
|
|
++
|
|
++static int ath10k_fw_crash_dump_open(struct inode *inode, struct file *file)
|
|
++{
|
|
++ struct ath10k *ar = inode->i_private;
|
|
++ struct ath10k_dump_file_data *dump;
|
|
++
|
|
++ dump = ath10k_build_dump_file(ar);
|
|
++ if (!dump)
|
|
++ return -ENODATA;
|
|
++
|
|
++ file->private_data = dump;
|
|
++
|
|
++ return 0;
|
|
++}
|
|
++
|
|
++static ssize_t ath10k_fw_crash_dump_read(struct file *file,
|
|
++ char __user *user_buf,
|
|
++ size_t count, loff_t *ppos)
|
|
++{
|
|
++ struct ath10k_dump_file_data *dump_file = file->private_data;
|
|
++
|
|
++ return simple_read_from_buffer(user_buf, count, ppos,
|
|
++ dump_file,
|
|
++ le32_to_cpu(dump_file->len));
|
|
++}
|
|
++
|
|
++static int ath10k_fw_crash_dump_release(struct inode *inode,
|
|
++ struct file *file)
|
|
++{
|
|
++ vfree(file->private_data);
|
|
++
|
|
++ return 0;
|
|
++}
|
|
++
|
|
++static const struct file_operations fops_fw_crash_dump = {
|
|
++ .open = ath10k_fw_crash_dump_open,
|
|
++ .read = ath10k_fw_crash_dump_read,
|
|
++ .release = ath10k_fw_crash_dump_release,
|
|
++ .owner = THIS_MODULE,
|
|
++ .llseek = default_llseek,
|
|
++};
|
|
++
|
|
++static ssize_t ath10k_reg_addr_read(struct file *file,
|
|
++ char __user *user_buf,
|
|
++ size_t count, loff_t *ppos)
|
|
++{
|
|
++ struct ath10k *ar = file->private_data;
|
|
++ u8 buf[32];
|
|
++ unsigned int len = 0;
|
|
++ u32 reg_addr;
|
|
++
|
|
++ mutex_lock(&ar->conf_mutex);
|
|
++ reg_addr = ar->debug.reg_addr;
|
|
++ mutex_unlock(&ar->conf_mutex);
|
|
++
|
|
++ len += scnprintf(buf + len, sizeof(buf) - len, "0x%x\n", reg_addr);
|
|
++
|
|
++ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
|
++}
|
|
++
|
|
++static ssize_t ath10k_reg_addr_write(struct file *file,
|
|
++ const char __user *user_buf,
|
|
++ size_t count, loff_t *ppos)
|
|
++{
|
|
++ struct ath10k *ar = file->private_data;
|
|
++ u32 reg_addr;
|
|
++ int ret;
|
|
++
|
|
++ ret = kstrtou32_from_user(user_buf, count, 0, &reg_addr);
|
|
++ if (ret)
|
|
++ return ret;
|
|
++
|
|
++ if (!IS_ALIGNED(reg_addr, 4))
|
|
++ return -EFAULT;
|
|
++
|
|
++ mutex_lock(&ar->conf_mutex);
|
|
++ ar->debug.reg_addr = reg_addr;
|
|
++ mutex_unlock(&ar->conf_mutex);
|
|
++
|
|
++ return count;
|
|
++}
|
|
++
|
|
++static const struct file_operations fops_reg_addr = {
|
|
++ .read = ath10k_reg_addr_read,
|
|
++ .write = ath10k_reg_addr_write,
|
|
++ .open = simple_open,
|
|
++ .owner = THIS_MODULE,
|
|
++ .llseek = default_llseek,
|
|
++};
|
|
++
|
|
++static ssize_t ath10k_reg_value_read(struct file *file,
|
|
++ char __user *user_buf,
|
|
++ size_t count, loff_t *ppos)
|
|
++{
|
|
++ struct ath10k *ar = file->private_data;
|
|
++ u8 buf[48];
|
|
++ unsigned int len;
|
|
++ u32 reg_addr, reg_val;
|
|
++ int ret;
|
|
++
|
|
++ mutex_lock(&ar->conf_mutex);
|
|
++
|
|
++ if (ar->state != ATH10K_STATE_ON &&
|
|
++ ar->state != ATH10K_STATE_UTF) {
|
|
++ ret = -ENETDOWN;
|
|
++ goto exit;
|
|
++ }
|
|
++
|
|
++ reg_addr = ar->debug.reg_addr;
|
|
++
|
|
++ reg_val = ath10k_hif_read32(ar, reg_addr);
|
|
++ len = scnprintf(buf, sizeof(buf), "0x%08x:0x%08x\n", reg_addr, reg_val);
|
|
++
|
|
++ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
|
++
|
|
++exit:
|
|
++ mutex_unlock(&ar->conf_mutex);
|
|
++
|
|
++ return ret;
|
|
++}
|
|
++
|
|
++static ssize_t ath10k_reg_value_write(struct file *file,
|
|
++ const char __user *user_buf,
|
|
++ size_t count, loff_t *ppos)
|
|
++{
|
|
++ struct ath10k *ar = file->private_data;
|
|
++ u32 reg_addr, reg_val;
|
|
++ int ret;
|
|
++
|
|
++ mutex_lock(&ar->conf_mutex);
|
|
++
|
|
++ if (ar->state != ATH10K_STATE_ON &&
|
|
++ ar->state != ATH10K_STATE_UTF) {
|
|
++ ret = -ENETDOWN;
|
|
++ goto exit;
|
|
++ }
|
|
++
|
|
++ reg_addr = ar->debug.reg_addr;
|
|
++
|
|
++ ret = kstrtou32_from_user(user_buf, count, 0, &reg_val);
|
|
++ if (ret)
|
|
++ goto exit;
|
|
++
|
|
++ ath10k_hif_write32(ar, reg_addr, reg_val);
|
|
++
|
|
++ ret = count;
|
|
++
|
|
++exit:
|
|
++ mutex_unlock(&ar->conf_mutex);
|
|
++
|
|
++ return ret;
|
|
++}
|
|
++
|
|
++static const struct file_operations fops_reg_value = {
|
|
++ .read = ath10k_reg_value_read,
|
|
++ .write = ath10k_reg_value_write,
|
|
++ .open = simple_open,
|
|
++ .owner = THIS_MODULE,
|
|
++ .llseek = default_llseek,
|
|
++};
|
|
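Taken together these two files form a two-step register window: write the 4-byte-aligned target address into the first, then read "addr:value" back from the second. A hypothetical userspace helper along those lines; the file names match the fops here, but the debugfs directory layout is an assumption of this sketch, not spelled out in the quoted lines:

#include <stdio.h>

/* dir: assumed to be the device's ath10k debugfs directory */
static int read_target_reg(const char *dir, unsigned int addr,
			   unsigned int *val)
{
	char path[512];
	FILE *f;

	snprintf(path, sizeof(path), "%s/reg_addr", dir);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "0x%x\n", addr);           /* must be 4-byte aligned */
	fclose(f);

	snprintf(path, sizeof(path), "%s/reg_value", dir);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%*x:%x", val) != 1) {  /* format: "0xADDR:0xVALUE" */
		fclose(f);
		return -1;
	}
	fclose(f);
	return 0;
}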
++
|
|
++static ssize_t ath10k_mem_value_read(struct file *file,
|
|
++ char __user *user_buf,
|
|
++ size_t count, loff_t *ppos)
|
|
++{
|
|
++ struct ath10k *ar = file->private_data;
|
|
++ u8 *buf;
|
|
++ int ret;
|
|
++
|
|
++ if (*ppos < 0)
|
|
++ return -EINVAL;
|
|
++
|
|
++ if (!count)
|
|
++ return 0;
|
|
++
|
|
++ mutex_lock(&ar->conf_mutex);
|
|
++
|
|
++ buf = vmalloc(count);
|
|
++ if (!buf) {
|
|
++ ret = -ENOMEM;
|
|
++ goto exit;
|
|
++ }
|
|
++
|
|
++ if (ar->state != ATH10K_STATE_ON &&
|
|
++ ar->state != ATH10K_STATE_UTF) {
|
|
++ ret = -ENETDOWN;
|
|
++ goto exit;
|
|
++ }
|
|
++
|
|
++ ret = ath10k_hif_diag_read(ar, *ppos, buf, count);
|
|
++ if (ret) {
|
|
++ ath10k_warn(ar, "failed to read address 0x%08x via diagnose window from debugfs: %d\n",
|
|
++ (u32)(*ppos), ret);
|
|
++ goto exit;
|
|
++ }
|
|
++
|
|
++ ret = copy_to_user(user_buf, buf, count);
|
|
++ if (ret) {
|
|
++ ret = -EFAULT;
|
|
++ goto exit;
|
|
++ }
|
|
++
|
|
++ count -= ret;
|
|
++ *ppos += count;
|
|
++ ret = count;
|
|
++
|
|
++exit:
|
|
++ vfree(buf);
|
|
++ mutex_unlock(&ar->conf_mutex);
|
|
++
|
|
++ return ret;
|
|
++}
|
|
++
|
|
++static ssize_t ath10k_mem_value_write(struct file *file,
|
|
++ const char __user *user_buf,
|
|
++ size_t count, loff_t *ppos)
|
|
++{
|
|
++ struct ath10k *ar = file->private_data;
|
|
++ u8 *buf;
|
|
++ int ret;
|
|
++
|
|
++ if (*ppos < 0)
|
|
++ return -EINVAL;
|
|
++
|
|
++ if (!count)
|
|
++ return 0;
|
|
++
|
|
++ mutex_lock(&ar->conf_mutex);
|
|
++
|
|
++ buf = vmalloc(count);
|
|
++ if (!buf) {
|
|
++ ret = -ENOMEM;
|
|
++ goto exit;
|
|
++ }
|
|
++
|
|
++ if (ar->state != ATH10K_STATE_ON &&
|
|
++ ar->state != ATH10K_STATE_UTF) {
|
|
++ ret = -ENETDOWN;
|
|
++ goto exit;
|
|
++ }
|
|
++
|
|
++ ret = copy_from_user(buf, user_buf, count);
|
|
++ if (ret) {
|
|
++ ret = -EFAULT;
|
|
++ goto exit;
|
|
++ }
|
|
++
|
|
++ ret = ath10k_hif_diag_write(ar, *ppos, buf, count);
|
|
++ if (ret) {
|
|
++ ath10k_warn(ar, "failed to write address 0x%08x via diagnose window from debugfs: %d\n",
|
|
++ (u32)(*ppos), ret);
|
|
++ goto exit;
|
|
++ }
|
|
++
|
|
++ *ppos += count;
|
|
++ ret = count;
|
|
+
|
|
+- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
|
++exit:
|
|
++ vfree(buf);
|
|
++ mutex_unlock(&ar->conf_mutex);
|
|
++
|
|
++ return ret;
|
|
+ }
|
|
+
|
|
+-static const struct file_operations fops_chip_id = {
|
|
+- .read = ath10k_read_chip_id,
|
|
++static const struct file_operations fops_mem_value = {
|
|
++ .read = ath10k_mem_value_read,
|
|
++ .write = ath10k_mem_value_write,
|
|
+ .open = simple_open,
|
|
+ .owner = THIS_MODULE,
|
|
+ .llseek = default_llseek,
|
|
+@@ -596,7 +1269,7 @@ static int ath10k_debug_htt_stats_req(st
|
|
+ ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
|
|
+ cookie);
|
|
+ if (ret) {
|
|
+- ath10k_warn("failed to send htt stats request: %d\n", ret);
|
|
++ ath10k_warn(ar, "failed to send htt stats request: %d\n", ret);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+@@ -619,8 +1292,8 @@ static void ath10k_debug_htt_stats_dwork
|
|
+ }
|
|
+
|
|
+ static ssize_t ath10k_read_htt_stats_mask(struct file *file,
|
|
+- char __user *user_buf,
|
|
+- size_t count, loff_t *ppos)
|
|
++ char __user *user_buf,
|
|
++ size_t count, loff_t *ppos)
|
|
+ {
|
|
+ struct ath10k *ar = file->private_data;
|
|
+ char buf[32];
|
|
+@@ -632,8 +1305,8 @@ static ssize_t ath10k_read_htt_stats_mas
|
|
+ }
|
|
+
|
|
+ static ssize_t ath10k_write_htt_stats_mask(struct file *file,
|
|
+- const char __user *user_buf,
|
|
+- size_t count, loff_t *ppos)
|
|
++ const char __user *user_buf,
|
|
++ size_t count, loff_t *ppos)
|
|
+ {
|
|
+ struct ath10k *ar = file->private_data;
|
|
+ unsigned long mask;
|
|
+@@ -671,16 +1344,82 @@ static const struct file_operations fops
|
|
+ .llseek = default_llseek,
|
|
+ };
|
|
+
|
|
++static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file,
|
|
++ char __user *user_buf,
|
|
++ size_t count, loff_t *ppos)
|
|
++{
|
|
++ struct ath10k *ar = file->private_data;
|
|
++ char buf[64];
|
|
++ u8 amsdu = 3, ampdu = 64;
|
|
++ unsigned int len;
|
|
++
|
|
++ mutex_lock(&ar->conf_mutex);
|
|
++
|
|
++ if (ar->debug.htt_max_amsdu)
|
|
++ amsdu = ar->debug.htt_max_amsdu;
|
|
++
|
|
++ if (ar->debug.htt_max_ampdu)
|
|
++ ampdu = ar->debug.htt_max_ampdu;
|
|
++
|
|
++ mutex_unlock(&ar->conf_mutex);
|
|
++
|
|
++ len = scnprintf(buf, sizeof(buf), "%u %u\n", amsdu, ampdu);
|
|
++
|
|
++ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
|
++}
|
|
++
|
|
++static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file,
|
|
++ const char __user *user_buf,
|
|
++ size_t count, loff_t *ppos)
|
|
++{
|
|
++ struct ath10k *ar = file->private_data;
|
|
++ int res;
|
|
++ char buf[64];
|
|
++ unsigned int amsdu, ampdu;
|
|
++
|
|
++ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
|
|
++
|
|
++ /* make sure that buf is null terminated */
|
|
++ buf[sizeof(buf) - 1] = 0;
|
|
++
|
|
++ res = sscanf(buf, "%u %u", &amsdu, &ampdu);
|
|
++
|
|
++ if (res != 2)
|
|
++ return -EINVAL;
|
|
++
|
|
++ mutex_lock(&ar->conf_mutex);
|
|
++
|
|
++ res = ath10k_htt_h2t_aggr_cfg_msg(&ar->htt, ampdu, amsdu);
|
|
++ if (res)
|
|
++ goto out;
|
|
++
|
|
++ res = count;
|
|
++ ar->debug.htt_max_amsdu = amsdu;
|
|
++ ar->debug.htt_max_ampdu = ampdu;
|
|
++
|
|
++out:
|
|
++ mutex_unlock(&ar->conf_mutex);
|
|
++ return res;
|
|
++}
|
|
++
|
|
++static const struct file_operations fops_htt_max_amsdu_ampdu = {
|
|
++ .read = ath10k_read_htt_max_amsdu_ampdu,
|
|
++ .write = ath10k_write_htt_max_amsdu_ampdu,
|
|
++ .open = simple_open,
|
|
++ .owner = THIS_MODULE,
|
|
++ .llseek = default_llseek,
|
|
++};
|
|
++
|
|
+ static ssize_t ath10k_read_fw_dbglog(struct file *file,
|
|
+- char __user *user_buf,
|
|
+- size_t count, loff_t *ppos)
|
|
++ char __user *user_buf,
|
|
++ size_t count, loff_t *ppos)
|
|
+ {
|
|
+ struct ath10k *ar = file->private_data;
|
|
+ unsigned int len;
|
|
+- char buf[32];
|
|
++ char buf[64];
|
|
+
|
|
+- len = scnprintf(buf, sizeof(buf), "0x%08x\n",
|
|
+- ar->debug.fw_dbglog_mask);
|
|
++ len = scnprintf(buf, sizeof(buf), "0x%08x %u\n",
|
|
++ ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level);
|
|
+
|
|
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
|
+ }
|
|
+@@ -690,21 +1429,34 @@ static ssize_t ath10k_write_fw_dbglog(st
|
|
+ size_t count, loff_t *ppos)
|
|
+ {
|
|
+ struct ath10k *ar = file->private_data;
|
|
+- unsigned long mask;
|
|
+ int ret;
|
|
++ char buf[64];
|
|
++ unsigned int log_level, mask;
|
|
+
|
|
+- ret = kstrtoul_from_user(user_buf, count, 0, &mask);
|
|
+- if (ret)
|
|
+- return ret;
|
|
++ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
|
|
++
|
|
++ /* make sure that buf is null terminated */
|
|
++ buf[sizeof(buf) - 1] = 0;
|
|
++
|
|
++ ret = sscanf(buf, "%x %u", &mask, &log_level);
|
|
++
|
|
++ if (!ret)
|
|
++ return -EINVAL;
|
|
++
|
|
++ if (ret == 1)
|
|
++ /* default if user did not specify */
|
|
++ log_level = ATH10K_DBGLOG_LEVEL_WARN;
|
|
+
|
|
+ mutex_lock(&ar->conf_mutex);
|
|
+
|
|
+ ar->debug.fw_dbglog_mask = mask;
|
|
++ ar->debug.fw_dbglog_level = log_level;
|
|
+
|
|
+ if (ar->state == ATH10K_STATE_ON) {
|
|
+- ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
|
|
++ ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
|
|
++ ar->debug.fw_dbglog_level);
|
|
+ if (ret) {
|
|
+- ath10k_warn("dbglog cfg failed from debugfs: %d\n",
|
|
++ ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n",
|
|
+ ret);
|
|
+ goto exit;
|
|
+ }
|
|
+@@ -718,6 +1470,166 @@ exit:
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
++/* TODO: Would be nice to always support ethtool stats, would need to
|
|
++ * move the stats storage out of ath10k_debug, or always have ath10k_debug
|
|
++ * struct available..
|
|
++ */
|
|
++
|
|
++/* This generally corresponds to the debugfs fw_stats file */
|
|
++static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = {
|
|
++ "tx_pkts_nic",
|
|
++ "tx_bytes_nic",
|
|
++ "rx_pkts_nic",
|
|
++ "rx_bytes_nic",
|
|
++ "d_noise_floor",
|
|
++ "d_cycle_count",
|
|
++ "d_phy_error",
|
|
++ "d_rts_bad",
|
|
++ "d_rts_good",
|
|
++ "d_tx_power", /* in .5 dbM I think */
|
|
++ "d_rx_crc_err", /* fcs_bad */
|
|
++ "d_no_beacon",
|
|
++ "d_tx_mpdus_queued",
|
|
++ "d_tx_msdu_queued",
|
|
++ "d_tx_msdu_dropped",
|
|
++ "d_local_enqued",
|
|
++ "d_local_freed",
|
|
++ "d_tx_ppdu_hw_queued",
|
|
++ "d_tx_ppdu_reaped",
|
|
++ "d_tx_fifo_underrun",
|
|
++ "d_tx_ppdu_abort",
|
|
++ "d_tx_mpdu_requed",
|
|
++ "d_tx_excessive_retries",
|
|
++ "d_tx_hw_rate",
|
|
++ "d_tx_dropped_sw_retries",
|
|
++ "d_tx_illegal_rate",
|
|
++ "d_tx_continuous_xretries",
|
|
++ "d_tx_timeout",
|
|
++ "d_tx_mpdu_txop_limit",
|
|
++ "d_pdev_resets",
|
|
++ "d_rx_mid_ppdu_route_change",
|
|
++ "d_rx_status",
|
|
++ "d_rx_extra_frags_ring0",
|
|
++ "d_rx_extra_frags_ring1",
|
|
++ "d_rx_extra_frags_ring2",
|
|
++ "d_rx_extra_frags_ring3",
|
|
++ "d_rx_msdu_htt",
|
|
++ "d_rx_mpdu_htt",
|
|
++ "d_rx_msdu_stack",
|
|
++ "d_rx_mpdu_stack",
|
|
++ "d_rx_phy_err",
|
|
++ "d_rx_phy_err_drops",
|
|
++ "d_rx_mpdu_errors", /* FCS, MIC, ENC */
|
|
++ "d_fw_crash_count",
|
|
++ "d_fw_warm_reset_count",
|
|
++ "d_fw_cold_reset_count",
|
|
++};
|
|
++
|
|
++#define ATH10K_SSTATS_LEN ARRAY_SIZE(ath10k_gstrings_stats)
|
|
++
|
|
++void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
|
|
++ struct ieee80211_vif *vif,
|
|
++ u32 sset, u8 *data)
|
|
++{
|
|
++ if (sset == ETH_SS_STATS)
|
|
++ memcpy(data, *ath10k_gstrings_stats,
|
|
++ sizeof(ath10k_gstrings_stats));
|
|
++}
|
|
++
|
|
++int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
|
|
++ struct ieee80211_vif *vif, int sset)
|
|
++{
|
|
++ if (sset == ETH_SS_STATS)
|
|
++ return ATH10K_SSTATS_LEN;
|
|
++
|
|
++ return 0;
|
|
++}
|
|
++
|
|
++void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
|
|
++ struct ieee80211_vif *vif,
|
|
++ struct ethtool_stats *stats, u64 *data)
|
|
++{
|
|
++ struct ath10k *ar = hw->priv;
|
|
++ static const struct ath10k_fw_stats_pdev zero_stats = {};
|
|
++ const struct ath10k_fw_stats_pdev *pdev_stats;
|
|
++ int i = 0, ret;
|
|
++
|
|
++ mutex_lock(&ar->conf_mutex);
|
|
++
|
|
++ if (ar->state == ATH10K_STATE_ON) {
|
|
++ ret = ath10k_debug_fw_stats_request(ar);
|
|
++ if (ret) {
|
|
++ /* just print a warning and try to use older results */
|
|
++ ath10k_warn(ar,
|
|
++ "failed to get fw stats for ethtool: %d\n",
|
|
++ ret);
|
|
++ }
|
|
++ }
|
|
++
|
|
++ pdev_stats = list_first_entry_or_null(&ar->debug.fw_stats.pdevs,
|
|
++ struct ath10k_fw_stats_pdev,
|
|
++ list);
|
|
++ if (!pdev_stats) {
|
|
++ /* no results available so just return zeroes */
|
|
++ pdev_stats = &zero_stats;
|
|
++ }
|
|
++
|
|
++ spin_lock_bh(&ar->data_lock);
|
|
++
|
|
++ data[i++] = pdev_stats->hw_reaped; /* ppdu reaped */
|
|
++ data[i++] = 0; /* tx bytes */
|
|
++ data[i++] = pdev_stats->htt_mpdus;
|
|
++ data[i++] = 0; /* rx bytes */
|
|
++ data[i++] = pdev_stats->ch_noise_floor;
|
|
++ data[i++] = pdev_stats->cycle_count;
|
|
++ data[i++] = pdev_stats->phy_err_count;
|
|
++ data[i++] = pdev_stats->rts_bad;
|
|
++ data[i++] = pdev_stats->rts_good;
|
|
++ data[i++] = pdev_stats->chan_tx_power;
|
|
++ data[i++] = pdev_stats->fcs_bad;
|
|
++ data[i++] = pdev_stats->no_beacons;
|
|
++ data[i++] = pdev_stats->mpdu_enqued;
|
|
++ data[i++] = pdev_stats->msdu_enqued;
|
|
++ data[i++] = pdev_stats->wmm_drop;
|
|
++ data[i++] = pdev_stats->local_enqued;
|
|
++ data[i++] = pdev_stats->local_freed;
|
|
++ data[i++] = pdev_stats->hw_queued;
|
|
++ data[i++] = pdev_stats->hw_reaped;
|
|
++ data[i++] = pdev_stats->underrun;
|
|
++ data[i++] = pdev_stats->tx_abort;
|
|
++ data[i++] = pdev_stats->mpdus_requed;
|
|
++ data[i++] = pdev_stats->tx_ko;
|
|
++ data[i++] = pdev_stats->data_rc;
|
|
++ data[i++] = pdev_stats->sw_retry_failure;
|
|
++ data[i++] = pdev_stats->illgl_rate_phy_err;
|
|
++ data[i++] = pdev_stats->pdev_cont_xretry;
|
|
++ data[i++] = pdev_stats->pdev_tx_timeout;
|
|
++ data[i++] = pdev_stats->txop_ovf;
|
|
++ data[i++] = pdev_stats->pdev_resets;
|
|
++ data[i++] = pdev_stats->mid_ppdu_route_change;
|
|
++ data[i++] = pdev_stats->status_rcvd;
|
|
++ data[i++] = pdev_stats->r0_frags;
|
|
++ data[i++] = pdev_stats->r1_frags;
|
|
++ data[i++] = pdev_stats->r2_frags;
|
|
++ data[i++] = pdev_stats->r3_frags;
|
|
++ data[i++] = pdev_stats->htt_msdus;
|
|
++ data[i++] = pdev_stats->htt_mpdus;
|
|
++ data[i++] = pdev_stats->loc_msdus;
|
|
++ data[i++] = pdev_stats->loc_mpdus;
|
|
++ data[i++] = pdev_stats->phy_errs;
|
|
++ data[i++] = pdev_stats->phy_err_drop;
|
|
++ data[i++] = pdev_stats->mpdu_errs;
|
|
++ data[i++] = ar->stats.fw_crash_counter;
|
|
++ data[i++] = ar->stats.fw_warm_reset_counter;
|
|
++ data[i++] = ar->stats.fw_cold_reset_counter;
|
|
++
|
|
++ spin_unlock_bh(&ar->data_lock);
|
|
++
|
|
++ mutex_unlock(&ar->conf_mutex);
|
|
++
|
|
++ WARN_ON(i != ATH10K_SSTATS_LEN);
|
|
++}
|
|
++
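(These three hooks expose the firmware pdev stats through the standard ethtool API; mac80211 forwards the ethtool request to the driver once the ops are wired into ieee80211_ops. Usage sketch, interface name assumed:)

    ethtool -S wlan0    # prints tx_pkts_nic, d_noise_floor, ... per ath10k_gstrings_stats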
|
|
+ static const struct file_operations fops_fw_dbglog = {
+ .read = ath10k_read_fw_dbglog,
+ .write = ath10k_write_fw_dbglog,
+@@ -726,6 +1638,151 @@ static const struct file_operations fops
+ .llseek = default_llseek,
+ };
+
++static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
++{
++ struct ath10k *ar = inode->i_private;
++ void *buf;
++ u32 hi_addr;
++ __le32 addr;
++ int ret;
++
++ mutex_lock(&ar->conf_mutex);
++
++ if (ar->state != ATH10K_STATE_ON &&
++ ar->state != ATH10K_STATE_UTF) {
++ ret = -ENETDOWN;
++ goto err;
++ }
++
++ buf = vmalloc(QCA988X_CAL_DATA_LEN);
++ if (!buf) {
++ ret = -ENOMEM;
++ goto err;
++ }
++
++ hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
++
++ ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
++ if (ret) {
++ ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret);
++ goto err_vfree;
++ }
++
++ ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
++ QCA988X_CAL_DATA_LEN);
++ if (ret) {
++ ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
++ goto err_vfree;
++ }
++
++ file->private_data = buf;
++
++ mutex_unlock(&ar->conf_mutex);
++
++ return 0;
++
++err_vfree:
++ vfree(buf);
++
++err:
++ mutex_unlock(&ar->conf_mutex);
++
++ return ret;
++}
++
++static ssize_t ath10k_debug_cal_data_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ void *buf = file->private_data;
++
++ return simple_read_from_buffer(user_buf, count, ppos,
++ buf, QCA988X_CAL_DATA_LEN);
++}
++
++static int ath10k_debug_cal_data_release(struct inode *inode,
++ struct file *file)
++{
++ vfree(file->private_data);
++
++ return 0;
++}
++
++static const struct file_operations fops_cal_data = {
++ .open = ath10k_debug_cal_data_open,
++ .read = ath10k_debug_cal_data_read,
++ .release = ath10k_debug_cal_data_release,
++ .owner = THIS_MODULE,
++ .llseek = default_llseek,
++};
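(The cal_data file follows the common debugfs snapshot pattern: open() copies the calibration blob out of firmware memory under conf_mutex, read() serves the copy via simple_read_from_buffer(), and release() frees it, so partial reads always see one coherent snapshot. Usage sketch, path assumed:)

    cp /sys/kernel/debug/ieee80211/phy0/ath10k/cal_data cal_data.bin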
|
|
++
++static ssize_t ath10k_read_nf_cal_period(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct ath10k *ar = file->private_data;
++ unsigned int len;
++ char buf[32];
++
++ len = scnprintf(buf, sizeof(buf), "%d\n",
++ ar->debug.nf_cal_period);
++
++ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
++}
++
++static ssize_t ath10k_write_nf_cal_period(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct ath10k *ar = file->private_data;
++ unsigned long period;
++ int ret;
++
++ ret = kstrtoul_from_user(user_buf, count, 0, &period);
++ if (ret)
++ return ret;
++
++ if (period > WMI_PDEV_PARAM_CAL_PERIOD_MAX)
++ return -EINVAL;
++
++ /* there's no way to switch back to the firmware default */
++ if (period == 0)
++ return -EINVAL;
++
++ mutex_lock(&ar->conf_mutex);
++
++ ar->debug.nf_cal_period = period;
++
++ if (ar->state != ATH10K_STATE_ON) {
++ /* firmware is not running, nothing else to do */
++ ret = count;
++ goto exit;
++ }
++
++ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->cal_period,
++ ar->debug.nf_cal_period);
++ if (ret) {
++ ath10k_warn(ar, "cal period cfg failed from debugfs: %d\n",
++ ret);
++ goto exit;
++ }
++
++ ret = count;
++
++exit:
++ mutex_unlock(&ar->conf_mutex);
++
++ return ret;
++}
++
++static const struct file_operations fops_nf_cal_period = {
++ .read = ath10k_read_nf_cal_period,
++ .write = ath10k_write_nf_cal_period,
++ .open = simple_open,
++ .owner = THIS_MODULE,
++ .llseek = default_llseek,
++};
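(Usage sketch for the new knob, path assumed; the value is handed straight to the firmware's cal_period pdev param, so its unit is firmware-defined, and 0 is rejected because the firmware default cannot be restored once overridden:)

    echo 60 > /sys/kernel/debug/ieee80211/phy0/ath10k/nf_cal_period
    cat /sys/kernel/debug/ieee80211/phy0/ath10k/nf_cal_period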
|
|
++
+ int ath10k_debug_start(struct ath10k *ar)
+ {
+ int ret;
+@@ -735,17 +1792,44 @@ int ath10k_debug_start(struct ath10k *ar
+ ret = ath10k_debug_htt_stats_req(ar);
+ if (ret)
+ /* continue normally anyway, this isn't serious */
+- ath10k_warn("failed to start htt stats workqueue: %d\n", ret);
++ ath10k_warn(ar, "failed to start htt stats workqueue: %d\n",
++ ret);
+
+ if (ar->debug.fw_dbglog_mask) {
+- ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
++ ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
++ ATH10K_DBGLOG_LEVEL_WARN);
+ if (ret)
+ /* not serious */
+- ath10k_warn("failed to enable dbglog during start: %d",
++ ath10k_warn(ar, "failed to enable dbglog during start: %d",
+ ret);
+ }
+
+- return 0;
++ if (ar->debug.pktlog_filter) {
++ ret = ath10k_wmi_pdev_pktlog_enable(ar,
++ ar->debug.pktlog_filter);
++ if (ret)
++ /* not serious */
++ ath10k_warn(ar,
++ "failed to enable pktlog filter %x: %d\n",
++ ar->debug.pktlog_filter, ret);
++ } else {
++ ret = ath10k_wmi_pdev_pktlog_disable(ar);
++ if (ret)
++ /* not serious */
++ ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
++ }
++
++ if (ar->debug.nf_cal_period) {
++ ret = ath10k_wmi_pdev_set_param(ar,
++ ar->wmi.pdev_param->cal_period,
++ ar->debug.nf_cal_period);
++ if (ret)
++ /* not serious */
++ ath10k_warn(ar, "cal period cfg failed from debug start: %d\n",
++ ret);
++ }
++
++ return ret;
+ }
+
+ void ath10k_debug_stop(struct ath10k *ar)
+@@ -757,6 +1841,11 @@ void ath10k_debug_stop(struct ath10k *ar
+ * warning from del_timer(). */
+ if (ar->debug.htt_stats_mask != 0)
+ cancel_delayed_work(&ar->debug.htt_stats_dwork);
++
++ ar->debug.htt_max_amsdu = 0;
++ ar->debug.htt_max_ampdu = 0;
++
++ ath10k_wmi_pdev_pktlog_disable(ar);
+ }
+
+ static ssize_t ath10k_write_simulate_radar(struct file *file,
+@@ -839,37 +1928,149 @@ static const struct file_operations fops
+ .llseek = default_llseek,
+ };
+
++static ssize_t ath10k_write_pktlog_filter(struct file *file,
++ const char __user *ubuf,
++ size_t count, loff_t *ppos)
++{
++ struct ath10k *ar = file->private_data;
++ u32 filter;
++ int ret;
++
++ if (kstrtouint_from_user(ubuf, count, 0, &filter))
++ return -EINVAL;
++
++ mutex_lock(&ar->conf_mutex);
++
++ if (ar->state != ATH10K_STATE_ON) {
++ ar->debug.pktlog_filter = filter;
++ ret = count;
++ goto out;
++ }
++
++ if (filter && (filter != ar->debug.pktlog_filter)) {
++ ret = ath10k_wmi_pdev_pktlog_enable(ar, filter);
++ if (ret) {
++ ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n",
++ ar->debug.pktlog_filter, ret);
++ goto out;
++ }
++ } else {
++ ret = ath10k_wmi_pdev_pktlog_disable(ar);
++ if (ret) {
++ ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
++ goto out;
++ }
++ }
++
++ ar->debug.pktlog_filter = filter;
++ ret = count;
++
++out:
++ mutex_unlock(&ar->conf_mutex);
++ return ret;
++}
++
++static ssize_t ath10k_read_pktlog_filter(struct file *file, char __user *ubuf,
++ size_t count, loff_t *ppos)
++{
++ char buf[32];
++ struct ath10k *ar = file->private_data;
++ int len = 0;
++
++ mutex_lock(&ar->conf_mutex);
++ len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
++ ar->debug.pktlog_filter);
++ mutex_unlock(&ar->conf_mutex);
++
++ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
++}
++
++static const struct file_operations fops_pktlog_filter = {
++ .read = ath10k_read_pktlog_filter,
++ .write = ath10k_write_pktlog_filter,
++ .open = simple_open
++};
++
+ int ath10k_debug_create(struct ath10k *ar)
+ {
++ ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
++ if (!ar->debug.fw_crash_data)
++ return -ENOMEM;
++
++ INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
++ INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
++ INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
++
++ return 0;
++}
++
++void ath10k_debug_destroy(struct ath10k *ar)
++{
++ vfree(ar->debug.fw_crash_data);
++ ar->debug.fw_crash_data = NULL;
++
++ ath10k_debug_fw_stats_reset(ar);
++}
++
++int ath10k_debug_register(struct ath10k *ar)
++{
+ ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
+ ar->hw->wiphy->debugfsdir);
++ if (IS_ERR_OR_NULL(ar->debug.debugfs_phy)) {
++ if (IS_ERR(ar->debug.debugfs_phy))
++ return PTR_ERR(ar->debug.debugfs_phy);
+
+- if (!ar->debug.debugfs_phy)
+ return -ENOMEM;
++ }
+
+ INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
+ ath10k_debug_htt_stats_dwork);
+
+- init_completion(&ar->debug.event_stats_compl);
++ init_completion(&ar->debug.fw_stats_complete);
+
+ debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
+ &fops_fw_stats);
+
++ debugfs_create_file("fw_reset_stats", S_IRUSR, ar->debug.debugfs_phy,
++ ar, &fops_fw_reset_stats);
++
+ debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
+ &fops_wmi_services);
+
+ debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_simulate_fw_crash);
+
++ debugfs_create_file("fw_crash_dump", S_IRUSR, ar->debug.debugfs_phy,
++ ar, &fops_fw_crash_dump);
++
++ debugfs_create_file("reg_addr", S_IRUSR | S_IWUSR,
++ ar->debug.debugfs_phy, ar, &fops_reg_addr);
++
++ debugfs_create_file("reg_value", S_IRUSR | S_IWUSR,
++ ar->debug.debugfs_phy, ar, &fops_reg_value);
++
++ debugfs_create_file("mem_value", S_IRUSR | S_IWUSR,
++ ar->debug.debugfs_phy, ar, &fops_mem_value);
++
+ debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_chip_id);
+
+ debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_htt_stats_mask);
+
++ debugfs_create_file("htt_max_amsdu_ampdu", S_IRUSR | S_IWUSR,
++ ar->debug.debugfs_phy, ar,
++ &fops_htt_max_amsdu_ampdu);
++
+ debugfs_create_file("fw_dbglog", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_fw_dbglog);
+
++ debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
++ ar, &fops_cal_data);
++
++ debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
++ ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
++
+ if (config_enabled(CPTCFG_ATH10K_DFS_CERTIFIED)) {
+ debugfs_create_file("dfs_simulate_radar", S_IWUSR,
+ ar->debug.debugfs_phy, ar,
+@@ -884,10 +2085,13 @@ int ath10k_debug_create(struct ath10k *a
+ &fops_dfs_stats);
+ }
+
++ debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR,
++ ar->debug.debugfs_phy, ar, &fops_pktlog_filter);
++
+ return 0;
+ }
+
+-void ath10k_debug_destroy(struct ath10k *ar)
++void ath10k_debug_unregister(struct ath10k *ar)
+ {
+ cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
+ }
+@@ -895,7 +2099,8 @@ void ath10k_debug_destroy(struct ath10k
+ #endif /* CPTCFG_ATH10K_DEBUGFS */
+
+ #ifdef CPTCFG_ATH10K_DEBUG
+-void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...)
++void ath10k_dbg(struct ath10k *ar, enum ath10k_debug_mask mask,
++ const char *fmt, ...)
+ {
+ struct va_format vaf;
+ va_list args;
+@@ -906,27 +2111,43 @@ void ath10k_dbg(enum ath10k_debug_mask m
+ vaf.va = &args;
+
+ if (ath10k_debug_mask & mask)
+- ath10k_printk(KERN_DEBUG, "%pV", &vaf);
++ dev_printk(KERN_DEBUG, ar->dev, "%pV", &vaf);
+
+- trace_ath10k_log_dbg(mask, &vaf);
++ trace_ath10k_log_dbg(ar, mask, &vaf);
+
+ va_end(args);
+ }
+ EXPORT_SYMBOL(ath10k_dbg);
+
+-void ath10k_dbg_dump(enum ath10k_debug_mask mask,
++void ath10k_dbg_dump(struct ath10k *ar,
++ enum ath10k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+ {
++ char linebuf[256];
++ unsigned int linebuflen;
++ const void *ptr;
++
+ if (ath10k_debug_mask & mask) {
+ if (msg)
+- ath10k_dbg(mask, "%s\n", msg);
++ ath10k_dbg(ar, mask, "%s\n", msg);
+
+- print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
++ for (ptr = buf; (ptr - buf) < len; ptr += 16) {
++ linebuflen = 0;
++ linebuflen += scnprintf(linebuf + linebuflen,
++ sizeof(linebuf) - linebuflen,
++ "%s%08x: ",
++ (prefix ? prefix : ""),
++ (unsigned int)(ptr - buf));
++ hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1,
++ linebuf + linebuflen,
++ sizeof(linebuf) - linebuflen, true);
++ dev_printk(KERN_DEBUG, ar->dev, "%s\n", linebuf);
++ }
+ }
+
+ /* tracing code doesn't like null strings :/ */
+- trace_ath10k_log_dbg_dump(msg ? msg : "", prefix ? prefix : "",
++ trace_ath10k_log_dbg_dump(ar, msg ? msg : "", prefix ? prefix : "",
+ buf, len);
+ }
+ EXPORT_SYMBOL(ath10k_dbg_dump);
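(The open-coded loop above replaces print_hex_dump_bytes() so each dumped row goes through dev_printk() and carries the device prefix, which matters when several ath10k devices log concurrently. A minimal sketch of the same kernel API, outside this driver:)

    char line[256];

    /* format one 16-byte row of buf as hex plus an ASCII column */
    hex_dump_to_buffer(buf, min_t(size_t, len, 16), 16, 1,
                       line, sizeof(line), true);
    dev_printk(KERN_DEBUG, dev, "%s\n", line);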
|
|
+--- a/drivers/net/wireless/ath/ath10k/debug.h
++++ b/drivers/net/wireless/ath/ath10k/debug.h
+@@ -34,28 +34,55 @@ enum ath10k_debug_mask {
+ ATH10K_DBG_DATA = 0x00000200,
+ ATH10K_DBG_BMI = 0x00000400,
+ ATH10K_DBG_REGULATORY = 0x00000800,
++ ATH10K_DBG_TESTMODE = 0x00001000,
++ ATH10K_DBG_WMI_PRINT = 0x00002000,
+ ATH10K_DBG_ANY = 0xffffffff,
+ };
+
++enum ath10k_pktlog_filter {
++ ATH10K_PKTLOG_RX = 0x000000001,
++ ATH10K_PKTLOG_TX = 0x000000002,
++ ATH10K_PKTLOG_RCFIND = 0x000000004,
++ ATH10K_PKTLOG_RCUPDATE = 0x000000008,
++ ATH10K_PKTLOG_DBG_PRINT = 0x000000010,
++ ATH10K_PKTLOG_ANY = 0x00000001f,
++};
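(These values form the bitmask consumed by the pktlog_filter debugfs file added above. Usage sketch, path assumed:)

    echo 0x1f > /sys/kernel/debug/ieee80211/phy0/ath10k/pktlog_filter   # ATH10K_PKTLOG_ANY
    echo 0    > /sys/kernel/debug/ieee80211/phy0/ath10k/pktlog_filter   # disable pktlog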
|
|
++
++enum ath10k_dbg_aggr_mode {
++ ATH10K_DBG_AGGR_MODE_AUTO,
++ ATH10K_DBG_AGGR_MODE_MANUAL,
++ ATH10K_DBG_AGGR_MODE_MAX,
++};
++
+ extern unsigned int ath10k_debug_mask;
+
+-__printf(1, 2) int ath10k_info(const char *fmt, ...);
+-__printf(1, 2) int ath10k_err(const char *fmt, ...);
+-__printf(1, 2) int ath10k_warn(const char *fmt, ...);
++__printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
++__printf(2, 3) void ath10k_err(struct ath10k *ar, const char *fmt, ...);
++__printf(2, 3) void ath10k_warn(struct ath10k *ar, const char *fmt, ...);
++void ath10k_print_driver_info(struct ath10k *ar);
+
+ #ifdef CPTCFG_ATH10K_DEBUGFS
+ int ath10k_debug_start(struct ath10k *ar);
+ void ath10k_debug_stop(struct ath10k *ar);
+ int ath10k_debug_create(struct ath10k *ar);
+ void ath10k_debug_destroy(struct ath10k *ar);
+-void ath10k_debug_read_service_map(struct ath10k *ar,
+- void *service_map,
+- size_t map_size);
+-void ath10k_debug_read_target_stats(struct ath10k *ar,
+- struct wmi_stats_event *ev);
++int ath10k_debug_register(struct ath10k *ar);
++void ath10k_debug_unregister(struct ath10k *ar);
++void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
++struct ath10k_fw_crash_data *
++ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
+
++void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
+ #define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
+
++void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ u32 sset, u8 *data);
++int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif, int sset);
++void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ struct ethtool_stats *stats, u64 *data);
+ #else
+ static inline int ath10k_debug_start(struct ath10k *ar)
+ {
+@@ -75,36 +102,62 @@ static inline void ath10k_debug_destroy(
+ {
+ }
+
+-static inline void ath10k_debug_read_service_map(struct ath10k *ar,
+- void *service_map,
+- size_t map_size)
++static inline int ath10k_debug_register(struct ath10k *ar)
++{
++ return 0;
++}
++
++static inline void ath10k_debug_unregister(struct ath10k *ar)
+ {
+ }
+
+-static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
+- struct wmi_stats_event *ev)
++static inline void ath10k_debug_fw_stats_process(struct ath10k *ar,
++ struct sk_buff *skb)
+ {
+ }
+
++static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer,
++ int len)
++{
++}
++
++static inline struct ath10k_fw_crash_data *
++ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
++{
++ return NULL;
++}
++
+ #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
+
++#define ath10k_debug_get_et_strings NULL
++#define ath10k_debug_get_et_sset_count NULL
++#define ath10k_debug_get_et_stats NULL
++
+ #endif /* CPTCFG_ATH10K_DEBUGFS */
++#ifdef CPTCFG_MAC80211_DEBUGFS
++void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++ struct ieee80211_sta *sta, struct dentry *dir);
++#endif /* CPTCFG_MAC80211_DEBUGFS */
+
+ #ifdef CPTCFG_ATH10K_DEBUG
+-__printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
++__printf(3, 4) void ath10k_dbg(struct ath10k *ar,
++ enum ath10k_debug_mask mask,
+ const char *fmt, ...);
+-void ath10k_dbg_dump(enum ath10k_debug_mask mask,
++void ath10k_dbg_dump(struct ath10k *ar,
++ enum ath10k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len);
+ #else /* CPTCFG_ATH10K_DEBUG */
+
+-static inline int ath10k_dbg(enum ath10k_debug_mask dbg_mask,
++static inline int ath10k_dbg(struct ath10k *ar,
++ enum ath10k_debug_mask dbg_mask,
+ const char *fmt, ...)
+ {
+ return 0;
+ }
+
+-static inline void ath10k_dbg_dump(enum ath10k_debug_mask mask,
++static inline void ath10k_dbg_dump(struct ath10k *ar,
++ enum ath10k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+ {
+--- a/drivers/net/wireless/ath/ath10k/hif.h
++++ b/drivers/net/wireless/ath/ath10k/hif.h
+@@ -20,6 +20,7 @@
+
+ #include <linux/kernel.h>
+ #include "core.h"
++#include "debug.h"
+
+ struct ath10k_hif_sg_item {
+ u16 transfer_id;
+@@ -31,11 +32,9 @@ struct ath10k_hif_sg_item {
+
+ struct ath10k_hif_cb {
+ int (*tx_completion)(struct ath10k *ar,
+- struct sk_buff *wbuf,
+- unsigned transfer_id);
++ struct sk_buff *wbuf);
+ int (*rx_completion)(struct ath10k *ar,
+- struct sk_buff *wbuf,
+- u8 pipe_id);
++ struct sk_buff *wbuf);
+ };
+
+ struct ath10k_hif_ops {
+@@ -43,6 +42,12 @@ struct ath10k_hif_ops {
+ int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items);
+
++ /* read firmware memory through the diagnose interface */
++ int (*diag_read)(struct ath10k *ar, u32 address, void *buf,
++ size_t buf_len);
++
++ int (*diag_write)(struct ath10k *ar, u32 address, const void *data,
++ int nbytes);
+ /*
+ * API to handle HIF-specific BMI message exchanges, this API is
+ * synchronous and only allowed to be called from a context that
+@@ -80,6 +85,10 @@ struct ath10k_hif_ops {
+
+ u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
+
++ u32 (*read32)(struct ath10k *ar, u32 address);
++
++ void (*write32)(struct ath10k *ar, u32 address, u32 value);
++
+ /* Power up the device and enter BMI transfer mode for FW download */
+ int (*power_up)(struct ath10k *ar);
+
+@@ -91,7 +100,6 @@ struct ath10k_hif_ops {
+ int (*resume)(struct ath10k *ar);
+ };
+
+-
+ static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items,
+ int n_items)
+@@ -99,6 +107,21 @@ static inline int ath10k_hif_tx_sg(struc
+ return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);
+ }
+
++static inline int ath10k_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
++ size_t buf_len)
++{
++ return ar->hif.ops->diag_read(ar, address, buf, buf_len);
++}
++
++static inline int ath10k_hif_diag_write(struct ath10k *ar, u32 address,
++ const void *data, int nbytes)
++{
++ if (!ar->hif.ops->diag_write)
++ return -EOPNOTSUPP;
++
++ return ar->hif.ops->diag_write(ar, address, data, nbytes);
++}
++
+ static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
+ void *request, u32 request_len,
+ void *response, u32 *response_len)
+@@ -178,4 +201,25 @@ static inline int ath10k_hif_resume(stru
+ return ar->hif.ops->resume(ar);
+ }
+
++static inline u32 ath10k_hif_read32(struct ath10k *ar, u32 address)
++{
++ if (!ar->hif.ops->read32) {
++ ath10k_warn(ar, "hif read32 not supported\n");
++ return 0xdeaddead;
++ }
++
++ return ar->hif.ops->read32(ar, address);
++}
++
++static inline void ath10k_hif_write32(struct ath10k *ar,
++ u32 address, u32 data)
++{
++ if (!ar->hif.ops->write32) {
++ ath10k_warn(ar, "hif write32 not supported\n");
++ return;
++ }
++
++ ar->hif.ops->write32(ar, address, data);
++}
++
+ #endif /* _HIF_H_ */
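(Note the two guard styles for optional HIF ops above: diag_write() surfaces -EOPNOTSUPP to the caller, while read32() only warns and returns the 0xdeaddead poison value so a bogus register read is recognizable in logs. A sketch of a defensive caller:)

    /* sketch; ath10k_hif_diag_write() is the inline helper defined above */
    ret = ath10k_hif_diag_write(ar, address, data, nbytes);
    if (ret == -EOPNOTSUPP)
            return ret;    /* this HIF has no diagnostic write window */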
|
|
+--- a/drivers/net/wireless/ath/ath10k/htc.c
++++ b/drivers/net/wireless/ath/ath10k/htc.c
+@@ -45,10 +45,8 @@ static struct sk_buff *ath10k_htc_build_
+ struct ath10k_skb_cb *skb_cb;
+
+ skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
+- if (!skb) {
+- ath10k_warn("Unable to allocate ctrl skb\n");
++ if (!skb)
+ return NULL;
+- }
+
+ skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
+ WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+@@ -56,7 +54,7 @@ static struct sk_buff *ath10k_htc_build_
+ skb_cb = ATH10K_SKB_CB(skb);
+ memset(skb_cb, 0, sizeof(*skb_cb));
+
+- ath10k_dbg(ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
++ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
+ return skb;
+ }
+
+@@ -72,13 +70,15 @@ static inline void ath10k_htc_restore_tx
+ static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
+ struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
++ struct ath10k *ar = ep->htc->ar;
++
++ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
+ ep->eid, skb);
+
+ ath10k_htc_restore_tx_skb(ep->htc, skb);
+
+ if (!ep->ep_ops.ep_tx_complete) {
+- ath10k_warn("no tx handler for eid %d\n", ep->eid);
++ ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+@@ -89,12 +89,14 @@ static void ath10k_htc_notify_tx_complet
+ /* assumes tx_lock is held */
+ static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
+ {
++ struct ath10k *ar = ep->htc->ar;
++
+ if (!ep->tx_credit_flow_enabled)
+ return false;
+ if (ep->tx_credits >= ep->tx_credits_per_max_message)
+ return false;
+
+- ath10k_dbg(ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
++ ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
+ ep->eid);
+ return true;
+ }
+@@ -123,6 +125,7 @@ int ath10k_htc_send(struct ath10k_htc *h
+ enum ath10k_htc_ep_id eid,
+ struct sk_buff *skb)
+ {
++ struct ath10k *ar = htc->ar;
+ struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+ struct ath10k_hif_sg_item sg_item;
+@@ -134,18 +137,10 @@ int ath10k_htc_send(struct ath10k_htc *h
+ return -ECOMM;
+
+ if (eid >= ATH10K_HTC_EP_COUNT) {
+- ath10k_warn("Invalid endpoint id: %d\n", eid);
++ ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
+ return -ENOENT;
+ }
+
+- /* FIXME: This looks ugly, can we fix it? */
+- spin_lock_bh(&htc->tx_lock);
+- if (htc->stopped) {
+- spin_unlock_bh(&htc->tx_lock);
+- return -ESHUTDOWN;
+- }
+- spin_unlock_bh(&htc->tx_lock);
+-
+ skb_push(skb, sizeof(struct ath10k_htc_hdr));
+
+ if (ep->tx_credit_flow_enabled) {
+@@ -157,7 +152,7 @@ int ath10k_htc_send(struct ath10k_htc *h
+ goto err_pull;
+ }
+ ep->tx_credits -= credits;
+- ath10k_dbg(ATH10K_DBG_HTC,
++ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc ep %d consumed %d credits (total %d)\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+@@ -165,6 +160,7 @@ int ath10k_htc_send(struct ath10k_htc *h
+
+ ath10k_htc_prepare_tx_skb(ep, skb);
+
++ skb_cb->eid = eid;
+ skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(dev, skb_cb->paddr);
+ if (ret)
+@@ -188,7 +184,7 @@ err_credits:
+ if (ep->tx_credit_flow_enabled) {
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += credits;
+- ath10k_dbg(ATH10K_DBG_HTC,
++ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc ep %d reverted %d credits back (total %d)\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+@@ -202,15 +198,18 @@ err_pull:
+ }
+
+ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
+- struct sk_buff *skb,
+- unsigned int eid)
++ struct sk_buff *skb)
+ {
+ struct ath10k_htc *htc = &ar->htc;
+- struct ath10k_htc_ep *ep = &htc->endpoint[eid];
++ struct ath10k_skb_cb *skb_cb;
++ struct ath10k_htc_ep *ep;
+
+ if (WARN_ON_ONCE(!skb))
+ return 0;
+
++ skb_cb = ATH10K_SKB_CB(skb);
++ ep = &htc->endpoint[skb_cb->eid];
++
+ ath10k_htc_notify_tx_completion(ep, skb);
+ /* the skb now belongs to the completion handler */
+
+@@ -227,11 +226,12 @@ ath10k_htc_process_credit_report(struct
+ int len,
+ enum ath10k_htc_ep_id eid)
+ {
++ struct ath10k *ar = htc->ar;
+ struct ath10k_htc_ep *ep;
+ int i, n_reports;
+
+ if (len % sizeof(*report))
+- ath10k_warn("Uneven credit report len %d", len);
++ ath10k_warn(ar, "Uneven credit report len %d", len);
+
+ n_reports = len / sizeof(*report);
+
+@@ -243,7 +243,7 @@ ath10k_htc_process_credit_report(struct
+ ep = &htc->endpoint[report->eid];
+ ep->tx_credits += report->credits;
+
+- ath10k_dbg(ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
++ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
+ report->eid, report->credits, ep->tx_credits);
+
+ if (ep->ep_ops.ep_tx_credits) {
+@@ -260,6 +260,7 @@ static int ath10k_htc_process_trailer(st
+ int length,
+ enum ath10k_htc_ep_id src_eid)
+ {
++ struct ath10k *ar = htc->ar;
+ int status = 0;
+ struct ath10k_htc_record *record;
+ u8 *orig_buffer;
+@@ -279,7 +280,7 @@ static int ath10k_htc_process_trailer(st
+
+ if (record->hdr.len > length) {
+ /* no room left in buffer for record */
+- ath10k_warn("Invalid record length: %d\n",
++ ath10k_warn(ar, "Invalid record length: %d\n",
+ record->hdr.len);
+ status = -EINVAL;
+ break;
+@@ -289,7 +290,7 @@ static int ath10k_htc_process_trailer(st
+ case ATH10K_HTC_RECORD_CREDITS:
+ len = sizeof(struct ath10k_htc_credit_report);
+ if (record->hdr.len < len) {
+- ath10k_warn("Credit report too long\n");
++ ath10k_warn(ar, "Credit report too long\n");
+ status = -EINVAL;
+ break;
+ }
+@@ -299,7 +300,7 @@ static int ath10k_htc_process_trailer(st
+ src_eid);
+ break;
+ default:
+- ath10k_warn("Unhandled record: id:%d length:%d\n",
++ ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
+ record->hdr.id, record->hdr.len);
+ break;
+ }
+@@ -313,15 +314,14 @@ static int ath10k_htc_process_trailer(st
+ }
+
+ if (status)
+- ath10k_dbg_dump(ATH10K_DBG_HTC, "htc rx bad trailer", "",
++ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
+ orig_buffer, orig_length);
+
+ return status;
+ }
+
+ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
+- struct sk_buff *skb,
+- u8 pipe_id)
++ struct sk_buff *skb)
+ {
+ int status = 0;
+ struct ath10k_htc *htc = &ar->htc;
+@@ -339,8 +339,8 @@ static int ath10k_htc_rx_completion_hand
+ eid = hdr->eid;
+
+ if (eid >= ATH10K_HTC_EP_COUNT) {
+- ath10k_warn("HTC Rx: invalid eid %d\n", eid);
+- ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad header", "",
++ ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
++ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
+ hdr, sizeof(*hdr));
+ status = -EINVAL;
+ goto out;
+@@ -360,19 +360,19 @@ static int ath10k_htc_rx_completion_hand
+ payload_len = __le16_to_cpu(hdr->len);
+
+ if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
+- ath10k_warn("HTC rx frame too long, len: %zu\n",
++ ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
+ payload_len + sizeof(*hdr));
+- ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", "",
++ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
+ hdr, sizeof(*hdr));
+ status = -EINVAL;
+ goto out;
+ }
+
+ if (skb->len < payload_len) {
+- ath10k_dbg(ATH10K_DBG_HTC,
++ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "HTC Rx: insufficient length, got %d, expected %d\n",
+ skb->len, payload_len);
+- ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len",
++ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
+ "", hdr, sizeof(*hdr));
+ status = -EINVAL;
+ goto out;
+@@ -388,7 +388,7 @@ static int ath10k_htc_rx_completion_hand
+
+ if ((trailer_len < min_len) ||
+ (trailer_len > payload_len)) {
+- ath10k_warn("Invalid trailer length: %d\n",
++ ath10k_warn(ar, "Invalid trailer length: %d\n",
+ trailer_len);
+ status = -EPROTO;
+ goto out;
+@@ -421,7 +421,7 @@ static int ath10k_htc_rx_completion_hand
+ * this is a fatal error, target should not be
+ * sending unsolicited messages on the ep 0
+ */
+- ath10k_warn("HTC rx ctrl still processing\n");
++ ath10k_warn(ar, "HTC rx ctrl still processing\n");
+ status = -EINVAL;
+ complete(&htc->ctl_resp);
+ goto out;
+@@ -442,7 +442,7 @@ static int ath10k_htc_rx_completion_hand
+ goto out;
+ }
+
+- ath10k_dbg(ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
++ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
+ eid, skb);
+ ep->ep_ops.ep_rx_complete(ar, skb);
+
+@@ -459,7 +459,7 @@ static void ath10k_htc_control_rx_comple
+ {
+ /* This is unexpected. FW is not supposed to send regular rx on this
+ * endpoint. */
+- ath10k_warn("unexpected htc rx\n");
++ ath10k_warn(ar, "unexpected htc rx\n");
+ kfree_skb(skb);
+ }
+
+@@ -546,7 +546,8 @@ static u8 ath10k_htc_get_credit_allocati
+
+ int ath10k_htc_wait_target(struct ath10k_htc *htc)
+ {
+- int status = 0;
++ struct ath10k *ar = htc->ar;
++ int i, status = 0;
+ struct ath10k_htc_svc_conn_req conn_req;
+ struct ath10k_htc_svc_conn_resp conn_resp;
+ struct ath10k_htc_msg *msg;
+@@ -556,16 +557,32 @@ int ath10k_htc_wait_target(struct ath10k
+
+ status = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_WAIT_TIMEOUT_HZ);
+- if (status <= 0) {
++ if (status == 0) {
++ /* Workaround: In some cases the PCI HIF doesn't
++ * receive interrupt for the control response message
++ * even if the buffer was completed. It is suspected
++ * iomap writes unmasking PCI CE irqs aren't propagated
++ * properly in KVM PCI-passthrough sometimes.
++ */
++ ath10k_warn(ar, "failed to receive control response completion, polling..\n");
++
++ for (i = 0; i < CE_COUNT; i++)
++ ath10k_hif_send_complete_check(htc->ar, i, 1);
++
++ status = wait_for_completion_timeout(&htc->ctl_resp,
++ ATH10K_HTC_WAIT_TIMEOUT_HZ);
++
+ if (status == 0)
+ status = -ETIMEDOUT;
++ }
+
+- ath10k_err("ctl_resp never came in (%d)\n", status);
++ if (status < 0) {
++ ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
+ return status;
+ }
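(Worth noting for the status checks here and below: wait_for_completion_timeout() returns an unsigned remaining-jiffies count, 0 on timeout and greater than 0 on success, never a negative value, which is why the old 'status <= 0' tests are tightened to 'status == 0'. The canonical pattern:)

    unsigned long time_left;

    time_left = wait_for_completion_timeout(&done, ATH10K_HTC_WAIT_TIMEOUT_HZ);
    if (!time_left)
            return -ETIMEDOUT;    /* only 0 signals a timeout */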
|
|
+
+ if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
+- ath10k_err("Invalid HTC ready msg len:%d\n",
++ ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
+ htc->control_resp_len);
+ return -ECOMM;
+ }
+@@ -576,21 +593,21 @@ int ath10k_htc_wait_target(struct ath10k
+ credit_size = __le16_to_cpu(msg->ready.credit_size);
+
+ if (message_id != ATH10K_HTC_MSG_READY_ID) {
+- ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id);
++ ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
+ return -ECOMM;
+ }
+
+ htc->total_transmit_credits = credit_count;
+ htc->target_credit_size = credit_size;
+
+- ath10k_dbg(ATH10K_DBG_HTC,
++ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "Target ready! transmit resources: %d size:%d\n",
+ htc->total_transmit_credits,
+ htc->target_credit_size);
+
+ if ((htc->total_transmit_credits == 0) ||
+ (htc->target_credit_size == 0)) {
+- ath10k_err("Invalid credit size received\n");
++ ath10k_err(ar, "Invalid credit size received\n");
+ return -ECOMM;
+ }
+
+@@ -607,7 +624,8 @@ int ath10k_htc_wait_target(struct ath10k
+ /* connect fake service */
+ status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
+ if (status) {
+- ath10k_err("could not connect to htc service (%d)\n", status);
++ ath10k_err(ar, "could not connect to htc service (%d)\n",
++ status);
+ return status;
+ }
+
+@@ -618,6 +636,7 @@ int ath10k_htc_connect_service(struct at
+ struct ath10k_htc_svc_conn_req *conn_req,
+ struct ath10k_htc_svc_conn_resp *conn_resp)
+ {
++ struct ath10k *ar = htc->ar;
+ struct ath10k_htc_msg *msg;
+ struct ath10k_htc_conn_svc *req_msg;
+ struct ath10k_htc_conn_svc_response resp_msg_dummy;
+@@ -643,13 +662,13 @@ int ath10k_htc_connect_service(struct at
+ tx_alloc = ath10k_htc_get_credit_allocation(htc,
+ conn_req->service_id);
+ if (!tx_alloc)
+- ath10k_dbg(ATH10K_DBG_BOOT,
++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot htc service %s does not allocate target credits\n",
+ htc_service_name(conn_req->service_id));
+
+ skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
+ if (!skb) {
+- ath10k_err("Failed to allocate HTC packet\n");
++ ath10k_err(ar, "Failed to allocate HTC packet\n");
+ return -ENOMEM;
+ }
+
+@@ -684,11 +703,9 @@ int ath10k_htc_connect_service(struct at
+ /* wait for response */
+ status = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
+- if (status <= 0) {
+- if (status == 0)
+- status = -ETIMEDOUT;
+- ath10k_err("Service connect timeout: %d\n", status);
+- return status;
++ if (status == 0) {
++ ath10k_err(ar, "Service connect timeout: %d\n", status);
++ return -ETIMEDOUT;
+ }
+
+ /* we controlled the buffer creation, it's aligned */
+@@ -700,11 +717,11 @@ int ath10k_htc_connect_service(struct at
+ if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
+ (htc->control_resp_len < sizeof(msg->hdr) +
+ sizeof(msg->connect_service_response))) {
+- ath10k_err("Invalid resp message ID 0x%x", message_id);
++ ath10k_err(ar, "Invalid resp message ID 0x%x", message_id);
+ return -EPROTO;
+ }
+
+- ath10k_dbg(ATH10K_DBG_HTC,
++ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
+ htc_service_name(service_id),
+ resp_msg->status, resp_msg->eid);
+@@ -713,7 +730,7 @@ int ath10k_htc_connect_service(struct at
+
+ /* check response status */
+ if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
+- ath10k_err("HTC Service %s connect request failed: 0x%x)\n",
++ ath10k_err(ar, "HTC Service %s connect request failed: 0x%x)\n",
+ htc_service_name(service_id),
+ resp_msg->status);
+ return -EPROTO;
+@@ -764,18 +781,18 @@ setup:
+ if (status)
+ return status;
+
+- ath10k_dbg(ATH10K_DBG_BOOT,
++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
+ htc_service_name(ep->service_id), ep->ul_pipe_id,
+ ep->dl_pipe_id, ep->eid);
+
+- ath10k_dbg(ATH10K_DBG_BOOT,
++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot htc ep %d ul polled %d dl polled %d\n",
+ ep->eid, ep->ul_is_polled, ep->dl_is_polled);
+
+ if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
+ ep->tx_credit_flow_enabled = false;
+- ath10k_dbg(ATH10K_DBG_BOOT,
++ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot htc service '%s' eid %d TX flow control disabled\n",
+ htc_service_name(ep->service_id), assigned_eid);
+ }
+@@ -783,27 +800,26 @@ setup:
+ return status;
+ }
+
+-struct sk_buff *ath10k_htc_alloc_skb(int size)
++struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
+ {
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
+- if (!skb) {
+- ath10k_warn("could not allocate HTC tx skb\n");
++ if (!skb)
+ return NULL;
+- }
+
+ skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
+
+ /* FW/HTC requires 4-byte aligned streams */
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+- ath10k_warn("Unaligned HTC tx skb\n");
++ ath10k_warn(ar, "Unaligned HTC tx skb\n");
+
+ return skb;
+ }
+
+ int ath10k_htc_start(struct ath10k_htc *htc)
+ {
++ struct ath10k *ar = htc->ar;
+ struct sk_buff *skb;
+ int status = 0;
+ struct ath10k_htc_msg *msg;
+@@ -819,7 +835,7 @@ int ath10k_htc_start(struct ath10k_htc *
+ msg->hdr.message_id =
+ __cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+- ath10k_dbg(ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
++ ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
+
+ status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
+ if (status) {
+@@ -830,19 +846,6 @@ int ath10k_htc_start(struct ath10k_htc *
+ return 0;
+ }
+
+-/*
+- * stop HTC communications, i.e. stop interrupt reception, and flush all
+- * queued buffers
+- */
+-void ath10k_htc_stop(struct ath10k_htc *htc)
+-{
+- spin_lock_bh(&htc->tx_lock);
+- htc->stopped = true;
+- spin_unlock_bh(&htc->tx_lock);
+-
+- ath10k_hif_stop(htc->ar);
+-}
+-
+ /* registered target arrival callback from the HIF layer */
+ int ath10k_htc_init(struct ath10k *ar)
+ {
+@@ -852,7 +855,6 @@ int ath10k_htc_init(struct ath10k *ar)
+
+ spin_lock_init(&htc->tx_lock);
+
+- htc->stopped = false;
+ ath10k_htc_reset_endpoint_states(htc);
+
+ /* setup HIF layer callbacks */
+--- a/drivers/net/wireless/ath/ath10k/htc.h
++++ b/drivers/net/wireless/ath/ath10k/htc.h
+@@ -214,7 +214,6 @@ struct ath10k_htc_frame {
+ struct ath10k_htc_record trailer[0];
+ } __packed __aligned(4);
+
+-
+ /*******************/
+ /* Host-side stuff */
+ /*******************/
+@@ -332,7 +331,7 @@ struct ath10k_htc {
+ struct ath10k *ar;
+ struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT];
+
+- /* protects endpoint and stopped fields */
++ /* protects endpoints */
+ spinlock_t tx_lock;
+
+ struct ath10k_htc_ops htc_ops;
+@@ -345,8 +344,6 @@ struct ath10k_htc {
+ int total_transmit_credits;
+ struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT];
+ int target_credit_size;
+-
+- bool stopped;
+ };
+
+ int ath10k_htc_init(struct ath10k *ar);
+@@ -357,7 +354,6 @@ int ath10k_htc_connect_service(struct at
+ struct ath10k_htc_svc_conn_resp *conn_resp);
+ int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
+ struct sk_buff *packet);
+-void ath10k_htc_stop(struct ath10k_htc *htc);
+-struct sk_buff *ath10k_htc_alloc_skb(int size);
++struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size);
+
+ #endif
|
|
+--- a/drivers/net/wireless/ath/ath10k/htt.c
++++ b/drivers/net/wireless/ath/ath10k/htt.c
+@@ -22,7 +22,7 @@
+ #include "core.h"
+ #include "debug.h"
+
+-static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
++int ath10k_htt_connect(struct ath10k_htt *htt)
+ {
+ struct ath10k_htc_svc_conn_req conn_req;
+ struct ath10k_htc_svc_conn_resp conn_resp;
+@@ -48,37 +48,11 @@ static int ath10k_htt_htc_attach(struct
+ return 0;
+ }
+
+-int ath10k_htt_attach(struct ath10k *ar)
++int ath10k_htt_init(struct ath10k *ar)
+ {
+ struct ath10k_htt *htt = &ar->htt;
+- int ret;
+
+ htt->ar = ar;
+- htt->max_throughput_mbps = 800;
+-
+- /*
+- * Connect to HTC service.
+- * This has to be done before calling ath10k_htt_rx_attach,
+- * since ath10k_htt_rx_attach involves sending a rx ring configure
+- * message to the target.
+- */
+- ret = ath10k_htt_htc_attach(htt);
+- if (ret) {
+- ath10k_err("could not attach htt htc (%d)\n", ret);
+- goto err_htc_attach;
+- }
+-
+- ret = ath10k_htt_tx_attach(htt);
+- if (ret) {
+- ath10k_err("could not attach htt tx (%d)\n", ret);
+- goto err_htc_attach;
+- }
+-
+- ret = ath10k_htt_rx_attach(htt);
+- if (ret) {
+- ath10k_err("could not attach htt rx (%d)\n", ret);
+- goto err_rx_attach;
+- }
+
+ /*
+ * Prefetch enough data to satisfy target
+@@ -93,23 +67,20 @@ int ath10k_htt_attach(struct ath10k *ar)
+ 2; /* ip4 dscp or ip6 priority */
+
+ return 0;
+-
+-err_rx_attach:
+- ath10k_htt_tx_detach(htt);
+-err_htc_attach:
+- return ret;
+ }
+
+ #define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
+
+ static int ath10k_htt_verify_version(struct ath10k_htt *htt)
+ {
+- ath10k_dbg(ATH10K_DBG_BOOT, "htt target version %d.%d\n",
++ struct ath10k *ar = htt->ar;
++
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt target version %d.%d\n",
+ htt->target_version_major, htt->target_version_minor);
+
+ if (htt->target_version_major != 2 &&
+ htt->target_version_major != 3) {
+- ath10k_err("unsupported htt major version %d. supported versions are 2 and 3\n",
++ ath10k_err(ar, "unsupported htt major version %d. supported versions are 2 and 3\n",
+ htt->target_version_major);
+ return -ENOTSUPP;
+ }
+@@ -117,8 +88,9 @@ static int ath10k_htt_verify_version(str
+ return 0;
+ }
+
+-int ath10k_htt_attach_target(struct ath10k_htt *htt)
++int ath10k_htt_setup(struct ath10k_htt *htt)
+ {
++ struct ath10k *ar = htt->ar;
+ int status;
+
+ init_completion(&htt->target_version_received);
+@@ -128,9 +100,9 @@ int ath10k_htt_attach_target(struct ath1
+ return status;
+
+ status = wait_for_completion_timeout(&htt->target_version_received,
+- HTT_TARGET_VERSION_TIMEOUT_HZ);
+- if (status <= 0) {
+- ath10k_warn("htt version request timed out\n");
++ HTT_TARGET_VERSION_TIMEOUT_HZ);
++ if (status == 0) {
++ ath10k_warn(ar, "htt version request timed out\n");
+ return -ETIMEDOUT;
+ }
+
+@@ -140,9 +112,3 @@ int ath10k_htt_attach_target(struct ath1
+
+ return ath10k_htt_send_rx_ring_cfg_ll(htt);
+ }
+-
+-void ath10k_htt_detach(struct ath10k_htt *htt)
+-{
+- ath10k_htt_rx_detach(htt);
+- ath10k_htt_tx_detach(htt);
+-}
+--- a/drivers/net/wireless/ath/ath10k/htt.h
++++ b/drivers/net/wireless/ath/ath10k/htt.h
+@@ -21,6 +21,7 @@
+ #include <linux/bug.h>
+ #include <linux/interrupt.h>
+ #include <linux/dmapool.h>
++#include <linux/hashtable.h>
+ #include <net/mac80211.h>
+
+ #include "htc.h"
+@@ -126,6 +127,7 @@ enum htt_data_tx_ext_tid {
+ * (HL hosts manage queues on the host )
+ * more_in_batch: only for HL hosts. indicates if more packets are
+ * pending. this allows target to wait and aggregate
++ * freq: 0 means home channel of given vdev. intended for offchannel
+ */
+ struct htt_data_tx_desc {
+ u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
+@@ -133,7 +135,8 @@ struct htt_data_tx_desc {
+ __le16 len;
+ __le16 id;
+ __le32 frags_paddr;
+- __le32 peerid;
++ __le16 peerid;
++ __le16 freq;
+ u8 prefetch[0]; /* start of frame, for FW classification engine */
+ } __packed;
+
+@@ -156,6 +159,9 @@ enum htt_rx_ring_flags {
+ HTT_RX_RING_FLAGS_PHY_DATA_RX = 1 << 15
+ };
+
++#define HTT_RX_RING_SIZE_MIN 128
++#define HTT_RX_RING_SIZE_MAX 2048
++
+ struct htt_rx_ring_setup_ring {
+ __le32 fw_idx_shadow_reg_paddr;
+ __le32 rx_ring_base_paddr;
+@@ -240,16 +246,10 @@ struct htt_oob_sync_req {
+ __le16 rsvd0;
+ } __packed;
+
+-#define HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_MASK 0x1F
+-#define HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_LSB 0
+-
+ struct htt_aggr_conf {
+ u8 max_num_ampdu_subframes;
+- union {
+- /* dont use bitfields; undefined behaviour */
+- u8 flags; /* see %HTT_AGGR_CONF_MAX_NUM_AMSDU_SUBFRAMES_ */
+- u8 max_num_amsdu_subframes:5;
+- } __packed;
++ /* amsdu_subframes is limited by 0x1F mask */
++ u8 max_num_amsdu_subframes;
+ } __packed;
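(The rationale for this hunk: bitfield layout in __packed wire structures is implementation-defined, so the 5-bit field becomes a plain u8 and the 0x1F limit moves to the sender. A sketch of the masking the host side is expected to apply, field names from the struct above:)

    struct htt_aggr_conf aggr = {
            .max_num_ampdu_subframes = max_ampdu,
            .max_num_amsdu_subframes = max_amsdu & 0x1f, /* firmware takes 5 bits */
    };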
|
|
+
+ #define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
+@@ -271,7 +271,6 @@ enum htt_mgmt_tx_status {
+
+ /*=== target -> host messages ===============================================*/
+
+-
+ enum htt_t2h_msg_type {
+ HTT_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_T2H_MSG_TYPE_RX_IND = 0x1,
+@@ -288,7 +287,19 @@ enum htt_t2h_msg_type {
+ HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
+ HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe,
++ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
++ HTT_T2H_MSG_TYPE_RX_PN_IND = 0x10,
++ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
++ HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
++ /* 0x13 reserved */
++ HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
++
++ /* FIXME: Do not depend on this event id. Numbering of this event id is
++ * broken across different firmware revisions and HTT version fails to
++ * indicate this.
++ */
+ HTT_T2H_MSG_TYPE_TEST,
++
+ /* keep this last */
+ HTT_T2H_NUM_MSGS
+ };
+@@ -657,6 +668,53 @@ struct htt_rx_fragment_indication {
+ #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0
+ #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6
+
++struct htt_rx_pn_ind {
++ __le16 peer_id;
++ u8 tid;
++ u8 seqno_start;
++ u8 seqno_end;
++ u8 pn_ie_count;
++ u8 reserved;
++ u8 pn_ies[0];
++} __packed;
++
++struct htt_rx_offload_msdu {
++ __le16 msdu_len;
++ __le16 peer_id;
++ u8 vdev_id;
++ u8 tid;
++ u8 fw_desc;
++ u8 payload[0];
++} __packed;
++
++struct htt_rx_offload_ind {
++ u8 reserved;
++ __le16 msdu_count;
++} __packed;
++
++struct htt_rx_in_ord_msdu_desc {
++ __le32 msdu_paddr;
++ __le16 msdu_len;
++ u8 fw_desc;
++ u8 reserved;
++} __packed;
++
++struct htt_rx_in_ord_ind {
++ u8 info;
++ __le16 peer_id;
++ u8 vdev_id;
++ u8 reserved;
++ __le16 msdu_count;
++ struct htt_rx_in_ord_msdu_desc msdu_descs[0];
++} __packed;
++
++#define HTT_RX_IN_ORD_IND_INFO_TID_MASK 0x0000001f
++#define HTT_RX_IN_ORD_IND_INFO_TID_LSB 0
++#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK 0x00000020
++#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB 5
++#define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK 0x00000040
++#define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB 6
++
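(These MASK/LSB pairs are meant for the driver's usual field-extraction helper; ath10k defines MS() elsewhere as ((_v) & _f##_MASK) >> _f##_LSB, though treat that exact spelling as an assumption here. A sketch:)

    u8 tid = MS(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
    bool offload = !!(resp->rx_in_ord_ind.info &
                      HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);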
|
|
+ /*
|
|
+ * target -> host test message definition
|
|
+ *
|
|
+@@ -732,7 +790,7 @@ static inline u8 *htt_rx_test_get_chars(
|
|
+ */
|
|
+ struct htt_pktlog_msg {
|
|
+ u8 pad[3];
|
|
+- __le32 payload[1 /* or more */];
|
|
++ u8 payload[0];
|
|
+ } __packed;
|
|
+
|
|
+ struct htt_dbg_stats_rx_reorder_stats {
|
|
+@@ -1038,6 +1096,7 @@ static inline struct htt_stats_conf_item
|
|
+ {
|
|
+ return (void *)item + sizeof(*item) + roundup(item->length, 4);
|
|
+ }
|
|
++
|
|
+ /*
|
|
+ * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank
|
|
+ *
|
|
+@@ -1151,10 +1210,12 @@ struct htt_resp {
|
|
+ struct htt_rx_test rx_test;
|
|
+ struct htt_pktlog_msg pktlog_msg;
|
|
+ struct htt_stats_conf stats_conf;
|
|
++ struct htt_rx_pn_ind rx_pn_ind;
|
|
++ struct htt_rx_offload_ind rx_offload_ind;
|
|
++ struct htt_rx_in_ord_ind rx_in_ord_ind;
|
|
+ };
|
|
+ } __packed;
|
|
+
|
|
+-
|
|
+ /*** host side structures follow ***/
|
|
+
|
|
+ struct htt_tx_done {
|
|
+@@ -1184,7 +1245,6 @@ struct ath10k_htt {
|
|
+ struct ath10k *ar;
|
|
+ enum ath10k_htc_ep_id eid;
|
|
+
|
|
+- int max_throughput_mbps;
|
|
+ u8 target_version_major;
|
|
+ u8 target_version_minor;
|
|
+ struct completion target_version_received;
|
|
+@@ -1200,6 +1260,20 @@ struct ath10k_htt {
|
|
+ * filled.
|
|
+ */
|
|
+ struct sk_buff **netbufs_ring;
|
|
++
|
|
++ /* This is used only with firmware supporting IN_ORD_IND.
|
|
++ *
|
|
++ * With Full Rx Reorder the HTT Rx Ring is more of a temporary
|
|
++ * buffer ring from which buffer addresses are copied by the
|
|
++ * firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND
|
|
++ * pointing to specific (re-ordered) buffers.
|
|
++ *
|
|
++ * FIXME: With kernel generic hashing functions there's a lot
|
|
++ * of hash collisions for sk_buffs.
|
|
++ */
|
|
++ bool in_ord_rx;
|
|
++ DECLARE_HASHTABLE(skb_table, 4);
|
|
++
|
|
+ /*
|
|
+ * Ring of buffer addresses -
|
|
+ * This ring holds the "physical" device address of the
|
|
+@@ -1254,12 +1328,11 @@ struct ath10k_htt {
|
|
+
|
|
+ unsigned int prefetch_len;
|
|
+
|
|
+- /* Protects access to %pending_tx, %used_msdu_ids */
|
|
++ /* Protects access to pending_tx, num_pending_tx */
|
|
+ spinlock_t tx_lock;
|
|
+ int max_num_pending_tx;
|
|
+ int num_pending_tx;
|
|
+- struct sk_buff **pending_tx;
|
|
+- unsigned long *used_msdu_ids; /* bitmap */
|
|
++ struct idr pending_tx;
|
|
+ wait_queue_head_t empty_tx_wq;
|
|
+ struct dma_pool *tx_pool;
|
|
+
|
|
+@@ -1273,6 +1346,7 @@ struct ath10k_htt {
|
|
+ struct tasklet_struct txrx_compl_task;
|
|
+ struct sk_buff_head tx_compl_q;
|
|
+ struct sk_buff_head rx_compl_q;
|
|
++ struct sk_buff_head rx_in_ord_compl_q;
|
|
+
|
|
+ /* rx_status template */
|
|
+ struct ieee80211_rx_status rx_status;
|
|
+@@ -1328,22 +1402,28 @@ struct htt_rx_desc {
|
|
+ #define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
|
|
+ #define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
|
|
+
|
|
+-int ath10k_htt_attach(struct ath10k *ar);
|
|
+-int ath10k_htt_attach_target(struct ath10k_htt *htt);
|
|
+-void ath10k_htt_detach(struct ath10k_htt *htt);
|
|
+-
|
|
+-int ath10k_htt_tx_attach(struct ath10k_htt *htt);
|
|
+-void ath10k_htt_tx_detach(struct ath10k_htt *htt);
|
|
+-int ath10k_htt_rx_attach(struct ath10k_htt *htt);
|
|
+-void ath10k_htt_rx_detach(struct ath10k_htt *htt);
|
|
++int ath10k_htt_connect(struct ath10k_htt *htt);
|
|
++int ath10k_htt_init(struct ath10k *ar);
|
|
++int ath10k_htt_setup(struct ath10k_htt *htt);
|
|
++
|
|
++int ath10k_htt_tx_alloc(struct ath10k_htt *htt);
|
|
++void ath10k_htt_tx_free(struct ath10k_htt *htt);
|
|
++
|
|
++int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
|
|
++int ath10k_htt_rx_ring_refill(struct ath10k *ar);
|
|
++void ath10k_htt_rx_free(struct ath10k_htt *htt);
|
|
++
|
|
+ void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
|
|
+ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
|
|
+ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
|
|
+ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
|
|
+ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
|
|
++int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
|
|
++ u8 max_subfrms_ampdu,
|
|
++ u8 max_subfrms_amsdu);
|
|
+
|
|
+ void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
|
|
+-int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt);
|
|
++int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
|
|
+ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
|
|
+ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
|
|
+ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
|
|
+--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
+@@ -21,118 +21,84 @@
+ #include "txrx.h"
+ #include "debug.h"
+ #include "trace.h"
++#include "mac.h"
+
+ #include <linux/log2.h>
+
+-/* slightly larger than one large A-MPDU */
+-#define HTT_RX_RING_SIZE_MIN 128
+-
+-/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
+-#define HTT_RX_RING_SIZE_MAX 2048
+-
+-#define HTT_RX_AVG_FRM_BYTES 1000
+-
+-/* ms, very conservative */
+-#define HTT_RX_HOST_LATENCY_MAX_MS 20
+-
+-/* ms, conservative */
+-#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10
++#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
++#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
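For concreteness: with the 2048-entry maximum ring size this works out to a fill level of 2048/2 - 1 = 1023, deliberately one short of half the ring, which is what lets the BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2) check added further down compile. (HTT_RX_RING_SIZE_MAX itself is presumably moved into a header by this patch, since its definition is deleted here yet still referenced.)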
+
+ /* when under memory pressure rx ring refill may fail and needs a retry */
+ #define HTT_RX_RING_REFILL_RETRY_MS 50
+
+-
+ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
+ static void ath10k_htt_txrx_compl_task(unsigned long ptr);
+
+-static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
+-{
+- int size;
+-
+- /*
+- * It is expected that the host CPU will typically be able to
+- * service the rx indication from one A-MPDU before the rx
+- * indication from the subsequent A-MPDU happens, roughly 1-2 ms
+- * later. However, the rx ring should be sized very conservatively,
+- * to accomodate the worst reasonable delay before the host CPU
+- * services a rx indication interrupt.
+- *
+- * The rx ring need not be kept full of empty buffers. In theory,
+- * the htt host SW can dynamically track the low-water mark in the
+- * rx ring, and dynamically adjust the level to which the rx ring
+- * is filled with empty buffers, to dynamically meet the desired
+- * low-water mark.
+- *
+- * In contrast, it's difficult to resize the rx ring itself, once
+- * it's in use. Thus, the ring itself should be sized very
+- * conservatively, while the degree to which the ring is filled
+- * with empty buffers should be sized moderately conservatively.
+- */
+-
+- /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
+- size =
+- htt->max_throughput_mbps +
+- 1000 /
+- (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
+-
+- if (size < HTT_RX_RING_SIZE_MIN)
+- size = HTT_RX_RING_SIZE_MIN;
+-
+- if (size > HTT_RX_RING_SIZE_MAX)
+- size = HTT_RX_RING_SIZE_MAX;
+-
+- size = roundup_pow_of_two(size);
+-
+- return size;
+-}
+-
+-static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
++static struct sk_buff *
++ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
+ {
+- int size;
++ struct ath10k_skb_rxcb *rxcb;
+
+- /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
+- size =
+- htt->max_throughput_mbps *
+- 1000 /
+- (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
+-
+- /*
+- * Make sure the fill level is at least 1 less than the ring size.
+- * Leaving 1 element empty allows the SW to easily distinguish
+- * between a full ring vs. an empty ring.
+- */
+- if (size >= htt->rx_ring.size)
+- size = htt->rx_ring.size - 1;
++ hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
++ if (rxcb->paddr == paddr)
++ return ATH10K_RXCB_SKB(rxcb);
+
+- return size;
++ WARN_ON_ONCE(1);
++ return NULL;
+ }
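The lookup above is the standard <linux/hashtable.h> pattern: a small fixed bucket array (2^4 buckets for skb_table), hash_add() keyed by the DMA address, and hash_for_each_possible() walking only the matching bucket with an explicit key compare to resolve collisions - hence the earlier FIXME about collision rates. A self-contained sketch of the same pattern (the rx_entry type and example_* helpers are illustrative, not ath10k's):

	#include <linux/hashtable.h>

	struct rx_entry {
		u32 paddr;		/* key: DMA address of the buffer */
		struct hlist_node node;	/* hashtable linkage */
	};

	static DEFINE_HASHTABLE(example_rx_table, 4);	/* 2^4 = 16 buckets */

	static void example_add(struct rx_entry *e)
	{
		hash_add(example_rx_table, &e->node, e->paddr);
	}

	static struct rx_entry *example_find(u32 paddr)
	{
		struct rx_entry *e;

		/* Only the bucket paddr hashes to is walked; entries that
		 * merely collide are skipped by the key compare. */
		hash_for_each_possible(example_rx_table, e, node, paddr)
			if (e->paddr == paddr)
				return e;
		return NULL;
	}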
+
+ static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
+ {
+ struct sk_buff *skb;
+- struct ath10k_skb_cb *cb;
++ struct ath10k_skb_rxcb *rxcb;
++ struct hlist_node *n;
+ int i;
+
+- for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
+- skb = htt->rx_ring.netbufs_ring[i];
+- cb = ATH10K_SKB_CB(skb);
+- dma_unmap_single(htt->ar->dev, cb->paddr,
+- skb->len + skb_tailroom(skb),
+- DMA_FROM_DEVICE);
+- dev_kfree_skb_any(skb);
++ if (htt->rx_ring.in_ord_rx) {
++ hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
++ skb = ATH10K_RXCB_SKB(rxcb);
++ dma_unmap_single(htt->ar->dev, rxcb->paddr,
++ skb->len + skb_tailroom(skb),
++ DMA_FROM_DEVICE);
++ hash_del(&rxcb->hlist);
++ dev_kfree_skb_any(skb);
++ }
++ } else {
++ for (i = 0; i < htt->rx_ring.size; i++) {
++ skb = htt->rx_ring.netbufs_ring[i];
++ if (!skb)
++ continue;
++
++ rxcb = ATH10K_SKB_RXCB(skb);
++ dma_unmap_single(htt->ar->dev, rxcb->paddr,
++ skb->len + skb_tailroom(skb),
++ DMA_FROM_DEVICE);
++ dev_kfree_skb_any(skb);
++ }
+ }
+
+ htt->rx_ring.fill_cnt = 0;
++ hash_init(htt->rx_ring.skb_table);
++ memset(htt->rx_ring.netbufs_ring, 0,
++ htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
+ }
+
+ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
+ {
+ struct htt_rx_desc *rx_desc;
++ struct ath10k_skb_rxcb *rxcb;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret = 0, idx;
+
+- idx = __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr));
++ /* The Full Rx Reorder firmware has no way of telling the host
++ * implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
++ * To keep things simple make sure ring is always half empty. This
++ * guarantees there'll be no replenishment overruns possible.
++ */
++ BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
++
++ idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+ while (num > 0) {
+ skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
+ if (!skb) {
+@@ -159,18 +125,30 @@ static int __ath10k_htt_rx_ring_fill_n(s
+ goto fail;
+ }
+
+- ATH10K_SKB_CB(skb)->paddr = paddr;
++ rxcb = ATH10K_SKB_RXCB(skb);
++ rxcb->paddr = paddr;
+ htt->rx_ring.netbufs_ring[idx] = skb;
+ htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
+ htt->rx_ring.fill_cnt++;
+
++ if (htt->rx_ring.in_ord_rx) {
++ hash_add(htt->rx_ring.skb_table,
++ &ATH10K_SKB_RXCB(skb)->hlist,
++ (u32)paddr);
++ }
++
+ num--;
+ idx++;
+ idx &= htt->rx_ring.size_mask;
+ }
+
+ fail:
+- *(htt->rx_ring.alloc_idx.vaddr) = __cpu_to_le32(idx);
++ /*
++ * Make sure the rx buffer is updated before available buffer
++ * index to avoid any potential rx ring corruption.
++ */
++ mb();
++ *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
+ return ret;
+ }
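The mb() added before the alloc index store is the usual producer-side publish rule for DMA rings: every buffer and paddr write must be globally visible before the device can observe the new index. Sketched generically under that assumption (struct example_ring and the helper are illustrative, not the driver's):

	#include <linux/types.h>
	#include <asm/barrier.h>
	#include <asm/byteorder.h>

	struct example_ring {
		__le32 *alloc_idx;	/* location the device polls */
	};

	static void example_ring_publish(struct example_ring *r, int new_idx)
	{
		/* ... descriptor and buffer writes happen first ... */

		/* Order them before the index update; without this the
		 * device could see the new index and fetch a descriptor
		 * whose contents are not yet visible to it. */
		mb();

		*r->alloc_idx = __cpu_to_le32(new_idx);
	}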
+
+@@ -198,7 +176,7 @@ static void ath10k_htt_rx_msdu_buff_repl
+ * automatically balances load wrt to CPU power.
+ *
+ * This probably comes at a cost of lower maximum throughput but
+- * improves the avarage and stability. */
++ * improves the average and stability. */
+ spin_lock_bh(&htt->rx_ring.lock);
+ num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+ num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
+@@ -222,32 +200,37 @@ static void ath10k_htt_rx_msdu_buff_repl
+ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
+ {
+ struct ath10k_htt *htt = (struct ath10k_htt *)arg;
++
+ ath10k_htt_rx_msdu_buff_replenish(htt);
+ }
+
+-void ath10k_htt_rx_detach(struct ath10k_htt *htt)
++int ath10k_htt_rx_ring_refill(struct ath10k *ar)
+ {
+- int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
++ struct ath10k_htt *htt = &ar->htt;
++ int ret;
++
++ spin_lock_bh(&htt->rx_ring.lock);
++ ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
++ htt->rx_ring.fill_cnt));
++ spin_unlock_bh(&htt->rx_ring.lock);
++
++ if (ret)
++ ath10k_htt_rx_ring_free(htt);
++
++ return ret;
++}
+
++void ath10k_htt_rx_free(struct ath10k_htt *htt)
++{
+ del_timer_sync(&htt->rx_ring.refill_retry_timer);
+ tasklet_kill(&htt->rx_replenish_task);
+ tasklet_kill(&htt->txrx_compl_task);
+
+ skb_queue_purge(&htt->tx_compl_q);
+ skb_queue_purge(&htt->rx_compl_q);
++ skb_queue_purge(&htt->rx_in_ord_compl_q);
+
+- while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
+- struct sk_buff *skb =
+- htt->rx_ring.netbufs_ring[sw_rd_idx];
+- struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+-
+- dma_unmap_single(htt->ar->dev, cb->paddr,
+- skb->len + skb_tailroom(skb),
+- DMA_FROM_DEVICE);
+- dev_kfree_skb_any(htt->rx_ring.netbufs_ring[sw_rd_idx]);
+- sw_rd_idx++;
+- sw_rd_idx &= htt->rx_ring.size_mask;
+- }
++ ath10k_htt_rx_ring_free(htt);
+
+ dma_free_coherent(htt->ar->dev,
+ (htt->rx_ring.size *
+@@ -265,66 +248,59 @@ void ath10k_htt_rx_detach(struct ath10k_
+
+ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
+ {
++ struct ath10k *ar = htt->ar;
+ int idx;
+ struct sk_buff *msdu;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ if (htt->rx_ring.fill_cnt == 0) {
+- ath10k_warn("tried to pop sk_buff from an empty rx ring\n");
++ ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
+ return NULL;
+ }
+
+ idx = htt->rx_ring.sw_rd_idx.msdu_payld;
+ msdu = htt->rx_ring.netbufs_ring[idx];
++ htt->rx_ring.netbufs_ring[idx] = NULL;
++ htt->rx_ring.paddrs_ring[idx] = 0;
+
+ idx++;
+ idx &= htt->rx_ring.size_mask;
+ htt->rx_ring.sw_rd_idx.msdu_payld = idx;
+ htt->rx_ring.fill_cnt--;
+
+- return msdu;
+-}
+-
+-static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
+-{
+- struct sk_buff *next;
++ dma_unmap_single(htt->ar->dev,
++ ATH10K_SKB_RXCB(msdu)->paddr,
++ msdu->len + skb_tailroom(msdu),
++ DMA_FROM_DEVICE);
++ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
++ msdu->data, msdu->len + skb_tailroom(msdu));
+
+- while (skb) {
+- next = skb->next;
+- dev_kfree_skb_any(skb);
+- skb = next;
+- }
++ return msdu;
+ }
+
+ /* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
+ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
+ u8 **fw_desc, int *fw_desc_len,
+- struct sk_buff **head_msdu,
+- struct sk_buff **tail_msdu)
++ struct sk_buff_head *amsdu)
+ {
++ struct ath10k *ar = htt->ar;
+ int msdu_len, msdu_chaining = 0;
+ struct sk_buff *msdu;
+ struct htt_rx_desc *rx_desc;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+- if (htt->rx_confused) {
+- ath10k_warn("htt is confused. refusing rx\n");
+- return -1;
+- }
+-
+- msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
+- while (msdu) {
++ for (;;) {
+ int last_msdu, msdu_len_invalid, msdu_chained;
+
+- dma_unmap_single(htt->ar->dev,
+- ATH10K_SKB_CB(msdu)->paddr,
+- msdu->len + skb_tailroom(msdu),
+- DMA_FROM_DEVICE);
++ msdu = ath10k_htt_rx_netbuf_pop(htt);
++ if (!msdu) {
++ __skb_queue_purge(amsdu);
++ return -ENOENT;
++ }
+
+- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
+- msdu->data, msdu->len + skb_tailroom(msdu));
++ __skb_queue_tail(amsdu, msdu);
+
+ rx_desc = (struct htt_rx_desc *)msdu->data;
+
+@@ -343,12 +319,8 @@ static int ath10k_htt_rx_amsdu_pop(struc
+ */
+ if (!(__le32_to_cpu(rx_desc->attention.flags)
+ & RX_ATTENTION_FLAGS_MSDU_DONE)) {
+- ath10k_htt_rx_free_msdu_chain(*head_msdu);
+- *head_msdu = NULL;
+- msdu = NULL;
+- ath10k_err("htt rx stopped. cannot recover\n");
+- htt->rx_confused = true;
+- break;
++ __skb_queue_purge(amsdu);
++ return -EIO;
+ }
+
+ /*
+@@ -399,7 +371,6 @@ static int ath10k_htt_rx_amsdu_pop(struc
+ msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
+ RX_MSDU_START_INFO0_MSDU_LENGTH);
+ msdu_chained = rx_desc->frag_info.ring2_more_count;
+- msdu_chaining = msdu_chained;
+
+ if (msdu_len_invalid)
+ msdu_len = 0;
+@@ -408,42 +379,32 @@ static int ath10k_htt_rx_amsdu_pop(struc
+ skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
+ msdu_len -= msdu->len;
+
+- /* FIXME: Do chained buffers include htt_rx_desc or not? */
++ /* Note: Chained buffers do not contain rx descriptor */
+ while (msdu_chained--) {
+- struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
+-
+- dma_unmap_single(htt->ar->dev,
+- ATH10K_SKB_CB(next)->paddr,
+- next->len + skb_tailroom(next),
+- DMA_FROM_DEVICE);
+-
+- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL,
+- "htt rx chained: ", next->data,
+- next->len + skb_tailroom(next));
+-
+- skb_trim(next, 0);
+- skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
+- msdu_len -= next->len;
++ msdu = ath10k_htt_rx_netbuf_pop(htt);
++ if (!msdu) {
++ __skb_queue_purge(amsdu);
++ return -ENOENT;
++ }
+
+- msdu->next = next;
+- msdu = next;
++ __skb_queue_tail(amsdu, msdu);
++ skb_trim(msdu, 0);
++ skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
++ msdu_len -= msdu->len;
++ msdu_chaining = 1;
+ }
+
+ last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
+ RX_MSDU_END_INFO0_LAST_MSDU;
+
+- if (last_msdu) {
+- msdu->next = NULL;
++ trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
++ sizeof(*rx_desc) - sizeof(u32));
++
++ if (last_msdu)
+ break;
+- } else {
+- struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);
+- msdu->next = next;
+- msdu = next;
+- }
+ }
+- *tail_msdu = msdu;
+
+- if (*head_msdu == NULL)
++ if (skb_queue_empty(amsdu))
+ msdu_chaining = -1;
+
+ /*
+@@ -465,43 +426,117 @@ static int ath10k_htt_rx_amsdu_pop(struc
+ static void ath10k_htt_rx_replenish_task(unsigned long ptr)
+ {
+ struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
++
+ ath10k_htt_rx_msdu_buff_replenish(htt);
+ }
+
+-int ath10k_htt_rx_attach(struct ath10k_htt *htt)
++static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
++ u32 paddr)
++{
++ struct ath10k *ar = htt->ar;
++ struct ath10k_skb_rxcb *rxcb;
++ struct sk_buff *msdu;
++
++ lockdep_assert_held(&htt->rx_ring.lock);
++
++ msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
++ if (!msdu)
++ return NULL;
++
++ rxcb = ATH10K_SKB_RXCB(msdu);
++ hash_del(&rxcb->hlist);
++ htt->rx_ring.fill_cnt--;
++
++ dma_unmap_single(htt->ar->dev, rxcb->paddr,
++ msdu->len + skb_tailroom(msdu),
++ DMA_FROM_DEVICE);
++ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
++ msdu->data, msdu->len + skb_tailroom(msdu));
++
++ return msdu;
++}
++
++static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
++ struct htt_rx_in_ord_ind *ev,
++ struct sk_buff_head *list)
+ {
++ struct ath10k *ar = htt->ar;
++ struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
++ struct htt_rx_desc *rxd;
++ struct sk_buff *msdu;
++ int msdu_count;
++ bool is_offload;
++ u32 paddr;
++
++ lockdep_assert_held(&htt->rx_ring.lock);
++
++ msdu_count = __le16_to_cpu(ev->msdu_count);
++ is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
++
++ while (msdu_count--) {
++ paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
++
++ msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
++ if (!msdu) {
++ __skb_queue_purge(list);
++ return -ENOENT;
++ }
++
++ __skb_queue_tail(list, msdu);
++
++ if (!is_offload) {
++ rxd = (void *)msdu->data;
++
++ trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
++
++ skb_put(msdu, sizeof(*rxd));
++ skb_pull(msdu, sizeof(*rxd));
++ skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
++
++ if (!(__le32_to_cpu(rxd->attention.flags) &
++ RX_ATTENTION_FLAGS_MSDU_DONE)) {
++ ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
++ return -EIO;
++ }
++ }
++
++ msdu_desc++;
++ }
++
++ return 0;
++}
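A note on the data-structure shift visible above: MSDUs are now collected on a sk_buff_head instead of hand-rolled skb->next chains, and the underscore-prefixed queue helpers are used because the caller already holds rx_ring.lock, making the queue's internal spinlock redundant. The basic pattern, sketched (the example_* helpers are illustrative):

	#include <linux/skbuff.h>

	static void example_collect(struct sk_buff_head *list, struct sk_buff *skb)
	{
		__skb_queue_tail(list, skb);	/* no internal locking */
	}

	static void example_drain(struct sk_buff_head *list)
	{
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(list)))
			dev_kfree_skb_any(skb);	/* mirrors __skb_queue_purge() */
	}

Callers initialize the list on the stack with __skb_queue_head_init(), exactly as ath10k_htt_rx_handler() does further down.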
++
++int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
++{
++ struct ath10k *ar = htt->ar;
+ dma_addr_t paddr;
+ void *vaddr;
++ size_t size;
+ struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
+
+- htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
+- if (!is_power_of_2(htt->rx_ring.size)) {
+- ath10k_warn("htt rx ring size is not power of 2\n");
+- return -EINVAL;
+- }
++ htt->rx_confused = false;
+
++ /* XXX: The fill level could be changed during runtime in response to
++ * the host processing latency. Is this really worth it?
++ */
++ htt->rx_ring.size = HTT_RX_RING_SIZE;
+ htt->rx_ring.size_mask = htt->rx_ring.size - 1;
++ htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;
+
+- /*
+- * Set the initial value for the level to which the rx ring
+- * should be filled, based on the max throughput and the
+- * worst likely latency for the host to fill the rx ring
+- * with new buffers. In theory, this fill level can be
+- * dynamically adjusted from the initial value set here, to
+- * reflect the actual host latency rather than a
+- * conservative assumption about the host latency.
+- */
+- htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);
++ if (!is_power_of_2(htt->rx_ring.size)) {
++ ath10k_warn(ar, "htt rx ring size is not power of 2\n");
++ return -EINVAL;
++ }
+
+ htt->rx_ring.netbufs_ring =
+- kmalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
++ kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!htt->rx_ring.netbufs_ring)
+ goto err_netbuf;
+
+- vaddr = dma_alloc_coherent(htt->ar->dev,
+- (htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring)),
+- &paddr, GFP_DMA);
++ size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
++
++ vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA);
+ if (!vaddr)
+ goto err_dma_ring;
+
+@@ -516,7 +551,7 @@ int ath10k_htt_rx_attach(struct ath10k_h
+
+ htt->rx_ring.alloc_idx.vaddr = vaddr;
+ htt->rx_ring.alloc_idx.paddr = paddr;
+- htt->rx_ring.sw_rd_idx.msdu_payld = 0;
++ htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
+ *htt->rx_ring.alloc_idx.vaddr = 0;
+
+ /* Initialize the Rx refill retry timer */
+@@ -525,28 +560,23 @@ int ath10k_htt_rx_attach(struct ath10k_h
+ spin_lock_init(&htt->rx_ring.lock);
+
+ htt->rx_ring.fill_cnt = 0;
+- if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
+- goto err_fill_ring;
++ htt->rx_ring.sw_rd_idx.msdu_payld = 0;
++ hash_init(htt->rx_ring.skb_table);
+
+ tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
+ (unsigned long)htt);
+
+ skb_queue_head_init(&htt->tx_compl_q);
+ skb_queue_head_init(&htt->rx_compl_q);
++ skb_queue_head_init(&htt->rx_in_ord_compl_q);
+
+ tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
+ (unsigned long)htt);
+
+- ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
+ htt->rx_ring.size, htt->rx_ring.fill_level);
+ return 0;
+
+-err_fill_ring:
+- ath10k_htt_rx_ring_free(htt);
+- dma_free_coherent(htt->ar->dev,
+- sizeof(*htt->rx_ring.alloc_idx.vaddr),
+- htt->rx_ring.alloc_idx.vaddr,
+- htt->rx_ring.alloc_idx.paddr);
+ err_dma_idx:
+ dma_free_coherent(htt->ar->dev,
+ (htt->rx_ring.size *
+@@ -559,73 +589,54 @@ err_netbuf:
+ return -ENOMEM;
+ }
+
+-static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
++static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
++ enum htt_rx_mpdu_encrypt_type type)
+ {
+ switch (type) {
++ case HTT_RX_MPDU_ENCRYPT_NONE:
++ return 0;
+ case HTT_RX_MPDU_ENCRYPT_WEP40:
+ case HTT_RX_MPDU_ENCRYPT_WEP104:
+- return 4;
++ return IEEE80211_WEP_IV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+- case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+- case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
++ return IEEE80211_TKIP_IV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+- return 8;
+- case HTT_RX_MPDU_ENCRYPT_NONE:
+- return 0;
++ return IEEE80211_CCMP_HDR_LEN;
++ case HTT_RX_MPDU_ENCRYPT_WEP128:
++ case HTT_RX_MPDU_ENCRYPT_WAPI:
++ break;
+ }
+
+- ath10k_warn("unknown encryption type %d\n", type);
++ ath10k_warn(ar, "unsupported encryption type %d\n", type);
+ return 0;
+ }
+
+-static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
++#define MICHAEL_MIC_LEN 8
++
++static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
++ enum htt_rx_mpdu_encrypt_type type)
+ {
+ switch (type) {
+ case HTT_RX_MPDU_ENCRYPT_NONE:
++ return 0;
+ case HTT_RX_MPDU_ENCRYPT_WEP40:
+ case HTT_RX_MPDU_ENCRYPT_WEP104:
+- case HTT_RX_MPDU_ENCRYPT_WEP128:
+- case HTT_RX_MPDU_ENCRYPT_WAPI:
+- return 0;
++ return IEEE80211_WEP_ICV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+- return 4;
++ return IEEE80211_TKIP_ICV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+- return 8;
++ return IEEE80211_CCMP_MIC_LEN;
++ case HTT_RX_MPDU_ENCRYPT_WEP128:
++ case HTT_RX_MPDU_ENCRYPT_WAPI:
++ break;
+ }
+
+- ath10k_warn("unknown encryption type %d\n", type);
++ ath10k_warn(ar, "unsupported encryption type %d\n", type);
+ return 0;
+ }
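As a worked example of the two helpers above: a CCMP (WPA2) MPDU carries an 8-byte PN/IV after the 802.11 header (IEEE80211_CCMP_HDR_LEN) and an 8-byte MIC at the tail (IEEE80211_CCMP_MIC_LEN), so undecapping a decrypted frame strips 16 bytes in total; WEP strips 4 + 4, and TKIP strips 8 + 4 plus the 8-byte Michael MIC that is trimmed separately (hence the MICHAEL_MIC_LEN define introduced above).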
+
+-/* Applies for first msdu in chain, before altering it. */
+-static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
+-{
+- struct htt_rx_desc *rxd;
+- enum rx_msdu_decap_format fmt;
+-
+- rxd = (void *)skb->data - sizeof(*rxd);
+- fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+- RX_MSDU_START_INFO1_DECAP_FORMAT);
+-
+- if (fmt == RX_MSDU_DECAP_RAW)
+- return (void *)skb->data;
+- else
+- return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
+-}
+-
+-/* This function only applies for first msdu in an msdu chain */
+-static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
+-{
+- if (ieee80211_is_data_qos(hdr->frame_control)) {
+- u8 *qc = ieee80211_get_qos_ctl(hdr);
+- if (qc[0] & 0x80)
+- return true;
+- }
+- return false;
+-}
+-
+ struct rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+@@ -660,23 +671,34 @@ static const u8 rx_legacy_rate_idx[] = {
+ };
+
+ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
+- enum ieee80211_band band,
+- u8 info0, u32 info1, u32 info2,
+- struct ieee80211_rx_status *status)
++ struct ieee80211_rx_status *status,
++ struct htt_rx_desc *rxd)
+ {
++ enum ieee80211_band band;
+ u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
+ u8 preamble = 0;
++ u32 info1, info2, info3;
+
+- /* Check if valid fields */
+- if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
++ /* Band value can't be set as undefined but freq can be 0 - use that to
++ * determine whether band is provided.
++ *
++ * FIXME: Perhaps this can go away if CCK rate reporting is a little
++ * reworked?
++ */
++ if (!status->freq)
+ return;
+
+- preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);
++ band = status->band;
++ info1 = __le32_to_cpu(rxd->ppdu_start.info1);
++ info2 = __le32_to_cpu(rxd->ppdu_start.info2);
++ info3 = __le32_to_cpu(rxd->ppdu_start.info3);
++
++ preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
+
+ switch (preamble) {
+ case HTT_RX_LEGACY:
+- cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
+- rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
++ cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
++ rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
+ rate_idx = 0;
+
+ if (rate < 0x08 || rate > 0x0F)
+@@ -703,11 +725,11 @@ static void ath10k_htt_rx_h_rates(struct
+ break;
+ case HTT_RX_HT:
+ case HTT_RX_HT_WITH_TXBF:
+- /* HT-SIG - Table 20-11 in info1 and info2 */
+- mcs = info1 & 0x1F;
++ /* HT-SIG - Table 20-11 in info2 and info3 */
++ mcs = info2 & 0x1F;
+ nss = mcs >> 3;
+- bw = (info1 >> 7) & 1;
+- sgi = (info2 >> 7) & 1;
++ bw = (info2 >> 7) & 1;
++ sgi = (info3 >> 7) & 1;
+
+ status->rate_idx = mcs;
+ status->flag |= RX_FLAG_HT;
+@@ -718,12 +740,12 @@ static void ath10k_htt_rx_h_rates(struct
+ break;
+ case HTT_RX_VHT:
+ case HTT_RX_VHT_WITH_TXBF:
+- /* VHT-SIG-A1 in info 1, VHT-SIG-A2 in info2
++ /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
+ TODO check this */
+- mcs = (info2 >> 4) & 0x0F;
+- nss = ((info1 >> 10) & 0x07) + 1;
+- bw = info1 & 3;
+- sgi = info2 & 1;
++ mcs = (info3 >> 4) & 0x0F;
++ nss = ((info2 >> 10) & 0x07) + 1;
++ bw = info2 & 3;
++ sgi = info3 & 1;
+
+ status->rate_idx = mcs;
+ status->vht_nss = nss;
+@@ -751,28 +773,6 @@ static void ath10k_htt_rx_h_rates(struct
+ }
+ }
+
+-static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
+- struct ieee80211_rx_status *rx_status,
+- struct sk_buff *skb,
+- enum htt_rx_mpdu_encrypt_type enctype)
+-{
+- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+-
+-
+- if (enctype == HTT_RX_MPDU_ENCRYPT_NONE) {
+- rx_status->flag &= ~(RX_FLAG_DECRYPTED |
+- RX_FLAG_IV_STRIPPED |
+- RX_FLAG_MMIC_STRIPPED);
+- return;
+- }
+-
+- rx_status->flag |= RX_FLAG_DECRYPTED |
+- RX_FLAG_IV_STRIPPED |
+- RX_FLAG_MMIC_STRIPPED;
+- hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
+- ~IEEE80211_FCTL_PROTECTED);
+-}
+-
+ static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
+ struct ieee80211_rx_status *status)
+ {
+@@ -793,19 +793,121 @@ static bool ath10k_htt_rx_h_channel(stru
+ return true;
+ }
+
++static void ath10k_htt_rx_h_signal(struct ath10k *ar,
++ struct ieee80211_rx_status *status,
++ struct htt_rx_desc *rxd)
++{
++ /* FIXME: Get real NF */
++ status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
++ rxd->ppdu_start.rssi_comb;
++ status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
++}
++
++static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
++ struct ieee80211_rx_status *status,
++ struct htt_rx_desc *rxd)
++{
++ /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
++ * means all prior MSDUs in a PPDU are reported to mac80211 without the
++ * TSF. Is it worth holding frames until end of PPDU is known?
++ *
++ * FIXME: Can we get/compute 64bit TSF?
++ */
++ status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
++ status->flag |= RX_FLAG_MACTIME_END;
++}
++
++static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
++ struct sk_buff_head *amsdu,
++ struct ieee80211_rx_status *status)
++{
++ struct sk_buff *first;
++ struct htt_rx_desc *rxd;
++ bool is_first_ppdu;
++ bool is_last_ppdu;
++
++ if (skb_queue_empty(amsdu))
++ return;
++
++ first = skb_peek(amsdu);
++ rxd = (void *)first->data - sizeof(*rxd);
++
++ is_first_ppdu = !!(rxd->attention.flags &
++ __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
++ is_last_ppdu = !!(rxd->attention.flags &
++ __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
++
++ if (is_first_ppdu) {
++ /* New PPDU starts so clear out the old per-PPDU status. */
++ status->freq = 0;
++ status->rate_idx = 0;
++ status->vht_nss = 0;
++ status->vht_flag &= ~RX_VHT_FLAG_80MHZ;
++ status->flag &= ~(RX_FLAG_HT |
++ RX_FLAG_VHT |
++ RX_FLAG_SHORT_GI |
++ RX_FLAG_40MHZ |
++ RX_FLAG_MACTIME_END);
++ status->flag |= RX_FLAG_NO_SIGNAL_VAL;
++
++ ath10k_htt_rx_h_signal(ar, status, rxd);
++ ath10k_htt_rx_h_channel(ar, status);
++ ath10k_htt_rx_h_rates(ar, status, rxd);
++ }
++
++ if (is_last_ppdu)
++ ath10k_htt_rx_h_mactime(ar, status, rxd);
++}
++
++static const char * const tid_to_ac[] = {
++ "BE",
++ "BK",
++ "BK",
++ "BE",
++ "VI",
++ "VI",
++ "VO",
++ "VO",
++};
++
++static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
++{
++ u8 *qc;
++ int tid;
++
++ if (!ieee80211_is_data_qos(hdr->frame_control))
++ return "";
++
++ qc = ieee80211_get_qos_ctl(hdr);
++ tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
++ if (tid < 8)
++ snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
++ else
++ snprintf(out, size, "tid %d", tid);
++
++ return out;
++}
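For reference, the tid_to_ac table above follows the standard 802.11 UP-to-AC mapping (TIDs 0/3 best effort, 1/2 background, 4/5 video, 6/7 voice), and the helper is meant to be fed a small stack buffer by the caller, as the debug path below does. A usage sketch, assuming a QoS data frame header hdr is in scope:

	char tid[32];

	/* For a QoS data frame with TID 5 this logs "rx tid 5 (VI)";
	 * for non-QoS frames the empty string keeps the line clean. */
	pr_debug("rx %s\n", ath10k_get_tid(hdr, tid, sizeof(tid)));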
++
+ static void ath10k_process_rx(struct ath10k *ar,
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb)
+ {
+ struct ieee80211_rx_status *status;
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
++ char tid[32];
+
+ status = IEEE80211_SKB_RXCB(skb);
+ *status = *rx_status;
+
+- ath10k_dbg(ATH10K_DBG_DATA,
+- "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %imic-err %i\n",
++ ath10k_dbg(ar, ATH10K_DBG_DATA,
++ "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ skb,
+ skb->len,
++ ieee80211_get_SA(hdr),
++ ath10k_get_tid(hdr, tid, sizeof(tid)),
++ is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
++ "mcast" : "ucast",
++ (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
+ status->flag == 0 ? "legacy" : "",
+ status->flag & RX_FLAG_HT ? "ht" : "",
+ status->flag & RX_FLAG_VHT ? "vht" : "",
+@@ -817,9 +919,12 @@ static void ath10k_process_rx(struct ath
+ status->freq,
+ status->band, status->flag,
+ !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
+- !!(status->flag & RX_FLAG_MMIC_ERROR));
+- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
++ !!(status->flag & RX_FLAG_MMIC_ERROR),
++ !!(status->flag & RX_FLAG_AMSDU_MORE));
++ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
+ skb->data, skb->len);
++ trace_ath10k_rx_hdr(ar, skb->data, skb->len);
++ trace_ath10k_rx_payload(ar, skb->data, skb->len);
+
+ ieee80211_rx(ar->hw, skb);
+ }
+@@ -830,179 +935,263 @@ static int ath10k_htt_rx_nwifi_hdrlen(st
+ return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
+ }
+
+-static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
+- struct ieee80211_rx_status *rx_status,
+- struct sk_buff *skb_in)
++static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
++ struct sk_buff *msdu,
++ struct ieee80211_rx_status *status,
++ enum htt_rx_mpdu_encrypt_type enctype,
++ bool is_decrypted)
+ {
++ struct ieee80211_hdr *hdr;
+ struct htt_rx_desc *rxd;
+- struct sk_buff *skb = skb_in;
+- struct sk_buff *first;
+- enum rx_msdu_decap_format fmt;
+- enum htt_rx_mpdu_encrypt_type enctype;
++ size_t hdr_len;
++ size_t crypto_len;
++ bool is_first;
++ bool is_last;
++
++ rxd = (void *)msdu->data - sizeof(*rxd);
++ is_first = !!(rxd->msdu_end.info0 &
++ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
++ is_last = !!(rxd->msdu_end.info0 &
++ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
++
++ /* Delivered decapped frame:
++ * [802.11 header]
++ * [crypto param] <-- can be trimmed if !fcs_err &&
++ * !decrypt_err && !peer_idx_invalid
++ * [amsdu header] <-- only if A-MSDU
++ * [rfc1042/llc]
++ * [payload]
++ * [FCS] <-- at end, needs to be trimmed
++ */
++
++ /* This probably shouldn't happen but warn just in case */
++ if (unlikely(WARN_ON_ONCE(!is_first)))
++ return;
++
++ /* This probably shouldn't happen but warn just in case */
++ if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
++ return;
++
++ skb_trim(msdu, msdu->len - FCS_LEN);
++
++ /* In most cases this will be true for sniffed frames. It makes sense
++ * to deliver them as-is without stripping the crypto param. This would
++ * also make sense for software based decryption (which is not
++ * implemented in ath10k).
++ *
++ * If there's no error then the frame is decrypted. At least that is
++ * the case for frames that come in via fragmented rx indication.
++ */
++ if (!is_decrypted)
++ return;
++
++ /* The payload is decrypted so strip crypto params. Start from tail
++ * since hdr is used to compute some stuff.
++ */
++
++ hdr = (void *)msdu->data;
++
++ /* Tail */
++ skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));
++
++ /* MMIC */
++ if (!ieee80211_has_morefrags(hdr->frame_control) &&
++ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
++ skb_trim(msdu, msdu->len - 8);
++
++ /* Head */
++ hdr_len = ieee80211_hdrlen(hdr->frame_control);
++ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
++
++ memmove((void *)msdu->data + crypto_len,
++ (void *)msdu->data, hdr_len);
++ skb_pull(msdu, crypto_len);
++}
++
++static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
++ struct sk_buff *msdu,
++ struct ieee80211_rx_status *status,
++ const u8 first_hdr[64])
++{
+ struct ieee80211_hdr *hdr;
+- u8 hdr_buf[64], addr[ETH_ALEN], *qos;
+- unsigned int hdr_len;
++ size_t hdr_len;
++ u8 da[ETH_ALEN];
++ u8 sa[ETH_ALEN];
++
++ /* Delivered decapped frame:
++ * [nwifi 802.11 header] <-- replaced with 802.11 hdr
++ * [rfc1042/llc]
++ *
++ * Note: The nwifi header doesn't have QoS Control and is
++ * (always?) a 3addr frame.
++ *
++ * Note2: There's no A-MSDU subframe header. Even if it's part
++ * of an A-MSDU.
++ */
+
+- rxd = (void *)skb->data - sizeof(*rxd);
+- enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+- RX_MPDU_START_INFO0_ENCRYPT_TYPE);
++ /* pull decapped header and copy SA & DA */
++ hdr = (struct ieee80211_hdr *)msdu->data;
++ hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
++ ether_addr_copy(da, ieee80211_get_DA(hdr));
++ ether_addr_copy(sa, ieee80211_get_SA(hdr));
++ skb_pull(msdu, hdr_len);
+
+- hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
++ /* push original 802.11 header */
++ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+- memcpy(hdr_buf, hdr, hdr_len);
+- hdr = (struct ieee80211_hdr *)hdr_buf;
++ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+
+- first = skb;
+- while (skb) {
+- void *decap_hdr;
+- int len;
+-
+- rxd = (void *)skb->data - sizeof(*rxd);
+- fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+- RX_MSDU_START_INFO1_DECAP_FORMAT);
+- decap_hdr = (void *)rxd->rx_hdr_status;
+-
+- skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
+-
+- /* First frame in an A-MSDU chain has more decapped data. */
+- if (skb == first) {
+- len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
+- len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
+- 4);
+- decap_hdr += len;
+- }
++ /* original 802.11 header has a different DA and in
++ * case of 4addr it may also have different SA
++ */
++ hdr = (struct ieee80211_hdr *)msdu->data;
++ ether_addr_copy(ieee80211_get_DA(hdr), da);
++ ether_addr_copy(ieee80211_get_SA(hdr), sa);
++}
+
+- switch (fmt) {
+- case RX_MSDU_DECAP_RAW:
+- /* remove trailing FCS */
+- skb_trim(skb, skb->len - FCS_LEN);
+- break;
+- case RX_MSDU_DECAP_NATIVE_WIFI:
+- /* pull decapped header and copy DA */
+- hdr = (struct ieee80211_hdr *)skb->data;
+- hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
+- memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
+- skb_pull(skb, hdr_len);
+-
+- /* push original 802.11 header */
+- hdr = (struct ieee80211_hdr *)hdr_buf;
+- hdr_len = ieee80211_hdrlen(hdr->frame_control);
+- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+-
+- /* original A-MSDU header has the bit set but we're
+- * not including A-MSDU subframe header */
+- hdr = (struct ieee80211_hdr *)skb->data;
+- qos = ieee80211_get_qos_ctl(hdr);
+- qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
++static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
++ struct sk_buff *msdu,
++ enum htt_rx_mpdu_encrypt_type enctype)
++{
++ struct ieee80211_hdr *hdr;
++ struct htt_rx_desc *rxd;
++ size_t hdr_len, crypto_len;
++ void *rfc1042;
++ bool is_first, is_last, is_amsdu;
+
+- /* original 802.11 header has a different DA */
+- memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
+- break;
+- case RX_MSDU_DECAP_ETHERNET2_DIX:
+- /* strip ethernet header and insert decapped 802.11
+- * header, amsdu subframe header and rfc1042 header */
+-
+- len = 0;
+- len += sizeof(struct rfc1042_hdr);
+- len += sizeof(struct amsdu_subframe_hdr);
+-
+- skb_pull(skb, sizeof(struct ethhdr));
+- memcpy(skb_push(skb, len), decap_hdr, len);
+- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+- break;
+- case RX_MSDU_DECAP_8023_SNAP_LLC:
+- /* insert decapped 802.11 header making a singly
+- * A-MSDU */
+- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+- break;
+- }
++ rxd = (void *)msdu->data - sizeof(*rxd);
++ hdr = (void *)rxd->rx_hdr_status;
+
+- skb_in = skb;
+- ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype);
+- skb = skb->next;
+- skb_in->next = NULL;
++ is_first = !!(rxd->msdu_end.info0 &
++ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
++ is_last = !!(rxd->msdu_end.info0 &
++ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
++ is_amsdu = !(is_first && is_last);
+
+- if (skb)
+- rx_status->flag |= RX_FLAG_AMSDU_MORE;
+- else
+- rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
++ rfc1042 = hdr;
++
++ if (is_first) {
++ hdr_len = ieee80211_hdrlen(hdr->frame_control);
++ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+- ath10k_process_rx(htt->ar, rx_status, skb_in);
++ rfc1042 += round_up(hdr_len, 4) +
++ round_up(crypto_len, 4);
+ }
+
+- /* FIXME: It might be nice to re-assemble the A-MSDU when there's a
+- * monitor interface active for sniffing purposes. */
++ if (is_amsdu)
++ rfc1042 += sizeof(struct amsdu_subframe_hdr);
++
++ return rfc1042;
+ }
+
+-static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
+- struct ieee80211_rx_status *rx_status,
+- struct sk_buff *skb)
++static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
++ struct sk_buff *msdu,
++ struct ieee80211_rx_status *status,
++ const u8 first_hdr[64],
++ enum htt_rx_mpdu_encrypt_type enctype)
+ {
+- struct htt_rx_desc *rxd;
+ struct ieee80211_hdr *hdr;
+- enum rx_msdu_decap_format fmt;
+- enum htt_rx_mpdu_encrypt_type enctype;
+- int hdr_len;
++ struct ethhdr *eth;
++ size_t hdr_len;
+ void *rfc1042;
++ u8 da[ETH_ALEN];
++ u8 sa[ETH_ALEN];
+
+- /* This shouldn't happen. If it does than it may be a FW bug. */
+- if (skb->next) {
+- ath10k_warn("htt rx received chained non A-MSDU frame\n");
+- ath10k_htt_rx_free_msdu_chain(skb->next);
+- skb->next = NULL;
+- }
++ /* Delivered decapped frame:
++ * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
++ * [payload]
++ */
+
+- rxd = (void *)skb->data - sizeof(*rxd);
+- fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+- RX_MSDU_START_INFO1_DECAP_FORMAT);
+- enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+- RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+- hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
++ rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
++ if (WARN_ON_ONCE(!rfc1042))
++ return;
++
++ /* pull decapped header and copy SA & DA */
++ eth = (struct ethhdr *)msdu->data;
++ ether_addr_copy(da, eth->h_dest);
++ ether_addr_copy(sa, eth->h_source);
++ skb_pull(msdu, sizeof(struct ethhdr));
++
++ /* push rfc1042/llc/snap */
++ memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
++ sizeof(struct rfc1042_hdr));
++
++ /* push original 802.11 header */
++ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
++ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
++
++ /* original 802.11 header has a different DA and in
++ * case of 4addr it may also have different SA
++ */
++ hdr = (struct ieee80211_hdr *)msdu->data;
++ ether_addr_copy(ieee80211_get_DA(hdr), da);
++ ether_addr_copy(ieee80211_get_SA(hdr), sa);
++}
++
++static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
++ struct sk_buff *msdu,
++ struct ieee80211_rx_status *status,
++ const u8 first_hdr[64])
++{
++ struct ieee80211_hdr *hdr;
++ size_t hdr_len;
++
++ /* Delivered decapped frame:
++ * [amsdu header] <-- replaced with 802.11 hdr
++ * [rfc1042/llc]
++ * [payload]
++ */
++
++ skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));
++
++ hdr = (struct ieee80211_hdr *)first_hdr;
++ hdr_len = ieee80211_hdrlen(hdr->frame_control);
++ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
++}
++
++static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
++ struct sk_buff *msdu,
++ struct ieee80211_rx_status *status,
++ u8 first_hdr[64],
++ enum htt_rx_mpdu_encrypt_type enctype,
++ bool is_decrypted)
++{
++ struct htt_rx_desc *rxd;
++ enum rx_msdu_decap_format decap;
++ struct ieee80211_hdr *hdr;
++
++ /* First msdu's decapped header:
++ * [802.11 header] <-- padded to 4 bytes long
++ * [crypto param] <-- padded to 4 bytes long
++ * [amsdu header] <-- only if A-MSDU
++ * [rfc1042/llc]
++ *
++ * Other (2nd, 3rd, ..) msdu's decapped header:
++ * [amsdu header] <-- only if A-MSDU
++ * [rfc1042/llc]
++ */
+
+- skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
++ rxd = (void *)msdu->data - sizeof(*rxd);
++ hdr = (void *)rxd->rx_hdr_status;
++ decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
++ RX_MSDU_START_INFO1_DECAP_FORMAT);
+
+- switch (fmt) {
++ switch (decap) {
+ case RX_MSDU_DECAP_RAW:
+- /* remove trailing FCS */
+- skb_trim(skb, skb->len - FCS_LEN);
++ ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
++ is_decrypted);
+ break;
+ case RX_MSDU_DECAP_NATIVE_WIFI:
+- /* Pull decapped header */
+- hdr = (struct ieee80211_hdr *)skb->data;
+- hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
+- skb_pull(skb, hdr_len);
+-
+- /* Push original header */
+- hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+- hdr_len = ieee80211_hdrlen(hdr->frame_control);
+- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
++ ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
+ break;
+ case RX_MSDU_DECAP_ETHERNET2_DIX:
+- /* strip ethernet header and insert decapped 802.11 header and
+- * rfc1042 header */
+-
+- rfc1042 = hdr;
+- rfc1042 += roundup(hdr_len, 4);
+- rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
+-
+- skb_pull(skb, sizeof(struct ethhdr));
+- memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
+- rfc1042, sizeof(struct rfc1042_hdr));
+- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
++ ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
+ break;
+ case RX_MSDU_DECAP_8023_SNAP_LLC:
+- /* remove A-MSDU subframe header and insert
+- * decapped 802.11 header. rfc1042 header is already there */
+-
+- skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
+- memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
++ ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
+ break;
+ }
+-
+- ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype);
+-
+- ath10k_process_rx(htt->ar, rx_status, skb);
+ }
+
+ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
+@@ -1036,10 +1225,128 @@ static int ath10k_htt_rx_get_csum_state(
+ return CHECKSUM_UNNECESSARY;
+ }
+
+-static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
++static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
++{
++ msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
++}
++
++static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
++ struct sk_buff_head *amsdu,
++ struct ieee80211_rx_status *status)
++{
++ struct sk_buff *first;
++ struct sk_buff *last;
++ struct sk_buff *msdu;
++ struct htt_rx_desc *rxd;
++ struct ieee80211_hdr *hdr;
++ enum htt_rx_mpdu_encrypt_type enctype;
++ u8 first_hdr[64];
++ u8 *qos;
++ size_t hdr_len;
++ bool has_fcs_err;
++ bool has_crypto_err;
++ bool has_tkip_err;
++ bool has_peer_idx_invalid;
++ bool is_decrypted;
++ u32 attention;
++
++ if (skb_queue_empty(amsdu))
++ return;
++
++ first = skb_peek(amsdu);
++ rxd = (void *)first->data - sizeof(*rxd);
++
++ enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
++ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
++
++ /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
++ * decapped header. It'll be used for undecapping of each MSDU.
++ */
++ hdr = (void *)rxd->rx_hdr_status;
++ hdr_len = ieee80211_hdrlen(hdr->frame_control);
++ memcpy(first_hdr, hdr, hdr_len);
++
++ /* Each A-MSDU subframe will use the original header as the base and be
++ * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
++ */
++ hdr = (void *)first_hdr;
++ qos = ieee80211_get_qos_ctl(hdr);
++ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
++
++ /* Some attention flags are valid only in the last MSDU. */
++ last = skb_peek_tail(amsdu);
++ rxd = (void *)last->data - sizeof(*rxd);
++ attention = __le32_to_cpu(rxd->attention.flags);
++
++ has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
++ has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
++ has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
++ has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
++
++ /* Note: If hardware captures an encrypted frame that it can't decrypt,
++ * e.g. due to fcs error, missing peer or invalid key data it will
++ * report the frame as raw.
++ */
++ is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
++ !has_fcs_err &&
++ !has_crypto_err &&
++ !has_peer_idx_invalid);
++
++ /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
++ status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
++ RX_FLAG_MMIC_ERROR |
++ RX_FLAG_DECRYPTED |
++ RX_FLAG_IV_STRIPPED |
++ RX_FLAG_MMIC_STRIPPED);
++
++ if (has_fcs_err)
++ status->flag |= RX_FLAG_FAILED_FCS_CRC;
++
++ if (has_tkip_err)
++ status->flag |= RX_FLAG_MMIC_ERROR;
++
++ if (is_decrypted)
++ status->flag |= RX_FLAG_DECRYPTED |
++ RX_FLAG_IV_STRIPPED |
++ RX_FLAG_MMIC_STRIPPED;
++
++ skb_queue_walk(amsdu, msdu) {
++ ath10k_htt_rx_h_csum_offload(msdu);
++ ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
++ is_decrypted);
++
++ /* Undecapping involves copying the original 802.11 header back
++ * to sk_buff. If frame is protected and hardware has decrypted
++ * it then remove the protected bit.
++ */
++ if (!is_decrypted)
++ continue;
++
++ hdr = (void *)msdu->data;
++ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
++ }
++}
++
++static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
++ struct sk_buff_head *amsdu,
++ struct ieee80211_rx_status *status)
++{
++ struct sk_buff *msdu;
++
++ while ((msdu = __skb_dequeue(amsdu))) {
++ /* Setup per-MSDU flags */
++ if (skb_queue_empty(amsdu))
++ status->flag &= ~RX_FLAG_AMSDU_MORE;
++ else
++ status->flag |= RX_FLAG_AMSDU_MORE;
++
++ ath10k_process_rx(ar, status, msdu);
++ }
++}
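Taken together, the h_* helpers above form a fixed Rx pipeline that the indication handler below composes per MPDU: h_ppdu (per-PPDU status), h_unchain (consolidate raw chained buffers), h_filter (drop disallowed frames), h_mpdu (crypto flags and undecap), then h_deliver (hand each MSDU to mac80211). Each stage may purge the amsdu queue on error, and every later stage tolerates an empty queue, which keeps the handler's main loop free of error branches.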
++
|
|
++static int ath10k_unchain_msdu(struct sk_buff_head *amsdu)
|
|
+ {
|
|
+- struct sk_buff *next = msdu_head->next;
|
|
+- struct sk_buff *to_free = next;
|
|
++ struct sk_buff *skb, *first;
|
|
+ int space;
|
|
+ int total_len = 0;
|
|
+
|
|
+@@ -1050,110 +1357,142 @@ static int ath10k_unchain_msdu(struct sk
|
|
+ * skb?
|
|
+ */
|
|
+
|
|
+- msdu_head->next = NULL;
|
|
++ first = __skb_dequeue(amsdu);
|
|
+
|
|
+ /* Allocate total length all at once. */
|
|
+- while (next) {
|
|
+- total_len += next->len;
|
|
+- next = next->next;
|
|
+- }
|
|
++ skb_queue_walk(amsdu, skb)
|
|
++ total_len += skb->len;
|
|
+
|
|
+- space = total_len - skb_tailroom(msdu_head);
|
|
++ space = total_len - skb_tailroom(first);
|
|
+ if ((space > 0) &&
|
|
+- (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) {
|
|
++ (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
|
|
+ /* TODO: bump some rx-oom error stat */
|
|
+ /* put it back together so we can free the
|
|
+ * whole list at once.
|
|
+ */
|
|
+- msdu_head->next = to_free;
|
|
++ __skb_queue_head(amsdu, first);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ /* Walk list again, copying contents into
|
|
+ * msdu_head
|
|
+ */
|
|
+- next = to_free;
|
|
+- while (next) {
|
|
+- skb_copy_from_linear_data(next, skb_put(msdu_head, next->len),
|
|
+- next->len);
|
|
+- next = next->next;
|
|
++ while ((skb = __skb_dequeue(amsdu))) {
|
|
++ skb_copy_from_linear_data(skb, skb_put(first, skb->len),
|
|
++ skb->len);
|
|
++ dev_kfree_skb_any(skb);
|
|
+ }
|
|
+
|
|
+- /* If here, we have consolidated skb. Free the
|
|
+- * fragments and pass the main skb on up the
|
|
+- * stack.
|
|
+- */
|
|
+- ath10k_htt_rx_free_msdu_chain(to_free);
|
|
++ __skb_queue_head(amsdu, first);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+-static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
|
|
+- struct sk_buff *head,
|
|
+- enum htt_rx_mpdu_status status,
|
|
+- bool channel_set,
|
|
+- u32 attention)
|
|
+-{
|
|
+- if (head->len == 0) {
|
|
+- ath10k_dbg(ATH10K_DBG_HTT,
|
|
+- "htt rx dropping due to zero-len\n");
|
|
+- return false;
|
|
+- }
|
|
++static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
|
|
++ struct sk_buff_head *amsdu,
|
|
++ bool chained)
|
|
++{
|
|
++ struct sk_buff *first;
|
|
++ struct htt_rx_desc *rxd;
|
|
++ enum rx_msdu_decap_format decap;
|
|
+
|
|
+- if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
|
|
+- ath10k_dbg(ATH10K_DBG_HTT,
|
|
+- "htt rx dropping due to decrypt-err\n");
|
|
+- return false;
|
|
+- }
|
|
++ first = skb_peek(amsdu);
|
|
++ rxd = (void *)first->data - sizeof(*rxd);
|
|
++ decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
|
|
++ RX_MSDU_START_INFO1_DECAP_FORMAT);
|
|
+
|
|
+- if (!channel_set) {
|
|
+- ath10k_warn("no channel configured; ignoring frame!\n");
|
|
+- return false;
|
|
++ if (!chained)
|
|
++ return;
|
|
++
|
|
++ /* FIXME: Current unchaining logic can only handle simple case of raw
|
|
++ * msdu chaining. If decapping is other than raw the chaining may be
|
|
++ * more complex and this isn't handled by the current code. Don't even
|
|
++ * try re-constructing such frames - it'll be pretty much garbage.
|
|
++ */
|
|
++ if (decap != RX_MSDU_DECAP_RAW ||
|
|
++ skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
|
|
++ __skb_queue_purge(amsdu);
|
|
++ return;
|
|
+ }
|
|
+
|
|
+- /* Skip mgmt frames while we handle this in WMI */
|
|
+- if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
|
|
+- attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
|
|
+- ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
|
|
++ ath10k_unchain_msdu(amsdu);
|
|
++}
|
|
++
|
|
++static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
|
|
++ struct sk_buff_head *amsdu,
|
|
++ struct ieee80211_rx_status *rx_status)
|
|
++{
|
|
++ struct sk_buff *msdu;
|
|
++ struct htt_rx_desc *rxd;
|
|
++ bool is_mgmt;
|
|
++ bool has_fcs_err;
|
|
++
|
|
++ msdu = skb_peek(amsdu);
|
|
++ rxd = (void *)msdu->data - sizeof(*rxd);
|
|
++
|
|
++ /* FIXME: It might be a good idea to do some fuzzy-testing to drop
|
|
++ * invalid/dangerous frames.
|
|
++ */
|
|
++
|
|
++ if (!rx_status->freq) {
|
|
++ ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n");
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+- if (status != HTT_RX_IND_MPDU_STATUS_OK &&
|
|
+- status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
|
|
+- status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
|
|
+- !htt->ar->monitor_started) {
|
|
+- ath10k_dbg(ATH10K_DBG_HTT,
|
|
+- "htt rx ignoring frame w/ status %d\n",
|
|
+- status);
|
|
++ is_mgmt = !!(rxd->attention.flags &
|
|
++ __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
|
|
++ has_fcs_err = !!(rxd->attention.flags &
|
|
++ __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR));
|
|
++
|
|
++ /* Management frames are handled via WMI events. The pros of such
|
|
++ * approach is that channel is explicitly provided in WMI events
|
|
++ * whereas HTT doesn't provide channel information for Rxed frames.
|
|
++ *
|
|
++ * However some firmware revisions don't report corrupted frames via
|
|
++ * WMI so don't drop them.
|
|
++ */
|
|
++ if (is_mgmt && !has_fcs_err) {
|
|
++ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+- if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
|
|
+- ath10k_dbg(ATH10K_DBG_HTT,
|
|
+- "htt rx CAC running\n");
|
|
++ if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
|
|
++ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ return true;
|
|
+ }
|
|
+
|
|
++static void ath10k_htt_rx_h_filter(struct ath10k *ar,
|
|
++ struct sk_buff_head *amsdu,
|
|
++ struct ieee80211_rx_status *rx_status)
|
|
++{
|
|
++ if (skb_queue_empty(amsdu))
|
|
++ return;
|
|
++
|
|
++ if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
|
|
++ return;
|
|
++
|
|
++ __skb_queue_purge(amsdu);
|
|
++}
|
|
++
+ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
+ 				  struct htt_rx_indication *rx)
+ {
++	struct ath10k *ar = htt->ar;
+ 	struct ieee80211_rx_status *rx_status = &htt->rx_status;
+ 	struct htt_rx_indication_mpdu_range *mpdu_ranges;
+-	struct htt_rx_desc *rxd;
+-	enum htt_rx_mpdu_status status;
+-	struct ieee80211_hdr *hdr;
++	struct sk_buff_head amsdu;
+ 	int num_mpdu_ranges;
+-	u32 attention;
+ 	int fw_desc_len;
+ 	u8 *fw_desc;
+-	bool channel_set;
+-	int i, j;
+-	int ret;
++	int i, ret, mpdu_count = 0;
+
+ 	lockdep_assert_held(&htt->rx_ring.lock);
+
++	if (htt->rx_confused)
++		return;
++
+ 	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
+ 	fw_desc = (u8 *)&rx->fw_desc;
+
+@@ -1161,201 +1500,82 @@ static void ath10k_htt_rx_handler(struct
+ 			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
+ 	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
+
+-	/* Fill this once, while this is per-ppdu */
+-	if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
+-		memset(rx_status, 0, sizeof(*rx_status));
+-		rx_status->signal  = ATH10K_DEFAULT_NOISE_FLOOR +
+-				     rx->ppdu.combined_rssi;
+-	}
+-
+-	if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
+-		/* TSF available only in 32-bit */
+-		rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
+-		rx_status->flag |= RX_FLAG_MACTIME_END;
+-	}
+-
+-	channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);
+-
+-	if (channel_set) {
+-		ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
+-				      rx->ppdu.info0,
+-				      __le32_to_cpu(rx->ppdu.info1),
+-				      __le32_to_cpu(rx->ppdu.info2),
+-				      rx_status);
+-	}
+-
+-	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
++	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
+ 			rx, sizeof(*rx) +
+ 			(sizeof(struct htt_rx_indication_mpdu_range) *
+ 				num_mpdu_ranges));
+
+-	for (i = 0; i < num_mpdu_ranges; i++) {
+-		status = mpdu_ranges[i].mpdu_range_status;
+-
+-		for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
+-			struct sk_buff *msdu_head, *msdu_tail;
++	for (i = 0; i < num_mpdu_ranges; i++)
++		mpdu_count += mpdu_ranges[i].mpdu_count;
+
+-			msdu_head = NULL;
+-			msdu_tail = NULL;
+-			ret = ath10k_htt_rx_amsdu_pop(htt,
+-						      &fw_desc,
+-						      &fw_desc_len,
+-						      &msdu_head,
+-						      &msdu_tail);
+-
+-			if (ret < 0) {
+-				ath10k_warn("failed to pop amsdu from htt rx ring %d\n",
+-					    ret);
+-				ath10k_htt_rx_free_msdu_chain(msdu_head);
+-				continue;
+-			}
+-
+-			rxd = container_of((void *)msdu_head->data,
+-					   struct htt_rx_desc,
+-					   msdu_payload);
+-			attention = __le32_to_cpu(rxd->attention.flags);
+-
+-			if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
+-							 status,
+-							 channel_set,
+-							 attention)) {
+-				ath10k_htt_rx_free_msdu_chain(msdu_head);
+-				continue;
+-			}
+-
+-			if (ret > 0 &&
+-			    ath10k_unchain_msdu(msdu_head) < 0) {
+-				ath10k_htt_rx_free_msdu_chain(msdu_head);
+-				continue;
+-			}
+-
+-			if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
+-				rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+-			else
+-				rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;
+-
+-			if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
+-				rx_status->flag |= RX_FLAG_MMIC_ERROR;
+-			else
+-				rx_status->flag &= ~RX_FLAG_MMIC_ERROR;
+-
+-			hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
+-
+-			if (ath10k_htt_rx_hdr_is_amsdu(hdr))
+-				ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);
+-			else
+-				ath10k_htt_rx_msdu(htt, rx_status, msdu_head);
++	while (mpdu_count--) {
++		__skb_queue_head_init(&amsdu);
++		ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
++					      &fw_desc_len, &amsdu);
++		if (ret < 0) {
++			ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
++			__skb_queue_purge(&amsdu);
++			/* FIXME: It's probably a good idea to reboot the
++			 * device instead of leaving it inoperable.
++			 */
++			htt->rx_confused = true;
++			break;
+ 		}
++
++		ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
++		ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
++		ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
++		ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
++		ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
+ 	}
+
+ 	tasklet_schedule(&htt->rx_replenish_task);
+ }
+
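The rewritten handler above replaces per-frame special casing with a fixed pipeline: every popped A-MSDU runs through the same h_* stages in order. A generic sketch of that staged-pipeline shape, with stand-in types rather than the driver's real ones:

    /* Hypothetical sketch: each stage sees whatever the previous stage
     * left in the queue; a filter stage may purge it entirely, after
     * which later stages simply find the queue empty and do nothing.
     */
    typedef struct amsdu_queue amsdu_queue;

    struct rx_stage {
        const char *name;
        void (*run)(amsdu_queue *q);    /* may drop frames from q */
    };

    static void process_one_amsdu(amsdu_queue *q,
                                  const struct rx_stage *stages, int n_stages)
    {
        for (int i = 0; i < n_stages; i++)
            stages[i].run(q);
    }
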
+ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
+-				struct htt_rx_fragment_indication *frag)
++				       struct htt_rx_fragment_indication *frag)
+ {
+-	struct sk_buff *msdu_head, *msdu_tail;
+-	enum htt_rx_mpdu_encrypt_type enctype;
+-	struct htt_rx_desc *rxd;
+-	enum rx_msdu_decap_format fmt;
++	struct ath10k *ar = htt->ar;
+ 	struct ieee80211_rx_status *rx_status = &htt->rx_status;
+-	struct ieee80211_hdr *hdr;
++	struct sk_buff_head amsdu;
+ 	int ret;
+-	bool tkip_mic_err;
+-	bool decrypt_err;
+ 	u8 *fw_desc;
+-	int fw_desc_len, hdrlen, paramlen;
+-	int trim;
++	int fw_desc_len;
+
+ 	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
+ 	fw_desc = (u8 *)frag->fw_msdu_rx_desc;
+
+-	msdu_head = NULL;
+-	msdu_tail = NULL;
++	__skb_queue_head_init(&amsdu);
+
+ 	spin_lock_bh(&htt->rx_ring.lock);
+ 	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
+-				      &msdu_head, &msdu_tail);
++				      &amsdu);
+ 	spin_unlock_bh(&htt->rx_ring.lock);
+
+-	ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
++	tasklet_schedule(&htt->rx_replenish_task);
++
++	ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
+
+ 	if (ret) {
+-		ath10k_warn("failed to pop amsdu from httr rx ring for fragmented rx %d\n",
++		ath10k_warn(ar, "failed to pop amsdu from htt rx ring for fragmented rx %d\n",
+ 			    ret);
+-		ath10k_htt_rx_free_msdu_chain(msdu_head);
++		__skb_queue_purge(&amsdu);
+ 		return;
+ 	}
+
+-	/* FIXME: implement signal strength */
+-
+-	hdr = (struct ieee80211_hdr *)msdu_head->data;
+-	rxd = (void *)msdu_head->data - sizeof(*rxd);
+-	tkip_mic_err = !!(__le32_to_cpu(rxd->attention.flags) &
+-				RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
+-	decrypt_err = !!(__le32_to_cpu(rxd->attention.flags) &
+-				RX_ATTENTION_FLAGS_DECRYPT_ERR);
+-	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
+-			RX_MSDU_START_INFO1_DECAP_FORMAT);
+-
+-	if (fmt != RX_MSDU_DECAP_RAW) {
+-		ath10k_warn("we dont support non-raw fragmented rx yet\n");
+-		dev_kfree_skb_any(msdu_head);
+-		goto end;
+-	}
+-
+-	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+-		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+-	ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype);
+-	msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
+-
+-	if (tkip_mic_err)
+-		ath10k_warn("tkip mic error\n");
+-
+-	if (decrypt_err) {
+-		ath10k_warn("decryption err in fragmented rx\n");
+-		dev_kfree_skb_any(msdu_head);
+-		goto end;
+-	}
+-
+-	if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
+-		hdrlen = ieee80211_hdrlen(hdr->frame_control);
+-		paramlen = ath10k_htt_rx_crypto_param_len(enctype);
+-
+-		/* It is more efficient to move the header than the payload */
+-		memmove((void *)msdu_head->data + paramlen,
+-			(void *)msdu_head->data,
+-			hdrlen);
+-		skb_pull(msdu_head, paramlen);
+-		hdr = (struct ieee80211_hdr *)msdu_head->data;
+-	}
+-
+-	/* remove trailing FCS */
+-	trim  = 4;
+-
+-	/* remove crypto trailer */
+-	trim += ath10k_htt_rx_crypto_tail_len(enctype);
+-
+-	/* last fragment of TKIP frags has MIC */
+-	if (!ieee80211_has_morefrags(hdr->frame_control) &&
+-	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+-		trim += 8;
+-
+-	if (trim > msdu_head->len) {
+-		ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
+-		dev_kfree_skb_any(msdu_head);
+-		goto end;
++	if (skb_queue_len(&amsdu) != 1) {
++		ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
++		__skb_queue_purge(&amsdu);
++		return;
+ 	}
+
+-	skb_trim(msdu_head, msdu_head->len - trim);
+-
+-	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
+-			msdu_head->data, msdu_head->len);
+-	ath10k_process_rx(htt->ar, rx_status, msdu_head);
++	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
++	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
++	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
++	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
+
+-end:
+ 	if (fw_desc_len > 0) {
+-		ath10k_dbg(ATH10K_DBG_HTT,
++		ath10k_dbg(ar, ATH10K_DBG_HTT,
+ 			   "expecting more fragmented rx in one indication %d\n",
+ 			   fw_desc_len);
+ 	}
+@@ -1385,12 +1605,12 @@ static void ath10k_htt_rx_frm_tx_compl(s
+ 		tx_done.discard = true;
+ 		break;
+ 	default:
+-		ath10k_warn("unhandled tx completion status %d\n", status);
++		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
+ 		tx_done.discard = true;
+ 		break;
+ 	}
+
+-	ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
++	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
+ 		   resp->data_tx_completion.num_msdus);
+
+ 	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
+@@ -1400,6 +1620,274 @@ static void ath10k_htt_rx_frm_tx_compl(s
+ 	}
+ }
+
++static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
++{
++	struct htt_rx_addba *ev = &resp->rx_addba;
++	struct ath10k_peer *peer;
++	struct ath10k_vif *arvif;
++	u16 info0, tid, peer_id;
++
++	info0 = __le16_to_cpu(ev->info0);
++	tid = MS(info0, HTT_RX_BA_INFO0_TID);
++	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
++
++	ath10k_dbg(ar, ATH10K_DBG_HTT,
++		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
++		   tid, peer_id, ev->window_size);
++
++	spin_lock_bh(&ar->data_lock);
++	peer = ath10k_peer_find_by_id(ar, peer_id);
++	if (!peer) {
++		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
++			    peer_id);
++		spin_unlock_bh(&ar->data_lock);
++		return;
++	}
++
++	arvif = ath10k_get_arvif(ar, peer->vdev_id);
++	if (!arvif) {
++		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
++			    peer->vdev_id);
++		spin_unlock_bh(&ar->data_lock);
++		return;
++	}
++
++	ath10k_dbg(ar, ATH10K_DBG_HTT,
++		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
++		   peer->addr, tid, ev->window_size);
++
++	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
++	spin_unlock_bh(&ar->data_lock);
++}
++
++static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
++{
++	struct htt_rx_delba *ev = &resp->rx_delba;
++	struct ath10k_peer *peer;
++	struct ath10k_vif *arvif;
++	u16 info0, tid, peer_id;
++
++	info0 = __le16_to_cpu(ev->info0);
++	tid = MS(info0, HTT_RX_BA_INFO0_TID);
++	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
++
++	ath10k_dbg(ar, ATH10K_DBG_HTT,
++		   "htt rx delba tid %hu peer_id %hu\n",
++		   tid, peer_id);
++
++	spin_lock_bh(&ar->data_lock);
++	peer = ath10k_peer_find_by_id(ar, peer_id);
++	if (!peer) {
++		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
++			    peer_id);
++		spin_unlock_bh(&ar->data_lock);
++		return;
++	}
++
++	arvif = ath10k_get_arvif(ar, peer->vdev_id);
++	if (!arvif) {
++		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
++			    peer->vdev_id);
++		spin_unlock_bh(&ar->data_lock);
++		return;
++	}
++
++	ath10k_dbg(ar, ATH10K_DBG_HTT,
++		   "htt rx stop rx ba session sta %pM tid %hu\n",
++		   peer->addr, tid);
++
++	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
++	spin_unlock_bh(&ar->data_lock);
++}
++
++static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
++				       struct sk_buff_head *amsdu)
++{
++	struct sk_buff *msdu;
++	struct htt_rx_desc *rxd;
++
++	if (skb_queue_empty(list))
++		return -ENOBUFS;
++
++	if (WARN_ON(!skb_queue_empty(amsdu)))
++		return -EINVAL;
++
++	while ((msdu = __skb_dequeue(list))) {
++		__skb_queue_tail(amsdu, msdu);
++
++		rxd = (void *)msdu->data - sizeof(*rxd);
++		if (rxd->msdu_end.info0 &
++		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
++			break;
++	}
++
++	msdu = skb_peek_tail(amsdu);
++	rxd = (void *)msdu->data - sizeof(*rxd);
++	if (!(rxd->msdu_end.info0 &
++	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
++		skb_queue_splice_init(amsdu, list);
++		return -EAGAIN;
++	}
++
++	return 0;
++}
++
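
ath10k_htt_rx_extract_amsdu() above splits one A-MSDU off the front of a combined MSDU list, using the per-descriptor "last MSDU" flag as the delimiter. A toy model of that contract over a plain array, with hypothetical names and none of the skb machinery:

    #include <errno.h>
    #include <stddef.h>

    /* Toy model: last[] flags the final MSDU of each A-MSDU in a flat
     * list of n MSDUs. Returns the number of MSDUs consumed, -ENOBUFS
     * for an empty list, or -EAGAIN when the list ends before a "last
     * MSDU" marker is seen (a truncated A-MSDU).
     */
    static int extract_one_amsdu(const int *last, size_t n)
    {
        size_t i;

        if (n == 0)
            return -ENOBUFS;

        for (i = 0; i < n; i++)
            if (last[i])
                return (int)(i + 1);  /* delimiter found, consume i+1 */

        return -EAGAIN;               /* incomplete, caller gives up */
    }
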
++static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
++					    struct sk_buff *skb)
++{
++	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
++
++	if (!ieee80211_has_protected(hdr->frame_control))
++		return;
++
++	/* Offloaded frames are already decrypted but firmware insists they are
++	 * protected in the 802.11 header. Strip the flag. Otherwise mac80211
++	 * will drop the frame.
++	 */
++
++	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
++	status->flag |= RX_FLAG_DECRYPTED |
++			RX_FLAG_IV_STRIPPED |
++			RX_FLAG_MMIC_STRIPPED;
++}
++
++static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
++				       struct sk_buff_head *list)
++{
++	struct ath10k_htt *htt = &ar->htt;
++	struct ieee80211_rx_status *status = &htt->rx_status;
++	struct htt_rx_offload_msdu *rx;
++	struct sk_buff *msdu;
++	size_t offset;
++
++	while ((msdu = __skb_dequeue(list))) {
++		/* Offloaded frames don't have Rx descriptor. Instead they have
++		 * a short meta information header.
++		 */
++
++		rx = (void *)msdu->data;
++
++		skb_put(msdu, sizeof(*rx));
++		skb_pull(msdu, sizeof(*rx));
++
++		if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
++			ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
++			dev_kfree_skb_any(msdu);
++			continue;
++		}
++
++		skb_put(msdu, __le16_to_cpu(rx->msdu_len));
++
++		/* Offloaded rx header length isn't multiple of 2 nor 4 so the
++		 * actual payload is unaligned. Align the frame. Otherwise
++		 * mac80211 complains. This shouldn't reduce performance much
++		 * because these offloaded frames are rare.
++		 */
++		offset = 4 - ((unsigned long)msdu->data & 3);
++		skb_put(msdu, offset);
++		memmove(msdu->data + offset, msdu->data, msdu->len);
++		skb_pull(msdu, offset);
++
++		/* FIXME: The frame is NWifi. Re-construct QoS Control
++		 * if possible later.
++		 */
++
++		memset(status, 0, sizeof(*status));
++		status->flag |= RX_FLAG_NO_SIGNAL_VAL;
++
++		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
++		ath10k_htt_rx_h_channel(ar, status);
++		ath10k_process_rx(ar, status, msdu);
++	}
++}
++
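
The realignment above computes how many bytes to shift the payload so it starts on a 4-byte boundary; note that an already-aligned pointer yields offset 4, i.e., a harmless full-word shift. A standalone sketch of just the arithmetic:

    #include <stdio.h>

    /* Mirror of the computation used above: for a payload at address p,
     * offset = 4 - (p & 3) shifts the data so it begins on a 4-byte
     * boundary. An already-aligned address gives offset 4, which still
     * lands on a boundary (just one word later).
     */
    int main(void)
    {
        unsigned long addrs[] = { 0x1000, 0x1001, 0x1002, 0x1003 };

        for (int i = 0; i < 4; i++) {
            unsigned long offset = 4 - (addrs[i] & 3);
            printf("addr 0x%lx -> shift by %lu -> new addr 0x%lx\n",
                   addrs[i], offset, addrs[i] + offset);
        }
        return 0;
    }
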
++static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
++{
++	struct ath10k_htt *htt = &ar->htt;
++	struct htt_resp *resp = (void *)skb->data;
++	struct ieee80211_rx_status *status = &htt->rx_status;
++	struct sk_buff_head list;
++	struct sk_buff_head amsdu;
++	u16 peer_id;
++	u16 msdu_count;
++	u8 vdev_id;
++	u8 tid;
++	bool offload;
++	bool frag;
++	int ret;
++
++	lockdep_assert_held(&htt->rx_ring.lock);
++
++	if (htt->rx_confused)
++		return;
++
++	skb_pull(skb, sizeof(resp->hdr));
++	skb_pull(skb, sizeof(resp->rx_in_ord_ind));
++
++	peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
++	msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
++	vdev_id = resp->rx_in_ord_ind.vdev_id;
++	tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
++	offload = !!(resp->rx_in_ord_ind.info &
++			HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
++	frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
++
++	ath10k_dbg(ar, ATH10K_DBG_HTT,
++		   "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
++		   vdev_id, peer_id, tid, offload, frag, msdu_count);
++
++	if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
++		ath10k_warn(ar, "dropping invalid in order rx indication\n");
++		return;
++	}
++
++	/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
++	 * extracted and processed.
++	 */
++	__skb_queue_head_init(&list);
++	ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
++	if (ret < 0) {
++		ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
++		htt->rx_confused = true;
++		return;
++	}
++
++	/* Offloaded frames are very different and need to be handled
++	 * separately.
++	 */
++	if (offload)
++		ath10k_htt_rx_h_rx_offload(ar, &list);
++
++	while (!skb_queue_empty(&list)) {
++		__skb_queue_head_init(&amsdu);
++		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
++		switch (ret) {
++		case 0:
++			/* Note: The in-order indication may report interleaved
++			 * frames from different PPDUs meaning reported rx rate
++			 * to mac80211 isn't accurate/reliable. It's still
++			 * better to report something than nothing though. This
++			 * should still give an idea about rx rate to the user.
++			 */
++			ath10k_htt_rx_h_ppdu(ar, &amsdu, status);
++			ath10k_htt_rx_h_filter(ar, &amsdu, status);
++			ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
++			ath10k_htt_rx_h_deliver(ar, &amsdu, status);
++			break;
++		case -EAGAIN:
++			/* fall through */
++		default:
++			/* Should not happen. */
++			ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
++			htt->rx_confused = true;
++			__skb_queue_purge(&list);
++			return;
++		}
++	}
++
++	tasklet_schedule(&htt->rx_replenish_task);
++}
++
+ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+ {
+ 	struct ath10k_htt *htt = &ar->htt;
+@@ -1407,9 +1895,9 @@ void ath10k_htt_t2h_msg_handler(struct a
+
+ 	/* confirm alignment */
+ 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
+-		ath10k_warn("unaligned htt message, expect trouble\n");
++		ath10k_warn(ar, "unaligned htt message, expect trouble\n");
+
+-	ath10k_dbg(ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
++	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
+ 		   resp->hdr.msg_type);
+ 	switch (resp->hdr.msg_type) {
+ 	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
+@@ -1473,7 +1961,7 @@ void ath10k_htt_t2h_msg_handler(struct a
+ 		struct ath10k *ar = htt->ar;
+ 		struct htt_security_indication *ev = &resp->security_indication;
+
+-		ath10k_dbg(ATH10K_DBG_HTT,
++		ath10k_dbg(ar, ATH10K_DBG_HTT,
+ 			   "sec ind peer_id %d unicast %d type %d\n",
+ 			  __le16_to_cpu(ev->peer_id),
+ 			  !!(ev->flags & HTT_SECURITY_IS_UNICAST),
+@@ -1482,7 +1970,7 @@ void ath10k_htt_t2h_msg_handler(struct a
+ 		break;
+ 	}
+ 	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
+-		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
++		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+ 				skb->data, skb->len);
+ 		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
+ 		break;
+@@ -1491,16 +1979,55 @@ void ath10k_htt_t2h_msg_handler(struct a
+ 		/* FIX THIS */
+ 		break;
+ 	case HTT_T2H_MSG_TYPE_STATS_CONF:
+-		trace_ath10k_htt_stats(skb->data, skb->len);
++		trace_ath10k_htt_stats(ar, skb->data, skb->len);
+ 		break;
+ 	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
++		/* Firmware can return tx frames if it's unable to fully
++		 * process them and suspects host may be able to fix it. ath10k
++		 * sends all tx frames as already inspected so this shouldn't
++		 * happen unless fw has a bug.
++		 */
++		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
++		break;
+ 	case HTT_T2H_MSG_TYPE_RX_ADDBA:
++		ath10k_htt_rx_addba(ar, resp);
++		break;
+ 	case HTT_T2H_MSG_TYPE_RX_DELBA:
+-	case HTT_T2H_MSG_TYPE_RX_FLUSH:
++		ath10k_htt_rx_delba(ar, resp);
++		break;
++	case HTT_T2H_MSG_TYPE_PKTLOG: {
++		struct ath10k_pktlog_hdr *hdr =
++			(struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;
++
++		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
++					sizeof(*hdr) +
++					__le16_to_cpu(hdr->size));
++		break;
++	}
++	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
++		/* Ignore this event because mac80211 takes care of Rx
++		 * aggregation reordering.
++		 */
++		break;
++	}
++	case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
++		spin_lock_bh(&htt->rx_ring.lock);
++		__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
++		spin_unlock_bh(&htt->rx_ring.lock);
++		tasklet_schedule(&htt->txrx_compl_task);
++		return;
++	}
++	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
++		/* FIXME: This WMI-TLV event is overlapping with 10.2
++		 * CHAN_CHANGE - both being 0xF. Neither is being used in
++		 * practice so no immediate action is necessary. Nevertheless
++		 * HTT may need an abstraction layer like WMI has one day.
++		 */
++		break;
+ 	default:
+-		ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
+-			   resp->hdr.msg_type);
+-		ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
++		ath10k_warn(ar, "htt event (%d) not handled\n",
++			    resp->hdr.msg_type);
++		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+ 				skb->data, skb->len);
+ 		break;
+ 	};
+@@ -1512,6 +2039,7 @@ void ath10k_htt_t2h_msg_handler(struct a
+ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
+ {
+ 	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
++	struct ath10k *ar = htt->ar;
+ 	struct htt_resp *resp;
+ 	struct sk_buff *skb;
+
+@@ -1528,5 +2056,10 @@ static void ath10k_htt_txrx_compl_task(u
+ 		ath10k_htt_rx_handler(htt, &resp->rx_ind);
+ 		dev_kfree_skb_any(skb);
+ 	}
++
++	while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
++		ath10k_htt_rx_in_ord_ind(ar, skb);
++		dev_kfree_skb_any(skb);
++	}
+ 	spin_unlock_bh(&htt->rx_ring.lock);
+ }
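
The RX_IN_ORD_PADDR_IND case above doesn't process the event in the message handler; it queues the skb and defers to the txrx completion tasklet, which drains the whole queue under the rx ring lock. A generic, userspace sketch of that defer-and-drain pattern (hypothetical names, pthread mutex standing in for the spinlock):

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; void *payload; };

    struct defer_q {
        pthread_mutex_t lock;
        struct node *head, *tail;
    };

    /* Producer side: enqueue under the lock, then wake the consumer
     * (in the driver this is where tasklet_schedule() would go).
     */
    static void defer(struct defer_q *q, struct node *n)
    {
        n->next = NULL;
        pthread_mutex_lock(&q->lock);
        if (q->tail)
            q->tail->next = n;
        else
            q->head = n;
        q->tail = n;
        pthread_mutex_unlock(&q->lock);
    }

    /* Consumer side: drain everything in one pass while holding the
     * lock, mirroring the tasklet's while (__skb_dequeue(...)) loop.
     */
    static void drain(struct defer_q *q, void (*handle)(void *))
    {
        pthread_mutex_lock(&q->lock);
        while (q->head) {
            struct node *n = q->head;
            q->head = n->next;
            if (!q->head)
                q->tail = NULL;
            handle(n->payload);
            free(n);
        }
        pthread_mutex_unlock(&q->lock);
    }
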
+--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
+@@ -56,98 +56,74 @@ exit:
+ 	return ret;
+ }
+
+-int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
++int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
+ {
+-	int msdu_id;
++	struct ath10k *ar = htt->ar;
++	int ret;
+
+ 	lockdep_assert_held(&htt->tx_lock);
+
+-	msdu_id = find_first_zero_bit(htt->used_msdu_ids,
+-				      htt->max_num_pending_tx);
+-	if (msdu_id == htt->max_num_pending_tx)
+-		return -ENOBUFS;
+-
+-	ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
+-	__set_bit(msdu_id, htt->used_msdu_ids);
+-	return msdu_id;
++	ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
++
++	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
++
++	return ret;
+ }
+
+ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
+ {
++	struct ath10k *ar = htt->ar;
++
+ 	lockdep_assert_held(&htt->tx_lock);
+
+-	if (!test_bit(msdu_id, htt->used_msdu_ids))
+-		ath10k_warn("trying to free unallocated msdu_id %d\n", msdu_id);
++	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
+
+-	ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
+-	__clear_bit(msdu_id, htt->used_msdu_ids);
++	idr_remove(&htt->pending_tx, msdu_id);
+ }
+
+-int ath10k_htt_tx_attach(struct ath10k_htt *htt)
++int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
+ {
+-	spin_lock_init(&htt->tx_lock);
+-	init_waitqueue_head(&htt->empty_tx_wq);
+-
+-	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, htt->ar->fw_features))
+-		htt->max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
+-	else
+-		htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;
++	struct ath10k *ar = htt->ar;
+
+-	ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
++	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
+ 		   htt->max_num_pending_tx);
+
+-	htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
+-				  htt->max_num_pending_tx, GFP_KERNEL);
+-	if (!htt->pending_tx)
+-		return -ENOMEM;
+-
+-	htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
+-				     BITS_TO_LONGS(htt->max_num_pending_tx),
+-				     GFP_KERNEL);
+-	if (!htt->used_msdu_ids) {
+-		kfree(htt->pending_tx);
+-		return -ENOMEM;
+-	}
++	spin_lock_init(&htt->tx_lock);
++	idr_init(&htt->pending_tx);
+
+ 	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
+ 				       sizeof(struct ath10k_htt_txbuf), 4, 0);
+ 	if (!htt->tx_pool) {
+-		kfree(htt->used_msdu_ids);
+-		kfree(htt->pending_tx);
++		idr_destroy(&htt->pending_tx);
+ 		return -ENOMEM;
+ 	}
+
+ 	return 0;
+ }
+
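The allocator change above swaps a bitmap (find_first_zero_bit/__set_bit) plus a lookup array for the kernel's idr, which both hands out an integer id and remembers the skb pointer, so the separate pending_tx array disappears. A minimal kernel-style sketch of the pattern under assumed context (some spinlock-protected id space, not ath10k's actual code):

    #include <linux/idr.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(id_lock);
    static DEFINE_IDR(pending);

    /* idr_alloc() reserves an id in [0, 0x10000) and maps it to ptr,
     * collapsing a used-id bitmap and a lookup table into one structure.
     */
    static int id_alloc(void *ptr)
    {
        int id;

        spin_lock_bh(&id_lock);
        id = idr_alloc(&pending, ptr, 0, 0x10000, GFP_ATOMIC);
        spin_unlock_bh(&id_lock);

        return id;  /* >= 0 on success, negative errno otherwise */
    }

    static void id_free(int id)
    {
        spin_lock_bh(&id_lock);
        idr_remove(&pending, id);
        spin_unlock_bh(&id_lock);
    }
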
+-static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
++static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
+ {
++	struct ath10k *ar = ctx;
++	struct ath10k_htt *htt = &ar->htt;
+ 	struct htt_tx_done tx_done = {0};
+-	int msdu_id;
+-
+-	spin_lock_bh(&htt->tx_lock);
+-	for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
+-		if (!test_bit(msdu_id, htt->used_msdu_ids))
+-			continue;
+
+-		ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
+-			   msdu_id);
++	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
+
+-		tx_done.discard = 1;
+-		tx_done.msdu_id = msdu_id;
++	tx_done.discard = 1;
++	tx_done.msdu_id = msdu_id;
+
+-		ath10k_txrx_tx_unref(htt, &tx_done);
+-	}
++	spin_lock_bh(&htt->tx_lock);
++	ath10k_txrx_tx_unref(htt, &tx_done);
+ 	spin_unlock_bh(&htt->tx_lock);
++
++	return 0;
+ }
+
+-void ath10k_htt_tx_detach(struct ath10k_htt *htt)
++void ath10k_htt_tx_free(struct ath10k_htt *htt)
+ {
+-	ath10k_htt_tx_cleanup_pending(htt);
+-	kfree(htt->pending_tx);
+-	kfree(htt->used_msdu_ids);
++	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
++	idr_destroy(&htt->pending_tx);
+ 	dma_pool_destroy(htt->tx_pool);
+-	return;
+ }
+
+ void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+@@ -157,6 +133,7 @@ void ath10k_htt_htc_tx_complete(struct a
+
+ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
+ {
++	struct ath10k *ar = htt->ar;
+ 	struct sk_buff *skb;
+ 	struct htt_cmd *cmd;
+ 	int len = 0;
+@@ -165,7 +142,7 @@ int ath10k_htt_h2t_ver_req_msg(struct at
+ 	len += sizeof(cmd->hdr);
+ 	len += sizeof(cmd->ver_req);
+
+-	skb = ath10k_htc_alloc_skb(len);
++	skb = ath10k_htc_alloc_skb(ar, len);
+ 	if (!skb)
+ 		return -ENOMEM;
+
+@@ -184,6 +161,7 @@ int ath10k_htt_h2t_ver_req_msg(struct at
+
+ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
+ {
++	struct ath10k *ar = htt->ar;
+ 	struct htt_stats_req *req;
+ 	struct sk_buff *skb;
+ 	struct htt_cmd *cmd;
+@@ -192,7 +170,7 @@ int ath10k_htt_h2t_stats_req(struct ath1
+ 	len += sizeof(cmd->hdr);
+ 	len += sizeof(cmd->stats_req);
+
+-	skb = ath10k_htc_alloc_skb(len);
++	skb = ath10k_htc_alloc_skb(ar, len);
+ 	if (!skb)
+ 		return -ENOMEM;
+
+@@ -214,7 +192,8 @@ int ath10k_htt_h2t_stats_req(struct ath1
+
+ 	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ 	if (ret) {
+-		ath10k_warn("failed to send htt type stats request: %d", ret);
++		ath10k_warn(ar, "failed to send htt type stats request: %d",
++			    ret);
+ 		dev_kfree_skb_any(skb);
+ 		return ret;
+ 	}
+@@ -224,6 +203,7 @@ int ath10k_htt_h2t_stats_req(struct ath1
+
+ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
+ {
++	struct ath10k *ar = htt->ar;
+ 	struct sk_buff *skb;
+ 	struct htt_cmd *cmd;
+ 	struct htt_rx_ring_setup_ring *ring;
+@@ -242,7 +222,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struc
+
+ 	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
+ 	    + (sizeof(*ring) * num_rx_ring);
+-	skb = ath10k_htc_alloc_skb(len);
++	skb = ath10k_htc_alloc_skb(ar, len);
+ 	if (!skb)
+ 		return -ENOMEM;
+
+@@ -307,9 +287,57 @@ int ath10k_htt_send_rx_ring_cfg_ll(struc
+ 	return 0;
+ }
+
++int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
++				u8 max_subfrms_ampdu,
++				u8 max_subfrms_amsdu)
++{
++	struct ath10k *ar = htt->ar;
++	struct htt_aggr_conf *aggr_conf;
++	struct sk_buff *skb;
++	struct htt_cmd *cmd;
++	int len;
++	int ret;
++
++	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */
++
++	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
++		return -EINVAL;
++
++	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
++		return -EINVAL;
++
++	len = sizeof(cmd->hdr);
++	len += sizeof(cmd->aggr_conf);
++
++	skb = ath10k_htc_alloc_skb(ar, len);
++	if (!skb)
++		return -ENOMEM;
++
++	skb_put(skb, len);
++	cmd = (struct htt_cmd *)skb->data;
++	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;
++
++	aggr_conf = &cmd->aggr_conf;
++	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
++	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;
++
++	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
++		   aggr_conf->max_num_amsdu_subframes,
++		   aggr_conf->max_num_ampdu_subframes);
++
++	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
++	if (ret) {
++		dev_kfree_skb_any(skb);
++		return ret;
++	}
++
++	return 0;
++}
++
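
The new aggregation-config helper rejects out-of-range subframe limits before building the HTT command (A-MPDU 1..64, A-MSDU 1..31, with firmware defaults of 64 and 3 per the comment above). A hedged, caller-side usage sketch with hypothetical error handling:

    /* Hypothetical caller: restore the documented firmware defaults
     * (ampdu = 64, amsdu = 3) and show that out-of-range values are
     * rejected with -EINVAL before anything reaches the target.
     */
    static int restore_aggr_defaults(struct ath10k_htt *htt)
    {
        int ret;

        ret = ath10k_htt_h2t_aggr_cfg_msg(htt, 64 /* ampdu */, 3 /* amsdu */);
        if (ret)
            return ret;

        /* 0 or anything above the per-field cap never reaches firmware */
        if (ath10k_htt_h2t_aggr_cfg_msg(htt, 0, 3) != -EINVAL)
            return -EIO;    /* would mean the bounds check regressed */

        return 0;
    }
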
+ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
+ {
+-	struct device *dev = htt->ar->dev;
++	struct ath10k *ar = htt->ar;
++	struct device *dev = ar->dev;
+ 	struct sk_buff *txdesc = NULL;
+ 	struct htt_cmd *cmd;
+ 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+@@ -318,7 +346,6 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt
+ 	int msdu_id = -1;
+ 	int res;
+
+-
+ 	res = ath10k_htt_tx_inc_pending(htt);
+ 	if (res)
+ 		goto err;
+@@ -327,16 +354,15 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt
+ 	len += sizeof(cmd->mgmt_tx);
+
+ 	spin_lock_bh(&htt->tx_lock);
+-	res = ath10k_htt_tx_alloc_msdu_id(htt);
++	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+ 	if (res < 0) {
+ 		spin_unlock_bh(&htt->tx_lock);
+ 		goto err_tx_dec;
+ 	}
+ 	msdu_id = res;
+-	htt->pending_tx[msdu_id] = msdu;
+ 	spin_unlock_bh(&htt->tx_lock);
+
+-	txdesc = ath10k_htc_alloc_skb(len);
++	txdesc = ath10k_htc_alloc_skb(ar, len);
+ 	if (!txdesc) {
+ 		res = -ENOMEM;
+ 		goto err_free_msdu_id;
+@@ -372,7 +398,6 @@ err_free_txdesc:
+ 	dev_kfree_skb_any(txdesc);
+ err_free_msdu_id:
+ 	spin_lock_bh(&htt->tx_lock);
+-	htt->pending_tx[msdu_id] = NULL;
+ 	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ 	spin_unlock_bh(&htt->tx_lock);
+ err_tx_dec:
+@@ -383,7 +408,8 @@ err:
+
+ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
+ {
+-	struct device *dev = htt->ar->dev;
++	struct ath10k *ar = htt->ar;
++	struct device *dev = ar->dev;
+ 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+ 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+ 	struct ath10k_hif_sg_item sg_items[2];
+@@ -403,13 +429,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt
+ 		goto err;
+
+ 	spin_lock_bh(&htt->tx_lock);
+-	res = ath10k_htt_tx_alloc_msdu_id(htt);
++	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+ 	if (res < 0) {
+ 		spin_unlock_bh(&htt->tx_lock);
+ 		goto err_tx_dec;
+ 	}
+ 	msdu_id = res;
+-	htt->pending_tx[msdu_id] = msdu;
+ 	spin_unlock_bh(&htt->tx_lock);
+
+ 	prefetch_len = min(htt->prefetch_len, msdu->len);
+@@ -423,10 +448,18 @@ int ath10k_htt_tx(struct ath10k_htt *htt
+
+ 	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
+ 					   &paddr);
+-	if (!skb_cb->htt.txbuf)
++	if (!skb_cb->htt.txbuf) {
++		res = -ENOMEM;
+ 		goto err_free_msdu_id;
++	}
+ 	skb_cb->htt.txbuf_paddr = paddr;
+
++	if ((ieee80211_is_action(hdr->frame_control) ||
++	     ieee80211_is_deauth(hdr->frame_control) ||
++	     ieee80211_is_disassoc(hdr->frame_control)) &&
++	     ieee80211_has_protected(hdr->frame_control))
++		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
++
+ 	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ 				       DMA_TO_DEVICE);
+ 	res = dma_mapping_error(dev, skb_cb->paddr);
+@@ -482,8 +515,16 @@ int ath10k_htt_tx(struct ath10k_htt *htt
+
+ 	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+ 	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+-	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+-	flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
++	if (msdu->ip_summed == CHECKSUM_PARTIAL) {
++		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
++		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
++	}
++
++	/* Prevent firmware from sending up tx inspection requests. There's
++	 * nothing ath10k can do with frames requested for inspection so force
++	 * it to simply rely on a regular tx completion with discard status.
++	 */
++	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
+
+ 	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+ 	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
+@@ -491,14 +532,18 @@ int ath10k_htt_tx(struct ath10k_htt *htt
+ 	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
+ 	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
+ 	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+-	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
++	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
++	skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);
+
+-	ath10k_dbg(ATH10K_DBG_HTT,
+-		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n",
++	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
++	ath10k_dbg(ar, ATH10K_DBG_HTT,
++		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
+ 		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
+-		   (u32)skb_cb->paddr, vdev_id, tid);
+-	ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
++		   (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
++	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
+ 			msdu->data, msdu->len);
++	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
++	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
+
+ 	sg_items[0].transfer_id = 0;
+ 	sg_items[0].transfer_context = NULL;
+@@ -531,7 +576,6 @@ err_free_txbuf:
+ 			  skb_cb->htt.txbuf_paddr);
+ err_free_msdu_id:
+ 	spin_lock_bh(&htt->tx_lock);
+-	htt->pending_tx[msdu_id] = NULL;
+ 	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ 	spin_unlock_bh(&htt->tx_lock);
+ err_tx_dec:
+--- a/drivers/net/wireless/ath/ath10k/hw.h
++++ b/drivers/net/wireless/ath/ath10k/hw.h
+@@ -20,24 +20,73 @@
+
+ #include "targaddrs.h"
+
++#define ATH10K_FW_DIR			"ath10k"
++
+ /* QCA988X 1.0 definitions (unsupported) */
+ #define QCA988X_HW_1_0_CHIP_ID_REV	0x0
+
+ /* QCA988X 2.0 definitions */
+ #define QCA988X_HW_2_0_VERSION		0x4100016c
+ #define QCA988X_HW_2_0_CHIP_ID_REV	0x2
+-#define QCA988X_HW_2_0_FW_DIR		"ath10k/QCA988X/hw2.0"
++#define QCA988X_HW_2_0_FW_DIR		ATH10K_FW_DIR "/QCA988X/hw2.0"
+ #define QCA988X_HW_2_0_FW_FILE		"firmware.bin"
+-#define QCA988X_HW_2_0_FW_2_FILE	"firmware-2.bin"
+ #define QCA988X_HW_2_0_OTP_FILE		"otp.bin"
+ #define QCA988X_HW_2_0_BOARD_DATA_FILE	"board.bin"
+ #define QCA988X_HW_2_0_PATCH_LOAD_ADDR	0x1234
+
++/* QCA6174 target BMI version signatures */
++#define QCA6174_HW_1_0_VERSION		0x05000000
++#define QCA6174_HW_1_1_VERSION		0x05000001
++#define QCA6174_HW_1_3_VERSION		0x05000003
++#define QCA6174_HW_2_1_VERSION		0x05010000
++#define QCA6174_HW_3_0_VERSION		0x05020000
++#define QCA6174_HW_3_2_VERSION		0x05030000
++
++enum qca6174_pci_rev {
++	QCA6174_PCI_REV_1_1 = 0x11,
++	QCA6174_PCI_REV_1_3 = 0x13,
++	QCA6174_PCI_REV_2_0 = 0x20,
++	QCA6174_PCI_REV_3_0 = 0x30,
++};
++
++enum qca6174_chip_id_rev {
++	QCA6174_HW_1_0_CHIP_ID_REV = 0,
++	QCA6174_HW_1_1_CHIP_ID_REV = 1,
++	QCA6174_HW_1_3_CHIP_ID_REV = 2,
++	QCA6174_HW_2_1_CHIP_ID_REV = 4,
++	QCA6174_HW_2_2_CHIP_ID_REV = 5,
++	QCA6174_HW_3_0_CHIP_ID_REV = 8,
++	QCA6174_HW_3_1_CHIP_ID_REV = 9,
++	QCA6174_HW_3_2_CHIP_ID_REV = 10,
++};
++
++#define QCA6174_HW_2_1_FW_DIR		"ath10k/QCA6174/hw2.1"
++#define QCA6174_HW_2_1_FW_FILE		"firmware.bin"
++#define QCA6174_HW_2_1_OTP_FILE		"otp.bin"
++#define QCA6174_HW_2_1_BOARD_DATA_FILE	"board.bin"
++#define QCA6174_HW_2_1_PATCH_LOAD_ADDR	0x1234
++
++#define QCA6174_HW_3_0_FW_DIR		"ath10k/QCA6174/hw3.0"
++#define QCA6174_HW_3_0_FW_FILE		"firmware.bin"
++#define QCA6174_HW_3_0_OTP_FILE		"otp.bin"
++#define QCA6174_HW_3_0_BOARD_DATA_FILE	"board.bin"
++#define QCA6174_HW_3_0_PATCH_LOAD_ADDR	0x1234
++
+ #define ATH10K_FW_API2_FILE		"firmware-2.bin"
++#define ATH10K_FW_API3_FILE		"firmware-3.bin"
++
++/* added support for ATH10K_FW_IE_WMI_OP_VERSION */
++#define ATH10K_FW_API4_FILE		"firmware-4.bin"
++
++#define ATH10K_FW_UTF_FILE		"utf.bin"
+
+ /* includes also the null byte */
+ #define ATH10K_FIRMWARE_MAGIC		"QCA-ATH10K"
+
++#define REG_DUMP_COUNT_QCA988X 60
++
++#define QCA988X_CAL_DATA_LEN		2116
++
+ struct ath10k_fw_ie {
+ 	__le32 id;
+ 	__le32 len;
+@@ -50,8 +99,57 @@ enum ath10k_fw_ie_type {
+ 	ATH10K_FW_IE_FEATURES = 2,
+ 	ATH10K_FW_IE_FW_IMAGE = 3,
+ 	ATH10K_FW_IE_OTP_IMAGE = 4,
++
++	/* WMI "operations" interface version, 32 bit value. Supported from
++	 * FW API 4 and above.
++	 */
++	ATH10K_FW_IE_WMI_OP_VERSION = 5,
++};
++
++enum ath10k_fw_wmi_op_version {
++	ATH10K_FW_WMI_OP_VERSION_UNSET = 0,
++
++	ATH10K_FW_WMI_OP_VERSION_MAIN = 1,
++	ATH10K_FW_WMI_OP_VERSION_10_1 = 2,
++	ATH10K_FW_WMI_OP_VERSION_10_2 = 3,
++	ATH10K_FW_WMI_OP_VERSION_TLV = 4,
++	ATH10K_FW_WMI_OP_VERSION_10_2_4 = 5,
++
++	/* keep last */
++	ATH10K_FW_WMI_OP_VERSION_MAX,
++};
++
++enum ath10k_hw_rev {
++	ATH10K_HW_QCA988X,
++	ATH10K_HW_QCA6174,
++};
++
++struct ath10k_hw_regs {
++	u32 rtc_state_cold_reset_mask;
++	u32 rtc_soc_base_address;
++	u32 rtc_wmac_base_address;
++	u32 soc_core_base_address;
++	u32 ce_wrapper_base_address;
++	u32 ce0_base_address;
++	u32 ce1_base_address;
++	u32 ce2_base_address;
++	u32 ce3_base_address;
++	u32 ce4_base_address;
++	u32 ce5_base_address;
++	u32 ce6_base_address;
++	u32 ce7_base_address;
++	u32 soc_reset_control_si0_rst_mask;
++	u32 soc_reset_control_ce_rst_mask;
++	u32 soc_chip_id_address;
++	u32 scratch_3_address;
+ };
+
++extern const struct ath10k_hw_regs qca988x_regs;
++extern const struct ath10k_hw_regs qca6174_regs;
++
++#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
++#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
++
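
The hw.h changes turn fixed register addresses into per-chip lookups: each macro now dereferences ar->regs, which points at a qca988x_regs or qca6174_regs table selected once at probe time. A reduced, hypothetical model of the pattern (the QCA988X values below come from this patch; the second chip's values are made up for illustration):

    struct hw_regs {
        unsigned int scratch_3_address;
        unsigned int soc_chip_id_address;
    };

    static const struct hw_regs chip_a_regs = { 0x0030, 0x000000ec };
    static const struct hw_regs chip_b_regs = { 0x0028, 0x000000f0 }; /* made up */

    struct device_ctx {
        const struct hw_regs *regs;   /* selected once, at probe time */
    };

    /* The driver's macros rely on an `ar` in scope; this variant takes
     * the device explicitly, but the indirection is the same idea.
     */
    #define SCRATCH_3_ADDRESS(dev) ((dev)->regs->scratch_3_address)

    static unsigned int scratch_addr(struct device_ctx *dev)
    {
        return SCRATCH_3_ADDRESS(dev);  /* chip-specific without #ifdefs */
    }
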
+ /* Known pecularities:
+  *  - current FW doesn't support raw rx mode (last tested v599)
+  *  - current FW dumps upon raw tx mode (last tested v599)
+@@ -73,6 +171,15 @@ enum ath10k_mcast2ucast_mode {
+ 	ATH10K_MCAST2UCAST_ENABLED = 1,
+ };
+
++struct ath10k_pktlog_hdr {
++	__le16 flags;
++	__le16 missed_cnt;
++	__le16 log_type;
++	__le16 size;
++	__le32 timestamp;
++	u8 payload[0];
++} __packed;
++
+ /* Target specific defines for MAIN firmware */
+ #define TARGET_NUM_VDEVS			8
+ #define TARGET_NUM_PEER_AST			2
+@@ -80,11 +187,13 @@ enum ath10k_mcast2ucast_mode {
+ #define TARGET_DMA_BURST_SIZE			0
+ #define TARGET_MAC_AGGR_DELIM			0
+ #define TARGET_AST_SKID_LIMIT			16
+-#define TARGET_NUM_PEERS			16
++#define TARGET_NUM_STATIONS			16
++#define TARGET_NUM_PEERS			((TARGET_NUM_STATIONS) + \
++						 (TARGET_NUM_VDEVS))
+ #define TARGET_NUM_OFFLOAD_PEERS		0
+ #define TARGET_NUM_OFFLOAD_REORDER_BUFS         0
+ #define TARGET_NUM_PEER_KEYS			2
+-#define TARGET_NUM_TIDS		(2 * ((TARGET_NUM_PEERS) + (TARGET_NUM_VDEVS)))
++#define TARGET_NUM_TIDS				((TARGET_NUM_PEERS) * 2)
+ #define TARGET_TX_CHAIN_MASK			(BIT(0) | BIT(1) | BIT(2))
+ #define TARGET_RX_CHAIN_MASK			(BIT(0) | BIT(1) | BIT(2))
+ #define TARGET_RX_TIMEOUT_LO_PRI		100
+@@ -115,12 +224,15 @@ enum ath10k_mcast2ucast_mode {
+ #define TARGET_10X_DMA_BURST_SIZE		0
+ #define TARGET_10X_MAC_AGGR_DELIM		0
+ #define TARGET_10X_AST_SKID_LIMIT		16
+-#define TARGET_10X_NUM_PEERS			(128 + (TARGET_10X_NUM_VDEVS))
+-#define TARGET_10X_NUM_PEERS_MAX		128
++#define TARGET_10X_NUM_STATIONS			128
++#define TARGET_10X_NUM_PEERS			((TARGET_10X_NUM_STATIONS) + \
++						 (TARGET_10X_NUM_VDEVS))
+ #define TARGET_10X_NUM_OFFLOAD_PEERS		0
+ #define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS	0
+ #define TARGET_10X_NUM_PEER_KEYS		2
+-#define TARGET_10X_NUM_TIDS			256
++#define TARGET_10X_NUM_TIDS_MAX			256
++#define TARGET_10X_NUM_TIDS			min((TARGET_10X_NUM_TIDS_MAX), \
++						    (TARGET_10X_NUM_PEERS) * 2)
+ #define TARGET_10X_TX_CHAIN_MASK		(BIT(0) | BIT(1) | BIT(2))
+ #define TARGET_10X_RX_CHAIN_MASK		(BIT(0) | BIT(1) | BIT(2))
+ #define TARGET_10X_RX_TIMEOUT_LO_PRI		100
+@@ -140,6 +252,18 @@ enum ath10k_mcast2ucast_mode {
+ #define TARGET_10X_NUM_MSDU_DESC		(1024 + 400)
+ #define TARGET_10X_MAX_FRAG_ENTRIES		0
+
++/* 10.2 parameters */
++#define TARGET_10_2_DMA_BURST_SIZE		1
++
++/* Target specific defines for WMI-TLV firmware */
++#define TARGET_TLV_NUM_VDEVS			3
++#define TARGET_TLV_NUM_STATIONS			32
++#define TARGET_TLV_NUM_PEERS			((TARGET_TLV_NUM_STATIONS) + \
++						 (TARGET_TLV_NUM_VDEVS) + \
++						 2)
++#define TARGET_TLV_NUM_TIDS			((TARGET_TLV_NUM_PEERS) * 2)
++#define TARGET_TLV_NUM_MSDU_DESC		(1024 + 32)
++
+ /* Number of Copy Engines supported */
+ #define CE_COUNT 8
+
+@@ -170,7 +294,7 @@ enum ath10k_mcast2ucast_mode {
+ /* as of IP3.7.1 */
+ #define RTC_STATE_V_ON				3
+
+-#define RTC_STATE_COLD_RESET_MASK		0x00000400
++#define RTC_STATE_COLD_RESET_MASK		ar->regs->rtc_state_cold_reset_mask
+ #define RTC_STATE_V_LSB				0
+ #define RTC_STATE_V_MASK			0x00000007
+ #define RTC_STATE_ADDRESS			0x0000
+@@ -179,12 +303,12 @@ enum ath10k_mcast2ucast_mode {
+ #define PCIE_SOC_WAKE_RESET			0x00000000
+ #define SOC_GLOBAL_RESET_ADDRESS		0x0008
+
+-#define RTC_SOC_BASE_ADDRESS			0x00004000
+-#define RTC_WMAC_BASE_ADDRESS			0x00005000
++#define RTC_SOC_BASE_ADDRESS			ar->regs->rtc_soc_base_address
++#define RTC_WMAC_BASE_ADDRESS			ar->regs->rtc_wmac_base_address
+ #define MAC_COEX_BASE_ADDRESS			0x00006000
+ #define BT_COEX_BASE_ADDRESS			0x00007000
+ #define SOC_PCIE_BASE_ADDRESS			0x00008000
+-#define SOC_CORE_BASE_ADDRESS			0x00009000
++#define SOC_CORE_BASE_ADDRESS			ar->regs->soc_core_base_address
+ #define WLAN_UART_BASE_ADDRESS			0x0000c000
+ #define WLAN_SI_BASE_ADDRESS			0x00010000
+ #define WLAN_GPIO_BASE_ADDRESS			0x00014000
+@@ -193,23 +317,23 @@ enum ath10k_mcast2ucast_mode {
+ #define EFUSE_BASE_ADDRESS			0x00030000
+ #define FPGA_REG_BASE_ADDRESS			0x00039000
+ #define WLAN_UART2_BASE_ADDRESS			0x00054c00
+-#define CE_WRAPPER_BASE_ADDRESS			0x00057000
+-#define CE0_BASE_ADDRESS			0x00057400
+-#define CE1_BASE_ADDRESS			0x00057800
+-#define CE2_BASE_ADDRESS			0x00057c00
+-#define CE3_BASE_ADDRESS			0x00058000
+-#define CE4_BASE_ADDRESS			0x00058400
+-#define CE5_BASE_ADDRESS			0x00058800
+-#define CE6_BASE_ADDRESS			0x00058c00
+-#define CE7_BASE_ADDRESS			0x00059000
++#define CE_WRAPPER_BASE_ADDRESS			ar->regs->ce_wrapper_base_address
++#define CE0_BASE_ADDRESS			ar->regs->ce0_base_address
++#define CE1_BASE_ADDRESS			ar->regs->ce1_base_address
++#define CE2_BASE_ADDRESS			ar->regs->ce2_base_address
++#define CE3_BASE_ADDRESS			ar->regs->ce3_base_address
++#define CE4_BASE_ADDRESS			ar->regs->ce4_base_address
++#define CE5_BASE_ADDRESS			ar->regs->ce5_base_address
++#define CE6_BASE_ADDRESS			ar->regs->ce6_base_address
++#define CE7_BASE_ADDRESS			ar->regs->ce7_base_address
+ #define DBI_BASE_ADDRESS			0x00060000
+ #define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS	0x0006c000
+ #define PCIE_LOCAL_BASE_ADDRESS			0x00080000
+
+ #define SOC_RESET_CONTROL_ADDRESS		0x00000000
+ #define SOC_RESET_CONTROL_OFFSET		0x00000000
+-#define SOC_RESET_CONTROL_SI0_RST_MASK		0x00000001
+-#define SOC_RESET_CONTROL_CE_RST_MASK		0x00040000
++#define SOC_RESET_CONTROL_SI0_RST_MASK		ar->regs->soc_reset_control_si0_rst_mask
++#define SOC_RESET_CONTROL_CE_RST_MASK		ar->regs->soc_reset_control_ce_rst_mask
+ #define SOC_RESET_CONTROL_CPU_WARM_RST_MASK	0x00000040
+ #define SOC_CPU_CLOCK_OFFSET			0x00000020
+ #define SOC_CPU_CLOCK_STANDARD_LSB		0
+@@ -223,7 +347,7 @@ enum ath10k_mcast2ucast_mode {
+ #define SOC_LF_TIMER_CONTROL0_ADDRESS		0x00000050
+ #define SOC_LF_TIMER_CONTROL0_ENABLE_MASK	0x00000004
+
+-#define SOC_CHIP_ID_ADDRESS			0x000000ec
++#define SOC_CHIP_ID_ADDRESS			ar->regs->soc_chip_id_address
+ #define SOC_CHIP_ID_REV_LSB			8
+ #define SOC_CHIP_ID_REV_MASK			0x00000f00
+
+@@ -274,11 +398,12 @@ enum ath10k_mcast2ucast_mode {
+ #define SI_RX_DATA1_OFFSET			0x00000014
+
+ #define CORE_CTRL_CPU_INTR_MASK			0x00002000
++#define CORE_CTRL_PCIE_REG_31_MASK		0x00000800
+ #define CORE_CTRL_ADDRESS			0x0000
+ #define PCIE_INTR_ENABLE_ADDRESS		0x0008
+ #define PCIE_INTR_CAUSE_ADDRESS			0x000c
+ #define PCIE_INTR_CLR_ADDRESS			0x0014
+-#define SCRATCH_3_ADDRESS			0x0030
++#define SCRATCH_3_ADDRESS			ar->regs->scratch_3_address
+ #define CPU_INTR_ADDRESS			0x0010
+
+ /* Firmware indications to the Host via SCRATCH_3 register. */
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -26,6 +26,9 @@
+ #include "wmi.h"
+ #include "htt.h"
+ #include "txrx.h"
++#include "testmode.h"
++#include "wmi.h"
++#include "wmi-ops.h"
+
+ /**********/
+ /* Crypto */
+@@ -34,8 +37,9 @@
+ static int ath10k_send_key(struct ath10k_vif *arvif,
+ 			   struct ieee80211_key_conf *key,
+ 			   enum set_key_cmd cmd,
+-			   const u8 *macaddr)
++			   const u8 *macaddr, bool def_idx)
+ {
++	struct ath10k *ar = arvif->ar;
+ 	struct wmi_vdev_install_key_arg arg = {
+ 		.vdev_id = arvif->vdev_id,
+ 		.key_idx = key->keyidx,
+@@ -54,7 +58,7 @@ static int ath10k_send_key(struct ath10k
+ 	switch (key->cipher) {
+ 	case WLAN_CIPHER_SUITE_CCMP:
+ 		arg.key_cipher = WMI_CIPHER_AES_CCM;
+-		key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
++		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+ 		break;
+ 	case WLAN_CIPHER_SUITE_TKIP:
+ 		arg.key_cipher = WMI_CIPHER_TKIP;
+@@ -68,9 +72,12 @@ static int ath10k_send_key(struct ath10k
+ 		 * Otherwise pairwise key must be set */
+ 		if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN))
+ 			arg.key_flags = WMI_KEY_PAIRWISE;
++
++		if (def_idx)
++			arg.key_flags |= WMI_KEY_TX_USAGE;
+ 		break;
+ 	default:
+-		ath10k_warn("cipher %d is not supported\n", key->cipher);
++		ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
+ 		return -EOPNOTSUPP;
+ 	}
+
+@@ -85,7 +92,7 @@ static int ath10k_send_key(struct ath10k
+ static int ath10k_install_key(struct ath10k_vif *arvif,
+ 			      struct ieee80211_key_conf *key,
+ 			      enum set_key_cmd cmd,
+-			      const u8 *macaddr)
++			      const u8 *macaddr, bool def_idx)
+ {
+ 	struct ath10k *ar = arvif->ar;
+ 	int ret;
+@@ -94,7 +101,7 @@ static int ath10k_install_key(struct ath
+
+ 	reinit_completion(&ar->install_key_done);
+
+-	ret = ath10k_send_key(arvif, key, cmd, macaddr);
++	ret = ath10k_send_key(arvif, key, cmd, macaddr, def_idx);
+ 	if (ret)
+ 		return ret;
+
+@@ -112,6 +119,7 @@ static int ath10k_install_peer_wep_keys(
+ 	struct ath10k_peer *peer;
+ 	int ret;
+ 	int i;
++	bool def_idx;
+
+ 	lockdep_assert_held(&ar->conf_mutex);
+
+@@ -125,13 +133,20 @@ static int ath10k_install_peer_wep_keys(
+ 	for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
+ 		if (arvif->wep_keys[i] == NULL)
+ 			continue;
++		/* set TX_USAGE flag for default key id */
++		if (arvif->def_wep_key_idx == i)
++			def_idx = true;
++		else
++			def_idx = false;
+
+ 		ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
+-					 addr);
++					 addr, def_idx);
+ 		if (ret)
+ 			return ret;
+
++		spin_lock_bh(&ar->data_lock);
+ 		peer->keys[i] = arvif->wep_keys[i];
++		spin_unlock_bh(&ar->data_lock);
+ 	}
+
+ 	return 0;
+@@ -159,21 +174,49 @@ static int ath10k_clear_peer_keys(struct
+ 		if (peer->keys[i] == NULL)
+ 			continue;
+
++		/* key flags are not required to delete the key */
+ 		ret = ath10k_install_key(arvif, peer->keys[i],
+-					 DISABLE_KEY, addr);
++					 DISABLE_KEY, addr, false);
+ 		if (ret && first_errno == 0)
+ 			first_errno = ret;
+
+ 		if (ret)
+-			ath10k_warn("failed to remove peer wep key %d: %d\n",
++			ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
+ 				    i, ret);
+
++		spin_lock_bh(&ar->data_lock);
+ 		peer->keys[i] = NULL;
++		spin_unlock_bh(&ar->data_lock);
+ 	}
+
+ 	return first_errno;
+ }
+
++bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
++				    u8 keyidx)
++{
++	struct ath10k_peer *peer;
++	int i;
++
++	lockdep_assert_held(&ar->data_lock);
++
++	/* We don't know which vdev this peer belongs to,
++	 * since WMI doesn't give us that information.
++	 *
++	 * FIXME: multi-bss needs to be handled.
++	 */
++	peer = ath10k_peer_find(ar, 0, addr);
++	if (!peer)
++		return false;
++
++	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
++		if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
++			return true;
++	}
++
++	return false;
++}
++
+ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
+ 				 struct ieee80211_key_conf *key)
+ {
+@@ -194,7 +237,7 @@ static int ath10k_clear_vdev_key(struct
+ 		list_for_each_entry(peer, &ar->peers, list) {
+ 			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ 				if (peer->keys[i] == key) {
+-					memcpy(addr, peer->addr, ETH_ALEN);
++					ether_addr_copy(addr, peer->addr);
+ 					peer->keys[i] = NULL;
+ 					break;
+ 				}
+@@ -207,20 +250,19 @@ static int ath10k_clear_vdev_key(struct
+
+ 		if (i == ARRAY_SIZE(peer->keys))
+ 			break;
+-
+-		ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr);
++		/* key flags are not required to delete the key */
++		ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, false);
+ 		if (ret && first_errno == 0)
+ 			first_errno = ret;
+
+ 		if (ret)
+-			ath10k_warn("failed to remove key for %pM: %d\n",
++			ath10k_warn(ar, "failed to remove key for %pM: %d\n",
+ 				    addr, ret);
+ 	}
+
+ 	return first_errno;
+ }
+
+-
+ /*********************/
+ /* General utilities */
+ /*********************/
+@@ -234,7 +276,10 @@ chan_to_phymode(const struct cfg80211_ch
+ 	case IEEE80211_BAND_2GHZ:
+ 		switch (chandef->width) {
+ 		case NL80211_CHAN_WIDTH_20_NOHT:
+-			phymode = MODE_11G;
++			if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
++				phymode = MODE_11B;
++			else
++				phymode = MODE_11G;
+ 			break;
+ 		case NL80211_CHAN_WIDTH_20:
+ 			phymode = MODE_11NG_HT20;
+@@ -322,22 +367,24 @@ static int ath10k_peer_create(struct ath
+
+ 	lockdep_assert_held(&ar->conf_mutex);
+
++	if (ar->num_peers >= ar->max_num_peers)
++		return -ENOBUFS;
++
+ 	ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
+ 	if (ret) {
+-		ath10k_warn("failed to create wmi peer %pM on vdev %i: %i\n",
++		ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
+ 			    addr, vdev_id, ret);
+ 		return ret;
+ 	}
+
+ 	ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
+ 	if (ret) {
+-		ath10k_warn("failed to wait for created wmi peer %pM on vdev %i: %i\n",
++		ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
+ 			    addr, vdev_id, ret);
+ 		return ret;
+ 	}
+-	spin_lock_bh(&ar->data_lock);
++
+ 	ar->num_peers++;
+-	spin_unlock_bh(&ar->data_lock);
+
+ 	return 0;
+ }
+@@ -352,7 +399,7 @@ static int ath10k_mac_set_kickout(struct
+ 	ret = ath10k_wmi_pdev_set_param(ar, param,
+ 					ATH10K_KICKOUT_THRESHOLD);
+ 	if (ret) {
+-		ath10k_warn("failed to set kickout threshold on vdev %i: %d\n",
++		ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
+ 			    arvif->vdev_id, ret);
+ 		return ret;
+ 	}
+@@ -361,7 +408,7 @@ static int ath10k_mac_set_kickout(struct
+ 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+ 					ATH10K_KEEPALIVE_MIN_IDLE);
+ 	if (ret) {
+-		ath10k_warn("failed to set keepalive minimum idle time on vdev %i: %d\n",
++		ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
+ 			    arvif->vdev_id, ret);
+ 		return ret;
+ 	}
+@@ -370,7 +417,7 @@ static int ath10k_mac_set_kickout(struct
+ 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+ 					ATH10K_KEEPALIVE_MAX_IDLE);
+ 	if (ret) {
+-		ath10k_warn("failed to set keepalive maximum idle time on vdev %i: %d\n",
++		ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
+ 			    arvif->vdev_id, ret);
+ 		return ret;
+ 	}
+@@ -379,7 +426,7 @@ static int ath10k_mac_set_kickout(struct
+ 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+ 					ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
+ 	if (ret) {
+-		ath10k_warn("failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
++		ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+ 			    arvif->vdev_id, ret);
+ 		return ret;
+ 	}
+@@ -387,15 +434,11 @@ static int ath10k_mac_set_kickout(struct
+ 	return 0;
+ }
+
+-static int  ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
++static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
+ {
+ 	struct ath10k *ar = arvif->ar;
+ 	u32 vdev_param;
+
+-	if (value != 0xFFFFFFFF)
+-		value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold,
+-			      ATH10K_RTS_MAX);
+-
+ 	vdev_param = ar->wmi.vdev_param->rts_threshold;
+ 	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
+ }
+@@ -428,9 +471,7 @@ static int ath10k_peer_delete(struct ath
+ 	if (ret)
+ 		return ret;
+
+-	spin_lock_bh(&ar->data_lock);
+ 	ar->num_peers--;
+-	spin_unlock_bh(&ar->data_lock);
+
+ 	return 0;
+ }
+@@ -446,7 +487,7 @@ static void ath10k_peer_cleanup(struct a
+ 		if (peer->vdev_id != vdev_id)
+ 			continue;
+
+-		ath10k_warn("removing stale peer %pM from vdev_id %d\n",
++		ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
+ 			    peer->addr, vdev_id);
+
+ 		list_del(&peer->list);
+@@ -467,20 +508,63 @@ static void ath10k_peer_cleanup_all(stru
+ 		list_del(&peer->list);
+ 		kfree(peer);
+ 	}
+-	ar->num_peers = 0;
+ 	spin_unlock_bh(&ar->data_lock);
++
++	ar->num_peers = 0;
++	ar->num_stations = 0;
+ }
+
+ /************************/
+ /* Interface management */
+ /************************/
+
++void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
++{
++	struct ath10k *ar = arvif->ar;
++
++	lockdep_assert_held(&ar->data_lock);
++
++	if (!arvif->beacon)
++		return;
++
++	if (!arvif->beacon_buf)
++		dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
++				 arvif->beacon->len, DMA_TO_DEVICE);
++
++	if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
++		    arvif->beacon_state != ATH10K_BEACON_SENT))
++		return;
++
++	dev_kfree_skb_any(arvif->beacon);
++
++	arvif->beacon = NULL;
++	arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
++}
++
++static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
++{
++	struct ath10k *ar = arvif->ar;
++
++	lockdep_assert_held(&ar->data_lock);
++
++	ath10k_mac_vif_beacon_free(arvif);
++
++	if (arvif->beacon_buf) {
++		dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
++				  arvif->beacon_buf, arvif->beacon_paddr);
++		arvif->beacon_buf = NULL;
++	}
++}
++
+ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
+ {
+ 	int ret;
+
+ 	lockdep_assert_held(&ar->conf_mutex);
+
++	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
++		return -ESHUTDOWN;
++
+ 	ret = wait_for_completion_timeout(&ar->vdev_setup_done,
+ 					  ATH10K_VDEV_SETUP_TIMEOUT_HZ);
+ 	if (ret == 0)
+@@ -489,19 +573,6 @@ static inline int ath10k_vdev_setup_sync
+ 	return 0;
+ }
+
+-static bool ath10k_monitor_is_enabled(struct ath10k *ar)
+-{
+-	lockdep_assert_held(&ar->conf_mutex);
+-
+-	ath10k_dbg(ATH10K_DBG_MAC,
+-		   "mac monitor refs: promisc %d monitor %d cac %d\n",
+-		   ar->promisc, ar->monitor,
+-		   test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags));
+-
+-	return ar->promisc || ar->monitor ||
+-	       test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+-}
+-
+ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
+ {
+ 	struct cfg80211_chan_def *chandef = &ar->chandef;
+@@ -526,37 +597,39 @@ static int ath10k_monitor_vdev_start(str
+ 	arg.channel.max_reg_power = channel->max_reg_power * 2;
+ 	arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
+
++	reinit_completion(&ar->vdev_setup_done);
++
+ 	ret = ath10k_wmi_vdev_start(ar, &arg);
+ 	if (ret) {
+-		ath10k_warn("failed to request monitor vdev %i start: %d\n",
++		ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
+ 			    vdev_id, ret);
+ 		return ret;
+ 	}
+
+ 	ret = ath10k_vdev_setup_sync(ar);
+ 	if (ret) {
+-		ath10k_warn("failed to synchronize setup for monitor vdev %i: %d\n",
++		ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
+ 			    vdev_id, ret);
+ 		return ret;
+ 	}
+
+ 	ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+ 	if (ret) {
+-		ath10k_warn("failed to put up monitor vdev %i: %d\n",
++		ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
+ 			    vdev_id, ret);
+ 		goto vdev_stop;
+ 	}
+
+ 	ar->monitor_vdev_id = vdev_id;
+
+-	ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
++	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
+ 		   ar->monitor_vdev_id);
+ 	return 0;
+
+ vdev_stop:
+ 	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+ 	if (ret)
+-		ath10k_warn("failed to stop monitor vdev %i after start failure: %d\n",
++		ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
+ 			    ar->monitor_vdev_id, ret);
+
+ 	return ret;
+@@ -570,20 +643,22 @@ static int ath10k_monitor_vdev_stop(stru
+
+ 	ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
+ 	if (ret)
+-		ath10k_warn("failed to put down monitor vdev %i: %d\n",
++		ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
+ 			    ar->monitor_vdev_id, ret);
+
++	reinit_completion(&ar->vdev_setup_done);
++
+ 	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+ 	if (ret)
+-		ath10k_warn("failed to to request monitor vdev %i stop: %d\n",
++ ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret)
+- ath10k_warn("failed to synchronise monitor vdev %i: %d\n",
++ ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+
+- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
+ ar->monitor_vdev_id);
+ return ret;
+ }
+@@ -594,35 +669,29 @@ static int ath10k_monitor_vdev_create(st
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+- bit = ffs(ar->free_vdev_map);
+- if (bit == 0) {
+- ath10k_warn("failed to find free vdev id for monitor vdev\n");
++ if (ar->free_vdev_map == 0) {
++ ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
+ return -ENOMEM;
+ }
+
+- ar->monitor_vdev_id = bit - 1;
+- ar->free_vdev_map &= ~(1 << ar->monitor_vdev_id);
++ bit = __ffs64(ar->free_vdev_map);
++
++ ar->monitor_vdev_id = bit;
+
+ ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
+ WMI_VDEV_TYPE_MONITOR,
+ 0, ar->mac_addr);
+ if (ret) {
+- ath10k_warn("failed to request monitor vdev %i creation: %d\n",
++ ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
+ ar->monitor_vdev_id, ret);
+- goto vdev_fail;
++ return ret;
+ }
+
+- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
++ ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+-
+-vdev_fail:
+- /*
+- * Restore the ID to the global map.
+- */
+- ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
+- return ret;
+ }
+
+ static int ath10k_monitor_vdev_delete(struct ath10k *ar)
+@@ -633,14 +702,14 @@ static int ath10k_monitor_vdev_delete(st
+
+ ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
+ if (ret) {
+- ath10k_warn("failed to request wmi monitor vdev %i removal: %d\n",
++ ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+- ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
++ ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
+
+- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
+ ar->monitor_vdev_id);
+ return ret;
+ }
+@@ -651,63 +720,70 @@ static int ath10k_monitor_start(struct a
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+- if (!ath10k_monitor_is_enabled(ar)) {
+- ath10k_warn("trying to start monitor with no references\n");
+- return 0;
+- }
+-
+- if (ar->monitor_started) {
+- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor already started\n");
+- return 0;
+- }
+-
+ ret = ath10k_monitor_vdev_create(ar);
+ if (ret) {
+- ath10k_warn("failed to create monitor vdev: %d\n", ret);
++ ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
+ if (ret) {
+- ath10k_warn("failed to start monitor vdev: %d\n", ret);
++ ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
+ ath10k_monitor_vdev_delete(ar);
+ return ret;
+ }
+
+ ar->monitor_started = true;
+- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor started\n");
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");
+
+ return 0;
+ }
+
+-static void ath10k_monitor_stop(struct ath10k *ar)
++static int ath10k_monitor_stop(struct ath10k *ar)
+ {
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+- if (ath10k_monitor_is_enabled(ar)) {
+- ath10k_dbg(ATH10K_DBG_MAC,
+- "mac monitor will be stopped later\n");
+- return;
++ ret = ath10k_monitor_vdev_stop(ar);
++ if (ret) {
++ ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
++ return ret;
+ }
+
+- if (!ar->monitor_started) {
+- ath10k_dbg(ATH10K_DBG_MAC,
+- "mac monitor probably failed to start earlier\n");
+- return;
++ ret = ath10k_monitor_vdev_delete(ar);
++ if (ret) {
++ ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
++ return ret;
+ }
+
+- ret = ath10k_monitor_vdev_stop(ar);
+- if (ret)
+- ath10k_warn("failed to stop monitor vdev: %d\n", ret);
++ ar->monitor_started = false;
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");
+
+- ret = ath10k_monitor_vdev_delete(ar);
+- if (ret)
+- ath10k_warn("failed to delete monitor vdev: %d\n", ret);
++ return 0;
++}
+
+- ar->monitor_started = false;
+- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor stopped\n");
++static int ath10k_monitor_recalc(struct ath10k *ar)
++{
++ bool should_start;
++
++ lockdep_assert_held(&ar->conf_mutex);
++
++ should_start = ar->monitor ||
++ ar->filter_flags & FIF_PROMISC_IN_BSS ||
++ test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
++
++ ath10k_dbg(ar, ATH10K_DBG_MAC,
++ "mac monitor recalc started? %d should? %d\n",
++ ar->monitor_started, should_start);
++
++ if (should_start == ar->monitor_started)
++ return 0;
++
++ if (should_start)
++ return ath10k_monitor_start(ar);
++
++ return ath10k_monitor_stop(ar);
+ }
+
+ static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
+@@ -738,14 +814,14 @@ static int ath10k_start_cac(struct ath10
+
+ set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+
+- ret = ath10k_monitor_start(ar);
++ ret = ath10k_monitor_recalc(ar);
+ if (ret) {
+- ath10k_warn("failed to start monitor (cac): %d\n", ret);
++ ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
+ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ return ret;
+ }
+
+- ath10k_dbg(ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+@@ -762,7 +838,7 @@ static int ath10k_stop_cac(struct ath10k
+ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ ath10k_monitor_stop(ar);
+
+- ath10k_dbg(ATH10K_DBG_MAC, "mac cac finished\n");
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");
+
+ return 0;
+ }
+@@ -788,12 +864,12 @@ static void ath10k_recalc_radar_detectio
+ * radiation is not allowed, make this channel DFS_UNAVAILABLE
+ * by indicating that radar was detected.
+ */
+- ath10k_warn("failed to start CAC: %d\n", ret);
++ ath10k_warn(ar, "failed to start CAC: %d\n", ret);
+ ieee80211_radar_detected(ar->hw);
+ }
+ }
+
+-static int ath10k_vdev_start(struct ath10k_vif *arvif)
++static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, bool restart)
+ {
+ struct ath10k *ar = arvif->ar;
+ struct cfg80211_chan_def *chandef = &ar->chandef;
+@@ -830,22 +906,27 @@ static int ath10k_vdev_start(struct ath1
+ arg.ssid_len = arvif->vif->bss_conf.ssid_len;
+ }
+
+- ath10k_dbg(ATH10K_DBG_MAC,
++ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d start center_freq %d phymode %s\n",
+ arg.vdev_id, arg.channel.freq,
+ ath10k_wmi_phymode_str(arg.channel.mode));
+
++ if (restart)
++ ret = ath10k_wmi_vdev_restart(ar, &arg);
++ else
++ ret = ath10k_wmi_vdev_start(ar, &arg);
++
+ if (ret) {
+- ath10k_warn("failed to start WMI vdev %i: %d\n",
++ ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
+ arg.vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+- ath10k_warn("failed to synchronise setup for vdev %i: %d\n",
+- arg.vdev_id, ret);
++ ath10k_warn(ar,
++ "failed to synchronize setup for vdev %i restart %d: %d\n",
++ arg.vdev_id, restart, ret);
+ return ret;
+ }
+
+@@ -855,6 +936,16 @@ static int ath10k_vdev_start(struct ath1
+ return ret;
+ }
+
++static int ath10k_vdev_start(struct ath10k_vif *arvif)
++{
++ return ath10k_vdev_start_restart(arvif, false);
++}
++
++static int ath10k_vdev_restart(struct ath10k_vif *arvif)
++{
++ return ath10k_vdev_start_restart(arvif, true);
++}
++
+ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+ {
+ struct ath10k *ar = arvif->ar;
+@@ -866,14 +957,14 @@ static int ath10k_vdev_stop(struct ath10
+
+ ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+- ath10k_warn("failed to stop WMI vdev %i: %d\n",
++ ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+- ath10k_warn("failed to syncronise setup for vdev %i: %d\n",
++ ath10k_warn(ar, "failed to synchronize setup for vdev %i stop: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+@@ -888,9 +979,147 @@ static int ath10k_vdev_stop(struct ath10
+ return ret;
+ }
+
++static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
++ struct sk_buff *bcn)
++{
++ struct ath10k *ar = arvif->ar;
++ struct ieee80211_mgmt *mgmt;
++ const u8 *p2p_ie;
++ int ret;
++
++ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
++ return 0;
++
++ if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
++ return 0;
++
++ mgmt = (void *)bcn->data;
++ p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
++ mgmt->u.beacon.variable,
++ bcn->len - (mgmt->u.beacon.variable -
++ bcn->data));
++ if (!p2p_ie)
++ return -ENOENT;
++
++ ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
++ if (ret) {
++ ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
++ arvif->vdev_id, ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
++ u8 oui_type, size_t ie_offset)
++{
++ size_t len;
++ const u8 *next;
++ const u8 *end;
++ u8 *ie;
++
++ if (WARN_ON(skb->len < ie_offset))
++ return -EINVAL;
++
++ ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
++ skb->data + ie_offset,
++ skb->len - ie_offset);
++ if (!ie)
++ return -ENOENT;
++
++ len = ie[1] + 2;
++ end = skb->data + skb->len;
++ next = ie + len;
++
++ if (WARN_ON(next > end))
++ return -EINVAL;
++
++ memmove(ie, next, end - next);
++ skb_trim(skb, skb->len - len);
++
++ return 0;
++}
++
++static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
++{
++ struct ath10k *ar = arvif->ar;
++ struct ieee80211_hw *hw = ar->hw;
++ struct ieee80211_vif *vif = arvif->vif;
++ struct ieee80211_mutable_offsets offs = {};
++ struct sk_buff *bcn;
++ int ret;
++
++ if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
++ return 0;
++
++ bcn = ieee80211_beacon_get_template(hw, vif, &offs);
++ if (!bcn) {
++ ath10k_warn(ar, "failed to get beacon template from mac80211\n");
++ return -EPERM;
++ }
++
++ ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
++ if (ret) {
++ ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
++ kfree_skb(bcn);
++ return ret;
++ }
++
++ /* P2P IE is inserted by firmware automatically (as configured above)
++ * so remove it from the base beacon template to avoid duplicate P2P
++ * IEs in beacon frames.
++ */
++ ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
++ offsetof(struct ieee80211_mgmt,
++ u.beacon.variable));
++
++ ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
++ 0, NULL, 0);
++ kfree_skb(bcn);
++
++ if (ret) {
++ ath10k_warn(ar, "failed to submit beacon template command: %d\n",
++ ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
++{
++ struct ath10k *ar = arvif->ar;
++ struct ieee80211_hw *hw = ar->hw;
++ struct ieee80211_vif *vif = arvif->vif;
++ struct sk_buff *prb;
++ int ret;
++
++ if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
++ return 0;
++
++ prb = ieee80211_proberesp_get(hw, vif);
++ if (!prb) {
++ ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
++ return -EPERM;
++ }
++
++ ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
++ kfree_skb(prb);
++
++ if (ret) {
++ ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
++ ret);
++ return ret;
++ }
++
++ return 0;
++}
++
+ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
+- struct ieee80211_bss_conf *info)
++ struct ieee80211_bss_conf *info)
+ {
++ struct ath10k *ar = arvif->ar;
+ int ret = 0;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+@@ -902,15 +1131,7 @@ static void ath10k_control_beaconing(str
+ arvif->is_up = false;
+
+ spin_lock_bh(&arvif->ar->data_lock);
+- if (arvif->beacon) {
+- dma_unmap_single(arvif->ar->dev,
+- ATH10K_SKB_CB(arvif->beacon)->paddr,
+- arvif->beacon->len, DMA_TO_DEVICE);
+- dev_kfree_skb_any(arvif->beacon);
+-
+- arvif->beacon = NULL;
+- arvif->beacon_sent = false;
+- }
++ ath10k_mac_vif_beacon_free(arvif);
+ spin_unlock_bh(&arvif->ar->data_lock);
+
+ return;
+@@ -923,12 +1144,12 @@ static void ath10k_control_beaconing(str
+ return;
+
+ arvif->aid = 0;
+- memcpy(arvif->bssid, info->bssid, ETH_ALEN);
++ ether_addr_copy(arvif->bssid, info->bssid);
+
+ ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+- ath10k_warn("failed to bring up vdev %d: %i\n",
++ ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ ath10k_vdev_stop(arvif);
+ return;
+@@ -937,13 +1158,14 @@ static void ath10k_control_beaconing(str
+ arvif->is_started = true;
+ arvif->is_up = true;
+
+- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
+ }
+
+ static void ath10k_control_ibss(struct ath10k_vif *arvif,
+ struct ieee80211_bss_conf *info,
+ const u8 self_peer[ETH_ALEN])
+ {
++ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+ int ret = 0;
+
+@@ -952,20 +1174,12 @@ static void ath10k_control_ibss(struct a
+ if (!info->ibss_joined) {
+ ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
+ if (ret)
+- ath10k_warn("failed to delete IBSS self peer %pM for vdev %d: %d\n",
++ ath10k_warn(ar, "failed to delete IBSS self peer %pM for vdev %d: %d\n",
+ self_peer, arvif->vdev_id, ret);
+
+ if (is_zero_ether_addr(arvif->bssid))
+ return;
+
+- ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
+- arvif->bssid);
+- if (ret) {
+- ath10k_warn("failed to delete IBSS BSSID peer %pM for vdev %d: %d\n",
+- arvif->bssid, arvif->vdev_id, ret);
+- return;
+- }
+-
+ memset(arvif->bssid, 0, ETH_ALEN);
+
+ return;
+@@ -973,7 +1187,7 @@ static void ath10k_control_ibss(struct a
+
+ ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
+ if (ret) {
+- ath10k_warn("failed to create IBSS self peer %pM for vdev %d: %d\n",
++ ath10k_warn(ar, "failed to create IBSS self peer %pM for vdev %d: %d\n",
+ self_peer, arvif->vdev_id, ret);
+ return;
+ }
+@@ -982,103 +1196,211 @@ static void ath10k_control_ibss(struct a
+ ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
+ ATH10K_DEFAULT_ATIM);
+ if (ret)
+- ath10k_warn("failed to set IBSS ATIM for vdev %d: %d\n",
++ ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+-/*
+- * Review this when mac80211 gains per-interface powersave support.
+- */
+-static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
++static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
+ {
+ struct ath10k *ar = arvif->ar;
+- struct ieee80211_conf *conf = &ar->hw->conf;
+- enum wmi_sta_powersave_param param;
+- enum wmi_sta_ps_mode psmode;
++ u32 param;
++ u32 value;
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+- if (arvif->vif->type != NL80211_IFTYPE_STATION)
+- return 0;
+-
+- if (conf->flags & IEEE80211_CONF_PS) {
+- psmode = WMI_STA_PS_MODE_ENABLED;
+- param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
++ if (arvif->u.sta.uapsd)
++ value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
++ else
++ value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
+
+- ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
+- conf->dynamic_ps_timeout);
+- if (ret) {
+- ath10k_warn("failed to set inactivity time for vdev %d: %i\n",
+- arvif->vdev_id, ret);
+- return ret;
+- }
+- } else {
+- psmode = WMI_STA_PS_MODE_DISABLED;
++ param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
++ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
++ if (ret) {
++ ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
++ value, arvif->vdev_id, ret);
++ return ret;
+ }
+
+- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
+- arvif->vdev_id, psmode ? "enable" : "disable");
++ return 0;
++}
+
+- ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
++static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
++{
++ struct ath10k *ar = arvif->ar;
++ u32 param;
++ u32 value;
++ int ret;
++
++ lockdep_assert_held(&arvif->ar->conf_mutex);
++
++ if (arvif->u.sta.uapsd)
++ value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
++ else
++ value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
++
++ param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
++ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
++ param, value);
+ if (ret) {
+- ath10k_warn("failed to set PS Mode %d for vdev %d: %d\n",
+- psmode, arvif->vdev_id, ret);
++ ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
++ value, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+ }
+
+-/**********************/
+-/* Station management */
+-/**********************/
+-
+-static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
+- struct ath10k_vif *arvif,
+- struct ieee80211_sta *sta,
+- struct ieee80211_bss_conf *bss_conf,
+- struct wmi_peer_assoc_complete_arg *arg)
++static int ath10k_mac_ps_vif_count(struct ath10k *ar)
+ {
++ struct ath10k_vif *arvif;
++ int num = 0;
++
+ lockdep_assert_held(&ar->conf_mutex);
+
+- memcpy(arg->addr, sta->addr, ETH_ALEN);
+- arg->vdev_id = arvif->vdev_id;
+- arg->peer_aid = sta->aid;
+- arg->peer_flags |= WMI_PEER_AUTH;
++ list_for_each_entry(arvif, &ar->arvifs, list)
++ if (arvif->ps)
++ num++;
+
+- if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+- /*
+- * Seems FW have problems with Power Save in STA
+- * mode when we setup this parameter to high (eg. 5).
+- * Often we see that FW don't send NULL (with clean P flags)
+- * frame even there is info about buffered frames in beacons.
+- * Sometimes we have to wait more than 10 seconds before FW
+- * will wakeup. Often sending one ping from AP to our device
+- * just fail (more than 50%).
+- *
+- * Seems setting this FW parameter to 1 couse FW
+- * will check every beacon and will wakup immediately
+- * after detection buffered data.
+- */
+- arg->peer_listen_intval = 1;
+- else
+- arg->peer_listen_intval = ar->hw->conf.listen_interval;
++ return num;
++}
+
+- arg->peer_num_spatial_streams = 1;
++static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
++{
++ struct ath10k *ar = arvif->ar;
++ struct ieee80211_vif *vif = arvif->vif;
++ struct ieee80211_conf *conf = &ar->hw->conf;
++ enum wmi_sta_powersave_param param;
++ enum wmi_sta_ps_mode psmode;
++ int ret;
++ int ps_timeout;
++ bool enable_ps;
+
+- /*
+- * The assoc capabilities are available only in managed mode.
++ lockdep_assert_held(&arvif->ar->conf_mutex);
++
++ if (arvif->vif->type != NL80211_IFTYPE_STATION)
++ return 0;
++
++ enable_ps = arvif->ps;
++
++ if (enable_ps && ath10k_mac_ps_vif_count(ar) > 1 &&
++ !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
++ ar->fw_features)) {
++ ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
++ arvif->vdev_id);
++ enable_ps = false;
++ }
++
++ if (enable_ps) {
++ psmode = WMI_STA_PS_MODE_ENABLED;
++ param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
++
++ ps_timeout = conf->dynamic_ps_timeout;
++ if (ps_timeout == 0) {
++ /* Firmware doesn't like 0 */
++ ps_timeout = ieee80211_tu_to_usec(
++ vif->bss_conf.beacon_int) / 1000;
++ }
++
++ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
++ ps_timeout);
++ if (ret) {
++ ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
++ arvif->vdev_id, ret);
++ return ret;
++ }
++ } else {
++ psmode = WMI_STA_PS_MODE_DISABLED;
++ }
++
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
++ arvif->vdev_id, psmode ? "enable" : "disable");
++
++ ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
++ if (ret) {
++ ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
++ psmode, arvif->vdev_id, ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
++{
++ struct ath10k *ar = arvif->ar;
++ struct wmi_sta_keepalive_arg arg = {};
++ int ret;
++
++ lockdep_assert_held(&arvif->ar->conf_mutex);
++
++ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
++ return 0;
++
++ if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
++ return 0;
++
++ /* Some firmware revisions have a bug and ignore the `enabled` field.
++ * Instead use the interval to disable the keepalive.
++ */
++ arg.vdev_id = arvif->vdev_id;
++ arg.enabled = 1;
++ arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
++ arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
++
++ ret = ath10k_wmi_sta_keepalive(ar, &arg);
++ if (ret) {
++ ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
++ arvif->vdev_id, ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++/**********************/
++/* Station management */
++/**********************/
++
++static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
++ struct ieee80211_vif *vif)
++{
++ /* Some firmware revisions have unstable STA powersave when listen
++ * interval is set too high (e.g. 5). The symptoms are firmware doesn't
++ * generate NullFunc frames properly even if buffered frames have been
++ * indicated in Beacon TIM. Firmware would seldom wake up to pull
++ * buffered frames. Often pinging the device from AP would simply fail.
++ *
++ * As a workaround set it to 1.
+ */
+- if (arvif->vdev_type == WMI_VDEV_TYPE_STA && bss_conf)
+- arg->peer_caps = bss_conf->assoc_capability;
++ if (vif->type == NL80211_IFTYPE_STATION)
++ return 1;
++
++ return ar->hw->conf.listen_interval;
++}
++
++static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
++ struct ieee80211_vif *vif,
++ struct ieee80211_sta *sta,
++ struct wmi_peer_assoc_complete_arg *arg)
++{
++ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
++
++ lockdep_assert_held(&ar->conf_mutex);
++
++ ether_addr_copy(arg->addr, sta->addr);
++ arg->vdev_id = arvif->vdev_id;
++ arg->peer_aid = sta->aid;
++ arg->peer_flags |= WMI_PEER_AUTH;
++ arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
++ arg->peer_num_spatial_streams = 1;
++ arg->peer_caps = vif->bss_conf.assoc_capability;
+ }
+
+ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
+- struct ath10k_vif *arvif,
++ struct ieee80211_vif *vif,
+ struct wmi_peer_assoc_complete_arg *arg)
+ {
+- struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_bss_conf *info = &vif->bss_conf;
+ struct cfg80211_bss *bss;
+ const u8 *rsnie = NULL;
+@@ -1097,21 +1419,21 @@ static void ath10k_peer_assoc_h_crypto(s
+ ies = rcu_dereference(bss->ies);
+
+ wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+- WLAN_OUI_TYPE_MICROSOFT_WPA,
+- ies->data,
+- ies->len);
++ WLAN_OUI_TYPE_MICROSOFT_WPA,
++ ies->data,
++ ies->len);
+ rcu_read_unlock();
+ cfg80211_put_bss(ar->hw->wiphy, bss);
+ }
+
+ /* FIXME: base on RSN IE/WPA IE is a correct idea? */
+ if (rsnie || wpaie) {
+- ath10k_dbg(ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
+ arg->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
+ }
+
+ if (wpaie) {
+- ath10k_dbg(ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
+ arg->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
+ }
+ }
+@@ -1149,6 +1471,7 @@ static void ath10k_peer_assoc_h_ht(struc
+ {
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ int i, n;
++ u32 stbc;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+@@ -1185,7 +1508,6 @@ static void ath10k_peer_assoc_h_ht(struc
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
+- u32 stbc;
+ stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
+ stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
+@@ -1220,7 +1542,7 @@ static void ath10k_peer_assoc_h_ht(struc
+ arg->peer_num_spatial_streams = sta->rx_nss;
+ }
+
+- ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
+ arg->addr,
+ arg->peer_ht_rates.num_rates,
+ arg->peer_num_spatial_streams);
+@@ -1237,7 +1559,7 @@ static int ath10k_peer_assoc_qos_ap(stru
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (sta->wme && sta->uapsd_queues) {
+- ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
+ sta->uapsd_queues, sta->max_sp);
+
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+@@ -1253,7 +1575,6 @@ static int ath10k_peer_assoc_qos_ap(stru
+ uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
+
+-
+ if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
+ max_sp = sta->max_sp;
+
+@@ -1262,7 +1583,7 @@ static int ath10k_peer_assoc_qos_ap(stru
+ WMI_AP_PS_PEER_PARAM_UAPSD,
+ uapsd);
+ if (ret) {
+- ath10k_warn("failed to set ap ps peer param uapsd for vdev %i: %d\n",
++ ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+@@ -1272,7 +1593,7 @@ static int ath10k_peer_assoc_qos_ap(stru
+ WMI_AP_PS_PEER_PARAM_MAX_SP,
+ max_sp);
+ if (ret) {
+- ath10k_warn("failed to set ap ps peer param max sp for vdev %i: %d\n",
++ ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+@@ -1282,9 +1603,10 @@ static int ath10k_peer_assoc_qos_ap(stru
+ sta->listen_interval - mac80211 patch required.
+ Currently use 10 seconds */
+ ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
+- WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 10);
++ WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
++ 10);
+ if (ret) {
+- ath10k_warn("failed to set ap ps peer param ageout time for vdev %i: %d\n",
++ ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+@@ -1304,8 +1626,11 @@ static void ath10k_peer_assoc_h_vht(stru
+ return;
+
+ arg->peer_flags |= WMI_PEER_VHT;
+- arg->peer_vht_caps = vht_cap->cap;
+
++ if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
++ arg->peer_flags |= WMI_PEER_VHT_2G;
++
++ arg->peer_vht_caps = vht_cap->cap;
+
+ ampdu_factor = (vht_cap->cap &
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
+@@ -1331,16 +1656,17 @@ static void ath10k_peer_assoc_h_vht(stru
+ arg->peer_vht_rates.tx_mcs_set =
+ __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
+
+- ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
+ sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+ }
+
+ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
+- struct ath10k_vif *arvif,
++ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+- struct ieee80211_bss_conf *bss_conf,
+ struct wmi_peer_assoc_complete_arg *arg)
+ {
++ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
++
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_AP:
+ if (sta->wme)
+@@ -1352,16 +1678,29 @@ static void ath10k_peer_assoc_h_qos(stru
+ }
+ break;
+ case WMI_VDEV_TYPE_STA:
+- if (bss_conf->qos)
++ if (vif->bss_conf.qos)
++ arg->peer_flags |= WMI_PEER_QOS;
++ break;
++ case WMI_VDEV_TYPE_IBSS:
++ if (sta->wme)
+ arg->peer_flags |= WMI_PEER_QOS;
+ break;
+ default:
+ break;
+ }
++
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
++ sta->addr, !!(arg->peer_flags & WMI_PEER_QOS));
++}
++
++static bool ath10k_mac_sta_has_11g_rates(struct ieee80211_sta *sta)
++{
++ /* First 4 rates in ath10k_rates are CCK (11b) rates. */
++ return sta->supp_rates[IEEE80211_BAND_2GHZ] >> 4;
+ }
+
+ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
+- struct ath10k_vif *arvif,
++ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+ {
+@@ -1369,13 +1708,20 @@ static void ath10k_peer_assoc_h_phymode(
+
+ switch (ar->hw->conf.chandef.chan->band) {
+ case IEEE80211_BAND_2GHZ:
+- if (sta->ht_cap.ht_supported) {
++ if (sta->vht_cap.vht_supported) {
++ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
++ phymode = MODE_11AC_VHT40;
++ else
++ phymode = MODE_11AC_VHT20;
++ } else if (sta->ht_cap.ht_supported) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NG_HT40;
+ else
+ phymode = MODE_11NG_HT20;
+- } else {
++ } else if (ath10k_mac_sta_has_11g_rates(sta)) {
+ phymode = MODE_11G;
++ } else {
++ phymode = MODE_11B;
+ }
+
+ break;
+@@ -1404,7 +1750,7 @@ static void ath10k_peer_assoc_h_phymode(
+ break;
+ }
+
+- ath10k_dbg(ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
+ sta->addr, ath10k_wmi_phymode_str(phymode));
+
+ arg->peer_phymode = phymode;
+@@ -1412,22 +1758,21 @@ static void ath10k_peer_assoc_h_phymode(
+ }
+
+ static int ath10k_peer_assoc_prepare(struct ath10k *ar,
+- struct ath10k_vif *arvif,
++ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+- struct ieee80211_bss_conf *bss_conf,
+ struct wmi_peer_assoc_complete_arg *arg)
+ {
+ lockdep_assert_held(&ar->conf_mutex);
+
+ memset(arg, 0, sizeof(*arg));
+
+- ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, arg);
+- ath10k_peer_assoc_h_crypto(ar, arvif, arg);
++ ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
++ ath10k_peer_assoc_h_crypto(ar, vif, arg);
+ ath10k_peer_assoc_h_rates(ar, sta, arg);
+ ath10k_peer_assoc_h_ht(ar, sta, arg);
+ ath10k_peer_assoc_h_vht(ar, sta, arg);
+- ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, arg);
+- ath10k_peer_assoc_h_phymode(ar, arvif, sta, arg);
++ ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
++ ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
+
+ return 0;
+ }
+@@ -1459,6 +1804,68 @@ static int ath10k_setup_peer_smps(struct
+ ath10k_smps_map[smps]);
+ }
+
++static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
++ struct ieee80211_vif *vif,
++ struct ieee80211_sta_vht_cap vht_cap)
++{
++ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
++ int ret;
++ u32 param;
++ u32 value;
++
++ if (!(ar->vht_cap_info &
++ (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
++ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
++ IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
++ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
++ return 0;
++
++ param = ar->wmi.vdev_param->txbf;
++ value = 0;
++
++ if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
++ return 0;
++
++ /* The following logic is correct. If a remote STA advertises support
++ * for being a beamformer then we should enable us being a beamformee.
++ */
++
++ if (ar->vht_cap_info &
++ (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
++ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
++ if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
++ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
++
++ if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
++ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
++ }
++
++ if (ar->vht_cap_info &
++ (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
++ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
++ if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
++ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
++
++ if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
++ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
++ }
++
++ if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
++ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
++
++ if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
++ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
++
++ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
++ if (ret) {
++ ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
++ value, ret);
++ return ret;
++ }
++
++ return 0;
++}
++
+ /* can be called only in mac80211 callbacks due to `key_count` usage */
+ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+@@ -1467,17 +1874,21 @@ static void ath10k_bss_assoc(struct ieee
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ieee80211_sta_ht_cap ht_cap;
++ struct ieee80211_sta_vht_cap vht_cap;
+ struct wmi_peer_assoc_complete_arg peer_arg;
+ struct ieee80211_sta *ap_sta;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
++ arvif->vdev_id, arvif->bssid, arvif->aid);
++
+ rcu_read_lock();
+
+ ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!ap_sta) {
+- ath10k_warn("failed to find station entry for bss %pM vdev %i\n",
++ ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
+ bss_conf->bssid, arvif->vdev_id);
+ rcu_read_unlock();
+ return;
+@@ -1486,11 +1897,11 @@ static void ath10k_bss_assoc(struct ieee
+ /* ap_sta must be accessed only within rcu section which must be left
+ * before calling ath10k_setup_peer_smps() which might sleep. */
+ ht_cap = ap_sta->ht_cap;
++ vht_cap = ap_sta->vht_cap;
+
+- ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
+- bss_conf, &peer_arg);
++ ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
+ if (ret) {
+- ath10k_warn("failed to prepare peer assoc for %pM vdev %i: %d\n",
++ ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
+ bss_conf->bssid, arvif->vdev_id, ret);
+ rcu_read_unlock();
+ return;
+@@ -1500,88 +1911,100 @@ static void ath10k_bss_assoc(struct ieee
+
+ ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
+ if (ret) {
+- ath10k_warn("failed to run peer assoc for %pM vdev %i: %d\n",
++ ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
+ bss_conf->bssid, arvif->vdev_id, ret);
+ return;
+ }
+
+ ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
+ if (ret) {
+- ath10k_warn("failed to setup peer SMPS for vdev %i: %d\n",
++ ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+- ath10k_dbg(ATH10K_DBG_MAC,
++ ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
++ if (ret) {
++ ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
++ arvif->vdev_id, bss_conf->bssid, ret);
++ return;
++ }
++
++ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d up (associated) bssid %pM aid %d\n",
+ arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+
++ WARN_ON(arvif->is_up);
++
+ arvif->aid = bss_conf->aid;
+- memcpy(arvif->bssid, bss_conf->bssid, ETH_ALEN);
++ ether_addr_copy(arvif->bssid, bss_conf->bssid);
+
+ ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
+ if (ret) {
+- ath10k_warn("failed to set vdev %d up: %d\n",
++ ath10k_warn(ar, "failed to set vdev %d up: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
++
++ /* Workaround: Some firmware revisions (tested with qca6174
++ * WLAN.RM.2.0-00073) have buggy powersave state machine and must be
++ * poked with peer param command.
++ */
++ ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
++ WMI_PEER_DUMMY_VAR, 1);
++ if (ret) {
++ ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
++ arvif->bssid, arvif->vdev_id, ret);
++ return;
++ }
+ }
+
+-/*
+- * FIXME: flush TIDs
+- */
+ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+ {
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
++ struct ieee80211_sta_vht_cap vht_cap = {};
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+- /*
+- * For some reason, calling VDEV-DOWN before VDEV-STOP
+- * makes the FW to send frames via HTT after disassociation.
+- * No idea why this happens, even though VDEV-DOWN is supposed
+- * to be analogous to link down, so just stop the VDEV.
+- */
+- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d stop (disassociated\n",
+- arvif->vdev_id);
+-
+- /* FIXME: check return value */
+- ret = ath10k_vdev_stop(arvif);
+-
+- /*
+- * If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and
+- * report beacons from previously associated network through HTT.
+- * This in turn would spam mac80211 WARN_ON if we bring down all
+- * interfaces as it expects there is no rx when no interface is
+- * running.
+- */
+- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id);
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
++ arvif->vdev_id, arvif->bssid);
+
+- /* FIXME: why don't we print error if wmi call fails? */
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
++ if (ret)
++ ath10k_warn(ar, "failed to down vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+- arvif->def_wep_key_idx = 0;
++ arvif->def_wep_key_idx = -1;
++
++ ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
++ if (ret) {
++ ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
++ arvif->vdev_id, ret);
++ return;
++ }
+
+- arvif->is_started = false;
+ arvif->is_up = false;
+ }
+
+-static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
+- struct ieee80211_sta *sta, bool reassoc)
++static int ath10k_station_assoc(struct ath10k *ar,
++ struct ieee80211_vif *vif,
++ struct ieee80211_sta *sta,
++ bool reassoc)
+ {
++ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct wmi_peer_assoc_complete_arg peer_arg;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+- ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
++ ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
+ if (ret) {
+- ath10k_warn("failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
++ ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+@@ -1589,48 +2012,59 @@ static int ath10k_station_assoc(struct a
+ peer_arg.peer_reassoc = reassoc;
+ ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
+ if (ret) {
+- ath10k_warn("failed to run peer assoc for STA %pM vdev %i: %d\n",
++ ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+
+- ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
+- if (ret) {
+- ath10k_warn("failed to setup peer SMPS for vdev %d: %d\n",
+- arvif->vdev_id, ret);
+- return ret;
+- }
+-
+- if (!sta->wme) {
+- arvif->num_legacy_stations++;
+- ret = ath10k_recalc_rtscts_prot(arvif);
++ /* Re-assoc is run only to update supported rates for given station. It
++ * doesn't make much sense to reconfigure the peer completely.
++ */
++ if (!reassoc) {
++ ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
++ &sta->ht_cap);
+ if (ret) {
+- ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
++ ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+- }
+
+- ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
+- if (ret) {
+- ath10k_warn("failed to install peer wep keys for vdev %i: %d\n",
+- arvif->vdev_id, ret);
+- return ret;
+- }
++ ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
++ if (ret) {
++ ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
++ sta->addr, arvif->vdev_id, ret);
++ return ret;
++ }
+
+- ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
+- if (ret) {
+- ath10k_warn("failed to set qos params for STA %pM for vdev %i: %d\n",
+- sta->addr, arvif->vdev_id, ret);
+- return ret;
++ if (!sta->wme) {
++ arvif->num_legacy_stations++;
++ ret = ath10k_recalc_rtscts_prot(arvif);
++ if (ret) {
++ ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
++ arvif->vdev_id, ret);
++ return ret;
++ }
++ }
++
++ /* Plumb cached keys only for static WEP */
++ if (arvif->def_wep_key_idx != -1) {
++ ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
++ if (ret) {
++ ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
++ arvif->vdev_id, ret);
++ return ret;
++ }
++ }
+ }
+
+ return ret;
+ }
+
+-static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
++static int ath10k_station_disassoc(struct ath10k *ar,
++ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+ {
++ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+@@ -1639,7 +2073,7 @@ static int ath10k_station_disassoc(struc
+ arvif->num_legacy_stations--;
+ ret = ath10k_recalc_rtscts_prot(arvif);
+ if (ret) {
+- ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
++ ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+@@ -1647,7 +2081,7 @@ static int ath10k_station_disassoc(struc
+
+ ret = ath10k_clear_peer_keys(arvif, sta->addr);
+ if (ret) {
+- ath10k_warn("failed to clear all peer wep keys for vdev %i: %d\n",
++ ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+@@ -1722,6 +2156,7 @@ static int ath10k_update_channel_list(st
+ ch->passive = passive;
+
+ ch->freq = channel->center_freq;
++ ch->band_center_freq1 = channel->center_freq;
+ ch->min_power = 0;
+ ch->max_power = channel->max_power * 2;
+ ch->max_reg_power = channel->max_reg_power * 2;
+@@ -1739,7 +2174,7 @@ static int ath10k_update_channel_list(st
+ if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
+ continue;
+
+- ath10k_dbg(ATH10K_DBG_WMI,
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+ ch - arg.channels, arg.n_channels,
+ ch->freq, ch->max_power, ch->max_reg_power,
+@@ -1782,7 +2217,7 @@ static void ath10k_regd_update(struct at
+
+ ret = ath10k_update_channel_list(ar);
+ if (ret)
+- ath10k_warn("failed to update channel list: %d\n", ret);
++ ath10k_warn(ar, "failed to update channel list: %d\n", ret);
+
+ regpair = ar->ath_common.regulatory.regpair;
+
+@@ -1803,7 +2238,7 @@ static void ath10k_regd_update(struct at
+ regpair->reg_5ghz_ctl,
+ wmi_dfs_reg);
+ if (ret)
+- ath10k_warn("failed to set pdev regdomain: %d\n", ret);
++ ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
+ }
+
+ static void ath10k_reg_notifier(struct wiphy *wiphy,
+@@ -1816,12 +2251,12 @@ static void ath10k_reg_notifier(struct w
+ ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
+
+ if (config_enabled(CPTCFG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+- ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
+ request->dfs_region);
+ result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
+ request->dfs_region);
+ if (!result)
+- ath10k_warn("DFS region 0x%X not supported, will trigger radar for every pulse\n",
++ ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
+ request->dfs_region);
+ }
+
+@@ -1849,28 +2284,25 @@ static u8 ath10k_tx_h_get_tid(struct iee
+ return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
+ }
+
+-static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar,
+- struct ieee80211_tx_info *info)
++static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, struct ieee80211_vif *vif)
+ {
+- if (info->control.vif)
+- return ath10k_vif_to_arvif(info->control.vif)->vdev_id;
++ if (vif)
++ return ath10k_vif_to_arvif(vif)->vdev_id;
+
+ if (ar->monitor_started)
+ return ar->monitor_vdev_id;
+
+- ath10k_warn("failed to resolve vdev id\n");
++ ath10k_warn(ar, "failed to resolve vdev id\n");
+ return 0;
+ }
+
+-/*
+- * Frames sent to the FW have to be in "Native Wifi" format.
+- * Strip the QoS field from the 802.11 header.
++/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
++ * Control in the header.
+ */
+-static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
+- struct ieee80211_tx_control *control,
+- struct sk_buff *skb)
++static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
+ {
+ struct ieee80211_hdr *hdr = (void *)skb->data;
++ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+ u8 *qos_ctl;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+@@ -1880,68 +2312,24 @@ static void ath10k_tx_h_qos_workaround(s
+ memmove(skb->data + IEEE80211_QOS_CTL_LEN,
+ skb->data, (void *)qos_ctl - (void *)skb->data);
+ skb_pull(skb, IEEE80211_QOS_CTL_LEN);
+-}
+-
+-static void ath10k_tx_wep_key_work(struct work_struct *work)
+-{
+- struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+- wep_key_work);
+- int ret, keyidx = arvif->def_wep_key_newidx;
+-
+- if (arvif->def_wep_key_idx == keyidx)
+- return;
+-
+- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
+- arvif->vdev_id, keyidx);
+
+- ret = ath10k_wmi_vdev_set_param(arvif->ar,
+- arvif->vdev_id,
+- arvif->ar->wmi.vdev_param->def_keyid,
+- keyidx);
+- if (ret) {
+- ath10k_warn("failed to update wep key index for vdev %d: %d\n",
+- arvif->vdev_id,
+- ret);
+- return;
++ /* Fw/Hw generates a corrupted QoS Control Field for QoS NullFunc
++ * frames. Powersave is handled by the fw/hw so QoS NullFunc frames are
|
|
++ * used only for CQM purposes (e.g. hostapd station keepalive ping) so
|
|
++ * it is safe to downgrade to NullFunc.
|
|
++ */
|
|
++ hdr = (void *)skb->data;
|
|
++ if (ieee80211_is_qos_nullfunc(hdr->frame_control)) {
|
|
++ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
|
|
++ cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
|
|
+ }
|
|
+-
|
|
+- arvif->def_wep_key_idx = keyidx;
|
|
+ }
|
|
+
|
|
+-static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
|
|
+-{
|
|
+- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
|
+- struct ieee80211_vif *vif = info->control.vif;
|
|
+- struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
|
|
+- struct ath10k *ar = arvif->ar;
|
|
+- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
|
+- struct ieee80211_key_conf *key = info->control.hw_key;
|
|
+-
|
|
+- if (!ieee80211_has_protected(hdr->frame_control))
|
|
+- return;
|
|
+-
|
|
+- if (!key)
|
|
+- return;
|
|
+-
|
|
+- if (key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
|
|
+- key->cipher != WLAN_CIPHER_SUITE_WEP104)
|
|
+- return;
|
|
+-
|
|
+- if (key->keyidx == arvif->def_wep_key_idx)
|
|
+- return;
|
|
+-
|
|
+- /* FIXME: Most likely a few frames will be TXed with an old key. Simply
|
|
+- * queueing frames until key index is updated is not an option because
|
|
+- * sk_buff may need more processing to be done, e.g. offchannel */
|
|
+- arvif->def_wep_key_newidx = key->keyidx;
|
|
+- ieee80211_queue_work(ar->hw, &arvif->wep_key_work);
|
|
+-}
|
|
+-
|
|
+-static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
|
|
++static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
|
|
++ struct ieee80211_vif *vif,
|
|
++ struct sk_buff *skb)
|
|
+ {
|
|
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
|
+- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
|
+- struct ieee80211_vif *vif = info->control.vif;
|
|
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
|
|
+
|
|
+ /* This is case only for P2P_GO */
|
|
+@@ -1961,6 +2349,18 @@ static void ath10k_tx_h_add_p2p_noa_ie(s
|
|
+ }
|
|
+ }
|
|
+
|
|
++static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar)
|
|
++{
|
|
++ /* FIXME: Not really sure since when the behaviour changed. At some
|
|
++ * point new firmware stopped requiring creation of peer entries for
|
|
++ * offchannel tx (and actually creating them causes issues with wmi-htc
|
|
++ * tx credit replenishment and reliability). Assuming it's at least 3.4
|
|
++ * because that's when the `freq` was introduced to TX_FRM HTT command.
|
|
++ */
|
|
++ return !(ar->htt.target_version_major >= 3 &&
|
|
++ ar->htt.target_version_minor >= 4);
|
|
++}
|
|
++
+ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
+ {
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+@@ -1977,7 +2377,7 @@ static void ath10k_tx_htt(struct ath10k
+ ar->fw_features)) {
+ if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
+ ATH10K_MAX_NUM_MGMT_PENDING) {
+- ath10k_warn("reached WMI management tranmist queue limit\n");
++ ath10k_warn(ar, "reached WMI management transmit queue limit\n");
+ ret = -EBUSY;
+ goto exit;
+ }
+@@ -2001,7 +2401,8 @@ static void ath10k_tx_htt(struct ath10k
+
+ exit:
+ if (ret) {
+- ath10k_warn("failed to transmit packet, dropping: %d\n", ret);
++ ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
++ ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+ }
+@@ -2043,7 +2444,7 @@ void ath10k_offchan_tx_work(struct work_
+
+ mutex_lock(&ar->conf_mutex);
+
+- ath10k_dbg(ATH10K_DBG_MAC, "mac offchannel skb %p\n",
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n",
+ skb);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+@@ -2056,13 +2457,13 @@ void ath10k_offchan_tx_work(struct work_
+
+ if (peer)
+ /* FIXME: should this use ath10k_warn()? */
+- ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
+ peer_addr, vdev_id);
+
+ if (!peer) {
+ ret = ath10k_peer_create(ar, vdev_id, peer_addr);
+ if (ret)
+- ath10k_warn("failed to create peer %pM on vdev %d: %d\n",
++ ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
+ peer_addr, vdev_id, ret);
+ }
+
+@@ -2075,14 +2476,14 @@ void ath10k_offchan_tx_work(struct work_
+
+ ret = wait_for_completion_timeout(&ar->offchan_tx_completed,
+ 3 * HZ);
+- if (ret <= 0)
+- ath10k_warn("timed out waiting for offchannel skb %p\n",
++ if (ret == 0)
++ ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
+ skb);
+
+ if (!peer) {
+ ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
+ if (ret)
+- ath10k_warn("failed to delete peer %pM on vdev %d: %d\n",
++ ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
+ peer_addr, vdev_id, ret);
+ }
+
+@@ -2116,7 +2517,7 @@ void ath10k_mgmt_over_wmi_tx_work(struct
+
+ ret = ath10k_wmi_mgmt_tx(ar, skb);
+ if (ret) {
+- ath10k_warn("failed to transmit management frame via WMI: %d\n",
++ ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
+ ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+@@ -2127,34 +2528,41 @@ void ath10k_mgmt_over_wmi_tx_work(struct
+ /* Scanning */
+ /************/
+
+-/*
+- * This gets called if we dont get a heart-beat during scan.
+- * This may indicate the FW has hung and we need to abort the
+- * scan manually to prevent cancel_hw_scan() from deadlocking
+- */
+-void ath10k_reset_scan(unsigned long ptr)
++void __ath10k_scan_finish(struct ath10k *ar)
+ {
+- struct ath10k *ar = (struct ath10k *)ptr;
+-
+- spin_lock_bh(&ar->data_lock);
+- if (!ar->scan.in_progress) {
+- spin_unlock_bh(&ar->data_lock);
+- return;
+- }
++ lockdep_assert_held(&ar->data_lock);
+
+- ath10k_warn("scan timed out, firmware problem?\n");
+-
+- if (ar->scan.is_roc)
+- ieee80211_remain_on_channel_expired(ar->hw);
+- else
+- ieee80211_scan_completed(ar->hw, 1 /* aborted */);
++ switch (ar->scan.state) {
++ case ATH10K_SCAN_IDLE:
++ break;
++ case ATH10K_SCAN_RUNNING:
++ if (ar->scan.is_roc)
++ ieee80211_remain_on_channel_expired(ar->hw);
++ /* fall through */
++ case ATH10K_SCAN_ABORTING:
++ if (!ar->scan.is_roc)
++ ieee80211_scan_completed(ar->hw,
++ (ar->scan.state ==
++ ATH10K_SCAN_ABORTING));
++ /* fall through */
++ case ATH10K_SCAN_STARTING:
++ ar->scan.state = ATH10K_SCAN_IDLE;
++ ar->scan_channel = NULL;
++ ath10k_offchan_tx_purge(ar);
++ cancel_delayed_work(&ar->scan.timeout);
++ complete_all(&ar->scan.completed);
++ break;
++ }
++}
+
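The fall-through switch above runs every cleanup step at or below the current scan state. Summarised, assuming the four-state enum this patch introduces:

/*
 * RUNNING:  notify mac80211 of remain-on-channel expiry (roc case), fall through
 * ABORTING: report scan completion, aborted = (state == ABORTING), fall through
 * STARTING: reset state to IDLE, clear scan_channel, purge offchannel tx,
 *           cancel the timeout worker and complete ar->scan.completed
 * IDLE:     nothing to do
 */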
+- ar->scan.in_progress = false;
+- complete_all(&ar->scan.completed);
++void ath10k_scan_finish(struct ath10k *ar)
++{
++ spin_lock_bh(&ar->data_lock);
++ __ath10k_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+-static int ath10k_abort_scan(struct ath10k *ar)
++static int ath10k_scan_stop(struct ath10k *ar)
+ {
+ struct wmi_stop_scan_arg arg = {
+ .req_id = 1, /* FIXME */
+@@ -2165,47 +2573,79 @@ static int ath10k_abort_scan(struct ath1
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+- del_timer_sync(&ar->scan.timeout);
++ ret = ath10k_wmi_stop_scan(ar, &arg);
++ if (ret) {
++ ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
++ goto out;
++ }
+
+- spin_lock_bh(&ar->data_lock);
+- if (!ar->scan.in_progress) {
+- spin_unlock_bh(&ar->data_lock);
+- return 0;
++ ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
++ if (ret == 0) {
++ ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
++ ret = -ETIMEDOUT;
++ } else if (ret > 0) {
++ ret = 0;
+ }
+
+- ar->scan.aborting = true;
++out:
++ /* Scan state should be updated upon scan completion but in case
++ * firmware fails to deliver the event (for whatever reason) it is
++ * desired to clean up scan state anyway. Firmware may have just
++ * dropped the scan completion event delivery due to transport pipe
++ * being overflown with data and/or it can recover on its own before
++ * next scan request is submitted.
++ */
++ spin_lock_bh(&ar->data_lock);
++ if (ar->scan.state != ATH10K_SCAN_IDLE)
++ __ath10k_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+- ret = ath10k_wmi_stop_scan(ar, &arg);
+- if (ret) {
+- ath10k_warn("failed to stop wmi scan: %d\n", ret);
+- spin_lock_bh(&ar->data_lock);
+- ar->scan.in_progress = false;
+- ath10k_offchan_tx_purge(ar);
+- spin_unlock_bh(&ar->data_lock);
+- return -EIO;
+- }
++ return ret;
++}
+
+- ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
+- if (ret == 0)
+- ath10k_warn("timed out while waiting for scan to stop\n");
++static void ath10k_scan_abort(struct ath10k *ar)
++{
++ int ret;
+
+- /* scan completion may be done right after we timeout here, so let's
+- * check the in_progress and tell mac80211 scan is completed. if we
+- * don't do that and FW fails to send us scan completion indication
+- * then userspace won't be able to scan anymore */
+- ret = 0;
++ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+- if (ar->scan.in_progress) {
+- ath10k_warn("failed to stop scan, it's still in progress\n");
+- ar->scan.in_progress = false;
+- ath10k_offchan_tx_purge(ar);
+- ret = -ETIMEDOUT;
++
++ switch (ar->scan.state) {
++ case ATH10K_SCAN_IDLE:
++ /* This can happen if timeout worker kicked in and called
++ * abortion while scan completion was being processed.
++ */
++ break;
++ case ATH10K_SCAN_STARTING:
++ case ATH10K_SCAN_ABORTING:
++ ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n",
++ ath10k_scan_state_str(ar->scan.state),
++ ar->scan.state);
++ break;
++ case ATH10K_SCAN_RUNNING:
++ ar->scan.state = ATH10K_SCAN_ABORTING;
++ spin_unlock_bh(&ar->data_lock);
++
++ ret = ath10k_scan_stop(ar);
++ if (ret)
++ ath10k_warn(ar, "failed to abort scan: %d\n", ret);
++
++ spin_lock_bh(&ar->data_lock);
++ break;
+ }
++
+ spin_unlock_bh(&ar->data_lock);
++}
+
+- return ret;
++void ath10k_scan_timeout_work(struct work_struct *work)
++{
++ struct ath10k *ar = container_of(work, struct ath10k,
++ scan.timeout.work);
++
++ mutex_lock(&ar->conf_mutex);
++ ath10k_scan_abort(ar);
++ mutex_unlock(&ar->conf_mutex);
+ }
+
+ static int ath10k_start_scan(struct ath10k *ar,
+@@ -2221,17 +2661,27 @@ static int ath10k_start_scan(struct ath1
+
+ ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
+ if (ret == 0) {
+- ath10k_abort_scan(ar);
+- return ret;
++ ret = ath10k_scan_stop(ar);
++ if (ret)
++ ath10k_warn(ar, "failed to stop scan: %d\n", ret);
++
++ return -ETIMEDOUT;
++ }
++
++ /* If we failed to start the scan, return error code at
++ * this point. This is probably due to some issue in the
++ * firmware, but no need to wedge the driver due to that...
++ */
++ spin_lock_bh(&ar->data_lock);
++ if (ar->scan.state == ATH10K_SCAN_IDLE) {
++ spin_unlock_bh(&ar->data_lock);
++ return -EINVAL;
+ }
++ spin_unlock_bh(&ar->data_lock);
+
+- /* the scan can complete earlier, before we even
+- * start the timer. in that case the timer handler
+- * checks ar->scan.in_progress and bails out if its
+- * false. Add a 200ms margin to account event/command
+- * processing. */
+- mod_timer(&ar->scan.timeout, jiffies +
+- msecs_to_jiffies(arg->max_scan_time+200));
++ /* Add a 200ms margin to account for event/command processing */
++ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
++ msecs_to_jiffies(arg->max_scan_time+200));
+ return 0;
+ }
+
+@@ -2243,90 +2693,163 @@ static void ath10k_tx(struct ieee80211_h
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+ {
++ struct ath10k *ar = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
++ struct ieee80211_vif *vif = info->control.vif;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+- struct ath10k *ar = hw->priv;
+- u8 tid, vdev_id;
+
+ /* We should disable CCK RATE due to P2P */
+ if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+- ath10k_dbg(ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+
+- /* we must calculate tid before we apply qos workaround
+- * as we'd lose the qos control field */
+- tid = ath10k_tx_h_get_tid(hdr);
+- vdev_id = ath10k_tx_h_get_vdev_id(ar, info);
++ ATH10K_SKB_CB(skb)->htt.is_offchan = false;
++ ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
++ ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif);
+
+ /* it makes no sense to process injected frames like that */
+- if (info->control.vif &&
+- info->control.vif->type != NL80211_IFTYPE_MONITOR) {
+- ath10k_tx_h_qos_workaround(hw, control, skb);
+- ath10k_tx_h_update_wep_key(skb);
+- ath10k_tx_h_add_p2p_noa_ie(ar, skb);
+- ath10k_tx_h_seq_no(skb);
++ if (vif && vif->type != NL80211_IFTYPE_MONITOR) {
++ ath10k_tx_h_nwifi(hw, skb);
++ ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
++ ath10k_tx_h_seq_no(vif, skb);
+ }
+
+- ATH10K_SKB_CB(skb)->vdev_id = vdev_id;
+- ATH10K_SKB_CB(skb)->htt.is_offchan = false;
+- ATH10K_SKB_CB(skb)->htt.tid = tid;
+-
+ if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+ spin_lock_bh(&ar->data_lock);
+- ATH10K_SKB_CB(skb)->htt.is_offchan = true;
++ ATH10K_SKB_CB(skb)->htt.freq = ar->scan.roc_freq;
+ ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id;
+ spin_unlock_bh(&ar->data_lock);
+
+- ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb);
++ if (ath10k_mac_need_offchan_tx_work(ar)) {
++ ATH10K_SKB_CB(skb)->htt.freq = 0;
++ ATH10K_SKB_CB(skb)->htt.is_offchan = true;
+
+- skb_queue_tail(&ar->offchan_tx_queue, skb);
+- ieee80211_queue_work(hw, &ar->offchan_tx_work);
+- return;
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
++ skb);
++
++ skb_queue_tail(&ar->offchan_tx_queue, skb);
++ ieee80211_queue_work(hw, &ar->offchan_tx_work);
++ return;
++ }
+ }
+
+ ath10k_tx_htt(ar, skb);
+ }
+
+-/*
+- * Initialize various parameters with default vaules.
+- */
++/* Must not be called with conf_mutex held as workers can use that also. */
++void ath10k_drain_tx(struct ath10k *ar)
++{
++ /* make sure rcu-protected mac80211 tx path itself is drained */
++ synchronize_net();
++
++ ath10k_offchan_tx_purge(ar);
++ ath10k_mgmt_over_wmi_tx_purge(ar);
++
++ cancel_work_sync(&ar->offchan_tx_work);
++ cancel_work_sync(&ar->wmi_mgmt_tx_work);
++}
++
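The ordering inside ath10k_drain_tx() matters: synchronize_net() first waits out in-flight rcu-protected mac80211 tx paths, the purges then empty the queues, and cancel_work_sync() last ensures the workers themselves are idle (which is also why conf_mutex must not be held here -- the workers may take it). Condensed:

/*
 * 1. synchronize_net()   - wait for rcu-protected mac80211 tx paths to finish
 * 2. *_purge()           - drop anything already queued (offchan / WMI mgmt tx)
 * 3. cancel_work_sync()  - make sure the tx workers are no longer running
 */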
+ void ath10k_halt(struct ath10k *ar)
+ {
+ struct ath10k_vif *arvif;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+- if (ath10k_monitor_is_enabled(ar)) {
+- clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+- ar->promisc = false;
+- ar->monitor = false;
++ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
++ ar->filter_flags = 0;
++ ar->monitor = false;
++
++ if (ar->monitor_started)
+ ath10k_monitor_stop(ar);
+- }
+
+- del_timer_sync(&ar->scan.timeout);
+- ath10k_offchan_tx_purge(ar);
+- ath10k_mgmt_over_wmi_tx_purge(ar);
++ ar->monitor_started = false;
++
++ ath10k_scan_finish(ar);
+ ath10k_peer_cleanup_all(ar);
+ ath10k_core_stop(ar);
+ ath10k_hif_power_down(ar);
+
+ spin_lock_bh(&ar->data_lock);
+- if (ar->scan.in_progress) {
+- del_timer(&ar->scan.timeout);
+- ar->scan.in_progress = false;
+- ieee80211_scan_completed(ar->hw, true);
++ list_for_each_entry(arvif, &ar->arvifs, list)
++ ath10k_mac_vif_beacon_cleanup(arvif);
++ spin_unlock_bh(&ar->data_lock);
++}
++
++static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
++{
++ struct ath10k *ar = hw->priv;
++
++ mutex_lock(&ar->conf_mutex);
++
++ if (ar->cfg_tx_chainmask) {
++ *tx_ant = ar->cfg_tx_chainmask;
++ *rx_ant = ar->cfg_rx_chainmask;
++ } else {
++ *tx_ant = ar->supp_tx_chainmask;
++ *rx_ant = ar->supp_rx_chainmask;
+ }
+
+- list_for_each_entry(arvif, &ar->arvifs, list) {
+- if (!arvif->beacon)
+- continue;
++ mutex_unlock(&ar->conf_mutex);
+
+- dma_unmap_single(arvif->ar->dev,
+- ATH10K_SKB_CB(arvif->beacon)->paddr,
+- arvif->beacon->len, DMA_TO_DEVICE);
+- dev_kfree_skb_any(arvif->beacon);
+- arvif->beacon = NULL;
++ return 0;
++}
++
++static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
++{
++ /* It is not clear that allowing gaps in chainmask
++ * is helpful. Probably it will not do what user
++ * is hoping for, so warn in that case.
++ */
++ if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
++ return;
++
++ ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
++ dbg, cm);
++}
++
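The whitelist above (15, 7, 3, 1, 0) is exactly the set of chainmasks with no gaps, i.e. 0 or values of the form 2^n - 1. An equivalent gap-free test, as a sketch only:

/* Sketch: cm is gap-free iff it is 0 or of the form 2^n - 1,
 * which is equivalent to (cm & (cm + 1)) == 0.
 * E.g. 7 & 8 == 0 (ok), but 5 & 6 == 4 (gap at bit 1, warn).
 */
static bool ath10k_chainmask_is_contiguous(u32 cm)
{
	return (cm & (cm + 1)) == 0;
}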
++static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
++{
++ int ret;
++
++ lockdep_assert_held(&ar->conf_mutex);
++
++ ath10k_check_chain_mask(ar, tx_ant, "tx");
++ ath10k_check_chain_mask(ar, rx_ant, "rx");
++
++ ar->cfg_tx_chainmask = tx_ant;
++ ar->cfg_rx_chainmask = rx_ant;
++
++ if ((ar->state != ATH10K_STATE_ON) &&
++ (ar->state != ATH10K_STATE_RESTARTED))
++ return 0;
++
++ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
++ tx_ant);
++ if (ret) {
++ ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
++ ret, tx_ant);
++ return ret;
+ }
+- spin_unlock_bh(&ar->data_lock);
++
++ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
++ rx_ant);
++ if (ret) {
++ ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
++ ret, rx_ant);
++ return ret;
++ }
++
++ return 0;
++}
++
++static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
++{
++ struct ath10k *ar = hw->priv;
++ int ret;
++
++ mutex_lock(&ar->conf_mutex);
++ ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
++ mutex_unlock(&ar->conf_mutex);
++ return ret;
+ }
+
+ static int ath10k_start(struct ieee80211_hw *hw)
+@@ -2334,41 +2857,61 @@ static int ath10k_start(struct ieee80211
+ struct ath10k *ar = hw->priv;
+ int ret = 0;
+
++ /*
++ * This makes sense only when restarting hw. It is harmless to call
++ * unconditionally. This is necessary to make sure no HTT/WMI tx
++ * commands will be submitted while restarting.
++ */
++ ath10k_drain_tx(ar);
++
+ mutex_lock(&ar->conf_mutex);
+
+- if (ar->state != ATH10K_STATE_OFF &&
+- ar->state != ATH10K_STATE_RESTARTING) {
++ switch (ar->state) {
++ case ATH10K_STATE_OFF:
++ ar->state = ATH10K_STATE_ON;
++ break;
++ case ATH10K_STATE_RESTARTING:
++ ath10k_halt(ar);
++ ar->state = ATH10K_STATE_RESTARTED;
++ break;
++ case ATH10K_STATE_ON:
++ case ATH10K_STATE_RESTARTED:
++ case ATH10K_STATE_WEDGED:
++ WARN_ON(1);
+ ret = -EINVAL;
+- goto exit;
++ goto err;
++ case ATH10K_STATE_UTF:
++ ret = -EBUSY;
++ goto err;
+ }
+
+ ret = ath10k_hif_power_up(ar);
+ if (ret) {
+- ath10k_err("Could not init hif: %d\n", ret);
+- ar->state = ATH10K_STATE_OFF;
+- goto exit;
++ ath10k_err(ar, "Could not init hif: %d\n", ret);
++ goto err_off;
+ }
+
+- ret = ath10k_core_start(ar);
++ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+ if (ret) {
+- ath10k_err("Could not init core: %d\n", ret);
+- ath10k_hif_power_down(ar);
+- ar->state = ATH10K_STATE_OFF;
+- goto exit;
++ ath10k_err(ar, "Could not init core: %d\n", ret);
++ goto err_power_down;
+ }
+
+- if (ar->state == ATH10K_STATE_OFF)
+- ar->state = ATH10K_STATE_ON;
+- else if (ar->state == ATH10K_STATE_RESTARTING)
+- ar->state = ATH10K_STATE_RESTARTED;
+-
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
+- if (ret)
+- ath10k_warn("failed to enable PMF QOS: %d\n", ret);
++ if (ret) {
++ ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
++ goto err_core_stop;
++ }
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1);
+- if (ret)
+- ath10k_warn("failed to enable dynamic BW: %d\n", ret);
++ if (ret) {
++ ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
++ goto err_core_stop;
++ }
++
++ if (ar->cfg_tx_chainmask)
++ __ath10k_set_antenna(ar, ar->cfg_tx_chainmask,
++ ar->cfg_rx_chainmask);
+
+ /*
+ * By default FW set ARP frames ac to voice (6). In that case ARP
+@@ -2382,16 +2925,29 @@ static int ath10k_start(struct ieee80211
+ ret = ath10k_wmi_pdev_set_param(ar,
+ ar->wmi.pdev_param->arp_ac_override, 0);
+ if (ret) {
+- ath10k_warn("failed to set arp ac override parameter: %d\n",
++ ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
+ ret);
+- goto exit;
++ goto err_core_stop;
+ }
+
+ ar->num_started_vdevs = 0;
+ ath10k_regd_update(ar);
+- ret = 0;
+
+-exit:
++ ath10k_spectral_start(ar);
++
++ mutex_unlock(&ar->conf_mutex);
++ return 0;
++
++err_core_stop:
++ ath10k_core_stop(ar);
++
++err_power_down:
++ ath10k_hif_power_down(ar);
++
++err_off:
++ ar->state = ATH10K_STATE_OFF;
++
++err:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+ }
+@@ -2400,19 +2956,16 @@ static void ath10k_stop(struct ieee80211
+ {
+ struct ath10k *ar = hw->priv;
+
++ ath10k_drain_tx(ar);
++
+ mutex_lock(&ar->conf_mutex);
+- if (ar->state == ATH10K_STATE_ON ||
+- ar->state == ATH10K_STATE_RESTARTED ||
+- ar->state == ATH10K_STATE_WEDGED)
++ if (ar->state != ATH10K_STATE_OFF) {
+ ath10k_halt(ar);
+-
+- ar->state = ATH10K_STATE_OFF;
++ ar->state = ATH10K_STATE_OFF;
++ }
+ mutex_unlock(&ar->conf_mutex);
+
+- ath10k_mgmt_over_wmi_tx_purge(ar);
+-
+- cancel_work_sync(&ar->offchan_tx_work);
+- cancel_work_sync(&ar->wmi_mgmt_tx_work);
++ cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->restart_work);
+ }
+
+@@ -2426,7 +2979,7 @@ static int ath10k_config_ps(struct ath10
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath10k_mac_vif_setup_ps(arvif);
+ if (ret) {
+- ath10k_warn("failed to setup powersave: %d\n", ret);
++ ath10k_warn(ar, "failed to setup powersave: %d\n", ret);
+ break;
+ }
+ }
+@@ -2464,7 +3017,7 @@ static void ath10k_config_chan(struct at
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+- ath10k_dbg(ATH10K_DBG_MAC,
++ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac config channel to %dMHz (cf1 %dMHz cf2 %dMHz width %s)\n",
+ ar->chandef.chan->center_freq,
+ ar->chandef.center_freq1,
+@@ -2474,24 +3027,27 @@ static void ath10k_config_chan(struct at
+ /* First stop monitor interface. Some FW versions crash if there's a
+ * lone monitor interface. */
+ if (ar->monitor_started)
+- ath10k_monitor_vdev_stop(ar);
++ ath10k_monitor_stop(ar);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->is_started)
+ continue;
+
++ if (!arvif->is_up)
++ continue;
++
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+ continue;
+
+- ret = ath10k_vdev_stop(arvif);
++ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret) {
+- ath10k_warn("failed to stop vdev %d: %d\n",
++ ath10k_warn(ar, "failed to down vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+- /* all vdevs are now stopped - now attempt to restart them */
++ /* all vdevs are downed now - attempt to restart and re-up them */
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->is_started)
+@@ -2500,9 +3056,9 @@ static void ath10k_config_chan(struct at
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+ continue;
+
+- ret = ath10k_vdev_start(arvif);
++ ret = ath10k_vdev_restart(arvif);
+ if (ret) {
+- ath10k_warn("failed to start vdev %d: %d\n",
++ ath10k_warn(ar, "failed to restart vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+@@ -2513,14 +3069,70 @@ static void ath10k_config_chan(struct at
+ ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+- ath10k_warn("failed to bring vdev up %d: %d\n",
++ ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+- if (ath10k_monitor_is_enabled(ar))
+- ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
++ ath10k_monitor_recalc(ar);
++}
++
++static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
++{
++ int ret;
++ u32 param;
++
++ lockdep_assert_held(&ar->conf_mutex);
++
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
++
++ param = ar->wmi.pdev_param->txpower_limit2g;
++ ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
++ if (ret) {
++ ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
++ txpower, ret);
++ return ret;
++ }
++
++ param = ar->wmi.pdev_param->txpower_limit5g;
++ ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
++ if (ret) {
++ ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
++ txpower, ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static int ath10k_mac_txpower_recalc(struct ath10k *ar)
++{
++ struct ath10k_vif *arvif;
++ int ret, txpower = -1;
++
++ lockdep_assert_held(&ar->conf_mutex);
++
++ list_for_each_entry(arvif, &ar->arvifs, list) {
++ WARN_ON(arvif->txpower < 0);
++
++ if (txpower == -1)
++ txpower = arvif->txpower;
++ else
++ txpower = min(txpower, arvif->txpower);
++ }
++
++ if (WARN_ON(txpower == -1))
++ return -EINVAL;
++
++ ret = ath10k_mac_txpower_setup(ar, txpower);
++ if (ret) {
++ ath10k_warn(ar, "failed to setup tx power %d: %d\n",
++ txpower, ret);
++ return ret;
++ }
++
++ return 0;
+ }
+
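Note the txpower * 2 scaling in ath10k_mac_txpower_setup(): the doubling suggests the firmware limit parameters are expressed in half-dBm steps (an inference from the scaling here, not something the patch states). ath10k_mac_txpower_recalc() then applies the minimum across all active vifs:

/* Sketch: with vifs configured at 20 dBm and 17 dBm, the pdev limit
 * becomes min(20, 17) = 17 dBm, passed to firmware as 34 (half-dBm).
 */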
+ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
+@@ -2528,12 +3140,11 @@ static int ath10k_config(struct ieee8021
+ struct ath10k *ar = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ int ret = 0;
+- u32 param;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+- ath10k_dbg(ATH10K_DBG_MAC,
++ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac config channel %dMHz flags 0x%x radar %d\n",
+ conf->chandef.chan->center_freq,
+ conf->chandef.chan->flags,
+@@ -2552,48 +3163,31 @@ static int ath10k_config(struct ieee8021
+ }
+ }
+
+- if (changed & IEEE80211_CONF_CHANGE_POWER) {
+- ath10k_dbg(ATH10K_DBG_MAC, "mac config power %d\n",
+- hw->conf.power_level);
+-
+- param = ar->wmi.pdev_param->txpower_limit2g;
+- ret = ath10k_wmi_pdev_set_param(ar, param,
+- hw->conf.power_level * 2);
+- if (ret)
+- ath10k_warn("failed to set 2g txpower %d: %d\n",
+- hw->conf.power_level, ret);
+-
+- param = ar->wmi.pdev_param->txpower_limit5g;
+- ret = ath10k_wmi_pdev_set_param(ar, param,
+- hw->conf.power_level * 2);
+- if (ret)
+- ath10k_warn("failed to set 5g txpower %d: %d\n",
+- hw->conf.power_level, ret);
+- }
+-
+ if (changed & IEEE80211_CONF_CHANGE_PS)
+ ath10k_config_ps(ar);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+- if (conf->flags & IEEE80211_CONF_MONITOR && !ar->monitor) {
+- ar->monitor = true;
+- ret = ath10k_monitor_start(ar);
+- if (ret) {
+- ath10k_warn("failed to start monitor (config): %d\n",
+- ret);
+- ar->monitor = false;
+- }
+- } else if (!(conf->flags & IEEE80211_CONF_MONITOR) &&
+- ar->monitor) {
+- ar->monitor = false;
+- ath10k_monitor_stop(ar);
+- }
++ ar->monitor = conf->flags & IEEE80211_CONF_MONITOR;
++ ret = ath10k_monitor_recalc(ar);
++ if (ret)
++ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+ }
+
++static u32 get_nss_from_chainmask(u16 chain_mask)
++{
++ if ((chain_mask & 0x15) == 0x15)
++ return 4;
++ else if ((chain_mask & 0x7) == 0x7)
++ return 3;
++ else if ((chain_mask & 0x3) == 0x3)
++ return 2;
++ return 1;
++}
++
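One caveat in get_nss_from_chainmask(): a full 4-chain mask is 0xf (binary 1111), while the first test compares against 0x15 (binary 10101), so 0xf never matches it and falls through to return 3. A sketch of what was presumably intended (illustration only, not part of the patch):

static u32 get_nss_from_chainmask_sketch(u16 chain_mask)
{
	if ((chain_mask & 0xf) == 0xf)	/* all four chains set */
		return 4;
	else if ((chain_mask & 0x7) == 0x7)
		return 3;
	else if ((chain_mask & 0x3) == 0x3)
		return 2;
	return 1;
}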
+ /*
+ * TODO:
+ * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
+@@ -2619,22 +3213,26 @@ static int ath10k_add_interface(struct i
+ arvif->ar = ar;
+ arvif->vif = vif;
+
+- INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
+ INIT_LIST_HEAD(&arvif->list);
+
+- bit = ffs(ar->free_vdev_map);
+- if (bit == 0) {
++ if (ar->free_vdev_map == 0) {
++ ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
+ ret = -EBUSY;
+ goto err;
+ }
++ bit = __ffs64(ar->free_vdev_map);
+
+- arvif->vdev_id = bit - 1;
+- arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
++ bit, ar->free_vdev_map);
+
+- if (ar->p2p)
+- arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
++ arvif->vdev_id = bit;
++ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
+
+ switch (vif->type) {
++ case NL80211_IFTYPE_P2P_DEVICE:
++ arvif->vdev_type = WMI_VDEV_TYPE_STA;
++ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
++ break;
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_STATION:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+@@ -2658,50 +3256,98 @@ static int ath10k_add_interface(struct i
+ break;
+ }
+
+- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n",
+- arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype);
++ /* Some firmware revisions don't wait for beacon tx completion before
++ * sending another SWBA event. This could lead to hardware using old
++ * (freed) beacon data in some cases, e.g. tx credit starvation
++ * combined with missed TBTT. This is very very rare.
++ *
++ * On non-IOMMU-enabled hosts this could be a possible security issue
++ * because hw could beacon some random data on the air. On
++ * IOMMU-enabled hosts DMAR faults would occur in most cases and target
++ * device would crash.
++ *
++ * Since there are no beacon tx completions (implicit nor explicit)
++ * propagated to host the only workaround for this is to allocate a
++ * DMA-coherent buffer for a lifetime of a vif and use it for all
++ * beacon tx commands. Worst case for this approach is some beacons may
++ * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap.
++ */
++ if (vif->type == NL80211_IFTYPE_ADHOC ||
++ vif->type == NL80211_IFTYPE_AP) {
++ arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
++ IEEE80211_MAX_FRAME_LEN,
++ &arvif->beacon_paddr,
++ GFP_ATOMIC);
++ if (!arvif->beacon_buf) {
++ ret = -ENOMEM;
++ ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
++ ret);
++ goto err;
++ }
++ }
++
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
++ arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
++ arvif->beacon_buf ? "single-buf" : "per-skb");
+
+ ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
+ arvif->vdev_subtype, vif->addr);
+ if (ret) {
+- ath10k_warn("failed to create WMI vdev %i: %d\n",
++ ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+- ar->free_vdev_map &= ~BIT(arvif->vdev_id);
++ ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
+ list_add(&arvif->list, &ar->arvifs);
+
+- vdev_param = ar->wmi.vdev_param->def_keyid;
+- ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
+- arvif->def_wep_key_idx);
++ /* It makes no sense to have firmware do keepalives. mac80211 already
++ * takes care of this with idle connection polling.
++ */
++ ret = ath10k_mac_vif_disable_keepalive(arvif);
+ if (ret) {
+- ath10k_warn("failed to set vdev %i default key id: %d\n",
++ ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_delete;
+ }
+
++ arvif->def_wep_key_idx = -1;
++
+ vdev_param = ar->wmi.vdev_param->tx_encap_type;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ ATH10K_HW_TXRX_NATIVE_WIFI);
+ /* 10.X firmware does not support this VDEV parameter. Do not warn */
+ if (ret && ret != -EOPNOTSUPP) {
+- ath10k_warn("failed to set vdev %i TX encapsulation: %d\n",
++ ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_delete;
+ }
+
++ if (ar->cfg_tx_chainmask) {
++ u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
++
++ vdev_param = ar->wmi.vdev_param->nss;
++ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
++ nss);
++ if (ret) {
++ ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
++ arvif->vdev_id, ar->cfg_tx_chainmask, nss,
++ ret);
++ goto err_vdev_delete;
++ }
++ }
++
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
+ if (ret) {
+- ath10k_warn("failed to create vdev %i peer for AP: %d\n",
++ ath10k_warn(ar, "failed to create vdev %i peer for AP: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_delete;
+ }
+
+ ret = ath10k_mac_set_kickout(arvif);
+ if (ret) {
+- ath10k_warn("failed to set vdev %i kickout parameters: %d\n",
++ ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+@@ -2713,27 +3359,21 @@ static int ath10k_add_interface(struct i
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param, value);
+ if (ret) {
+- ath10k_warn("failed to set vdev %i RX wake policy: %d\n",
++ ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+
+- param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
+- value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
+- ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+- param, value);
++ ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
+ if (ret) {
+- ath10k_warn("failed to set vdev %i TX wake thresh: %d\n",
++ ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+
+- param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
+- value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
+- ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+- param, value);
++ ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
+ if (ret) {
+- ath10k_warn("failed to set vdev %i PSPOLL count: %d\n",
++ ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+@@ -2741,15 +3381,22 @@ static int ath10k_add_interface(struct i
+
+ ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
+ if (ret) {
+- ath10k_warn("failed to set rts threshold for vdev %d: %d\n",
++ ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
++ arvif->vdev_id, ret);
++ goto err_peer_delete;
++ }
++
++ ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
++ if (ret) {
++ ath10k_warn(ar, "failed to set frag threshold for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+
+- ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
++ arvif->txpower = vif->bss_conf.txpower;
++ ret = ath10k_mac_txpower_recalc(ar);
+ if (ret) {
+- ath10k_warn("failed to set frag threshold for vdev %d: %d\n",
+- arvif->vdev_id, ret);
++ ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
+ goto err_peer_delete;
+ }
+
+@@ -2762,10 +3409,16 @@ err_peer_delete:
+
+ err_vdev_delete:
+ ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
+- ar->free_vdev_map &= ~BIT(arvif->vdev_id);
++ ar->free_vdev_map |= 1LL << arvif->vdev_id;
+ list_del(&arvif->list);
+
+ err:
++ if (arvif->beacon_buf) {
++ dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
++ arvif->beacon_buf, arvif->beacon_paddr);
++ arvif->beacon_buf = NULL;
++ }
++
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+@@ -2780,38 +3433,51 @@ static void ath10k_remove_interface(stru
+
+ mutex_lock(&ar->conf_mutex);
+
+- cancel_work_sync(&arvif->wep_key_work);
+-
+ spin_lock_bh(&ar->data_lock);
+- if (arvif->beacon) {
+- dma_unmap_single(arvif->ar->dev,
+- ATH10K_SKB_CB(arvif->beacon)->paddr,
+- arvif->beacon->len, DMA_TO_DEVICE);
+- dev_kfree_skb_any(arvif->beacon);
+- arvif->beacon = NULL;
+- }
++ ath10k_mac_vif_beacon_cleanup(arvif);
+ spin_unlock_bh(&ar->data_lock);
+
+- ar->free_vdev_map |= 1 << (arvif->vdev_id);
++ ret = ath10k_spectral_vif_stop(arvif);
++ if (ret)
++ ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
++ arvif->vdev_id, ret);
++
++ ar->free_vdev_map |= 1LL << arvif->vdev_id;
+ list_del(&arvif->list);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+- ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
++ ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
++ vif->addr);
+ if (ret)
+- ath10k_warn("failed to remove peer for AP vdev %i: %d\n",
++ ath10k_warn(ar, "failed to submit AP self-peer removal on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ kfree(arvif->u.ap.noa_data);
+ }
+
+- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
+ arvif->vdev_id);
+
+ ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
+ if (ret)
+- ath10k_warn("failed to delete WMI vdev %i: %d\n",
++ ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
++ /* Some firmware revisions don't notify host about self-peer removal
++ * until after associated vdev is deleted.
++ */
++ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
++ ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
++ vif->addr);
++ if (ret)
++ ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
++ arvif->vdev_id, ret);
++
++ spin_lock_bh(&ar->data_lock);
++ ar->num_peers--;
++ spin_unlock_bh(&ar->data_lock);
++ }
++
+ ath10k_peer_cleanup(ar, arvif->vdev_id);
+
+ mutex_unlock(&ar->conf_mutex);
+@@ -2844,18 +3510,9 @@ static void ath10k_configure_filter(stru
+ *total_flags &= SUPPORTED_FILTERS;
+ ar->filter_flags = *total_flags;
+
+- if (ar->filter_flags & FIF_PROMISC_IN_BSS && !ar->promisc) {
+- ar->promisc = true;
+- ret = ath10k_monitor_start(ar);
+- if (ret) {
+- ath10k_warn("failed to start monitor (promisc): %d\n",
+- ret);
+- ar->promisc = false;
+- }
+- } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) && ar->promisc) {
+- ar->promisc = false;
+- ath10k_monitor_stop(ar);
+- }
++ ret = ath10k_monitor_recalc(ar);
++ if (ret)
++ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+
+ mutex_unlock(&ar->conf_mutex);
+ }
+@@ -2868,7 +3525,7 @@ static void ath10k_bss_info_changed(stru
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ int ret = 0;
+- u32 vdev_param, pdev_param;
++ u32 vdev_param, pdev_param, slottime, preamble;
+
+ mutex_lock(&ar->conf_mutex);
+
+@@ -2880,17 +3537,17 @@ static void ath10k_bss_info_changed(stru
+ vdev_param = ar->wmi.vdev_param->beacon_interval;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ arvif->beacon_interval);
+- ath10k_dbg(ATH10K_DBG_MAC,
++ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d beacon_interval %d\n",
+ arvif->vdev_id, arvif->beacon_interval);
+
+ if (ret)
+- ath10k_warn("failed to set beacon interval for vdev %d: %i\n",
++ ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_BEACON) {
+- ath10k_dbg(ATH10K_DBG_MAC,
++ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "vdev %d set beacon tx mode to staggered\n",
+ arvif->vdev_id);
+
+@@ -2898,14 +3555,26 @@ static void ath10k_bss_info_changed(stru
+ ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
+ WMI_BEACON_STAGGERED_MODE);
+ if (ret)
+- ath10k_warn("failed to set beacon mode for vdev %d: %i\n",
++ ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
++ arvif->vdev_id, ret);
++
++ ret = ath10k_mac_setup_bcn_tmpl(arvif);
++ if (ret)
++ ath10k_warn(ar, "failed to update beacon template: %d\n",
++ ret);
++ }
++
++ if (changed & BSS_CHANGED_AP_PROBE_RESP) {
++ ret = ath10k_mac_setup_prb_tmpl(arvif);
++ if (ret)
++ ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+- if (changed & BSS_CHANGED_BEACON_INFO) {
++ if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
+ arvif->dtim_period = info->dtim_period;
+
+- ath10k_dbg(ATH10K_DBG_MAC,
++ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d dtim_period %d\n",
+ arvif->vdev_id, arvif->dtim_period);
+
+@@ -2913,7 +3582,7 @@ static void ath10k_bss_info_changed(stru
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ arvif->dtim_period);
+ if (ret)
+- ath10k_warn("failed to set dtim period for vdev %d: %i\n",
++ ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ }
+
+@@ -2925,91 +3594,48 @@ static void ath10k_bss_info_changed(stru
+ arvif->u.ap.hidden_ssid = info->hidden_ssid;
+ }
+
+- if (changed & BSS_CHANGED_BSSID) {
+- if (!is_zero_ether_addr(info->bssid)) {
+- ath10k_dbg(ATH10K_DBG_MAC,
+- "mac vdev %d create peer %pM\n",
+- arvif->vdev_id, info->bssid);
+-
+- ret = ath10k_peer_create(ar, arvif->vdev_id,
+- info->bssid);
+- if (ret)
+- ath10k_warn("failed to add peer %pM for vdev %d when changing bssid: %i\n",
+- info->bssid, arvif->vdev_id, ret);
+-
+- if (vif->type == NL80211_IFTYPE_STATION) {
+- /*
+- * this is never erased as we it for crypto key
+- * clearing; this is FW requirement
+- */
+- memcpy(arvif->bssid, info->bssid, ETH_ALEN);
+-
+- ath10k_dbg(ATH10K_DBG_MAC,
+- "mac vdev %d start %pM\n",
+- arvif->vdev_id, info->bssid);
+-
+- ret = ath10k_vdev_start(arvif);
+- if (ret) {
+- ath10k_warn("failed to start vdev %i: %d\n",
+- arvif->vdev_id, ret);
+- goto exit;
+- }
+-
+- arvif->is_started = true;
+- }
+-
+- /*
+- * Mac80211 does not keep IBSS bssid when leaving IBSS,
+- * so driver need to store it. It is needed when leaving
+- * IBSS in order to remove BSSID peer.
+- */
+- if (vif->type == NL80211_IFTYPE_ADHOC)
+- memcpy(arvif->bssid, info->bssid,
+- ETH_ALEN);
+- }
+- }
++ if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
++ ether_addr_copy(arvif->bssid, info->bssid);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ ath10k_control_beaconing(arvif, info);
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ arvif->use_cts_prot = info->use_cts_prot;
+- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
+ arvif->vdev_id, info->use_cts_prot);
+
+ ret = ath10k_recalc_rtscts_prot(arvif);
+ if (ret)
+- ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
++ ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+- u32 slottime;
+ if (info->use_short_slot)
+ slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
+
+ else
+ slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
+
+- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
+ arvif->vdev_id, slottime);
+
+ vdev_param = ar->wmi.vdev_param->slot_time;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ slottime);
+ if (ret)
+- ath10k_warn("failed to set erp slot for vdev %d: %i\n",
++ ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+- u32 preamble;
+ if (info->use_short_preamble)
+ preamble = WMI_VDEV_PREAMBLE_SHORT;
+ else
+ preamble = WMI_VDEV_PREAMBLE_LONG;
+
+- ath10k_dbg(ATH10K_DBG_MAC,
++ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d preamble %dn",
+ arvif->vdev_id, preamble);
+
+@@ -3017,16 +3643,44 @@ static void ath10k_bss_info_changed(stru
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ preamble);
+ if (ret)
+- ath10k_warn("failed to set preamble for vdev %d: %i\n",
++ ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+- if (info->assoc)
++ if (info->assoc) {
++ /* Workaround: Make sure monitor vdev is not running
++ * when associating to prevent some firmware revisions
++ * (e.g. 10.1 and 10.2) from crashing.
++ */
++ if (ar->monitor_started)
++ ath10k_monitor_stop(ar);
+ ath10k_bss_assoc(hw, vif, info);
++ ath10k_monitor_recalc(ar);
++ } else {
++ ath10k_bss_disassoc(hw, vif);
++ }
++ }
++
++ if (changed & BSS_CHANGED_TXPOWER) {
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
++ arvif->vdev_id, info->txpower);
++
++ arvif->txpower = info->txpower;
++ ret = ath10k_mac_txpower_recalc(ar);
++ if (ret)
++ ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
++ }
++
++ if (changed & BSS_CHANGED_PS) {
++ arvif->ps = vif->bss_conf.ps;
++
++ ret = ath10k_config_ps(ar);
++ if (ret)
++ ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
++ arvif->vdev_id, ret);
+ }
+
+-exit:
+ mutex_unlock(&ar->conf_mutex);
+ }
+
|
|
+@@ -3043,20 +3697,26 @@ static int ath10k_hw_scan(struct ieee802
|
|
+ mutex_lock(&ar->conf_mutex);
|
|
+
|
|
+ spin_lock_bh(&ar->data_lock);
|
|
+- if (ar->scan.in_progress) {
|
|
+- spin_unlock_bh(&ar->data_lock);
|
|
++ switch (ar->scan.state) {
|
|
++ case ATH10K_SCAN_IDLE:
|
|
++ reinit_completion(&ar->scan.started);
|
|
++ reinit_completion(&ar->scan.completed);
|
|
++ ar->scan.state = ATH10K_SCAN_STARTING;
|
|
++ ar->scan.is_roc = false;
|
|
++ ar->scan.vdev_id = arvif->vdev_id;
|
|
++ ret = 0;
|
|
++ break;
|
|
++ case ATH10K_SCAN_STARTING:
|
|
++ case ATH10K_SCAN_RUNNING:
|
|
++ case ATH10K_SCAN_ABORTING:
|
|
+ ret = -EBUSY;
|
|
+- goto exit;
|
|
++ break;
|
|
+ }
|
|
+-
|
|
+- reinit_completion(&ar->scan.started);
|
|
+- reinit_completion(&ar->scan.completed);
|
|
+- ar->scan.in_progress = true;
|
|
+- ar->scan.aborting = false;
|
|
+- ar->scan.is_roc = false;
|
|
+- ar->scan.vdev_id = arvif->vdev_id;
|
|
+ spin_unlock_bh(&ar->data_lock);
|
|
+
|
|
++ if (ret)
|
|
++ goto exit;
|
|
++
|
|
+ memset(&arg, 0, sizeof(arg));
|
|
+ ath10k_wmi_start_scan_init(ar, &arg);
|
|
+ arg.vdev_id = arvif->vdev_id;
|
|
+@@ -3088,9 +3748,9 @@ static int ath10k_hw_scan(struct ieee802
|
|
+
|
|
+ ret = ath10k_start_scan(ar, &arg);
|
|
+ if (ret) {
|
|
+- ath10k_warn("failed to start hw scan: %d\n", ret);
|
|
++ ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
|
|
+ spin_lock_bh(&ar->data_lock);
|
|
+- ar->scan.in_progress = false;
|
|
++ ar->scan.state = ATH10K_SCAN_IDLE;
|
|
+ spin_unlock_bh(&ar->data_lock);
|
|
+ }
|
|
+
|
|
+@@ -3103,15 +3763,12 @@ static void ath10k_cancel_hw_scan(struct
|
|
+ struct ieee80211_vif *vif)
|
|
+ {
|
|
+ struct ath10k *ar = hw->priv;
|
|
+- int ret;
|
|
+
|
|
+ mutex_lock(&ar->conf_mutex);
|
|
+- ret = ath10k_abort_scan(ar);
|
|
+- if (ret) {
|
|
+- ath10k_warn("failed to abort scan: %d\n", ret);
|
|
+- ieee80211_scan_completed(hw, 1 /* aborted */);
|
|
+- }
|
|
++ ath10k_scan_abort(ar);
|
|
+ mutex_unlock(&ar->conf_mutex);
|
|
++
|
|
++ cancel_delayed_work_sync(&ar->scan.timeout);
|
|
+ }
|
|
+
|
|
+ static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
|
|
+@@ -3148,7 +3805,7 @@ static void ath10k_set_key_h_def_keyidx(
|
|
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
|
|
+ key->keyidx);
|
|
+ if (ret)
|
|
+- ath10k_warn("failed to set vdev %i group key as default key: %d\n",
|
|
++ ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n",
|
|
+ arvif->vdev_id, ret);
|
|
+ }
|
|
+
|
|
+@@ -3162,6 +3819,7 @@ static int ath10k_set_key(struct ieee802
|
|
+ const u8 *peer_addr;
|
|
+ bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
|
|
+ key->cipher == WLAN_CIPHER_SUITE_WEP104;
|
|
++ bool def_idx = false;
|
|
+ int ret = 0;
|
|
+
|
|
+ if (key->keyidx > WMI_MAX_KEY_INDEX)
|
|
+@@ -3186,7 +3844,7 @@ static int ath10k_set_key(struct ieee802
|
|
+
|
|
+ if (!peer) {
|
|
+ if (cmd == SET_KEY) {
|
|
+- ath10k_warn("failed to install key for non-existent peer %pM\n",
|
|
++ ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
|
|
+ peer_addr);
|
|
+ ret = -EOPNOTSUPP;
|
|
+ goto exit;
|
|
+@@ -3207,9 +3865,16 @@ static int ath10k_set_key(struct ieee802
|
|
+ ath10k_clear_vdev_key(arvif, key);
|
|
+ }
|
|
+
|
|
+- ret = ath10k_install_key(arvif, key, cmd, peer_addr);
|
|
++ /* set TX_USAGE flag for all the keys incase of dot1x-WEP. For
|
|
++ * static WEP, do not set this flag for the keys whose key id
|
|
++ * is greater than default key id.
|
|
++ */
|
|
++ if (arvif->def_wep_key_idx == -1)
|
|
++ def_idx = true;
|
|
++
|
|
++ ret = ath10k_install_key(arvif, key, cmd, peer_addr, def_idx);
|
|
+ if (ret) {
|
|
+- ath10k_warn("failed to install key for vdev %i peer %pM: %d\n",
|
|
++ ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
|
|
+ arvif->vdev_id, peer_addr, ret);
|
|
+ goto exit;
|
|
+ }
|
|
+@@ -3224,7 +3889,7 @@ static int ath10k_set_key(struct ieee802
|
|
+ peer->keys[key->keyidx] = NULL;
|
|
+ else if (peer == NULL)
|
|
+ /* impossible unless FW goes crazy */
|
|
+- ath10k_warn("Peer %pM disappeared!\n", peer_addr);
|
|
++ ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr);
|
|
+ spin_unlock_bh(&ar->data_lock);
|
|
+
|
|
+ exit:
|
|
+@@ -3232,6 +3897,39 @@ exit:
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
++static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
|
|
++ struct ieee80211_vif *vif,
|
|
++ int keyidx)
|
|
++{
|
|
++ struct ath10k *ar = hw->priv;
|
|
++ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
|
|
++ int ret;
|
|
++
|
|
++ mutex_lock(&arvif->ar->conf_mutex);
|
|
++
|
|
++ if (arvif->ar->state != ATH10K_STATE_ON)
|
|
++ goto unlock;
|
|
++
|
|
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
|
|
++ arvif->vdev_id, keyidx);
|
|
++
|
|
++ ret = ath10k_wmi_vdev_set_param(arvif->ar,
|
|
++ arvif->vdev_id,
|
|
++ arvif->ar->wmi.vdev_param->def_keyid,
|
|
++ keyidx);
|
|
++
|
|
++ if (ret) {
|
|
++ ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
|
|
++ arvif->vdev_id,
|
|
++ ret);
|
|
++ goto unlock;
|
|
++ }
|
|
++
|
|
++ arvif->def_wep_key_idx = keyidx;
|
|
++unlock:
|
|
++ mutex_unlock(&arvif->ar->conf_mutex);
|
|
++}
|
|
++
|
|
+ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
|
|
+ {
|
|
+ struct ath10k *ar;
|
|
+@@ -3260,51 +3958,83 @@ static void ath10k_sta_rc_update_wk(stru
|
|
+ mutex_lock(&ar->conf_mutex);
|
|
+
|
|
+ if (changed & IEEE80211_RC_BW_CHANGED) {
|
|
+- ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
|
|
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
|
|
+ sta->addr, bw);
|
|
+
|
|
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
|
|
+ WMI_PEER_CHAN_WIDTH, bw);
|
|
+ if (err)
|
|
+- ath10k_warn("failed to update STA %pM peer bw %d: %d\n",
|
|
++ ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
|
|
+ sta->addr, bw, err);
|
|
+ }
|
|
+
|
|
+ if (changed & IEEE80211_RC_NSS_CHANGED) {
|
|
+- ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
|
|
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
|
|
+ sta->addr, nss);
|
|
+
|
|
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
|
|
+ WMI_PEER_NSS, nss);
|
|
+ if (err)
|
|
+- ath10k_warn("failed to update STA %pM nss %d: %d\n",
|
|
++ ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
|
|
+ sta->addr, nss, err);
|
|
+ }
|
|
+
|
|
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
|
|
+- ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
|
|
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
|
|
+ sta->addr, smps);
|
|
+
|
|
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
|
|
+ WMI_PEER_SMPS_STATE, smps);
|
|
+ if (err)
|
|
+- ath10k_warn("failed to update STA %pM smps %d: %d\n",
|
|
++ ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
|
|
+ sta->addr, smps, err);
|
|
+ }
|
|
+
|
|
+- if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
|
|
+- ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
|
|
++ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
|
|
++ changed & IEEE80211_RC_NSS_CHANGED) {
|
|
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
|
|
+ sta->addr);
|
|
+
|
|
+- err = ath10k_station_assoc(ar, arvif, sta, true);
|
|
++ err = ath10k_station_assoc(ar, arvif->vif, sta, true);
|
|
+ if (err)
|
|
+- ath10k_warn("failed to reassociate station: %pM\n",
|
|
++ ath10k_warn(ar, "failed to reassociate station: %pM\n",
|
|
+ sta->addr);
|
|
+ }
|
|
+
|
|
+ mutex_unlock(&ar->conf_mutex);
|
|
+ }
|
|
+
|
|
++static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif)
|
|
++{
|
|
++ struct ath10k *ar = arvif->ar;
|
|
++
|
|
++ lockdep_assert_held(&ar->conf_mutex);
|
|
++
|
|
++ if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
|
|
++ arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
|
|
++ return 0;
|
|
++
|
|
++ if (ar->num_stations >= ar->max_num_stations)
|
|
++ return -ENOBUFS;
|
|
++
|
|
++ ar->num_stations++;
|
|
++
|
|
++ return 0;
|
|
++}
|
|
++
|
|
++static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif)
|
|
++{
|
|
++ struct ath10k *ar = arvif->ar;
|
|
++
|
|
++ lockdep_assert_held(&ar->conf_mutex);
|
|
++
|
|
++ if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
++ arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
++ return;
++
++ ar->num_stations--;
++}
++
+ static int ath10k_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+@@ -3314,7 +4044,6 @@ static int ath10k_sta_state(struct ieee8
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+- int max_num_peers;
+ int ret = 0;
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+@@ -3332,46 +4061,72 @@ static int ath10k_sta_state(struct ieee8
+ mutex_lock(&ar->conf_mutex);
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+- new_state == IEEE80211_STA_NONE &&
+- vif->type != NL80211_IFTYPE_STATION) {
++ new_state == IEEE80211_STA_NONE) {
+ /*
+ * New station addition.
+ */
+- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+- max_num_peers = TARGET_10X_NUM_PEERS_MAX - 1;
+- else
+- max_num_peers = TARGET_NUM_PEERS;
++ ath10k_dbg(ar, ATH10K_DBG_MAC,
++ "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
++ arvif->vdev_id, sta->addr,
++ ar->num_stations + 1, ar->max_num_stations,
++ ar->num_peers + 1, ar->max_num_peers);
+
+- if (ar->num_peers >= max_num_peers) {
+- ath10k_warn("number of peers exceeded: peers number %d (max peers %d)\n",
+- ar->num_peers, max_num_peers);
+- ret = -ENOBUFS;
++ ret = ath10k_mac_inc_num_stations(arvif);
++ if (ret) {
++ ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
++ ar->max_num_stations);
+ goto exit;
+ }
+
+- ath10k_dbg(ATH10K_DBG_MAC,
+- "mac vdev %d peer create %pM (new sta) num_peers %d\n",
+- arvif->vdev_id, sta->addr, ar->num_peers);
+-
+ ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
+- if (ret)
+- ath10k_warn("failed to add peer %pM for vdev %d when adding a new sta: %i\n",
++ if (ret) {
++ ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
+ sta->addr, arvif->vdev_id, ret);
++ ath10k_mac_dec_num_stations(arvif);
++ goto exit;
++ }
++
++ if (vif->type == NL80211_IFTYPE_STATION) {
++ WARN_ON(arvif->is_started);
++
++ ret = ath10k_vdev_start(arvif);
++ if (ret) {
++ ath10k_warn(ar, "failed to start vdev %i: %d\n",
++ arvif->vdev_id, ret);
++ WARN_ON(ath10k_peer_delete(ar, arvif->vdev_id,
++ sta->addr));
++ ath10k_mac_dec_num_stations(arvif);
++ goto exit;
++ }
++
++ arvif->is_started = true;
++ }
+ } else if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ /*
+ * Existing station deletion.
+ */
+- ath10k_dbg(ATH10K_DBG_MAC,
++ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d peer delete %pM (sta gone)\n",
+ arvif->vdev_id, sta->addr);
++
++ if (vif->type == NL80211_IFTYPE_STATION) {
++ WARN_ON(!arvif->is_started);
++
++ ret = ath10k_vdev_stop(arvif);
++ if (ret)
++ ath10k_warn(ar, "failed to stop vdev %i: %d\n",
++ arvif->vdev_id, ret);
++
++ arvif->is_started = false;
++ }
++
+ ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+- ath10k_warn("failed to delete peer %pM for vdev %d: %i\n",
++ ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+
+- if (vif->type == NL80211_IFTYPE_STATION)
+- ath10k_bss_disassoc(hw, vif);
++ ath10k_mac_dec_num_stations(arvif);
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC &&
+ (vif->type == NL80211_IFTYPE_AP ||
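
The new-station branch above follows a strict unwind order: the station counter is reserved first, then the firmware peer is created, then (for client interfaces) the vdev is started, and each failure undoes exactly the steps that already succeeded. A stand-alone C sketch of that rollback pattern, with hypothetical stub helpers in place of the real ath10k functions:

#include <stdio.h>

static int inc_stations(void)  { return 0; }  /* step 1: reserve a slot */
static void dec_stations(void) { }
static int create_peer(void)   { return 0; }  /* step 2: firmware peer */
static void delete_peer(void)  { }
static int start_vdev(void)    { return -1; } /* step 3: pretend it fails */

static int add_station(void)
{
	int ret;

	ret = inc_stations();
	if (ret)
		return ret;

	ret = create_peer();
	if (ret)
		goto err_dec;

	ret = start_vdev();
	if (ret)
		goto err_peer;

	return 0;

err_peer:
	delete_peer();   /* undo step 2 */
err_dec:
	dec_stations();  /* undo step 1 */
	return ret;
}

int main(void)
{
	printf("add_station: %d\n", add_station());
	return 0;
}

The goto-based unwind mirrors the kernel idiom the patch uses, just compressed into one runnable unit.
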
+@@ -3379,12 +4134,12 @@ static int ath10k_sta_state(struct ieee8
+ /*
+ * New association.
+ */
+- ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n",
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
+ sta->addr);
+
+- ret = ath10k_station_assoc(ar, arvif, sta, false);
++ ret = ath10k_station_assoc(ar, vif, sta, false);
+ if (ret)
+- ath10k_warn("failed to associate station %pM for vdev %i: %i\n",
++ ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH &&
+@@ -3393,12 +4148,12 @@ static int ath10k_sta_state(struct ieee8
+ /*
+ * Disassociation.
+ */
+- ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
+ sta->addr);
+
+- ret = ath10k_station_disassoc(ar, arvif, sta);
++ ret = ath10k_station_disassoc(ar, vif, sta);
+ if (ret)
+- ath10k_warn("failed to disassociate station: %pM vdev %i: %i\n",
++ ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ }
+ exit:
+@@ -3407,9 +4162,11 @@ exit:
+ }
+
+ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
+- u16 ac, bool enable)
++ u16 ac, bool enable)
+ {
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
++ struct wmi_sta_uapsd_auto_trig_arg arg = {};
++ u32 prio = 0, acc = 0;
+ u32 value = 0;
+ int ret = 0;
+
+@@ -3422,18 +4179,26 @@ static int ath10k_conf_tx_uapsd(struct a
+ case IEEE80211_AC_VO:
+ value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
++ prio = 7;
++ acc = 3;
+ break;
+ case IEEE80211_AC_VI:
+ value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
++ prio = 5;
++ acc = 2;
+ break;
+ case IEEE80211_AC_BE:
+ value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
++ prio = 2;
++ acc = 1;
+ break;
+ case IEEE80211_AC_BK:
+ value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
++ prio = 0;
++ acc = 0;
+ break;
+ }
+
+@@ -3446,7 +4211,7 @@ static int ath10k_conf_tx_uapsd(struct a
+ WMI_STA_PS_PARAM_UAPSD,
+ arvif->u.sta.uapsd);
+ if (ret) {
+- ath10k_warn("failed to set uapsd params: %d\n", ret);
++ ath10k_warn(ar, "failed to set uapsd params: %d\n", ret);
+ goto exit;
+ }
+
+@@ -3459,7 +4224,44 @@ static int ath10k_conf_tx_uapsd(struct a
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY,
+ value);
+ if (ret)
+- ath10k_warn("failed to set rx wake param: %d\n", ret);
++ ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
++
++ ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
++ if (ret) {
++ ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
++ arvif->vdev_id, ret);
++ return ret;
++ }
++
++ ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
++ if (ret) {
++ ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
++ arvif->vdev_id, ret);
++ return ret;
++ }
++
++ if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
++ test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
++ /* Only userspace can make an educated decision when to send
++ * trigger frame. The following effectively disables u-UAPSD
++ * autotrigger in firmware (which is enabled by default
++ * provided the autotrigger service is available).
++ */
++
++ arg.wmm_ac = acc;
++ arg.user_priority = prio;
++ arg.service_interval = 0;
++ arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
++ arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
++
++ ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
++ arvif->bssid, &arg, 1);
++ if (ret) {
++ ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
++ ret);
++ return ret;
++ }
++ }
+
+ exit:
+ return ret;
+@@ -3470,6 +4272,7 @@ static int ath10k_conf_tx(struct ieee802
+ const struct ieee80211_tx_queue_params *params)
+ {
+ struct ath10k *ar = hw->priv;
++ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct wmi_wmm_params_arg *p = NULL;
+ int ret;
+
+@@ -3477,16 +4280,16 @@ static int ath10k_conf_tx(struct ieee802
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+- p = &ar->wmm_params.ac_vo;
++ p = &arvif->wmm_params.ac_vo;
+ break;
+ case IEEE80211_AC_VI:
+- p = &ar->wmm_params.ac_vi;
++ p = &arvif->wmm_params.ac_vi;
+ break;
+ case IEEE80211_AC_BE:
+- p = &ar->wmm_params.ac_be;
++ p = &arvif->wmm_params.ac_be;
+ break;
+ case IEEE80211_AC_BK:
+- p = &ar->wmm_params.ac_bk;
++ p = &arvif->wmm_params.ac_bk;
+ break;
+ }
+
+@@ -3506,16 +4309,28 @@ static int ath10k_conf_tx(struct ieee802
+ */
+ p->txop = params->txop * 32;
+
+- /* FIXME: FW accepts wmm params per hw, not per vif */
+- ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params);
+- if (ret) {
+- ath10k_warn("failed to set wmm params: %d\n", ret);
+- goto exit;
++ if (ar->wmi.ops->gen_vdev_wmm_conf) {
++ ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
++ &arvif->wmm_params);
++ if (ret) {
++ ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
++ arvif->vdev_id, ret);
++ goto exit;
++ }
++ } else {
++ /* This won't work well with multi-interface cases but it's
++ * better than nothing.
++ */
++ ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
++ if (ret) {
++ ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
++ goto exit;
++ }
+ }
+
+ ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
+ if (ret)
+- ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret);
+
+ exit:
+ mutex_unlock(&ar->conf_mutex);
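
The conf_tx hunk above dispatches WMM parameters per vdev when the WMI backend provides gen_vdev_wmm_conf, and only falls back to the device-wide call otherwise. A stand-alone C sketch of that optional-op dispatch, with hypothetical names standing in for the driver's structures:

#include <stdio.h>

struct wmm_params { int ac; };

struct wmi_ops {
	/* NULL when the firmware ABI has no per-vdev WMM command */
	int (*gen_vdev_wmm_conf)(int vdev_id, const struct wmm_params *p);
};

static int pdev_set_wmm(const struct wmm_params *p)
{
	printf("device-wide WMM fallback, ac=%d\n", p->ac);
	return 0;
}

static int set_wmm(const struct wmi_ops *ops, int vdev_id,
		   const struct wmm_params *p)
{
	if (ops->gen_vdev_wmm_conf)
		return ops->gen_vdev_wmm_conf(vdev_id, p);

	/* last writer wins across interfaces; better than nothing */
	return pdev_set_wmm(p);
}

int main(void)
{
	struct wmi_ops ops = { .gen_vdev_wmm_conf = NULL };
	struct wmm_params p = { .ac = 2 };

	return set_wmm(&ops, 0, &p);
}

Testing the function pointer at the call site keeps one code path per firmware generation instead of a feature-flag maze.
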
+@@ -3533,27 +4348,35 @@ static int ath10k_remain_on_channel(stru
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct wmi_start_scan_arg arg;
+- int ret;
++ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+- if (ar->scan.in_progress) {
+- spin_unlock_bh(&ar->data_lock);
++ switch (ar->scan.state) {
++ case ATH10K_SCAN_IDLE:
++ reinit_completion(&ar->scan.started);
++ reinit_completion(&ar->scan.completed);
++ reinit_completion(&ar->scan.on_channel);
++ ar->scan.state = ATH10K_SCAN_STARTING;
++ ar->scan.is_roc = true;
++ ar->scan.vdev_id = arvif->vdev_id;
++ ar->scan.roc_freq = chan->center_freq;
++ ret = 0;
++ break;
++ case ATH10K_SCAN_STARTING:
++ case ATH10K_SCAN_RUNNING:
++ case ATH10K_SCAN_ABORTING:
+ ret = -EBUSY;
+- goto exit;
++ break;
+ }
+-
+- reinit_completion(&ar->scan.started);
+- reinit_completion(&ar->scan.completed);
+- reinit_completion(&ar->scan.on_channel);
+- ar->scan.in_progress = true;
+- ar->scan.aborting = false;
+- ar->scan.is_roc = true;
+- ar->scan.vdev_id = arvif->vdev_id;
+- ar->scan.roc_freq = chan->center_freq;
+ spin_unlock_bh(&ar->data_lock);
+
++ if (ret)
++ goto exit;
++
++ duration = max(duration, WMI_SCAN_CHAN_MIN_TIME_MSEC);
++
+ memset(&arg, 0, sizeof(arg));
+ ath10k_wmi_start_scan_init(ar, &arg);
+ arg.vdev_id = arvif->vdev_id;
+@@ -3568,17 +4391,21 @@ static int ath10k_remain_on_channel(stru
+
+ ret = ath10k_start_scan(ar, &arg);
+ if (ret) {
+- ath10k_warn("failed to start roc scan: %d\n", ret);
++ ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
+ spin_lock_bh(&ar->data_lock);
+- ar->scan.in_progress = false;
++ ar->scan.state = ATH10K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ goto exit;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
+ if (ret == 0) {
+- ath10k_warn("failed to switch to channel for roc scan\n");
+- ath10k_abort_scan(ar);
++ ath10k_warn(ar, "failed to switch to channel for roc scan\n");
++
++ ret = ath10k_scan_stop(ar);
++ if (ret)
++ ath10k_warn(ar, "failed to stop scan: %d\n", ret);
++
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
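
The remain-on-channel hunk above replaces the old in_progress/aborting booleans with an explicit scan state machine: only an idle scan engine may take on a new roc scan, and every other state reports busy instead of silently clobbering the current scan. A stand-alone C sketch of that guard, with the enum names mirroring the patch and everything else hypothetical:

#include <stdio.h>

enum scan_state {
	ATH10K_SCAN_IDLE,
	ATH10K_SCAN_STARTING,
	ATH10K_SCAN_RUNNING,
	ATH10K_SCAN_ABORTING,
};

static int scan_try_start(enum scan_state *state)
{
	switch (*state) {
	case ATH10K_SCAN_IDLE:
		*state = ATH10K_SCAN_STARTING;  /* claim the engine */
		return 0;
	case ATH10K_SCAN_STARTING:
	case ATH10K_SCAN_RUNNING:
	case ATH10K_SCAN_ABORTING:
		return -1;  /* -EBUSY in the driver */
	}
	return -1;
}

int main(void)
{
	enum scan_state state = ATH10K_SCAN_IDLE;

	printf("first start: %d\n", scan_try_start(&state));  /* succeeds */
	printf("second start: %d\n", scan_try_start(&state)); /* busy */
	return 0;
}

An exhaustive switch makes each legal transition explicit, which two independent booleans could never guarantee.
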
+@@ -3594,9 +4421,11 @@ static int ath10k_cancel_remain_on_chann
+ struct ath10k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+- ath10k_abort_scan(ar);
++ ath10k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+
++ cancel_delayed_work_sync(&ar->scan.timeout);
++
+ return 0;
+ }
+
+@@ -3613,35 +4442,12 @@ static int ath10k_set_rts_threshold(stru
+
+ mutex_lock(&ar->conf_mutex);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
+- arvif->vdev_id, value);
+-
+- ret = ath10k_mac_set_rts(arvif, value);
+- if (ret) {
+- ath10k_warn("failed to set rts threshold for vdev %d: %d\n",
+- arvif->vdev_id, ret);
+- break;
+- }
+- }
+- mutex_unlock(&ar->conf_mutex);
+-
+- return ret;
+-}
+-
+-static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+-{
+- struct ath10k *ar = hw->priv;
+- struct ath10k_vif *arvif;
+- int ret = 0;
+-
+- mutex_lock(&ar->conf_mutex);
+- list_for_each_entry(arvif, &ar->arvifs, list) {
+- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n",
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
+ arvif->vdev_id, value);
+
+ ret = ath10k_mac_set_rts(arvif, value);
+ if (ret) {
+- ath10k_warn("failed to set fragmentation threshold for vdev %d: %d\n",
++ ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ break;
+ }
+@@ -3675,13 +4481,15 @@ static void ath10k_flush(struct ieee8021
+ empty = (ar->htt.num_pending_tx == 0);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+- skip = (ar->state == ATH10K_STATE_WEDGED);
++ skip = (ar->state == ATH10K_STATE_WEDGED) ||
++ test_bit(ATH10K_FLAG_CRASH_FLUSH,
++ &ar->dev_flags);
+
+ (empty || skip);
+ }), ATH10K_FLUSH_TIMEOUT_HZ);
+
+ if (ret <= 0 || skip)
+- ath10k_warn("failed to flush transmit queue (skip %i ar-state %i): %i\n",
++ ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %i\n",
+ skip, ar->state, ret);
+
+ skip:
+@@ -3716,7 +4524,7 @@ static int ath10k_suspend(struct ieee802
+
+ ret = ath10k_hif_suspend(ar);
+ if (ret) {
+- ath10k_warn("failed to suspend hif: %d\n", ret);
++ ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
+ goto resume;
+ }
+
+@@ -3725,7 +4533,7 @@ static int ath10k_suspend(struct ieee802
+ resume:
+ ret = ath10k_wmi_pdev_resume_target(ar);
+ if (ret)
+- ath10k_warn("failed to resume target: %d\n", ret);
++ ath10k_warn(ar, "failed to resume target: %d\n", ret);
+
+ ret = 1;
+ exit:
+@@ -3742,14 +4550,14 @@ static int ath10k_resume(struct ieee8021
+
+ ret = ath10k_hif_resume(ar);
+ if (ret) {
+- ath10k_warn("failed to resume hif: %d\n", ret);
++ ath10k_warn(ar, "failed to resume hif: %d\n", ret);
+ ret = 1;
+ goto exit;
+ }
+
+ ret = ath10k_wmi_pdev_resume_target(ar);
+ if (ret) {
+- ath10k_warn("failed to resume target: %d\n", ret);
++ ath10k_warn(ar, "failed to resume target: %d\n", ret);
+ ret = 1;
+ goto exit;
+ }
+@@ -3770,8 +4578,9 @@ static void ath10k_restart_complete(stru
+ /* If device failed to restart it will be in a different state, e.g.
+ * ATH10K_STATE_WEDGED */
+ if (ar->state == ATH10K_STATE_RESTARTED) {
+- ath10k_info("device successfully recovered\n");
++ ath10k_info(ar, "device successfully recovered\n");
+ ar->state = ATH10K_STATE_ON;
++ ieee80211_wake_queues(ar->hw);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+@@ -3807,6 +4616,9 @@ static int ath10k_get_survey(struct ieee
+
+ survey->channel = &sband->channels[idx];
+
++ if (ar->rx_channel == survey->channel)
++ survey->filled |= SURVEY_INFO_IN_USE;
++
+ exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+@@ -3854,6 +4666,10 @@ ath10k_default_bitrate_mask(struct ath10
+ u32 legacy = 0x00ff;
+ u8 ht = 0xff, i;
+ u16 vht = 0x3ff;
++ u16 nrf = ar->num_rf_chains;
++
++ if (ar->cfg_tx_chainmask)
++ nrf = get_nss_from_chainmask(ar->cfg_tx_chainmask);
+
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
+@@ -3869,11 +4685,11 @@ ath10k_default_bitrate_mask(struct ath10
+ if (mask->control[band].legacy != legacy)
+ return false;
+
+- for (i = 0; i < ar->num_rf_chains; i++)
++ for (i = 0; i < nrf; i++)
+ if (mask->control[band].ht_mcs[i] != ht)
+ return false;
+
+- for (i = 0; i < ar->num_rf_chains; i++)
++ for (i = 0; i < nrf; i++)
+ if (mask->control[band].vht_mcs[i] != vht)
+ return false;
+
+@@ -3897,8 +4713,8 @@ ath10k_bitrate_mask_nss(const struct cfg
+ continue;
+ else if (mask->control[band].ht_mcs[i] == 0x00)
+ break;
+- else
+- return false;
++
++ return false;
+ }
+
+ ht_nss = i;
+@@ -3909,8 +4725,8 @@ ath10k_bitrate_mask_nss(const struct cfg
+ continue;
+ else if (mask->control[band].vht_mcs[i] == 0x0000)
+ break;
+- else
+- return false;
++
++ return false;
+ }
+
+ vht_nss = i;
+@@ -3967,7 +4783,8 @@ ath10k_bitrate_mask_correct(const struct
+ }
+
+ static bool
+-ath10k_bitrate_mask_rate(const struct cfg80211_bitrate_mask *mask,
++ath10k_bitrate_mask_rate(struct ath10k *ar,
++ const struct cfg80211_bitrate_mask *mask,
+ enum ieee80211_band band,
+ u8 *fixed_rate,
+ u8 *fixed_nss)
+@@ -4025,7 +4842,7 @@ ath10k_bitrate_mask_rate(const struct cf
+ nss <<= 4;
+ pream <<= 6;
+
+- ath10k_dbg(ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n",
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n",
+ pream, nss, rate);
+
+ *fixed_rate = pream | nss | rate;
+@@ -4033,7 +4850,8 @@ ath10k_bitrate_mask_rate(const struct cf
+ return true;
+ }
+
+-static bool ath10k_get_fixed_rate_nss(const struct cfg80211_bitrate_mask *mask,
++static bool ath10k_get_fixed_rate_nss(struct ath10k *ar,
++ const struct cfg80211_bitrate_mask *mask,
+ enum ieee80211_band band,
+ u8 *fixed_rate,
+ u8 *fixed_nss)
+@@ -4043,7 +4861,7 @@ static bool ath10k_get_fixed_rate_nss(co
+ return true;
+
+ /* Next Check single rate is set */
+- return ath10k_bitrate_mask_rate(mask, band, fixed_rate, fixed_nss);
++ return ath10k_bitrate_mask_rate(ar, mask, band, fixed_rate, fixed_nss);
+ }
+
+ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
+@@ -4063,16 +4881,16 @@ static int ath10k_set_fixed_rate_param(s
+ goto exit;
+
+ if (fixed_rate == WMI_FIXED_RATE_NONE)
+- ath10k_dbg(ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
+
+ if (force_sgi)
+- ath10k_dbg(ATH10K_DBG_MAC, "mac force sgi\n");
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac force sgi\n");
+
+ vdev_param = ar->wmi.vdev_param->fixed_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, fixed_rate);
+ if (ret) {
+- ath10k_warn("failed to set fixed rate param 0x%02x: %d\n",
++ ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
+ fixed_rate, ret);
+ ret = -EINVAL;
+ goto exit;
+@@ -4085,7 +4903,7 @@ static int ath10k_set_fixed_rate_param(s
+ vdev_param, fixed_nss);
+
+ if (ret) {
+- ath10k_warn("failed to set fixed nss param %d: %d\n",
++ ath10k_warn(ar, "failed to set fixed nss param %d: %d\n",
+ fixed_nss, ret);
+ ret = -EINVAL;
+ goto exit;
+@@ -4098,7 +4916,7 @@ static int ath10k_set_fixed_rate_param(s
+ force_sgi);
+
+ if (ret) {
+- ath10k_warn("failed to set sgi param %d: %d\n",
++ ath10k_warn(ar, "failed to set sgi param %d: %d\n",
+ force_sgi, ret);
+ ret = -EINVAL;
+ goto exit;
+@@ -4122,19 +4940,22 @@ static int ath10k_set_bitrate_mask(struc
+ u8 fixed_nss = ar->num_rf_chains;
+ u8 force_sgi;
+
++ if (ar->cfg_tx_chainmask)
++ fixed_nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
++
+ force_sgi = mask->control[band].gi;
+ if (force_sgi == NL80211_TXRATE_FORCE_LGI)
+ return -EINVAL;
+
+ if (!ath10k_default_bitrate_mask(ar, band, mask)) {
+- if (!ath10k_get_fixed_rate_nss(mask, band,
++ if (!ath10k_get_fixed_rate_nss(ar, mask, band,
+ &fixed_rate,
+ &fixed_nss))
+ return -EINVAL;
+ }
+
+ if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
+- ath10k_warn("failed to force SGI usage for default rate settings\n");
++ ath10k_warn(ar, "failed to force SGI usage for default rate settings\n");
+ return -EINVAL;
+ }
+
+@@ -4153,7 +4974,7 @@ static void ath10k_sta_rc_update(struct
+
+ spin_lock_bh(&ar->data_lock);
+
+- ath10k_dbg(ATH10K_DBG_MAC,
++ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
+ sta->addr, changed, sta->bandwidth, sta->rx_nss,
+ sta->smps_mode);
+@@ -4172,7 +4993,7 @@ static void ath10k_sta_rc_update(struct
+ bw = WMI_PEER_CHWIDTH_80MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+- ath10k_warn("Invalid bandwith %d in rc update for %pM\n",
++ ath10k_warn(ar, "Invalid bandwith %d in rc update for %pM\n",
+ sta->bandwidth, sta->addr);
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+@@ -4199,7 +5020,7 @@ static void ath10k_sta_rc_update(struct
+ smps = WMI_PEER_SMPS_DYNAMIC;
+ break;
+ case IEEE80211_SMPS_NUM_MODES:
+- ath10k_warn("Invalid smps %d in sta rc update for %pM\n",
++ ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
+ sta->smps_mode, sta->addr);
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+@@ -4225,6 +5046,39 @@ static u64 ath10k_get_tsf(struct ieee802
+ return 0;
+ }
+
++static int ath10k_ampdu_action(struct ieee80211_hw *hw,
++ struct ieee80211_vif *vif,
++ enum ieee80211_ampdu_mlme_action action,
++ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
++ u8 buf_size)
++{
++ struct ath10k *ar = hw->priv;
++ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
++
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
++ arvif->vdev_id, sta->addr, tid, action);
++
++ switch (action) {
++ case IEEE80211_AMPDU_RX_START:
++ case IEEE80211_AMPDU_RX_STOP:
++ /* HTT AddBa/DelBa events trigger mac80211 Rx BA session
++ * creation/removal. Do we need to verify this?
++ */
++ return 0;
++ case IEEE80211_AMPDU_TX_START:
++ case IEEE80211_AMPDU_TX_STOP_CONT:
++ case IEEE80211_AMPDU_TX_STOP_FLUSH:
++ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
++ case IEEE80211_AMPDU_TX_OPERATIONAL:
++ /* Firmware offloads Tx aggregation entirely so deny mac80211
++ * Tx aggregation requests.
++ */
++ return -EOPNOTSUPP;
++ }
++
++ return -EINVAL;
++}
++
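
The ath10k_ampdu_action() added above encodes a simple aggregation policy: with A-MPDU handled in firmware, Rx session events are accepted as no-ops and Tx session requests are refused so mac80211 never tries to negotiate them itself. A stand-alone C sketch of that policy, with hypothetical enum values and raw errno numbers in place of the kernel's:

#include <stdio.h>

enum ampdu_action {
	AMPDU_RX_START,
	AMPDU_RX_STOP,
	AMPDU_TX_START,
};

static int ampdu_action(enum ampdu_action action)
{
	switch (action) {
	case AMPDU_RX_START:
	case AMPDU_RX_STOP:
		return 0;    /* firmware already did the work */
	case AMPDU_TX_START:
		return -95;  /* -EOPNOTSUPP: keep the stack out of Tx agg */
	}
	return -22;          /* -EINVAL for anything unexpected */
}

int main(void)
{
	printf("tx start -> %d\n", ampdu_action(AMPDU_TX_START));
	printf("rx start -> %d\n", ampdu_action(AMPDU_RX_START));
	return 0;
}

Returning -EOPNOTSUPP rather than silently ignoring the request lets mac80211 fall back cleanly instead of waiting for a session that will never come up.
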
+ static const struct ieee80211_ops ath10k_ops = {
+ .tx = ath10k_tx,
+ .start = ath10k_start,
+@@ -4237,23 +5091,35 @@ static const struct ieee80211_ops ath10k
+ .hw_scan = ath10k_hw_scan,
+ .cancel_hw_scan = ath10k_cancel_hw_scan,
+ .set_key = ath10k_set_key,
++ .set_default_unicast_key = ath10k_set_default_unicast_key,
+ .sta_state = ath10k_sta_state,
+ .conf_tx = ath10k_conf_tx,
+ .remain_on_channel = ath10k_remain_on_channel,
+ .cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
+ .set_rts_threshold = ath10k_set_rts_threshold,
+- .set_frag_threshold = ath10k_set_frag_threshold,
+ .flush = ath10k_flush,
+ .tx_last_beacon = ath10k_tx_last_beacon,
++ .set_antenna = ath10k_set_antenna,
++ .get_antenna = ath10k_get_antenna,
+ .restart_complete = ath10k_restart_complete,
+ .get_survey = ath10k_get_survey,
+ .set_bitrate_mask = ath10k_set_bitrate_mask,
+ .sta_rc_update = ath10k_sta_rc_update,
+ .get_tsf = ath10k_get_tsf,
++ .ampdu_action = ath10k_ampdu_action,
++ .get_et_sset_count = ath10k_debug_get_et_sset_count,
++ .get_et_stats = ath10k_debug_get_et_stats,
++ .get_et_strings = ath10k_debug_get_et_strings,
++
++ CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
++
+ #ifdef CONFIG_PM
+ .suspend = ath10k_suspend,
+ .resume = ath10k_resume,
+ #endif
++#ifdef CPTCFG_MAC80211_DEBUGFS
++ .sta_add_debugfs = ath10k_sta_add_debugfs,
++#endif
+ };
+
+ #define RATETAB_ENT(_rate, _rateid, _flags) { \
+@@ -4324,6 +5190,9 @@ static const struct ieee80211_channel at
+ CHAN5G(165, 5825, 0),
+ };
+
++/* Note: Be careful if you re-order these. There is code which depends on this
++ * ordering.
++ */
+ static struct ieee80211_rate ath10k_rates[] = {
+ /* CCK */
+ RATETAB_ENT(10, 0x82, 0),
+@@ -4346,12 +5215,12 @@ static struct ieee80211_rate ath10k_rate
+ #define ath10k_g_rates (ath10k_rates + 0)
+ #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
+
+-struct ath10k *ath10k_mac_create(void)
++struct ath10k *ath10k_mac_create(size_t priv_size)
+ {
+ struct ieee80211_hw *hw;
+ struct ath10k *ar;
+
+- hw = ieee80211_alloc_hw(sizeof(struct ath10k), &ath10k_ops);
++ hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, &ath10k_ops);
+ if (!hw)
+ return NULL;
+
+@@ -4377,6 +5246,10 @@ static const struct ieee80211_iface_limi
+ .types = BIT(NL80211_IFTYPE_P2P_GO)
+ },
+ {
++ .max = 1,
++ .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
++ },
++ {
+ .max = 7,
+ .types = BIT(NL80211_IFTYPE_AP)
+ },
+@@ -4501,7 +5374,6 @@ static struct ieee80211_sta_ht_cap ath10
+ return ht_cap;
+ }
+
+-
+ static void ath10k_get_arvif_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+ {
+@@ -4526,7 +5398,7 @@ struct ath10k_vif *ath10k_get_arvif(stru
+ ath10k_get_arvif_iter,
+ &arvif_iter);
+ if (!arvif_iter.arvif) {
+- ath10k_warn("No VIF found for vdev %d\n", vdev_id);
++ ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
+ return NULL;
+ }
+
+@@ -4564,7 +5436,8 @@ int ath10k_mac_register(struct ath10k *a
+ band->bitrates = ath10k_g_rates;
+ band->ht_cap = ht_cap;
+
+- /* vht is not supported in 2.4 GHz */
++ /* Enable the VHT support at 2.4 GHz */
++ band->vht_cap = vht_cap;
+
+ ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+ }
+@@ -4590,18 +5463,20 @@ int ath10k_mac_register(struct ath10k *a
+
+ ar->hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+- BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP);
+
++ ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask;
++ ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask;
++
+ if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
+ ar->hw->wiphy->interface_modes |=
++ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+
+ ar->hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
+- IEEE80211_HW_SUPPORTS_UAPSD |
+ IEEE80211_HW_MFP_CAPABLE |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_HAS_RATE_CONTROL |
+@@ -4609,10 +5484,6 @@ int ath10k_mac_register(struct ath10k *a
+ IEEE80211_HW_AP_LINK_PS |
+ IEEE80211_HW_SPECTRUM_MGMT;
+
+- /* MSDU can have HTT TX fragment pushed in front. The additional 4
+- * bytes is used for padding/alignment if necessary. */
+- ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag)*2 + 4;
+-
+ if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
+ ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
+
+@@ -4629,25 +5500,52 @@ int ath10k_mac_register(struct ath10k *a
+
+ ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
+
++ if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
++ ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
++
++ /* Firmware delivers WPS/P2P Probe Requests frames to driver so
++ * that userspace (e.g. wpa_supplicant/hostapd) can generate
++ * correct Probe Responses. This is more of a hack advert..
++ */
++ ar->hw->wiphy->probe_resp_offload |=
++ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
++ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
++ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
++ }
++
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ ar->hw->wiphy->max_remain_on_channel_duration = 5000;
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
++ ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
++
+ /*
+ * on LL hardware queues are managed entirely by the FW
+ * so we only advertise to mac we can do the queues thing
+ */
+ ar->hw->queues = 4;
+
+- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+- ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
+- ar->hw->wiphy->n_iface_combinations =
+- ARRAY_SIZE(ath10k_10x_if_comb);
+- } else {
++ switch (ar->wmi.op_version) {
++ case ATH10K_FW_WMI_OP_VERSION_MAIN:
++ case ATH10K_FW_WMI_OP_VERSION_TLV:
+ ar->hw->wiphy->iface_combinations = ath10k_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_if_comb);
++ ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
++ break;
++ case ATH10K_FW_WMI_OP_VERSION_10_1:
++ case ATH10K_FW_WMI_OP_VERSION_10_2:
++ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
++ ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
++ ar->hw->wiphy->n_iface_combinations =
++ ARRAY_SIZE(ath10k_10x_if_comb);
++ break;
++ case ATH10K_FW_WMI_OP_VERSION_UNSET:
++ case ATH10K_FW_WMI_OP_VERSION_MAX:
++ WARN_ON(1);
++ ret = -EINVAL;
++ goto err_free;
+ }
+
+ ar->hw->netdev_features = NETIF_F_HW_CSUM;
+@@ -4659,19 +5557,19 @@ int ath10k_mac_register(struct ath10k *a
+ NL80211_DFS_UNSET);
+
+ if (!ar->dfs_detector)
+- ath10k_warn("failed to initialise DFS pattern detector\n");
++ ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
+ }
+
+ ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
+ ath10k_reg_notifier);
+ if (ret) {
+- ath10k_err("failed to initialise regulatory: %i\n", ret);
++ ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
+ goto err_free;
+ }
+
+ ret = ieee80211_register_hw(ar->hw);
+ if (ret) {
+- ath10k_err("failed to register ieee80211: %d\n", ret);
++ ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
+ goto err_free;
+ }
+
+--- a/drivers/net/wireless/ath/ath10k/mac.h
++++ b/drivers/net/wireless/ath/ath10k/mac.h
+@@ -21,33 +21,41 @@
+ #include <net/mac80211.h>
+ #include "core.h"
+
++#define WEP_KEYID_SHIFT 6
++
+ struct ath10k_generic_iter {
+ struct ath10k *ar;
+ int ret;
+ };
+
+-struct ath10k *ath10k_mac_create(void);
++struct ath10k *ath10k_mac_create(size_t priv_size);
+ void ath10k_mac_destroy(struct ath10k *ar);
+ int ath10k_mac_register(struct ath10k *ar);
+ void ath10k_mac_unregister(struct ath10k *ar);
+ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
+-void ath10k_reset_scan(unsigned long ptr);
++void __ath10k_scan_finish(struct ath10k *ar);
++void ath10k_scan_finish(struct ath10k *ar);
++void ath10k_scan_timeout_work(struct work_struct *work);
+ void ath10k_offchan_tx_purge(struct ath10k *ar);
+ void ath10k_offchan_tx_work(struct work_struct *work);
+ void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
+ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
+ void ath10k_halt(struct ath10k *ar);
++void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif);
++void ath10k_drain_tx(struct ath10k *ar);
++bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
++ u8 keyidx);
+
+ static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
+ {
+ return (struct ath10k_vif *)vif->drv_priv;
+ }
+
+-static inline void ath10k_tx_h_seq_no(struct sk_buff *skb)
++static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif,
++ struct sk_buff *skb)
+ {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+- struct ieee80211_vif *vif = info->control.vif;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+--- a/drivers/net/wireless/ath/ath10k/pci.c
++++ b/drivers/net/wireless/ath/ath10k/pci.c
+@@ -44,13 +44,9 @@ enum ath10k_pci_reset_mode {
+ ATH10K_PCI_RESET_WARM_ONLY = 1,
+ };
+
+-static unsigned int ath10k_pci_target_ps;
+ static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
+ static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
+
+-module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
+-MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");
+-
+ module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
+ MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
+
+@@ -59,21 +55,31 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1
+
+ /* how long wait to wait for target to initialise, in ms */
+ #define ATH10K_PCI_TARGET_WAIT 3000
++#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
+
+ #define QCA988X_2_0_DEVICE_ID (0x003c)
++#define QCA6174_2_1_DEVICE_ID (0x003e)
+
+-static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
++static const struct pci_device_id ath10k_pci_id_table[] = {
+ { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
++ { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
+ {0}
+ };
+
+-static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
+- u32 *data);
++static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
++ /* QCA988X pre 2.0 chips are not supported because they need some nasty
++ * hacks. ath10k doesn't have them and these devices crash horribly
++ * because of that.
++ */
++ { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
++ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
++ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
++ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
++ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
++ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
++};
+
+-static int ath10k_pci_post_rx(struct ath10k *ar);
+-static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
+- int num);
+-static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
++static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
+ static int ath10k_pci_cold_reset(struct ath10k *ar);
+ static int ath10k_pci_warm_reset(struct ath10k *ar);
+ static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
+@@ -98,7 +104,7 @@ static const struct ce_attr host_ce_conf
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+- .src_sz_max = 512,
++ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ },
+
+@@ -155,79 +161,175 @@ static const struct ce_attr host_ce_conf
+ static const struct ce_pipe_config target_ce_config_wlan[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+- .pipenum = 0,
+- .pipedir = PIPEDIR_OUT,
+- .nentries = 32,
+- .nbytes_max = 256,
+- .flags = CE_ATTR_FLAGS,
+- .reserved = 0,
++ .pipenum = __cpu_to_le32(0),
++ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
++ .nentries = __cpu_to_le32(32),
++ .nbytes_max = __cpu_to_le32(256),
++ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
++ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+- .pipenum = 1,
+- .pipedir = PIPEDIR_IN,
+- .nentries = 32,
+- .nbytes_max = 512,
+- .flags = CE_ATTR_FLAGS,
+- .reserved = 0,
++ .pipenum = __cpu_to_le32(1),
++ .pipedir = __cpu_to_le32(PIPEDIR_IN),
++ .nentries = __cpu_to_le32(32),
++ .nbytes_max = __cpu_to_le32(2048),
++ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
++ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+- .pipenum = 2,
+- .pipedir = PIPEDIR_IN,
+- .nentries = 32,
+- .nbytes_max = 2048,
+- .flags = CE_ATTR_FLAGS,
+- .reserved = 0,
++ .pipenum = __cpu_to_le32(2),
++ .pipedir = __cpu_to_le32(PIPEDIR_IN),
++ .nentries = __cpu_to_le32(32),
++ .nbytes_max = __cpu_to_le32(2048),
++ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
++ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+- .pipenum = 3,
+- .pipedir = PIPEDIR_OUT,
+- .nentries = 32,
+- .nbytes_max = 2048,
+- .flags = CE_ATTR_FLAGS,
+- .reserved = 0,
++ .pipenum = __cpu_to_le32(3),
++ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
++ .nentries = __cpu_to_le32(32),
++ .nbytes_max = __cpu_to_le32(2048),
++ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
++ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+- .pipenum = 4,
+- .pipedir = PIPEDIR_OUT,
+- .nentries = 256,
+- .nbytes_max = 256,
+- .flags = CE_ATTR_FLAGS,
+- .reserved = 0,
++ .pipenum = __cpu_to_le32(4),
++ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
++ .nentries = __cpu_to_le32(256),
++ .nbytes_max = __cpu_to_le32(256),
++ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
++ .reserved = __cpu_to_le32(0),
+ },
+
+ /* NB: 50% of src nentries, since tx has 2 frags */
+
+ /* CE5: unused */
+ {
+- .pipenum = 5,
+- .pipedir = PIPEDIR_OUT,
+- .nentries = 32,
+- .nbytes_max = 2048,
+- .flags = CE_ATTR_FLAGS,
+- .reserved = 0,
++ .pipenum = __cpu_to_le32(5),
++ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
++ .nentries = __cpu_to_le32(32),
++ .nbytes_max = __cpu_to_le32(2048),
++ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
++ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+- .pipenum = 6,
+- .pipedir = PIPEDIR_INOUT,
+- .nentries = 32,
+- .nbytes_max = 4096,
+- .flags = CE_ATTR_FLAGS,
+- .reserved = 0,
++ .pipenum = __cpu_to_le32(6),
++ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
++ .nentries = __cpu_to_le32(32),
++ .nbytes_max = __cpu_to_le32(4096),
++ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
++ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ };
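
The pipe config table above switches every field to __cpu_to_le32() because the structure is copied verbatim to the target, which expects little-endian fields regardless of host byte order. A stand-alone C sketch of the idea, with a hypothetical cpu_to_le32() standing in for the kernel helper:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t le32; /* stand-in for the kernel's __le32 */

static le32 cpu_to_le32(uint32_t v)
{
	const union { uint32_t u; uint8_t b[4]; } probe = { .u = 1 };

	if (probe.b[0] == 1)  /* little-endian host: no swap needed */
		return v;
	return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
	       ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
}

struct pipe_config {
	le32 pipenum;
	le32 nentries;
	le32 nbytes_max;
};

int main(void)
{
	/* every field converted at initialisation, as in the patch */
	struct pipe_config cfg = {
		.pipenum    = cpu_to_le32(0),
		.nentries   = cpu_to_le32(32),
		.nbytes_max = cpu_to_le32(256),
	};

	printf("pipenum on the wire: 0x%08x\n", cfg.pipenum);
	return 0;
}

Converting at initialisation rather than at send time keeps the table a plain const blob that can be DMA'd without a fix-up pass.
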
+
++/*
++ * Map from service/endpoint to Copy Engine.
++ * This table is derived from the CE_PCI TABLE, above.
++ * It is passed to the Target at startup for use by firmware.
++ */
++static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
++ {
++ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
++ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
++ __cpu_to_le32(3),
++ },
++ {
++ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
++ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
++ __cpu_to_le32(2),
++ },
++ {
++ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
++ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
++ __cpu_to_le32(3),
++ },
++ {
++ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
++ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
++ __cpu_to_le32(2),
++ },
++ {
++ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
++ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
++ __cpu_to_le32(3),
++ },
++ {
++ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
++ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
++ __cpu_to_le32(2),
++ },
++ {
++ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
++ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
++ __cpu_to_le32(3),
++ },
++ {
++ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
++ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
++ __cpu_to_le32(2),
++ },
++ {
++ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
++ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
++ __cpu_to_le32(3),
++ },
++ {
++ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
++ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
++ __cpu_to_le32(2),
++ },
++ {
++ __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
++ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
++ __cpu_to_le32(0),
++ },
++ {
++ __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
++ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
++ __cpu_to_le32(1),
++ },
++ { /* not used */
++ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
++ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
++ __cpu_to_le32(0),
++ },
++ { /* not used */
++ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
++ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
++ __cpu_to_le32(1),
++ },
++ {
++ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
++ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
++ __cpu_to_le32(4),
++ },
++ {
++ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
++ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
++ __cpu_to_le32(1),
++ },
++
++ /* (Additions here) */
++
++ { /* must be last */
++ __cpu_to_le32(0),
++ __cpu_to_le32(0),
++ __cpu_to_le32(0),
++ },
++};
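
The service map above is terminated by an all-zero sentinel entry rather than an explicit length, so both host and firmware can walk it the same way. A stand-alone C sketch of how a (service, direction) pair can be resolved against such a table; the lookup helper and example IDs are hypothetical, not the driver's actual code:

#include <stdio.h>

struct service_to_pipe {
	unsigned int service_id; /* 0 terminates the table */
	unsigned int pipedir;
	unsigned int pipenum;
};

static const struct service_to_pipe map[] = {
	{ 1, 0, 3 },  /* e.g. WMI control, host -> target */
	{ 1, 1, 2 },  /* e.g. WMI control, target -> host */
	{ 0, 0, 0 },  /* must be last */
};

static int service_to_pipe(unsigned int svc, unsigned int dir,
			   unsigned int *pipe)
{
	const struct service_to_pipe *e;

	for (e = map; e->service_id != 0; e++) {
		if (e->service_id == svc && e->pipedir == dir) {
			*pipe = e->pipenum;
			return 0;
		}
	}
	return -1; /* unmapped service */
}

int main(void)
{
	unsigned int pipe;

	if (!service_to_pipe(1, 0, &pipe))
		printf("service 1 out -> CE %u\n", pipe);
	return 0;
}

The "(Additions here)" marker in the real table exists precisely so new entries land before the sentinel.
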
++
+ static bool ath10k_pci_irq_pending(struct ath10k *ar)
+ {
+ u32 cause;
+@@ -253,8 +355,8 @@ static void ath10k_pci_disable_and_clear
+
+ /* IMPORTANT: this extra read transaction is required to
+ * flush the posted write buffer. */
+- (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+- PCIE_INTR_ENABLE_ADDRESS);
++ (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
++ PCIE_INTR_ENABLE_ADDRESS);
+ }
+
+ static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
+@@ -265,48 +367,116 @@ static void ath10k_pci_enable_legacy_irq
+
+ /* IMPORTANT: this extra read transaction is required to
+ * flush the posted write buffer. */
+- (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+- PCIE_INTR_ENABLE_ADDRESS);
++ (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
++ PCIE_INTR_ENABLE_ADDRESS);
+ }
+
+-static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
++static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
+ {
+- struct ath10k *ar = arg;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+- if (ar_pci->num_msi_intrs == 0) {
+- if (!ath10k_pci_irq_pending(ar))
+- return IRQ_NONE;
+-
+- ath10k_pci_disable_and_clear_legacy_irq(ar);
+- }
++ if (ar_pci->num_msi_intrs > 1)
++ return "msi-x";
+
+- tasklet_schedule(&ar_pci->early_irq_tasklet);
++ if (ar_pci->num_msi_intrs == 1)
++ return "msi";
+
+- return IRQ_HANDLED;
++ return "legacy";
+ }
+
+-static int ath10k_pci_request_early_irq(struct ath10k *ar)
++static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
+ {
++ struct ath10k *ar = pipe->hif_ce_state;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
++ struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
++ struct sk_buff *skb;
++ dma_addr_t paddr;
+ int ret;
+
+- /* Regardless whether MSI-X/MSI/legacy irqs have been set up the first
+- * interrupt from irq vector is triggered in all cases for FW
+- * indication/errors */
+- ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
+- IRQF_SHARED, "ath10k_pci (early)", ar);
++ lockdep_assert_held(&ar_pci->ce_lock);
++
++ skb = dev_alloc_skb(pipe->buf_sz);
++ if (!skb)
++ return -ENOMEM;
++
++ WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
++
++ paddr = dma_map_single(ar->dev, skb->data,
++ skb->len + skb_tailroom(skb),
++ DMA_FROM_DEVICE);
++ if (unlikely(dma_mapping_error(ar->dev, paddr))) {
++ ath10k_warn(ar, "failed to dma map pci rx buf\n");
++ dev_kfree_skb_any(skb);
++ return -EIO;
++ }
++
++ ATH10K_SKB_RXCB(skb)->paddr = paddr;
++
++ ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
+ if (ret) {
+- ath10k_warn("failed to request early irq: %d\n", ret);
++ ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
++ dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
++ DMA_FROM_DEVICE);
++ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+ }
+
+-static void ath10k_pci_free_early_irq(struct ath10k *ar)
++static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
++{
++ struct ath10k *ar = pipe->hif_ce_state;
++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
++ struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
++ int ret, num;
++
++ lockdep_assert_held(&ar_pci->ce_lock);
++
++ if (pipe->buf_sz == 0)
++ return;
++
++ if (!ce_pipe->dest_ring)
++ return;
++
++ num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
++ while (num--) {
++ ret = __ath10k_pci_rx_post_buf(pipe);
++ if (ret) {
++ ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
++ mod_timer(&ar_pci->rx_post_retry, jiffies +
++ ATH10K_PCI_RX_POST_RETRY_MS);
++ break;
++ }
++ }
++}
++
++static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
++{
++ struct ath10k *ar = pipe->hif_ce_state;
++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
++
++ spin_lock_bh(&ar_pci->ce_lock);
++ __ath10k_pci_rx_post_pipe(pipe);
++ spin_unlock_bh(&ar_pci->ce_lock);
++}
++
++static void ath10k_pci_rx_post(struct ath10k *ar)
++{
++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
++ int i;
++
++ spin_lock_bh(&ar_pci->ce_lock);
++ for (i = 0; i < CE_COUNT; i++)
++ __ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
++ spin_unlock_bh(&ar_pci->ce_lock);
++}
++
++static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
+ {
+- free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
++ struct ath10k *ar = (void *)ptr;
++
++ ath10k_pci_rx_post(ar);
+ }
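
The rx-post helpers above replenish the rings until an allocation fails, then arm rx_post_retry so the whole pass is re-run later instead of leaving the ring short. A stand-alone C sketch of that replenish-with-retry idea; the timer is simulated with a flag and all names are hypothetical:

#include <stdio.h>

static int alloc_and_post(int i)
{
	return i < 3 ? 0 : -1;  /* pretend the 4th allocation fails */
}

static int retry_armed;

static void rx_post(int free_slots)
{
	int i;

	for (i = 0; i < free_slots; i++) {
		if (alloc_and_post(i)) {
			/* transient failure: schedule rx_post() again
			 * later rather than dropping buffers for good */
			retry_armed = 1;
			break;
		}
	}
}

int main(void)
{
	rx_post(8);
	printf("retry timer armed: %d\n", retry_armed);
	return 0;
}
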
+
+ /*
+@@ -330,24 +500,7 @@ static int ath10k_pci_diag_read_mem(stru
+ void *data_buf = NULL;
+ int i;
+
+- /*
+- * This code cannot handle reads to non-memory space. Redirect to the
+- * register read fn but preserve the multi word read capability of
+- * this fn
+- */
+- if (address < DRAM_BASE_ADDRESS) {
+- if (!IS_ALIGNED(address, 4) ||
+- !IS_ALIGNED((unsigned long)data, 4))
+- return -EIO;
+-
+- while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
+- ar, address, (u32 *)data)) == 0)) {
+- nbytes -= sizeof(u32);
+- address += sizeof(u32);
+- data += sizeof(u32);
+- }
+- return ret;
+- }
++ spin_lock_bh(&ar_pci->ce_lock);
+
+ ce_diag = ar_pci->ce_diag;
+
+@@ -375,7 +528,7 @@ static int ath10k_pci_diag_read_mem(stru
+ nbytes = min_t(unsigned int, remaining_bytes,
+ DIAG_TRANSFER_LIMIT);
+
+- ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
++ ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
+ if (ret != 0)
+ goto done;
+
+@@ -388,20 +541,18 @@ static int ath10k_pci_diag_read_mem(stru
+ * convert it from Target CPU virtual address space
+ * to CE address space
+ */
+- ath10k_pci_wake(ar);
+ address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
+ address);
+- ath10k_pci_sleep(ar);
+
+- ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
+- 0);
++ ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
++ 0);
+ if (ret)
+ goto done;
+
+ i = 0;
+- while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
+- &completed_nbytes,
+- &id) != 0) {
++ while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
++ &completed_nbytes,
++ &id) != 0) {
+ mdelay(1);
+ if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ ret = -EBUSY;
+@@ -414,15 +565,15 @@ static int ath10k_pci_diag_read_mem(stru
+ goto done;
+ }
+
+- if (buf != (u32) address) {
++ if (buf != (u32)address) {
+ ret = -EIO;
+ goto done;
+ }
+
+ i = 0;
+- while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
+- &completed_nbytes,
+- &id, &flags) != 0) {
++ while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
++ &completed_nbytes,
++ &id, &flags) != 0) {
+ mdelay(1);
+
+ if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+@@ -447,38 +598,60 @@ static int ath10k_pci_diag_read_mem(stru
+ }
+
+ done:
+- if (ret == 0) {
+- /* Copy data from allocated DMA buf to caller's buf */
+- WARN_ON_ONCE(orig_nbytes & 3);
+- for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
+- ((u32 *)data)[i] =
+- __le32_to_cpu(((__le32 *)data_buf)[i]);
+- }
+- } else
+- ath10k_warn("failed to read diag value at 0x%x: %d\n",
++ if (ret == 0)
++ memcpy(data, data_buf, orig_nbytes);
++ else
++ ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
+ address, ret);
+
+ if (data_buf)
+ dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+ ce_data_base);
+
++ spin_unlock_bh(&ar_pci->ce_lock);
++
+ return ret;
+ }
+
+-/* Read 4-byte aligned data from Target memory or register */
+-static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
+- u32 *data)
+-{
+- /* Assume range doesn't cross this boundary */
+- if (address >= DRAM_BASE_ADDRESS)
+- return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
++static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
++{
++ __le32 val = 0;
++ int ret;
++
++ ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
++ *value = __le32_to_cpu(val);
++
++ return ret;
++}
++
++static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
++ u32 src, u32 len)
++{
++ u32 host_addr, addr;
++ int ret;
++
++ host_addr = host_interest_item_address(src);
++
++ ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
++ if (ret != 0) {
++ ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
++ src, ret);
++ return ret;
++ }
++
++ ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
++ if (ret != 0) {
++ ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
++ addr, len, ret);
++ return ret;
++ }
+
+- ath10k_pci_wake(ar);
+- *data = ath10k_pci_read32(ar, address);
+- ath10k_pci_sleep(ar);
+ return 0;
+ }
+
++#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
++ __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
++
+ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
+ const void *data, int nbytes)
+ {
+@@ -494,6 +667,8 @@ static int ath10k_pci_diag_write_mem(str
+ dma_addr_t ce_data_base = 0;
+ int i;
+
++ spin_lock_bh(&ar_pci->ce_lock);
++
+ ce_diag = ar_pci->ce_diag;
+
+ /*
+@@ -513,9 +688,7 @@ static int ath10k_pci_diag_write_mem(str
+ }
+
+ /* Copy caller's data to allocated DMA buf */
+- WARN_ON_ONCE(orig_nbytes & 3);
+- for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
+- ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
++ memcpy(data_buf, data, orig_nbytes);
+
+ /*
+ * The address supplied by the caller is in the
+@@ -527,9 +700,7 @@ static int ath10k_pci_diag_write_mem(str
+ * to
+ * CE address space
+ */
+- ath10k_pci_wake(ar);
+ address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
+- ath10k_pci_sleep(ar);
+
+ remaining_bytes = orig_nbytes;
+ ce_data = ce_data_base;
+@@ -538,7 +709,7 @@ static int ath10k_pci_diag_write_mem(str
+ nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
+
+ /* Set up to receive directly into Target(!) address */
+- ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
++ ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
+ if (ret != 0)
+ goto done;
+
+@@ -546,15 +717,15 @@ static int ath10k_pci_diag_write_mem(str
+ * Request CE to send caller-supplied data that
+ * was copied to bounce buffer to Target(!) address.
+ */
+- ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
+- nbytes, 0, 0);
++ ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
++ nbytes, 0, 0);
+ if (ret != 0)
+ goto done;
+
+ i = 0;
+- while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
+- &completed_nbytes,
+- &id) != 0) {
++ while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
++ &completed_nbytes,
++ &id) != 0) {
+ mdelay(1);
+
+ if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+@@ -574,9 +745,9 @@ static int ath10k_pci_diag_write_mem(str
+ }
+
+ i = 0;
+- while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
+- &completed_nbytes,
+- &id, &flags) != 0) {
++ while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
++ &completed_nbytes,
++ &id, &flags) != 0) {
+ mdelay(1);
+
+ if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+@@ -607,66 +778,36 @@ done:
+ }
+
+ if (ret != 0)
+- ath10k_warn("failed to write diag value at 0x%x: %d\n",
++ ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
+ address, ret);
+
++ spin_unlock_bh(&ar_pci->ce_lock);
++
+ return ret;
+ }
+
+-/* Write 4B data to Target memory or register */
+-static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
+- u32 data)
+-{
+- /* Assume range doesn't cross this boundary */
+- if (address >= DRAM_BASE_ADDRESS)
+- return ath10k_pci_diag_write_mem(ar, address, &data,
+- sizeof(u32));
++static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
++{
++ __le32 val = __cpu_to_le32(value);
+
+- ath10k_pci_wake(ar);
+- ath10k_pci_write32(ar, address, data);
+- ath10k_pci_sleep(ar);
+- return 0;
++ return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
+ }
+
+-static bool ath10k_pci_target_is_awake(struct ath10k *ar)
++static bool ath10k_pci_is_awake(struct ath10k *ar)
+ {
+- void __iomem *mem = ath10k_pci_priv(ar)->mem;
+- u32 val;
+- val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
+- RTC_STATE_ADDRESS);
+- return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
++ u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);
++
++ return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
+ }
+
+-int ath10k_do_pci_wake(struct ath10k *ar)
++static int ath10k_pci_wake_wait(struct ath10k *ar)
+ {
+- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+- void __iomem *pci_addr = ar_pci->mem;
+ int tot_delay = 0;
+ int curr_delay = 5;
+
+- if (atomic_read(&ar_pci->keep_awake_count) == 0) {
+- /* Force AWAKE */
+- iowrite32(PCIE_SOC_WAKE_V_MASK,
+- pci_addr + PCIE_LOCAL_BASE_ADDRESS +
+- PCIE_SOC_WAKE_ADDRESS);
+- }
+- atomic_inc(&ar_pci->keep_awake_count);
+-
+- if (ar_pci->verified_awake)
+- return 0;
+-
+- for (;;) {
+- if (ath10k_pci_target_is_awake(ar)) {
+- ar_pci->verified_awake = true;
++ while (tot_delay < PCIE_WAKE_TIMEOUT) {
++ if (ath10k_pci_is_awake(ar))
+ return 0;
+- }
+-
+- if (tot_delay > PCIE_WAKE_TIMEOUT) {
+- ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
+- PCIE_WAKE_TIMEOUT,
+- atomic_read(&ar_pci->keep_awake_count));
+- return -ETIMEDOUT;
+- }
+
+ udelay(curr_delay);
+ tot_delay += curr_delay;
+@@ -674,20 +815,21 @@ int ath10k_do_pci_wake(struct ath10k *ar
+ if (curr_delay < 50)
+ curr_delay += 5;
+ }
++
++ return -ETIMEDOUT;
+ }
+
+-void ath10k_do_pci_sleep(struct ath10k *ar)
++static int ath10k_pci_wake(struct ath10k *ar)
+ {
+- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+- void __iomem *pci_addr = ar_pci->mem;
++ ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
++ PCIE_SOC_WAKE_V_MASK);
++ return ath10k_pci_wake_wait(ar);
++}
+
+- if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
+- /* Allow sleep */
+- ar_pci->verified_awake = false;
+- iowrite32(PCIE_SOC_WAKE_RESET,
+- pci_addr + PCIE_LOCAL_BASE_ADDRESS +
+- PCIE_SOC_WAKE_ADDRESS);
+- }
++static void ath10k_pci_sleep(struct ath10k *ar)
++{
++ ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
++ PCIE_SOC_WAKE_RESET);
+ }
+
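
The simplified ath10k_pci_wake_wait() above polls the awake bit with a delay that grows from 5 to 50 microseconds until a total timeout is reached, trading latency for bus traffic as the wait drags on. A stand-alone userspace C sketch of that backoff loop, with usleep() standing in for the kernel's udelay() and a simulated device:

#include <stdio.h>
#include <unistd.h>

#define WAKE_TIMEOUT_US 10000

static int is_awake(int polls)
{
	return polls > 4;  /* pretend the device wakes after a few polls */
}

static int wake_wait(void)
{
	int tot_delay = 0, curr_delay = 5, polls = 0;

	while (tot_delay < WAKE_TIMEOUT_US) {
		if (is_awake(polls++))
			return 0;

		usleep(curr_delay);
		tot_delay += curr_delay;
		if (curr_delay < 50)
			curr_delay += 5;  /* back off gently */
	}
	return -1;  /* -ETIMEDOUT in the driver */
}

int main(void)
{
	printf("wake_wait: %d\n", wake_wait());
	return 0;
}

Starting with short delays keeps the common fast-wake case cheap, while the cap at 50 us bounds how coarse the polling ever gets.
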
+ /* Called by lower (CE) layer when a send to Target completes. */
|
|
+@@ -696,20 +838,24 @@ static void ath10k_pci_ce_send_done(stru
|
|
+ struct ath10k *ar = ce_state->ar;
|
|
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
+ struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
|
|
+- void *transfer_context;
|
|
++ struct sk_buff_head list;
|
|
++ struct sk_buff *skb;
|
|
+ u32 ce_data;
|
|
+ unsigned int nbytes;
|
|
+ unsigned int transfer_id;
|
|
+
|
|
+- while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
|
|
+- &ce_data, &nbytes,
|
|
+- &transfer_id) == 0) {
|
|
++ __skb_queue_head_init(&list);
|
|
++ while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
|
|
++ &nbytes, &transfer_id) == 0) {
|
|
+ /* no need to call tx completion for NULL pointers */
|
|
+- if (transfer_context == NULL)
|
|
++ if (skb == NULL)
|
|
+ continue;
|
|
+
|
|
+- cb->tx_completion(ar, transfer_context, transfer_id);
|
|
++ __skb_queue_tail(&list, skb);
|
|
+ }
|
|
++
|
|
++ while ((skb = __skb_dequeue(&list)))
|
|
++ cb->tx_completion(ar, skb);
|
|
+ }
+
+ /* Called by lower (CE) layer when data is received from the Target. */
+@@ -720,38 +866,43 @@ static void ath10k_pci_ce_recv_data(stru
+ struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
+ struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
+ struct sk_buff *skb;
++ struct sk_buff_head list;
+ void *transfer_context;
+ u32 ce_data;
+ unsigned int nbytes, max_nbytes;
+ unsigned int transfer_id;
+ unsigned int flags;
+- int err;
+
++ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
+ &ce_data, &nbytes, &transfer_id,
+ &flags) == 0) {
+- err = ath10k_pci_post_rx_pipe(pipe_info, 1);
+- if (unlikely(err)) {
+- /* FIXME: retry */
+- ath10k_warn("failed to replenish CE rx ring %d: %d\n",
+- pipe_info->pipe_num, err);
+- }
+-
+ skb = transfer_context;
+ max_nbytes = skb->len + skb_tailroom(skb);
+- dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
++ dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ max_nbytes, DMA_FROM_DEVICE);
+
+ if (unlikely(max_nbytes < nbytes)) {
+- ath10k_warn("rxed more than expected (nbytes %d, max %d)",
++ ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
+ nbytes, max_nbytes);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ skb_put(skb, nbytes);
+- cb->rx_completion(ar, skb, pipe_info->pipe_num);
++ __skb_queue_tail(&list, skb);
++ }
++
++ while ((skb = __skb_dequeue(&list))) {
++ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
++ ce_state->id, skb->len);
++ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
++ skb->data, skb->len);
++
++ cb->rx_completion(ar, skb);
+ }
++
++ ath10k_pci_rx_post_pipe(pipe_info);
+ }
+
+ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+@@ -761,24 +912,28 @@ static int ath10k_pci_hif_tx_sg(struct a
+ struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
+ struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
+ struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
+- unsigned int nentries_mask = src_ring->nentries_mask;
+- unsigned int sw_index = src_ring->sw_index;
+- unsigned int write_index = src_ring->write_index;
+- int err, i;
++ unsigned int nentries_mask;
++ unsigned int sw_index;
++ unsigned int write_index;
++ int err, i = 0;
+
+ spin_lock_bh(&ar_pci->ce_lock);
+
++ nentries_mask = src_ring->nentries_mask;
++ sw_index = src_ring->sw_index;
++ write_index = src_ring->write_index;
++
+ if (unlikely(CE_RING_DELTA(nentries_mask,
+ write_index, sw_index - 1) < n_items)) {
+ err = -ENOBUFS;
+- goto unlock;
++ goto err;
+ }
+
+ for (i = 0; i < n_items - 1; i++) {
+- ath10k_dbg(ATH10K_DBG_PCI,
++ ath10k_dbg(ar, ATH10K_DBG_PCI,
+ "pci tx item %d paddr 0x%08x len %d n_items %d\n",
+ i, items[i].paddr, items[i].len, n_items);
+- ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
++ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
+ items[i].vaddr, items[i].len);
+
+ err = ath10k_ce_send_nolock(ce_pipe,
+@@ -788,15 +943,15 @@ static int ath10k_pci_hif_tx_sg(struct a
+ items[i].transfer_id,
+ CE_SEND_FLAG_GATHER);
+ if (err)
+- goto unlock;
++ goto err;
+ }
+
+ /* `i` is equal to `n_items -1` after for() */
+
+- ath10k_dbg(ATH10K_DBG_PCI,
++ ath10k_dbg(ar, ATH10K_DBG_PCI,
+ "pci tx item %d paddr 0x%08x len %d n_items %d\n",
+ i, items[i].paddr, items[i].len, n_items);
+- ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
++ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
+ items[i].vaddr, items[i].len);
+
+ err = ath10k_ce_send_nolock(ce_pipe,
+@@ -806,64 +961,89 @@ static int ath10k_pci_hif_tx_sg(struct a
+ items[i].transfer_id,
+ 0);
+ if (err)
+- goto unlock;
++ goto err;
++
++ spin_unlock_bh(&ar_pci->ce_lock);
++ return 0;
++
++err:
++ for (; i > 0; i--)
++ __ath10k_ce_send_revert(ce_pipe);
+
+- err = 0;
+-unlock:
+ spin_unlock_bh(&ar_pci->ce_lock);
+ return err;
+ }
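
Note the new error path above: the ring indices are now sampled only after taking ce_lock, and a partially posted multi-item gather list is undone with __ath10k_ce_send_revert() so the post is all-or-nothing. The control flow reduced to its skeleton (struct sg_ctx, post_one and revert_one are hypothetical stand-ins):

    /* Illustrative only: enqueue n items atomically under a lock,
     * reverting the i already-posted descriptors on failure. */
    static int post_all_or_nothing(struct sg_ctx *ctx, int n)
    {
            int i, err = 0;

            spin_lock_bh(&ctx->lock);
            for (i = 0; i < n; i++) {
                    err = post_one(ctx, i);        /* hypothetical */
                    if (err)
                            goto revert;
            }
            spin_unlock_bh(&ctx->lock);
            return 0;

    revert:
            for (; i > 0; i--)
                    revert_one(ctx);               /* hypothetical */
            spin_unlock_bh(&ctx->lock);
            return err;
    }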
+
++static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
++ size_t buf_len)
++{
++ return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
++}
++
+ static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
+ {
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+- ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");
++ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
+
+ return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
+ }
+
+-static void ath10k_pci_hif_dump_area(struct ath10k *ar)
++static void ath10k_pci_dump_registers(struct ath10k *ar,
++ struct ath10k_fw_crash_data *crash_data)
+ {
+- u32 reg_dump_area = 0;
+- u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
+- u32 host_addr;
+- int ret;
+- u32 i;
+-
+- ath10k_err("firmware crashed!\n");
+- ath10k_err("hardware name %s version 0x%x\n",
+- ar->hw_params.name, ar->target_version);
+- ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);
+-
+- host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
+- ret = ath10k_pci_diag_read_mem(ar, host_addr,
+- &reg_dump_area, sizeof(u32));
+- if (ret) {
+- ath10k_err("failed to read FW dump area address: %d\n", ret);
+- return;
+- }
++ __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
++ int i, ret;
+
+- ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);
++ lockdep_assert_held(&ar->data_lock);
+
+- ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
+- &reg_dump_values[0],
+- REG_DUMP_COUNT_QCA988X * sizeof(u32));
+- if (ret != 0) {
+- ath10k_err("failed to read FW dump area: %d\n", ret);
++ ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
++ hi_failure_state,
++ REG_DUMP_COUNT_QCA988X * sizeof(__le32));
++ if (ret) {
++ ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
+ return;
+ }
+
+ BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
+
+- ath10k_err("target Register Dump\n");
++ ath10k_err(ar, "firmware register dump:\n");
+ for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
+- ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
++ ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ i,
+- reg_dump_values[i],
+- reg_dump_values[i + 1],
+- reg_dump_values[i + 2],
+- reg_dump_values[i + 3]);
++ __le32_to_cpu(reg_dump_values[i]),
++ __le32_to_cpu(reg_dump_values[i + 1]),
++ __le32_to_cpu(reg_dump_values[i + 2]),
++ __le32_to_cpu(reg_dump_values[i + 3]));
++
++ if (!crash_data)
++ return;
++
++ for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
++ crash_data->registers[i] = reg_dump_values[i];
++}
++
++static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
++{
++ struct ath10k_fw_crash_data *crash_data;
++ char uuid[50];
++
++ spin_lock_bh(&ar->data_lock);
++
++ ar->stats.fw_crash_counter++;
++
++ crash_data = ath10k_debug_get_new_fw_crash_data(ar);
++
++ if (crash_data)
++ scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
++ else
++ scnprintf(uuid, sizeof(uuid), "n/a");
++
++ ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
++ ath10k_print_driver_info(ar);
++ ath10k_pci_dump_registers(ar, crash_data);
++
++ spin_unlock_bh(&ar->data_lock);
+
+ queue_work(ar->workqueue, &ar->restart_work);
+ }
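
Everything in the crash path above runs under ar->data_lock: the crash counter, the crash-data slot, the UUID formatting (%pUl is the kernel's printf extension for little-endian UUIDs, hence the 50-byte buffer) and the register dump; only the heavyweight recovery is deferred to process context after the lock drops. A compressed sketch of that discipline, with snapshot_registers() as a hypothetical stand-in:

    /* Illustrative only: snapshot crash state under a spinlock, then
     * hand actual recovery to a workqueue outside the lock. */
    static void report_crash(struct ath10k *ar)
    {
            spin_lock_bh(&ar->data_lock);
            ar->stats.fw_crash_counter++;
            snapshot_registers(ar);        /* hypothetical */
            spin_unlock_bh(&ar->data_lock);

            queue_work(ar->workqueue, &ar->restart_work);
    }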
+@@ -871,7 +1051,7 @@ static void ath10k_pci_hif_dump_area(str
+ static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
+ int force)
+ {
+- ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");
++ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
+
+ if (!force) {
+ int resources;
+@@ -899,43 +1079,12 @@ static void ath10k_pci_hif_set_callbacks
+ {
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+- ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");
++ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");
+
+ memcpy(&ar_pci->msg_callbacks_current, callbacks,
+ sizeof(ar_pci->msg_callbacks_current));
+ }
+
+-static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
+-{
+- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+- const struct ce_attr *attr;
+- struct ath10k_pci_pipe *pipe_info;
+- int pipe_num, disable_interrupts;
+-
+- for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+- pipe_info = &ar_pci->pipe_info[pipe_num];
+-
+- /* Handle Diagnostic CE specially */
+- if (pipe_info->ce_hdl == ar_pci->ce_diag)
+- continue;
+-
+- attr = &host_ce_config_wlan[pipe_num];
+-
+- if (attr->src_nentries) {
+- disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
+- ath10k_ce_send_cb_register(pipe_info->ce_hdl,
+- ath10k_pci_ce_send_done,
+- disable_interrupts);
+- }
+-
+- if (attr->dest_nentries)
+- ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
+- ath10k_pci_ce_recv_data);
+- }
+-
+- return 0;
+-}
+-
+ static void ath10k_pci_kill_tasklet(struct ath10k *ar)
+ {
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+@@ -943,82 +1092,72 @@ static void ath10k_pci_kill_tasklet(stru
+
+ tasklet_kill(&ar_pci->intr_tq);
+ tasklet_kill(&ar_pci->msi_fw_err);
+- tasklet_kill(&ar_pci->early_irq_tasklet);
+
+ for (i = 0; i < CE_COUNT; i++)
+ tasklet_kill(&ar_pci->pipe_info[i].intr);
++
++ del_timer_sync(&ar_pci->rx_post_retry);
+ }
+
+-/* TODO - temporary mapping while we have too few CE's */
+ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
+ u16 service_id, u8 *ul_pipe,
+ u8 *dl_pipe, int *ul_is_polled,
+ int *dl_is_polled)
+ {
+- int ret = 0;
++ const struct service_to_pipe *entry;
++ bool ul_set = false, dl_set = false;
++ int i;
+
+- ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");
++ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
+
+ /* polling for received messages not supported */
+ *dl_is_polled = 0;
+
+- switch (service_id) {
+- case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
+- /*
+- * Host->target HTT gets its own pipe, so it can be polled
+- * while other pipes are interrupt driven.
+- */
+- *ul_pipe = 4;
+- /*
+- * Use the same target->host pipe for HTC ctrl, HTC raw
+- * streams, and HTT.
+- */
+- *dl_pipe = 1;
+- break;
++ for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
++ entry = &target_service_to_ce_map_wlan[i];
+
+- case ATH10K_HTC_SVC_ID_RSVD_CTRL:
+- case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
+- /*
+- * Note: HTC_RAW_STREAMS_SVC is currently unused, and
+- * HTC_CTRL_RSVD_SVC could share the same pipe as the
+- * WMI services. So, if another CE is needed, change
+- * this to *ul_pipe = 3, which frees up CE 0.
+- */
+- /* *ul_pipe = 3; */
+- *ul_pipe = 0;
+- *dl_pipe = 1;
+- break;
++ if (__le32_to_cpu(entry->service_id) != service_id)
++ continue;
+
+- case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
+- case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
+- case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
+- case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
+-
+- case ATH10K_HTC_SVC_ID_WMI_CONTROL:
+- *ul_pipe = 3;
+- *dl_pipe = 2;
+- break;
++ switch (__le32_to_cpu(entry->pipedir)) {
++ case PIPEDIR_NONE:
++ break;
++ case PIPEDIR_IN:
++ WARN_ON(dl_set);
++ *dl_pipe = __le32_to_cpu(entry->pipenum);
++ dl_set = true;
++ break;
++ case PIPEDIR_OUT:
++ WARN_ON(ul_set);
++ *ul_pipe = __le32_to_cpu(entry->pipenum);
++ ul_set = true;
++ break;
++ case PIPEDIR_INOUT:
++ WARN_ON(dl_set);
++ WARN_ON(ul_set);
++ *dl_pipe = __le32_to_cpu(entry->pipenum);
++ *ul_pipe = __le32_to_cpu(entry->pipenum);
++ dl_set = true;
++ ul_set = true;
++ break;
++ }
++ }
+
+- /* pipe 5 unused */
+- /* pipe 6 reserved */
+- /* pipe 7 reserved */
++ if (WARN_ON(!ul_set || !dl_set))
++ return -ENOENT;
+
+- default:
+- ret = -1;
+- break;
+- }
+ *ul_is_polled =
+ (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
+
+- return ret;
++ return 0;
+ }
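
The hard-coded switch is replaced by a walk over target_service_to_ce_map_wlan, the same little-endian table that init_config() later copies to the firmware, so host and target can no longer disagree about the mapping. A sketch of such a table-driven lookup (entry layout as in the patch; surrounding types and PIPEDIR_* constants assumed from the driver):

    /* Illustrative only: resolve service -> (ul, dl) pipes from a
     * shared, wire-endian table instead of a hard-coded switch. */
    struct service_to_pipe {
            __le32 service_id;
            __le32 pipedir;        /* PIPEDIR_IN/OUT/INOUT/NONE */
            __le32 pipenum;
    };

    static int map_service(const struct service_to_pipe *tab, size_t n,
                           u16 service_id, u8 *ul_pipe, u8 *dl_pipe)
    {
            bool ul_set = false, dl_set = false;
            size_t i;

            for (i = 0; i < n; i++) {
                    if (__le32_to_cpu(tab[i].service_id) != service_id)
                            continue;

                    switch (__le32_to_cpu(tab[i].pipedir)) {
                    case PIPEDIR_IN:
                            *dl_pipe = __le32_to_cpu(tab[i].pipenum);
                            dl_set = true;
                            break;
                    case PIPEDIR_OUT:
                            *ul_pipe = __le32_to_cpu(tab[i].pipenum);
                            ul_set = true;
                            break;
                    case PIPEDIR_INOUT:
                            *ul_pipe = *dl_pipe = __le32_to_cpu(tab[i].pipenum);
                            ul_set = dl_set = true;
                            break;
                    }
            }

            return (ul_set && dl_set) ? 0 : -ENOENT;
    }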
+
+ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
+- u8 *ul_pipe, u8 *dl_pipe)
++ u8 *ul_pipe, u8 *dl_pipe)
+ {
+ int ul_is_polled, dl_is_polled;
+
+- ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");
++ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
+
+ (void)ath10k_pci_hif_map_service_to_pipe(ar,
+ ATH10K_HTC_SVC_ID_RSVD_CTRL,
+@@ -1028,209 +1167,127 @@ static void ath10k_pci_hif_get_default_p
+ &dl_is_polled);
+ }
+
+-static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
+- int num)
++static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
+ {
+- struct ath10k *ar = pipe_info->hif_ce_state;
+- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+- struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
+- struct sk_buff *skb;
+- dma_addr_t ce_data;
+- int i, ret = 0;
++ u32 val;
+
+- if (pipe_info->buf_sz == 0)
+- return 0;
++ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
++ val &= ~CORE_CTRL_PCIE_REG_31_MASK;
+
+- for (i = 0; i < num; i++) {
+- skb = dev_alloc_skb(pipe_info->buf_sz);
+- if (!skb) {
+- ath10k_warn("failed to allocate skbuff for pipe %d\n",
+- num);
+- ret = -ENOMEM;
+- goto err;
+- }
++ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
++}
+
+- WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
++static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
++{
++ u32 val;
+
+- ce_data = dma_map_single(ar->dev, skb->data,
+- skb->len + skb_tailroom(skb),
+- DMA_FROM_DEVICE);
++ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
++ val |= CORE_CTRL_PCIE_REG_31_MASK;
+
+- if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
+- ath10k_warn("failed to DMA map sk_buff\n");
+- dev_kfree_skb_any(skb);
+- ret = -EIO;
+- goto err;
+- }
++ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
++}
+
+- ATH10K_SKB_CB(skb)->paddr = ce_data;
++static void ath10k_pci_irq_disable(struct ath10k *ar)
++{
++ ath10k_ce_disable_interrupts(ar);
++ ath10k_pci_disable_and_clear_legacy_irq(ar);
++ ath10k_pci_irq_msi_fw_mask(ar);
++}
+
+- pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
+- pipe_info->buf_sz,
+- PCI_DMA_FROMDEVICE);
+-
+- ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
+- ce_data);
+- if (ret) {
+- ath10k_warn("failed to enqueue to pipe %d: %d\n",
+- num, ret);
+- goto err;
+- }
+- }
+-
+- return ret;
+-
+-err:
+- ath10k_pci_rx_pipe_cleanup(pipe_info);
+- return ret;
+-}
+-
+-static int ath10k_pci_post_rx(struct ath10k *ar)
++static void ath10k_pci_irq_sync(struct ath10k *ar)
+ {
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+- struct ath10k_pci_pipe *pipe_info;
+- const struct ce_attr *attr;
+- int pipe_num, ret = 0;
+-
+- for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+- pipe_info = &ar_pci->pipe_info[pipe_num];
+- attr = &host_ce_config_wlan[pipe_num];
+-
+- if (attr->dest_nentries == 0)
+- continue;
+-
+- ret = ath10k_pci_post_rx_pipe(pipe_info,
+- attr->dest_nentries - 1);
+- if (ret) {
+- ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
+- pipe_num, ret);
++ int i;
+
+- for (; pipe_num >= 0; pipe_num--) {
+- pipe_info = &ar_pci->pipe_info[pipe_num];
+- ath10k_pci_rx_pipe_cleanup(pipe_info);
+- }
+- return ret;
+- }
+- }
++ for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
++ synchronize_irq(ar_pci->pdev->irq + i);
++}
+
+- return 0;
++static void ath10k_pci_irq_enable(struct ath10k *ar)
++{
++ ath10k_ce_enable_interrupts(ar);
++ ath10k_pci_enable_legacy_irq(ar);
++ ath10k_pci_irq_msi_fw_unmask(ar);
+ }
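
irq_disable/irq_enable now bracket three distinct interrupt sources (copy engine interrupts, the legacy PCI line, and the firmware MSI), the last one via a read-modify-write of the CORE_CTRL register's bit 31, while irq_sync calls synchronize_irq() once per allocated vector so in-flight handlers drain before teardown continues. The mask helpers are plain RMW cycles; their general shape, as a sketch (reg_read32/reg_write32 are hypothetical stand-ins for the driver's accessors):

    /* Illustrative only: read-modify-write of a single mask bit. */
    static void rmw_mask_bit(struct ath10k *ar, u32 addr, u32 mask,
                             bool set)
    {
            u32 val = reg_read32(ar, addr);        /* hypothetical */

            if (set)
                    val |= mask;
            else
                    val &= ~mask;

            reg_write32(ar, addr, val);            /* hypothetical */
    }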
+
+ static int ath10k_pci_hif_start(struct ath10k *ar)
+ {
+- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+- int ret, ret_early;
+-
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
+-
+- ath10k_pci_free_early_irq(ar);
+- ath10k_pci_kill_tasklet(ar);
+-
+- ret = ath10k_pci_request_irq(ar);
+- if (ret) {
+- ath10k_warn("failed to post RX buffers for all pipes: %d\n",
+- ret);
+- goto err_early_irq;
+- }
+-
+- ret = ath10k_pci_setup_ce_irq(ar);
+- if (ret) {
+- ath10k_warn("failed to setup CE interrupts: %d\n", ret);
+- goto err_stop;
+- }
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
+
+- /* Post buffers once to start things off. */
+- ret = ath10k_pci_post_rx(ar);
+- if (ret) {
+- ath10k_warn("failed to post RX buffers for all pipes: %d\n",
+- ret);
+- goto err_stop;
+- }
++ ath10k_pci_irq_enable(ar);
++ ath10k_pci_rx_post(ar);
+
+- ar_pci->started = 1;
+ return 0;
+-
+-err_stop:
+- ath10k_ce_disable_interrupts(ar);
+- ath10k_pci_free_irq(ar);
+- ath10k_pci_kill_tasklet(ar);
+-err_early_irq:
+- /* Though there should be no interrupts (device was reset)
+- * power_down() expects the early IRQ to be installed as per the
+- * driver lifecycle. */
+- ret_early = ath10k_pci_request_early_irq(ar);
+- if (ret_early)
+- ath10k_warn("failed to re-enable early irq: %d\n", ret_early);
+-
+- return ret;
+ }
+
+-static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
++static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
+ {
+ struct ath10k *ar;
+- struct ath10k_pci *ar_pci;
+- struct ath10k_ce_pipe *ce_hdl;
+- u32 buf_sz;
+- struct sk_buff *netbuf;
+- u32 ce_data;
++ struct ath10k_ce_pipe *ce_pipe;
++ struct ath10k_ce_ring *ce_ring;
++ struct sk_buff *skb;
++ int i;
+
+- buf_sz = pipe_info->buf_sz;
++ ar = pci_pipe->hif_ce_state;
++ ce_pipe = pci_pipe->ce_hdl;
++ ce_ring = ce_pipe->dest_ring;
+
+- /* Unused Copy Engine */
+- if (buf_sz == 0)
++ if (!ce_ring)
+ return;
+
+- ar = pipe_info->hif_ce_state;
+- ar_pci = ath10k_pci_priv(ar);
+-
+- if (!ar_pci->started)
++ if (!pci_pipe->buf_sz)
+ return;
+
+- ce_hdl = pipe_info->ce_hdl;
++ for (i = 0; i < ce_ring->nentries; i++) {
++ skb = ce_ring->per_transfer_context[i];
++ if (!skb)
++ continue;
++
++ ce_ring->per_transfer_context[i] = NULL;
+
+- while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
+- &ce_data) == 0) {
+- dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
+- netbuf->len + skb_tailroom(netbuf),
++ dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
++ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+- dev_kfree_skb_any(netbuf);
++ dev_kfree_skb_any(skb);
+ }
+ }
+
+-static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
++static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
+ {
+ struct ath10k *ar;
+ struct ath10k_pci *ar_pci;
+- struct ath10k_ce_pipe *ce_hdl;
+- struct sk_buff *netbuf;
+- u32 ce_data;
+- unsigned int nbytes;
++ struct ath10k_ce_pipe *ce_pipe;
++ struct ath10k_ce_ring *ce_ring;
++ struct ce_desc *ce_desc;
++ struct sk_buff *skb;
+ unsigned int id;
+- u32 buf_sz;
++ int i;
+
+- buf_sz = pipe_info->buf_sz;
++ ar = pci_pipe->hif_ce_state;
++ ar_pci = ath10k_pci_priv(ar);
++ ce_pipe = pci_pipe->ce_hdl;
++ ce_ring = ce_pipe->src_ring;
+
+- /* Unused Copy Engine */
+- if (buf_sz == 0)
++ if (!ce_ring)
+ return;
+
+- ar = pipe_info->hif_ce_state;
+- ar_pci = ath10k_pci_priv(ar);
+-
+- if (!ar_pci->started)
++ if (!pci_pipe->buf_sz)
+ return;
+
+- ce_hdl = pipe_info->ce_hdl;
++ ce_desc = ce_ring->shadow_base;
++ if (WARN_ON(!ce_desc))
++ return;
+
+- while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
+- &ce_data, &nbytes, &id) == 0) {
+- /* no need to call tx completion for NULL pointers */
+- if (!netbuf)
++ for (i = 0; i < ce_ring->nentries; i++) {
++ skb = ce_ring->per_transfer_context[i];
++ if (!skb)
+ continue;
+
+- ar_pci->msg_callbacks_current.tx_completion(ar,
+- netbuf,
+- id);
++ ce_ring->per_transfer_context[i] = NULL;
++ id = MS(__le16_to_cpu(ce_desc[i].flags),
++ CE_DESC_FLAGS_META_DATA);
++
++ ar_pci->msg_callbacks_current.tx_completion(ar, skb);
+ }
+ }
+
+@@ -1264,38 +1321,32 @@ static void ath10k_pci_ce_deinit(struct
+ ath10k_ce_deinit_pipe(ar, i);
+ }
+
+-static void ath10k_pci_hif_stop(struct ath10k *ar)
++static void ath10k_pci_flush(struct ath10k *ar)
+ {
+- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+- int ret;
+-
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
+-
+- ret = ath10k_ce_disable_interrupts(ar);
+- if (ret)
+- ath10k_warn("failed to disable CE interrupts: %d\n", ret);
+-
+- ath10k_pci_free_irq(ar);
+ ath10k_pci_kill_tasklet(ar);
+-
+- ret = ath10k_pci_request_early_irq(ar);
+- if (ret)
+- ath10k_warn("failed to re-enable early irq: %d\n", ret);
+-
+- /* At this point, asynchronous threads are stopped, the target should
+- * not DMA nor interrupt. We process the leftovers and then free
+- * everything else up. */
+-
+ ath10k_pci_buffer_cleanup(ar);
++}
+
+- /* Make the sure the device won't access any structures on the host by
+- * resetting it. The device was fed with PCI CE ringbuffer
+- * configuration during init. If ringbuffers are freed and the device
+- * were to access them this could lead to memory corruption on the
+- * host. */
++static void ath10k_pci_hif_stop(struct ath10k *ar)
++{
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
++
++ /* Most likely the device has HTT Rx ring configured. The only way to
++ * prevent the device from accessing (and possible corrupting) host
++ * memory is to reset the chip now.
++ *
++ * There's also no known way of masking MSI interrupts on the device.
++ * For ranged MSI the CE-related interrupts can be masked. However
++ * regardless how many MSI interrupts are assigned the first one
++ * is always used for firmware indications (crashes) and cannot be
++ * masked. To prevent the device from asserting the interrupt reset it
++ * before proceeding with cleanup.
++ */
+ ath10k_pci_warm_reset(ar);
+
+- ar_pci->started = 0;
++ ath10k_pci_irq_disable(ar);
++ ath10k_pci_irq_sync(ar);
++ ath10k_pci_flush(ar);
+ }
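
The comment above is the key ordering constraint: the chip is reset first precisely because the firmware MSI cannot be masked, and only afterwards are the remaining sources disabled, synchronized and the rings drained. The stop path in outline, as a sketch annotated with the rationale for each step (helpers as shown in the patch above):

    /* Illustrative only: teardown ordering from the patch above. */
    static void hif_stop_outline(struct ath10k *ar)
    {
            ath10k_pci_warm_reset(ar);     /* 1. stop DMA and the unmaskable fw MSI */
            ath10k_pci_irq_disable(ar);    /* 2. mask what can be masked */
            ath10k_pci_irq_sync(ar);       /* 3. wait out in-flight handlers */
            ath10k_pci_flush(ar);          /* 4. kill tasklets, free buffers */
    }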
+
+ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
+@@ -1346,11 +1397,9 @@ static int ath10k_pci_hif_exchange_bmi_m
+ xfer.wait_for_resp = true;
+ xfer.resp_len = 0;
+
+- ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
++ ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
+ }
+
+- init_completion(&xfer.done);
+-
+ ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
+ if (ret)
+ goto err_resp;
+@@ -1401,14 +1450,12 @@ static void ath10k_pci_bmi_send_done(str
+ &nbytes, &transfer_id))
+ return;
+
+- if (xfer->wait_for_resp)
+- return;
+-
+- complete(&xfer->done);
++ xfer->tx_done = true;
+ }
+
+ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
+ {
++ struct ath10k *ar = ce_state->ar;
+ struct bmi_xfer *xfer;
+ u32 ce_data;
+ unsigned int nbytes;
+@@ -1419,13 +1466,16 @@ static void ath10k_pci_bmi_recv_data(str
+ &nbytes, &transfer_id, &flags))
+ return;
+
++ if (WARN_ON_ONCE(!xfer))
++ return;
++
+ if (!xfer->wait_for_resp) {
+- ath10k_warn("unexpected: BMI data received; ignoring\n");
++ ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
+ return;
+ }
+
+ xfer->resp_len = nbytes;
+- complete(&xfer->done);
++ xfer->rx_done = true;
+ }
+
+ static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
+@@ -1438,7 +1488,7 @@ static int ath10k_pci_bmi_wait(struct at
+ ath10k_pci_bmi_send_done(tx_pipe);
+ ath10k_pci_bmi_recv_data(rx_pipe);
+
+- if (completion_done(&xfer->done))
++ if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
+ return 0;
+
+ schedule();
+@@ -1448,131 +1498,48 @@ static int ath10k_pci_bmi_wait(struct at
+ }
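
Replacing the single struct completion with two booleans (tx_done, rx_done) removes the complete() call that could fire on send-done even when a response was still outstanding; the new wait condition, tx_done && (rx_done == wait_for_resp), reads as "the send finished, and a response arrived exactly if we asked for one". A sketch of the polling wait around it, with service_pipes() as a hypothetical stand-in for the two per-pipe service calls:

    #include <linux/jiffies.h>
    #include <linux/sched.h>

    /* Illustrative only: poll both flags with a deadline, servicing
     * the pipes ourselves since interrupts may be unavailable here. */
    static int bmi_wait_done(struct bmi_xfer *xfer, unsigned long timeout)
    {
            unsigned long deadline = jiffies + timeout;

            while (time_before(jiffies, deadline)) {
                    service_pipes(xfer);           /* hypothetical */

                    if (xfer->tx_done &&
                        (xfer->rx_done == xfer->wait_for_resp))
                            return 0;

                    schedule();
            }

            return -ETIMEDOUT;
    }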
+
+ /*
+- * Map from service/endpoint to Copy Engine.
+- * This table is derived from the CE_PCI TABLE, above.
+- * It is passed to the Target at startup for use by firmware.
+- */
+-static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
+- {
+- ATH10K_HTC_SVC_ID_WMI_DATA_VO,
+- PIPEDIR_OUT, /* out = UL = host -> target */
+- 3,
+- },
+- {
+- ATH10K_HTC_SVC_ID_WMI_DATA_VO,
+- PIPEDIR_IN, /* in = DL = target -> host */
+- 2,
+- },
+- {
+- ATH10K_HTC_SVC_ID_WMI_DATA_BK,
+- PIPEDIR_OUT, /* out = UL = host -> target */
+- 3,
+- },
+- {
+- ATH10K_HTC_SVC_ID_WMI_DATA_BK,
+- PIPEDIR_IN, /* in = DL = target -> host */
+- 2,
+- },
+- {
+- ATH10K_HTC_SVC_ID_WMI_DATA_BE,
+- PIPEDIR_OUT, /* out = UL = host -> target */
+- 3,
+- },
+- {
+- ATH10K_HTC_SVC_ID_WMI_DATA_BE,
+- PIPEDIR_IN, /* in = DL = target -> host */
+- 2,
+- },
+- {
+- ATH10K_HTC_SVC_ID_WMI_DATA_VI,
+- PIPEDIR_OUT, /* out = UL = host -> target */
+- 3,
+- },
+- {
+- ATH10K_HTC_SVC_ID_WMI_DATA_VI,
+- PIPEDIR_IN, /* in = DL = target -> host */
+- 2,
+- },
+- {
+- ATH10K_HTC_SVC_ID_WMI_CONTROL,
+- PIPEDIR_OUT, /* out = UL = host -> target */
+- 3,
+- },
+- {
+- ATH10K_HTC_SVC_ID_WMI_CONTROL,
+- PIPEDIR_IN, /* in = DL = target -> host */
+- 2,
+- },
+- {
+- ATH10K_HTC_SVC_ID_RSVD_CTRL,
+- PIPEDIR_OUT, /* out = UL = host -> target */
+- 0, /* could be moved to 3 (share with WMI) */
+- },
+- {
+- ATH10K_HTC_SVC_ID_RSVD_CTRL,
+- PIPEDIR_IN, /* in = DL = target -> host */
+- 1,
+- },
+- {
+- ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
+- PIPEDIR_OUT, /* out = UL = host -> target */
+- 0,
+- },
+- {
+- ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
+- PIPEDIR_IN, /* in = DL = target -> host */
+- 1,
+- },
+- {
+- ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
+- PIPEDIR_OUT, /* out = UL = host -> target */
+- 4,
+- },
+- {
+- ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
+- PIPEDIR_IN, /* in = DL = target -> host */
+- 1,
+- },
+-
+- /* (Additions here) */
+-
+- { /* Must be last */
+- 0,
+- 0,
+- 0,
+- },
+-};
+-
+-/*
+ * Send an interrupt to the device to wake up the Target CPU
+ * so it has an opportunity to notice any changed state.
+ */
+ static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
+ {
+- int ret;
+- u32 core_ctrl;
++ u32 addr, val;
+
+- ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
+- CORE_CTRL_ADDRESS,
+- &core_ctrl);
+- if (ret) {
+- ath10k_warn("failed to read core_ctrl: %d\n", ret);
+- return ret;
+- }
++ addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
++ val = ath10k_pci_read32(ar, addr);
++ val |= CORE_CTRL_CPU_INTR_MASK;
++ ath10k_pci_write32(ar, addr, val);
+
+- /* A_INUM_FIRMWARE interrupt to Target CPU */
+- core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
++ return 0;
++}
+
+- ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
+- CORE_CTRL_ADDRESS,
+- core_ctrl);
+- if (ret) {
+- ath10k_warn("failed to set target CPU interrupt mask: %d\n",
+- ret);
+- return ret;
++static int ath10k_pci_get_num_banks(struct ath10k *ar)
++{
++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
++
++ switch (ar_pci->pdev->device) {
++ case QCA988X_2_0_DEVICE_ID:
++ return 1;
++ case QCA6174_2_1_DEVICE_ID:
++ switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
++ case QCA6174_HW_1_0_CHIP_ID_REV:
++ case QCA6174_HW_1_1_CHIP_ID_REV:
++ return 3;
++ case QCA6174_HW_1_3_CHIP_ID_REV:
++ return 2;
++ case QCA6174_HW_2_1_CHIP_ID_REV:
++ case QCA6174_HW_2_2_CHIP_ID_REV:
++ return 6;
++ case QCA6174_HW_3_0_CHIP_ID_REV:
++ case QCA6174_HW_3_1_CHIP_ID_REV:
++ case QCA6174_HW_3_2_CHIP_ID_REV:
++ return 9;
++ }
++ break;
+ }
+
+- return 0;
++ ath10k_warn(ar, "unknown number of banks, assuming 1\n");
++ return 1;
+ }
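
get_num_banks() feeds the early-allocation handshake in init_config() below: the per-chip bank count is shifted into the hi_early_alloc host-interest word alongside a magic tag, so the firmware knows how many IRAM banks the host side has already claimed. The encoding spelled out as a sketch (the real code ORs into the value read back from the target rather than starting from zero):

    /* Illustrative only: pack magic + bank count into hi_early_alloc. */
    u32 ealloc = 0;

    ealloc |= (HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
              HI_EARLY_ALLOC_MAGIC_MASK;
    ealloc |= (ath10k_pci_get_num_banks(ar) <<
               HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
              HI_EARLY_ALLOC_IRAM_BANKS_MASK;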
+
+ static int ath10k_pci_init_config(struct ath10k *ar)
+@@ -1593,144 +1560,162 @@ static int ath10k_pci_init_config(struct
+ host_interest_item_address(HI_ITEM(hi_interconnect_state));
+
+ /* Supply Target-side CE configuration */
+- ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
+- &pcie_state_targ_addr);
++ ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
++ &pcie_state_targ_addr);
+ if (ret != 0) {
+- ath10k_err("Failed to get pcie state addr: %d\n", ret);
++ ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
+ return ret;
+ }
+
+ if (pcie_state_targ_addr == 0) {
+ ret = -EIO;
+- ath10k_err("Invalid pcie state addr\n");
++ ath10k_err(ar, "Invalid pcie state addr\n");
+ return ret;
+ }
+
+- ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
++ ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
+ offsetof(struct pcie_state,
+- pipe_cfg_addr),
+- &pipe_cfg_targ_addr);
++ pipe_cfg_addr)),
++ &pipe_cfg_targ_addr);
+ if (ret != 0) {
+- ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
++ ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
+ return ret;
+ }
+
+ if (pipe_cfg_targ_addr == 0) {
+ ret = -EIO;
+- ath10k_err("Invalid pipe cfg addr\n");
++ ath10k_err(ar, "Invalid pipe cfg addr\n");
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
+- target_ce_config_wlan,
+- sizeof(target_ce_config_wlan));
++ target_ce_config_wlan,
++ sizeof(target_ce_config_wlan));
+
+ if (ret != 0) {
+- ath10k_err("Failed to write pipe cfg: %d\n", ret);
++ ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
+ return ret;
+ }
+
+- ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
++ ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
+ offsetof(struct pcie_state,
+- svc_to_pipe_map),
+- &svc_to_pipe_map);
++ svc_to_pipe_map)),
++ &svc_to_pipe_map);
+ if (ret != 0) {
+- ath10k_err("Failed to get svc/pipe map: %d\n", ret);
++ ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
+ return ret;
+ }
+
+ if (svc_to_pipe_map == 0) {
+ ret = -EIO;
+- ath10k_err("Invalid svc_to_pipe map\n");
++ ath10k_err(ar, "Invalid svc_to_pipe map\n");
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
+- target_service_to_ce_map_wlan,
+- sizeof(target_service_to_ce_map_wlan));
++ target_service_to_ce_map_wlan,
++ sizeof(target_service_to_ce_map_wlan));
+ if (ret != 0) {
+- ath10k_err("Failed to write svc/pipe map: %d\n", ret);
++ ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
+ return ret;
+ }
+
+- ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
++ ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
+ offsetof(struct pcie_state,
+- config_flags),
+- &pcie_config_flags);
++ config_flags)),
++ &pcie_config_flags);
+ if (ret != 0) {
+- ath10k_err("Failed to get pcie config_flags: %d\n", ret);
++ ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
+ return ret;
+ }
+
+ pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
+
+- ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
+- offsetof(struct pcie_state, config_flags),
+- &pcie_config_flags,
+- sizeof(pcie_config_flags));
++ ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
++ offsetof(struct pcie_state,
++ config_flags)),
++ pcie_config_flags);
+ if (ret != 0) {
+- ath10k_err("Failed to write pcie config_flags: %d\n", ret);
++ ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
+ return ret;
+ }
+
+ /* configure early allocation */
+ ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
+
+- ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
++ ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
+ if (ret != 0) {
+- ath10k_err("Faile to get early alloc val: %d\n", ret);
++ ath10k_err(ar, "Faile to get early alloc val: %d\n", ret);
+ return ret;
+ }
+
+ /* first bank is switched to IRAM */
+ ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
+ HI_EARLY_ALLOC_MAGIC_MASK);
+- ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
++ ealloc_value |= ((ath10k_pci_get_num_banks(ar) <<
++ HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
+ HI_EARLY_ALLOC_IRAM_BANKS_MASK);
+
+- ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
++ ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
+ if (ret != 0) {
+- ath10k_err("Failed to set early alloc val: %d\n", ret);
++ ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
+ return ret;
+ }
+
+ /* Tell Target to proceed with initialization */
+ flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
+
+- ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
++ ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
+ if (ret != 0) {
+- ath10k_err("Failed to get option val: %d\n", ret);
++ ath10k_err(ar, "Failed to get option val: %d\n", ret);
+ return ret;
+ }
+
+ flag2_value |= HI_OPTION_EARLY_CFG_DONE;
+
+- ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
++ ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
+ if (ret != 0) {
+- ath10k_err("Failed to set option val: %d\n", ret);
++ ath10k_err(ar, "Failed to set option val: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+ }
+
+-static int ath10k_pci_alloc_ce(struct ath10k *ar)
++static int ath10k_pci_alloc_pipes(struct ath10k *ar)
+ {
++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
++ struct ath10k_pci_pipe *pipe;
+ int i, ret;
+
+ for (i = 0; i < CE_COUNT; i++) {
+- ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
++ pipe = &ar_pci->pipe_info[i];
++ pipe->ce_hdl = &ar_pci->ce_states[i];
++ pipe->pipe_num = i;
++ pipe->hif_ce_state = ar;
++
++ ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i],
++ ath10k_pci_ce_send_done,
++ ath10k_pci_ce_recv_data);
+ if (ret) {
+- ath10k_err("failed to allocate copy engine pipe %d: %d\n",
++ ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
+ i, ret);
+ return ret;
+ }
++
++ /* Last CE is Diagnostic Window */
++ if (i == CE_COUNT - 1) {
++ ar_pci->ce_diag = pipe->ce_hdl;
++ continue;
++ }
++
++ pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
+ }
+
+ return 0;
+ }
+
+-static void ath10k_pci_free_ce(struct ath10k *ar)
++static void ath10k_pci_free_pipes(struct ath10k *ar)
+ {
+ int i;
+
+@@ -1738,305 +1723,319 @@ static void ath10k_pci_free_ce(struct at
+ ath10k_ce_free_pipe(ar, i);
+ }
+
+-static int ath10k_pci_ce_init(struct ath10k *ar)
++static int ath10k_pci_init_pipes(struct ath10k *ar)
+ {
+- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+- struct ath10k_pci_pipe *pipe_info;
+- const struct ce_attr *attr;
+- int pipe_num, ret;
+-
+- for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+- pipe_info = &ar_pci->pipe_info[pipe_num];
+- pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
+- pipe_info->pipe_num = pipe_num;
+- pipe_info->hif_ce_state = ar;
+- attr = &host_ce_config_wlan[pipe_num];
++ int i, ret;
+
+- ret = ath10k_ce_init_pipe(ar, pipe_num, attr);
++ for (i = 0; i < CE_COUNT; i++) {
++ ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
+ if (ret) {
+- ath10k_err("failed to initialize copy engine pipe %d: %d\n",
+- pipe_num, ret);
++ ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
++ i, ret);
+ return ret;
+ }
+-
+- if (pipe_num == CE_COUNT - 1) {
+- /*
+- * Reserve the ultimate CE for
+- * diagnostic Window support
+- */
+- ar_pci->ce_diag = pipe_info->ce_hdl;
+- continue;
+- }
+-
+- pipe_info->buf_sz = (size_t) (attr->src_sz_max);
+ }
+
+ return 0;
+ }
+
+-static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
++static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
+ {
+- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+- u32 fw_indicator;
+-
+- ath10k_pci_wake(ar);
+-
+- fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+-
+- if (fw_indicator & FW_IND_EVENT_PENDING) {
+- /* ACK: clear Target-side pending event */
+- ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
+- fw_indicator & ~FW_IND_EVENT_PENDING);
+-
+- if (ar_pci->started) {
+- ath10k_pci_hif_dump_area(ar);
+- } else {
+- /*
+- * Probable Target failure before we're prepared
+- * to handle it. Generally unexpected.
+- */
+- ath10k_warn("early firmware event indicated\n");
+- }
+- }
+-
+- ath10k_pci_sleep(ar);
++ return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
++ FW_IND_EVENT_PENDING;
+ }
+
+-static int ath10k_pci_warm_reset(struct ath10k *ar)
++static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
+ {
+- int ret = 0;
+ u32 val;
+
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");
++ val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
++ val &= ~FW_IND_EVENT_PENDING;
++ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
++}
+
+- ret = ath10k_do_pci_wake(ar);
+- if (ret) {
+- ath10k_err("failed to wake up target: %d\n", ret);
+- return ret;
+- }
++/* this function effectively clears target memory controller assert line */
++static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
++{
++ u32 val;
+
+- /* debug */
+- val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+- PCIE_INTR_CAUSE_ADDRESS);
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
++ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
++ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
++ val | SOC_RESET_CONTROL_SI0_RST_MASK);
++ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+
+- val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+- CPU_INTR_ADDRESS);
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
+- val);
++ msleep(10);
+
+- /* disable pending irqs */
+- ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+- PCIE_INTR_ENABLE_ADDRESS, 0);
++ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
++ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
++ val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
++ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+
+- ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+- PCIE_INTR_CLR_ADDRESS, ~0);
++ msleep(10);
++}
+
+- msleep(100);
++static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
++{
++ u32 val;
+
+- /* clear fw indicator */
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
+
+- /* clear target LF timer interrupts */
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+- SOC_LF_TIMER_CONTROL0_ADDRESS);
+- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
+- SOC_LF_TIMER_CONTROL0_ADDRESS,
+- val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
++ SOC_RESET_CONTROL_ADDRESS);
++ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
++ val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
++}
++
++static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
++{
++ u32 val;
+
+- /* reset CE */
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_RESET_CONTROL_ADDRESS);
++
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CE_RST_MASK);
+- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+- SOC_RESET_CONTROL_ADDRESS);
+ msleep(10);
+-
+- /* unreset CE */
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+ val & ~SOC_RESET_CONTROL_CE_RST_MASK);
+- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+- SOC_RESET_CONTROL_ADDRESS);
+- msleep(10);
++}
+
+- /* debug */
+- val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+- PCIE_INTR_CAUSE_ADDRESS);
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
+-
+- val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+- CPU_INTR_ADDRESS);
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
+- val);
++static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
++{
++ u32 val;
+
+- /* CPU warm reset */
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+- SOC_RESET_CONTROL_ADDRESS);
+- ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+- val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
++ SOC_LF_TIMER_CONTROL0_ADDRESS);
++ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
++ SOC_LF_TIMER_CONTROL0_ADDRESS,
++ val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
++}
+
+- val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+- SOC_RESET_CONTROL_ADDRESS);
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);
++static int ath10k_pci_warm_reset(struct ath10k *ar)
++{
++ int ret;
+
+- msleep(100);
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
+
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");
++ spin_lock_bh(&ar->data_lock);
++ ar->stats.fw_warm_reset_counter++;
++ spin_unlock_bh(&ar->data_lock);
++
++ ath10k_pci_irq_disable(ar);
++
++ /* Make sure the target CPU is not doing anything dangerous, e.g. if it
++ * were to access copy engine while host performs copy engine reset
++ * then it is possible for the device to confuse pci-e controller to
++ * the point of bringing host system to a complete stop (i.e. hang).
++ */
++ ath10k_pci_warm_reset_si0(ar);
++ ath10k_pci_warm_reset_cpu(ar);
++ ath10k_pci_init_pipes(ar);
++ ath10k_pci_wait_for_target_init(ar);
++
++ ath10k_pci_warm_reset_clear_lf(ar);
++ ath10k_pci_warm_reset_ce(ar);
++ ath10k_pci_warm_reset_cpu(ar);
++ ath10k_pci_init_pipes(ar);
+
+- ath10k_do_pci_sleep(ar);
+- return ret;
++ ret = ath10k_pci_wait_for_target_init(ar);
++ if (ret) {
++ ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
++ return ret;
++ }
++
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
++
++ return 0;
+ }
+
+-static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
++static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
+ {
+- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+- const char *irq_mode;
+- int ret;
++ int i, ret;
++ u32 val;
+
+- /*
+- * Bring the target up cleanly.
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
++
++ /* Some hardware revisions (e.g. CUS223v2) has issues with cold reset.
++ * It is thus preferred to use warm reset which is safer but may not be
++ * able to recover the device from all possible fail scenarios.
+ *
+- * The target may be in an undefined state with an AUX-powered Target
+- * and a Host in WoW mode. If the Host crashes, loses power, or is
+- * restarted (without unloading the driver) then the Target is left
+- * (aux) powered and running. On a subsequent driver load, the Target
+- * is in an unexpected state. We try to catch that here in order to
+- * reset the Target and retry the probe.
++ * Warm reset doesn't always work on first try so attempt it a few
++ * times before giving up.
+ */
+- if (cold_reset)
+- ret = ath10k_pci_cold_reset(ar);
+- else
++ for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
+ ret = ath10k_pci_warm_reset(ar);
++ if (ret) {
++ ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
++ i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
++ ret);
++ continue;
++ }
++
++ /* FIXME: Sometimes copy engine doesn't recover after warm
++ * reset. In most cases this needs cold reset. In some of these
++ * cases the device is in such a state that a cold reset may
++ * lock up the host.
++ *
++ * Reading any host interest register via copy engine is
++ * sufficient to verify if device is capable of booting
++ * firmware blob.
++ */
++ ret = ath10k_pci_init_pipes(ar);
++ if (ret) {
++ ath10k_warn(ar, "failed to init copy engine: %d\n",
++ ret);
++ continue;
++ }
++
++ ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
++ &val);
++ if (ret) {
++ ath10k_warn(ar, "failed to poke copy engine: %d\n",
++ ret);
++ continue;
++ }
++
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
++ return 0;
++ }
+
++ if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
++ ath10k_warn(ar, "refusing cold reset as requested\n");
++ return -EPERM;
++ }
++
++ ret = ath10k_pci_cold_reset(ar);
+ if (ret) {
+- ath10k_err("failed to reset target: %d\n", ret);
+- goto err;
++ ath10k_warn(ar, "failed to cold reset: %d\n", ret);
++ return ret;
++ }
++
++ ret = ath10k_pci_wait_for_target_init(ar);
++ if (ret) {
++ ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
++ ret);
++ return ret;
+ }
+
+- if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
+- /* Force AWAKE forever */
+- ath10k_do_pci_wake(ar);
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
+
+- ret = ath10k_pci_ce_init(ar);
++ return 0;
++}
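
The QCA988x reset policy above is: try the safer warm reset up to ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS times, verify each attempt by actually reading a host-interest word back through the copy engine, and only then risk a cold reset (unless reset_mode forbids it). Reduced to a skeleton, with struct dev_ctx and the three helpers as hypothetical stand-ins:

    /* Illustrative only: bounded retry with a verification probe and a
     * guarded fallback, mirroring the qca988x reset policy above. */
    static int reset_with_fallback(struct dev_ctx *d, int attempts,
                                   bool allow_cold)
    {
            int i, err;

            for (i = 0; i < attempts; i++) {
                    err = try_warm_reset(d);       /* hypothetical */
                    if (err)
                            continue;
                    if (verify_alive(d) == 0)      /* hypothetical probe */
                            return 0;
            }

            if (!allow_cold)
                    return -EPERM;

            return try_cold_reset(d);              /* hypothetical */
    }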
++
++static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
++{
++ int ret;
++
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
++
++ /* FIXME: QCA6174 requires cold + warm reset to work. */
++
++ ret = ath10k_pci_cold_reset(ar);
+ if (ret) {
+- ath10k_err("failed to initialize CE: %d\n", ret);
+- goto err_ps;
++ ath10k_warn(ar, "failed to cold reset: %d\n", ret);
++ return ret;
+ }
+
+- ret = ath10k_ce_disable_interrupts(ar);
++ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+- ath10k_err("failed to disable CE interrupts: %d\n", ret);
+- goto err_ce;
++ ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
++ ret);
++ return ret;
+ }
+
+- ret = ath10k_pci_init_irq(ar);
++ ret = ath10k_pci_warm_reset(ar);
++ if (ret) {
++ ath10k_warn(ar, "failed to warm reset: %d\n", ret);
++ return ret;
++ }
++
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
++
++ return 0;
++}
++
++static int ath10k_pci_chip_reset(struct ath10k *ar)
++{
++ if (QCA_REV_988X(ar))
++ return ath10k_pci_qca988x_chip_reset(ar);
++ else if (QCA_REV_6174(ar))
++ return ath10k_pci_qca6174_chip_reset(ar);
++ else
++ return -ENOTSUPP;
++}
++
++static int ath10k_pci_hif_power_up(struct ath10k *ar)
++{
++ int ret;
++
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
++
++ ret = ath10k_pci_wake(ar);
++ if (ret) {
++ ath10k_err(ar, "failed to wake up target: %d\n", ret);
++ return ret;
++ }
++
++ /*
++ * Bring the target up cleanly.
++ *
++ * The target may be in an undefined state with an AUX-powered Target
++ * and a Host in WoW mode. If the Host crashes, loses power, or is
++ * restarted (without unloading the driver) then the Target is left
++ * (aux) powered and running. On a subsequent driver load, the Target
++ * is in an unexpected state. We try to catch that here in order to
++ * reset the Target and retry the probe.
++ */
++ ret = ath10k_pci_chip_reset(ar);
+ if (ret) {
+- ath10k_err("failed to init irqs: %d\n", ret);
+- goto err_ce;
+- }
++ if (ath10k_pci_has_fw_crashed(ar)) {
++ ath10k_warn(ar, "firmware crashed during chip reset\n");
++ ath10k_pci_fw_crashed_clear(ar);
++ ath10k_pci_fw_crashed_dump(ar);
++ }
+
+- ret = ath10k_pci_request_early_irq(ar);
+- if (ret) {
+- ath10k_err("failed to request early irq: %d\n", ret);
+- goto err_deinit_irq;
++ ath10k_err(ar, "failed to reset chip: %d\n", ret);
++ goto err_sleep;
+ }
+
+- ret = ath10k_pci_wait_for_target_init(ar);
++ ret = ath10k_pci_init_pipes(ar);
+ if (ret) {
+- ath10k_err("failed to wait for target to init: %d\n", ret);
+- goto err_free_early_irq;
++ ath10k_err(ar, "failed to initialize CE: %d\n", ret);
++ goto err_sleep;
+ }
+
+ ret = ath10k_pci_init_config(ar);
+ if (ret) {
+- ath10k_err("failed to setup init config: %d\n", ret);
+- goto err_free_early_irq;
++ ath10k_err(ar, "failed to setup init config: %d\n", ret);
++ goto err_ce;
+ }
+
+ ret = ath10k_pci_wake_target_cpu(ar);
+ if (ret) {
+- ath10k_err("could not wake up target CPU: %d\n", ret);
+- goto err_free_early_irq;
++ ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
++ goto err_ce;
+ }
+
+- if (ar_pci->num_msi_intrs > 1)
+- irq_mode = "MSI-X";
+- else if (ar_pci->num_msi_intrs == 1)
+- irq_mode = "MSI";
+- else
+- irq_mode = "legacy";
+-
+- if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
+- ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
+- irq_mode, ath10k_pci_irq_mode,
+- ath10k_pci_reset_mode);
+-
+ return 0;
+
+-err_free_early_irq:
+- ath10k_pci_free_early_irq(ar);
+-err_deinit_irq:
+- ath10k_pci_deinit_irq(ar);
+ err_ce:
+ ath10k_pci_ce_deinit(ar);
+- ath10k_pci_warm_reset(ar);
+-err_ps:
+- if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
+- ath10k_do_pci_sleep(ar);
+-err:
+- return ret;
+-}
+-
+-static int ath10k_pci_hif_power_up(struct ath10k *ar)
+-{
+- int ret;
+-
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
+-
+- /*
+- * Hardware CUS232 version 2 has some issues with cold reset and the
+- * preferred (and safer) way to perform a device reset is through a
+- * warm reset.
+- *
+- * Warm reset doesn't always work though (notably after a firmware
+- * crash) so fall back to cold reset if necessary.
+- */
+- ret = __ath10k_pci_hif_power_up(ar, false);
+- if (ret) {
+- ath10k_warn("failed to power up target using warm reset: %d\n",
+- ret);
+-
+- if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
+- return ret;
+
+- ath10k_warn("trying cold reset\n");
+-
+- ret = __ath10k_pci_hif_power_up(ar, true);
+- if (ret) {
+- ath10k_err("failed to power up target using cold reset too (%d)\n",
+- ret);
+- return ret;
+- }
+- }
+-
+- return 0;
++err_sleep:
++ ath10k_pci_sleep(ar);
++ return ret;
+ }
+
+ static void ath10k_pci_hif_power_down(struct ath10k *ar)
+ {
+- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
+
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
+-
+- ath10k_pci_free_early_irq(ar);
+- ath10k_pci_kill_tasklet(ar);
+- ath10k_pci_deinit_irq(ar);
+- ath10k_pci_ce_deinit(ar);
+- ath10k_pci_warm_reset(ar);
++ /* Currently hif_power_up performs effectively a reset and hif_stop
++ * resets the chip as well so there's no point in resetting here.
++ */
+
+- if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
+- ath10k_do_pci_sleep(ar);
++ ath10k_pci_sleep(ar);
+ }
+
+ #ifdef CONFIG_PM
+@@ -2090,6 +2089,8 @@ static int ath10k_pci_hif_resume(struct
+
+ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
+ .tx_sg = ath10k_pci_hif_tx_sg,
++ .diag_read = ath10k_pci_hif_diag_read,
++ .diag_write = ath10k_pci_diag_write_mem,
+ .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
+ .start = ath10k_pci_hif_start,
+ .stop = ath10k_pci_hif_stop,
+@@ -2100,6 +2101,8 @@ static const struct ath10k_hif_ops ath10
+ .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
+ .power_up = ath10k_pci_hif_power_up,
+ .power_down = ath10k_pci_hif_power_down,
++ .read32 = ath10k_pci_read32,
++ .write32 = ath10k_pci_write32,
+ #ifdef CONFIG_PM
+ .suspend = ath10k_pci_hif_suspend,
+ .resume = ath10k_pci_hif_resume,
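
Beyond the PCI-local cleanups, these hunks widen the HIF interface: core code can now perform target memory and register access through .diag_read/.diag_write/.read32/.write32 instead of calling into the bus driver directly. The core-side wrappers are presumably thin vtable dispatches, something like the following sketch (the exact wrapper names in hif.h are assumptions):

    /* Illustrative only: bus-agnostic access via the HIF ops table. */
    static inline u32 ath10k_hif_read32(struct ath10k *ar, u32 address)
    {
            return ar->hif.ops->read32(ar, address);
    }

    static inline int ath10k_hif_diag_read(struct ath10k *ar, u32 address,
                                           void *buf, size_t buf_len)
    {
            return ar->hif.ops->diag_read(ar, address, buf, buf_len);
    }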
+@@ -2118,7 +2121,14 @@ static void ath10k_msi_err_tasklet(unsig
+ {
+ struct ath10k *ar = (struct ath10k *)data;
+
+- ath10k_pci_fw_interrupt_handler(ar);
++ if (!ath10k_pci_has_fw_crashed(ar)) {
++ ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
++ return;
++ }
++
++ ath10k_pci_irq_disable(ar);
++ ath10k_pci_fw_crashed_clear(ar);
++ ath10k_pci_fw_crashed_dump(ar);
+ }
+
+ /*
+@@ -2132,7 +2142,8 @@ static irqreturn_t ath10k_pci_per_engine
+ int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
+
+ if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
+- ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
++ ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
++ ce_id);
+ return IRQ_HANDLED;
+ }
+
+@@ -2179,39 +2190,18 @@ static irqreturn_t ath10k_pci_interrupt_
+ return IRQ_HANDLED;
+ }
+
+-static void ath10k_pci_early_irq_tasklet(unsigned long data)
++static void ath10k_pci_tasklet(unsigned long data)
+ {
+ struct ath10k *ar = (struct ath10k *)data;
+- u32 fw_ind;
+- int ret;
++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+- ret = ath10k_pci_wake(ar);
+- if (ret) {
+- ath10k_warn("failed to wake target in early irq tasklet: %d\n",
+- ret);
++ if (ath10k_pci_has_fw_crashed(ar)) {
++ ath10k_pci_irq_disable(ar);
++ ath10k_pci_fw_crashed_clear(ar);
++ ath10k_pci_fw_crashed_dump(ar);
+ return;
+ }
+
+- fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+- if (fw_ind & FW_IND_EVENT_PENDING) {
+- ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
+- fw_ind & ~FW_IND_EVENT_PENDING);
+-
+- /* Some structures are unavailable during early boot or at
+- * driver teardown so just print that the device has crashed. */
+- ath10k_warn("device crashed - no diagnostics available\n");
+- }
+-
+- ath10k_pci_sleep(ar);
+- ath10k_pci_enable_legacy_irq(ar);
+-}
+-
+-static void ath10k_pci_tasklet(unsigned long data)
+-{
+- struct ath10k *ar = (struct ath10k *)data;
+- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+-
+- ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
+ ath10k_ce_per_engine_service_any(ar);
+
+ /* Re-enable legacy irq that was disabled in the irq handler */
+@@ -2228,7 +2218,7 @@ static int ath10k_pci_request_irq_msix(s
+ ath10k_pci_msi_fw_handler,
+ IRQF_SHARED, "ath10k_pci", ar);
+ if (ret) {
+- ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
++ ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
+ ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
+ return ret;
+ }
+@@ -2238,7 +2228,7 @@ static int ath10k_pci_request_irq_msix(s
+ ath10k_pci_per_engine_handler,
+ IRQF_SHARED, "ath10k_pci", ar);
+ if (ret) {
+- ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
++ ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
+ ar_pci->pdev->irq + i, ret);
+
+ for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
+@@ -2261,7 +2251,7 @@ static int ath10k_pci_request_irq_msi(st
+ ath10k_pci_interrupt_handler,
+ IRQF_SHARED, "ath10k_pci", ar);
+ if (ret) {
+- ath10k_warn("failed to request MSI irq %d: %d\n",
++ ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
+ ar_pci->pdev->irq, ret);
+ return ret;
+ }
+@@ -2278,7 +2268,7 @@ static int ath10k_pci_request_irq_legacy
+ ath10k_pci_interrupt_handler,
+ IRQF_SHARED, "ath10k_pci", ar);
+ if (ret) {
+- ath10k_warn("failed to request legacy irq %d: %d\n",
++ ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
+ ar_pci->pdev->irq, ret);
+ return ret;
+ }
+@@ -2299,7 +2289,7 @@ static int ath10k_pci_request_irq(struct
+ return ath10k_pci_request_irq_msix(ar);
+ }
+
+- ath10k_warn("unknown irq configuration upon request\n");
++ ath10k_warn(ar, "unknown irq configuration upon request\n");
+ return -EINVAL;
+ }
+
+@@ -2322,8 +2312,6 @@ static void ath10k_pci_init_irq_tasklets
+ tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
+ tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
+ (unsigned long)ar);
+- tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
+- (unsigned long)ar);
+
+ for (i = 0; i < CE_COUNT; i++) {
+ ar_pci->pipe_info[i].ar_pci = ar_pci;
+@@ -2335,21 +2323,19 @@ static void ath10k_pci_init_irq_tasklets
+ static int ath10k_pci_init_irq(struct ath10k *ar)
+ {
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+- bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
+- ar_pci->features);
+ int ret;
+
+ ath10k_pci_init_irq_tasklets(ar);
+
+- if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
|
|
+- !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
|
|
+- ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
|
|
++ if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
|
|
++ ath10k_info(ar, "limiting irq mode to: %d\n",
|
|
++ ath10k_pci_irq_mode);
|
|
+
|
|
+ /* Try MSI-X */
|
|
+- if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
|
|
++ if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
|
|
+ ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
|
|
+ ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
|
|
+- ar_pci->num_msi_intrs);
|
|
++ ar_pci->num_msi_intrs);
|
|
+ if (ret > 0)
|
|
+ return 0;
|
|
+
|
|
+@@ -2376,34 +2362,16 @@ static int ath10k_pci_init_irq(struct at
|
|
+ * synchronization checking. */
|
|
+ ar_pci->num_msi_intrs = 0;
|
|
+
|
|
+- ret = ath10k_pci_wake(ar);
|
|
+- if (ret) {
|
|
+- ath10k_warn("failed to wake target: %d\n", ret);
|
|
+- return ret;
|
|
+- }
|
|
+-
|
|
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
|
|
+ PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
|
|
+- ath10k_pci_sleep(ar);
|
|
+
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+-static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
|
|
++static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
|
|
+ {
|
|
+- int ret;
|
|
+-
|
|
+- ret = ath10k_pci_wake(ar);
|
|
+- if (ret) {
|
|
+- ath10k_warn("failed to wake target: %d\n", ret);
|
|
+- return ret;
|
|
+- }
|
|
+-
|
|
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
|
|
+ 0);
|
|
+- ath10k_pci_sleep(ar);
|
|
+-
|
|
+- return 0;
|
|
+ }
|
|
+
|
|
+ static int ath10k_pci_deinit_irq(struct ath10k *ar)
|
|
+@@ -2412,7 +2380,8 @@ static int ath10k_pci_deinit_irq(struct
|
|
+
|
|
+ switch (ar_pci->num_msi_intrs) {
|
|
+ case 0:
|
|
+- return ath10k_pci_deinit_irq_legacy(ar);
|
|
++ ath10k_pci_deinit_irq_legacy(ar);
|
|
++ return 0;
|
|
+ case 1:
|
|
+ /* fall-through */
|
|
+ case MSI_NUM_REQUEST:
|
|
+@@ -2422,7 +2391,7 @@ static int ath10k_pci_deinit_irq(struct
|
|
+ pci_disable_msi(ar_pci->pdev);
|
|
+ }
|
|
+
|
|
+- ath10k_warn("unknown irq configuration upon deinit\n");
|
|
++ ath10k_warn(ar, "unknown irq configuration upon deinit\n");
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+@@ -2430,23 +2399,17 @@ static int ath10k_pci_wait_for_target_in
|
|
+ {
|
|
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
+ unsigned long timeout;
|
|
+- int ret;
|
|
+ u32 val;
|
|
+
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
|
|
+-
|
|
+- ret = ath10k_pci_wake(ar);
|
|
+- if (ret) {
|
|
+- ath10k_err("failed to wake up target for init: %d\n", ret);
|
|
+- return ret;
|
|
+- }
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
|
|
+
|
|
+ timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
|
|
+
|
|
+ do {
|
|
+ val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
|
|
+
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
|
|
++ val);
|
|
+
|
|
+ /* target should never return this */
|
|
+ if (val == 0xffffffff)
|
|
+@@ -2461,52 +2424,46 @@ static int ath10k_pci_wait_for_target_in
|
|
+
|
|
+ if (ar_pci->num_msi_intrs == 0)
|
|
+ /* Fix potential race by repeating CORE_BASE writes */
|
|
+- ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
|
|
+- PCIE_INTR_FIRMWARE_MASK |
|
|
+- PCIE_INTR_CE_MASK_ALL);
|
|
++ ath10k_pci_enable_legacy_irq(ar);
|
|
+
|
|
+ mdelay(10);
|
|
+ } while (time_before(jiffies, timeout));
|
|
+
|
|
++ ath10k_pci_disable_and_clear_legacy_irq(ar);
|
|
++ ath10k_pci_irq_msi_fw_mask(ar);
|
|
++
|
|
+ if (val == 0xffffffff) {
|
|
+- ath10k_err("failed to read device register, device is gone\n");
|
|
+- ret = -EIO;
|
|
+- goto out;
|
|
++ ath10k_err(ar, "failed to read device register, device is gone\n");
|
|
++ return -EIO;
|
|
+ }
|
|
+
|
|
+ if (val & FW_IND_EVENT_PENDING) {
|
|
+- ath10k_warn("device has crashed during init\n");
|
|
+- ret = -ECOMM;
|
|
+- goto out;
|
|
++ ath10k_warn(ar, "device has crashed during init\n");
|
|
++ return -ECOMM;
|
|
+ }
|
|
+
|
|
+ if (!(val & FW_IND_INITIALIZED)) {
|
|
+- ath10k_err("failed to receive initialized event from target: %08x\n",
|
|
++ ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
|
|
+ val);
|
|
+- ret = -ETIMEDOUT;
|
|
+- goto out;
|
|
++ return -ETIMEDOUT;
|
|
+ }
|
|
+
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
|
|
+-
|
|
+-out:
|
|
+- ath10k_pci_sleep(ar);
|
|
+- return ret;
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
|
|
++ return 0;
|
|
+ }
|
|
+
|
|
+ static int ath10k_pci_cold_reset(struct ath10k *ar)
|
|
+ {
|
|
+- int i, ret;
|
|
++ int i;
|
|
+ u32 val;
|
|
+
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
|
|
+
|
|
+- ret = ath10k_do_pci_wake(ar);
|
|
+- if (ret) {
|
|
+- ath10k_err("failed to wake up target: %d\n",
|
|
+- ret);
|
|
+- return ret;
|
|
+- }
|
|
++ spin_lock_bh(&ar->data_lock);
|
|
++
|
|
++ ar->stats.fw_cold_reset_counter++;
|
|
++
|
|
++ spin_unlock_bh(&ar->data_lock);
|
|
+
|
|
+ /* Put Target, including PCIe, into RESET. */
|
|
+ val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
|
|
+@@ -2531,181 +2488,227 @@ static int ath10k_pci_cold_reset(struct
|
|
+ msleep(1);
|
|
+ }
|
|
+
|
|
+- ath10k_do_pci_sleep(ar);
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
|
|
++
|
|
++ return 0;
|
|
++}
|
|
++
|
|
++static int ath10k_pci_claim(struct ath10k *ar)
|
|
++{
|
|
++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
++ struct pci_dev *pdev = ar_pci->pdev;
|
|
++ u32 lcr_val;
|
|
++ int ret;
|
|
++
|
|
++ pci_set_drvdata(pdev, ar);
|
|
++
|
|
++ ret = pci_enable_device(pdev);
|
|
++ if (ret) {
|
|
++ ath10k_err(ar, "failed to enable pci device: %d\n", ret);
|
|
++ return ret;
|
|
++ }
|
|
++
|
|
++ ret = pci_request_region(pdev, BAR_NUM, "ath");
|
|
++ if (ret) {
|
|
++ ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
|
|
++ ret);
|
|
++ goto err_device;
|
|
++ }
|
|
++
|
|
++ /* Target expects 32 bit DMA. Enforce it. */
|
|
++ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
|
|
++ if (ret) {
|
|
++ ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
|
|
++ goto err_region;
|
|
++ }
|
|
++
|
|
++ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
|
|
++ if (ret) {
|
|
++ ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
|
|
++ ret);
|
|
++ goto err_region;
|
|
++ }
|
|
++
|
|
++ pci_set_master(pdev);
|
|
++
|
|
++ /* Workaround: Disable ASPM */
|
|
++ pci_read_config_dword(pdev, 0x80, &lcr_val);
|
|
++ pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
|
|
+
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
|
|
++ /* Arrange for access to Target SoC registers. */
|
|
++ ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
|
|
++ if (!ar_pci->mem) {
|
|
++ ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
|
|
++ ret = -EIO;
|
|
++ goto err_master;
|
|
++ }
|
|
+
|
|
++ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
|
|
+ return 0;
|
|
++
|
|
++err_master:
|
|
++ pci_clear_master(pdev);
|
|
++
|
|
++err_region:
|
|
++ pci_release_region(pdev, BAR_NUM);
|
|
++
|
|
++err_device:
|
|
++ pci_disable_device(pdev);
|
|
++
|
|
++ return ret;
|
|
+ }
|
|
+
|
|
+-static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
|
|
++static void ath10k_pci_release(struct ath10k *ar)
|
|
+ {
|
|
++ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
|
++ struct pci_dev *pdev = ar_pci->pdev;
|
|
++
|
|
++ pci_iounmap(pdev, ar_pci->mem);
|
|
++ pci_release_region(pdev, BAR_NUM);
|
|
++ pci_clear_master(pdev);
|
|
++ pci_disable_device(pdev);
|
|
++}
|
|
++
|
|
++static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
|
|
++{
|
|
++ const struct ath10k_pci_supp_chip *supp_chip;
|
|
+ int i;
|
|
++ u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
|
|
+
|
|
+- for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
|
|
+- if (!test_bit(i, ar_pci->features))
|
|
+- continue;
|
|
++ for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
|
|
++ supp_chip = &ath10k_pci_supp_chips[i];
|
|
+
|
|
+- switch (i) {
|
|
+- case ATH10K_PCI_FEATURE_MSI_X:
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
|
|
+- break;
|
|
+- case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
|
|
+- break;
|
|
+- }
|
|
++ if (supp_chip->dev_id == dev_id &&
|
|
++ supp_chip->rev_id == rev_id)
|
|
++ return true;
|
|
+ }
|
|
++
|
|
++ return false;
|
|
+ }
|
|
+
|
|
+ static int ath10k_pci_probe(struct pci_dev *pdev,
|
|
+ const struct pci_device_id *pci_dev)
|
|
+ {
|
|
+- void __iomem *mem;
|
|
+ int ret = 0;
|
|
+ struct ath10k *ar;
|
|
+ struct ath10k_pci *ar_pci;
|
|
+- u32 lcr_val, chip_id;
|
|
+-
|
|
+- ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
|
|
+-
|
|
+- ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
|
|
+- if (ar_pci == NULL)
|
|
+- return -ENOMEM;
|
|
+-
|
|
+- ar_pci->pdev = pdev;
|
|
+- ar_pci->dev = &pdev->dev;
|
|
++ enum ath10k_hw_rev hw_rev;
|
|
++ u32 chip_id;
|
|
+
|
|
+ switch (pci_dev->device) {
|
|
+ case QCA988X_2_0_DEVICE_ID:
|
|
+- set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
|
|
++ hw_rev = ATH10K_HW_QCA988X;
|
|
++ break;
|
|
++ case QCA6174_2_1_DEVICE_ID:
|
|
++ hw_rev = ATH10K_HW_QCA6174;
|
|
+ break;
|
|
+ default:
|
|
+- ret = -ENODEV;
|
|
+- ath10k_err("Unknown device ID: %d\n", pci_dev->device);
|
|
+- goto err_ar_pci;
|
|
++ WARN_ON(1);
|
|
++ return -ENOTSUPP;
|
|
+ }
|
|
+
|
|
+- if (ath10k_pci_target_ps)
|
|
+- set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
|
|
+-
|
|
+- ath10k_pci_dump_features(ar_pci);
|
|
+-
|
|
+- ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
|
|
++ ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
|
|
++ hw_rev, &ath10k_pci_hif_ops);
|
|
+ if (!ar) {
|
|
+- ath10k_err("failed to create driver core\n");
|
|
+- ret = -EINVAL;
|
|
+- goto err_ar_pci;
|
|
++ dev_err(&pdev->dev, "failed to allocate core\n");
|
|
++ return -ENOMEM;
|
|
+ }
|
|
+
|
|
++ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");
|
|
++
|
|
++ ar_pci = ath10k_pci_priv(ar);
|
|
++ ar_pci->pdev = pdev;
|
|
++ ar_pci->dev = &pdev->dev;
|
|
+ ar_pci->ar = ar;
|
|
+- atomic_set(&ar_pci->keep_awake_count, 0);
|
|
+
|
|
+- pci_set_drvdata(pdev, ar);
|
|
++ spin_lock_init(&ar_pci->ce_lock);
|
|
++ setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
|
|
++ (unsigned long)ar);
|
|
+
|
|
+- /*
|
|
+- * Without any knowledge of the Host, the Target may have been reset or
|
|
+- * power cycled and its Config Space may no longer reflect the PCI
|
|
+- * address space that was assigned earlier by the PCI infrastructure.
|
|
+- * Refresh it now.
|
|
+- */
|
|
+- ret = pci_assign_resource(pdev, BAR_NUM);
|
|
++ ret = ath10k_pci_claim(ar);
|
|
+ if (ret) {
|
|
+- ath10k_err("failed to assign PCI space: %d\n", ret);
|
|
+- goto err_ar;
|
|
++ ath10k_err(ar, "failed to claim device: %d\n", ret);
|
|
++ goto err_core_destroy;
|
|
+ }
|
|
+
|
|
+- ret = pci_enable_device(pdev);
|
|
++ ret = ath10k_pci_wake(ar);
|
|
+ if (ret) {
|
|
+- ath10k_err("failed to enable PCI device: %d\n", ret);
|
|
+- goto err_ar;
|
|
++ ath10k_err(ar, "failed to wake up: %d\n", ret);
|
|
++ goto err_release;
|
|
+ }
|
|
+
|
|
+- /* Request MMIO resources */
|
|
+- ret = pci_request_region(pdev, BAR_NUM, "ath");
|
|
++ ret = ath10k_pci_alloc_pipes(ar);
|
|
+ if (ret) {
|
|
+- ath10k_err("failed to request MMIO region: %d\n", ret);
|
|
+- goto err_device;
|
|
++ ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
|
|
++ ret);
|
|
++ goto err_sleep;
|
|
+ }
|
|
+
|
|
+- /*
|
|
+- * Target structures have a limit of 32 bit DMA pointers.
|
|
+- * DMA pointers can be wider than 32 bits by default on some systems.
|
|
+- */
|
|
+- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
|
|
+- if (ret) {
|
|
+- ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
|
|
+- goto err_region;
|
|
+- }
|
|
++ ath10k_pci_ce_deinit(ar);
|
|
++ ath10k_pci_irq_disable(ar);
|
|
+
|
|
+- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
|
|
++ ret = ath10k_pci_init_irq(ar);
|
|
+ if (ret) {
|
|
+- ath10k_err("failed to set consistent DMA mask to 32-bit\n");
|
|
+- goto err_region;
|
|
++ ath10k_err(ar, "failed to init irqs: %d\n", ret);
|
|
++ goto err_free_pipes;
|
|
+ }
|
|
+
|
|
+- /* Set bus master bit in PCI_COMMAND to enable DMA */
|
|
+- pci_set_master(pdev);
|
|
+-
|
|
+- /*
|
|
+- * Temporary FIX: disable ASPM
|
|
+- * Will be removed after the OTP is programmed
|
|
+- */
|
|
+- pci_read_config_dword(pdev, 0x80, &lcr_val);
|
|
+- pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
|
|
++ ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
|
|
++ ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
|
|
++ ath10k_pci_irq_mode, ath10k_pci_reset_mode);
|
|
+
|
|
+- /* Arrange for access to Target SoC registers. */
|
|
+- mem = pci_iomap(pdev, BAR_NUM, 0);
|
|
+- if (!mem) {
|
|
+- ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
|
|
+- ret = -EIO;
|
|
+- goto err_master;
|
|
++ ret = ath10k_pci_request_irq(ar);
|
|
++ if (ret) {
|
|
++ ath10k_warn(ar, "failed to request irqs: %d\n", ret);
|
|
++ goto err_deinit_irq;
|
|
+ }
|
|
+
|
|
+- ar_pci->mem = mem;
|
|
+-
|
|
+- spin_lock_init(&ar_pci->ce_lock);
|
|
+-
|
|
+- ret = ath10k_do_pci_wake(ar);
|
|
++ ret = ath10k_pci_chip_reset(ar);
|
|
+ if (ret) {
|
|
+- ath10k_err("Failed to get chip id: %d\n", ret);
|
|
+- goto err_iomap;
|
|
++ ath10k_err(ar, "failed to reset chip: %d\n", ret);
|
|
++ goto err_free_irq;
|
|
+ }
|
|
+
|
|
+ chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
|
|
++ if (chip_id == 0xffffffff) {
|
|
++ ath10k_err(ar, "failed to get chip id\n");
|
|
++ goto err_free_irq;
|
|
++ }
|
|
+
|
|
+- ath10k_do_pci_sleep(ar);
|
|
+-
|
|
+- ret = ath10k_pci_alloc_ce(ar);
|
|
+- if (ret) {
|
|
+- ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
|
|
+- goto err_iomap;
|
|
++ if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
|
|
++ ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
|
|
++ pdev->device, chip_id);
|
|
++ goto err_sleep;
|
|
+ }
|
|
+
|
|
+- ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
|
|
++ ath10k_pci_sleep(ar);
|
|
+
|
|
+ ret = ath10k_core_register(ar, chip_id);
|
|
+ if (ret) {
|
|
+- ath10k_err("failed to register driver core: %d\n", ret);
|
|
+- goto err_free_ce;
|
|
++ ath10k_err(ar, "failed to register driver core: %d\n", ret);
|
|
++ goto err_free_irq;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+-err_free_ce:
|
|
+- ath10k_pci_free_ce(ar);
|
|
+-err_iomap:
|
|
+- pci_iounmap(pdev, mem);
|
|
+-err_master:
|
|
+- pci_clear_master(pdev);
|
|
+-err_region:
|
|
+- pci_release_region(pdev, BAR_NUM);
|
|
+-err_device:
|
|
+- pci_disable_device(pdev);
|
|
+-err_ar:
|
|
++err_free_irq:
|
|
++ ath10k_pci_free_irq(ar);
|
|
++ ath10k_pci_kill_tasklet(ar);
|
|
++
|
|
++err_deinit_irq:
|
|
++ ath10k_pci_deinit_irq(ar);
|
|
++
|
|
++err_free_pipes:
|
|
++ ath10k_pci_free_pipes(ar);
|
|
++
|
|
++err_sleep:
|
|
++ ath10k_pci_sleep(ar);
|
|
++
|
|
++err_release:
|
|
++ ath10k_pci_release(ar);
|
|
++
|
|
++err_core_destroy:
|
|
+ ath10k_core_destroy(ar);
|
|
+-err_ar_pci:
|
|
+- /* call HIF PCI free here */
|
|
+- kfree(ar_pci);
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+@@ -2715,7 +2718,7 @@ static void ath10k_pci_remove(struct pci
|
|
+ struct ath10k *ar = pci_get_drvdata(pdev);
|
|
+ struct ath10k_pci *ar_pci;
|
|
+
|
|
+- ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
|
|
++ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
|
|
+
|
|
+ if (!ar)
|
|
+ return;
|
|
+@@ -2725,18 +2728,14 @@ static void ath10k_pci_remove(struct pci
|
|
+ if (!ar_pci)
|
|
+ return;
|
|
+
|
|
+- tasklet_kill(&ar_pci->msi_fw_err);
|
|
+-
|
|
+ ath10k_core_unregister(ar);
|
|
+- ath10k_pci_free_ce(ar);
|
|
+-
|
|
+- pci_iounmap(pdev, ar_pci->mem);
|
|
+- pci_release_region(pdev, BAR_NUM);
|
|
+- pci_clear_master(pdev);
|
|
+- pci_disable_device(pdev);
|
|
+-
|
|
++ ath10k_pci_free_irq(ar);
|
|
++ ath10k_pci_kill_tasklet(ar);
|
|
++ ath10k_pci_deinit_irq(ar);
|
|
++ ath10k_pci_ce_deinit(ar);
|
|
++ ath10k_pci_free_pipes(ar);
|
|
++ ath10k_pci_release(ar);
|
|
+ ath10k_core_destroy(ar);
|
|
+- kfree(ar_pci);
|
|
+ }
|
|
+
|
|
+ MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
|
|
+@@ -2754,7 +2753,8 @@ static int __init ath10k_pci_init(void)
|
|
+
|
|
+ ret = pci_register_driver(&ath10k_pci_driver);
|
|
+ if (ret)
|
|
+- ath10k_err("failed to register PCI driver: %d\n", ret);
|
|
++ printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
|
|
++ ret);
|
|
+
|
|
+ return ret;
|
|
+ }
|
|
+@@ -2770,5 +2770,7 @@ module_exit(ath10k_pci_exit);
|
|
+ MODULE_AUTHOR("Qualcomm Atheros");
|
|
+ MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
|
|
+ MODULE_LICENSE("Dual BSD/GPL");
|
|
+-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
|
|
++MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
|
|
++MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
|
|
++MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
|
|
+ MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
|
|
+--- a/drivers/net/wireless/ath/ath10k/pci.h
++++ b/drivers/net/wireless/ath/ath10k/pci.h
+@@ -23,9 +23,6 @@
+ #include "hw.h"
+ #include "ce.h"
+
+-/* FW dump area */
+-#define REG_DUMP_COUNT_QCA988X 60
+-
+ /*
+  * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite
+  */
+@@ -38,7 +35,8 @@
+ #define DIAG_TRANSFER_LIMIT 2048
+
+ struct bmi_xfer {
+-	struct completion done;
++	bool tx_done;
++	bool rx_done;
+ 	bool wait_for_resp;
+ 	u32 resp_len;
+ };
+@@ -102,12 +100,12 @@ struct pcie_state {
+  * NOTE: Structure is shared between Host software and Target firmware!
+  */
+ struct ce_pipe_config {
+-	u32 pipenum;
+-	u32 pipedir;
+-	u32 nentries;
+-	u32 nbytes_max;
+-	u32 flags;
+-	u32 reserved;
++	__le32 pipenum;
++	__le32 pipedir;
++	__le32 nentries;
++	__le32 nbytes_max;
++	__le32 flags;
++	__le32 reserved;
+ };
+
+ /*
+@@ -129,17 +127,9 @@ struct ce_pipe_config {
+
+ /* Establish a mapping between a service/direction and a pipe. */
+ struct service_to_pipe {
+-	u32 service_id;
+-	u32 pipedir;
+-	u32 pipenum;
+-};
+-
+-enum ath10k_pci_features {
+-	ATH10K_PCI_FEATURE_MSI_X		= 0,
+-	ATH10K_PCI_FEATURE_SOC_POWER_SAVE	= 1,
+-
+-	/* keep last */
+-	ATH10K_PCI_FEATURE_COUNT
++	__le32 service_id;
++	__le32 pipedir;
++	__le32 pipenum;
+ };
+
+ /* Per-pipe state. */
+@@ -162,14 +152,17 @@ struct ath10k_pci_pipe {
+ 	struct tasklet_struct intr;
+ };
+
++struct ath10k_pci_supp_chip {
++	u32 dev_id;
++	u32 rev_id;
++};
++
+ struct ath10k_pci {
+ 	struct pci_dev *pdev;
+ 	struct device *dev;
+ 	struct ath10k *ar;
+ 	void __iomem *mem;
+
+-	DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);
+-
+ 	/*
+ 	 * Number of MSI interrupts granted, 0 --> using legacy PCI line
+ 	 * interrupts.
+@@ -178,12 +171,6 @@ struct ath10k_pci {
+
+ 	struct tasklet_struct intr_tq;
+ 	struct tasklet_struct msi_fw_err;
+-	struct tasklet_struct early_irq_tasklet;
+-
+-	int started;
+-
+-	atomic_t keep_awake_count;
+-	bool verified_awake;
+
+ 	struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
+
+@@ -197,29 +184,17 @@ struct ath10k_pci {
+
+ 	/* Map CE id to ce_state */
+ 	struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
++	struct timer_list rx_post_retry;
+ };
+
+ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
+ {
+-	return ar->hif.priv;
+-}
+-
+-static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
+-{
+-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+-
+-	return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+-}
+-
+-static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
+-{
+-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+-
+-	iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
++	return (struct ath10k_pci *)ar->drv_priv;
+ }
+
++#define ATH10K_PCI_RX_POST_RETRY_MS 50
+ #define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
+-#define PCIE_WAKE_TIMEOUT 5000	/* 5ms */
++#define PCIE_WAKE_TIMEOUT 10000	/* 10ms */
+
+ #define BAR_NUM 0
+
+@@ -241,35 +216,17 @@ static inline void ath10k_pci_reg_write3
+ /* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
+ #define DIAG_ACCESS_CE_TIMEOUT_MS 10
+
+-/*
+- * This API allows the Host to access Target registers directly
+- * and relatively efficiently over PCIe.
+- * This allows the Host to avoid extra overhead associated with
+- * sending a message to firmware and waiting for a response message
+- * from firmware, as is done on other interconnects.
+- *
+- * Yet there is some complexity with direct accesses because the
+- * Target's power state is not known a priori. The Host must issue
+- * special PCIe reads/writes in order to explicitly wake the Target
+- * and to verify that it is awake and will remain awake.
+- *
+- * Usage:
++/* Target exposes its registers for direct access. However before host can
++ * access them it needs to make sure the target is awake (ath10k_pci_wake,
++ * ath10k_pci_wake_wait, ath10k_pci_is_awake). Once target is awake it won't go
++ * to sleep unless host tells it to (ath10k_pci_sleep).
+  *
+- *   Use ath10k_pci_read32 and ath10k_pci_write32 to access Target space.
+- *   These calls must be bracketed by ath10k_pci_wake and
+- *   ath10k_pci_sleep.  A single BEGIN/END pair is adequate for
+- *   multiple READ/WRITE operations.
++ * If host tries to access target registers without waking it up it can
++ * scribble over host memory.
+  *
+- *   Use ath10k_pci_wake to put the Target in a state in
+- *   which it is legal for the Host to directly access it. This
+- *   may involve waking the Target from a low power state, which
+- *   may take up to 2Ms!
+- *
+- *   Use ath10k_pci_sleep to tell the Target that as far as
+- *   this code path is concerned, it no longer needs to remain
+- *   directly accessible.  BEGIN/END is under a reference counter;
+- *   multiple code paths may issue BEGIN/END on a single targid.
++ * If target is asleep waking it up may take up to even 2ms.
+  */
++
+ static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
+ 				      u32 value)
+ {
+@@ -295,25 +252,18 @@ static inline void ath10k_pci_soc_write3
+ 	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
+ }
+
+-int ath10k_do_pci_wake(struct ath10k *ar);
+-void ath10k_do_pci_sleep(struct ath10k *ar);
+-
+-static inline int ath10k_pci_wake(struct ath10k *ar)
++static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
+ {
+ 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+-	if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
+-		return ath10k_do_pci_wake(ar);
+-
+-	return 0;
++	return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+ }
+
+-static inline void ath10k_pci_sleep(struct ath10k *ar)
++static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
+ {
+ 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+-	if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
+-		ath10k_do_pci_sleep(ar);
++	iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+ }
+
+ #endif /* _PCI_H_ */
+--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
++++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
+@@ -839,7 +839,6 @@ struct rx_ppdu_start {
+  *		Reserved: HW should fill with 0, FW should ignore.
+  */
+
+-
+ #define RX_PPDU_END_FLAGS_PHY_ERR             (1 << 0)
+ #define RX_PPDU_END_FLAGS_RX_LOCATION         (1 << 1)
+ #define RX_PPDU_END_FLAGS_TXBF_H_INFO         (1 << 2)
+@@ -851,7 +850,7 @@ struct rx_ppdu_start {
+
+ #define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
+
+-struct rx_ppdu_end {
++struct rx_ppdu_end_common {
+ 	__le32 evm_p0;
+ 	__le32 evm_p1;
+ 	__le32 evm_p2;
+@@ -874,10 +873,33 @@ struct rx_ppdu_end {
+ 	u8 phy_err_code;
+ 	__le16 flags; /* %RX_PPDU_END_FLAGS_ */
+ 	__le32 info0; /* %RX_PPDU_END_INFO0_ */
++} __packed;
++
++struct rx_ppdu_end_qca988x {
++	__le16 bb_length;
++	__le16 info1; /* %RX_PPDU_END_INFO1_ */
++} __packed;
++
++#define RX_PPDU_END_RTT_CORRELATION_VALUE_MASK 0x00ffffff
++#define RX_PPDU_END_RTT_CORRELATION_VALUE_LSB  0
++#define RX_PPDU_END_RTT_UNUSED_MASK            0x7f000000
++#define RX_PPDU_END_RTT_UNUSED_LSB             24
++#define RX_PPDU_END_RTT_NORMAL_MODE            BIT(31)
++
++struct rx_ppdu_end_qca6174 {
++	__le32 rtt; /* %RX_PPDU_END_RTT_ */
+ 	__le16 bb_length;
+ 	__le16 info1; /* %RX_PPDU_END_INFO1_ */
+ } __packed;
+
++struct rx_ppdu_end {
++	struct rx_ppdu_end_common common;
++	union {
++		struct rx_ppdu_end_qca988x qca988x;
++		struct rx_ppdu_end_qca6174 qca6174;
++	} __packed;
++} __packed;
++
+ /*
+  * evm_p0
+  *		EVM for pilot 0.  Contain EVM for streams: 0, 1, 2 and 3.
+--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
++++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
+@@ -18,6 +18,8 @@
+ #ifndef __TARGADDRS_H__
+ #define __TARGADDRS_H__
+
++#include "hw.h"
++
+ /*
+  * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
+  * host_interest structure.  It must match the address of the _host_interest
+@@ -284,7 +286,6 @@ Fw Mode/SubMode Mask
+ #define HI_OPTION_ALL_FW_SUBMODE_MASK	0xFF00
+ #define HI_OPTION_ALL_FW_SUBMODE_SHIFT	0x8
+
+-
+ /* hi_option_flag2 options */
+ #define HI_OPTION_OFFLOAD_AMSDU		0x01
+ #define HI_OPTION_DFS_SUPPORT		0x02 /* Enable DFS support */
+@@ -446,4 +447,7 @@ Fw Mode/SubMode Mask
+ #define QCA988X_BOARD_DATA_SZ	7168
+ #define QCA988X_BOARD_EXT_DATA_SZ 0
+
++#define QCA6174_BOARD_DATA_SZ	8192
++#define QCA6174_BOARD_EXT_DATA_SZ 0
++
+ #endif /* __TARGADDRS_H__ */
+--- a/drivers/net/wireless/ath/ath10k/trace.h
++++ b/drivers/net/wireless/ath/ath10k/trace.h
+@@ -18,6 +18,16 @@
+ #if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+
+ #include <linux/tracepoint.h>
++#include "core.h"
++
++#if !defined(_TRACE_H_)
++static inline u32 ath10k_frm_hdr_len(const void *buf)
++{
++	const struct ieee80211_hdr *hdr = buf;
++
++	return ieee80211_hdrlen(hdr->frame_control);
++}
++#endif
+
+ #define _TRACE_H_
+
+@@ -39,59 +49,79 @@ static inline void trace_ ## name(proto)
+ #define ATH10K_MSG_MAX 200
+
+ DECLARE_EVENT_CLASS(ath10k_log_event,
+-	TP_PROTO(struct va_format *vaf),
+-	TP_ARGS(vaf),
++	TP_PROTO(struct ath10k *ar, struct va_format *vaf),
++	TP_ARGS(ar, vaf),
+ 	TP_STRUCT__entry(
++		__string(device, dev_name(ar->dev))
++		__string(driver, dev_driver_string(ar->dev))
+ 		__dynamic_array(char, msg, ATH10K_MSG_MAX)
+ 	),
+ 	TP_fast_assign(
++		__assign_str(device, dev_name(ar->dev));
++		__assign_str(driver, dev_driver_string(ar->dev));
+ 		WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ 				       ATH10K_MSG_MAX,
+ 				       vaf->fmt,
+ 				       *vaf->va) >= ATH10K_MSG_MAX);
+ 	),
+-	TP_printk("%s", __get_str(msg))
++	TP_printk(
++		"%s %s %s",
++		__get_str(driver),
++		__get_str(device),
++		__get_str(msg)
++	)
+ );
+
+ DEFINE_EVENT(ath10k_log_event, ath10k_log_err,
+-	     TP_PROTO(struct va_format *vaf),
+-	     TP_ARGS(vaf)
++	     TP_PROTO(struct ath10k *ar, struct va_format *vaf),
++	     TP_ARGS(ar, vaf)
+ );
+
+ DEFINE_EVENT(ath10k_log_event, ath10k_log_warn,
+-	     TP_PROTO(struct va_format *vaf),
+-	     TP_ARGS(vaf)
++	     TP_PROTO(struct ath10k *ar, struct va_format *vaf),
++	     TP_ARGS(ar, vaf)
+ );
+
+ DEFINE_EVENT(ath10k_log_event, ath10k_log_info,
+-	     TP_PROTO(struct va_format *vaf),
+-	     TP_ARGS(vaf)
++	     TP_PROTO(struct ath10k *ar, struct va_format *vaf),
++	     TP_ARGS(ar, vaf)
+ );
+
+ TRACE_EVENT(ath10k_log_dbg,
+-	TP_PROTO(unsigned int level, struct va_format *vaf),
+-	TP_ARGS(level, vaf),
++	TP_PROTO(struct ath10k *ar, unsigned int level, struct va_format *vaf),
++	TP_ARGS(ar, level, vaf),
+ 	TP_STRUCT__entry(
++		__string(device, dev_name(ar->dev))
++		__string(driver, dev_driver_string(ar->dev))
+ 		__field(unsigned int, level)
+ 		__dynamic_array(char, msg, ATH10K_MSG_MAX)
+ 	),
+ 	TP_fast_assign(
++		__assign_str(device, dev_name(ar->dev));
++		__assign_str(driver, dev_driver_string(ar->dev));
+ 		__entry->level = level;
+ 		WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ 				       ATH10K_MSG_MAX,
+ 				       vaf->fmt,
+ 				       *vaf->va) >= ATH10K_MSG_MAX);
+ 	),
+-	TP_printk("%s", __get_str(msg))
++	TP_printk(
++		"%s %s %s",
++		__get_str(driver),
++		__get_str(device),
++		__get_str(msg)
++	)
+ );
+
+ TRACE_EVENT(ath10k_log_dbg_dump,
+-	TP_PROTO(const char *msg, const char *prefix,
++	TP_PROTO(struct ath10k *ar, const char *msg, const char *prefix,
+ 		 const void *buf, size_t buf_len),
+
+-	TP_ARGS(msg, prefix, buf, buf_len),
++	TP_ARGS(ar, msg, prefix, buf, buf_len),
+
+ 	TP_STRUCT__entry(
++		__string(device, dev_name(ar->dev))
++		__string(driver, dev_driver_string(ar->dev))
+ 		__string(msg, msg)
+ 		__string(prefix, prefix)
+ 		__field(size_t, buf_len)
+@@ -99,6 +129,8 @@ TRACE_EVENT(ath10k_log_dbg_dump,
+ 	),
+
+ 	TP_fast_assign(
++		__assign_str(device, dev_name(ar->dev));
++		__assign_str(driver, dev_driver_string(ar->dev));
+ 		__assign_str(msg, msg);
+ 		__assign_str(prefix, prefix);
+ 		__entry->buf_len = buf_len;
+@@ -106,16 +138,23 @@ TRACE_EVENT(ath10k_log_dbg_dump,
+ 	),
+
+ 	TP_printk(
+-		"%s/%s\n", __get_str(prefix), __get_str(msg)
++		"%s %s %s/%s\n",
++		__get_str(driver),
++		__get_str(device),
++		__get_str(prefix),
++		__get_str(msg)
+ 	)
+ );
+
+ TRACE_EVENT(ath10k_wmi_cmd,
+-	TP_PROTO(int id, void *buf, size_t buf_len, int ret),
++	TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len,
++		 int ret),
+
+-	TP_ARGS(id, buf, buf_len, ret),
++	TP_ARGS(ar, id, buf, buf_len, ret),
+
+ 	TP_STRUCT__entry(
++		__string(device, dev_name(ar->dev))
++		__string(driver, dev_driver_string(ar->dev))
+ 		__field(unsigned int, id)
+ 		__field(size_t, buf_len)
+ 		__dynamic_array(u8, buf, buf_len)
+@@ -123,6 +162,8 @@ TRACE_EVENT(ath10k_wmi_cmd,
+ 	),
+
+ 	TP_fast_assign(
++		__assign_str(device, dev_name(ar->dev));
++		__assign_str(driver, dev_driver_string(ar->dev));
+ 		__entry->id = id;
+ 		__entry->buf_len = buf_len;
+ 		__entry->ret = ret;
+@@ -130,7 +171,9 @@ TRACE_EVENT(ath10k_wmi_cmd,
+ 	),
+
+ 	TP_printk(
+-		"id %d len %zu ret %d",
++		"%s %s id %d len %zu ret %d",
++		__get_str(driver),
++		__get_str(device),
+ 		__entry->id,
+ 		__entry->buf_len,
+ 		__entry->ret
+@@ -138,71 +181,346 @@ TRACE_EVENT(ath10k_wmi_cmd,
+ );
+
+ TRACE_EVENT(ath10k_wmi_event,
+-	TP_PROTO(int id, void *buf, size_t buf_len),
++	TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
+
+-	TP_ARGS(id, buf, buf_len),
++	TP_ARGS(ar, id, buf, buf_len),
+
+ 	TP_STRUCT__entry(
++		__string(device, dev_name(ar->dev))
++		__string(driver, dev_driver_string(ar->dev))
+ 		__field(unsigned int, id)
+ 		__field(size_t, buf_len)
+ 		__dynamic_array(u8, buf, buf_len)
+ 	),
+
+ 	TP_fast_assign(
++		__assign_str(device, dev_name(ar->dev));
++		__assign_str(driver, dev_driver_string(ar->dev));
+ 		__entry->id = id;
+ 		__entry->buf_len = buf_len;
+ 		memcpy(__get_dynamic_array(buf), buf, buf_len);
+ 	),
+
+ 	TP_printk(
+-		"id %d len %zu",
++		"%s %s id %d len %zu",
++		__get_str(driver),
++		__get_str(device),
+ 		__entry->id,
+ 		__entry->buf_len
+ 	)
+ );
+
+ TRACE_EVENT(ath10k_htt_stats,
+-	TP_PROTO(void *buf, size_t buf_len),
++	TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len),
+
+-	TP_ARGS(buf, buf_len),
++	TP_ARGS(ar, buf, buf_len),
+
+ 	TP_STRUCT__entry(
++		__string(device, dev_name(ar->dev))
++		__string(driver, dev_driver_string(ar->dev))
+ 		__field(size_t, buf_len)
+ 		__dynamic_array(u8, buf, buf_len)
+ 	),
+
+ 	TP_fast_assign(
++		__assign_str(device, dev_name(ar->dev));
++		__assign_str(driver, dev_driver_string(ar->dev));
+ 		__entry->buf_len = buf_len;
+ 		memcpy(__get_dynamic_array(buf), buf, buf_len);
+ 	),
+
+ 	TP_printk(
+-		"len %zu",
++		"%s %s len %zu",
++		__get_str(driver),
++		__get_str(device),
+ 		__entry->buf_len
+ 	)
+ );
+
+ TRACE_EVENT(ath10k_wmi_dbglog,
+-	TP_PROTO(void *buf, size_t buf_len),
++	TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len),
+
+-	TP_ARGS(buf, buf_len),
++	TP_ARGS(ar, buf, buf_len),
+
+ 	TP_STRUCT__entry(
++		__string(device, dev_name(ar->dev))
++		__string(driver, dev_driver_string(ar->dev))
+ 		__field(size_t, buf_len)
+ 		__dynamic_array(u8, buf, buf_len)
+ 	),
+
+ 	TP_fast_assign(
++		__assign_str(device, dev_name(ar->dev));
++		__assign_str(driver, dev_driver_string(ar->dev));
+ 		__entry->buf_len = buf_len;
+ 		memcpy(__get_dynamic_array(buf), buf, buf_len);
+ 	),
+
+ 	TP_printk(
+-		"len %zu",
++		"%s %s len %zu",
++		__get_str(driver),
++		__get_str(device),
+ 		__entry->buf_len
+ 	)
+ );
+
++TRACE_EVENT(ath10k_htt_pktlog,
++	    TP_PROTO(struct ath10k *ar, const void *buf, u16 buf_len),
++
++	TP_ARGS(ar, buf, buf_len),
++
++	TP_STRUCT__entry(
++		__string(device, dev_name(ar->dev))
++		__string(driver, dev_driver_string(ar->dev))
++		__field(u16, buf_len)
++		__dynamic_array(u8, pktlog, buf_len)
++	),
++
++	TP_fast_assign(
++		__assign_str(device, dev_name(ar->dev));
++		__assign_str(driver, dev_driver_string(ar->dev));
++		__entry->buf_len = buf_len;
++		memcpy(__get_dynamic_array(pktlog), buf, buf_len);
++	),
++
++	TP_printk(
++		"%s %s size %hu",
++		__get_str(driver),
++		__get_str(device),
++		__entry->buf_len
++	 )
++);
++
++TRACE_EVENT(ath10k_htt_tx,
++	    TP_PROTO(struct ath10k *ar, u16 msdu_id, u16 msdu_len,
++		     u8 vdev_id, u8 tid),
++
++	TP_ARGS(ar, msdu_id, msdu_len, vdev_id, tid),
++
++	TP_STRUCT__entry(
++		__string(device, dev_name(ar->dev))
++		__string(driver, dev_driver_string(ar->dev))
++		__field(u16, msdu_id)
++		__field(u16, msdu_len)
++		__field(u8, vdev_id)
++		__field(u8, tid)
++	),
++
++	TP_fast_assign(
++		__assign_str(device, dev_name(ar->dev));
++		__assign_str(driver, dev_driver_string(ar->dev));
++		__entry->msdu_id = msdu_id;
++		__entry->msdu_len = msdu_len;
++		__entry->vdev_id = vdev_id;
++		__entry->tid = tid;
++	),
++
++	TP_printk(
++		"%s %s msdu_id %d msdu_len %d vdev_id %d tid %d",
++		__get_str(driver),
++		__get_str(device),
++		__entry->msdu_id,
++		__entry->msdu_len,
++		__entry->vdev_id,
++		__entry->tid
++	 )
++);
++
++TRACE_EVENT(ath10k_txrx_tx_unref,
++	    TP_PROTO(struct ath10k *ar, u16 msdu_id),
++
++	TP_ARGS(ar, msdu_id),
++
++	TP_STRUCT__entry(
++		__string(device, dev_name(ar->dev))
++		__string(driver, dev_driver_string(ar->dev))
++		__field(u16, msdu_id)
++	),
++
++	TP_fast_assign(
++		__assign_str(device, dev_name(ar->dev));
++		__assign_str(driver, dev_driver_string(ar->dev));
++		__entry->msdu_id = msdu_id;
++	),
++
++	TP_printk(
++		"%s %s msdu_id %d",
++		__get_str(driver),
++		__get_str(device),
++		__entry->msdu_id
++	 )
++);
++
++DECLARE_EVENT_CLASS(ath10k_hdr_event,
++		    TP_PROTO(struct ath10k *ar, const void *data, size_t len),
++
++	TP_ARGS(ar, data, len),
++
++	TP_STRUCT__entry(
++		__string(device, dev_name(ar->dev))
++		__string(driver, dev_driver_string(ar->dev))
++		__field(size_t, len)
++		__dynamic_array(u8, data, ath10k_frm_hdr_len(data))
++	),
++
++	TP_fast_assign(
++		__assign_str(device, dev_name(ar->dev));
++		__assign_str(driver, dev_driver_string(ar->dev));
++		__entry->len = ath10k_frm_hdr_len(data);
++		memcpy(__get_dynamic_array(data), data, __entry->len);
++	),
++
++	TP_printk(
++		"%s %s len %zu\n",
++		__get_str(driver),
++		__get_str(device),
++		__entry->len
++	)
++);
++
++DECLARE_EVENT_CLASS(ath10k_payload_event,
++		    TP_PROTO(struct ath10k *ar, const void *data, size_t len),
++
++	TP_ARGS(ar, data, len),
++
++	TP_STRUCT__entry(
++		__string(device, dev_name(ar->dev))
++		__string(driver, dev_driver_string(ar->dev))
++		__field(size_t, len)
++		__dynamic_array(u8, payload, (len - ath10k_frm_hdr_len(data)))
++	),
++
++	TP_fast_assign(
++		__assign_str(device, dev_name(ar->dev));
++		__assign_str(driver, dev_driver_string(ar->dev));
++		__entry->len = len - ath10k_frm_hdr_len(data);
++		memcpy(__get_dynamic_array(payload),
++		       data + ath10k_frm_hdr_len(data), __entry->len);
++	),
++
++	TP_printk(
++		"%s %s len %zu\n",
++		__get_str(driver),
++		__get_str(device),
++		__entry->len
++	)
++);
++
++DEFINE_EVENT(ath10k_hdr_event, ath10k_tx_hdr,
++	     TP_PROTO(struct ath10k *ar, const void *data, size_t len),
++	     TP_ARGS(ar, data, len)
++);
++
++DEFINE_EVENT(ath10k_payload_event, ath10k_tx_payload,
++	     TP_PROTO(struct ath10k *ar, const void *data, size_t len),
++	     TP_ARGS(ar, data, len)
++);
++
++DEFINE_EVENT(ath10k_hdr_event, ath10k_rx_hdr,
++	     TP_PROTO(struct ath10k *ar, const void *data, size_t len),
++	     TP_ARGS(ar, data, len)
++);
++
++DEFINE_EVENT(ath10k_payload_event, ath10k_rx_payload,
++	     TP_PROTO(struct ath10k *ar, const void *data, size_t len),
++	     TP_ARGS(ar, data, len)
++);
++
++TRACE_EVENT(ath10k_htt_rx_desc,
++	    TP_PROTO(struct ath10k *ar, const void *data, size_t len),
++
++	TP_ARGS(ar, data, len),
++
++	TP_STRUCT__entry(
++		__string(device, dev_name(ar->dev))
++		__string(driver, dev_driver_string(ar->dev))
++		__field(u16, len)
++		__dynamic_array(u8, rxdesc, len)
++	),
++
++	TP_fast_assign(
++		__assign_str(device, dev_name(ar->dev));
++		__assign_str(driver, dev_driver_string(ar->dev));
++		__entry->len = len;
++		memcpy(__get_dynamic_array(rxdesc), data, len);
++	),
++
++	TP_printk(
++		"%s %s rxdesc len %d",
++		__get_str(driver),
++		__get_str(device),
++		__entry->len
++	 )
++);
++
++TRACE_EVENT(ath10k_wmi_diag_container,
++	    TP_PROTO(struct ath10k *ar,
++		     u8 type,
++		     u32 timestamp,
++		     u32 code,
++		     u16 len,
++		     const void *data),
++
++	TP_ARGS(ar, type, timestamp, code, len, data),
++
++	TP_STRUCT__entry(
++		__string(device, dev_name(ar->dev))
++		__string(driver, dev_driver_string(ar->dev))
++		__field(u8, type)
++		__field(u32, timestamp)
++		__field(u32, code)
++		__field(u16, len)
++		__dynamic_array(u8, data, len)
++	),
++
++	TP_fast_assign(
++		__assign_str(device, dev_name(ar->dev));
++		__assign_str(driver, dev_driver_string(ar->dev));
++		__entry->type = type;
++		__entry->timestamp = timestamp;
++		__entry->code = code;
++		__entry->len = len;
++		memcpy(__get_dynamic_array(data), data, len);
++	),
++
++	TP_printk(
++		"%s %s diag container type %hhu timestamp %u code %u len %d",
++		__get_str(driver),
++		__get_str(device),
++		__entry->type,
++		__entry->timestamp,
++		__entry->code,
++		__entry->len
++	)
++);
++
++TRACE_EVENT(ath10k_wmi_diag,
++	    TP_PROTO(struct ath10k *ar, const void *data, size_t len),
++
++	TP_ARGS(ar, data, len),
++
++	TP_STRUCT__entry(
++		__string(device, dev_name(ar->dev))
++		__string(driver, dev_driver_string(ar->dev))
++		__field(u16, len)
++		__dynamic_array(u8, data, len)
++	),
++
++	TP_fast_assign(
++		__assign_str(device, dev_name(ar->dev));
++		__assign_str(driver, dev_driver_string(ar->dev));
++		__entry->len = len;
++		memcpy(__get_dynamic_array(data), data, len);
++	),
++
++	TP_printk(
++		"%s %s tlv diag len %d",
++		__get_str(driver),
++		__get_str(device),
++		__entry->len
++	)
++);
++
+ #endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
+
+ /* we don't want to use include/trace/events */
+--- a/drivers/net/wireless/ath/ath10k/txrx.c
++++ b/drivers/net/wireless/ath/ath10k/txrx.c
+@@ -32,14 +32,14 @@ static void ath10k_report_offchan_tx(str
+ 	 * offchan_tx_skb. */
+ 	spin_lock_bh(&ar->data_lock);
+ 	if (ar->offchan_tx_skb != skb) {
+-		ath10k_warn("completed old offchannel frame\n");
++		ath10k_warn(ar, "completed old offchannel frame\n");
+ 		goto out;
+ 	}
+
+ 	complete(&ar->offchan_tx_completed);
+ 	ar->offchan_tx_skb = NULL; /* just for sanity */
+
+-	ath10k_dbg(ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
++	ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
+ out:
+ 	spin_unlock_bh(&ar->data_lock);
+ }
+@@ -47,23 +47,30 @@ out:
+ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ 			  const struct htt_tx_done *tx_done)
+ {
+-	struct device *dev = htt->ar->dev;
++	struct ath10k *ar = htt->ar;
++	struct device *dev = ar->dev;
+ 	struct ieee80211_tx_info *info;
+ 	struct ath10k_skb_cb *skb_cb;
+ 	struct sk_buff *msdu;
+
+ 	lockdep_assert_held(&htt->tx_lock);
+
+-	ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
++	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
+ 		   tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
+
+ 	if (tx_done->msdu_id >= htt->max_num_pending_tx) {
+-		ath10k_warn("warning: msdu_id %d too big, ignoring\n",
++		ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
++			    tx_done->msdu_id);
++		return;
++	}
++
++	msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
++	if (!msdu) {
++		ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
+ 			    tx_done->msdu_id);
+ 		return;
+ 	}
+
+-	msdu = htt->pending_tx[tx_done->msdu_id];
+ 	skb_cb = ATH10K_SKB_CB(msdu);
+
+ 	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+@@ -77,6 +84,7 @@ void ath10k_txrx_tx_unref(struct ath10k_
+
+ 	info = IEEE80211_SKB_CB(msdu);
+ 	memset(&info->status, 0, sizeof(info->status));
++	trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
+
+ 	if (tx_done->discard) {
+ 		ieee80211_free_txskb(htt->ar->hw, msdu);
+@@ -93,7 +101,6 @@ void ath10k_txrx_tx_unref(struct ath10k_
+ 	/* we do not own the msdu anymore */
+
+ exit:
+-	htt->pending_tx[tx_done->msdu_id] = NULL;
+ 	ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
+ 	__ath10k_htt_tx_dec_pending(htt);
+ 	if (htt->num_pending_tx == 0)
+@@ -119,8 +126,7 @@ struct ath10k_peer *ath10k_peer_find(str
+ 	return NULL;
+ }
+
+-static struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar,
+-						  int peer_id)
++struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
+ {
+ 	struct ath10k_peer *peer;
+
+@@ -145,7 +151,8 @@ static int ath10k_wait_for_peer_common(s
+ 			mapped = !!ath10k_peer_find(ar, vdev_id, addr);
+ 			spin_unlock_bh(&ar->data_lock);
+
+-			mapped == expect_mapped;
++			(mapped == expect_mapped ||
++			 test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
+ 		}), 3*HZ);
+
+ 	if (ret <= 0)
+@@ -178,12 +185,12 @@ void ath10k_peer_map_event(struct ath10k
+ 			goto exit;
+
+ 		peer->vdev_id = ev->vdev_id;
+-		memcpy(peer->addr, ev->addr, ETH_ALEN);
++		ether_addr_copy(peer->addr, ev->addr);
+ 		list_add(&peer->list, &ar->peers);
+ 		wake_up(&ar->peer_mapping_wq);
+ 	}
+
+-	ath10k_dbg(ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
++	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
+ 		   ev->vdev_id, ev->addr, ev->peer_id);
+
+ 	set_bit(ev->peer_id, peer->peer_ids);
+@@ -200,12 +207,12 @@ void ath10k_peer_unmap_event(struct ath1
+ 	spin_lock_bh(&ar->data_lock);
+ 	peer = ath10k_peer_find_by_id(ar, ev->peer_id);
+ 	if (!peer) {
+-		ath10k_warn("peer-unmap-event: unknown peer id %d\n",
++		ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n",
+ 			    ev->peer_id);
+ 		goto exit;
+ 	}
+
+-	ath10k_dbg(ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
++	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
+ 		   peer->vdev_id, peer->addr, ev->peer_id);
+
+ 	clear_bit(ev->peer_id, peer->peer_ids);
+--- a/drivers/net/wireless/ath/ath10k/txrx.h
++++ b/drivers/net/wireless/ath/ath10k/txrx.h
+@@ -24,6 +24,7 @@ void ath10k_txrx_tx_unref(struct ath10k_
+
+ struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
+ 				     const u8 *addr);
++struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id);
+ int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id,
+ 				 const u8 *addr);
+ int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id,
+--- a/drivers/net/wireless/ath/ath10k/wmi.c
|
|
++++ b/drivers/net/wireless/ath/ath10k/wmi.c
|
|
+@@ -22,7 +22,10 @@
|
|
+ #include "htc.h"
|
|
+ #include "debug.h"
|
|
+ #include "wmi.h"
|
|
++#include "wmi-tlv.h"
|
|
+ #include "mac.h"
|
|
++#include "testmode.h"
|
|
++#include "wmi-ops.h"
|
|
+
|
|
+ /* MAIN WMI cmd track */
|
|
+ static struct wmi_cmd_map wmi_cmd_map = {
|
|
+@@ -142,6 +145,7 @@ static struct wmi_cmd_map wmi_cmd_map =
|
|
+ .force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
|
|
+ .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
|
|
+ .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
|
|
++ .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
|
|
+ };
|
|
+
|
|
+ /* 10.X WMI cmd track */
|
|
+@@ -264,6 +268,129 @@ static struct wmi_cmd_map wmi_10x_cmd_ma
|
|
+ .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
|
|
+ .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
|
|
+ .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
|
|
++ .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
|
|
++};
|
|
++
|
|
++/* 10.2.4 WMI cmd track */
|
|
++static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
|
|
++ .init_cmdid = WMI_10_2_INIT_CMDID,
|
|
++ .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
|
|
++ .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
|
|
++ .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
|
|
++ .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
|
|
++ .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
|
|
++ .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
|
|
++ .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
|
|
++ .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
|
|
++ .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
|
|
++ .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
|
|
++ .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
|
|
++ .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
|
|
++ .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
|
|
++ .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
|
|
++ .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
|
|
++ .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
|
|
++ .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
|
|
++ .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
|
|
++ .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
|
|
++ .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
|
|
++ .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
|
|
++ .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
|
|
++ .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
|
|
++ .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
|
|
++ .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
|
|
++ .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
|
|
++ .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
|
|
++ .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
|
|
++ .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
++	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
++	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
++	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
++	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
++	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
++	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
++	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
++	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
++	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
++	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
++	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
++	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
++	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
++	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
++	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
++	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
++	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
++	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
++	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
++	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
++	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
++	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
++	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
++	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
++	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
++	.roam_scan_rssi_change_threshold =
++	WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
++	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
++	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
++	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
++	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
++	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
++	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
++	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
++	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
++	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
++	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
++	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
++	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
++	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
++	.wlan_profile_set_hist_intvl_cmdid =
++	WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
++	.wlan_profile_get_profile_data_cmdid =
++	WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
++	.wlan_profile_enable_profile_id_cmdid =
++	WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
++	.wlan_profile_list_profile_id_cmdid =
++	WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
++	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
++	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
++	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
++	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
++	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
++	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
++	.wow_enable_disable_wake_event_cmdid =
++	WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
++	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
++	.wow_hostwakeup_from_sleep_cmdid =
++	WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
++	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
++	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
++	.vdev_spectral_scan_configure_cmdid =
++	WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
++	.vdev_spectral_scan_enable_cmdid =
++	WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
++	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
++	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
++	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
++	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
++	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
++	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
++	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
++	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
++	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
++	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
++	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
++	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
++	.echo_cmdid = WMI_10_2_ECHO_CMDID,
++	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
++	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
++	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
++	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
++	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
++	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
++	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
++	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
++	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
++	.pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
+ };
+
+ /* MAIN WMI VDEV param map */
+@@ -384,6 +511,64 @@ static struct wmi_vdev_param_map wmi_10x
+ 	WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ };
+
++static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
++	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
++	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
++	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
++	.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
++	.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
++	.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
++	.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
++	.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
++	.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
++	.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
++	.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
++	.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
++	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
++	.wmi_vdev_oc_scheduler_air_time_limit =
++	WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
++	.wds = WMI_10X_VDEV_PARAM_WDS,
++	.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
++	.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
++	.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
++	.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
++	.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
++	.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
++	.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
++	.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
++	.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
++	.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
++	.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
++	.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
++	.sgi = WMI_10X_VDEV_PARAM_SGI,
++	.ldpc = WMI_10X_VDEV_PARAM_LDPC,
++	.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
++	.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
++	.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
++	.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
++	.nss = WMI_10X_VDEV_PARAM_NSS,
++	.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
++	.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
++	.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
++	.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
++	.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
++	.ap_keepalive_min_idle_inactive_time_secs =
++	WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
++	.ap_keepalive_max_idle_inactive_time_secs =
++	WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
++	.ap_keepalive_max_unresponsive_time_secs =
++	WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
++	.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
++	.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
++	.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
++	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
++	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
++	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
++	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
++	.ap_detect_out_of_sync_sleeping_sta_time_secs =
++	WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
++};
++
+ static struct wmi_pdev_param_map wmi_pdev_param_map = {
+ 	.tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
+ 	.rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
+@@ -433,6 +618,7 @@ static struct wmi_pdev_param_map wmi_pde
+ 	.fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ 	.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
+ 	.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
++	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+ };
+
+ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
+@@ -485,11 +671,221 @@ static struct wmi_pdev_param_map wmi_10x
+ 	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+ 	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
+ 	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
++	.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
++};
++
++static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
++	.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
++	.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
++	.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
++	.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
++	.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
++	.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
++	.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
++	.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
++	.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
++	.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
++	.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
++	.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
++	.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
++	.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
++	.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
++	.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
++	.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
++	.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
++	.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
++	.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
++	.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
++	.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
++	.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
++	.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
++	.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
++	.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
++	.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
++	.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
++	.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
++	.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
++	.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
++	.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
++	.bcnflt_stats_update_period =
++	WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
++	.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
++	.arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
++	.dcs = WMI_10X_PDEV_PARAM_DCS,
++	.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
++	.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
++	.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
++	.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
++	.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
++	.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
++	.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
++	.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
++	.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
++	.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
++	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
++	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
++	.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
++};
++
++/* firmware 10.2 specific mappings */
++static struct wmi_cmd_map wmi_10_2_cmd_map = {
++	.init_cmdid = WMI_10_2_INIT_CMDID,
++	.start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
++	.stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
++	.scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
++	.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
++	.pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
++	.pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
++	.pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
++	.pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
++	.pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
++	.pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
++	.pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
++	.pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
++	.pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
++	.pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
++	.pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
++	.pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
++	.vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
++	.vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
++	.vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
++	.vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
++	.vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
++	.vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
++	.vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
++	.vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
++	.vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
++	.peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
++	.peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
++	.peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
++	.peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
++	.peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
++	.peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
++	.peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
++	.peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
++	.bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
++	.pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
++	.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
++	.bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
++	.prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
++	.mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
++	.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
++	.addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
++	.addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
++	.addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
++	.delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
++	.addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
++	.send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
++	.sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
++	.sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
++	.sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
++	.pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
++	.pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
++	.roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
++	.roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
++	.roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
++	.roam_scan_rssi_change_threshold =
++	WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
++	.roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
++	.ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
++	.ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
++	.ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
++	.p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
++	.p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
++	.p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
++	.p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
++	.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
++	.ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
++	.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
++	.peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
++	.wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
++	.wlan_profile_set_hist_intvl_cmdid =
++	WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
++	.wlan_profile_get_profile_data_cmdid =
++	WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
++	.wlan_profile_enable_profile_id_cmdid =
++	WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
++	.wlan_profile_list_profile_id_cmdid =
++	WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
++	.pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
++	.pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
++	.add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
++	.rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
++	.wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
++	.wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
++	.wow_enable_disable_wake_event_cmdid =
++	WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
++	.wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
++	.wow_hostwakeup_from_sleep_cmdid =
++	WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
++	.rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
++	.rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
++	.vdev_spectral_scan_configure_cmdid =
++	WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
++	.vdev_spectral_scan_enable_cmdid =
++	WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
++	.request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
++	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
++	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
++	.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
++	.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
++	.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
++	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
++	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
++	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
++	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
++	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
++	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
++	.echo_cmdid = WMI_10_2_ECHO_CMDID,
++	.pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
++	.dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
++	.pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
++	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
++	.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
++	.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
++	.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
++	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
++	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
++	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+ };
+
++void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
++				const struct wmi_channel_arg *arg)
++{
++	u32 flags = 0;
++
++	memset(ch, 0, sizeof(*ch));
++
++	if (arg->passive)
++		flags |= WMI_CHAN_FLAG_PASSIVE;
++	if (arg->allow_ibss)
++		flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
++	if (arg->allow_ht)
++		flags |= WMI_CHAN_FLAG_ALLOW_HT;
++	if (arg->allow_vht)
++		flags |= WMI_CHAN_FLAG_ALLOW_VHT;
++	if (arg->ht40plus)
++		flags |= WMI_CHAN_FLAG_HT40_PLUS;
++	if (arg->chan_radar)
++		flags |= WMI_CHAN_FLAG_DFS;
++
++	ch->mhz = __cpu_to_le32(arg->freq);
++	ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
++	ch->band_center_freq2 = 0;
++	ch->min_power = arg->min_power;
++	ch->max_power = arg->max_power;
++	ch->reg_power = arg->max_reg_power;
++	ch->antenna_max = arg->max_antenna_gain;
++
++	/* mode & flags share storage */
++	ch->mode = arg->mode;
++	ch->flags |= __cpu_to_le32(flags);
++}
++
+ int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
+ {
+ 	int ret;
++
+ 	ret = wait_for_completion_timeout(&ar->wmi.service_ready,
+ 					  WMI_SERVICE_READY_TIMEOUT_HZ);
+ 	return ret;
+@@ -498,23 +894,24 @@ int ath10k_wmi_wait_for_service_ready(st
+ int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
+ {
+ 	int ret;
++
+ 	ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
+ 					  WMI_UNIFIED_READY_TIMEOUT_HZ);
+ 	return ret;
+ }
+
+-static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
++struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
+ {
+ 	struct sk_buff *skb;
+ 	u32 round_len = roundup(len, 4);
+
+-	skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len);
++	skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
+ 	if (!skb)
+ 		return NULL;
+
+ 	skb_reserve(skb, WMI_SKB_HEADROOM);
+ 	if (!IS_ALIGNED((unsigned long)skb->data, 4))
+-		ath10k_warn("Unaligned WMI skb\n");
++		ath10k_warn(ar, "Unaligned WMI skb\n");
+
+ 	skb_put(skb, round_len);
+ 	memset(skb->data, 0, round_len);
+@@ -527,8 +924,8 @@ static void ath10k_wmi_htc_tx_complete(s
+ 	dev_kfree_skb(skb);
+ }
+
+-static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
+-				      u32 cmd_id)
++int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
++			       u32 cmd_id)
+ {
+ 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+ 	struct wmi_cmd_hdr *cmd_hdr;
+@@ -545,7 +942,7 @@ static int ath10k_wmi_cmd_send_nowait(st
+
+ 	memset(skb_cb, 0, sizeof(*skb_cb));
+ 	ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
+-	trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len, ret);
++	trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);
+
+ 	if (ret)
+ 		goto err_pull;
+@@ -559,23 +956,45 @@ err_pull:
+
+ static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
+ {
++	struct ath10k *ar = arvif->ar;
++	struct ath10k_skb_cb *cb;
++	struct sk_buff *bcn;
+ 	int ret;
+
+-	lockdep_assert_held(&arvif->ar->data_lock);
++	spin_lock_bh(&ar->data_lock);
+
+-	if (arvif->beacon == NULL)
+-		return;
++	bcn = arvif->beacon;
+
+-	if (arvif->beacon_sent)
+-		return;
++	if (!bcn)
++		goto unlock;
+
+-	ret = ath10k_wmi_beacon_send_ref_nowait(arvif);
+-	if (ret)
+-		return;
++	cb = ATH10K_SKB_CB(bcn);
++
++	switch (arvif->beacon_state) {
++	case ATH10K_BEACON_SENDING:
++	case ATH10K_BEACON_SENT:
++		break;
++	case ATH10K_BEACON_SCHEDULED:
++		arvif->beacon_state = ATH10K_BEACON_SENDING;
++		spin_unlock_bh(&ar->data_lock);
++
++		ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
++							arvif->vdev_id,
++							bcn->data, bcn->len,
++							cb->paddr,
++							cb->bcn.dtim_zero,
++							cb->bcn.deliver_cab);
++
++		spin_lock_bh(&ar->data_lock);
+
+-	/* We need to retain the arvif->beacon reference for DMA unmapping and
+-	 * freeing the skbuff later. */
+-	arvif->beacon_sent = true;
++		if (ret == 0)
++			arvif->beacon_state = ATH10K_BEACON_SENT;
++		else
++			arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
++	}
++
++unlock:
++	spin_unlock_bh(&ar->data_lock);
+ }
+
+ static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
+@@ -588,12 +1007,10 @@ static void ath10k_wmi_tx_beacons_iter(v
+
+ static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
+ {
+-	spin_lock_bh(&ar->data_lock);
+ 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ 						   IEEE80211_IFACE_ITER_NORMAL,
+ 						   ath10k_wmi_tx_beacons_iter,
+ 						   NULL);
+-	spin_unlock_bh(&ar->data_lock);
+ }
+
+ static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
+@@ -604,15 +1021,14 @@ static void ath10k_wmi_op_ep_tx_credits(
+ 	wake_up(&ar->wmi.tx_credits_wq);
+ }
+
+-static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
+-			       u32 cmd_id)
++int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
+ {
+ 	int ret = -EOPNOTSUPP;
+
+ 	might_sleep();
+
+ 	if (cmd_id == WMI_CMD_UNSUPPORTED) {
+-		ath10k_warn("wmi command %d is not supported by firmware\n",
++		ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
+ 			    cmd_id);
+ 		return ret;
+ 	}
+@@ -622,6 +1038,10 @@ static int ath10k_wmi_cmd_send(struct at
+ 		ath10k_wmi_tx_beacons_nowait(ar);
+
+ 		ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
++
++		if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
++			ret = -ESHUTDOWN;
++
+ 		(ret != -EAGAIN);
+ 	}), 3*HZ);
+
+@@ -631,147 +1051,270 @@ static int ath10k_wmi_cmd_send(struct at
+ 	return ret;
+ }
+
+-int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
++static struct sk_buff *
++ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
+ {
+-	int ret = 0;
+ 	struct wmi_mgmt_tx_cmd *cmd;
+ 	struct ieee80211_hdr *hdr;
+-	struct sk_buff *wmi_skb;
+-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
++	struct sk_buff *skb;
+ 	int len;
++	u32 buf_len = msdu->len;
+ 	u16 fc;
+
+-	hdr = (struct ieee80211_hdr *)skb->data;
++	hdr = (struct ieee80211_hdr *)msdu->data;
+ 	fc = le16_to_cpu(hdr->frame_control);
+
+ 	if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
+-		return -EINVAL;
++		return ERR_PTR(-EINVAL);
++
++	len = sizeof(cmd->hdr) + msdu->len;
++
++	if ((ieee80211_is_action(hdr->frame_control) ||
++	     ieee80211_is_deauth(hdr->frame_control) ||
++	     ieee80211_is_disassoc(hdr->frame_control)) &&
++	     ieee80211_has_protected(hdr->frame_control)) {
++		len += IEEE80211_CCMP_MIC_LEN;
++		buf_len += IEEE80211_CCMP_MIC_LEN;
++	}
+
+-	len = sizeof(cmd->hdr) + skb->len;
+ 	len = round_up(len, 4);
+
+-	wmi_skb = ath10k_wmi_alloc_skb(len);
+-	if (!wmi_skb)
+-		return -ENOMEM;
++	skb = ath10k_wmi_alloc_skb(ar, len);
++	if (!skb)
++		return ERR_PTR(-ENOMEM);
+
+-	cmd = (struct wmi_mgmt_tx_cmd *)wmi_skb->data;
++	cmd = (struct wmi_mgmt_tx_cmd *)skb->data;
+
+-	cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
++	cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(msdu)->vdev_id);
+ 	cmd->hdr.tx_rate = 0;
+ 	cmd->hdr.tx_power = 0;
+-	cmd->hdr.buf_len = __cpu_to_le32((u32)(skb->len));
++	cmd->hdr.buf_len = __cpu_to_le32(buf_len);
+
+-	memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN);
+-	memcpy(cmd->buf, skb->data, skb->len);
++	ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
++	memcpy(cmd->buf, msdu->data, msdu->len);
+
+-	ath10k_dbg(ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
+-		   wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
++	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
++		   msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
+ 		   fc & IEEE80211_FCTL_STYPE);
++	trace_ath10k_tx_hdr(ar, skb->data, skb->len);
++	trace_ath10k_tx_payload(ar, skb->data, skb->len);
+
+-	/* Send the management frame buffer to the target */
+-	ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
+-	if (ret)
+-		return ret;
+-
+-	/* TODO: report tx status to mac80211 - temporary just ACK */
+-	info->flags |= IEEE80211_TX_STAT_ACK;
+-	ieee80211_tx_status_irqsafe(ar->hw, skb);
+-
+-	return ret;
++	return skb;
+ }
+
+-static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
++static void ath10k_wmi_event_scan_started(struct ath10k *ar)
+ {
+-	struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
+-	enum wmi_scan_event_type event_type;
+-	enum wmi_scan_completion_reason reason;
+-	u32 freq;
+-	u32 req_id;
+-	u32 scan_id;
+-	u32 vdev_id;
+-
+-	event_type = __le32_to_cpu(event->event_type);
+-	reason = __le32_to_cpu(event->reason);
+-	freq = __le32_to_cpu(event->channel_freq);
+-	req_id = __le32_to_cpu(event->scan_req_id);
+-	scan_id = __le32_to_cpu(event->scan_id);
+-	vdev_id = __le32_to_cpu(event->vdev_id);
+-
+-	ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENTID\n");
+-	ath10k_dbg(ATH10K_DBG_WMI,
+-		   "scan event type %d reason %d freq %d req_id %d "
+-		   "scan_id %d vdev_id %d\n",
+-		   event_type, reason, freq, req_id, scan_id, vdev_id);
++	lockdep_assert_held(&ar->data_lock);
+
+-	spin_lock_bh(&ar->data_lock);
++	switch (ar->scan.state) {
++	case ATH10K_SCAN_IDLE:
++	case ATH10K_SCAN_RUNNING:
++	case ATH10K_SCAN_ABORTING:
++		ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
++			    ath10k_scan_state_str(ar->scan.state),
++			    ar->scan.state);
++		break;
++	case ATH10K_SCAN_STARTING:
++		ar->scan.state = ATH10K_SCAN_RUNNING;
+
+-	switch (event_type) {
+-	case WMI_SCAN_EVENT_STARTED:
+-		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_STARTED\n");
+-		if (ar->scan.in_progress && ar->scan.is_roc)
++		if (ar->scan.is_roc)
+ 			ieee80211_ready_on_channel(ar->hw);
+
+ 		complete(&ar->scan.started);
+ 		break;
+-	case WMI_SCAN_EVENT_COMPLETED:
+-		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_COMPLETED\n");
+-		switch (reason) {
+-		case WMI_SCAN_REASON_COMPLETED:
+-			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_COMPLETED\n");
+-			break;
+-		case WMI_SCAN_REASON_CANCELLED:
+-			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_CANCELED\n");
+-			break;
+-		case WMI_SCAN_REASON_PREEMPTED:
+-			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_PREEMPTED\n");
+-			break;
+-		case WMI_SCAN_REASON_TIMEDOUT:
+-			ath10k_dbg(ATH10K_DBG_WMI, "SCAN_REASON_TIMEDOUT\n");
+-			break;
+-		default:
+-			break;
+-		}
+-
+-		ar->scan_channel = NULL;
+-		if (!ar->scan.in_progress) {
+-			ath10k_warn("no scan requested, ignoring\n");
+-			break;
+-		}
+-
+-		if (ar->scan.is_roc) {
+-			ath10k_offchan_tx_purge(ar);
++	}
++}
+
+-			if (!ar->scan.aborting)
+-				ieee80211_remain_on_channel_expired(ar->hw);
+-		} else {
+-			ieee80211_scan_completed(ar->hw, ar->scan.aborting);
+-		}
++static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar)
++{
++	lockdep_assert_held(&ar->data_lock);
+
+-		del_timer(&ar->scan.timeout);
+-		complete_all(&ar->scan.completed);
+-		ar->scan.in_progress = false;
++	switch (ar->scan.state) {
++	case ATH10K_SCAN_IDLE:
++	case ATH10K_SCAN_RUNNING:
++	case ATH10K_SCAN_ABORTING:
++		ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n",
++			    ath10k_scan_state_str(ar->scan.state),
++			    ar->scan.state);
+ 		break;
+-	case WMI_SCAN_EVENT_BSS_CHANNEL:
+-		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_BSS_CHANNEL\n");
+-		ar->scan_channel = NULL;
++	case ATH10K_SCAN_STARTING:
++		complete(&ar->scan.started);
++		__ath10k_scan_finish(ar);
+ 		break;
+-	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
+-		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_FOREIGN_CHANNEL\n");
++	}
++}
++
++static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
++{
++	lockdep_assert_held(&ar->data_lock);
++
++	switch (ar->scan.state) {
++	case ATH10K_SCAN_IDLE:
++	case ATH10K_SCAN_STARTING:
++		/* One suspected reason scan can be completed while starting is
++		 * if firmware fails to deliver all scan events to the host,
++		 * e.g. when transport pipe is full. This has been observed
++		 * with spectral scan phyerr events starving wmi transport
++		 * pipe. In such case the "scan completed" event should be (and
++		 * is) ignored by the host as it may be just firmware's scan
++		 * state machine recovering.
++		 */
++		ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
++			    ath10k_scan_state_str(ar->scan.state),
++			    ar->scan.state);
++		break;
++	case ATH10K_SCAN_RUNNING:
++	case ATH10K_SCAN_ABORTING:
++		__ath10k_scan_finish(ar);
++		break;
++	}
++}
++
++static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
++{
++	lockdep_assert_held(&ar->data_lock);
++
++	switch (ar->scan.state) {
++	case ATH10K_SCAN_IDLE:
++	case ATH10K_SCAN_STARTING:
++		ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
++			    ath10k_scan_state_str(ar->scan.state),
++			    ar->scan.state);
++		break;
++	case ATH10K_SCAN_RUNNING:
++	case ATH10K_SCAN_ABORTING:
++		ar->scan_channel = NULL;
++		break;
++	}
++}
++
++static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
++{
++	lockdep_assert_held(&ar->data_lock);
++
++	switch (ar->scan.state) {
++	case ATH10K_SCAN_IDLE:
++	case ATH10K_SCAN_STARTING:
++		ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
++			    ath10k_scan_state_str(ar->scan.state),
++			    ar->scan.state);
++		break;
++	case ATH10K_SCAN_RUNNING:
++	case ATH10K_SCAN_ABORTING:
+ 		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+-		if (ar->scan.in_progress && ar->scan.is_roc &&
+-		    ar->scan.roc_freq == freq) {
++
++		if (ar->scan.is_roc && ar->scan.roc_freq == freq)
+ 			complete(&ar->scan.on_channel);
+-		}
+ 		break;
++	}
++}
++
++static const char *
++ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
++			       enum wmi_scan_completion_reason reason)
++{
++	switch (type) {
++	case WMI_SCAN_EVENT_STARTED:
++		return "started";
++	case WMI_SCAN_EVENT_COMPLETED:
++		switch (reason) {
++		case WMI_SCAN_REASON_COMPLETED:
++			return "completed";
++		case WMI_SCAN_REASON_CANCELLED:
++			return "completed [cancelled]";
++		case WMI_SCAN_REASON_PREEMPTED:
++			return "completed [preempted]";
++		case WMI_SCAN_REASON_TIMEDOUT:
++			return "completed [timedout]";
++		case WMI_SCAN_REASON_MAX:
++			break;
++		}
++		return "completed [unknown]";
++	case WMI_SCAN_EVENT_BSS_CHANNEL:
++		return "bss channel";
++	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
++		return "foreign channel";
+ 	case WMI_SCAN_EVENT_DEQUEUED:
+-		ath10k_dbg(ATH10K_DBG_WMI, "SCAN_EVENT_DEQUEUED\n");
+-		break;
++		return "dequeued";
+ 	case WMI_SCAN_EVENT_PREEMPTED:
+-		ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_PREEMPTED\n");
++		return "preempted";
++	case WMI_SCAN_EVENT_START_FAILED:
++		return "start failed";
++	default:
++		return "unknown";
++	}
++}
++
++static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb,
++				      struct wmi_scan_ev_arg *arg)
++{
++	struct wmi_scan_event *ev = (void *)skb->data;
++
++	if (skb->len < sizeof(*ev))
++		return -EPROTO;
++
++	skb_pull(skb, sizeof(*ev));
++	arg->event_type = ev->event_type;
++	arg->reason = ev->reason;
++	arg->channel_freq = ev->channel_freq;
++	arg->scan_req_id = ev->scan_req_id;
++	arg->scan_id = ev->scan_id;
++	arg->vdev_id = ev->vdev_id;
++
++	return 0;
++}
++
++int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
++{
++	struct wmi_scan_ev_arg arg = {};
++	enum wmi_scan_event_type event_type;
++	enum wmi_scan_completion_reason reason;
++	u32 freq;
++	u32 req_id;
++	u32 scan_id;
++	u32 vdev_id;
++	int ret;
++
++	ret = ath10k_wmi_pull_scan(ar, skb, &arg);
++	if (ret) {
++		ath10k_warn(ar, "failed to parse scan event: %d\n", ret);
++		return ret;
++	}
++
++	event_type = __le32_to_cpu(arg.event_type);
++	reason = __le32_to_cpu(arg.reason);
++	freq = __le32_to_cpu(arg.channel_freq);
++	req_id = __le32_to_cpu(arg.scan_req_id);
++	scan_id = __le32_to_cpu(arg.scan_id);
++	vdev_id = __le32_to_cpu(arg.vdev_id);
++
++	spin_lock_bh(&ar->data_lock);
++
++	ath10k_dbg(ar, ATH10K_DBG_WMI,
++		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
++		   ath10k_wmi_event_scan_type_str(event_type, reason),
++		   event_type, reason, freq, req_id, scan_id, vdev_id,
++		   ath10k_scan_state_str(ar->scan.state), ar->scan.state);
++
++	switch (event_type) {
++	case WMI_SCAN_EVENT_STARTED:
++		ath10k_wmi_event_scan_started(ar);
++		break;
++	case WMI_SCAN_EVENT_COMPLETED:
++		ath10k_wmi_event_scan_completed(ar);
++		break;
++	case WMI_SCAN_EVENT_BSS_CHANNEL:
++		ath10k_wmi_event_scan_bss_chan(ar);
++		break;
++	case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
++		ath10k_wmi_event_scan_foreign_chan(ar, freq);
+ 		break;
+ 	case WMI_SCAN_EVENT_START_FAILED:
+-		ath10k_dbg(ATH10K_DBG_WMI, "WMI_SCAN_EVENT_START_FAILED\n");
++		ath10k_warn(ar, "received scan start failure event\n");
++		ath10k_wmi_event_scan_start_failed(ar);
+ 		break;
++	case WMI_SCAN_EVENT_DEQUEUED:
++	case WMI_SCAN_EVENT_PREEMPTED:
+ 	default:
+ 		break;
+ 	}
+@@ -865,13 +1408,86 @@ static inline u8 get_rate_idx(u32 rate,
+@@ -865,13 +1408,86 @@ static inline u8 get_rate_idx(u32 rate,
+ 	return rate_idx;
+ }
+
+-static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
++/* If keys are configured, HW decrypts all frames
++ * with protected bit set. Mark such frames as decrypted.
++ */
++static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
++					 struct sk_buff *skb,
++					 struct ieee80211_rx_status *status)
++{
++	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
++	unsigned int hdrlen;
++	bool peer_key;
++	u8 *addr, keyidx;
++
++	if (!ieee80211_is_auth(hdr->frame_control) ||
++	    !ieee80211_has_protected(hdr->frame_control))
++		return;
++
++	hdrlen = ieee80211_hdrlen(hdr->frame_control);
++	if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
++		return;
++
++	keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
++	addr = ieee80211_get_SA(hdr);
++
++	spin_lock_bh(&ar->data_lock);
++	peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx);
++	spin_unlock_bh(&ar->data_lock);
++
++	if (peer_key) {
++		ath10k_dbg(ar, ATH10K_DBG_MAC,
++			   "mac wep key present for peer %pM\n", addr);
++		status->flag |= RX_FLAG_DECRYPTED;
++	}
++}
++
++static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
++					 struct wmi_mgmt_rx_ev_arg *arg)
+ {
+ 	struct wmi_mgmt_rx_event_v1 *ev_v1;
+ 	struct wmi_mgmt_rx_event_v2 *ev_v2;
+ 	struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
++	size_t pull_len;
++	u32 msdu_len;
++
++	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
++		ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
++		ev_hdr = &ev_v2->hdr.v1;
++		pull_len = sizeof(*ev_v2);
++	} else {
++		ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
++		ev_hdr = &ev_v1->hdr;
++		pull_len = sizeof(*ev_v1);
++	}
++
++	if (skb->len < pull_len)
++		return -EPROTO;
++
++	skb_pull(skb, pull_len);
++	arg->channel = ev_hdr->channel;
++	arg->buf_len = ev_hdr->buf_len;
++	arg->status = ev_hdr->status;
++	arg->snr = ev_hdr->snr;
++	arg->phy_mode = ev_hdr->phy_mode;
++	arg->rate = ev_hdr->rate;
++
++	msdu_len = __le32_to_cpu(arg->buf_len);
++	if (skb->len < msdu_len)
++		return -EPROTO;
++
++	/* the WMI buffer might've ended up being padded to 4 bytes due to HTC
++	 * trailer with credit update. Trim the excess garbage.
++	 */
++	skb_trim(skb, msdu_len);
++
++	return 0;
++}
++
++int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
++{
++	struct wmi_mgmt_rx_ev_arg arg = {};
+ 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+-	struct ieee80211_channel *ch;
+ 	struct ieee80211_hdr *hdr;
+ 	u32 rx_status;
+ 	u32 channel;
+@@ -880,28 +1496,24 @@ static int ath10k_wmi_event_mgmt_rx(stru
+ 	u32 rate;
+ 	u32 buf_len;
+ 	u16 fc;
+-	int pull_len;
++	int ret;
+
+-	if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
+-		ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
+-		ev_hdr = &ev_v2->hdr.v1;
+-		pull_len = sizeof(*ev_v2);
+-	} else {
+-		ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
+-		ev_hdr = &ev_v1->hdr;
+-		pull_len = sizeof(*ev_v1);
++	ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
++	if (ret) {
++		ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret);
++		return ret;
+ 	}
+
+-	channel = __le32_to_cpu(ev_hdr->channel);
+-	buf_len = __le32_to_cpu(ev_hdr->buf_len);
+-	rx_status = __le32_to_cpu(ev_hdr->status);
+-	snr = __le32_to_cpu(ev_hdr->snr);
+-	phy_mode = __le32_to_cpu(ev_hdr->phy_mode);
+-	rate = __le32_to_cpu(ev_hdr->rate);
++	channel = __le32_to_cpu(arg.channel);
++	buf_len = __le32_to_cpu(arg.buf_len);
++	rx_status = __le32_to_cpu(arg.status);
++	snr = __le32_to_cpu(arg.snr);
++	phy_mode = __le32_to_cpu(arg.phy_mode);
++	rate = __le32_to_cpu(arg.rate);
+
+ 	memset(status, 0, sizeof(*status));
+
+-	ath10k_dbg(ATH10K_DBG_MGMT,
++	ath10k_dbg(ar, ATH10K_DBG_MGMT,
+ 		   "event mgmt rx status %08x\n", rx_status);
+
+ 	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
+@@ -919,66 +1531,70 @@ static int ath10k_wmi_event_mgmt_rx(stru
+ 		return 0;
+ 	}
+
+-	if (rx_status & WMI_RX_STATUS_ERR_CRC)
+-		status->flag |= RX_FLAG_FAILED_FCS_CRC;
++	if (rx_status & WMI_RX_STATUS_ERR_CRC) {
++		dev_kfree_skb(skb);
++		return 0;
++	}
++
+ 	if (rx_status & WMI_RX_STATUS_ERR_MIC)
+ 		status->flag |= RX_FLAG_MMIC_ERROR;
+
+-	/* HW can Rx CCK rates on 5GHz. In that case phy_mode is set to
++	/* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
+ 	 * MODE_11B. This means phy_mode is not a reliable source for the band
+-	 * of mgmt rx. */
+-
+-	ch = ar->scan_channel;
+-	if (!ch)
+-		ch = ar->rx_channel;
+-
+-	if (ch) {
+-		status->band = ch->band;
+-
+-		if (phy_mode == MODE_11B &&
+-		    status->band == IEEE80211_BAND_5GHZ)
+-			ath10k_dbg(ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
++	 * of mgmt rx.
++	 */
++	if (channel >= 1 && channel <= 14) {
++		status->band = IEEE80211_BAND_2GHZ;
++	} else if (channel >= 36 && channel <= 165) {
++		status->band = IEEE80211_BAND_5GHZ;
+ 	} else {
+-		ath10k_warn("using (unreliable) phy_mode to extract band for mgmt rx\n");
+-		status->band = phy_mode_to_band(phy_mode);
++		/* Shouldn't happen unless list of advertised channels to
++		 * mac80211 has been changed.
++		 */
++		WARN_ON_ONCE(1);
++		dev_kfree_skb(skb);
++		return 0;
+ 	}
+
++	if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
++		ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
++
+ 	status->freq = ieee80211_channel_to_frequency(channel, status->band);
+ 	status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
+ 	status->rate_idx = get_rate_idx(rate, status->band);
+
+-	skb_pull(skb, pull_len);
+-
+ 	hdr = (struct ieee80211_hdr *)skb->data;
+ 	fc = le16_to_cpu(hdr->frame_control);
+
++	ath10k_wmi_handle_wep_reauth(ar, skb, status);
++
+ 	/* FW delivers WEP Shared Auth frame with Protected Bit set and
+ 	 * encrypted payload. However in case of PMF it delivers decrypted
+ 	 * frames with Protected Bit set. */
+ 	if (ieee80211_has_protected(hdr->frame_control) &&
+ 	    !ieee80211_is_auth(hdr->frame_control)) {
+-		status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED |
+-				RX_FLAG_MMIC_STRIPPED;
+-		hdr->frame_control = __cpu_to_le16(fc &
++		status->flag |= RX_FLAG_DECRYPTED;
++
++		if (!ieee80211_is_action(hdr->frame_control) &&
++		    !ieee80211_is_deauth(hdr->frame_control) &&
++		    !ieee80211_is_disassoc(hdr->frame_control)) {
++			status->flag |= RX_FLAG_IV_STRIPPED |
++					RX_FLAG_MMIC_STRIPPED;
++			hdr->frame_control = __cpu_to_le16(fc &
+ 					~IEEE80211_FCTL_PROTECTED);
++		}
+ 	}
+
+-	ath10k_dbg(ATH10K_DBG_MGMT,
++	ath10k_dbg(ar, ATH10K_DBG_MGMT,
+ 		   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
+ 		   skb, skb->len,
+ 		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
+
+-	ath10k_dbg(ATH10K_DBG_MGMT,
++	ath10k_dbg(ar, ATH10K_DBG_MGMT,
+ 		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
+ 		   status->freq, status->band, status->signal,
+ 		   status->rate_idx);
+
+-	/*
+-	 * packets from HTC come aligned to 4byte boundaries
+-	 * because they can originally come in along with a trailer
+-	 */
+-	skb_trim(skb, buf_len);
+-
+ 	ieee80211_rx(ar->hw, skb);
+ 	return 0;
+ }
+@@ -1002,37 +1618,65 @@ exit:
+ 	return idx;
+ }
+
+-static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
++static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
++					 struct wmi_ch_info_ev_arg *arg)
++{
++	struct wmi_chan_info_event *ev = (void *)skb->data;
++
++	if (skb->len < sizeof(*ev))
++		return -EPROTO;
++
++	skb_pull(skb, sizeof(*ev));
++	arg->err_code = ev->err_code;
++	arg->freq = ev->freq;
++	arg->cmd_flags = ev->cmd_flags;
++	arg->noise_floor = ev->noise_floor;
++	arg->rx_clear_count = ev->rx_clear_count;
++	arg->cycle_count = ev->cycle_count;
++
++	return 0;
++}
++
++void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
+ {
+-	struct wmi_chan_info_event *ev;
++	struct wmi_ch_info_ev_arg arg = {};
+ 	struct survey_info *survey;
+ 	u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
+-	int idx;
++	int idx, ret;
+
+-	ev = (struct wmi_chan_info_event *)skb->data;
++	ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
++	if (ret) {
++		ath10k_warn(ar, "failed to parse chan info event: %d\n", ret);
++		return;
++	}
+
+-	err_code = __le32_to_cpu(ev->err_code);
+-	freq = __le32_to_cpu(ev->freq);
+-	cmd_flags = __le32_to_cpu(ev->cmd_flags);
+-	noise_floor = __le32_to_cpu(ev->noise_floor);
+-	rx_clear_count = __le32_to_cpu(ev->rx_clear_count);
+-	cycle_count = __le32_to_cpu(ev->cycle_count);
++	err_code = __le32_to_cpu(arg.err_code);
++	freq = __le32_to_cpu(arg.freq);
++	cmd_flags = __le32_to_cpu(arg.cmd_flags);
++	noise_floor = __le32_to_cpu(arg.noise_floor);
++	rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
++	cycle_count = __le32_to_cpu(arg.cycle_count);
+
+-	ath10k_dbg(ATH10K_DBG_WMI,
++	ath10k_dbg(ar, ATH10K_DBG_WMI,
+ 		   "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
+ 		   err_code, freq, cmd_flags, noise_floor, rx_clear_count,
+ 		   cycle_count);
+
+ 	spin_lock_bh(&ar->data_lock);
+
+-	if (!ar->scan.in_progress) {
+-		ath10k_warn("chan info event without a scan request?\n");
++	switch (ar->scan.state) {
++	case ATH10K_SCAN_IDLE:
++	case ATH10K_SCAN_STARTING:
++		ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
+ 		goto exit;
++	case ATH10K_SCAN_RUNNING:
++	case ATH10K_SCAN_ABORTING:
++		break;
+ 	}
+
+ 	idx = freq_to_idx(ar, freq);
+ 	if (idx >= ARRAY_SIZE(ar->survey)) {
+-		ath10k_warn("chan info: invalid frequency %d (idx %d out of bounds)\n",
++		ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
+ 			    freq, idx);
+ 		goto exit;
+ 	}
+@@ -1061,191 +1705,579 @@ exit:
+@@ -1061,191 +1705,579 @@ exit:
+ 	spin_unlock_bh(&ar->data_lock);
+ }
+
+-static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
++void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
+ {
+-	ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
++	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
+ }
+
+-static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
++int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
+ {
+-	ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
++	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
+ 		   skb->len);
+
+-	trace_ath10k_wmi_dbglog(skb->data, skb->len);
++	trace_ath10k_wmi_dbglog(ar, skb->data, skb->len);
+
+ 	return 0;
+ }
+
+-static void ath10k_wmi_event_update_stats(struct ath10k *ar,
+-					  struct sk_buff *skb)
++void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
++				     struct ath10k_fw_stats_pdev *dst)
+ {
+-	struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data;
+-
+-	ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
+-
+-	ath10k_debug_read_target_stats(ar, ev);
+-}
+-
+-static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
+-					     struct sk_buff *skb)
++	dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
++	dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
++	dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
++	dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
++	dst->cycle_count = __le32_to_cpu(src->cycle_count);
++	dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
++	dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
++}
++
++void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
++				   struct ath10k_fw_stats_pdev *dst)
++{
++	dst->comp_queued = __le32_to_cpu(src->comp_queued);
++	dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
++	dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
++	dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
++	dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
++	dst->local_enqued = __le32_to_cpu(src->local_enqued);
++	dst->local_freed = __le32_to_cpu(src->local_freed);
++	dst->hw_queued = __le32_to_cpu(src->hw_queued);
++	dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
++	dst->underrun = __le32_to_cpu(src->underrun);
++	dst->tx_abort = __le32_to_cpu(src->tx_abort);
++	dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
++	dst->tx_ko = __le32_to_cpu(src->tx_ko);
++	dst->data_rc = __le32_to_cpu(src->data_rc);
++	dst->self_triggers = __le32_to_cpu(src->self_triggers);
++	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
++	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
++	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
++	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
++	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
++	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
++	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
++}
++
++void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
++				   struct ath10k_fw_stats_pdev *dst)
++{
++	dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change);
++	dst->status_rcvd = __le32_to_cpu(src->status_rcvd);
++	dst->r0_frags = __le32_to_cpu(src->r0_frags);
++	dst->r1_frags = __le32_to_cpu(src->r1_frags);
++	dst->r2_frags = __le32_to_cpu(src->r2_frags);
++	dst->r3_frags = __le32_to_cpu(src->r3_frags);
++	dst->htt_msdus = __le32_to_cpu(src->htt_msdus);
++	dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus);
++	dst->loc_msdus = __le32_to_cpu(src->loc_msdus);
++	dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus);
++	dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu);
++	dst->phy_errs = __le32_to_cpu(src->phy_errs);
++	dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop);
++	dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs);
++}
++
++void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
++				      struct ath10k_fw_stats_pdev *dst)
++{
++	dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
++	dst->rts_bad = __le32_to_cpu(src->rts_bad);
++	dst->rts_good = __le32_to_cpu(src->rts_good);
++	dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
++	dst->no_beacons = __le32_to_cpu(src->no_beacons);
++	dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
++}
++
++void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
++				struct ath10k_fw_stats_peer *dst)
++{
++	ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
++	dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
++	dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
++}
++
++static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
++					    struct sk_buff *skb,
++					    struct ath10k_fw_stats *stats)
+ {
+-	struct wmi_vdev_start_response_event *ev;
++	const struct wmi_stats_event *ev = (void *)skb->data;
++	u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
++	int i;
+
+-	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
++	if (!skb_pull(skb, sizeof(*ev)))
++		return -EPROTO;
+
+-	ev = (struct wmi_vdev_start_response_event *)skb->data;
++	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
++	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
++	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
++
++	for (i = 0; i < num_pdev_stats; i++) {
++		const struct wmi_pdev_stats *src;
++		struct ath10k_fw_stats_pdev *dst;
++
++		src = (void *)skb->data;
++		if (!skb_pull(skb, sizeof(*src)))
++			return -EPROTO;
+
+-	if (WARN_ON(__le32_to_cpu(ev->status)))
+-		return;
++		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
++		if (!dst)
++			continue;
+
+-	complete(&ar->vdev_setup_done);
+-}
++		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
++		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
++		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+
+-static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
+-					  struct sk_buff *skb)
+-{
+-	ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
+-	complete(&ar->vdev_setup_done);
+-}
++		list_add_tail(&dst->list, &stats->pdevs);
++	}
+
+-static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
+-					      struct sk_buff *skb)
+-{
+-	struct wmi_peer_sta_kickout_event *ev;
+-	struct ieee80211_sta *sta;
++	/* fw doesn't implement vdev stats */
+
+-	ev = (struct wmi_peer_sta_kickout_event *)skb->data;
++	for (i = 0; i < num_peer_stats; i++) {
++		const struct wmi_peer_stats *src;
++		struct ath10k_fw_stats_peer *dst;
+
+-	ath10k_dbg(ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
+-		   ev->peer_macaddr.addr);
++		src = (void *)skb->data;
++		if (!skb_pull(skb, sizeof(*src)))
++			return -EPROTO;
+
+-	rcu_read_lock();
++		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
++		if (!dst)
++			continue;
+
+-	sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL);
+-	if (!sta) {
+-		ath10k_warn("Spurious quick kickout for STA %pM\n",
+-			    ev->peer_macaddr.addr);
+-		goto exit;
++		ath10k_wmi_pull_peer_stats(src, dst);
++		list_add_tail(&dst->list, &stats->peers);
+ 	}
+
+-	ieee80211_report_low_ack(sta, 10);
+-
+-exit:
+-	rcu_read_unlock();
++	return 0;
+ }
+
+-/*
+- * FIXME
+- *
+- * We don't report to mac80211 sleep state of connected
+- * stations. Due to this mac80211 can't fill in TIM IE
+- * correctly.
+- *
+- * I know of no way of getting nullfunc frames that contain
+- * sleep transition from connected stations - these do not
+- * seem to be sent from the target to the host. There also
+- * doesn't seem to be a dedicated event for that. So the
+- * only way left to do this would be to read tim_bitmap
+- * during SWBA.
+- *
+- * We could probably try using tim_bitmap from SWBA to tell
+- * mac80211 which stations are asleep and which are not. The
+- * problem here is calling mac80211 functions so many times
+- * could take too long and make us miss the time to submit
+- * the beacon to the target.
+- *
+- * So as a workaround we try to extend the TIM IE if there
+- * is unicast buffered for stations with aid > 7 and fill it
+- * in ourselves.
+- */
+-static void ath10k_wmi_update_tim(struct ath10k *ar,
+-				  struct ath10k_vif *arvif,
+-				  struct sk_buff *bcn,
+-				  struct wmi_bcn_info *bcn_info)
++static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar,
++					   struct sk_buff *skb,
++					   struct ath10k_fw_stats *stats)
+ {
+-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
+-	struct ieee80211_tim_ie *tim;
+-	u8 *ies, *ie;
+-	u8 ie_len, pvm_len;
++	const struct wmi_stats_event *ev = (void *)skb->data;
++	u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
++	int i;
+
+-	/* if next SWBA has no tim_changed the tim_bitmap is garbage.
+-	 * we must copy the bitmap upon change and reuse it later */
+-	if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) {
+-		int i;
++	if (!skb_pull(skb, sizeof(*ev)))
++		return -EPROTO;
+
+-		BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
+-			     sizeof(bcn_info->tim_info.tim_bitmap));
++	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
++	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
++	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
++
++	for (i = 0; i < num_pdev_stats; i++) {
++		const struct wmi_10x_pdev_stats *src;
++		struct ath10k_fw_stats_pdev *dst;
++
++		src = (void *)skb->data;
++		if (!skb_pull(skb, sizeof(*src)))
++			return -EPROTO;
+
+-		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
+-			__le32 t = bcn_info->tim_info.tim_bitmap[i / 4];
+-			u32 v = __le32_to_cpu(t);
+-			arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
+-		}
++		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
++		if (!dst)
++			continue;
+
+-		/* FW reports either length 0 or 16
+-		 * so we calculate this on our own */
+-		arvif->u.ap.tim_len = 0;
+-		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
+-			if (arvif->u.ap.tim_bitmap[i])
+-				arvif->u.ap.tim_len = i;
++		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
++		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
++		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
++		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
+
+-		arvif->u.ap.tim_len++;
++		list_add_tail(&dst->list, &stats->pdevs);
+ 	}
+
+-	ies = bcn->data;
+-	ies += ieee80211_hdrlen(hdr->frame_control);
+-	ies += 12; /* fixed parameters */
++	/* fw doesn't implement vdev stats */
+
+-	ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
+-				    (u8 *)skb_tail_pointer(bcn) - ies);
+-	if (!ie) {
+-		if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+-			ath10k_warn("no tim ie found;\n");
+-		return;
+-	}
++	for (i = 0; i < num_peer_stats; i++) {
++		const struct wmi_10x_peer_stats *src;
++		struct ath10k_fw_stats_peer *dst;
+
+-	tim = (void *)ie + 2;
+-	ie_len = ie[1];
+-	pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
++		src = (void *)skb->data;
++		if (!skb_pull(skb, sizeof(*src)))
++			return -EPROTO;
+
+-	if (pvm_len < arvif->u.ap.tim_len) {
+-		int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
+-		int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
+-		void *next_ie = ie + 2 + ie_len;
++		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
++		if (!dst)
++			continue;
+
+-		if (skb_put(bcn, expand_size)) {
+-			memmove(next_ie + expand_size, next_ie, move_size);
++		ath10k_wmi_pull_peer_stats(&src->old, dst);
+
+-			ie[1] += expand_size;
+-			ie_len += expand_size;
+-			pvm_len += expand_size;
+-		} else {
+-			ath10k_warn("tim expansion failed\n");
+-		}
+-	}
++		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+
+-	if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
+-		ath10k_warn("tim pvm length is too great (%d)\n", pvm_len);
+-		return;
++		list_add_tail(&dst->list, &stats->peers);
+ 	}
+
+-	tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
+-	memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
++	return 0;
++}
+
+-	if (tim->dtim_count == 0) {
+-		ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;
++static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar,
++					    struct sk_buff *skb,
++					    struct ath10k_fw_stats *stats)
++{
++	const struct wmi_10_2_stats_event *ev = (void *)skb->data;
++	u32 num_pdev_stats;
++	u32 num_pdev_ext_stats;
++	u32 num_vdev_stats;
++	u32 num_peer_stats;
++	int i;
+
+-		if (__le32_to_cpu(bcn_info->tim_info.tim_mcast) == 1)
+-			ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
+-	}
++	if (!skb_pull(skb, sizeof(*ev)))
++		return -EPROTO;
+
+-	ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
+-		   tim->dtim_count, tim->dtim_period,
+-		   tim->bitmap_ctrl, pvm_len);
++	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
++	num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
++	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
++	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
++
++	for (i = 0; i < num_pdev_stats; i++) {
++		const struct wmi_10_2_pdev_stats *src;
++		struct ath10k_fw_stats_pdev *dst;
++
++		src = (void *)skb->data;
++		if (!skb_pull(skb, sizeof(*src)))
++			return -EPROTO;
++
++		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
++		if (!dst)
++			continue;
++
++		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
++		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
++		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
++		ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
++		/* FIXME: expose 10.2 specific values */
++
++		list_add_tail(&dst->list, &stats->pdevs);
++	}
++
++	for (i = 0; i < num_pdev_ext_stats; i++) {
++		const struct wmi_10_2_pdev_ext_stats *src;
++
++		src = (void *)skb->data;
++		if (!skb_pull(skb, sizeof(*src)))
++			return -EPROTO;
++
++		/* FIXME: expose values to userspace
++		 *
++		 * Note: Even though this loop seems to do nothing it is
++		 * required to parse following sub-structures properly.
++		 */
++	}
++
++	/* fw doesn't implement vdev stats */
++
++	for (i = 0; i < num_peer_stats; i++) {
++		const struct wmi_10_2_peer_stats *src;
++		struct ath10k_fw_stats_peer *dst;
++
++		src = (void *)skb->data;
|
|
++ if (!skb_pull(skb, sizeof(*src)))
|
|
++ return -EPROTO;
|
|
++
|
|
++ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
|
|
++ if (!dst)
|
|
++ continue;
|
|
++
|
|
++ ath10k_wmi_pull_peer_stats(&src->old, dst);
|
|
++
|
|
++ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
|
|
++ /* FIXME: expose 10.2 specific values */
|
|
++
|
|
++ list_add_tail(&dst->list, &stats->peers);
|
|
++ }
|
|
++
|
|
++ return 0;
|
|
++}
|
|
++
|
|
++static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
|
|
++ struct sk_buff *skb,
|
|
++ struct ath10k_fw_stats *stats)
|
|
++{
|
|
++ const struct wmi_10_2_stats_event *ev = (void *)skb->data;
|
|
++ u32 num_pdev_stats;
|
|
++ u32 num_pdev_ext_stats;
|
|
++ u32 num_vdev_stats;
|
|
++ u32 num_peer_stats;
|
|
++ int i;
|
|
++
|
|
++ if (!skb_pull(skb, sizeof(*ev)))
|
|
++ return -EPROTO;
|
|
++
|
|
++ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
|
|
++ num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
|
|
++ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
|
|
++ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
|
|
++
|
|
++ for (i = 0; i < num_pdev_stats; i++) {
|
|
++ const struct wmi_10_2_pdev_stats *src;
|
|
++ struct ath10k_fw_stats_pdev *dst;
|
|
++
|
|
++ src = (void *)skb->data;
|
|
++ if (!skb_pull(skb, sizeof(*src)))
|
|
++ return -EPROTO;
|
|
++
|
|
++ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
|
|
++ if (!dst)
|
|
++ continue;
|
|
++
|
|
++ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
|
|
++ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
|
|
++ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
|
|
++ ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
|
|
++ /* FIXME: expose 10.2 specific values */
|
|
++
|
|
++ list_add_tail(&dst->list, &stats->pdevs);
|
|
++ }
|
|
++
|
|
++ for (i = 0; i < num_pdev_ext_stats; i++) {
|
|
++ const struct wmi_10_2_pdev_ext_stats *src;
|
|
++
|
|
++ src = (void *)skb->data;
|
|
++ if (!skb_pull(skb, sizeof(*src)))
|
|
++ return -EPROTO;
|
|
++
|
|
++ /* FIXME: expose values to userspace
|
|
++ *
|
|
++ * Note: Even though this loop seems to do nothing it is
|
|
++ * required to parse following sub-structures properly.
|
|
++ */
|
|
++ }
|
|
++
|
|
++ /* fw doesn't implement vdev stats */
|
|
++
|
|
++ for (i = 0; i < num_peer_stats; i++) {
|
|
++ const struct wmi_10_2_4_peer_stats *src;
|
|
++ struct ath10k_fw_stats_peer *dst;
|
|
++
|
|
++ src = (void *)skb->data;
|
|
++ if (!skb_pull(skb, sizeof(*src)))
|
|
++ return -EPROTO;
|
|
++
|
|
++ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
|
|
++ if (!dst)
|
|
++ continue;
|
|
++
|
|
++ ath10k_wmi_pull_peer_stats(&src->common.old, dst);
|
|
++
|
|
++ dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
|
|
++ /* FIXME: expose 10.2 specific values */
|
|
++
|
|
++ list_add_tail(&dst->list, &stats->peers);
|
|
++ }
|
|
++
|
|
++ return 0;
|
|
++}
|
|
++
|
|
++void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
|
|
++{
|
|
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
|
|
++ ath10k_debug_fw_stats_process(ar, skb);
|
|
++}
|
|
++
|
|
++static int
++ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
++ struct wmi_vdev_start_ev_arg *arg)
++{
++ struct wmi_vdev_start_response_event *ev = (void *)skb->data;
++
++ if (skb->len < sizeof(*ev))
++ return -EPROTO;
++
++ skb_pull(skb, sizeof(*ev));
++ arg->vdev_id = ev->vdev_id;
++ arg->req_id = ev->req_id;
++ arg->resp_type = ev->resp_type;
++ arg->status = ev->status;
++
++ return 0;
++}
++
++void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
++{
++ struct wmi_vdev_start_ev_arg arg = {};
++ int ret;
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
++
++ ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
++ if (ret) {
++ ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
++ return;
++ }
++
++ if (WARN_ON(__le32_to_cpu(arg.status)))
++ return;
++
++ complete(&ar->vdev_setup_done);
++}
++
++void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb)
++{
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
++ complete(&ar->vdev_setup_done);
++}
++
++static int
++ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb,
++ struct wmi_peer_kick_ev_arg *arg)
++{
++ struct wmi_peer_sta_kickout_event *ev = (void *)skb->data;
++
++ if (skb->len < sizeof(*ev))
++ return -EPROTO;
++
++ skb_pull(skb, sizeof(*ev));
++ arg->mac_addr = ev->peer_macaddr.addr;
++
++ return 0;
++}
++
++void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
++{
++ struct wmi_peer_kick_ev_arg arg = {};
++ struct ieee80211_sta *sta;
++ int ret;
++
++ ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg);
++ if (ret) {
++ ath10k_warn(ar, "failed to parse peer kickout event: %d\n",
++ ret);
++ return;
++ }
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
++ arg.mac_addr);
++
++ rcu_read_lock();
++
++ sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL);
++ if (!sta) {
++ ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
++ arg.mac_addr);
++ goto exit;
++ }
++
++ ieee80211_report_low_ack(sta, 10);
++
++exit:
++ rcu_read_unlock();
++}
++
++/*
++ * FIXME
++ *
++ * We don't report to mac80211 sleep state of connected
++ * stations. Due to this mac80211 can't fill in TIM IE
++ * correctly.
++ *
++ * I know of no way of getting nullfunc frames that contain
++ * sleep transition from connected stations - these do not
++ * seem to be sent from the target to the host. There also
++ * doesn't seem to be a dedicated event for that. So the
++ * only way left to do this would be to read tim_bitmap
++ * during SWBA.
++ *
++ * We could probably try using tim_bitmap from SWBA to tell
++ * mac80211 which stations are asleep and which are not. The
++ * problem here is calling mac80211 functions so many times
++ * could take too long and make us miss the time to submit
++ * the beacon to the target.
++ *
++ * So as a workaround we try to extend the TIM IE if there
++ * is unicast buffered for stations with aid > 7 and fill it
++ * in ourselves.
++ */
++static void ath10k_wmi_update_tim(struct ath10k *ar,
++ struct ath10k_vif *arvif,
++ struct sk_buff *bcn,
++ const struct wmi_tim_info *tim_info)
++{
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
++ struct ieee80211_tim_ie *tim;
++ u8 *ies, *ie;
++ u8 ie_len, pvm_len;
++ __le32 t;
++ u32 v;
++
++ /* if next SWBA has no tim_changed the tim_bitmap is garbage.
++ * we must copy the bitmap upon change and reuse it later */
++ if (__le32_to_cpu(tim_info->tim_changed)) {
++ int i;
++
++ BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
++ sizeof(tim_info->tim_bitmap));
++
++ for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
++ t = tim_info->tim_bitmap[i / 4];
++ v = __le32_to_cpu(t);
++ arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
++ }
++
++ /* FW reports either length 0 or 16
++ * so we calculate this on our own */
++ arvif->u.ap.tim_len = 0;
++ for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
++ if (arvif->u.ap.tim_bitmap[i])
++ arvif->u.ap.tim_len = i;
++
++ arvif->u.ap.tim_len++;
++ }
++
++ ies = bcn->data;
++ ies += ieee80211_hdrlen(hdr->frame_control);
++ ies += 12; /* fixed parameters */
++
++ ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
++ (u8 *)skb_tail_pointer(bcn) - ies);
++ if (!ie) {
++ if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
++ ath10k_warn(ar, "no tim ie found;\n");
++ return;
++ }
++
++ tim = (void *)ie + 2;
++ ie_len = ie[1];
++ pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
++
++ if (pvm_len < arvif->u.ap.tim_len) {
++ int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
++ int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
++ void *next_ie = ie + 2 + ie_len;
++
++ if (skb_put(bcn, expand_size)) {
++ memmove(next_ie + expand_size, next_ie, move_size);
++
++ ie[1] += expand_size;
++ ie_len += expand_size;
++ pvm_len += expand_size;
++ } else {
++ ath10k_warn(ar, "tim expansion failed\n");
++ }
++ }
++
++ if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
++ ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
++ return;
++ }
++
++ tim->bitmap_ctrl = !!__le32_to_cpu(tim_info->tim_mcast);
++ memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
++
++ if (tim->dtim_count == 0) {
++ ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;
++
++ if (__le32_to_cpu(tim_info->tim_mcast) == 1)
++ ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
++ }
++
++ ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
++ tim->dtim_count, tim->dtim_period,
++ tim->bitmap_ctrl, pvm_len);
+ }
+
+ static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
+- struct wmi_p2p_noa_info *noa)
++ const struct wmi_p2p_noa_info *noa)
+ {
+ struct ieee80211_p2p_noa_attr *noa_attr;
+ u8 ctwindow_oppps = noa->ctwindow_oppps;
+@@ -1287,14 +2319,13 @@ static void ath10k_p2p_fill_noa_ie(u8 *d
+ *noa_attr_len = __cpu_to_le16(attr_len);
+ }
+
+-static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa)
++static u32 ath10k_p2p_calc_noa_ie_len(const struct wmi_p2p_noa_info *noa)
+ {
+ u32 len = 0;
+ u8 noa_descriptors = noa->num_descriptors;
+ u8 opp_ps_info = noa->ctwindow_oppps;
+ bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);
+
+-
+ if (!noa_descriptors && !opps_enabled)
+ return len;
+
+@@ -1308,16 +2339,15 @@ static u32 ath10k_p2p_calc_noa_ie_len(st
+
+ static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
+ struct sk_buff *bcn,
+- struct wmi_bcn_info *bcn_info)
++ const struct wmi_p2p_noa_info *noa)
+ {
+- struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info;
+ u8 *new_data, *old_data = arvif->u.ap.noa_data;
+ u32 new_len;
+
+ if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+ return;
+
+- ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
++ ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
+ if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
+ new_len = ath10k_p2p_calc_noa_ie_len(noa);
+ if (!new_len)
+@@ -1351,22 +2381,59 @@ cleanup:
+ kfree(old_data);
+ }
+
++static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
++ struct wmi_swba_ev_arg *arg)
++{
++ struct wmi_host_swba_event *ev = (void *)skb->data;
++ u32 map;
++ size_t i;
++
++ if (skb->len < sizeof(*ev))
++ return -EPROTO;
++
++ skb_pull(skb, sizeof(*ev));
++ arg->vdev_map = ev->vdev_map;
++
++ for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
++ if (!(map & BIT(0)))
++ continue;
++
++ /* If this happens there were some changes in firmware and
++ * ath10k should update the max size of tim_info array.
++ */
++ if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
++ break;
++
++ arg->tim_info[i] = &ev->bcn_info[i].tim_info;
++ arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
++ i++;
++ }
++
++ return 0;
++}
+
+-static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
++void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
+ {
+- struct wmi_host_swba_event *ev;
++ struct wmi_swba_ev_arg arg = {};
+ u32 map;
+ int i = -1;
+- struct wmi_bcn_info *bcn_info;
++ const struct wmi_tim_info *tim_info;
++ const struct wmi_p2p_noa_info *noa_info;
+ struct ath10k_vif *arvif;
+ struct sk_buff *bcn;
++ dma_addr_t paddr;
+ int ret, vdev_id = 0;
+
+- ev = (struct wmi_host_swba_event *)skb->data;
+- map = __le32_to_cpu(ev->vdev_map);
++ ret = ath10k_wmi_pull_swba(ar, skb, &arg);
++ if (ret) {
++ ath10k_warn(ar, "failed to parse swba event: %d\n", ret);
++ return;
++ }
++
++ map = __le32_to_cpu(arg.vdev_map);
+
+- ath10k_dbg(ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
+- ev->vdev_map);
++ ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
++ map);
+
+ for (; map; map >>= 1, vdev_id++) {
+ if (!(map & 0x1))
+@@ -1375,27 +2442,29 @@ static void ath10k_wmi_event_host_swba(s
+ i++;
+
+ if (i >= WMI_MAX_AP_VDEV) {
+- ath10k_warn("swba has corrupted vdev map\n");
++ ath10k_warn(ar, "swba has corrupted vdev map\n");
+ break;
+ }
+
+- bcn_info = &ev->bcn_info[i];
++ tim_info = arg.tim_info[i];
++ noa_info = arg.noa_info[i];
+
+- ath10k_dbg(ATH10K_DBG_MGMT,
++ ath10k_dbg(ar, ATH10K_DBG_MGMT,
+ "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
+ i,
+- __le32_to_cpu(bcn_info->tim_info.tim_len),
+- __le32_to_cpu(bcn_info->tim_info.tim_mcast),
+- __le32_to_cpu(bcn_info->tim_info.tim_changed),
+- __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending),
+- __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]),
+- __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]),
+- __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]),
+- __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0]));
++ __le32_to_cpu(tim_info->tim_len),
++ __le32_to_cpu(tim_info->tim_mcast),
++ __le32_to_cpu(tim_info->tim_changed),
++ __le32_to_cpu(tim_info->tim_num_ps_pending),
++ __le32_to_cpu(tim_info->tim_bitmap[3]),
++ __le32_to_cpu(tim_info->tim_bitmap[2]),
++ __le32_to_cpu(tim_info->tim_bitmap[1]),
++ __le32_to_cpu(tim_info->tim_bitmap[0]));
+
+ arvif = ath10k_get_arvif(ar, vdev_id);
+ if (arvif == NULL) {
+- ath10k_warn("no vif for vdev_id %d found\n", vdev_id);
++ ath10k_warn(ar, "no vif for vdev_id %d found\n",
++ vdev_id);
+ continue;
+ }
+
+@@ -1412,57 +2481,77 @@ static void ath10k_wmi_event_host_swba(s
+
+ bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
+ if (!bcn) {
+- ath10k_warn("could not get mac80211 beacon\n");
++ ath10k_warn(ar, "could not get mac80211 beacon\n");
+ continue;
+ }
+
+- ath10k_tx_h_seq_no(bcn);
+- ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
+- ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
++ ath10k_tx_h_seq_no(arvif->vif, bcn);
++ ath10k_wmi_update_tim(ar, arvif, bcn, tim_info);
++ ath10k_wmi_update_noa(ar, arvif, bcn, noa_info);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (arvif->beacon) {
+- if (!arvif->beacon_sent)
+- ath10k_warn("SWBA overrun on vdev %d\n",
++ switch (arvif->beacon_state) {
++ case ATH10K_BEACON_SENT:
++ break;
++ case ATH10K_BEACON_SCHEDULED:
++ ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n",
+ arvif->vdev_id);
++ break;
++ case ATH10K_BEACON_SENDING:
++ ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n",
++ arvif->vdev_id);
++ dev_kfree_skb(bcn);
++ goto skip;
++ }
+
+- dma_unmap_single(arvif->ar->dev,
+- ATH10K_SKB_CB(arvif->beacon)->paddr,
+- arvif->beacon->len, DMA_TO_DEVICE);
+- dev_kfree_skb_any(arvif->beacon);
+- arvif->beacon = NULL;
++ ath10k_mac_vif_beacon_free(arvif);
+ }
+
+- ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev,
+- bcn->data, bcn->len,
+- DMA_TO_DEVICE);
+- ret = dma_mapping_error(arvif->ar->dev,
+- ATH10K_SKB_CB(bcn)->paddr);
+- if (ret) {
+- ath10k_warn("failed to map beacon: %d\n", ret);
+- dev_kfree_skb_any(bcn);
+- goto skip;
++ if (!arvif->beacon_buf) {
++ paddr = dma_map_single(arvif->ar->dev, bcn->data,
++ bcn->len, DMA_TO_DEVICE);
++ ret = dma_mapping_error(arvif->ar->dev, paddr);
++ if (ret) {
++ ath10k_warn(ar, "failed to map beacon: %d\n",
++ ret);
++ dev_kfree_skb_any(bcn);
++ goto skip;
++ }
++
++ ATH10K_SKB_CB(bcn)->paddr = paddr;
++ } else {
++ if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
++ ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
++ bcn->len, IEEE80211_MAX_FRAME_LEN);
++ skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
++ }
++ memcpy(arvif->beacon_buf, bcn->data, bcn->len);
++ ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
+ }
+
+ arvif->beacon = bcn;
+- arvif->beacon_sent = false;
++ arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
++
++ trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
++ trace_ath10k_tx_payload(ar, bcn->data, bcn->len);
+
+- ath10k_wmi_tx_beacon_nowait(arvif);
+ skip:
+ spin_unlock_bh(&ar->data_lock);
+ }
++
++ ath10k_wmi_tx_beacons_nowait(ar);
+ }
+
+-static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
+- struct sk_buff *skb)
++void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
+ }
+
+ static void ath10k_dfs_radar_report(struct ath10k *ar,
+- struct wmi_single_phyerr_rx_event *event,
+- struct phyerr_radar_report *rr,
++ const struct wmi_phyerr *phyerr,
++ const struct phyerr_radar_report *rr,
+ u64 tsf)
+ {
+ u32 reg0, reg1, tsf32l;
+@@ -1473,20 +2562,20 @@ static void ath10k_dfs_radar_report(stru
+ reg0 = __le32_to_cpu(rr->reg0);
+ reg1 = __le32_to_cpu(rr->reg1);
+
+- ath10k_dbg(ATH10K_DBG_REGULATORY,
++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
+ MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
+ MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
+- ath10k_dbg(ATH10K_DBG_REGULATORY,
++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
+ MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
+- ath10k_dbg(ATH10K_DBG_REGULATORY,
++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
+ MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
+@@ -1495,12 +2584,12 @@ static void ath10k_dfs_radar_report(stru
+ return;
+
+ /* report event to DFS pattern detector */
+- tsf32l = __le32_to_cpu(event->hdr.tsf_timestamp);
++ tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
+ tsf64 = tsf & (~0xFFFFFFFFULL);
+ tsf64 |= tsf32l;
+
+ width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
+- rssi = event->hdr.rssi_combined;
++ rssi = phyerr->rssi_combined;
+
+ /* hardware store this as 8 bit signed value,
+ * set to zero if negative number
+@@ -1513,25 +2602,25 @@ static void ath10k_dfs_radar_report(stru
+ pe.width = width;
+ pe.rssi = rssi;
+
+- ath10k_dbg(ATH10K_DBG_REGULATORY,
++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
+ pe.freq, pe.width, pe.rssi, pe.ts);
+
+ ATH10K_DFS_STAT_INC(ar, pulses_detected);
+
+ if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) {
+- ath10k_dbg(ATH10K_DBG_REGULATORY,
++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "dfs no pulse pattern detected, yet\n");
+ return;
+ }
+
+- ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs radar detected\n");
++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
+ ATH10K_DFS_STAT_INC(ar, radar_detected);
+
+ /* Control radar events reporting in debugfs file
+ dfs_block_radar_events */
+ if (ar->dfs_block_radar_events) {
+- ath10k_info("DFS Radar detected, but ignored as requested\n");
++ ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
+ return;
+ }
+
+@@ -1539,8 +2628,8 @@ static void ath10k_dfs_radar_report(stru
+ }
+
+ static int ath10k_dfs_fft_report(struct ath10k *ar,
+- struct wmi_single_phyerr_rx_event *event,
+- struct phyerr_fft_report *fftr,
++ const struct wmi_phyerr *phyerr,
++ const struct phyerr_fft_report *fftr,
+ u64 tsf)
+ {
+ u32 reg0, reg1;
+@@ -1548,15 +2637,15 @@ static int ath10k_dfs_fft_report(struct
+
+ reg0 = __le32_to_cpu(fftr->reg0);
+ reg1 = __le32_to_cpu(fftr->reg1);
+- rssi = event->hdr.rssi_combined;
++ rssi = phyerr->rssi_combined;
+
+- ath10k_dbg(ATH10K_DBG_REGULATORY,
++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
+ MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
+ MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
+ MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
+ MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
+- ath10k_dbg(ATH10K_DBG_REGULATORY,
++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
+ MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
+ MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
+@@ -1568,7 +2657,7 @@ static int ath10k_dfs_fft_report(struct
+ /* false event detection */
+ if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
+ peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
+- ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
+ ATH10K_DFS_STAT_INC(ar, pulses_discarded);
+ return -EINVAL;
+ }
+@@ -1576,21 +2665,21 @@ static int ath10k_dfs_fft_report(struct
+ return 0;
+ }
+
+-static void ath10k_wmi_event_dfs(struct ath10k *ar,
+- struct wmi_single_phyerr_rx_event *event,
+- u64 tsf)
++void ath10k_wmi_event_dfs(struct ath10k *ar,
++ const struct wmi_phyerr *phyerr,
++ u64 tsf)
+ {
+ int buf_len, tlv_len, res, i = 0;
+- struct phyerr_tlv *tlv;
+- struct phyerr_radar_report *rr;
+- struct phyerr_fft_report *fftr;
+- u8 *tlv_buf;
++ const struct phyerr_tlv *tlv;
++ const struct phyerr_radar_report *rr;
++ const struct phyerr_fft_report *fftr;
++ const u8 *tlv_buf;
+
+- buf_len = __le32_to_cpu(event->hdr.buf_len);
+- ath10k_dbg(ATH10K_DBG_REGULATORY,
++ buf_len = __le32_to_cpu(phyerr->buf_len);
++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
+- event->hdr.phy_err_code, event->hdr.rssi_combined,
+- __le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len);
++ phyerr->phy_err_code, phyerr->rssi_combined,
++ __le32_to_cpu(phyerr->tsf_timestamp), tsf, buf_len);
+
+ /* Skip event if DFS disabled */
+ if (!config_enabled(CPTCFG_ATH10K_DFS_CERTIFIED))
+@@ -1600,36 +2689,38 @@ static void ath10k_wmi_event_dfs(struct
+
+ while (i < buf_len) {
+ if (i + sizeof(*tlv) > buf_len) {
+- ath10k_warn("too short buf for tlv header (%d)\n", i);
++ ath10k_warn(ar, "too short buf for tlv header (%d)\n",
++ i);
+ return;
+ }
+
+- tlv = (struct phyerr_tlv *)&event->bufp[i];
++ tlv = (struct phyerr_tlv *)&phyerr->buf[i];
+ tlv_len = __le16_to_cpu(tlv->len);
+- tlv_buf = &event->bufp[i + sizeof(*tlv)];
+- ath10k_dbg(ATH10K_DBG_REGULATORY,
++ tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
++ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
+ tlv_len, tlv->tag, tlv->sig);
+
+ switch (tlv->tag) {
+ case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
+ if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
+- ath10k_warn("too short radar pulse summary (%d)\n",
++ ath10k_warn(ar, "too short radar pulse summary (%d)\n",
+ i);
+ return;
+ }
+
+ rr = (struct phyerr_radar_report *)tlv_buf;
+- ath10k_dfs_radar_report(ar, event, rr, tsf);
++ ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
+ break;
+ case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
+ if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
+- ath10k_warn("too short fft report (%d)\n", i);
++ ath10k_warn(ar, "too short fft report (%d)\n",
++ i);
+ return;
+ }
+
+ fftr = (struct phyerr_fft_report *)tlv_buf;
+- res = ath10k_dfs_fft_report(ar, event, fftr, tsf);
++ res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
+ if (res)
+ return;
+ break;
+@@ -1639,58 +2730,122 @@ static void ath10k_wmi_event_dfs(struct
+ }
+ }
+
+-static void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
+- struct wmi_single_phyerr_rx_event *event,
+- u64 tsf)
+-{
+- ath10k_dbg(ATH10K_DBG_WMI, "wmi event spectral scan\n");
+-}
+-
+-static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
++void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
++ const struct wmi_phyerr *phyerr,
++ u64 tsf)
+ {
+- struct wmi_comb_phyerr_rx_event *comb_event;
+- struct wmi_single_phyerr_rx_event *event;
+- u32 count, i, buf_len, phy_err_code;
+- u64 tsf;
+- int left_len = skb->len;
+-
+- ATH10K_DFS_STAT_INC(ar, phy_errors);
+-
+- /* Check if combined event available */
+- if (left_len < sizeof(*comb_event)) {
+- ath10k_warn("wmi phyerr combined event wrong len\n");
+- return;
+- }
+-
+- left_len -= sizeof(*comb_event);
++ int buf_len, tlv_len, res, i = 0;
++ struct phyerr_tlv *tlv;
++ const void *tlv_buf;
++ const struct phyerr_fft_report *fftr;
++ size_t fftr_len;
+
+- /* Check number of included events */
+- comb_event = (struct wmi_comb_phyerr_rx_event *)skb->data;
+- count = __le32_to_cpu(comb_event->hdr.num_phyerr_events);
++ buf_len = __le32_to_cpu(phyerr->buf_len);
+
+- tsf = __le32_to_cpu(comb_event->hdr.tsf_u32);
+- tsf <<= 32;
+- tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32);
++ while (i < buf_len) {
++ if (i + sizeof(*tlv) > buf_len) {
++ ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
++ i);
++ return;
++ }
+
+- ath10k_dbg(ATH10K_DBG_WMI,
+- "wmi event phyerr count %d tsf64 0x%llX\n",
+- count, tsf);
++ tlv = (struct phyerr_tlv *)&phyerr->buf[i];
++ tlv_len = __le16_to_cpu(tlv->len);
++ tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
+
+- event = (struct wmi_single_phyerr_rx_event *)comb_event->bufp;
+- for (i = 0; i < count; i++) {
+- /* Check if we can read event header */
+- if (left_len < sizeof(*event)) {
+- ath10k_warn("single event (%d) wrong head len\n", i);
++ if (i + sizeof(*tlv) + tlv_len > buf_len) {
++ ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
++ i);
+ return;
+ }
+
+- left_len -= sizeof(*event);
+-
+- buf_len = __le32_to_cpu(event->hdr.buf_len);
+- phy_err_code = event->hdr.phy_err_code;
++ switch (tlv->tag) {
++ case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
++ if (sizeof(*fftr) > tlv_len) {
++ ath10k_warn(ar, "failed to parse fft report at byte %d\n",
++ i);
++ return;
++ }
++
++ fftr_len = tlv_len - sizeof(*fftr);
++ fftr = tlv_buf;
++ res = ath10k_spectral_process_fft(ar, phyerr,
++ fftr, fftr_len,
++ tsf);
++ if (res < 0) {
++ ath10k_warn(ar, "failed to process fft report: %d\n",
++ res);
++ return;
++ }
++ break;
++ }
++
++ i += sizeof(*tlv) + tlv_len;
++ }
++}
++
++static int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, struct sk_buff *skb,
++ struct wmi_phyerr_ev_arg *arg)
++{
++ struct wmi_phyerr_event *ev = (void *)skb->data;
++
++ if (skb->len < sizeof(*ev))
++ return -EPROTO;
++
++ arg->num_phyerrs = ev->num_phyerrs;
++ arg->tsf_l32 = ev->tsf_l32;
++ arg->tsf_u32 = ev->tsf_u32;
++ arg->buf_len = __cpu_to_le32(skb->len - sizeof(*ev));
++ arg->phyerrs = ev->phyerrs;
++
++ return 0;
++}
++
++void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
++{
++ struct wmi_phyerr_ev_arg arg = {};
++ const struct wmi_phyerr *phyerr;
++ u32 count, i, buf_len, phy_err_code;
++ u64 tsf;
++ int left_len, ret;
++
++ ATH10K_DFS_STAT_INC(ar, phy_errors);
++
++ ret = ath10k_wmi_pull_phyerr(ar, skb, &arg);
++ if (ret) {
++ ath10k_warn(ar, "failed to parse phyerr event: %d\n", ret);
++ return;
++ }
++
++ left_len = __le32_to_cpu(arg.buf_len);
++
++ /* Check number of included events */
++ count = __le32_to_cpu(arg.num_phyerrs);
++
++ tsf = __le32_to_cpu(arg.tsf_u32);
++ tsf <<= 32;
++ tsf |= __le32_to_cpu(arg.tsf_l32);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
++ "wmi event phyerr count %d tsf64 0x%llX\n",
++ count, tsf);
++
++ phyerr = arg.phyerrs;
++ for (i = 0; i < count; i++) {
++ /* Check if we can read event header */
++ if (left_len < sizeof(*phyerr)) {
++ ath10k_warn(ar, "single event (%d) wrong head len\n",
++ i);
++ return;
++ }
++
++ left_len -= sizeof(*phyerr);
++
++ buf_len = __le32_to_cpu(phyerr->buf_len);
++ phy_err_code = phyerr->phy_err_code;
+
+ if (left_len < buf_len) {
+- ath10k_warn("single event (%d) wrong buf len\n", i);
++ ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
+ return;
+ }
+
+@@ -1698,36 +2853,34 @@ static void ath10k_wmi_event_phyerr(stru
+
+ switch (phy_err_code) {
+ case PHY_ERROR_RADAR:
+- ath10k_wmi_event_dfs(ar, event, tsf);
++ ath10k_wmi_event_dfs(ar, phyerr, tsf);
+ break;
+ case PHY_ERROR_SPECTRAL_SCAN:
+- ath10k_wmi_event_spectral_scan(ar, event, tsf);
++ ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
+ break;
+ case PHY_ERROR_FALSE_RADAR_EXT:
+- ath10k_wmi_event_dfs(ar, event, tsf);
+- ath10k_wmi_event_spectral_scan(ar, event, tsf);
++ ath10k_wmi_event_dfs(ar, phyerr, tsf);
++ ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
+ break;
+ default:
+ break;
+ }
+
+- event += sizeof(*event) + buf_len;
++ phyerr = (void *)phyerr + sizeof(*phyerr) + buf_len;
+ }
+ }
+
+-static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
++void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
+ }
+
+-static void ath10k_wmi_event_profile_match(struct ath10k *ar,
+- struct sk_buff *skb)
++void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
+ }
+
+-static void ath10k_wmi_event_debug_print(struct ath10k *ar,
+- struct sk_buff *skb)
++void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb)
+ {
+ char buf[101], c;
+ int i;
+@@ -1748,7 +2901,7 @@ static void ath10k_wmi_event_debug_print
+ }
+
+ if (i == sizeof(buf) - 1)
+- ath10k_warn("wmi debug print truncated: %d\n", skb->len);
++ ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
+
+ /* for some reason the debug prints end with \n, remove that */
+ if (skb->data[i - 1] == '\n')
+@@ -1757,112 +2910,99 @@ static void ath10k_wmi_event_debug_print
+ /* the last byte is always reserved for the null character */
+ buf[i] = '\0';
+
+- ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf);
++ ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
+ }
+
+-static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
++void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
+ }
+
+-static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar,
+- struct sk_buff *skb)
++void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
+ }
+
+-static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
++void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
+ struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
+ }
+
+-static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
++void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
+ struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
+ }
+
+-static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar,
+- struct sk_buff *skb)
++void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
+ }
+
+-static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar,
+- struct sk_buff *skb)
++void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
+ }
+
+-static void ath10k_wmi_event_dcs_interference(struct ath10k *ar,
+- struct sk_buff *skb)
++void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
+ }
+
+-static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar,
+- struct sk_buff *skb)
++void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
+ }
+
+-static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar,
+- struct sk_buff *skb)
++void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
+ }
+
+-static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
+- struct sk_buff *skb)
++void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
+ }
+
+-static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar,
+- struct sk_buff *skb)
++void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
+ }
+
+-static void ath10k_wmi_event_delba_complete(struct ath10k *ar,
+- struct sk_buff *skb)
++void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
+ }
+
+-static void ath10k_wmi_event_addba_complete(struct ath10k *ar,
+- struct sk_buff *skb)
++void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
+ }
+
+-static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
++void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
+ struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
+ }
+
+-static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar,
+- struct sk_buff *skb)
++void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
+ }
+
+-static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar,
+- struct sk_buff *skb)
++void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
+ }
+
+-static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar,
+- struct sk_buff *skb)
++void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb)
+ {
+- ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
+ }
+
+ static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
+- u32 num_units, u32 unit_len)
++ u32 num_units, u32 unit_len)
+ {
+ dma_addr_t paddr;
+ u32 pool_size;
+@@ -1878,7 +3018,7 @@ static int ath10k_wmi_alloc_host_mem(str
+ &paddr,
+ GFP_ATOMIC);
+ if (!ar->wmi.mem_chunks[idx].vaddr) {
+- ath10k_warn("failed to allocate memory chunk\n");
++ ath10k_warn(ar, "failed to allocate memory chunk\n");
+ return -ENOMEM;
+ }
+
+@@ -1892,45 +3032,124 @@ static int ath10k_wmi_alloc_host_mem(str
+ return 0;
+ }
+
+-static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
+- struct sk_buff *skb)
++static int
++ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
++ struct wmi_svc_rdy_ev_arg *arg)
++{
++ struct wmi_service_ready_event *ev;
++ size_t i, n;
++
++ if (skb->len < sizeof(*ev))
++ return -EPROTO;
++
++ ev = (void *)skb->data;
++ skb_pull(skb, sizeof(*ev));
++ arg->min_tx_power = ev->hw_min_tx_power;
++ arg->max_tx_power = ev->hw_max_tx_power;
++ arg->ht_cap = ev->ht_cap_info;
++ arg->vht_cap = ev->vht_cap_info;
++ arg->sw_ver0 = ev->sw_version;
++ arg->sw_ver1 = ev->sw_version_1;
++ arg->phy_capab = ev->phy_capability;
++ arg->num_rf_chains = ev->num_rf_chains;
++ arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
++ arg->num_mem_reqs = ev->num_mem_reqs;
++ arg->service_map = ev->wmi_service_bitmap;
++ arg->service_map_len = sizeof(ev->wmi_service_bitmap);
++
++ n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
++ ARRAY_SIZE(arg->mem_reqs));
++ for (i = 0; i < n; i++)
++ arg->mem_reqs[i] = &ev->mem_reqs[i];
++
++ if (skb->len <
++ __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
++ return -EPROTO;
++
++ return 0;
++}
++
++static int
++ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
++ struct wmi_svc_rdy_ev_arg *arg)
++{
++ struct wmi_10x_service_ready_event *ev;
++ int i, n;
++
++ if (skb->len < sizeof(*ev))
++ return -EPROTO;
++
++ ev = (void *)skb->data;
++ skb_pull(skb, sizeof(*ev));
++ arg->min_tx_power = ev->hw_min_tx_power;
++ arg->max_tx_power = ev->hw_max_tx_power;
++ arg->ht_cap = ev->ht_cap_info;
++ arg->vht_cap = ev->vht_cap_info;
++ arg->sw_ver0 = ev->sw_version;
++ arg->phy_capab = ev->phy_capability;
++ arg->num_rf_chains = ev->num_rf_chains;
++ arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
++ arg->num_mem_reqs = ev->num_mem_reqs;
++ arg->service_map = ev->wmi_service_bitmap;
++ arg->service_map_len = sizeof(ev->wmi_service_bitmap);
++
++ n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
++ ARRAY_SIZE(arg->mem_reqs));
++ for (i = 0; i < n; i++)
++ arg->mem_reqs[i] = &ev->mem_reqs[i];
++
++ if (skb->len <
++ __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
++ return -EPROTO;
++
++ return 0;
++}
++
++void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
+ {
+- struct wmi_service_ready_event *ev = (void *)skb->data;
++ struct wmi_svc_rdy_ev_arg arg = {};
++ u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
++ int ret;
+
+- if (skb->len < sizeof(*ev)) {
+- ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
+- skb->len, sizeof(*ev));
++ ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
++ if (ret) {
++ ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
+ return;
+ }
+
+- ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
+- ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
+- ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
+- ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
++ memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
++ ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map,
++ arg.service_map_len);
++
++ ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
++ ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
++ ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
++ ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
+ ar->fw_version_major =
+- (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
+- ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
++ (__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
++ ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
+ ar->fw_version_release =
+- (__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
+- ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
+- ar->phy_capability = __le32_to_cpu(ev->phy_capability);
+- ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
++ (__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
++ ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
++ ar->phy_capability = __le32_to_cpu(arg.phy_capab);
++ ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
++ ar->ath_common.regulatory.current_rd = __le32_to_cpu(arg.eeprom_rd);
++
++ ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
++ arg.service_map, arg.service_map_len);
+
+ /* only manually set fw features when not using FW IE format */
+ if (ar->fw_api == 1 && ar->fw_version_build > 636)
+ set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
+
+ if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
+- ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
++ ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
+ ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
+ ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
+ }
+
+- ar->ath_common.regulatory.current_rd =
+- __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
+-
+- ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap,
+- sizeof(ev->wmi_service_bitmap));
++ ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1;
++ ar->supp_rx_chainmask = (1 << ar->num_rf_chains) - 1;
+
+ if (strlen(ar->hw->wiphy->fw_version) == 0) {
+ snprintf(ar->hw->wiphy->fw_version,
+@@ -1942,90 +3161,18 @@ static void ath10k_wmi_service_ready_eve
+ ar->fw_version_build);
+ }
+
+- /* FIXME: it probably should be better to support this */
+- if (__le32_to_cpu(ev->num_mem_reqs) > 0) {
+- ath10k_warn("target requested %d memory chunks; ignoring\n",
+- __le32_to_cpu(ev->num_mem_reqs));
+- }
+-
+- ath10k_dbg(ATH10K_DBG_WMI,
+- "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
+- __le32_to_cpu(ev->sw_version),
+- __le32_to_cpu(ev->sw_version_1),
+- __le32_to_cpu(ev->abi_version),
+- __le32_to_cpu(ev->phy_capability),
+- __le32_to_cpu(ev->ht_cap_info),
+- __le32_to_cpu(ev->vht_cap_info),
+- __le32_to_cpu(ev->vht_supp_mcs),
+- __le32_to_cpu(ev->sys_cap_info),
+- __le32_to_cpu(ev->num_mem_reqs),
+- __le32_to_cpu(ev->num_rf_chains));
+-
+- complete(&ar->wmi.service_ready);
+-}
+-
+-static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
+- struct sk_buff *skb)
+-{
+- u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
+- int ret;
+- struct wmi_service_ready_event_10x *ev = (void *)skb->data;
+-
+- if (skb->len < sizeof(*ev)) {
+- ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
+- skb->len, sizeof(*ev));
+- return;
+- }
+-
+- ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
+- ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
+- ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
+- ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
+- ar->fw_version_major =
+- (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
+- ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
+- ar->phy_capability = __le32_to_cpu(ev->phy_capability);
+- ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
+-
+- if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
+- ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
+- ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
+- ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
+- }
+-
+- ar->ath_common.regulatory.current_rd =
+- __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
+-
+- ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap,
+- sizeof(ev->wmi_service_bitmap));
+-
+- if (strlen(ar->hw->wiphy->fw_version) == 0) {
+- snprintf(ar->hw->wiphy->fw_version,
+- sizeof(ar->hw->wiphy->fw_version),
+- "%u.%u",
+- ar->fw_version_major,
+- ar->fw_version_minor);
+- }
+-
+- num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs);
+-
+- if (num_mem_reqs > ATH10K_MAX_MEM_REQS) {
+- ath10k_warn("requested memory chunks number (%d) exceeds the limit\n",
++ num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
++ if (num_mem_reqs > WMI_MAX_MEM_REQS) {
++ ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
+ num_mem_reqs);
+ return;
+ }
+
+- if (!num_mem_reqs)
+- goto exit;
+-
+- ath10k_dbg(ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n",
+- num_mem_reqs);
+-
+ for (i = 0; i < num_mem_reqs; ++i) {
+- req_id = __le32_to_cpu(ev->mem_reqs[i].req_id);
+- num_units = __le32_to_cpu(ev->mem_reqs[i].num_units);
+- unit_size = __le32_to_cpu(ev->mem_reqs[i].unit_size);
+- num_unit_info = __le32_to_cpu(ev->mem_reqs[i].num_unit_info);
++ req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
++ num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
++ unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
++ num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
+
+ if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
+ /* number of units to allocate is number of
+@@ -2036,10 +3183,10 @@ static void ath10k_wmi_10x_service_ready
+ else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
+ num_units = TARGET_10X_NUM_VDEVS + 1;
+
+- ath10k_dbg(ATH10K_DBG_WMI,
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
+ req_id,
+- __le32_to_cpu(ev->mem_reqs[i].num_units),
++ __le32_to_cpu(arg.mem_reqs[i]->num_units),
+ num_unit_info,
+ unit_size,
+ num_units);
+@@ -2050,47 +3197,79 @@ static void ath10k_wmi_10x_service_ready
+ return;
+ }
+
+-exit:
+- ath10k_dbg(ATH10K_DBG_WMI,
+- "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
+- __le32_to_cpu(ev->sw_version),
+- __le32_to_cpu(ev->abi_version),
+- __le32_to_cpu(ev->phy_capability),
+- __le32_to_cpu(ev->ht_cap_info),
+- __le32_to_cpu(ev->vht_cap_info),
+- __le32_to_cpu(ev->vht_supp_mcs),
+- __le32_to_cpu(ev->sys_cap_info),
+- __le32_to_cpu(ev->num_mem_reqs),
+- __le32_to_cpu(ev->num_rf_chains));
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
++ "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
++ __le32_to_cpu(arg.min_tx_power),
++ __le32_to_cpu(arg.max_tx_power),
++ __le32_to_cpu(arg.ht_cap),
++ __le32_to_cpu(arg.vht_cap),
++ __le32_to_cpu(arg.sw_ver0),
++ __le32_to_cpu(arg.sw_ver1),
++ __le32_to_cpu(arg.fw_build),
++ __le32_to_cpu(arg.phy_capab),
++ __le32_to_cpu(arg.num_rf_chains),
++ __le32_to_cpu(arg.eeprom_rd),
++ __le32_to_cpu(arg.num_mem_reqs));
+
+ complete(&ar->wmi.service_ready);
+ }
+
+-static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
|
|
++static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct wmi_rdy_ev_arg *arg)
|
|
+ {
|
|
+- struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;
|
|
++ struct wmi_ready_event *ev = (void *)skb->data;
|
|
+
|
|
+- if (WARN_ON(skb->len < sizeof(*ev)))
|
|
+- return -EINVAL;
|
|
++ if (skb->len < sizeof(*ev))
|
|
++ return -EPROTO;
|
|
++
|
|
++ skb_pull(skb, sizeof(*ev));
|
|
++ arg->sw_version = ev->sw_version;
|
|
++ arg->abi_version = ev->abi_version;
|
|
++ arg->status = ev->status;
|
|
++ arg->mac_addr = ev->mac_addr.addr;
|
|
++
|
|
++ return 0;
|
|
++}
|
|
++
|
|
++int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
|
|
++{
|
|
++ struct wmi_rdy_ev_arg arg = {};
|
|
++ int ret;
|
|
+
|
|
+- memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN);
|
|
++ ret = ath10k_wmi_pull_rdy(ar, skb, &arg);
|
|
++ if (ret) {
|
|
++ ath10k_warn(ar, "failed to parse ready event: %d\n", ret);
|
|
++ return ret;
|
|
++ }
|
|
+
|
|
+- ath10k_dbg(ATH10K_DBG_WMI,
|
|
+- "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n",
|
|
+- __le32_to_cpu(ev->sw_version),
|
|
+- __le32_to_cpu(ev->abi_version),
|
|
+- ev->mac_addr.addr,
|
|
+- __le32_to_cpu(ev->status), skb->len, sizeof(*ev));
|
|
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
|
|
++ "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
|
|
++ __le32_to_cpu(arg.sw_version),
|
|
++ __le32_to_cpu(arg.abi_version),
|
|
++ arg.mac_addr,
|
|
++ __le32_to_cpu(arg.status));
|
|
+
|
|
++ ether_addr_copy(ar->mac_addr, arg.mac_addr);
|
|
+ complete(&ar->wmi.unified_ready);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+-static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
++static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
++{
++ const struct wmi_pdev_temperature_event *ev;
++
++ ev = (struct wmi_pdev_temperature_event *)skb->data;
++ if (WARN_ON(skb->len < sizeof(*ev)))
++ return -EPROTO;
++
++ ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
++ return 0;
++}
++
++static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
+ {
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_event_id id;
+- u16 len;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+@@ -2098,9 +3277,7 @@ static void ath10k_wmi_main_process_rx(s
+ if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ return;
+
+- len = skb->len;
+-
+- trace_ath10k_wmi_event(id, skb->data, skb->len);
++ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+ switch (id) {
+ case WMI_MGMT_RX_EVENTID:
+@@ -2192,24 +3369,24 @@ static void ath10k_wmi_main_process_rx(s
+ ath10k_wmi_event_vdev_install_key_complete(ar, skb);
+ break;
+ case WMI_SERVICE_READY_EVENTID:
+- ath10k_wmi_service_ready_event_rx(ar, skb);
++ ath10k_wmi_event_service_ready(ar, skb);
+ break;
+ case WMI_READY_EVENTID:
+- ath10k_wmi_ready_event_rx(ar, skb);
++ ath10k_wmi_event_ready(ar, skb);
+ break;
+ default:
+- ath10k_warn("Unknown eventid: %d\n", id);
++ ath10k_warn(ar, "Unknown eventid: %d\n", id);
+ break;
+ }
+
+ dev_kfree_skb(skb);
+ }
+
+-static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb)
++static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
+ {
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_10x_event_id id;
+- u16 len;
++ bool consumed;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+@@ -2217,9 +3394,19 @@ static void ath10k_wmi_10x_process_rx(st
+ if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ return;
+
+- len = skb->len;
++ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
++
++ consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+- trace_ath10k_wmi_event(id, skb->data, skb->len);
++ /* Ready event must be handled normally also in UTF mode so that we
++ * know the UTF firmware has booted; other events are simply bypassed
++ * to testmode.
++ */
++ if (consumed && id != WMI_10X_READY_EVENTID) {
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
++ "wmi testmode consumed 0x%x\n", id);
++ goto out;
++ }
+
+ switch (id) {
+ case WMI_10X_MGMT_RX_EVENTID:
+@@ -2302,64 +3489,153 @@ static void ath10k_wmi_10x_process_rx(st
+ ath10k_wmi_event_vdev_resume_req(ar, skb);
+ break;
+ case WMI_10X_SERVICE_READY_EVENTID:
+- ath10k_wmi_10x_service_ready_event_rx(ar, skb);
++ ath10k_wmi_event_service_ready(ar, skb);
+ break;
+ case WMI_10X_READY_EVENTID:
+- ath10k_wmi_ready_event_rx(ar, skb);
++ ath10k_wmi_event_ready(ar, skb);
++ break;
++ case WMI_10X_PDEV_UTF_EVENTID:
++ /* ignore utf events */
+ break;
+ default:
+- ath10k_warn("Unknown eventid: %d\n", id);
++ ath10k_warn(ar, "Unknown eventid: %d\n", id);
+ break;
+ }
+
++out:
+ dev_kfree_skb(skb);
+ }
+
+-
+-static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
++static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
+ {
+- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+- ath10k_wmi_10x_process_rx(ar, skb);
+- else
+- ath10k_wmi_main_process_rx(ar, skb);
+-}
++ struct wmi_cmd_hdr *cmd_hdr;
++ enum wmi_10_2_event_id id;
+
+-/* WMI Initialization functions */
+-int ath10k_wmi_attach(struct ath10k *ar)
+-{
+- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+- ar->wmi.cmd = &wmi_10x_cmd_map;
+- ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
+- ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+- } else {
+- ar->wmi.cmd = &wmi_cmd_map;
+- ar->wmi.vdev_param = &wmi_vdev_param_map;
+- ar->wmi.pdev_param = &wmi_pdev_param_map;
+- }
++ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
++ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+- init_completion(&ar->wmi.service_ready);
+- init_completion(&ar->wmi.unified_ready);
+- init_waitqueue_head(&ar->wmi.tx_credits_wq);
++ if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
++ return;
+
+- return 0;
++ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
++
++ switch (id) {
++ case WMI_10_2_MGMT_RX_EVENTID:
++ ath10k_wmi_event_mgmt_rx(ar, skb);
++ /* mgmt_rx() owns the skb now! */
++ return;
++ case WMI_10_2_SCAN_EVENTID:
++ ath10k_wmi_event_scan(ar, skb);
++ break;
++ case WMI_10_2_CHAN_INFO_EVENTID:
++ ath10k_wmi_event_chan_info(ar, skb);
++ break;
++ case WMI_10_2_ECHO_EVENTID:
++ ath10k_wmi_event_echo(ar, skb);
++ break;
++ case WMI_10_2_DEBUG_MESG_EVENTID:
++ ath10k_wmi_event_debug_mesg(ar, skb);
++ break;
++ case WMI_10_2_UPDATE_STATS_EVENTID:
++ ath10k_wmi_event_update_stats(ar, skb);
++ break;
++ case WMI_10_2_VDEV_START_RESP_EVENTID:
++ ath10k_wmi_event_vdev_start_resp(ar, skb);
++ break;
++ case WMI_10_2_VDEV_STOPPED_EVENTID:
++ ath10k_wmi_event_vdev_stopped(ar, skb);
++ break;
++ case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
++ ath10k_wmi_event_peer_sta_kickout(ar, skb);
++ break;
++ case WMI_10_2_HOST_SWBA_EVENTID:
++ ath10k_wmi_event_host_swba(ar, skb);
++ break;
++ case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID:
++ ath10k_wmi_event_tbttoffset_update(ar, skb);
++ break;
++ case WMI_10_2_PHYERR_EVENTID:
++ ath10k_wmi_event_phyerr(ar, skb);
++ break;
++ case WMI_10_2_ROAM_EVENTID:
++ ath10k_wmi_event_roam(ar, skb);
++ break;
++ case WMI_10_2_PROFILE_MATCH:
++ ath10k_wmi_event_profile_match(ar, skb);
++ break;
++ case WMI_10_2_DEBUG_PRINT_EVENTID:
++ ath10k_wmi_event_debug_print(ar, skb);
++ break;
++ case WMI_10_2_PDEV_QVIT_EVENTID:
++ ath10k_wmi_event_pdev_qvit(ar, skb);
++ break;
++ case WMI_10_2_WLAN_PROFILE_DATA_EVENTID:
++ ath10k_wmi_event_wlan_profile_data(ar, skb);
++ break;
++ case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID:
++ ath10k_wmi_event_rtt_measurement_report(ar, skb);
++ break;
++ case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID:
++ ath10k_wmi_event_tsf_measurement_report(ar, skb);
++ break;
++ case WMI_10_2_RTT_ERROR_REPORT_EVENTID:
++ ath10k_wmi_event_rtt_error_report(ar, skb);
++ break;
++ case WMI_10_2_WOW_WAKEUP_HOST_EVENTID:
++ ath10k_wmi_event_wow_wakeup_host(ar, skb);
++ break;
++ case WMI_10_2_DCS_INTERFERENCE_EVENTID:
++ ath10k_wmi_event_dcs_interference(ar, skb);
++ break;
++ case WMI_10_2_PDEV_TPC_CONFIG_EVENTID:
++ ath10k_wmi_event_pdev_tpc_config(ar, skb);
++ break;
++ case WMI_10_2_INST_RSSI_STATS_EVENTID:
++ ath10k_wmi_event_inst_rssi_stats(ar, skb);
++ break;
++ case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
++ ath10k_wmi_event_vdev_standby_req(ar, skb);
++ break;
++ case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
++ ath10k_wmi_event_vdev_resume_req(ar, skb);
++ break;
++ case WMI_10_2_SERVICE_READY_EVENTID:
++ ath10k_wmi_event_service_ready(ar, skb);
++ break;
++ case WMI_10_2_READY_EVENTID:
++ ath10k_wmi_event_ready(ar, skb);
++ break;
++ case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
++ ath10k_wmi_event_temperature(ar, skb);
++ break;
++ case WMI_10_2_RTT_KEEPALIVE_EVENTID:
++ case WMI_10_2_GPIO_INPUT_EVENTID:
++ case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
++ case WMI_10_2_GENERIC_BUFFER_EVENTID:
++ case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
++ case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
++ case WMI_10_2_WDS_PEER_EVENTID:
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
++ "received event id %d not implemented\n", id);
++ break;
++ default:
++ ath10k_warn(ar, "Unknown eventid: %d\n", id);
++ break;
++ }
++
++ dev_kfree_skb(skb);
+ }
+
+-void ath10k_wmi_detach(struct ath10k *ar)
++static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
+ {
+- int i;
+-
+- /* free the host memory chunks requested by firmware */
+- for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+- dma_free_coherent(ar->dev,
+- ar->wmi.mem_chunks[i].len,
+- ar->wmi.mem_chunks[i].vaddr,
+- ar->wmi.mem_chunks[i].paddr);
+- }
++ int ret;
+
+- ar->wmi.num_mem_chunks = 0;
++ ret = ath10k_wmi_rx(ar, skb);
++ if (ret)
++ ath10k_warn(ar, "failed to process wmi rx: %d\n", ret);
+ }
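[Editorial note: ath10k_wmi_process_rx() no longer branches on ATH10K_FW_FEATURE_WMI_10X; each WMI ABI (main/10.1/10.2) registers its own rx handler instead. A sketch of what ath10k_wmi_rx() is assumed to look like under that scheme (the op table itself is outside this hunk):

	static inline int ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
	{
		/* dispatches to ath10k_wmi_op_rx, ath10k_wmi_10_1_op_rx, ... */
		if (WARN_ON_ONCE(!ar->wmi.ops->rx))
			return -EOPNOTSUPP;

		ar->wmi.ops->rx(ar, skb);
		return 0;
	}
]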
+
+-int ath10k_wmi_connect_htc_service(struct ath10k *ar)
++int ath10k_wmi_connect(struct ath10k *ar)
+ {
+ int status;
+ struct ath10k_htc_svc_conn_req conn_req;
+@@ -2378,7 +3654,7 @@ int ath10k_wmi_connect_htc_service(struc
+
+ status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
+ if (status) {
+- ath10k_warn("failed to connect to WMI CONTROL service status: %d\n",
++ ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
+ status);
+ return status;
+ }
+@@ -2387,16 +3663,17 @@ int ath10k_wmi_connect_htc_service(struc
+ return 0;
+ }
+
+-static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
+- u16 rd2g, u16 rd5g, u16 ctl2g,
+- u16 ctl5g)
++static struct sk_buff *
++ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
++ u16 ctl2g, u16 ctl5g,
++ enum wmi_dfs_region dfs_reg)
+ {
+ struct wmi_pdev_set_regdomain_cmd *cmd;
+ struct sk_buff *skb;
+
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
+ cmd->reg_domain = __cpu_to_le32(rd);
+@@ -2405,25 +3682,23 @@ static int ath10k_wmi_main_pdev_set_regd
+ cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
+ cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
+
+- ath10k_dbg(ATH10K_DBG_WMI,
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
+ rd, rd2g, rd5g, ctl2g, ctl5g);
+-
+- return ath10k_wmi_cmd_send(ar, skb,
+- ar->wmi.cmd->pdev_set_regdomain_cmdid);
++ return skb;
+ }
+
+-static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
+- u16 rd2g, u16 rd5g,
+- u16 ctl2g, u16 ctl5g,
+- enum wmi_dfs_region dfs_reg)
++static struct sk_buff *
++ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16
++ rd5g, u16 ctl2g, u16 ctl5g,
++ enum wmi_dfs_region dfs_reg)
+ {
+ struct wmi_pdev_set_regdomain_cmd_10x *cmd;
+ struct sk_buff *skb;
+
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
+ cmd->reg_domain = __cpu_to_le32(rd);
+@@ -2433,121 +3708,96 @@ static int ath10k_wmi_10x_pdev_set_regdo
+ cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
+ cmd->dfs_domain = __cpu_to_le32(dfs_reg);
+
+- ath10k_dbg(ATH10K_DBG_WMI,
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
+ rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
+-
+- return ath10k_wmi_cmd_send(ar, skb,
+- ar->wmi.cmd->pdev_set_regdomain_cmdid);
+-}
+-
+-int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
+- u16 rd5g, u16 ctl2g, u16 ctl5g,
+- enum wmi_dfs_region dfs_reg)
+-{
+- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+- return ath10k_wmi_10x_pdev_set_regdomain(ar, rd, rd2g, rd5g,
+- ctl2g, ctl5g, dfs_reg);
+- else
+- return ath10k_wmi_main_pdev_set_regdomain(ar, rd, rd2g, rd5g,
+- ctl2g, ctl5g);
+-}
+-
+-int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
+- const struct wmi_channel_arg *arg)
+-{
+- struct wmi_set_channel_cmd *cmd;
+- struct sk_buff *skb;
+- u32 ch_flags = 0;
+-
+- if (arg->passive)
+- return -EINVAL;
+-
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+- if (!skb)
+- return -ENOMEM;
+-
+- if (arg->chan_radar)
+- ch_flags |= WMI_CHAN_FLAG_DFS;
+-
+- cmd = (struct wmi_set_channel_cmd *)skb->data;
+- cmd->chan.mhz = __cpu_to_le32(arg->freq);
+- cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq);
+- cmd->chan.mode = arg->mode;
+- cmd->chan.flags |= __cpu_to_le32(ch_flags);
+- cmd->chan.min_power = arg->min_power;
+- cmd->chan.max_power = arg->max_power;
+- cmd->chan.reg_power = arg->max_reg_power;
+- cmd->chan.reg_classid = arg->reg_class_id;
+- cmd->chan.antenna_max = arg->max_antenna_gain;
+-
+- ath10k_dbg(ATH10K_DBG_WMI,
+- "wmi set channel mode %d freq %d\n",
+- arg->mode, arg->freq);
+-
+- return ath10k_wmi_cmd_send(ar, skb,
+- ar->wmi.cmd->pdev_set_channel_cmdid);
++ return skb;
+ }
+
+-int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
++static struct sk_buff *
++ath10k_wmi_op_gen_pdev_suspend(struct ath10k *ar, u32 suspend_opt)
+ {
+ struct wmi_pdev_suspend_cmd *cmd;
+ struct sk_buff *skb;
+
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
+ cmd->suspend_opt = __cpu_to_le32(suspend_opt);
+
+- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
++ return skb;
+ }
+
+-int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
++static struct sk_buff *
++ath10k_wmi_op_gen_pdev_resume(struct ath10k *ar)
+ {
+ struct sk_buff *skb;
+
+- skb = ath10k_wmi_alloc_skb(0);
+- if (skb == NULL)
+- return -ENOMEM;
++ skb = ath10k_wmi_alloc_skb(ar, 0);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
+
+- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
++ return skb;
+ }
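[Editorial note: the gen_* helpers above now return the command buffer instead of sending it, with errors encoded in the pointer via ERR_PTR() from <linux/err.h>. The expected caller pattern, sketched against one of these helpers (the actual send wrappers live elsewhere in this patch):

	struct sk_buff *skb;

	skb = ath10k_wmi_op_gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);	/* recover -ENOMEM etc. from the pointer */

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);

This lets a single send path serve every command while each gen op only builds bytes.]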
+
+-int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
++static struct sk_buff *
++ath10k_wmi_op_gen_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
+ {
+ struct wmi_pdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
+- ath10k_warn("pdev param %d not supported by firmware\n", id);
+- return -EOPNOTSUPP;
++ ath10k_warn(ar, "pdev param %d not supported by firmware\n",
++ id);
++ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
+ cmd->param_id = __cpu_to_le32(id);
+ cmd->param_value = __cpu_to_le32(value);
+
+- ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
+ id, value);
+- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
++ return skb;
+ }
+
+-static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
++void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
++ struct wmi_host_mem_chunks *chunks)
+ {
+- struct wmi_init_cmd *cmd;
+- struct sk_buff *buf;
+- struct wmi_resource_config config = {};
+- u32 len, val;
++ struct host_memory_chunk *chunk;
+ int i;
+
+- config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
+- config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
++ chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);
++
++ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
++ chunk = &chunks->items[i];
++ chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
++ chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
++ chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
++ "wmi chunk %d len %d requested, addr 0x%llx\n",
++ i,
++ ar->wmi.mem_chunks[i].len,
++ (unsigned long long)ar->wmi.mem_chunks[i].paddr);
++ }
++}
++
++static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
++{
++ struct wmi_init_cmd *cmd;
++ struct sk_buff *buf;
++ struct wmi_resource_config config = {};
++ u32 len, val;
++
++ config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
++ config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
+ config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
+
+ config.num_offload_reorder_bufs =
+@@ -2600,50 +3850,25 @@ static int ath10k_wmi_main_cmd_init(stru
+ len = sizeof(*cmd) +
+ (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+- buf = ath10k_wmi_alloc_skb(len);
++ buf = ath10k_wmi_alloc_skb(ar, len);
+ if (!buf)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_init_cmd *)buf->data;
+
+- if (ar->wmi.num_mem_chunks == 0) {
+- cmd->num_host_mem_chunks = 0;
+- goto out;
+- }
+-
+- ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
+- ar->wmi.num_mem_chunks);
+-
+- cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
+-
+- for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+- cmd->host_mem_chunks[i].ptr =
+- __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+- cmd->host_mem_chunks[i].size =
+- __cpu_to_le32(ar->wmi.mem_chunks[i].len);
+- cmd->host_mem_chunks[i].req_id =
+- __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+-
+- ath10k_dbg(ATH10K_DBG_WMI,
+- "wmi chunk %d len %d requested, addr 0x%llx\n",
+- i,
+- ar->wmi.mem_chunks[i].len,
+- (unsigned long long)ar->wmi.mem_chunks[i].paddr);
+- }
+-out:
+ memcpy(&cmd->resource_config, &config, sizeof(config));
++ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+- ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n");
+- return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
++ return buf;
+ }
+
+-static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
++static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
+ {
+ struct wmi_init_cmd_10x *cmd;
+ struct sk_buff *buf;
+ struct wmi_resource_config_10x config = {};
+ u32 len, val;
+- int i;
+
+ config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
+ config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
+@@ -2691,101 +3916,132 @@ static int ath10k_wmi_10x_cmd_init(struc
+ len = sizeof(*cmd) +
+ (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+- buf = ath10k_wmi_alloc_skb(len);
++ buf = ath10k_wmi_alloc_skb(ar, len);
+ if (!buf)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_init_cmd_10x *)buf->data;
+
+- if (ar->wmi.num_mem_chunks == 0) {
+- cmd->num_host_mem_chunks = 0;
+- goto out;
+- }
++ memcpy(&cmd->resource_config, &config, sizeof(config));
++ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+- ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
+- ar->wmi.num_mem_chunks);
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
++ return buf;
++}
+
+- cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
++static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
++{
++ struct wmi_init_cmd_10_2 *cmd;
++ struct sk_buff *buf;
++ struct wmi_resource_config_10x config = {};
++ u32 len, val, features;
+
+- for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+- cmd->host_mem_chunks[i].ptr =
+- __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+- cmd->host_mem_chunks[i].size =
+- __cpu_to_le32(ar->wmi.mem_chunks[i].len);
+- cmd->host_mem_chunks[i].req_id =
+- __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
++ config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
++ config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
++ config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
++ config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
++ config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
++ config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
++ config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
++ config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
++ config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
++ config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
++ config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
++ config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
+
+- ath10k_dbg(ATH10K_DBG_WMI,
+- "wmi chunk %d len %d requested, addr 0x%llx\n",
+- i,
+- ar->wmi.mem_chunks[i].len,
+- (unsigned long long)ar->wmi.mem_chunks[i].paddr);
+- }
+-out:
+- memcpy(&cmd->resource_config, &config, sizeof(config));
++ config.scan_max_pending_reqs =
++ __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
++
++ config.bmiss_offload_max_vdev =
++ __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
++
++ config.roam_offload_max_vdev =
++ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
++
++ config.roam_offload_max_ap_profiles =
++ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
++
++ config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
++ config.num_mcast_table_elems =
++ __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
++
++ config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
++ config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
++ config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
++ config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE);
++ config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
++
++ val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
++ config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
++
++ config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
++
++ config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
++ config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
+
+- ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10x\n");
+- return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
++ len = sizeof(*cmd) +
++ (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
++
++ buf = ath10k_wmi_alloc_skb(ar, len);
++ if (!buf)
++ return ERR_PTR(-ENOMEM);
++
++ cmd = (struct wmi_init_cmd_10_2 *)buf->data;
++
++ features = WMI_10_2_RX_BATCH_MODE;
++ cmd->resource_config.feature_mask = __cpu_to_le32(features);
++
++ memcpy(&cmd->resource_config.common, &config, sizeof(config));
++ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
++ return buf;
+ }
+
+-int ath10k_wmi_cmd_init(struct ath10k *ar)
++int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
+ {
+- int ret;
++ if (arg->ie_len && !arg->ie)
++ return -EINVAL;
++ if (arg->n_channels && !arg->channels)
++ return -EINVAL;
++ if (arg->n_ssids && !arg->ssids)
++ return -EINVAL;
++ if (arg->n_bssids && !arg->bssids)
++ return -EINVAL;
+
+- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+- ret = ath10k_wmi_10x_cmd_init(ar);
+- else
+- ret = ath10k_wmi_main_cmd_init(ar);
++ if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
++ return -EINVAL;
++ if (arg->n_channels > ARRAY_SIZE(arg->channels))
++ return -EINVAL;
++ if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
++ return -EINVAL;
++ if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
++ return -EINVAL;
+
+- return ret;
++ return 0;
+ }
+
+-static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar,
+- const struct wmi_start_scan_arg *arg)
++static size_t
++ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
+ {
+- int len;
+-
+- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+- len = sizeof(struct wmi_start_scan_cmd_10x);
+- else
+- len = sizeof(struct wmi_start_scan_cmd);
++ int len = 0;
+
+ if (arg->ie_len) {
+- if (!arg->ie)
+- return -EINVAL;
+- if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
+- return -EINVAL;
+-
+ len += sizeof(struct wmi_ie_data);
+ len += roundup(arg->ie_len, 4);
+ }
+
+ if (arg->n_channels) {
+- if (!arg->channels)
+- return -EINVAL;
+- if (arg->n_channels > ARRAY_SIZE(arg->channels))
+- return -EINVAL;
+-
+ len += sizeof(struct wmi_chan_list);
+ len += sizeof(__le32) * arg->n_channels;
+ }
+
+ if (arg->n_ssids) {
+- if (!arg->ssids)
+- return -EINVAL;
+- if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
+- return -EINVAL;
+-
+ len += sizeof(struct wmi_ssid_list);
+ len += sizeof(struct wmi_ssid) * arg->n_ssids;
+ }
+
+ if (arg->n_bssids) {
+- if (!arg->bssids)
+- return -EINVAL;
+- if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
+- return -EINVAL;
+-
+ len += sizeof(struct wmi_bssid_list);
+ len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
+ }
+@@ -2793,28 +4049,11 @@ static int ath10k_wmi_start_scan_calc_le
+ return len;
+ }
+
+-int ath10k_wmi_start_scan(struct ath10k *ar,
+- const struct wmi_start_scan_arg *arg)
++void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
++ const struct wmi_start_scan_arg *arg)
+ {
+- struct wmi_start_scan_cmd *cmd;
+- struct sk_buff *skb;
+- struct wmi_ie_data *ie;
+- struct wmi_chan_list *channels;
+- struct wmi_ssid_list *ssids;
+- struct wmi_bssid_list *bssids;
+ u32 scan_id;
+ u32 scan_req_id;
+- int off;
+- int len = 0;
+- int i;
+-
+- len = ath10k_wmi_start_scan_calc_len(ar, arg);
+- if (len < 0)
+- return len; /* len contains error code here */
+-
+- skb = ath10k_wmi_alloc_skb(len);
+- if (!skb)
+- return -ENOMEM;
+
+ scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
+ scan_id |= arg->scan_id;
+@@ -2822,48 +4061,49 @@ int ath10k_wmi_start_scan(struct ath10k
+ scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
+ scan_req_id |= arg->scan_req_id;
+
+- cmd = (struct wmi_start_scan_cmd *)skb->data;
+- cmd->scan_id = __cpu_to_le32(scan_id);
+- cmd->scan_req_id = __cpu_to_le32(scan_req_id);
+- cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+- cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
+- cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
+- cmd->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
+- cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
+- cmd->min_rest_time = __cpu_to_le32(arg->min_rest_time);
+- cmd->max_rest_time = __cpu_to_le32(arg->max_rest_time);
+- cmd->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
+- cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
+- cmd->idle_time = __cpu_to_le32(arg->idle_time);
+- cmd->max_scan_time = __cpu_to_le32(arg->max_scan_time);
+- cmd->probe_delay = __cpu_to_le32(arg->probe_delay);
+- cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
+-
+- /* TLV list starts after fields included in the struct */
+- /* There's just one filed that differes the two start_scan
+- * structures - burst_duration, which we are not using btw,
+- no point to make the split here, just shift the buffer to fit with
+- given FW */
+- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+- off = sizeof(struct wmi_start_scan_cmd_10x);
+- else
+- off = sizeof(struct wmi_start_scan_cmd);
++ cmn->scan_id = __cpu_to_le32(scan_id);
++ cmn->scan_req_id = __cpu_to_le32(scan_req_id);
++ cmn->vdev_id = __cpu_to_le32(arg->vdev_id);
++ cmn->scan_priority = __cpu_to_le32(arg->scan_priority);
++ cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
++ cmn->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
++ cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
++ cmn->min_rest_time = __cpu_to_le32(arg->min_rest_time);
++ cmn->max_rest_time = __cpu_to_le32(arg->max_rest_time);
++ cmn->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
++ cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
++ cmn->idle_time = __cpu_to_le32(arg->idle_time);
++ cmn->max_scan_time = __cpu_to_le32(arg->max_scan_time);
++ cmn->probe_delay = __cpu_to_le32(arg->probe_delay);
++ cmn->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
++}
++
++static void
++ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
++ const struct wmi_start_scan_arg *arg)
++{
++ struct wmi_ie_data *ie;
++ struct wmi_chan_list *channels;
++ struct wmi_ssid_list *ssids;
++ struct wmi_bssid_list *bssids;
++ void *ptr = tlvs->tlvs;
++ int i;
+
+ if (arg->n_channels) {
+- channels = (void *)skb->data + off;
++ channels = ptr;
+ channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
+ channels->num_chan = __cpu_to_le32(arg->n_channels);
+
+ for (i = 0; i < arg->n_channels; i++)
+- channels->channel_list[i] =
+- __cpu_to_le32(arg->channels[i]);
++ channels->channel_list[i].freq =
++ __cpu_to_le16(arg->channels[i]);
+
+- off += sizeof(*channels);
+- off += sizeof(__le32) * arg->n_channels;
++ ptr += sizeof(*channels);
++ ptr += sizeof(__le32) * arg->n_channels;
+ }
+
+ if (arg->n_ssids) {
+- ssids = (void *)skb->data + off;
++ ssids = ptr;
+ ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
+ ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
+
+@@ -2875,12 +4115,12 @@ int ath10k_wmi_start_scan(struct ath10k
+ arg->ssids[i].len);
+ }
+
+- off += sizeof(*ssids);
+- off += sizeof(struct wmi_ssid) * arg->n_ssids;
++ ptr += sizeof(*ssids);
++ ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
+ }
+
+ if (arg->n_bssids) {
+- bssids = (void *)skb->data + off;
++ bssids = ptr;
+ bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
+ bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
+
+@@ -2889,27 +4129,75 @@ int ath10k_wmi_start_scan(struct ath10k
+ arg->bssids[i].bssid,
+ ETH_ALEN);
+
+- off += sizeof(*bssids);
+- off += sizeof(struct wmi_mac_addr) * arg->n_bssids;
++ ptr += sizeof(*bssids);
++ ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
+ }
+
+ if (arg->ie_len) {
+- ie = (void *)skb->data + off;
++ ie = ptr;
+ ie->tag = __cpu_to_le32(WMI_IE_TAG);
+ ie->ie_len = __cpu_to_le32(arg->ie_len);
+ memcpy(ie->ie_data, arg->ie, arg->ie_len);
+
+- off += sizeof(*ie);
+- off += roundup(arg->ie_len, 4);
++ ptr += sizeof(*ie);
++ ptr += roundup(arg->ie_len, 4);
+ }
++}
+
+- if (off != skb->len) {
+- dev_kfree_skb(skb);
+- return -EINVAL;
+- }
++static struct sk_buff *
++ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
++ const struct wmi_start_scan_arg *arg)
++{
++ struct wmi_start_scan_cmd *cmd;
++ struct sk_buff *skb;
++ size_t len;
++ int ret;
++
++ ret = ath10k_wmi_start_scan_verify(arg);
++ if (ret)
++ return ERR_PTR(ret);
+
+- ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
+- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
++ len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
++ skb = ath10k_wmi_alloc_skb(ar, len);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ cmd = (struct wmi_start_scan_cmd *)skb->data;
++
++ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
++ ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
++
++ cmd->burst_duration_ms = __cpu_to_le32(0);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
++ const struct wmi_start_scan_arg *arg)
++{
++ struct wmi_10x_start_scan_cmd *cmd;
++ struct sk_buff *skb;
++ size_t len;
++ int ret;
++
++ ret = ath10k_wmi_start_scan_verify(arg);
++ if (ret)
++ return ERR_PTR(ret);
++
++ len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
++ skb = ath10k_wmi_alloc_skb(ar, len);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
++
++ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
++ ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
++ return skb;
+ }
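[Editorial note: the common/TLV split above implies a wire layout of fixed fields followed by a packed run of tagged, variable-length lists. A sketch of the assumed struct shape (the real definitions live in wmi.h and may differ in detail):

	struct wmi_start_scan_cmd {
		struct wmi_start_scan_common common;
		__le32 burst_duration_ms;	/* main firmware only */
		struct wmi_start_scan_tlvs tlvs;
	} __packed;

	struct wmi_10x_start_scan_cmd {
		struct wmi_start_scan_common common;
		struct wmi_start_scan_tlvs tlvs;
	} __packed;

Whatever ath10k_wmi_start_scan_tlvs_len() counts, ath10k_wmi_put_start_scan_tlvs() must emit in the same order and size, or the buffer sizing breaks.]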
+
+ void ath10k_wmi_start_scan_init(struct ath10k *ar,
+@@ -2938,7 +4226,9 @@ void ath10k_wmi_start_scan_init(struct a
+ arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
+ }
+
+-int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
++static struct sk_buff *
++ath10k_wmi_op_gen_stop_scan(struct ath10k *ar,
++ const struct wmi_stop_scan_arg *arg)
+ {
+ struct wmi_stop_scan_cmd *cmd;
+ struct sk_buff *skb;
+@@ -2946,13 +4236,13 @@ int ath10k_wmi_stop_scan(struct ath10k *
+ u32 req_id;
+
+ if (arg->req_id > 0xFFF)
+- return -EINVAL;
++ return ERR_PTR(-EINVAL);
+ if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
+- return -EINVAL;
++ return ERR_PTR(-EINVAL);
+
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ scan_id = arg->u.scan_id;
+ scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
+@@ -2966,92 +4256,85 @@ int ath10k_wmi_stop_scan(struct ath10k *
+ cmd->scan_id = __cpu_to_le32(scan_id);
+ cmd->scan_req_id = __cpu_to_le32(req_id);
+
+- ath10k_dbg(ATH10K_DBG_WMI,
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
+ arg->req_id, arg->req_type, arg->u.scan_id);
+- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
++ return skb;
+ }
+
+-int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
+- enum wmi_vdev_type type,
+- enum wmi_vdev_subtype subtype,
+- const u8 macaddr[ETH_ALEN])
++static struct sk_buff *
++ath10k_wmi_op_gen_vdev_create(struct ath10k *ar, u32 vdev_id,
++ enum wmi_vdev_type type,
++ enum wmi_vdev_subtype subtype,
++ const u8 macaddr[ETH_ALEN])
+ {
+ struct wmi_vdev_create_cmd *cmd;
+ struct sk_buff *skb;
+
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_create_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->vdev_type = __cpu_to_le32(type);
+ cmd->vdev_subtype = __cpu_to_le32(subtype);
+- memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN);
++ ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
+
+- ath10k_dbg(ATH10K_DBG_WMI,
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
+ vdev_id, type, subtype, macaddr);
+-
+- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
++ return skb;
+ }
+
+-int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
++static struct sk_buff *
++ath10k_wmi_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
+ {
+ struct wmi_vdev_delete_cmd *cmd;
+ struct sk_buff *skb;
+
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_delete_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+- ath10k_dbg(ATH10K_DBG_WMI,
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "WMI vdev delete id %d\n", vdev_id);
+-
+- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
++ return skb;
+ }
+
+-static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
+- const struct wmi_vdev_start_request_arg *arg,
+- u32 cmd_id)
++static struct sk_buff *
++ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
++ const struct wmi_vdev_start_request_arg *arg,
++ bool restart)
+ {
+ struct wmi_vdev_start_request_cmd *cmd;
+ struct sk_buff *skb;
+ const char *cmdname;
+ u32 flags = 0;
+- u32 ch_flags = 0;
+
+- if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
+- cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
+- return -EINVAL;
+ if (WARN_ON(arg->ssid && arg->ssid_len == 0))
+- return -EINVAL;
++ return ERR_PTR(-EINVAL);
+ if (WARN_ON(arg->hidden_ssid && !arg->ssid))
+- return -EINVAL;
++ return ERR_PTR(-EINVAL);
+ if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
+- return -EINVAL;
++ return ERR_PTR(-EINVAL);
+
+- if (cmd_id == ar->wmi.cmd->vdev_start_request_cmdid)
+- cmdname = "start";
+- else if (cmd_id == ar->wmi.cmd->vdev_restart_request_cmdid)
++ if (restart)
+ cmdname = "restart";
+ else
+- return -EINVAL; /* should not happen, we already check cmd_id */
++ cmdname = "start";
+
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ if (arg->hidden_ssid)
+ flags |= WMI_VDEV_START_HIDDEN_SSID;
+ if (arg->pmf_enabled)
+ flags |= WMI_VDEV_START_PMF_ENABLED;
+- if (arg->channel.chan_radar)
+- ch_flags |= WMI_CHAN_FLAG_DFS;
+
+ cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+@@ -3067,143 +4350,118 @@ static int ath10k_wmi_vdev_start_restart
+ memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+ }
+
+- cmd->chan.mhz = __cpu_to_le32(arg->channel.freq);
++ ath10k_wmi_put_wmi_channel(&cmd->chan, &arg->channel);
+
+- cmd->chan.band_center_freq1 =
+- __cpu_to_le32(arg->channel.band_center_freq1);
+-
+- cmd->chan.mode = arg->channel.mode;
+- cmd->chan.flags |= __cpu_to_le32(ch_flags);
+- cmd->chan.min_power = arg->channel.min_power;
+- cmd->chan.max_power = arg->channel.max_power;
+- cmd->chan.reg_power = arg->channel.max_reg_power;
+- cmd->chan.reg_classid = arg->channel.reg_class_id;
+- cmd->chan.antenna_max = arg->channel.max_antenna_gain;
+-
+- ath10k_dbg(ATH10K_DBG_WMI,
+- "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, "
+- "ch_flags: 0x%0X, max_power: %d\n", cmdname, arg->vdev_id,
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
++ "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
++ cmdname, arg->vdev_id,
+ flags, arg->channel.freq, arg->channel.mode,
+ cmd->chan.flags, arg->channel.max_power);
+
+- return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+-}
+-
+-int ath10k_wmi_vdev_start(struct ath10k *ar,
+- const struct wmi_vdev_start_request_arg *arg)
+-{
+- u32 cmd_id = ar->wmi.cmd->vdev_start_request_cmdid;
+-
+- return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
+-}
+-
+-int ath10k_wmi_vdev_restart(struct ath10k *ar,
+- const struct wmi_vdev_start_request_arg *arg)
+-{
+- u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid;
+-
+- return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
++ return skb;
+ }
+
+-int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
++static struct sk_buff *
++ath10k_wmi_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
+ {
+ struct wmi_vdev_stop_cmd *cmd;
+ struct sk_buff *skb;
+
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_stop_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+- ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
+-
+- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
++ return skb;
+ }
+
+-int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
++static struct sk_buff *
++ath10k_wmi_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
++ const u8 *bssid)
+ {
+ struct wmi_vdev_up_cmd *cmd;
+ struct sk_buff *skb;
+
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_up_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->vdev_assoc_id = __cpu_to_le32(aid);
+- memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN);
++ ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+
+- ath10k_dbg(ATH10K_DBG_WMI,
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
+ vdev_id, aid, bssid);
+-
+- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
++ return skb;
+ }
+
+-int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
++static struct sk_buff *
++ath10k_wmi_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
+ {
+ struct wmi_vdev_down_cmd *cmd;
+ struct sk_buff *skb;
+
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_down_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+- ath10k_dbg(ATH10K_DBG_WMI,
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi mgmt vdev down id 0x%x\n", vdev_id);
+-
+- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
++ return skb;
+ }
+
+-int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
+- u32 param_id, u32 param_value)
++static struct sk_buff *
++ath10k_wmi_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
++ u32 param_id, u32 param_value)
+ {
+ struct wmi_vdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
+- ath10k_dbg(ATH10K_DBG_WMI,
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "vdev param %d not supported by firmware\n",
+ param_id);
+- return -EOPNOTSUPP;
++ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+
+- ath10k_dbg(ATH10K_DBG_WMI,
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi vdev id 0x%x set param %d value %d\n",
+ vdev_id, param_id, param_value);
+-
+- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
++ return skb;
+ }
+
+-int ath10k_wmi_vdev_install_key(struct ath10k *ar,
+- const struct wmi_vdev_install_key_arg *arg)
++static struct sk_buff *
++ath10k_wmi_op_gen_vdev_install_key(struct ath10k *ar,
++ const struct wmi_vdev_install_key_arg *arg)
+ {
+ struct wmi_vdev_install_key_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
+- return -EINVAL;
++ return ERR_PTR(-EINVAL);
+ if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
+- return -EINVAL;
++ return ERR_PTR(-EINVAL);
+
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len);
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
+ if (!skb)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+@@ -3215,176 +4473,232 @@ int ath10k_wmi_vdev_install_key(struct a
+ cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
+
+ if (arg->macaddr)
+- memcpy(cmd->peer_macaddr.addr, arg->macaddr, ETH_ALEN);
++ ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
+ if (arg->key_data)
+ memcpy(cmd->key_data, arg->key_data, arg->key_len);
+
+- ath10k_dbg(ATH10K_DBG_WMI,
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi vdev install key idx %d cipher %d len %d\n",
+ arg->key_idx, arg->key_cipher, arg->key_len);
+- return ath10k_wmi_cmd_send(ar, skb,
+- ar->wmi.cmd->vdev_install_key_cmdid);
++ return skb;
+ }
+
+-int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
+- const u8 peer_addr[ETH_ALEN])
++static struct sk_buff *
++ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k *ar,
++ const struct wmi_vdev_spectral_conf_arg *arg)
++{
++ struct wmi_vdev_spectral_conf_cmd *cmd;
++ struct sk_buff *skb;
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
++ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
++ cmd->scan_count = __cpu_to_le32(arg->scan_count);
++ cmd->scan_period = __cpu_to_le32(arg->scan_period);
++ cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
++ cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
++ cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
++ cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
++ cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
++ cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
++ cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
++ cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
++ cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
++ cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
++ cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
++ cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
++ cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
++ cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
++ cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
++ cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
++
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
++ u32 trigger, u32 enable)
++{
++ struct wmi_vdev_spectral_enable_cmd *cmd;
++ struct sk_buff *skb;
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ cmd->trigger_cmd = __cpu_to_le32(trigger);
++ cmd->enable_cmd = __cpu_to_le32(enable);
++
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
++ const u8 peer_addr[ETH_ALEN])
+ {
+ struct wmi_peer_create_cmd *cmd;
+ struct sk_buff *skb;
+
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_peer_create_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+- memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
++ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+- ath10k_dbg(ATH10K_DBG_WMI,
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer create vdev_id %d peer_addr %pM\n",
+ vdev_id, peer_addr);
+- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
++ return skb;
+ }
+
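[Editorial note: the memcpy(..., ETH_ALEN) calls are replaced with ether_addr_copy() throughout these hunks. That helper (from <linux/etherdevice.h>) requires both addresses to be 2-byte aligned; simplified, it is roughly:

	static inline void ether_addr_copy(u8 *dst, const u8 *src)
	{
		u16 *a = (u16 *)dst;
		const u16 *b = (const u16 *)src;

		a[0] = b[0];	/* copy the 6-byte MAC as three u16 loads/stores */
		a[1] = b[1];
		a[2] = b[2];
	}

The wmi_mac_addr fields in these commands satisfy the alignment requirement, which is why the mechanical substitution is safe here.]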
+-int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
|
|
+- const u8 peer_addr[ETH_ALEN])
|
|
++static struct sk_buff *
|
|
++ath10k_wmi_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
|
|
++ const u8 peer_addr[ETH_ALEN])
|
|
+ {
|
|
+ struct wmi_peer_delete_cmd *cmd;
|
|
+ struct sk_buff *skb;
|
|
+
|
|
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
|
|
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
|
|
+ if (!skb)
|
|
+- return -ENOMEM;
|
|
++ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ cmd = (struct wmi_peer_delete_cmd *)skb->data;
|
|
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
|
|
+- memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
|
|
++ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
|
|
+
|
|
+- ath10k_dbg(ATH10K_DBG_WMI,
|
|
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
|
|
+ "wmi peer delete vdev_id %d peer_addr %pM\n",
|
|
+ vdev_id, peer_addr);
|
|
+- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
|
|
++ return skb;
|
|
+ }
|
|
+
|
|
+-int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
|
|
+- const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
|
|
++static struct sk_buff *
|
|
++ath10k_wmi_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
|
|
++ const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
|
|
+ {
|
|
+ struct wmi_peer_flush_tids_cmd *cmd;
|
|
+ struct sk_buff *skb;
|
|
+
|
|
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
|
|
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
|
|
+ if (!skb)
|
|
+- return -ENOMEM;
|
|
++ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
|
|
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
|
|
+ cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
|
|
+- memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
|
|
++ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
|
|
+
|
|
+- ath10k_dbg(ATH10K_DBG_WMI,
|
|
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
|
|
+ "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
|
|
+ vdev_id, peer_addr, tid_bitmap);
|
|
+- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
|
|
++ return skb;
|
|
+ }
|
|
+
|
|
+-int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
|
|
+- const u8 *peer_addr, enum wmi_peer_param param_id,
|
|
+- u32 param_value)
|
|
++static struct sk_buff *
|
|
++ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
|
|
++ const u8 *peer_addr,
|
|
++ enum wmi_peer_param param_id,
|
|
++ u32 param_value)
|
|
+ {
|
|
+ struct wmi_peer_set_param_cmd *cmd;
|
|
+ struct sk_buff *skb;
|
|
+
|
|
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
|
|
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
|
|
+ if (!skb)
|
|
+- return -ENOMEM;
|
|
++ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ cmd = (struct wmi_peer_set_param_cmd *)skb->data;
|
|
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
|
|
+ cmd->param_id = __cpu_to_le32(param_id);
|
|
+ cmd->param_value = __cpu_to_le32(param_value);
|
|
+- memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
|
|
++ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
|
|
+
|
|
+- ath10k_dbg(ATH10K_DBG_WMI,
|
|
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
|
|
+ "wmi vdev %d peer 0x%pM set param %d value %d\n",
|
|
+ vdev_id, peer_addr, param_id, param_value);
|
|
+-
|
|
+- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
|
|
++ return skb;
|
|
+ }
|
|
+
|
|
+-int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
|
|
+- enum wmi_sta_ps_mode psmode)
|
|
++static struct sk_buff *
|
|
++ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
|
|
++ enum wmi_sta_ps_mode psmode)
|
|
+ {
|
|
+ struct wmi_sta_powersave_mode_cmd *cmd;
|
|
+ struct sk_buff *skb;
|
|
+
|
|
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
|
|
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
|
|
+ if (!skb)
|
|
+- return -ENOMEM;
|
|
++ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
|
|
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
|
|
+ cmd->sta_ps_mode = __cpu_to_le32(psmode);
|
|
+
|
|
+- ath10k_dbg(ATH10K_DBG_WMI,
|
|
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
|
|
+ "wmi set powersave id 0x%x mode %d\n",
|
|
+ vdev_id, psmode);
|
|
+-
|
|
+- return ath10k_wmi_cmd_send(ar, skb,
|
|
+- ar->wmi.cmd->sta_powersave_mode_cmdid);
|
|
++ return skb;
|
|
+ }
|
|
+
|
|
+-int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
|
|
+- enum wmi_sta_powersave_param param_id,
|
|
+- u32 value)
|
|
++static struct sk_buff *
|
|
++ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
|
|
++ enum wmi_sta_powersave_param param_id,
|
|
++ u32 value)
|
|
+ {
|
|
+ struct wmi_sta_powersave_param_cmd *cmd;
|
|
+ struct sk_buff *skb;
|
|
+
|
|
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
|
|
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
|
|
+ if (!skb)
|
|
+- return -ENOMEM;
|
|
++ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
|
|
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
|
|
+ cmd->param_id = __cpu_to_le32(param_id);
|
|
+ cmd->param_value = __cpu_to_le32(value);
|
|
+
|
|
+- ath10k_dbg(ATH10K_DBG_WMI,
|
|
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
|
|
+ "wmi sta ps param vdev_id 0x%x param %d value %d\n",
|
|
+ vdev_id, param_id, value);
|
|
+- return ath10k_wmi_cmd_send(ar, skb,
|
|
+- ar->wmi.cmd->sta_powersave_param_cmdid);
|
|
++ return skb;
|
|
+ }
|
|
+
|
|
+-int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
|
|
+- enum wmi_ap_ps_peer_param param_id, u32 value)
|
|
++static struct sk_buff *
|
|
++ath10k_wmi_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
|
|
++ enum wmi_ap_ps_peer_param param_id, u32 value)
|
|
+ {
|
|
+ struct wmi_ap_ps_peer_cmd *cmd;
|
|
+ struct sk_buff *skb;
|
|
+
|
|
+ if (!mac)
|
|
+- return -EINVAL;
|
|
++ return ERR_PTR(-EINVAL);
|
|
+
|
|
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
|
|
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
|
|
+ if (!skb)
|
|
+- return -ENOMEM;
|
|
++ return ERR_PTR(-ENOMEM);
|
|
+
|
|
+ cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
|
|
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
|
|
+ cmd->param_id = __cpu_to_le32(param_id);
|
|
+ cmd->param_value = __cpu_to_le32(value);
|
|
+- memcpy(&cmd->peer_macaddr, mac, ETH_ALEN);
|
|
++ ether_addr_copy(cmd->peer_macaddr.addr, mac);
|
|
+
|
|
+- ath10k_dbg(ATH10K_DBG_WMI,
|
|
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
|
|
+ "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
|
|
+ vdev_id, param_id, value, mac);
|
|
+-
|
|
+- return ath10k_wmi_cmd_send(ar, skb,
|
|
+- ar->wmi.cmd->ap_ps_peer_param_cmdid);
|
|
++ return skb;
|
|
+ }
|
|
+
|
|
+-int ath10k_wmi_scan_chan_list(struct ath10k *ar,
|
|
+- const struct wmi_scan_chan_list_arg *arg)
|
|
++static struct sk_buff *
|
|
++ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
|
|
++ const struct wmi_scan_chan_list_arg *arg)
|
|
+ {
|
|
+ struct wmi_scan_chan_list_cmd *cmd;
|
|
+ struct sk_buff *skb;
|
|
+@@ -3395,66 +4709,29 @@ int ath10k_wmi_scan_chan_list(struct ath
+
+ len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
+
+- skb = ath10k_wmi_alloc_skb(len);
++ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+- return -EINVAL;
++ return ERR_PTR(-EINVAL);
+
+ cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
+ cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
+
+ for (i = 0; i < arg->n_channels; i++) {
+- u32 flags = 0;
+-
+ ch = &arg->channels[i];
+ ci = &cmd->chan_info[i];
+
+- if (ch->passive)
+- flags |= WMI_CHAN_FLAG_PASSIVE;
+- if (ch->allow_ibss)
+- flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
+- if (ch->allow_ht)
+- flags |= WMI_CHAN_FLAG_ALLOW_HT;
+- if (ch->allow_vht)
+- flags |= WMI_CHAN_FLAG_ALLOW_VHT;
+- if (ch->ht40plus)
+- flags |= WMI_CHAN_FLAG_HT40_PLUS;
+- if (ch->chan_radar)
+- flags |= WMI_CHAN_FLAG_DFS;
+-
+- ci->mhz = __cpu_to_le32(ch->freq);
+- ci->band_center_freq1 = __cpu_to_le32(ch->freq);
+- ci->band_center_freq2 = 0;
+- ci->min_power = ch->min_power;
+- ci->max_power = ch->max_power;
+- ci->reg_power = ch->max_reg_power;
+- ci->antenna_max = ch->max_antenna_gain;
+-
+- /* mode & flags share storage */
+- ci->mode = ch->mode;
+- ci->flags |= __cpu_to_le32(flags);
++ ath10k_wmi_put_wmi_channel(ci, ch);
+ }
+
+- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
++ return skb;
+ }
+
+-int ath10k_wmi_peer_assoc(struct ath10k *ar,
+- const struct wmi_peer_assoc_complete_arg *arg)
++static void
++ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
++ const struct wmi_peer_assoc_complete_arg *arg)
+ {
+- struct wmi_peer_assoc_complete_cmd *cmd;
+- struct sk_buff *skb;
++ struct wmi_common_peer_assoc_complete_cmd *cmd = buf;
+
+- if (arg->peer_mpdu_density > 16)
+- return -EINVAL;
+- if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
+- return -EINVAL;
+- if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
+- return -EINVAL;
+-
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+- if (!skb)
+- return -ENOMEM;
+-
+- cmd = (struct wmi_peer_assoc_complete_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
+ cmd->peer_associd = __cpu_to_le32(arg->peer_aid);
+@@ -3469,7 +4746,7 @@ int ath10k_wmi_peer_assoc(struct ath10k
+ cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps);
+ cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode);
+
+- memcpy(cmd->peer_macaddr.addr, arg->addr, ETH_ALEN);
++ ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
+
+ cmd->peer_legacy_rates.num_rates =
+ __cpu_to_le32(arg->peer_legacy_rates.num_rates);
+@@ -3489,57 +4766,183 @@ int ath10k_wmi_peer_assoc(struct ath10k
+ __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
+ cmd->peer_vht_rates.tx_mcs_set =
+ __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
++}
++
++static void
++ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf,
++ const struct wmi_peer_assoc_complete_arg *arg)
++{
++ struct wmi_main_peer_assoc_complete_cmd *cmd = buf;
+
+- ath10k_dbg(ATH10K_DBG_WMI,
++ ath10k_wmi_peer_assoc_fill(ar, buf, arg);
++ memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info));
++}
++
++static void
++ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf,
++ const struct wmi_peer_assoc_complete_arg *arg)
++{
++ ath10k_wmi_peer_assoc_fill(ar, buf, arg);
++}
++
++static void
++ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
++ const struct wmi_peer_assoc_complete_arg *arg)
++{
++ struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf;
++ int max_mcs, max_nss;
++ u32 info0;
++
++ /* TODO: Is using max values okay with firmware? */
++ max_mcs = 0xf;
++ max_nss = 0xf;
++
++ info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) |
++ SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS);
++
++ ath10k_wmi_peer_assoc_fill(ar, buf, arg);
++ cmd->info0 = __cpu_to_le32(info0);
++}
++
++static int
++ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg)
++{
++ if (arg->peer_mpdu_density > 16)
++ return -EINVAL;
++ if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
++ return -EINVAL;
++ if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
++ return -EINVAL;
++
++ return 0;
++}
++
++static struct sk_buff *
++ath10k_wmi_op_gen_peer_assoc(struct ath10k *ar,
++ const struct wmi_peer_assoc_complete_arg *arg)
++{
++ size_t len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
++ struct sk_buff *skb;
++ int ret;
++
++ ret = ath10k_wmi_peer_assoc_check_arg(arg);
++ if (ret)
++ return ERR_PTR(ret);
++
++ skb = ath10k_wmi_alloc_skb(ar, len);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
++ "wmi peer assoc vdev %d addr %pM (%s)\n",
++ arg->vdev_id, arg->addr,
++ arg->peer_reassoc ? "reassociate" : "new");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k *ar,
++ const struct wmi_peer_assoc_complete_arg *arg)
++{
++ size_t len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
++ struct sk_buff *skb;
++ int ret;
++
++ ret = ath10k_wmi_peer_assoc_check_arg(arg);
++ if (ret)
++ return ERR_PTR(ret);
++
++ skb = ath10k_wmi_alloc_skb(ar, len);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
++ "wmi peer assoc vdev %d addr %pM (%s)\n",
++ arg->vdev_id, arg->addr,
++ arg->peer_reassoc ? "reassociate" : "new");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar,
++ const struct wmi_peer_assoc_complete_arg *arg)
++{
++ size_t len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
++ struct sk_buff *skb;
++ int ret;
++
++ ret = ath10k_wmi_peer_assoc_check_arg(arg);
++ if (ret)
++ return ERR_PTR(ret);
++
++ skb = ath10k_wmi_alloc_skb(ar, len);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer assoc vdev %d addr %pM (%s)\n",
+ arg->vdev_id, arg->addr,
+ arg->peer_reassoc ? "reassociate" : "new");
+- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
++{
++ struct sk_buff *skb;
++
++ skb = ath10k_wmi_alloc_skb(ar, 0);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature\n");
++ return skb;
+ }
+
+ /* This function assumes the beacon is already DMA mapped */
+-int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
++static struct sk_buff *
++ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
++ size_t bcn_len, u32 bcn_paddr, bool dtim_zero,
++ bool deliver_cab)
+ {
+ struct wmi_bcn_tx_ref_cmd *cmd;
+ struct sk_buff *skb;
+- struct sk_buff *beacon = arvif->beacon;
+- struct ath10k *ar = arvif->ar;
+ struct ieee80211_hdr *hdr;
+- int ret;
+ u16 fc;
+
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+- hdr = (struct ieee80211_hdr *)beacon->data;
++ hdr = (struct ieee80211_hdr *)bcn;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
+- cmd->vdev_id = __cpu_to_le32(arvif->vdev_id);
+- cmd->data_len = __cpu_to_le32(beacon->len);
+- cmd->data_ptr = __cpu_to_le32(ATH10K_SKB_CB(beacon)->paddr);
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ cmd->data_len = __cpu_to_le32(bcn_len);
++ cmd->data_ptr = __cpu_to_le32(bcn_paddr);
+ cmd->msdu_id = 0;
+ cmd->frame_control = __cpu_to_le32(fc);
+ cmd->flags = 0;
++ cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);
+
+- if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero)
++ if (dtim_zero)
+ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
+
+- if (ATH10K_SKB_CB(beacon)->bcn.deliver_cab)
++ if (deliver_cab)
+ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
+
+- ret = ath10k_wmi_cmd_send_nowait(ar, skb,
+- ar->wmi.cmd->pdev_send_bcn_cmdid);
+-
+- if (ret)
+- dev_kfree_skb(skb);
+-
+- return ret;
++ return skb;
+ }
+
+-static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
+- const struct wmi_wmm_params_arg *arg)
++void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
++ const struct wmi_wmm_params_arg *arg)
+ {
+ params->cwmin = __cpu_to_le32(arg->cwmin);
+ params->cwmax = __cpu_to_le32(arg->cwmax);
+@@ -3549,76 +4952,81 @@ static void ath10k_wmi_pdev_set_wmm_para
+ params->no_ack = __cpu_to_le32(arg->no_ack);
+ }
+
+-int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
+- const struct wmi_pdev_set_wmm_params_arg *arg)
++static struct sk_buff *
++ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
++ const struct wmi_wmm_params_all_arg *arg)
+ {
+ struct wmi_pdev_set_wmm_params *cmd;
+ struct sk_buff *skb;
+
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
+- ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be);
+- ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
+- ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
+- ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
++ ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be);
++ ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
++ ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
++ ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
+
+- ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
+- return ath10k_wmi_cmd_send(ar, skb,
+- ar->wmi.cmd->pdev_set_wmm_params_cmdid);
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
++ return skb;
+ }
+
+-int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
++static struct sk_buff *
++ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
+ {
+ struct wmi_request_stats_cmd *cmd;
+ struct sk_buff *skb;
+
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_request_stats_cmd *)skb->data;
+- cmd->stats_id = __cpu_to_le32(stats_id);
++ cmd->stats_id = __cpu_to_le32(stats_mask);
+
+- ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
+- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n",
++ stats_mask);
++ return skb;
+ }
+
+-int ath10k_wmi_force_fw_hang(struct ath10k *ar,
+- enum wmi_force_fw_hang_type type, u32 delay_ms)
++static struct sk_buff *
++ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
++ enum wmi_force_fw_hang_type type, u32 delay_ms)
+ {
+ struct wmi_force_fw_hang_cmd *cmd;
+ struct sk_buff *skb;
+
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
+ cmd->type = __cpu_to_le32(type);
+ cmd->delay_ms = __cpu_to_le32(delay_ms);
+
+- ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
+ type, delay_ms);
+- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
++ return skb;
+ }
+
+-int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
++static struct sk_buff *
++ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
++ u32 log_level)
+ {
+ struct wmi_dbglog_cfg_cmd *cmd;
+ struct sk_buff *skb;
+ u32 cfg;
+
+- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
+
+ if (module_enable) {
+- cfg = SM(ATH10K_DBGLOG_LEVEL_VERBOSE,
++ cfg = SM(log_level,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ } else {
+ /* set back defaults, all modules with WARN level */
+@@ -3632,12 +5040,474 @@ int ath10k_wmi_dbglog_cfg(struct ath10k
+ cmd->config_enable = __cpu_to_le32(cfg);
+ cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
+
+- ath10k_dbg(ATH10K_DBG_WMI,
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
+ __le32_to_cpu(cmd->module_enable),
+ __le32_to_cpu(cmd->module_valid),
+ __le32_to_cpu(cmd->config_enable),
+ __le32_to_cpu(cmd->config_valid));
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
++{
++ struct wmi_pdev_pktlog_enable_cmd *cmd;
++ struct sk_buff *skb;
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ ev_bitmap &= ATH10K_PKTLOG_ANY;
++
++ cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
++ cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi enable pktlog filter 0x%08x\n",
++ ev_bitmap);
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_op_gen_pktlog_disable(struct ath10k *ar)
++{
++ struct sk_buff *skb;
+
+- return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
++ skb = ath10k_wmi_alloc_skb(ar, 0);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
++ u32 duration, u32 next_offset,
++ u32 enabled)
++{
++ struct wmi_pdev_set_quiet_cmd *cmd;
++ struct sk_buff *skb;
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ cmd = (struct wmi_pdev_set_quiet_cmd *)skb->data;
++ cmd->period = __cpu_to_le32(period);
++ cmd->duration = __cpu_to_le32(duration);
++ cmd->next_start = __cpu_to_le32(next_offset);
++ cmd->enabled = __cpu_to_le32(enabled);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
++ "wmi quiet param: period %u duration %u enabled %d\n",
++ period, duration, enabled);
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id,
++ const u8 *mac)
++{
++ struct wmi_addba_clear_resp_cmd *cmd;
++ struct sk_buff *skb;
++
++ if (!mac)
++ return ERR_PTR(-EINVAL);
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ ether_addr_copy(cmd->peer_macaddr.addr, mac);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
++ "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
++ vdev_id, mac);
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
++ u32 tid, u32 buf_size)
++{
++ struct wmi_addba_send_cmd *cmd;
++ struct sk_buff *skb;
++
++ if (!mac)
++ return ERR_PTR(-EINVAL);
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ cmd = (struct wmi_addba_send_cmd *)skb->data;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ ether_addr_copy(cmd->peer_macaddr.addr, mac);
++ cmd->tid = __cpu_to_le32(tid);
++ cmd->buffersize = __cpu_to_le32(buf_size);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
++ "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
++ vdev_id, mac, tid, buf_size);
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
++ u32 tid, u32 status)
++{
++ struct wmi_addba_setresponse_cmd *cmd;
++ struct sk_buff *skb;
++
++ if (!mac)
++ return ERR_PTR(-EINVAL);
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ ether_addr_copy(cmd->peer_macaddr.addr, mac);
++ cmd->tid = __cpu_to_le32(tid);
++ cmd->statuscode = __cpu_to_le32(status);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
++ "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
++ vdev_id, mac, tid, status);
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
++ u32 tid, u32 initiator, u32 reason)
++{
++ struct wmi_delba_send_cmd *cmd;
++ struct sk_buff *skb;
++
++ if (!mac)
++ return ERR_PTR(-EINVAL);
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ cmd = (struct wmi_delba_send_cmd *)skb->data;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ ether_addr_copy(cmd->peer_macaddr.addr, mac);
++ cmd->tid = __cpu_to_le32(tid);
++ cmd->initiator = __cpu_to_le32(initiator);
++ cmd->reasoncode = __cpu_to_le32(reason);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
++ "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
++ vdev_id, mac, tid, initiator, reason);
++ return skb;
++}
++
++static const struct wmi_ops wmi_ops = {
++ .rx = ath10k_wmi_op_rx,
++ .map_svc = wmi_main_svc_map,
++
++ .pull_scan = ath10k_wmi_op_pull_scan_ev,
++ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
++ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
++ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
++ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
++ .pull_swba = ath10k_wmi_op_pull_swba_ev,
++ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
++ .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
++ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
++ .pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
++
++ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
++ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
++ .gen_pdev_set_rd = ath10k_wmi_op_gen_pdev_set_rd,
++ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
++ .gen_init = ath10k_wmi_op_gen_init,
++ .gen_start_scan = ath10k_wmi_op_gen_start_scan,
++ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
++ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
++ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
++ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
++ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
++ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
++ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
++ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
++ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
++ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
++ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
++ /* .gen_vdev_wmm_conf not implemented */
++ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
++ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
++ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
++ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
++ .gen_peer_assoc = ath10k_wmi_op_gen_peer_assoc,
++ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
++ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
++ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
++ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
++ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
++ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
++ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
++ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
++ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
++ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
++ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
++ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
++ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
++ /* .gen_pdev_get_temperature not implemented */
++ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
++ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
++ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
++ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
++ /* .gen_bcn_tmpl not implemented */
++ /* .gen_prb_tmpl not implemented */
++ /* .gen_p2p_go_bcn_ie not implemented */
++};
++
++static const struct wmi_ops wmi_10_1_ops = {
++ .rx = ath10k_wmi_10_1_op_rx,
++ .map_svc = wmi_10x_svc_map,
++ .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
++ .pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats,
++ .gen_init = ath10k_wmi_10_1_op_gen_init,
++ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
++ .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
++ .gen_peer_assoc = ath10k_wmi_10_1_op_gen_peer_assoc,
++ /* .gen_pdev_get_temperature not implemented */
++
++ /* shared with main branch */
++ .pull_scan = ath10k_wmi_op_pull_scan_ev,
++ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
++ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
++ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
++ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
++ .pull_swba = ath10k_wmi_op_pull_swba_ev,
++ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
++ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
++
++ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
++ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
++ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
++ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
++ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
++ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
++ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
++ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
++ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
++ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
++ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
++ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
++ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
++ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
++ /* .gen_vdev_wmm_conf not implemented */
++ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
++ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
++ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
++ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
++ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
++ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
++ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
++ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
++ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
++ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
++ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
++ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
++ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
++ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
++ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
++ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
++ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
++ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
++ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
++ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
++ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
++ /* .gen_bcn_tmpl not implemented */
++ /* .gen_prb_tmpl not implemented */
++ /* .gen_p2p_go_bcn_ie not implemented */
++};
++
++static const struct wmi_ops wmi_10_2_ops = {
++ .rx = ath10k_wmi_10_2_op_rx,
++ .pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats,
++ .gen_init = ath10k_wmi_10_2_op_gen_init,
++ .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
++ /* .gen_pdev_get_temperature not implemented */
++
++ /* shared with 10.1 */
++ .map_svc = wmi_10x_svc_map,
++ .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
++ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
++ .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
++
++ .pull_scan = ath10k_wmi_op_pull_scan_ev,
++ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
++ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
++ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
++ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
++ .pull_swba = ath10k_wmi_op_pull_swba_ev,
++ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
++ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
++
++ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
++ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
++ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
++ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
++ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
++ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
++ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
++ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
++ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
++ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
++ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
++ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
++ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
++ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
++ /* .gen_vdev_wmm_conf not implemented */
++ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
++ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
++ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
++ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
++ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
++ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
++ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
++ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
++ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
++ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
++ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
++ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
++ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
++ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
++ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
++ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
++ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
++ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
++ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
++ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
++ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
++};
++
++static const struct wmi_ops wmi_10_2_4_ops = {
++ .rx = ath10k_wmi_10_2_op_rx,
++ .pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats,
++ .gen_init = ath10k_wmi_10_2_op_gen_init,
++ .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
++ .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
++
++ /* shared with 10.1 */
++ .map_svc = wmi_10x_svc_map,
++ .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
++ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
++ .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
++
++ .pull_scan = ath10k_wmi_op_pull_scan_ev,
++ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
++ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
++ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
++ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
++ .pull_swba = ath10k_wmi_op_pull_swba_ev,
++ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
++ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
++
++ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
++ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
++ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
++ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
++ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
++ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
++ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
++ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
++ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
++ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
++ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
++ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
++ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
++ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
++ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
++ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
++ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
++ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
++ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
++ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
++ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
++ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
++ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
++ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
++ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
++ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
++ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
++ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
++ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
++ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
++ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
++ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
++ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
++ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
++ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
++ /* .gen_bcn_tmpl not implemented */
++ /* .gen_prb_tmpl not implemented */
++ /* .gen_p2p_go_bcn_ie not implemented */
++};
++
++int ath10k_wmi_attach(struct ath10k *ar)
++{
++ switch (ar->wmi.op_version) {
++ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
++ ar->wmi.cmd = &wmi_10_2_4_cmd_map;
++ ar->wmi.ops = &wmi_10_2_4_ops;
++ ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
++ ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
++ break;
++ case ATH10K_FW_WMI_OP_VERSION_10_2:
++ ar->wmi.cmd = &wmi_10_2_cmd_map;
++ ar->wmi.ops = &wmi_10_2_ops;
++ ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
++ ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
++ break;
++ case ATH10K_FW_WMI_OP_VERSION_10_1:
++ ar->wmi.cmd = &wmi_10x_cmd_map;
++ ar->wmi.ops = &wmi_10_1_ops;
++ ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
++ ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
++ break;
++ case ATH10K_FW_WMI_OP_VERSION_MAIN:
++ ar->wmi.cmd = &wmi_cmd_map;
++ ar->wmi.ops = &wmi_ops;
++ ar->wmi.vdev_param = &wmi_vdev_param_map;
++ ar->wmi.pdev_param = &wmi_pdev_param_map;
++ break;
++ case ATH10K_FW_WMI_OP_VERSION_TLV:
++ ath10k_wmi_tlv_attach(ar);
++ break;
++ case ATH10K_FW_WMI_OP_VERSION_UNSET:
++ case ATH10K_FW_WMI_OP_VERSION_MAX:
++ ath10k_err(ar, "unsupported WMI op version: %d\n",
++ ar->wmi.op_version);
++ return -EINVAL;
++ }
++
++ init_completion(&ar->wmi.service_ready);
++ init_completion(&ar->wmi.unified_ready);
++
++ return 0;
++}
++
++void ath10k_wmi_detach(struct ath10k *ar)
++{
++ int i;
++
++ /* free the host memory chunks requested by firmware */
++ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
++ dma_free_coherent(ar->dev,
++ ar->wmi.mem_chunks[i].len,
++ ar->wmi.mem_chunks[i].vaddr,
++ ar->wmi.mem_chunks[i].paddr);
++ }
++
++ ar->wmi.num_mem_chunks = 0;
+ }
+--- a/drivers/net/wireless/ath/ath10k/wmi.h
++++ b/drivers/net/wireless/ath/ath10k/wmi.h
+@@ -73,119 +73,361 @@ struct wmi_cmd_hdr {
+ #define HTC_PROTOCOL_VERSION 0x0002
+ #define WMI_PROTOCOL_VERSION 0x0002
+
+-enum wmi_service_id {
+- WMI_SERVICE_BEACON_OFFLOAD = 0, /* beacon offload */
+- WMI_SERVICE_SCAN_OFFLOAD, /* scan offload */
+- WMI_SERVICE_ROAM_OFFLOAD, /* roam offload */
+- WMI_SERVICE_BCN_MISS_OFFLOAD, /* beacon miss offload */
+- WMI_SERVICE_STA_PWRSAVE, /* fake sleep + basic power save */
+- WMI_SERVICE_STA_ADVANCED_PWRSAVE, /* uapsd, pspoll, force sleep */
+- WMI_SERVICE_AP_UAPSD, /* uapsd on AP */
+- WMI_SERVICE_AP_DFS, /* DFS on AP */
+- WMI_SERVICE_11AC, /* supports 11ac */
+- WMI_SERVICE_BLOCKACK, /* Supports triggering ADDBA/DELBA from host*/
+- WMI_SERVICE_PHYERR, /* PHY error */
+- WMI_SERVICE_BCN_FILTER, /* Beacon filter support */
+- WMI_SERVICE_RTT, /* RTT (round trip time) support */
+- WMI_SERVICE_RATECTRL, /* Rate-control */
+- WMI_SERVICE_WOW, /* WOW Support */
+- WMI_SERVICE_RATECTRL_CACHE, /* Rate-control caching */
+- WMI_SERVICE_IRAM_TIDS, /* TIDs in IRAM */
+- WMI_SERVICE_ARPNS_OFFLOAD, /* ARP NS Offload support */
+- WMI_SERVICE_NLO, /* Network list offload service */
+- WMI_SERVICE_GTK_OFFLOAD, /* GTK offload */
+- WMI_SERVICE_SCAN_SCH, /* Scan Scheduler Service */
+- WMI_SERVICE_CSA_OFFLOAD, /* CSA offload service */
+- WMI_SERVICE_CHATTER, /* Chatter service */
+- WMI_SERVICE_COEX_FREQAVOID, /* FW report freq range to avoid */
+- WMI_SERVICE_PACKET_POWER_SAVE, /* packet power save service */
+- WMI_SERVICE_FORCE_FW_HANG, /* To test fw recovery mechanism */
+- WMI_SERVICE_GPIO, /* GPIO service */
+- WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, /* Modulated DTIM support */
+- WMI_STA_UAPSD_BASIC_AUTO_TRIG, /* UAPSD AC Trigger Generation */
+- WMI_STA_UAPSD_VAR_AUTO_TRIG, /* -do- */
+- WMI_SERVICE_STA_KEEP_ALIVE, /* STA keep alive mechanism support */
+- WMI_SERVICE_TX_ENCAP, /* Packet type for TX encapsulation */
+-
+- WMI_SERVICE_LAST,
+- WMI_MAX_SERVICE = 64 /* max service */
++enum wmi_service {
++ WMI_SERVICE_BEACON_OFFLOAD = 0,
++ WMI_SERVICE_SCAN_OFFLOAD,
++ WMI_SERVICE_ROAM_OFFLOAD,
++ WMI_SERVICE_BCN_MISS_OFFLOAD,
++ WMI_SERVICE_STA_PWRSAVE,
++ WMI_SERVICE_STA_ADVANCED_PWRSAVE,
++ WMI_SERVICE_AP_UAPSD,
++ WMI_SERVICE_AP_DFS,
++ WMI_SERVICE_11AC,
++ WMI_SERVICE_BLOCKACK,
++ WMI_SERVICE_PHYERR,
++ WMI_SERVICE_BCN_FILTER,
++ WMI_SERVICE_RTT,
++ WMI_SERVICE_RATECTRL,
++ WMI_SERVICE_WOW,
++ WMI_SERVICE_RATECTRL_CACHE,
++ WMI_SERVICE_IRAM_TIDS,
++ WMI_SERVICE_ARPNS_OFFLOAD,
++ WMI_SERVICE_NLO,
++ WMI_SERVICE_GTK_OFFLOAD,
++ WMI_SERVICE_SCAN_SCH,
++ WMI_SERVICE_CSA_OFFLOAD,
++ WMI_SERVICE_CHATTER,
++ WMI_SERVICE_COEX_FREQAVOID,
++ WMI_SERVICE_PACKET_POWER_SAVE,
++ WMI_SERVICE_FORCE_FW_HANG,
++ WMI_SERVICE_GPIO,
++ WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
++ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
++ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
++ WMI_SERVICE_STA_KEEP_ALIVE,
++ WMI_SERVICE_TX_ENCAP,
++ WMI_SERVICE_BURST,
++ WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT,
++ WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT,
++ WMI_SERVICE_ROAM_SCAN_OFFLOAD,
++ WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
++ WMI_SERVICE_EARLY_RX,
++ WMI_SERVICE_STA_SMPS,
++ WMI_SERVICE_FWTEST,
++ WMI_SERVICE_STA_WMMAC,
++ WMI_SERVICE_TDLS,
++ WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE,
++ WMI_SERVICE_ADAPTIVE_OCS,
++ WMI_SERVICE_BA_SSN_SUPPORT,
++ WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
++ WMI_SERVICE_WLAN_HB,
++ WMI_SERVICE_LTE_ANT_SHARE_SUPPORT,
++ WMI_SERVICE_BATCH_SCAN,
++ WMI_SERVICE_QPOWER,
++ WMI_SERVICE_PLMREQ,
++ WMI_SERVICE_THERMAL_MGMT,
++ WMI_SERVICE_RMC,
++ WMI_SERVICE_MHF_OFFLOAD,
++ WMI_SERVICE_COEX_SAR,
++ WMI_SERVICE_BCN_TXRATE_OVERRIDE,
++ WMI_SERVICE_NAN,
++ WMI_SERVICE_L1SS_STAT,
++ WMI_SERVICE_ESTIMATE_LINKSPEED,
++ WMI_SERVICE_OBSS_SCAN,
++ WMI_SERVICE_TDLS_OFFCHAN,
++ WMI_SERVICE_TDLS_UAPSD_BUFFER_STA,
++ WMI_SERVICE_TDLS_UAPSD_SLEEP_STA,
++ WMI_SERVICE_IBSS_PWRSAVE,
++ WMI_SERVICE_LPASS,
++ WMI_SERVICE_EXTSCAN,
++ WMI_SERVICE_D0WOW,
++ WMI_SERVICE_HSOFFLOAD,
++ WMI_SERVICE_ROAM_HO_OFFLOAD,
++ WMI_SERVICE_RX_FULL_REORDER,
++ WMI_SERVICE_DHCP_OFFLOAD,
++ WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
++ WMI_SERVICE_MDNS_OFFLOAD,
++ WMI_SERVICE_SAP_AUTH_OFFLOAD,
++
++ /* keep last */
++ WMI_SERVICE_MAX,
++};
++
++enum wmi_10x_service {
++ WMI_10X_SERVICE_BEACON_OFFLOAD = 0,
++ WMI_10X_SERVICE_SCAN_OFFLOAD,
++ WMI_10X_SERVICE_ROAM_OFFLOAD,
++ WMI_10X_SERVICE_BCN_MISS_OFFLOAD,
++ WMI_10X_SERVICE_STA_PWRSAVE,
++ WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE,
++ WMI_10X_SERVICE_AP_UAPSD,
++ WMI_10X_SERVICE_AP_DFS,
++ WMI_10X_SERVICE_11AC,
++ WMI_10X_SERVICE_BLOCKACK,
++ WMI_10X_SERVICE_PHYERR,
++ WMI_10X_SERVICE_BCN_FILTER,
++ WMI_10X_SERVICE_RTT,
++ WMI_10X_SERVICE_RATECTRL,
++ WMI_10X_SERVICE_WOW,
++ WMI_10X_SERVICE_RATECTRL_CACHE,
++ WMI_10X_SERVICE_IRAM_TIDS,
++ WMI_10X_SERVICE_BURST,
++
++ /* introduced in 10.2 */
++ WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
++ WMI_10X_SERVICE_FORCE_FW_HANG,
++ WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
++};
++
++enum wmi_main_service {
++ WMI_MAIN_SERVICE_BEACON_OFFLOAD = 0,
++ WMI_MAIN_SERVICE_SCAN_OFFLOAD,
++ WMI_MAIN_SERVICE_ROAM_OFFLOAD,
++ WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD,
++ WMI_MAIN_SERVICE_STA_PWRSAVE,
++ WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE,
++ WMI_MAIN_SERVICE_AP_UAPSD,
++ WMI_MAIN_SERVICE_AP_DFS,
++ WMI_MAIN_SERVICE_11AC,
++ WMI_MAIN_SERVICE_BLOCKACK,
++ WMI_MAIN_SERVICE_PHYERR,
++ WMI_MAIN_SERVICE_BCN_FILTER,
++ WMI_MAIN_SERVICE_RTT,
++ WMI_MAIN_SERVICE_RATECTRL,
++ WMI_MAIN_SERVICE_WOW,
++ WMI_MAIN_SERVICE_RATECTRL_CACHE,
++ WMI_MAIN_SERVICE_IRAM_TIDS,
++ WMI_MAIN_SERVICE_ARPNS_OFFLOAD,
++ WMI_MAIN_SERVICE_NLO,
++ WMI_MAIN_SERVICE_GTK_OFFLOAD,
++ WMI_MAIN_SERVICE_SCAN_SCH,
++ WMI_MAIN_SERVICE_CSA_OFFLOAD,
++ WMI_MAIN_SERVICE_CHATTER,
++ WMI_MAIN_SERVICE_COEX_FREQAVOID,
++ WMI_MAIN_SERVICE_PACKET_POWER_SAVE,
++ WMI_MAIN_SERVICE_FORCE_FW_HANG,
++ WMI_MAIN_SERVICE_GPIO,
++ WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
++ WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
++ WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
++ WMI_MAIN_SERVICE_STA_KEEP_ALIVE,
++ WMI_MAIN_SERVICE_TX_ENCAP,
+ };
+
+ static inline char *wmi_service_name(int service_id)
+ {
++#define SVCSTR(x) case x: return #x
++
+ switch (service_id) {
+- case WMI_SERVICE_BEACON_OFFLOAD:
+- return "BEACON_OFFLOAD";
+- case WMI_SERVICE_SCAN_OFFLOAD:
+- return "SCAN_OFFLOAD";
+- case WMI_SERVICE_ROAM_OFFLOAD:
+- return "ROAM_OFFLOAD";
+- case WMI_SERVICE_BCN_MISS_OFFLOAD:
+- return "BCN_MISS_OFFLOAD";
+- case WMI_SERVICE_STA_PWRSAVE:
+- return "STA_PWRSAVE";
+- case WMI_SERVICE_STA_ADVANCED_PWRSAVE:
+- return "STA_ADVANCED_PWRSAVE";
+- case WMI_SERVICE_AP_UAPSD:
+- return "AP_UAPSD";
+- case WMI_SERVICE_AP_DFS:
+- return "AP_DFS";
+- case WMI_SERVICE_11AC:
+- return "11AC";
+- case WMI_SERVICE_BLOCKACK:
+- return "BLOCKACK";
+- case WMI_SERVICE_PHYERR:
+- return "PHYERR";
+- case WMI_SERVICE_BCN_FILTER:
+- return "BCN_FILTER";
+- case WMI_SERVICE_RTT:
+- return "RTT";
+- case WMI_SERVICE_RATECTRL:
+- return "RATECTRL";
+- case WMI_SERVICE_WOW:
+- return "WOW";
+- case WMI_SERVICE_RATECTRL_CACHE:
+- return "RATECTRL CACHE";
+- case WMI_SERVICE_IRAM_TIDS:
+- return "IRAM TIDS";
+- case WMI_SERVICE_ARPNS_OFFLOAD:
+- return "ARPNS_OFFLOAD";
+- case WMI_SERVICE_NLO:
+- return "NLO";
+- case WMI_SERVICE_GTK_OFFLOAD:
+- return "GTK_OFFLOAD";
+- case WMI_SERVICE_SCAN_SCH:
+- return "SCAN_SCH";
+- case WMI_SERVICE_CSA_OFFLOAD:
+- return "CSA_OFFLOAD";
+- case WMI_SERVICE_CHATTER:
+- return "CHATTER";
+- case WMI_SERVICE_COEX_FREQAVOID:
+- return "COEX_FREQAVOID";
+- case WMI_SERVICE_PACKET_POWER_SAVE:
+- return "PACKET_POWER_SAVE";
+- case WMI_SERVICE_FORCE_FW_HANG:
+- return "FORCE FW HANG";
+- case WMI_SERVICE_GPIO:
+- return "GPIO";
+- case WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM:
+- return "MODULATED DTIM";
+- case WMI_STA_UAPSD_BASIC_AUTO_TRIG:
+- return "BASIC UAPSD";
+- case WMI_STA_UAPSD_VAR_AUTO_TRIG:
+- return "VAR UAPSD";
+- case WMI_SERVICE_STA_KEEP_ALIVE:
+- return "STA KEEP ALIVE";
+- case WMI_SERVICE_TX_ENCAP:
+- return "TX ENCAP";
++ SVCSTR(WMI_SERVICE_BEACON_OFFLOAD);
++ SVCSTR(WMI_SERVICE_SCAN_OFFLOAD);
++ SVCSTR(WMI_SERVICE_ROAM_OFFLOAD);
++ SVCSTR(WMI_SERVICE_BCN_MISS_OFFLOAD);
++ SVCSTR(WMI_SERVICE_STA_PWRSAVE);
++ SVCSTR(WMI_SERVICE_STA_ADVANCED_PWRSAVE);
++ SVCSTR(WMI_SERVICE_AP_UAPSD);
++ SVCSTR(WMI_SERVICE_AP_DFS);
++ SVCSTR(WMI_SERVICE_11AC);
++ SVCSTR(WMI_SERVICE_BLOCKACK);
++ SVCSTR(WMI_SERVICE_PHYERR);
++ SVCSTR(WMI_SERVICE_BCN_FILTER);
++ SVCSTR(WMI_SERVICE_RTT);
++ SVCSTR(WMI_SERVICE_RATECTRL);
++ SVCSTR(WMI_SERVICE_WOW);
++ SVCSTR(WMI_SERVICE_RATECTRL_CACHE);
++ SVCSTR(WMI_SERVICE_IRAM_TIDS);
++ SVCSTR(WMI_SERVICE_ARPNS_OFFLOAD);
++ SVCSTR(WMI_SERVICE_NLO);
++ SVCSTR(WMI_SERVICE_GTK_OFFLOAD);
++ SVCSTR(WMI_SERVICE_SCAN_SCH);
++ SVCSTR(WMI_SERVICE_CSA_OFFLOAD);
++ SVCSTR(WMI_SERVICE_CHATTER);
++ SVCSTR(WMI_SERVICE_COEX_FREQAVOID);
++ SVCSTR(WMI_SERVICE_PACKET_POWER_SAVE);
++ SVCSTR(WMI_SERVICE_FORCE_FW_HANG);
++ SVCSTR(WMI_SERVICE_GPIO);
++ SVCSTR(WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM);
++ SVCSTR(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG);
++ SVCSTR(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG);
++ SVCSTR(WMI_SERVICE_STA_KEEP_ALIVE);
++ SVCSTR(WMI_SERVICE_TX_ENCAP);
++ SVCSTR(WMI_SERVICE_BURST);
++ SVCSTR(WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT);
++ SVCSTR(WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT);
++ SVCSTR(WMI_SERVICE_ROAM_SCAN_OFFLOAD);
++ SVCSTR(WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC);
++ SVCSTR(WMI_SERVICE_EARLY_RX);
++ SVCSTR(WMI_SERVICE_STA_SMPS);
++ SVCSTR(WMI_SERVICE_FWTEST);
++ SVCSTR(WMI_SERVICE_STA_WMMAC);
++ SVCSTR(WMI_SERVICE_TDLS);
++ SVCSTR(WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE);
++ SVCSTR(WMI_SERVICE_ADAPTIVE_OCS);
++ SVCSTR(WMI_SERVICE_BA_SSN_SUPPORT);
++ SVCSTR(WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE);
++ SVCSTR(WMI_SERVICE_WLAN_HB);
++ SVCSTR(WMI_SERVICE_LTE_ANT_SHARE_SUPPORT);
++ SVCSTR(WMI_SERVICE_BATCH_SCAN);
++ SVCSTR(WMI_SERVICE_QPOWER);
++ SVCSTR(WMI_SERVICE_PLMREQ);
++ SVCSTR(WMI_SERVICE_THERMAL_MGMT);
++ SVCSTR(WMI_SERVICE_RMC);
++ SVCSTR(WMI_SERVICE_MHF_OFFLOAD);
++ SVCSTR(WMI_SERVICE_COEX_SAR);
++ SVCSTR(WMI_SERVICE_BCN_TXRATE_OVERRIDE);
++ SVCSTR(WMI_SERVICE_NAN);
++ SVCSTR(WMI_SERVICE_L1SS_STAT);
++ SVCSTR(WMI_SERVICE_ESTIMATE_LINKSPEED);
++ SVCSTR(WMI_SERVICE_OBSS_SCAN);
++ SVCSTR(WMI_SERVICE_TDLS_OFFCHAN);
++ SVCSTR(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA);
++ SVCSTR(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA);
++ SVCSTR(WMI_SERVICE_IBSS_PWRSAVE);
++ SVCSTR(WMI_SERVICE_LPASS);
++ SVCSTR(WMI_SERVICE_EXTSCAN);
++ SVCSTR(WMI_SERVICE_D0WOW);
++ SVCSTR(WMI_SERVICE_HSOFFLOAD);
++ SVCSTR(WMI_SERVICE_ROAM_HO_OFFLOAD);
++ SVCSTR(WMI_SERVICE_RX_FULL_REORDER);
++ SVCSTR(WMI_SERVICE_DHCP_OFFLOAD);
++ SVCSTR(WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT);
++ SVCSTR(WMI_SERVICE_MDNS_OFFLOAD);
++ SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
+ default:
+- return "UNKNOWN SERVICE\n";
++ return NULL;
+ }
++
++#undef SVCSTR
+ }
+
++#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
++ ((svc_id) < (len) && \
++ __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
++ BIT((svc_id)%(sizeof(u32))))
++
++#define SVCMAP(x, y, len) \
++ do { \
++ if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \
++ __set_bit(y, out); \
++ } while (0)
++
++static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out,
++ size_t len)
++{
++ SVCMAP(WMI_10X_SERVICE_BEACON_OFFLOAD,
++ WMI_SERVICE_BEACON_OFFLOAD, len);
++ SVCMAP(WMI_10X_SERVICE_SCAN_OFFLOAD,
++ WMI_SERVICE_SCAN_OFFLOAD, len);
++ SVCMAP(WMI_10X_SERVICE_ROAM_OFFLOAD,
++ WMI_SERVICE_ROAM_OFFLOAD, len);
++ SVCMAP(WMI_10X_SERVICE_BCN_MISS_OFFLOAD,
++ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
++ SVCMAP(WMI_10X_SERVICE_STA_PWRSAVE,
++ WMI_SERVICE_STA_PWRSAVE, len);
++ SVCMAP(WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE,
++ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
++ SVCMAP(WMI_10X_SERVICE_AP_UAPSD,
++ WMI_SERVICE_AP_UAPSD, len);
++ SVCMAP(WMI_10X_SERVICE_AP_DFS,
++ WMI_SERVICE_AP_DFS, len);
++ SVCMAP(WMI_10X_SERVICE_11AC,
++ WMI_SERVICE_11AC, len);
++ SVCMAP(WMI_10X_SERVICE_BLOCKACK,
++ WMI_SERVICE_BLOCKACK, len);
++ SVCMAP(WMI_10X_SERVICE_PHYERR,
++ WMI_SERVICE_PHYERR, len);
++ SVCMAP(WMI_10X_SERVICE_BCN_FILTER,
++ WMI_SERVICE_BCN_FILTER, len);
++ SVCMAP(WMI_10X_SERVICE_RTT,
++ WMI_SERVICE_RTT, len);
++ SVCMAP(WMI_10X_SERVICE_RATECTRL,
++ WMI_SERVICE_RATECTRL, len);
++ SVCMAP(WMI_10X_SERVICE_WOW,
++ WMI_SERVICE_WOW, len);
++ SVCMAP(WMI_10X_SERVICE_RATECTRL_CACHE,
++ WMI_SERVICE_RATECTRL_CACHE, len);
++ SVCMAP(WMI_10X_SERVICE_IRAM_TIDS,
++ WMI_SERVICE_IRAM_TIDS, len);
++ SVCMAP(WMI_10X_SERVICE_BURST,
++ WMI_SERVICE_BURST, len);
++ SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
++ WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len);
++ SVCMAP(WMI_10X_SERVICE_FORCE_FW_HANG,
++ WMI_SERVICE_FORCE_FW_HANG, len);
++ SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
++ WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
++}
++
++static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
++ size_t len)
++{
++ SVCMAP(WMI_MAIN_SERVICE_BEACON_OFFLOAD,
++ WMI_SERVICE_BEACON_OFFLOAD, len);
++ SVCMAP(WMI_MAIN_SERVICE_SCAN_OFFLOAD,
++ WMI_SERVICE_SCAN_OFFLOAD, len);
++ SVCMAP(WMI_MAIN_SERVICE_ROAM_OFFLOAD,
++ WMI_SERVICE_ROAM_OFFLOAD, len);
++ SVCMAP(WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD,
++ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
++ SVCMAP(WMI_MAIN_SERVICE_STA_PWRSAVE,
++ WMI_SERVICE_STA_PWRSAVE, len);
++ SVCMAP(WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE,
++ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
++ SVCMAP(WMI_MAIN_SERVICE_AP_UAPSD,
++ WMI_SERVICE_AP_UAPSD, len);
++ SVCMAP(WMI_MAIN_SERVICE_AP_DFS,
++ WMI_SERVICE_AP_DFS, len);
++ SVCMAP(WMI_MAIN_SERVICE_11AC,
++ WMI_SERVICE_11AC, len);
++ SVCMAP(WMI_MAIN_SERVICE_BLOCKACK,
++ WMI_SERVICE_BLOCKACK, len);
++ SVCMAP(WMI_MAIN_SERVICE_PHYERR,
++ WMI_SERVICE_PHYERR, len);
++ SVCMAP(WMI_MAIN_SERVICE_BCN_FILTER,
++ WMI_SERVICE_BCN_FILTER, len);
++ SVCMAP(WMI_MAIN_SERVICE_RTT,
++ WMI_SERVICE_RTT, len);
++ SVCMAP(WMI_MAIN_SERVICE_RATECTRL,
++ WMI_SERVICE_RATECTRL, len);
++ SVCMAP(WMI_MAIN_SERVICE_WOW,
++ WMI_SERVICE_WOW, len);
++ SVCMAP(WMI_MAIN_SERVICE_RATECTRL_CACHE,
++ WMI_SERVICE_RATECTRL_CACHE, len);
++ SVCMAP(WMI_MAIN_SERVICE_IRAM_TIDS,
++ WMI_SERVICE_IRAM_TIDS, len);
++ SVCMAP(WMI_MAIN_SERVICE_ARPNS_OFFLOAD,
++ WMI_SERVICE_ARPNS_OFFLOAD, len);
++ SVCMAP(WMI_MAIN_SERVICE_NLO,
++ WMI_SERVICE_NLO, len);
++ SVCMAP(WMI_MAIN_SERVICE_GTK_OFFLOAD,
++ WMI_SERVICE_GTK_OFFLOAD, len);
++ SVCMAP(WMI_MAIN_SERVICE_SCAN_SCH,
++ WMI_SERVICE_SCAN_SCH, len);
++ SVCMAP(WMI_MAIN_SERVICE_CSA_OFFLOAD,
++ WMI_SERVICE_CSA_OFFLOAD, len);
++ SVCMAP(WMI_MAIN_SERVICE_CHATTER,
++ WMI_SERVICE_CHATTER, len);
++ SVCMAP(WMI_MAIN_SERVICE_COEX_FREQAVOID,
++ WMI_SERVICE_COEX_FREQAVOID, len);
++ SVCMAP(WMI_MAIN_SERVICE_PACKET_POWER_SAVE,
++ WMI_SERVICE_PACKET_POWER_SAVE, len);
++ SVCMAP(WMI_MAIN_SERVICE_FORCE_FW_HANG,
++ WMI_SERVICE_FORCE_FW_HANG, len);
++ SVCMAP(WMI_MAIN_SERVICE_GPIO,
++ WMI_SERVICE_GPIO, len);
++ SVCMAP(WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
++ WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, len);
++ SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
++ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
++ SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
++ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
++ SVCMAP(WMI_MAIN_SERVICE_STA_KEEP_ALIVE,
++ WMI_SERVICE_STA_KEEP_ALIVE, len);
++ SVCMAP(WMI_MAIN_SERVICE_TX_ENCAP,
++ WMI_SERVICE_TX_ENCAP, len);
++}
+
+-#define WMI_SERVICE_BM_SIZE \
+- ((WMI_MAX_SERVICE + sizeof(u32) - 1)/sizeof(u32))
++#undef SVCMAP
+
+ /* 2 word representation of MAC addr */
+ struct wmi_mac_addr {
+@@ -308,6 +550,8 @@ struct wmi_cmd_map {
+ u32 force_fw_hang_cmdid;
+ u32 gpio_config_cmdid;
+ u32 gpio_output_cmdid;
++ u32 pdev_get_temperature_cmdid;
++ u32 vdev_set_wmm_params_cmdid;
+ };
+
+ /*
+@@ -803,6 +1047,166 @@ enum wmi_10x_event_id {
+ WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1,
+ };
+
++enum wmi_10_2_cmd_id {
++ WMI_10_2_START_CMDID = 0x9000,
++ WMI_10_2_END_CMDID = 0x9FFF,
++ WMI_10_2_INIT_CMDID,
++ WMI_10_2_START_SCAN_CMDID = WMI_10_2_START_CMDID,
++ WMI_10_2_STOP_SCAN_CMDID,
++ WMI_10_2_SCAN_CHAN_LIST_CMDID,
++ WMI_10_2_ECHO_CMDID,
++ WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
++ WMI_10_2_PDEV_SET_CHANNEL_CMDID,
++ WMI_10_2_PDEV_SET_PARAM_CMDID,
++ WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
++ WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
++ WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
++ WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
++ WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
++ WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
++ WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
++ WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
++ WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
++ WMI_10_2_VDEV_CREATE_CMDID,
++ WMI_10_2_VDEV_DELETE_CMDID,
++ WMI_10_2_VDEV_START_REQUEST_CMDID,
++ WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
++ WMI_10_2_VDEV_UP_CMDID,
++ WMI_10_2_VDEV_STOP_CMDID,
++ WMI_10_2_VDEV_DOWN_CMDID,
++ WMI_10_2_VDEV_STANDBY_RESPONSE_CMDID,
++ WMI_10_2_VDEV_RESUME_RESPONSE_CMDID,
++ WMI_10_2_VDEV_SET_PARAM_CMDID,
++ WMI_10_2_VDEV_INSTALL_KEY_CMDID,
++ WMI_10_2_VDEV_SET_DSCP_TID_MAP_CMDID,
++ WMI_10_2_PEER_CREATE_CMDID,
++ WMI_10_2_PEER_DELETE_CMDID,
++ WMI_10_2_PEER_FLUSH_TIDS_CMDID,
++ WMI_10_2_PEER_SET_PARAM_CMDID,
++ WMI_10_2_PEER_ASSOC_CMDID,
++ WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
++ WMI_10_2_PEER_UPDATE_WDS_ENTRY_CMDID,
++ WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
++ WMI_10_2_PEER_MCAST_GROUP_CMDID,
++ WMI_10_2_BCN_TX_CMDID,
++ WMI_10_2_BCN_PRB_TMPL_CMDID,
++ WMI_10_2_BCN_FILTER_RX_CMDID,
++ WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
++ WMI_10_2_MGMT_TX_CMDID,
++ WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
++ WMI_10_2_ADDBA_SEND_CMDID,
++ WMI_10_2_ADDBA_STATUS_CMDID,
++ WMI_10_2_DELBA_SEND_CMDID,
++ WMI_10_2_ADDBA_SET_RESP_CMDID,
++ WMI_10_2_SEND_SINGLEAMSDU_CMDID,
++ WMI_10_2_STA_POWERSAVE_MODE_CMDID,
++ WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
++ WMI_10_2_STA_MIMO_PS_MODE_CMDID,
++ WMI_10_2_DBGLOG_CFG_CMDID,
++ WMI_10_2_PDEV_DFS_ENABLE_CMDID,
++ WMI_10_2_PDEV_DFS_DISABLE_CMDID,
++ WMI_10_2_PDEV_QVIT_CMDID,
++ WMI_10_2_ROAM_SCAN_MODE,
++ WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
++ WMI_10_2_ROAM_SCAN_PERIOD,
++ WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
++ WMI_10_2_ROAM_AP_PROFILE,
++ WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
++ WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
++ WMI_10_2_OFL_SCAN_PERIOD,
++ WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
++ WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
++ WMI_10_2_P2P_GO_SET_BEACON_IE,
++ WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
++ WMI_10_2_AP_PS_PEER_PARAM_CMDID,
++ WMI_10_2_AP_PS_PEER_UAPSD_COEX_CMDID,
++ WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
++ WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
++ WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
++ WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
++ WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
++ WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
++ WMI_10_2_PDEV_SUSPEND_CMDID,
++ WMI_10_2_PDEV_RESUME_CMDID,
++ WMI_10_2_ADD_BCN_FILTER_CMDID,
++ WMI_10_2_RMV_BCN_FILTER_CMDID,
++ WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
++ WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
++ WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
++ WMI_10_2_WOW_ENABLE_CMDID,
++ WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
++ WMI_10_2_RTT_MEASREQ_CMDID,
++ WMI_10_2_RTT_TSF_CMDID,
++ WMI_10_2_RTT_KEEPALIVE_CMDID,
++ WMI_10_2_PDEV_SEND_BCN_CMDID,
++ WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
++ WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
++ WMI_10_2_REQUEST_STATS_CMDID,
++ WMI_10_2_GPIO_CONFIG_CMDID,
++ WMI_10_2_GPIO_OUTPUT_CMDID,
++ WMI_10_2_VDEV_RATEMASK_CMDID,
++ WMI_10_2_PDEV_SMART_ANT_ENABLE_CMDID,
++ WMI_10_2_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
++ WMI_10_2_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
++ WMI_10_2_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
++ WMI_10_2_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
++ WMI_10_2_FORCE_FW_HANG_CMDID,
++ WMI_10_2_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
++ WMI_10_2_PDEV_SET_CTL_TABLE_CMDID,
++ WMI_10_2_PDEV_SET_MIMOGAIN_TABLE_CMDID,
++ WMI_10_2_PDEV_RATEPWR_TABLE_CMDID,
++ WMI_10_2_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
++ WMI_10_2_PDEV_GET_INFO,
++ WMI_10_2_VDEV_GET_INFO,
++ WMI_10_2_VDEV_ATF_REQUEST_CMDID,
++ WMI_10_2_PEER_ATF_REQUEST_CMDID,
++ WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
++ WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1,
++};
++
++enum wmi_10_2_event_id {
++ WMI_10_2_SERVICE_READY_EVENTID = 0x8000,
++ WMI_10_2_READY_EVENTID,
++ WMI_10_2_DEBUG_MESG_EVENTID,
++ WMI_10_2_START_EVENTID = 0x9000,
++ WMI_10_2_END_EVENTID = 0x9FFF,
++ WMI_10_2_SCAN_EVENTID = WMI_10_2_START_EVENTID,
++ WMI_10_2_ECHO_EVENTID,
++ WMI_10_2_UPDATE_STATS_EVENTID,
++ WMI_10_2_INST_RSSI_STATS_EVENTID,
++ WMI_10_2_VDEV_START_RESP_EVENTID,
++ WMI_10_2_VDEV_STANDBY_REQ_EVENTID,
++ WMI_10_2_VDEV_RESUME_REQ_EVENTID,
++ WMI_10_2_VDEV_STOPPED_EVENTID,
++ WMI_10_2_PEER_STA_KICKOUT_EVENTID,
++ WMI_10_2_HOST_SWBA_EVENTID,
++ WMI_10_2_TBTTOFFSET_UPDATE_EVENTID,
++ WMI_10_2_MGMT_RX_EVENTID,
++ WMI_10_2_CHAN_INFO_EVENTID,
++ WMI_10_2_PHYERR_EVENTID,
++ WMI_10_2_ROAM_EVENTID,
++ WMI_10_2_PROFILE_MATCH,
++ WMI_10_2_DEBUG_PRINT_EVENTID,
++ WMI_10_2_PDEV_QVIT_EVENTID,
++ WMI_10_2_WLAN_PROFILE_DATA_EVENTID,
++ WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID,
++ WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID,
++ WMI_10_2_RTT_ERROR_REPORT_EVENTID,
++ WMI_10_2_RTT_KEEPALIVE_EVENTID,
++ WMI_10_2_WOW_WAKEUP_HOST_EVENTID,
++ WMI_10_2_DCS_INTERFERENCE_EVENTID,
++ WMI_10_2_PDEV_TPC_CONFIG_EVENTID,
++ WMI_10_2_GPIO_INPUT_EVENTID,
++ WMI_10_2_PEER_RATECODE_LIST_EVENTID,
++ WMI_10_2_GENERIC_BUFFER_EVENTID,
++ WMI_10_2_MCAST_BUF_RELEASE_EVENTID,
++ WMI_10_2_MCAST_LIST_AGEOUT_EVENTID,
++ WMI_10_2_WDS_PEER_EVENTID,
++ WMI_10_2_PEER_STA_PS_STATECHG_EVENTID,
++ WMI_10_2_PDEV_TEMPERATURE_EVENTID,
++ WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1,
++};
++
|
|
+ enum wmi_phy_mode {
+ MODE_11A = 0, /* 11a Mode */
+ MODE_11G = 1, /* 11b/g Mode */
+@@ -955,7 +1359,6 @@ enum wmi_channel_change_cause {
+ WMI_HT_CAP_RX_STBC | \
+ WMI_HT_CAP_LDPC)
+
+-
+ /*
+ * WMI_VHT_CAP_* these maps to ieee 802.11ac vht capability information
+ * field. The fields not defined here are not supported, or reserved.
+@@ -1076,10 +1479,6 @@ struct wlan_host_mem_req {
+ __le32 num_units;
+ } __packed;
+
+-#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id) \
+- ((((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
+- (1 << ((svc_id)%(sizeof(u32))))) != 0)
+-
+ /*
+ * The following struct holds optional payload for
+ * wmi_service_ready_event,e.g., 11ac pass some of the
+@@ -1093,7 +1492,7 @@ struct wmi_service_ready_event {
+ __le32 phy_capability;
+ /* Maximum number of frag table entries that SW will populate less 1 */
+ __le32 max_frag_entry;
+- __le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
++ __le32 wmi_service_bitmap[16];
+ __le32 num_rf_chains;
+ /*
+ * The following field is only valid for service type
+@@ -1119,11 +1518,11 @@ struct wmi_service_ready_event {
+ * where FW can access this memory directly (or) by DMA.
+ */
+ __le32 num_mem_reqs;
+- struct wlan_host_mem_req mem_reqs[1];
++ struct wlan_host_mem_req mem_reqs[0];
+ } __packed;
+
+ /* This is the definition from 10.X firmware branch */
+-struct wmi_service_ready_event_10x {
++struct wmi_10x_service_ready_event {
+ __le32 sw_version;
+ __le32 abi_version;
+
+@@ -1132,7 +1531,7 @@ struct wmi_service_ready_event_10x {
+
+ /* Maximum number of frag table entries that SW will populate less 1 */
+ __le32 max_frag_entry;
+- __le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
++ __le32 wmi_service_bitmap[16];
+ __le32 num_rf_chains;
+
+ /*
+@@ -1158,10 +1557,9 @@ struct wmi_service_ready_event_10x {
+ */
+ __le32 num_mem_reqs;
+
+- struct wlan_host_mem_req mem_reqs[1];
++ struct wlan_host_mem_req mem_reqs[0];
+ } __packed;
+
+-
+ #define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
+ #define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
+
+@@ -1255,7 +1653,7 @@ struct wmi_resource_config {
+ */
+ __le32 rx_decap_mode;
+
+- /* what is the maximum scan requests than can be queued */
++ /* what is the maximum number of scan requests that can be queued */
+ __le32 scan_max_pending_reqs;
+
+ /* maximum VDEV that could use BMISS offload */
+@@ -1440,7 +1838,7 @@ struct wmi_resource_config_10x {
+ */
+ __le32 rx_decap_mode;
+
+- /* what is the maximum scan requests than can be queued */
++ /* what is the maximum number of scan requests that can be queued */
+ __le32 scan_max_pending_reqs;
+
+ /* maximum VDEV that could use BMISS offload */
+@@ -1551,6 +1949,21 @@ struct wmi_resource_config_10x {
+ __le32 max_frag_entries;
+ } __packed;
+
++enum wmi_10_2_feature_mask {
++ WMI_10_2_RX_BATCH_MODE = BIT(0),
++ WMI_10_2_ATF_CONFIG = BIT(1),
++};
++
++struct wmi_resource_config_10_2 {
++ struct wmi_resource_config_10x common;
++ __le32 max_peer_ext_stats;
++ __le32 smart_ant_cap; /* 0-disable, 1-enable */
++ __le32 bk_min_free;
++ __le32 be_min_free;
++ __le32 vi_min_free;
++ __le32 vo_min_free;
++ __le32 feature_mask;
++} __packed;
+
+ #define NUM_UNITS_IS_NUM_VDEVS 0x1
+ #define NUM_UNITS_IS_NUM_PEERS 0x2
+@@ -1565,34 +1978,39 @@ struct host_memory_chunk {
+ __le32 size;
+ } __packed;
+
++struct wmi_host_mem_chunks {
++ __le32 count;
++ /* some fw revisions require at least 1 chunk regardless of count */
++ struct host_memory_chunk items[1];
++} __packed;
++
+ struct wmi_init_cmd {
+ struct wmi_resource_config resource_config;
+- __le32 num_host_mem_chunks;
+-
+- /*
+- * variable number of host memory chunks.
+- * This should be the last element in the structure
+- */
+- struct host_memory_chunk host_mem_chunks[1];
++ struct wmi_host_mem_chunks mem_chunks;
+ } __packed;
+
+ /* _10x stucture is from 10.X FW API */
+ struct wmi_init_cmd_10x {
+ struct wmi_resource_config_10x resource_config;
+- __le32 num_host_mem_chunks;
++ struct wmi_host_mem_chunks mem_chunks;
++} __packed;
+
+- /*
+- * variable number of host memory chunks.
+- * This should be the last element in the structure
+- */
+- struct host_memory_chunk host_mem_chunks[1];
++struct wmi_init_cmd_10_2 {
++ struct wmi_resource_config_10_2 resource_config;
++ struct wmi_host_mem_chunks mem_chunks;
++} __packed;
++
++struct wmi_chan_list_entry {
++ __le16 freq;
++ u8 phy_mode; /* valid for 10.2 only */
++ u8 reserved;
+ } __packed;
+
+ /* TLV for channel list */
+ struct wmi_chan_list {
+ __le32 tag; /* WMI_CHAN_LIST_TAG */
+ __le32 num_chan;
+- __le32 channel_list[0];
++ struct wmi_chan_list_entry channel_list[0];
+ } __packed;
+
+ struct wmi_bssid_list {
+@@ -1629,6 +2047,11 @@ struct wmi_ssid_list {
+ #define WLAN_SCAN_PARAMS_MAX_BSSID 4
+ #define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+
++/* Values lower than this may be refused by some firmware revisions with a scan
++ * completion with a timed-out reason.
++ */
++#define WMI_SCAN_CHAN_MIN_TIME_MSEC 40
++
+ /* Scan priority numbers must be sequential, starting with 0 */
+ enum wmi_scan_priority {
+ WMI_SCAN_PRIORITY_VERY_LOW = 0,
+@@ -1639,7 +2062,7 @@ enum wmi_scan_priority {
+ WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */
+ };
+
+-struct wmi_start_scan_cmd {
++struct wmi_start_scan_common {
+ /* Scan ID */
+ __le32 scan_id;
+ /* Scan requestor ID */
+@@ -1697,97 +2120,26 @@ struct wmi_start_scan_cmd {
+ __le32 probe_delay;
+ /* Scan control flags */
+ __le32 scan_ctrl_flags;
+-
+- /* Burst duration time in msecs */
+- __le32 burst_duration;
+- /*
+- * TLV (tag length value ) paramerters follow the scan_cmd structure.
+- * TLV can contain channel list, bssid list, ssid list and
+- * ie. the TLV tags are defined above;
+- */
+ } __packed;
+
+-/* This is the definition from 10.X firmware branch */
+-struct wmi_start_scan_cmd_10x {
+- /* Scan ID */
+- __le32 scan_id;
+-
+- /* Scan requestor ID */
+- __le32 scan_req_id;
+-
+- /* VDEV id(interface) that is requesting scan */
+- __le32 vdev_id;
+-
+- /* Scan Priority, input to scan scheduler */
+- __le32 scan_priority;
+-
+- /* Scan events subscription */
+- __le32 notify_scan_events;
+-
+- /* dwell time in msec on active channels */
+- __le32 dwell_time_active;
+-
+- /* dwell time in msec on passive channels */
+- __le32 dwell_time_passive;
+-
+- /*
+- * min time in msec on the BSS channel,only valid if atleast one
+- * VDEV is active
+- */
+- __le32 min_rest_time;
+-
+- /*
+- * max rest time in msec on the BSS channel,only valid if at least
+- * one VDEV is active
+- */
+- /*
+- * the scanner will rest on the bss channel at least min_rest_time
+- * after min_rest_time the scanner will start checking for tx/rx
+- * activity on all VDEVs. if there is no activity the scanner will
+- * switch to off channel. if there is activity the scanner will let
+- * the radio on the bss channel until max_rest_time expires.at
+- * max_rest_time scanner will switch to off channel irrespective of
+- * activity. activity is determined by the idle_time parameter.
+- */
+- __le32 max_rest_time;
+-
+- /*
+- * time before sending next set of probe requests.
+- * The scanner keeps repeating probe requests transmission with
+- * period specified by repeat_probe_time.
+- * The number of probe requests specified depends on the ssid_list
+- * and bssid_list
+- */
+- __le32 repeat_probe_time;
+-
+- /* time in msec between 2 consequetive probe requests with in a set. */
+- __le32 probe_spacing_time;
+-
+- /*
+- * data inactivity time in msec on bss channel that will be used by
+- * scanner for measuring the inactivity.
++struct wmi_start_scan_tlvs {
++ /* TLV parameters. These include channel list, ssid list, bssid list,
++ * extra ies.
+ */
+- __le32 idle_time;
+-
+- /* maximum time in msec allowed for scan */
+- __le32 max_scan_time;
+-
+- /*
+- * delay in msec before sending first probe request after switching
+- * to a channel
+- */
+- __le32 probe_delay;
+-
+- /* Scan control flags */
+- __le32 scan_ctrl_flags;
++ u8 tlvs[0];
++} __packed;
+
+- /*
+- * TLV (tag length value ) paramerters follow the scan_cmd structure.
+- * TLV can contain channel list, bssid list, ssid list and
+- * ie. the TLV tags are defined above;
+- */
++struct wmi_start_scan_cmd {
++ struct wmi_start_scan_common common;
++ __le32 burst_duration_ms;
++ struct wmi_start_scan_tlvs tlvs;
+ } __packed;
+
++/* This is the definition from 10.X firmware branch */
++struct wmi_10x_start_scan_cmd {
++ struct wmi_start_scan_common common;
++ struct wmi_start_scan_tlvs tlvs;
++} __packed;
+
+ struct wmi_ssid_arg {
+ int len;
+@@ -1821,7 +2173,7 @@ struct wmi_start_scan_arg {
+ u32 n_bssids;
+
+ u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
+- u32 channels[64];
++ u16 channels[64];
+ struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
+ struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
+ };
+@@ -1849,7 +2201,6 @@ struct wmi_start_scan_arg {
+ /* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */
+ #define WMI_SCAN_CLASS_MASK 0xFF000000
+
+-
+ enum wmi_stop_scan_type {
+ WMI_SCAN_STOP_ONE = 0x00000000, /* stop by scan_id */
+ WMI_SCAN_STOP_VDEV_ALL = 0x01000000, /* stop by vdev_id */
+@@ -1973,100 +2324,31 @@ struct wmi_mgmt_rx_event_v2 {
+ #define PHY_ERROR_FALSE_RADAR_EXT 0x24
+ #define PHY_ERROR_RADAR 0x05
+
+-struct wmi_single_phyerr_rx_hdr {
+- /* TSF timestamp */
++struct wmi_phyerr {
+ __le32 tsf_timestamp;
+-
+- /*
+- * Current freq1, freq2
+- *
+- * [7:0]: freq1[lo]
+- * [15:8] : freq1[hi]
+- * [23:16]: freq2[lo]
+- * [31:24]: freq2[hi]
+- */
+ __le16 freq1;
+ __le16 freq2;
+-
+- /*
+- * Combined RSSI over all chains and channel width for this PHY error
+- *
+- * [7:0]: RSSI combined
+- * [15:8]: Channel width (MHz)
+- * [23:16]: PHY error code
+- * [24:16]: reserved (future use)
+- */
+ u8 rssi_combined;
+ u8 chan_width_mhz;
+ u8 phy_err_code;
+ u8 rsvd0;
+-
+- /*
+- * RSSI on chain 0 through 3
+- *
+- * This is formatted the same as the PPDU_START RX descriptor
+- * field:
+- *
+- * [7:0]: pri20
+- * [15:8]: sec20
+- * [23:16]: sec40
+- * [31:24]: sec80
+- */
+-
+- __le32 rssi_chain0;
+- __le32 rssi_chain1;
+- __le32 rssi_chain2;
+- __le32 rssi_chain3;
+-
+- /*
+- * Last calibrated NF value for chain 0 through 3
+- *
+- * nf_list_1:
+- *
+- * + [15:0] - chain 0
+- * + [31:16] - chain 1
+- *
+- * nf_list_2:
+- *
+- * + [15:0] - chain 2
+- * + [31:16] - chain 3
+- */
+- __le32 nf_list_1;
+- __le32 nf_list_2;
+-
+-
+- /* Length of the frame */
++ __le32 rssi_chains[4];
++ __le16 nf_chains[4];
+ __le32 buf_len;
++ u8 buf[0];
+ } __packed;
+
+-struct wmi_single_phyerr_rx_event {
+- /* Phy error event header */
+- struct wmi_single_phyerr_rx_hdr hdr;
+- /* frame buffer */
+- u8 bufp[0];
+-} __packed;
+-
+-struct wmi_comb_phyerr_rx_hdr {
+- /* Phy error phy error count */
+- __le32 num_phyerr_events;
++struct wmi_phyerr_event {
++ __le32 num_phyerrs;
+ __le32 tsf_l32;
+ __le32 tsf_u32;
+-} __packed;
+-
+-struct wmi_comb_phyerr_rx_event {
+- /* Phy error phy error count */
+- struct wmi_comb_phyerr_rx_hdr hdr;
+- /*
+- * frame buffer - contains multiple payloads in the order:
+- * header - payload, header - payload...
+- * (The header is of type: wmi_single_phyerr_rx_hdr)
+- */
+- u8 bufp[0];
++ struct wmi_phyerr phyerrs[0];
+ } __packed;
+
+ #define PHYERR_TLV_SIG 0xBB
+ #define PHYERR_TLV_TAG_SEARCH_FFT_REPORT 0xFB
+ #define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY 0xF8
++#define PHYERR_TLV_TAG_SPECTRAL_SUMMARY_REPORT 0xF9
+
+ struct phyerr_radar_report {
+ __le32 reg0; /* RADAR_REPORT_REG0_* */
+@@ -2135,7 +2417,6 @@ struct phyerr_fft_report {
+ #define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_MASK 0x000000FF
+ #define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_LSB 0
+
+-
+ struct phyerr_tlv {
+ __le16 len;
+ u8 tag;
+@@ -2166,7 +2447,6 @@ struct wmi_echo_cmd {
+ __le32 value;
+ } __packed;
+
+-
+ struct wmi_pdev_set_regdomain_cmd {
+ __le32 reg_domain;
+ __le32 reg_domain_2G;
+@@ -2215,7 +2495,6 @@ struct wmi_pdev_set_quiet_cmd {
+ __le32 enabled;
+ } __packed;
+
+-
+ /*
+ * 802.11g protection mode.
+ */
+@@ -2318,14 +2597,15 @@ struct wmi_pdev_param_map {
+ u32 fast_channel_reset;
+ u32 burst_dur;
+ u32 burst_enable;
++ u32 cal_period;
+ };
+
+ #define WMI_PDEV_PARAM_UNSUPPORTED 0
+
+ enum wmi_pdev_param {
+- /* TX chian mask */
++ /* TX chain mask */
+ WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+- /* RX chian mask */
++ /* RX chain mask */
+ WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ /* TX power limit for 2G Radio */
+ WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+@@ -2515,6 +2795,22 @@ enum wmi_10x_pdev_param {
+ WMI_10X_PDEV_PARAM_BURST_DUR,
+ /* Set Bursting Enable*/
+ WMI_10X_PDEV_PARAM_BURST_ENABLE,
++
++ /* following are available as of firmware 10.2 */
++ WMI_10X_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
++ WMI_10X_PDEV_PARAM_IGMPMLD_OVERRIDE,
++ WMI_10X_PDEV_PARAM_IGMPMLD_TID,
++ WMI_10X_PDEV_PARAM_ANTENNA_GAIN,
++ WMI_10X_PDEV_PARAM_RX_DECAP_MODE,
++ WMI_10X_PDEV_PARAM_RX_FILTER,
++ WMI_10X_PDEV_PARAM_SET_MCAST_TO_UCAST_TID,
++ WMI_10X_PDEV_PARAM_PROXY_STA_MODE,
++ WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_MODE,
++ WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
++ WMI_10X_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
++ WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
++ WMI_10X_PDEV_PARAM_RTS_FIXED_RATE,
++ WMI_10X_PDEV_PARAM_CAL_PERIOD
+ };
+
+ struct wmi_pdev_set_param_cmd {
+@@ -2522,6 +2818,9 @@ struct wmi_pdev_set_param_cmd {
+ __le32 param_value;
+ } __packed;
+
++/* valid period is 1 ~ 60000ms, unit in millisecond */
++#define WMI_PDEV_PARAM_CAL_PERIOD_MAX 60000
++
+ struct wmi_pdev_get_tpc_config_cmd {
+ /* parameter */
+ __le32 param;
+@@ -2565,11 +2864,6 @@ enum wmi_tp_scale {
+ WMI_TP_SCALE_SIZE = 5, /* max num of enum */
+ };
+
+-struct wmi_set_channel_cmd {
+- /* channel (only frequency and mode info are used) */
+- struct wmi_channel chan;
+-} __packed;
+-
+ struct wmi_pdev_chanlist_update_event {
+ /* number of channels */
+ __le32 num_chan;
+@@ -2600,6 +2894,10 @@ struct wmi_pdev_set_channel_cmd {
+ struct wmi_channel chan;
+ } __packed;
+
++struct wmi_pdev_pktlog_enable_cmd {
++ __le32 ev_bitmap;
++} __packed;
++
+ /* Customize the DSCP (bit) to TID (0-7) mapping for QOS */
+ #define WMI_DSCP_MAP_MAX (64)
+ struct wmi_pdev_set_dscp_tid_map_cmd {
+@@ -2642,14 +2940,14 @@ struct wmi_wmm_params_arg {
+ u32 no_ack;
+ };
+
+-struct wmi_pdev_set_wmm_params_arg {
++struct wmi_wmm_params_all_arg {
+ struct wmi_wmm_params_arg ac_be;
+ struct wmi_wmm_params_arg ac_bk;
+ struct wmi_wmm_params_arg ac_vi;
+ struct wmi_wmm_params_arg ac_vo;
+ };
+
+-struct wal_dbg_tx_stats {
++struct wmi_pdev_stats_tx {
+ /* Num HTT cookies queued to dispatch list */
+ __le32 comp_queued;
+
+@@ -2719,7 +3017,7 @@ struct wal_dbg_tx_stats {
+ __le32 txop_ovf;
+ } __packed;
+
+-struct wal_dbg_rx_stats {
++struct wmi_pdev_stats_rx {
+ /* Cnts any change in ring routing mid-ppdu */
+ __le32 mid_ppdu_route_change;
+
+@@ -2753,20 +3051,18 @@ struct wal_dbg_rx_stats {
+ __le32 mpdu_errs;
+ } __packed;
+
+-struct wal_dbg_peer_stats {
++struct wmi_pdev_stats_peer {
+ /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
+ __le32 dummy;
+ } __packed;
+
+-struct wal_dbg_stats {
+- struct wal_dbg_tx_stats tx;
+- struct wal_dbg_rx_stats rx;
+- struct wal_dbg_peer_stats peer;
+-} __packed;
+-
+ enum wmi_stats_id {
+- WMI_REQUEST_PEER_STAT = 0x01,
+- WMI_REQUEST_AP_STAT = 0x02
++ WMI_STAT_PEER = BIT(0),
++ WMI_STAT_AP = BIT(1),
++ WMI_STAT_PDEV = BIT(2),
++ WMI_STAT_VDEV = BIT(3),
++ WMI_STAT_BCNFLT = BIT(4),
++ WMI_STAT_VDEV_RATE = BIT(5),
+ };
+
+ struct wlan_inst_rssi_args {
+@@ -2801,7 +3097,7 @@ struct wmi_pdev_suspend_cmd {
+ } __packed;
+
+ struct wmi_stats_event {
+- __le32 stats_id; /* %WMI_REQUEST_ */
++ __le32 stats_id; /* WMI_STAT_ */
+ /*
+ * number of pdev stats event structures
+ * (wmi_pdev_stats) 0 or 1
+@@ -2830,30 +3126,38 @@ struct wmi_stats_event {
+ u8 data[0];
+ } __packed;
+
++struct wmi_10_2_stats_event {
++ __le32 stats_id; /* %WMI_REQUEST_ */
++ __le32 num_pdev_stats;
++ __le32 num_pdev_ext_stats;
++ __le32 num_vdev_stats;
++ __le32 num_peer_stats;
++ __le32 num_bcnflt_stats;
++ u8 data[0];
++} __packed;
++
+ /*
+ * PDEV statistics
+ * TODO: add all PDEV stats here
+ */
+-struct wmi_pdev_stats_old {
+- __le32 chan_nf; /* Channel noise floor */
+- __le32 tx_frame_count; /* TX frame count */
+- __le32 rx_frame_count; /* RX frame count */
+- __le32 rx_clear_count; /* rx clear count */
+- __le32 cycle_count; /* cycle count */
+- __le32 phy_err_count; /* Phy error count */
+- __le32 chan_tx_pwr; /* channel tx power */
+- struct wal_dbg_stats wal; /* WAL dbg stats */
+-} __packed;
+-
+-struct wmi_pdev_stats_10x {
+- __le32 chan_nf; /* Channel noise floor */
+- __le32 tx_frame_count; /* TX frame count */
+- __le32 rx_frame_count; /* RX frame count */
+- __le32 rx_clear_count; /* rx clear count */
+- __le32 cycle_count; /* cycle count */
+- __le32 phy_err_count; /* Phy error count */
+- __le32 chan_tx_pwr; /* channel tx power */
+- struct wal_dbg_stats wal; /* WAL dbg stats */
++struct wmi_pdev_stats_base {
++ __le32 chan_nf;
++ __le32 tx_frame_count;
++ __le32 rx_frame_count;
++ __le32 rx_clear_count;
++ __le32 cycle_count;
++ __le32 phy_err_count;
++ __le32 chan_tx_pwr;
++} __packed;
++
++struct wmi_pdev_stats {
++ struct wmi_pdev_stats_base base;
++ struct wmi_pdev_stats_tx tx;
++ struct wmi_pdev_stats_rx rx;
++ struct wmi_pdev_stats_peer peer;
++} __packed;
++
++struct wmi_pdev_stats_extra {
+ __le32 ack_rx_bad;
+ __le32 rts_bad;
+ __le32 rts_good;
+@@ -2862,6 +3166,30 @@ struct wmi_pdev_stats_10x {
+ __le32 mib_int_count;
+ } __packed;
+
++struct wmi_10x_pdev_stats {
++ struct wmi_pdev_stats_base base;
++ struct wmi_pdev_stats_tx tx;
++ struct wmi_pdev_stats_rx rx;
++ struct wmi_pdev_stats_peer peer;
++ struct wmi_pdev_stats_extra extra;
++} __packed;
++
++struct wmi_pdev_stats_mem {
++ __le32 dram_free;
++ __le32 iram_free;
++} __packed;
++
++struct wmi_10_2_pdev_stats {
++ struct wmi_pdev_stats_base base;
++ struct wmi_pdev_stats_tx tx;
++ __le32 mc_drop;
++ struct wmi_pdev_stats_rx rx;
++ __le32 pdev_rx_timeout;
++ struct wmi_pdev_stats_mem mem;
++ struct wmi_pdev_stats_peer peer;
++ struct wmi_pdev_stats_extra extra;
++} __packed;
++
+ /*
+ * VDEV statistics
+ * TODO: add all VDEV stats here
+@@ -2874,19 +3202,43 @@ struct wmi_vdev_stats {
+ * peer statistics.
+ * TODO: add more stats
+ */
+-struct wmi_peer_stats_old {
++struct wmi_peer_stats {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_rssi;
+ __le32 peer_tx_rate;
+ } __packed;
+
+-struct wmi_peer_stats_10x {
+- struct wmi_mac_addr peer_macaddr;
+- __le32 peer_rssi;
+- __le32 peer_tx_rate;
++struct wmi_10x_peer_stats {
++ struct wmi_peer_stats old;
+ __le32 peer_rx_rate;
+ } __packed;
+
++struct wmi_10_2_peer_stats {
++ struct wmi_peer_stats old;
++ __le32 peer_rx_rate;
++ __le32 current_per;
++ __le32 retries;
++ __le32 tx_rate_count;
++ __le32 max_4ms_frame_len;
++ __le32 total_sub_frames;
++ __le32 tx_bytes;
++ __le32 num_pkt_loss_overflow[4];
++ __le32 num_pkt_loss_excess_retry[4];
++} __packed;
++
++struct wmi_10_2_4_peer_stats {
++ struct wmi_10_2_peer_stats common;
++ __le32 unknown_value; /* FIXME: what is this word? */
++} __packed;
++
++struct wmi_10_2_pdev_ext_stats {
++ __le32 rx_rssi_comb;
++ __le32 rx_rssi[4];
++ __le32 rx_mcs[10];
++ __le32 tx_mcs[10];
++ __le32 ack_rssi;
++} __packed;
++
+ struct wmi_vdev_create_cmd {
+ __le32 vdev_id;
+ __le32 vdev_type;
+@@ -3387,8 +3739,21 @@ enum wmi_10x_vdev_param {
+ WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+
+ WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
++
++ /* following are available as of firmware 10.2 */
++ WMI_10X_VDEV_PARAM_TX_ENCAP_TYPE,
++ WMI_10X_VDEV_PARAM_CABQ_MAXDUR,
++ WMI_10X_VDEV_PARAM_MFPTEST_SET,
++ WMI_10X_VDEV_PARAM_RTS_FIXED_RATE,
++ WMI_10X_VDEV_PARAM_VHT_SGIMASK,
++ WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
+ };
+
++#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
++#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
++#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
++#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
++
+ /* slot time long */
+ #define WMI_VDEV_SLOT_TIME_LONG 0x1
+ /* slot time short */
+@@ -3444,6 +3809,98 @@ struct wmi_vdev_simple_event {
+ /* unsupported VDEV combination */
+ #define WMI_INIFIED_VDEV_START_RESPONSE_NOT_SUPPORTED 0x2
+
++/* TODO: please add more comments if you have in-depth information */
++struct wmi_vdev_spectral_conf_cmd {
++ __le32 vdev_id;
++
++ /* number of fft samples to send (0 for infinite) */
++ __le32 scan_count;
++ __le32 scan_period;
++ __le32 scan_priority;
++
++ /* number of bins in the FFT: 2^(fft_size - bin_scale) */
++ __le32 scan_fft_size;
++ __le32 scan_gc_ena;
++ __le32 scan_restart_ena;
++ __le32 scan_noise_floor_ref;
++ __le32 scan_init_delay;
++ __le32 scan_nb_tone_thr;
++ __le32 scan_str_bin_thr;
++ __le32 scan_wb_rpt_mode;
++ __le32 scan_rssi_rpt_mode;
++ __le32 scan_rssi_thr;
++ __le32 scan_pwr_format;
++
++ /* rpt_mode: Format of FFT report to software for spectral scan
++ * triggered FFTs:
++ * 0: No FFT report (only spectral scan summary report)
++ * 1: 2-dword summary of metrics for each completed FFT + spectral
++ * scan summary report
++ * 2: 2-dword summary of metrics for each completed FFT +
++ * 1x- oversampled bins(in-band) per FFT + spectral scan summary
++ * report
++ * 3: 2-dword summary of metrics for each completed FFT +
++ * 2x- oversampled bins (all) per FFT + spectral scan summary
++ */
++ __le32 scan_rpt_mode;
++ __le32 scan_bin_scale;
++ __le32 scan_dbm_adj;
++ __le32 scan_chn_mask;
++} __packed;
++
++struct wmi_vdev_spectral_conf_arg {
++ u32 vdev_id;
++ u32 scan_count;
++ u32 scan_period;
++ u32 scan_priority;
++ u32 scan_fft_size;
++ u32 scan_gc_ena;
++ u32 scan_restart_ena;
++ u32 scan_noise_floor_ref;
++ u32 scan_init_delay;
++ u32 scan_nb_tone_thr;
++ u32 scan_str_bin_thr;
++ u32 scan_wb_rpt_mode;
++ u32 scan_rssi_rpt_mode;
++ u32 scan_rssi_thr;
++ u32 scan_pwr_format;
++ u32 scan_rpt_mode;
++ u32 scan_bin_scale;
++ u32 scan_dbm_adj;
++ u32 scan_chn_mask;
++};
++
++#define WMI_SPECTRAL_ENABLE_DEFAULT 0
++#define WMI_SPECTRAL_COUNT_DEFAULT 0
++#define WMI_SPECTRAL_PERIOD_DEFAULT 35
++#define WMI_SPECTRAL_PRIORITY_DEFAULT 1
++#define WMI_SPECTRAL_FFT_SIZE_DEFAULT 7
++#define WMI_SPECTRAL_GC_ENA_DEFAULT 1
++#define WMI_SPECTRAL_RESTART_ENA_DEFAULT 0
++#define WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT -96
++#define WMI_SPECTRAL_INIT_DELAY_DEFAULT 80
++#define WMI_SPECTRAL_NB_TONE_THR_DEFAULT 12
++#define WMI_SPECTRAL_STR_BIN_THR_DEFAULT 8
++#define WMI_SPECTRAL_WB_RPT_MODE_DEFAULT 0
++#define WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT 0
++#define WMI_SPECTRAL_RSSI_THR_DEFAULT 0xf0
++#define WMI_SPECTRAL_PWR_FORMAT_DEFAULT 0
++#define WMI_SPECTRAL_RPT_MODE_DEFAULT 2
++#define WMI_SPECTRAL_BIN_SCALE_DEFAULT 1
++#define WMI_SPECTRAL_DBM_ADJ_DEFAULT 1
++#define WMI_SPECTRAL_CHN_MASK_DEFAULT 1
++
++struct wmi_vdev_spectral_enable_cmd {
++ __le32 vdev_id;
++ __le32 trigger_cmd;
++ __le32 enable_cmd;
++} __packed;
++
++#define WMI_SPECTRAL_TRIGGER_CMD_TRIGGER 1
++#define WMI_SPECTRAL_TRIGGER_CMD_CLEAR 2
++#define WMI_SPECTRAL_ENABLE_CMD_ENABLE 1
++#define WMI_SPECTRAL_ENABLE_CMD_DISABLE 2
++
+ /* Beacon processing related command and event structures */
+ struct wmi_bcn_tx_hdr {
+ __le32 vdev_id;
+@@ -3470,6 +3927,11 @@ enum wmi_bcn_tx_ref_flags {
+ WMI_BCN_TX_REF_FLAG_DELIVER_CAB = 0x2,
+ };
+
++/* TODO: It is unclear why "no antenna" works while any other seemingly valid
++ * chainmask yields no beacons on the air at all.
++ */
++#define WMI_BCN_TX_REF_DEF_ANTENNA 0
++
+ struct wmi_bcn_tx_ref_cmd {
+ __le32 vdev_id;
+ __le32 data_len;
+@@ -3481,6 +3943,8 @@ struct wmi_bcn_tx_ref_cmd {
+ __le32 frame_control;
+ /* to control CABQ traffic: WMI_BCN_TX_REF_FLAG_ */
+ __le32 flags;
++ /* introduced in 10.2 */
++ __le32 antenna_mask;
+ } __packed;
+
+ /* Beacon filter */
+@@ -3633,6 +4097,13 @@ enum wmi_sta_ps_param_pspoll_count {
+ * Values greater than 0 indicate the maximum numer of PS-Poll frames
+ * FW will send before waking up.
+ */
++
++ /* When u-APSD is enabled the firmware will be very reluctant to exit
++ * STA PS. This could result in very poor Rx performance with STA doing
++ * PS-Poll for each and every buffered frame. This value is a bit
++ * arbitrary.
++ */
++ WMI_STA_PS_PSPOLL_COUNT_UAPSD = 3,
+ };
+
+ /*
+@@ -3658,6 +4129,30 @@ enum wmi_sta_ps_param_uapsd {
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+ };
+
++#define WMI_STA_UAPSD_MAX_INTERVAL_MSEC UINT_MAX
++
++struct wmi_sta_uapsd_auto_trig_param {
++ __le32 wmm_ac;
++ __le32 user_priority;
++ __le32 service_interval;
++ __le32 suspend_interval;
++ __le32 delay_interval;
++};
++
++struct wmi_sta_uapsd_auto_trig_cmd_fixed_param {
++ __le32 vdev_id;
++ struct wmi_mac_addr peer_macaddr;
++ __le32 num_ac;
++};
++
++struct wmi_sta_uapsd_auto_trig_arg {
++ u32 wmm_ac;
++ u32 user_priority;
++ u32 service_interval;
++ u32 suspend_interval;
++ u32 delay_interval;
++};
++
+ enum wmi_sta_powersave_param {
+ /*
+ * Controls how frames are retrievd from AP while STA is sleeping
+@@ -3823,7 +4318,7 @@ struct wmi_bcn_info {
+
+ struct wmi_host_swba_event {
+ __le32 vdev_map;
+- struct wmi_bcn_info bcn_info[1];
++ struct wmi_bcn_info bcn_info[0];
+ } __packed;
+
+ #define WMI_MAX_AP_VDEV 16
+@@ -3833,7 +4328,6 @@ struct wmi_tbtt_offset_event {
+ __le32 tbttoffset_list[WMI_MAX_AP_VDEV];
+ } __packed;
+
+-
+ struct wmi_peer_create_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+@@ -3951,7 +4445,8 @@ enum wmi_peer_param {
+ WMI_PEER_AUTHORIZE = 0x3,
+ WMI_PEER_CHAN_WIDTH = 0x4,
+ WMI_PEER_NSS = 0x5,
+- WMI_PEER_USE_4ADDR = 0x6
++ WMI_PEER_USE_4ADDR = 0x6,
++ WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */
+ };
+
+ struct wmi_peer_set_param_cmd {
+@@ -4029,7 +4524,7 @@ struct wmi_peer_set_q_empty_callback_cmd
+ #define WMI_PEER_SPATIAL_MUX 0x00200000
+ #define WMI_PEER_VHT 0x02000000
+ #define WMI_PEER_80MHZ 0x04000000
+-#define WMI_PEER_PMF 0x08000000
++#define WMI_PEER_VHT_2G 0x08000000
+
+ /*
+ * Peer rate capabilities.
+@@ -4053,7 +4548,7 @@ struct wmi_peer_set_q_empty_callback_cmd
+ /* Maximum listen interval supported by hw in units of beacon interval */
+ #define ATH10K_MAX_HW_LISTEN_INTERVAL 5
+
+-struct wmi_peer_assoc_complete_cmd {
++struct wmi_common_peer_assoc_complete_cmd {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 vdev_id;
+ __le32 peer_new_assoc; /* 1=assoc, 0=reassoc */
+@@ -4071,11 +4566,30 @@ struct wmi_peer_assoc_complete_cmd {
+ __le32 peer_vht_caps;
+ __le32 peer_phymode;
+ struct wmi_vht_rate_set peer_vht_rates;
++};
++
++struct wmi_main_peer_assoc_complete_cmd {
++ struct wmi_common_peer_assoc_complete_cmd cmd;
++
+ /* HT Operation Element of the peer. Five bytes packed in 2
+ * INT32 array and filled from lsb to msb. */
+ __le32 peer_ht_info[2];
+ } __packed;
+
++struct wmi_10_1_peer_assoc_complete_cmd {
++ struct wmi_common_peer_assoc_complete_cmd cmd;
++} __packed;
++
++#define WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_LSB 0
++#define WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_MASK 0x0f
++#define WMI_PEER_ASSOC_INFO0_MAX_NSS_LSB 4
++#define WMI_PEER_ASSOC_INFO0_MAX_NSS_MASK 0xf0
++
++struct wmi_10_2_peer_assoc_complete_cmd {
++ struct wmi_common_peer_assoc_complete_cmd cmd;
++ __le32 info0; /* WMI_PEER_ASSOC_INFO0_ */
++} __packed;
++
+ struct wmi_peer_assoc_complete_arg {
+ u8 addr[ETH_ALEN];
+ u32 vdev_id;
+@@ -4161,6 +4675,11 @@ enum wmi_sta_keepalive_method {
+ WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE = 2,
+ };
+
++#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
++
++/* Firmware crashes if keepalive interval exceeds this limit */
++#define WMI_STA_KEEPALIVE_INTERVAL_MAX_SECONDS 0xffff
++
+ /* note: ip4 addresses are in network byte order, i.e. big endian */
+ struct wmi_sta_keepalive_arp_resp {
+ __be32 src_ip4_addr;
+@@ -4176,6 +4695,16 @@ struct wmi_sta_keepalive_cmd {
+ struct wmi_sta_keepalive_arp_resp arp_resp;
+ } __packed;
+
++struct wmi_sta_keepalive_arg {
++ u32 vdev_id;
++ u32 enabled;
++ u32 method;
++ u32 interval;
++ __be32 src_ip4_addr;
++ __be32 dest_ip4_addr;
++ const u8 dest_mac_addr[ETH_ALEN];
++};
++
+ enum wmi_force_fw_hang_type {
+ WMI_FORCE_FW_HANG_ASSERT = 1,
+ WMI_FORCE_FW_HANG_NO_DETECT,
+@@ -4240,7 +4769,6 @@ struct wmi_dbglog_cfg_cmd {
+ __le32 config_valid;
+ } __packed;
+
+-#define ATH10K_RTS_MAX 2347
+ #define ATH10K_FRAGMT_THRESHOLD_MIN 540
+ #define ATH10K_FRAGMT_THRESHOLD_MAX 2346
+
+@@ -4251,72 +4779,170 @@ struct wmi_dbglog_cfg_cmd {
+ /* By default disable power save for IBSS */
+ #define ATH10K_DEFAULT_ATIM 0
+
++#define WMI_MAX_MEM_REQS 16
++
++struct wmi_scan_ev_arg {
++ __le32 event_type; /* %WMI_SCAN_EVENT_ */
++ __le32 reason; /* %WMI_SCAN_REASON_ */
++ __le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
++ __le32 scan_req_id;
++ __le32 scan_id;
++ __le32 vdev_id;
++};
++
++struct wmi_mgmt_rx_ev_arg {
++ __le32 channel;
++ __le32 snr;
++ __le32 rate;
++ __le32 phy_mode;
++ __le32 buf_len;
++ __le32 status; /* %WMI_RX_STATUS_ */
++};
++
++struct wmi_ch_info_ev_arg {
++ __le32 err_code;
++ __le32 freq;
++ __le32 cmd_flags;
++ __le32 noise_floor;
++ __le32 rx_clear_count;
++ __le32 cycle_count;
++};
++
++struct wmi_vdev_start_ev_arg {
++ __le32 vdev_id;
++ __le32 req_id;
++ __le32 resp_type; /* %WMI_VDEV_RESP_ */
++ __le32 status;
++};
++
++struct wmi_peer_kick_ev_arg {
++ const u8 *mac_addr;
++};
++
++struct wmi_swba_ev_arg {
++ __le32 vdev_map;
++ const struct wmi_tim_info *tim_info[WMI_MAX_AP_VDEV];
++ const struct wmi_p2p_noa_info *noa_info[WMI_MAX_AP_VDEV];
++};
++
++struct wmi_phyerr_ev_arg {
++ __le32 num_phyerrs;
++ __le32 tsf_l32;
++ __le32 tsf_u32;
++ __le32 buf_len;
++ const struct wmi_phyerr *phyerrs;
++};
++
++struct wmi_svc_rdy_ev_arg {
++ __le32 min_tx_power;
++ __le32 max_tx_power;
++ __le32 ht_cap;
++ __le32 vht_cap;
++ __le32 sw_ver0;
++ __le32 sw_ver1;
++ __le32 fw_build;
++ __le32 phy_capab;
++ __le32 num_rf_chains;
++ __le32 eeprom_rd;
++ __le32 num_mem_reqs;
++ const __le32 *service_map;
++ size_t service_map_len;
++ const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS];
++};
++
++struct wmi_rdy_ev_arg {
++ __le32 sw_version;
++ __le32 abi_version;
++ __le32 status;
++ const u8 *mac_addr;
++};
++
++struct wmi_pdev_temperature_event {
++ /* temperature value in degrees Celsius */
++ __le32 temperature;
++} __packed;
++
+ struct ath10k;
+ struct ath10k_vif;
++struct ath10k_fw_stats_pdev;
++struct ath10k_fw_stats_peer;
+
+ int ath10k_wmi_attach(struct ath10k *ar);
+ void ath10k_wmi_detach(struct ath10k *ar);
+ int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
+ int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
+
+-int ath10k_wmi_connect_htc_service(struct ath10k *ar);
+-int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
+- const struct wmi_channel_arg *);
+-int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
+-int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
+-int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
+- u16 rd5g, u16 ctl2g, u16 ctl5g,
+- enum wmi_dfs_region dfs_reg);
+-int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
+-int ath10k_wmi_cmd_init(struct ath10k *ar);
+-int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
++struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
++int ath10k_wmi_connect(struct ath10k *ar);
++
++struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
++int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
++int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
++ u32 cmd_id);
+ void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
+-int ath10k_wmi_stop_scan(struct ath10k *ar,
+- const struct wmi_stop_scan_arg *arg);
+-int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
+- enum wmi_vdev_type type,
+- enum wmi_vdev_subtype subtype,
+- const u8 macaddr[ETH_ALEN]);
+-int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id);
+-int ath10k_wmi_vdev_start(struct ath10k *ar,
+- const struct wmi_vdev_start_request_arg *);
+-int ath10k_wmi_vdev_restart(struct ath10k *ar,
+- const struct wmi_vdev_start_request_arg *);
+-int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id);
+-int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
+- const u8 *bssid);
+-int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id);
+-int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
+- u32 param_id, u32 param_value);
+-int ath10k_wmi_vdev_install_key(struct ath10k *ar,
+- const struct wmi_vdev_install_key_arg *arg);
+-int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
+- const u8 peer_addr[ETH_ALEN]);
+-int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
+- const u8 peer_addr[ETH_ALEN]);
+-int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
+- const u8 peer_addr[ETH_ALEN], u32 tid_bitmap);
+-int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
+- const u8 *peer_addr,
+- enum wmi_peer_param param_id, u32 param_value);
+-int ath10k_wmi_peer_assoc(struct ath10k *ar,
+- const struct wmi_peer_assoc_complete_arg *arg);
+-int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
+- enum wmi_sta_ps_mode psmode);
+-int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
+- enum wmi_sta_powersave_param param_id,
+- u32 value);
+-int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+- enum wmi_ap_ps_peer_param param_id, u32 value);
+-int ath10k_wmi_scan_chan_list(struct ath10k *ar,
+- const struct wmi_scan_chan_list_arg *arg);
+-int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif);
+-int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
+- const struct wmi_pdev_set_wmm_params_arg *arg);
+-int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
+-int ath10k_wmi_force_fw_hang(struct ath10k *ar,
+- enum wmi_force_fw_hang_type type, u32 delay_ms);
+-int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb);
+-int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable);
++
++void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
++ struct ath10k_fw_stats_pdev *dst);
++void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
++ struct ath10k_fw_stats_pdev *dst);
++void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
++ struct ath10k_fw_stats_pdev *dst);
++void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
++ struct ath10k_fw_stats_pdev *dst);
++void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
++ struct ath10k_fw_stats_peer *dst);
++void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
++ struct wmi_host_mem_chunks *chunks);
++void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
++ const struct wmi_start_scan_arg *arg);
++void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
++ const struct wmi_wmm_params_arg *arg);
++void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
++ const struct wmi_channel_arg *arg);
++int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg);
++
++int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb);
++int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb);
++int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_dfs(struct ath10k *ar,
++ const struct wmi_phyerr *phyerr, u64 tsf);
++void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
++ const struct wmi_phyerr *phyerr,
++ u64 tsf);
++void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
++ struct sk_buff *skb);
++void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
++ struct sk_buff *skb);
++void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
++ struct sk_buff *skb);
++void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
++ struct sk_buff *skb);
++void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb);
++void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb);
++int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb);
+
+ #endif /* _WMI_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/ath/ath10k/spectral.c
+@@ -0,0 +1,552 @@
++/*
++ * Copyright (c) 2013 Qualcomm Atheros, Inc.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#include <linux/relay.h>
++#include "core.h"
++#include "debug.h"
++#include "wmi-ops.h"
++
++static void send_fft_sample(struct ath10k *ar,
++ const struct fft_sample_tlv *fft_sample_tlv)
++{
++ int length;
++
++ if (!ar->spectral.rfs_chan_spec_scan)
++ return;
++
++ length = __be16_to_cpu(fft_sample_tlv->length) +
++ sizeof(*fft_sample_tlv);
++ relay_write(ar->spectral.rfs_chan_spec_scan, fft_sample_tlv, length);
++}
++
++static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len,
++ u8 *data)
++{
++ int dc_pos;
++ u8 max_exp;
++
++ dc_pos = bin_len / 2;
++
++ /* peak index outside of bins */
++ if (dc_pos < max_index || -dc_pos >= max_index)
++ return 0;
++
++ for (max_exp = 0; max_exp < 8; max_exp++) {
++ if (data[dc_pos + max_index] == (max_magnitude >> max_exp))
++ break;
++ }
++
++ /* max_exp not found */
++ if (data[dc_pos + max_index] != (max_magnitude >> max_exp))
++ return 0;
++
++ return max_exp;
++}
++
++int ath10k_spectral_process_fft(struct ath10k *ar,
++ const struct wmi_phyerr *phyerr,
++ const struct phyerr_fft_report *fftr,
++ size_t bin_len, u64 tsf)
++{
++ struct fft_sample_ath10k *fft_sample;
++ u8 buf[sizeof(*fft_sample) + SPECTRAL_ATH10K_MAX_NUM_BINS];
++ u16 freq1, freq2, total_gain_db, base_pwr_db, length, peak_mag;
++ u32 reg0, reg1;
++ u8 chain_idx, *bins;
++ int dc_pos;
++
++ fft_sample = (struct fft_sample_ath10k *)&buf;
++
++ if (bin_len < 64 || bin_len > SPECTRAL_ATH10K_MAX_NUM_BINS)
++ return -EINVAL;
++
++ reg0 = __le32_to_cpu(fftr->reg0);
++ reg1 = __le32_to_cpu(fftr->reg1);
++
++ length = sizeof(*fft_sample) - sizeof(struct fft_sample_tlv) + bin_len;
++ fft_sample->tlv.type = ATH_FFT_SAMPLE_ATH10K;
++ fft_sample->tlv.length = __cpu_to_be16(length);
++
++ /* TODO: there might be a reason why the hardware reports 20/40/80 MHz,
++ * but the results/plots suggest that it's actually 22/44/88 MHz.
++ */
++ switch (phyerr->chan_width_mhz) {
++ case 20:
++ fft_sample->chan_width_mhz = 22;
++ break;
++ case 40:
++ fft_sample->chan_width_mhz = 44;
++ break;
++ case 80:
++ /* TODO: As experiments with an analogue sender and various
++ * configurations (fft-sizes of 64/128/256 and 20/40/80 MHz)
++ * show, the particular configuration of 80 MHz/64 bins does
++ * not match with the other samples at all. Until the reason
++ * for that is found, don't report these samples.
++ */
++ if (bin_len == 64)
++ return -EINVAL;
++ fft_sample->chan_width_mhz = 88;
++ break;
++ default:
++ fft_sample->chan_width_mhz = phyerr->chan_width_mhz;
++ }
++
++ fft_sample->relpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB);
++ fft_sample->avgpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB);
++
++ peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
++ fft_sample->max_magnitude = __cpu_to_be16(peak_mag);
++ fft_sample->max_index = MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX);
++ fft_sample->rssi = phyerr->rssi_combined;
++
++ total_gain_db = MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB);
++ base_pwr_db = MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB);
++ fft_sample->total_gain_db = __cpu_to_be16(total_gain_db);
++ fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db);
++
++ freq1 = __le16_to_cpu(phyerr->freq1);
++ freq2 = __le16_to_cpu(phyerr->freq2);
++ fft_sample->freq1 = __cpu_to_be16(freq1);
++ fft_sample->freq2 = __cpu_to_be16(freq2);
++
++ chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX);
++
++ fft_sample->noise = __cpu_to_be16(
++ __le16_to_cpu(phyerr->nf_chains[chain_idx]));
++
++ bins = (u8 *)fftr;
++ bins += sizeof(*fftr);
++
++ fft_sample->tsf = __cpu_to_be64(tsf);
++
++ /* max_exp has been directly reported by previous hardware (ath9k),
++ * maybe it's possible to get it by other means?
++ */
++ fft_sample->max_exp = get_max_exp(fft_sample->max_index, peak_mag,
++ bin_len, bins);
++
++ memcpy(fft_sample->data, bins, bin_len);
++
++ /* DC value (value in the middle) is the blind spot of the spectral
++ * sample and invalid, interpolate it.
++ */
++ dc_pos = bin_len / 2;
++ fft_sample->data[dc_pos] = (fft_sample->data[dc_pos + 1] +
++ fft_sample->data[dc_pos - 1]) / 2;
++
++ send_fft_sample(ar, &fft_sample->tlv);
++
++ return 0;
++}
++
++static struct ath10k_vif *ath10k_get_spectral_vdev(struct ath10k *ar)
++{
++ struct ath10k_vif *arvif;
++
++ lockdep_assert_held(&ar->conf_mutex);
++
++ if (list_empty(&ar->arvifs))
++ return NULL;
++
++ /* if there already is a vif doing spectral, return that. */
++ list_for_each_entry(arvif, &ar->arvifs, list)
++ if (arvif->spectral_enabled)
++ return arvif;
++
++ /* otherwise, return the first vif. */
++ return list_first_entry(&ar->arvifs, typeof(*arvif), list);
++}
++
++static int ath10k_spectral_scan_trigger(struct ath10k *ar)
++{
++ struct ath10k_vif *arvif;
++ int res;
++ int vdev_id;
++
++ lockdep_assert_held(&ar->conf_mutex);
++
++ arvif = ath10k_get_spectral_vdev(ar);
++ if (!arvif)
++ return -ENODEV;
++ vdev_id = arvif->vdev_id;
++
++ if (ar->spectral.mode == SPECTRAL_DISABLED)
++ return 0;
++
++ res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
++ WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
++ WMI_SPECTRAL_ENABLE_CMD_ENABLE);
++ if (res < 0)
++ return res;
++
++ res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
++ WMI_SPECTRAL_TRIGGER_CMD_TRIGGER,
++ WMI_SPECTRAL_ENABLE_CMD_ENABLE);
++ if (res < 0)
++ return res;
++
++ return 0;
++}
++
++static int ath10k_spectral_scan_config(struct ath10k *ar,
++ enum ath10k_spectral_mode mode)
++{
++ struct wmi_vdev_spectral_conf_arg arg;
++ struct ath10k_vif *arvif;
++ int vdev_id, count, res = 0;
++
++ lockdep_assert_held(&ar->conf_mutex);
++
++ arvif = ath10k_get_spectral_vdev(ar);
++ if (!arvif)
++ return -ENODEV;
++
++ vdev_id = arvif->vdev_id;
++
++ arvif->spectral_enabled = (mode != SPECTRAL_DISABLED);
++ ar->spectral.mode = mode;
++
++ res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
++ WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
++ WMI_SPECTRAL_ENABLE_CMD_DISABLE);
++ if (res < 0) {
++ ath10k_warn(ar, "failed to enable spectral scan: %d\n", res);
++ return res;
++ }
++
++ if (mode == SPECTRAL_DISABLED)
++ return 0;
++
++ if (mode == SPECTRAL_BACKGROUND)
++ count = WMI_SPECTRAL_COUNT_DEFAULT;
++ else
++ count = max_t(u8, 1, ar->spectral.config.count);
++
++ arg.vdev_id = vdev_id;
++ arg.scan_count = count;
++ arg.scan_period = WMI_SPECTRAL_PERIOD_DEFAULT;
++ arg.scan_priority = WMI_SPECTRAL_PRIORITY_DEFAULT;
++ arg.scan_fft_size = ar->spectral.config.fft_size;
++ arg.scan_gc_ena = WMI_SPECTRAL_GC_ENA_DEFAULT;
++ arg.scan_restart_ena = WMI_SPECTRAL_RESTART_ENA_DEFAULT;
++ arg.scan_noise_floor_ref = WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
++ arg.scan_init_delay = WMI_SPECTRAL_INIT_DELAY_DEFAULT;
++ arg.scan_nb_tone_thr = WMI_SPECTRAL_NB_TONE_THR_DEFAULT;
++ arg.scan_str_bin_thr = WMI_SPECTRAL_STR_BIN_THR_DEFAULT;
++ arg.scan_wb_rpt_mode = WMI_SPECTRAL_WB_RPT_MODE_DEFAULT;
++ arg.scan_rssi_rpt_mode = WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT;
++ arg.scan_rssi_thr = WMI_SPECTRAL_RSSI_THR_DEFAULT;
++ arg.scan_pwr_format = WMI_SPECTRAL_PWR_FORMAT_DEFAULT;
++ arg.scan_rpt_mode = WMI_SPECTRAL_RPT_MODE_DEFAULT;
++ arg.scan_bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT;
++ arg.scan_dbm_adj = WMI_SPECTRAL_DBM_ADJ_DEFAULT;
++ arg.scan_chn_mask = WMI_SPECTRAL_CHN_MASK_DEFAULT;
++
++ res = ath10k_wmi_vdev_spectral_conf(ar, &arg);
++ if (res < 0) {
++ ath10k_warn(ar, "failed to configure spectral scan: %d\n", res);
++ return res;
++ }
++
++ return 0;
++}
++
++static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct ath10k *ar = file->private_data;
++ char *mode = "";
++ unsigned int len;
++ enum ath10k_spectral_mode spectral_mode;
++
++ mutex_lock(&ar->conf_mutex);
++ spectral_mode = ar->spectral.mode;
++ mutex_unlock(&ar->conf_mutex);
++
++ switch (spectral_mode) {
++ case SPECTRAL_DISABLED:
++ mode = "disable";
++ break;
++ case SPECTRAL_BACKGROUND:
++ mode = "background";
++ break;
++ case SPECTRAL_MANUAL:
++ mode = "manual";
++ break;
++ }
++
++ len = strlen(mode);
++ return simple_read_from_buffer(user_buf, count, ppos, mode, len);
++}
++
++static ssize_t write_file_spec_scan_ctl(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct ath10k *ar = file->private_data;
++ char buf[32];
++ ssize_t len;
++ int res;
++
++ len = min(count, sizeof(buf) - 1);
++ if (copy_from_user(buf, user_buf, len))
++ return -EFAULT;
++
++ buf[len] = '\0';
++
++ mutex_lock(&ar->conf_mutex);
++
++ if (strncmp("trigger", buf, 7) == 0) {
++ if (ar->spectral.mode == SPECTRAL_MANUAL ||
++ ar->spectral.mode == SPECTRAL_BACKGROUND) {
++ /* reset the configuration to adopt possibly changed
++ * debugfs parameters
++ */
++ res = ath10k_spectral_scan_config(ar,
++ ar->spectral.mode);
++ if (res < 0) {
++ ath10k_warn(ar, "failed to reconfigure spectral scan: %d\n",
++ res);
++ }
++ res = ath10k_spectral_scan_trigger(ar);
++ if (res < 0) {
++ ath10k_warn(ar, "failed to trigger spectral scan: %d\n",
++ res);
++ }
++ } else {
++ res = -EINVAL;
++ }
++ } else if (strncmp("background", buf, 9) == 0) {
++ res = ath10k_spectral_scan_config(ar, SPECTRAL_BACKGROUND);
++ } else if (strncmp("manual", buf, 6) == 0) {
++ res = ath10k_spectral_scan_config(ar, SPECTRAL_MANUAL);
++ } else if (strncmp("disable", buf, 7) == 0) {
++ res = ath10k_spectral_scan_config(ar, SPECTRAL_DISABLED);
++ } else {
++ res = -EINVAL;
++ }
++
++ mutex_unlock(&ar->conf_mutex);
++
++ if (res < 0)
++ return res;
++
++ return count;
++}
++
++static const struct file_operations fops_spec_scan_ctl = {
++ .read = read_file_spec_scan_ctl,
++ .write = write_file_spec_scan_ctl,
++ .open = simple_open,
++ .owner = THIS_MODULE,
++ .llseek = default_llseek,
++};
++
++static ssize_t read_file_spectral_count(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct ath10k *ar = file->private_data;
++ char buf[32];
++ unsigned int len;
++ u8 spectral_count;
++
++ mutex_lock(&ar->conf_mutex);
++ spectral_count = ar->spectral.config.count;
++ mutex_unlock(&ar->conf_mutex);
++
++ len = sprintf(buf, "%d\n", spectral_count);
++ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
++}
++
++static ssize_t write_file_spectral_count(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct ath10k *ar = file->private_data;
++ unsigned long val;
++ char buf[32];
++ ssize_t len;
++
++ len = min(count, sizeof(buf) - 1);
++ if (copy_from_user(buf, user_buf, len))
++ return -EFAULT;
++
++ buf[len] = '\0';
++ if (kstrtoul(buf, 0, &val))
++ return -EINVAL;
++
++ if (val < 0 || val > 255)
++ return -EINVAL;
++
++ mutex_lock(&ar->conf_mutex);
++ ar->spectral.config.count = val;
++ mutex_unlock(&ar->conf_mutex);
++
++ return count;
++}
++
++static const struct file_operations fops_spectral_count = {
++ .read = read_file_spectral_count,
++ .write = write_file_spectral_count,
++ .open = simple_open,
++ .owner = THIS_MODULE,
++ .llseek = default_llseek,
++};
++
++static ssize_t read_file_spectral_bins(struct file *file,
|
|
++ char __user *user_buf,
|
|
++ size_t count, loff_t *ppos)
|
|
++{
|
|
++ struct ath10k *ar = file->private_data;
|
|
++ char buf[32];
|
|
++ unsigned int len, bins, fft_size, bin_scale;
|
|
++
|
|
++ mutex_lock(&ar->conf_mutex);
|
|
++
|
|
++ fft_size = ar->spectral.config.fft_size;
|
|
++ bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT;
|
|
++ bins = 1 << (fft_size - bin_scale);
|
|
++
|
|
++ mutex_unlock(&ar->conf_mutex);
|
|
++
|
|
++ len = sprintf(buf, "%d\n", bins);
|
|
++ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
|
|
++}
|
|
++
|
|
++static ssize_t write_file_spectral_bins(struct file *file,
|
|
++ const char __user *user_buf,
|
|
++ size_t count, loff_t *ppos)
|
|
++{
|
|
++ struct ath10k *ar = file->private_data;
|
|
++ unsigned long val;
|
|
++ char buf[32];
|
|
++ ssize_t len;
|
|
++
|
|
++ len = min(count, sizeof(buf) - 1);
|
|
++ if (copy_from_user(buf, user_buf, len))
|
|
++ return -EFAULT;
|
|
++
|
|
++ buf[len] = '\0';
|
|
++ if (kstrtoul(buf, 0, &val))
|
|
++ return -EINVAL;
|
|
++
|
|
++ if (val < 64 || val > SPECTRAL_ATH10K_MAX_NUM_BINS)
|
|
++ return -EINVAL;
|
|
++
|
|
++ if (!is_power_of_2(val))
|
|
++ return -EINVAL;
|
|
++
|
|
++ mutex_lock(&ar->conf_mutex);
|
|
++ ar->spectral.config.fft_size = ilog2(val);
|
|
++ ar->spectral.config.fft_size += WMI_SPECTRAL_BIN_SCALE_DEFAULT;
|
|
++ mutex_unlock(&ar->conf_mutex);
|
|
++
|
|
++ return count;
|
|
++}
|
|
++
|
|
++static const struct file_operations fops_spectral_bins = {
|
|
++ .read = read_file_spectral_bins,
|
|
++ .write = write_file_spectral_bins,
|
|
++ .open = simple_open,
|
|
++ .owner = THIS_MODULE,
|
|
++ .llseek = default_llseek,
|
|
++};
|
|
++
|
|
++static struct dentry *create_buf_file_handler(const char *filename,
|
|
++ struct dentry *parent,
|
|
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
|
|
++ umode_t mode,
|
|
++#else
|
|
++ int mode,
|
|
++#endif
|
|
++ struct rchan_buf *buf,
|
|
++ int *is_global)
|
|
++{
|
|
++ struct dentry *buf_file;
|
|
++
|
|
++ buf_file = debugfs_create_file(filename, mode, parent, buf,
|
|
++ &relay_file_operations);
|
|
++ *is_global = 1;
|
|
++ return buf_file;
|
|
++}
|
|
++
|
|
++static int remove_buf_file_handler(struct dentry *dentry)
|
|
++{
|
|
++ debugfs_remove(dentry);
|
|
++
|
|
++ return 0;
|
|
++}
|
|
++
|
|
++static struct rchan_callbacks rfs_spec_scan_cb = {
|
|
++ .create_buf_file = create_buf_file_handler,
|
|
++ .remove_buf_file = remove_buf_file_handler,
|
|
++};
|
|
++
|
|
++int ath10k_spectral_start(struct ath10k *ar)
|
|
++{
|
|
++ struct ath10k_vif *arvif;
|
|
++
|
|
++ lockdep_assert_held(&ar->conf_mutex);
|
|
++
|
|
++ list_for_each_entry(arvif, &ar->arvifs, list)
|
|
++ arvif->spectral_enabled = 0;
|
|
++
|
|
++ ar->spectral.mode = SPECTRAL_DISABLED;
|
|
++ ar->spectral.config.count = WMI_SPECTRAL_COUNT_DEFAULT;
|
|
++ ar->spectral.config.fft_size = WMI_SPECTRAL_FFT_SIZE_DEFAULT;
|
|
++
|
|
++ return 0;
|
|
++}
|
|
++
|
|
++int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
|
|
++{
|
|
++ if (!arvif->spectral_enabled)
|
|
++ return 0;
|
|
++
|
|
++ return ath10k_spectral_scan_config(arvif->ar, SPECTRAL_DISABLED);
|
|
++}
|
|
++
|
|
++int ath10k_spectral_create(struct ath10k *ar)
|
|
++{
|
|
++ ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan",
|
|
++ ar->debug.debugfs_phy,
|
|
++ 1024, 256,
|
|
++ &rfs_spec_scan_cb, NULL);
|
|
++ debugfs_create_file("spectral_scan_ctl",
|
|
++ S_IRUSR | S_IWUSR,
|
|
++ ar->debug.debugfs_phy, ar,
|
|
++ &fops_spec_scan_ctl);
|
|
++ debugfs_create_file("spectral_count",
|
|
++ S_IRUSR | S_IWUSR,
|
|
++ ar->debug.debugfs_phy, ar,
|
|
++ &fops_spectral_count);
|
|
++ debugfs_create_file("spectral_bins",
|
|
++ S_IRUSR | S_IWUSR,
|
|
++ ar->debug.debugfs_phy, ar,
|
|
++ &fops_spectral_bins);
|
|
++
|
|
++ return 0;
|
|
++}
|
|
++
|
|
++void ath10k_spectral_destroy(struct ath10k *ar)
|
|
++{
|
|
++ if (ar->spectral.rfs_chan_spec_scan) {
|
|
++ relay_close(ar->spectral.rfs_chan_spec_scan);
|
|
++ ar->spectral.rfs_chan_spec_scan = NULL;
|
|
++ }
|
|
++}
|
|
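[editor's note] The control flow above implies a simple capture session: select a mode via spectral_scan_ctl, trigger, read FFT samples from the relay file, then disable. A minimal user-space sketch, assuming debugfs is mounted at /sys/kernel/debug and the usual ieee80211/phy0/ath10k layout (the path and the per-cpu relay file name spectral_scan0 are assumptions, not spelled out by this patch):

	/* spectral_dump.c - hedged sketch; error handling trimmed */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	#define DBG "/sys/kernel/debug/ieee80211/phy0/ath10k" /* assumed */

	static void ctl(const char *cmd)
	{
		int fd = open(DBG "/spectral_scan_ctl", O_WRONLY);

		if (fd < 0)
			return;
		(void)write(fd, cmd, strlen(cmd));
		close(fd);
	}

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd;

		ctl("background");	/* configure + enable sampling */
		ctl("trigger");		/* start delivering samples */

		/* relay_open("spectral_scan", ...) creates per-cpu files
		 * named spectral_scan0, spectral_scan1, ... */
		fd = open(DBG "/spectral_scan0", O_RDONLY);
		if (fd >= 0) {
			n = read(fd, buf, sizeof(buf));
			printf("read %zd bytes of FFT samples\n", n);
			close(fd);
		}

		ctl("disable");
		return 0;
	}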
+--- /dev/null
++++ b/drivers/net/wireless/ath/ath10k/spectral.h
+@@ -0,0 +1,90 @@
++/*
++ * Copyright (c) 2013 Qualcomm Atheros, Inc.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#ifndef SPECTRAL_H
++#define SPECTRAL_H
++
++#include "../spectral_common.h"
++
++/**
++ * struct ath10k_spec_scan - parameters for Atheros spectral scan
++ *
++ * @count: number of scan results requested for manual mode
++ * @fft_size: number of bins to be requested = 2^(fft_size - bin_scale)
++ */
++struct ath10k_spec_scan {
++	u8 count;
++	u8 fft_size;
++};
++
++/* enum ath10k_spectral_mode:
++ *
++ * @SPECTRAL_DISABLED: spectral mode is disabled
++ * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
++ *	something else.
++ * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
++ *	is performed manually.
++ */
++enum ath10k_spectral_mode {
++	SPECTRAL_DISABLED = 0,
++	SPECTRAL_BACKGROUND,
++	SPECTRAL_MANUAL,
++};
++
++#ifdef CPTCFG_ATH10K_DEBUGFS
++
++int ath10k_spectral_process_fft(struct ath10k *ar,
++				const struct wmi_phyerr *phyerr,
++				const struct phyerr_fft_report *fftr,
++				size_t bin_len, u64 tsf);
++int ath10k_spectral_start(struct ath10k *ar);
++int ath10k_spectral_vif_stop(struct ath10k_vif *arvif);
++int ath10k_spectral_create(struct ath10k *ar);
++void ath10k_spectral_destroy(struct ath10k *ar);
++
++#else
++
++static inline int
++ath10k_spectral_process_fft(struct ath10k *ar,
++			    const struct wmi_phyerr *phyerr,
++			    const struct phyerr_fft_report *fftr,
++			    size_t bin_len, u64 tsf)
++{
++	return 0;
++}
++
++static inline int ath10k_spectral_start(struct ath10k *ar)
++{
++	return 0;
++}
++
++static inline int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
++{
++	return 0;
++}
++
++static inline int ath10k_spectral_create(struct ath10k *ar)
++{
++	return 0;
++}
++
++static inline void ath10k_spectral_destroy(struct ath10k *ar)
++{
++}
++
++#endif /* CPTCFG_ATH10K_DEBUGFS */
++
++#endif /* SPECTRAL_H */
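[editor's note] The @fft_size comment is the inverse of what write_file_spectral_bins() stores: the debugfs file takes a bin count and the driver keeps log2(bins) + bin_scale. A small sketch of the same arithmetic, assuming the default bin scale of 1 (the actual WMI_SPECTRAL_BIN_SCALE_DEFAULT value lives in a wmi.h hunk, not shown here):

	#include <assert.h>

	#define BIN_SCALE 1	/* assumed default bin scale */

	/* mirrors the kernel: bins = 1 << (fft_size - bin_scale),
	 * so fft_size = log2(bins) + bin_scale */
	static unsigned int bins_to_fft_size(unsigned int bins)
	{
		unsigned int fft_size = BIN_SCALE;

		assert(bins >= 64 && (bins & (bins - 1)) == 0);
		while (bins > 1) {
			bins >>= 1;
			fft_size++;
		}
		return fft_size;	/* e.g. 64 bins -> fft_size 7 */
	}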
+--- /dev/null
++++ b/drivers/net/wireless/ath/ath10k/testmode.c
+@@ -0,0 +1,385 @@
++/*
++ * Copyright (c) 2014 Qualcomm Atheros, Inc.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#include "testmode.h"
++
++#include <net/netlink.h>
++#include <linux/firmware.h>
++
++#include "debug.h"
++#include "wmi.h"
++#include "hif.h"
++#include "hw.h"
++
++#include "testmode_i.h"
++
++static const struct nla_policy ath10k_tm_policy[ATH10K_TM_ATTR_MAX + 1] = {
++	[ATH10K_TM_ATTR_CMD]		= { .type = NLA_U32 },
++	[ATH10K_TM_ATTR_DATA]		= { .type = NLA_BINARY,
++					    .len = ATH10K_TM_DATA_MAX_LEN },
++	[ATH10K_TM_ATTR_WMI_CMDID]	= { .type = NLA_U32 },
++	[ATH10K_TM_ATTR_VERSION_MAJOR]	= { .type = NLA_U32 },
++	[ATH10K_TM_ATTR_VERSION_MINOR]	= { .type = NLA_U32 },
++};
++
++/* Returns true if callee consumes the skb and the skb should be discarded.
++ * Returns false if skb is not used. Does not sleep.
++ */
++bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
++{
++	struct sk_buff *nl_skb;
++	bool consumed;
++	int ret;
++
++	ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
++		   "testmode event wmi cmd_id %d skb %p skb->len %d\n",
++		   cmd_id, skb, skb->len);
++
++	ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
++
++	spin_lock_bh(&ar->data_lock);
++
++	if (!ar->testmode.utf_monitor) {
++		consumed = false;
++		goto out;
++	}
++
++	/* Only testmode.c should be handling events from utf firmware,
++	 * otherwise all sort of problems will arise as mac80211 operations
++	 * are not initialised.
++	 */
++	consumed = true;
++
++	nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
++						   2 * sizeof(u32) + skb->len,
++						   GFP_ATOMIC);
++	if (!nl_skb) {
++		ath10k_warn(ar,
++			    "failed to allocate skb for testmode wmi event\n");
++		goto out;
++	}
++
++	ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_WMI);
++	if (ret) {
++		ath10k_warn(ar,
++			    "failed to put testmode wmi event cmd attribute: %d\n",
++			    ret);
++		kfree_skb(nl_skb);
++		goto out;
++	}
++
++	ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, cmd_id);
++	if (ret) {
++		ath10k_warn(ar,
++			    "failed to put testmode wmi event cmd_id: %d\n",
++			    ret);
++		kfree_skb(nl_skb);
++		goto out;
++	}
++
++	ret = nla_put(nl_skb, ATH10K_TM_ATTR_DATA, skb->len, skb->data);
++	if (ret) {
++		ath10k_warn(ar,
++			    "failed to copy skb to testmode wmi event: %d\n",
++			    ret);
++		kfree_skb(nl_skb);
++		goto out;
++	}
++
++	cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
++
++out:
++	spin_unlock_bh(&ar->data_lock);
++
++	return consumed;
++}
++
++static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[])
++{
++	struct sk_buff *skb;
++	int ret;
++
++	ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
++		   "testmode cmd get version_major %d version_minor %d\n",
++		   ATH10K_TESTMODE_VERSION_MAJOR,
++		   ATH10K_TESTMODE_VERSION_MINOR);
++
++	skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
++						nla_total_size(sizeof(u32)));
++	if (!skb)
++		return -ENOMEM;
++
++	ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MAJOR,
++			  ATH10K_TESTMODE_VERSION_MAJOR);
++	if (ret) {
++		kfree_skb(skb);
++		return ret;
++	}
++
++	ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MINOR,
++			  ATH10K_TESTMODE_VERSION_MINOR);
++	if (ret) {
++		kfree_skb(skb);
++		return ret;
++	}
++
++	return cfg80211_testmode_reply(skb);
++}
++
++static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
++{
++	char filename[100];
++	int ret;
++
++	ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf start\n");
++
++	mutex_lock(&ar->conf_mutex);
++
++	if (ar->state == ATH10K_STATE_UTF) {
++		ret = -EALREADY;
++		goto err;
++	}
++
++	/* start utf only when the driver is not in use */
++	if (ar->state != ATH10K_STATE_OFF) {
++		ret = -EBUSY;
++		goto err;
++	}
++
++	if (WARN_ON(ar->testmode.utf != NULL)) {
++		/* utf image is already downloaded, it shouldn't be */
++		ret = -EEXIST;
++		goto err;
++	}
++
++	snprintf(filename, sizeof(filename), "%s/%s",
++		 ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
++
++	/* load utf firmware image */
++	ret = request_firmware(&ar->testmode.utf, filename, ar->dev);
++	if (ret) {
++		ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
++			    filename, ret);
++		goto err;
++	}
++
++	spin_lock_bh(&ar->data_lock);
++
++	ar->testmode.utf_monitor = true;
++
++	spin_unlock_bh(&ar->data_lock);
++
++	BUILD_BUG_ON(sizeof(ar->fw_features) !=
++		     sizeof(ar->testmode.orig_fw_features));
++
++	memcpy(ar->testmode.orig_fw_features, ar->fw_features,
++	       sizeof(ar->fw_features));
++	ar->testmode.orig_wmi_op_version = ar->wmi.op_version;
++
++	/* utf.bin firmware image does not advertise firmware features. Do
++	 * an ugly hack where we force the firmware features so that wmi.c
++	 * will use the correct WMI interface.
++	 */
++	memset(ar->fw_features, 0, sizeof(ar->fw_features));
++	ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
++
++	ret = ath10k_hif_power_up(ar);
++	if (ret) {
++		ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret);
++		ar->state = ATH10K_STATE_OFF;
++		goto err_fw_features;
++	}
++
++	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF);
++	if (ret) {
++		ath10k_err(ar, "failed to start core (testmode): %d\n", ret);
++		ar->state = ATH10K_STATE_OFF;
++		goto err_power_down;
++	}
++
++	ar->state = ATH10K_STATE_UTF;
++
++	ath10k_info(ar, "UTF firmware started\n");
++
++	mutex_unlock(&ar->conf_mutex);
++
++	return 0;
++
++err_power_down:
++	ath10k_hif_power_down(ar);
++
++err_fw_features:
++	/* return the original firmware features */
++	memcpy(ar->fw_features, ar->testmode.orig_fw_features,
++	       sizeof(ar->fw_features));
++	ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
++
++	release_firmware(ar->testmode.utf);
++	ar->testmode.utf = NULL;
++
++err:
++	mutex_unlock(&ar->conf_mutex);
++
++	return ret;
++}
++
++static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)
++{
++	lockdep_assert_held(&ar->conf_mutex);
++
++	ath10k_core_stop(ar);
++	ath10k_hif_power_down(ar);
++
++	spin_lock_bh(&ar->data_lock);
++
++	ar->testmode.utf_monitor = false;
++
++	spin_unlock_bh(&ar->data_lock);
++
++	/* return the original firmware features */
++	memcpy(ar->fw_features, ar->testmode.orig_fw_features,
++	       sizeof(ar->fw_features));
++	ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
++
++	release_firmware(ar->testmode.utf);
++	ar->testmode.utf = NULL;
++
++	ar->state = ATH10K_STATE_OFF;
++}
++
++static int ath10k_tm_cmd_utf_stop(struct ath10k *ar, struct nlattr *tb[])
++{
++	int ret;
++
++	ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf stop\n");
++
++	mutex_lock(&ar->conf_mutex);
++
++	if (ar->state != ATH10K_STATE_UTF) {
++		ret = -ENETDOWN;
++		goto out;
++	}
++
++	__ath10k_tm_cmd_utf_stop(ar);
++
++	ret = 0;
++
++	ath10k_info(ar, "UTF firmware stopped\n");
++
++out:
++	mutex_unlock(&ar->conf_mutex);
++	return ret;
++}
++
++static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[])
++{
++	struct sk_buff *skb;
++	int ret, buf_len;
++	u32 cmd_id;
++	void *buf;
++
++	mutex_lock(&ar->conf_mutex);
++
++	if (ar->state != ATH10K_STATE_UTF) {
++		ret = -ENETDOWN;
++		goto out;
++	}
++
++	if (!tb[ATH10K_TM_ATTR_DATA]) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	if (!tb[ATH10K_TM_ATTR_WMI_CMDID]) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	buf = nla_data(tb[ATH10K_TM_ATTR_DATA]);
++	buf_len = nla_len(tb[ATH10K_TM_ATTR_DATA]);
++	cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]);
++
++	ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
++		   "testmode cmd wmi cmd_id %d buf %p buf_len %d\n",
++		   cmd_id, buf, buf_len);
++
++	ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);
++
++	skb = ath10k_wmi_alloc_skb(ar, buf_len);
++	if (!skb) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	memcpy(skb->data, buf, buf_len);
++
++	ret = ath10k_wmi_cmd_send(ar, skb, cmd_id);
++	if (ret) {
++		ath10k_warn(ar, "failed to transmit wmi command (testmode): %d\n",
++			    ret);
++		goto out;
++	}
++
++	ret = 0;
++
++out:
++	mutex_unlock(&ar->conf_mutex);
++	return ret;
++}
++
++int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++		  void *data, int len)
++{
++	struct ath10k *ar = hw->priv;
++	struct nlattr *tb[ATH10K_TM_ATTR_MAX + 1];
++	int ret;
++
++	ret = nla_parse(tb, ATH10K_TM_ATTR_MAX, data, len,
++			ath10k_tm_policy);
++	if (ret)
++		return ret;
++
++	if (!tb[ATH10K_TM_ATTR_CMD])
++		return -EINVAL;
++
++	switch (nla_get_u32(tb[ATH10K_TM_ATTR_CMD])) {
++	case ATH10K_TM_CMD_GET_VERSION:
++		return ath10k_tm_cmd_get_version(ar, tb);
++	case ATH10K_TM_CMD_UTF_START:
++		return ath10k_tm_cmd_utf_start(ar, tb);
++	case ATH10K_TM_CMD_UTF_STOP:
++		return ath10k_tm_cmd_utf_stop(ar, tb);
++	case ATH10K_TM_CMD_WMI:
++		return ath10k_tm_cmd_wmi(ar, tb);
++	default:
++		return -EOPNOTSUPP;
++	}
++}
++
++void ath10k_testmode_destroy(struct ath10k *ar)
++{
++	mutex_lock(&ar->conf_mutex);
++
++	if (ar->state != ATH10K_STATE_UTF) {
++		/* utf firmware is not running, nothing to do */
++		goto out;
++	}
++
++	__ath10k_tm_cmd_utf_stop(ar);
++
++out:
++	mutex_unlock(&ar->conf_mutex);
++}
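[editor's note] The "consumed" return value above is what lets the WMI RX path hand UTF events to testmode before any normal processing. A sketch of the call-site shape, as an illustration only (the surrounding dispatch function is not part of this hunk):

	/* illustrative RX dispatch: if testmode consumed the event, stop */
	static void wmi_event_dispatch(struct ath10k *ar, u32 cmd_id,
				       struct sk_buff *skb)
	{
		if (ath10k_tm_event_wmi(ar, cmd_id, skb)) {
			dev_kfree_skb(skb);	/* callee consumed it */
			return;
		}

		/* ... normal per-cmd_id event handling continues ... */
	}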
+--- /dev/null
++++ b/drivers/net/wireless/ath/ath10k/testmode.h
+@@ -0,0 +1,46 @@
++/*
++ * Copyright (c) 2014 Qualcomm Atheros, Inc.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#include "core.h"
++
++#ifdef CPTCFG_NL80211_TESTMODE
++
++void ath10k_testmode_destroy(struct ath10k *ar);
++
++bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb);
++int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++		  void *data, int len);
++
++#else
++
++static inline void ath10k_testmode_destroy(struct ath10k *ar)
++{
++}
++
++static inline bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id,
++				       struct sk_buff *skb)
++{
++	return false;
++}
++
++static inline int ath10k_tm_cmd(struct ieee80211_hw *hw,
++				struct ieee80211_vif *vif,
++				void *data, int len)
++{
++	return 0;
++}
++
++#endif
+--- /dev/null
++++ b/drivers/net/wireless/ath/ath10k/testmode_i.h
+@@ -0,0 +1,70 @@
++/*
++ * Copyright (c) 2014 Qualcomm Atheros, Inc.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++/* "API" level of the ath10k testmode interface. Bump it after every
++ * incompatible interface change.
++ */
++#define ATH10K_TESTMODE_VERSION_MAJOR 1
++
++/* Bump this after every _compatible_ interface change, for example
++ * addition of a new command or an attribute.
++ */
++#define ATH10K_TESTMODE_VERSION_MINOR 0
++
++#define ATH10K_TM_DATA_MAX_LEN		5000
++
++enum ath10k_tm_attr {
++	__ATH10K_TM_ATTR_INVALID	= 0,
++	ATH10K_TM_ATTR_CMD		= 1,
++	ATH10K_TM_ATTR_DATA		= 2,
++	ATH10K_TM_ATTR_WMI_CMDID	= 3,
++	ATH10K_TM_ATTR_VERSION_MAJOR	= 4,
++	ATH10K_TM_ATTR_VERSION_MINOR	= 5,
++
++	/* keep last */
++	__ATH10K_TM_ATTR_AFTER_LAST,
++	ATH10K_TM_ATTR_MAX		= __ATH10K_TM_ATTR_AFTER_LAST - 1,
++};
++
++/* All ath10k testmode interface commands specified in
++ * ATH10K_TM_ATTR_CMD
++ */
++enum ath10k_tm_cmd {
++	/* Returns the supported ath10k testmode interface version in
++	 * ATH10K_TM_ATTR_VERSION. Always guaranteed to work. User space
++	 * uses this to verify it's using the correct version of the
++	 * testmode interface
++	 */
++	ATH10K_TM_CMD_GET_VERSION = 0,
++
++	/* Boots the UTF firmware, the netdev interface must be down at the
++	 * time.
++	 */
++	ATH10K_TM_CMD_UTF_START = 1,
++
++	/* Shuts down the UTF firmware and puts the driver back into OFF
++	 * state.
++	 */
++	ATH10K_TM_CMD_UTF_STOP = 2,
++
++	/* The command used to transmit a WMI command to the firmware and
++	 * the event to receive WMI events from the firmware. Without
++	 * struct wmi_cmd_hdr header, only the WMI payload. Command id is
++	 * provided with ATH10K_TM_ATTR_WMI_CMDID and payload in
++	 * ATH10K_TM_ATTR_DATA.
++	 */
++	ATH10K_TM_CMD_WMI = 3,
++};
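[editor's note] User space reaches these attributes through NL80211_CMD_TESTMODE, nested inside NL80211_ATTR_TESTDATA. A hedged libnl-3 sketch of the GET_VERSION query; socket setup, nl80211 family resolution and the reply callback are omitted, and the exact plumbing is an assumption rather than something this patch defines (the ATH10K_TM_* values are mirrored from the enums above):

	#include <netlink/genl/genl.h>
	#include <linux/nl80211.h>

	static struct nl_msg *build_get_version(int nl80211_id, int ifindex)
	{
		struct nl_msg *msg = nlmsg_alloc();
		struct nlattr *td;

		if (!msg)
			return NULL;

		genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nl80211_id,
			    0, 0, NL80211_CMD_TESTMODE, 0);
		nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);

		/* driver-private attrs go inside NL80211_ATTR_TESTDATA */
		td = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
		nla_put_u32(msg, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_GET_VERSION);
		nla_nest_end(msg, td);

		return msg;	/* send with nl_send_auto(), parse reply */
	}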
+--- /dev/null
++++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+@@ -0,0 +1,243 @@
++/*
++ * Copyright (c) 2014 Qualcomm Atheros, Inc.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#include "core.h"
++#include "wmi-ops.h"
++#include "debug.h"
++
++static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
++					     char __user *user_buf,
++					     size_t count, loff_t *ppos)
++{
++	struct ieee80211_sta *sta = file->private_data;
++	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
++	struct ath10k *ar = arsta->arvif->ar;
++	char buf[32];
++	int len = 0;
++
++	mutex_lock(&ar->conf_mutex);
++	len = scnprintf(buf, sizeof(buf) - len, "aggregation mode: %s\n",
++			(arsta->aggr_mode == ATH10K_DBG_AGGR_MODE_AUTO) ?
++			"auto" : "manual");
++	mutex_unlock(&ar->conf_mutex);
++
++	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
++}
++
++static ssize_t ath10k_dbg_sta_write_aggr_mode(struct file *file,
++					      const char __user *user_buf,
++					      size_t count, loff_t *ppos)
++{
++	struct ieee80211_sta *sta = file->private_data;
++	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
++	struct ath10k *ar = arsta->arvif->ar;
++	u32 aggr_mode;
++	int ret;
++
++	if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode))
++		return -EINVAL;
++
++	if (aggr_mode >= ATH10K_DBG_AGGR_MODE_MAX)
++		return -EINVAL;
++
++	mutex_lock(&ar->conf_mutex);
++	if ((ar->state != ATH10K_STATE_ON) ||
++	    (aggr_mode == arsta->aggr_mode)) {
++		ret = count;
++		goto out;
++	}
++
++	ret = ath10k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr);
++	if (ret) {
++		ath10k_warn(ar, "failed to clear addba session ret: %d\n", ret);
++		goto out;
++	}
++
++	arsta->aggr_mode = aggr_mode;
++out:
++	mutex_unlock(&ar->conf_mutex);
++	return ret;
++}
++
++static const struct file_operations fops_aggr_mode = {
++	.read = ath10k_dbg_sta_read_aggr_mode,
++	.write = ath10k_dbg_sta_write_aggr_mode,
++	.open = simple_open,
++	.owner = THIS_MODULE,
++	.llseek = default_llseek,
++};
++
++static ssize_t ath10k_dbg_sta_write_addba(struct file *file,
++					  const char __user *user_buf,
++					  size_t count, loff_t *ppos)
++{
++	struct ieee80211_sta *sta = file->private_data;
++	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
++	struct ath10k *ar = arsta->arvif->ar;
++	u32 tid, buf_size;
++	int ret;
++	char buf[64];
++
++	simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
++
++	/* make sure that buf is null terminated */
++	buf[sizeof(buf) - 1] = '\0';
++
++	ret = sscanf(buf, "%u %u", &tid, &buf_size);
++	if (ret != 2)
++		return -EINVAL;
++
++	/* Valid TID values are 0 through 15 */
++	if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
++		return -EINVAL;
++
++	mutex_lock(&ar->conf_mutex);
++	if ((ar->state != ATH10K_STATE_ON) ||
++	    (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
++		ret = count;
++		goto out;
++	}
++
++	ret = ath10k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr,
++				    tid, buf_size);
++	if (ret) {
++		ath10k_warn(ar, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n",
++			    arsta->arvif->vdev_id, sta->addr, tid, buf_size);
++	}
++
++	ret = count;
++out:
++	mutex_unlock(&ar->conf_mutex);
++	return ret;
++}
++
++static const struct file_operations fops_addba = {
++	.write = ath10k_dbg_sta_write_addba,
++	.open = simple_open,
++	.owner = THIS_MODULE,
++	.llseek = default_llseek,
++};
++
++static ssize_t ath10k_dbg_sta_write_addba_resp(struct file *file,
++					       const char __user *user_buf,
++					       size_t count, loff_t *ppos)
++{
++	struct ieee80211_sta *sta = file->private_data;
++	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
++	struct ath10k *ar = arsta->arvif->ar;
++	u32 tid, status;
++	int ret;
++	char buf[64];
++
++	simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
++
++	/* make sure that buf is null terminated */
++	buf[sizeof(buf) - 1] = '\0';
++
++	ret = sscanf(buf, "%u %u", &tid, &status);
++	if (ret != 2)
++		return -EINVAL;
++
++	/* Valid TID values are 0 through 15 */
++	if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
++		return -EINVAL;
++
++	mutex_lock(&ar->conf_mutex);
++	if ((ar->state != ATH10K_STATE_ON) ||
++	    (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
++		ret = count;
++		goto out;
++	}
++
++	ret = ath10k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr,
++					tid, status);
++	if (ret) {
++		ath10k_warn(ar, "failed to send addba response: vdev_id %u peer %pM tid %u status %u\n",
++			    arsta->arvif->vdev_id, sta->addr, tid, status);
++	}
++	ret = count;
++out:
++	mutex_unlock(&ar->conf_mutex);
++	return ret;
++}
++
++static const struct file_operations fops_addba_resp = {
++	.write = ath10k_dbg_sta_write_addba_resp,
++	.open = simple_open,
++	.owner = THIS_MODULE,
++	.llseek = default_llseek,
++};
++
++static ssize_t ath10k_dbg_sta_write_delba(struct file *file,
++					  const char __user *user_buf,
++					  size_t count, loff_t *ppos)
++{
++	struct ieee80211_sta *sta = file->private_data;
++	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
++	struct ath10k *ar = arsta->arvif->ar;
++	u32 tid, initiator, reason;
++	int ret;
++	char buf[64];
++
++	simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
++
++	/* make sure that buf is null terminated */
++	buf[sizeof(buf) - 1] = '\0';
++
++	ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason);
++	if (ret != 3)
++		return -EINVAL;
++
++	/* Valid TID values are 0 through 15 */
++	if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
++		return -EINVAL;
++
++	mutex_lock(&ar->conf_mutex);
++	if ((ar->state != ATH10K_STATE_ON) ||
++	    (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
++		ret = count;
++		goto out;
++	}
++
++	ret = ath10k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr,
++				    tid, initiator, reason);
++	if (ret) {
++		ath10k_warn(ar, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n",
++			    arsta->arvif->vdev_id, sta->addr, tid, initiator,
++			    reason);
++	}
++	ret = count;
++out:
++	mutex_unlock(&ar->conf_mutex);
++	return ret;
++}
++
++static const struct file_operations fops_delba = {
++	.write = ath10k_dbg_sta_write_delba,
++	.open = simple_open,
++	.owner = THIS_MODULE,
++	.llseek = default_llseek,
++};
++
++void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
++			    struct ieee80211_sta *sta, struct dentry *dir)
++{
++	debugfs_create_file("aggr_mode", S_IRUGO | S_IWUSR, dir, sta,
++			    &fops_aggr_mode);
++	debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba);
++	debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp);
++	debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba);
++}
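[editor's note] With aggr_mode switched to manual (write 1 to the aggr_mode file), BA sessions can be driven by hand through these per-station files. A small usage sketch; the mac80211 per-station debugfs path (phy0/netdev:wlan0/stations/<mac>) and the peer address are illustrative assumptions:

	#include <stdio.h>

	/* request an ADDBA for TID 0 with a 64-frame buffer */
	int main(void)
	{
		FILE *f = fopen("/sys/kernel/debug/ieee80211/phy0/"
				"netdev:wlan0/stations/aa:bb:cc:dd:ee:ff/addba",
				"w");

		if (!f)
			return 1;
		fprintf(f, "0 64\n");	/* "tid buf_size", per sscanf above */
		fclose(f);
		return 0;
	}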
+--- /dev/null
++++ b/drivers/net/wireless/ath/ath10k/hw.c
+@@ -0,0 +1,58 @@
++/*
++ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#include <linux/types.h>
++#include "hw.h"
++
++const struct ath10k_hw_regs qca988x_regs = {
++	.rtc_state_cold_reset_mask	= 0x00000400,
++	.rtc_soc_base_address		= 0x00004000,
++	.rtc_wmac_base_address		= 0x00005000,
++	.soc_core_base_address		= 0x00009000,
++	.ce_wrapper_base_address	= 0x00057000,
++	.ce0_base_address		= 0x00057400,
++	.ce1_base_address		= 0x00057800,
++	.ce2_base_address		= 0x00057c00,
++	.ce3_base_address		= 0x00058000,
++	.ce4_base_address		= 0x00058400,
++	.ce5_base_address		= 0x00058800,
++	.ce6_base_address		= 0x00058c00,
++	.ce7_base_address		= 0x00059000,
++	.soc_reset_control_si0_rst_mask	= 0x00000001,
++	.soc_reset_control_ce_rst_mask	= 0x00040000,
++	.soc_chip_id_address		= 0x00ec,
++	.scratch_3_address		= 0x0030,
++};
++
++const struct ath10k_hw_regs qca6174_regs = {
++	.rtc_state_cold_reset_mask	= 0x00002000,
++	.rtc_soc_base_address		= 0x00000800,
++	.rtc_wmac_base_address		= 0x00001000,
++	.soc_core_base_address		= 0x0003a000,
++	.ce_wrapper_base_address	= 0x00034000,
++	.ce0_base_address		= 0x00034400,
++	.ce1_base_address		= 0x00034800,
++	.ce2_base_address		= 0x00034c00,
++	.ce3_base_address		= 0x00035000,
++	.ce4_base_address		= 0x00035400,
++	.ce5_base_address		= 0x00035800,
++	.ce6_base_address		= 0x00035c00,
++	.ce7_base_address		= 0x00036000,
++	.soc_reset_control_si0_rst_mask	= 0x00000000,
++	.soc_reset_control_ce_rst_mask	= 0x00000001,
++	.soc_chip_id_address		= 0x000f0,
++	.scratch_3_address		= 0x0028,
++};
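[editor's note] The two tables carry the same register map for different chips; the driver picks one at probe time based on the detected hardware revision. A hedged sketch of that selection, mirroring how core init is expected to bind a table to ar->regs (the exact function is illustrative, not part of this hunk):

	/* illustrative: bind the right register map to the device context */
	static int ath10k_hw_init_regs(struct ath10k *ar)
	{
		switch (ar->hw_rev) {
		case ATH10K_HW_QCA988X:
			ar->regs = &qca988x_regs;
			break;
		case ATH10K_HW_QCA6174:
			ar->regs = &qca6174_regs;
			break;
		default:
			return -ENOTSUPP;
		}
		return 0;
	}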
+--- /dev/null
++++ b/drivers/net/wireless/ath/ath10k/thermal.c
+@@ -0,0 +1,244 @@
++/*
++ * Copyright (c) 2014 Qualcomm Atheros, Inc.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++
++#include <linux/device.h>
++#include <linux/sysfs.h>
++#include <linux/thermal.h>
++#include <linux/hwmon.h>
++#include <linux/hwmon-sysfs.h>
++#include "core.h"
++#include "debug.h"
++#include "wmi-ops.h"
++
++static int ath10k_thermal_get_active_vifs(struct ath10k *ar,
++					  enum wmi_vdev_type type)
++{
++	struct ath10k_vif *arvif;
++	int count = 0;
++
++	lockdep_assert_held(&ar->conf_mutex);
++
++	list_for_each_entry(arvif, &ar->arvifs, list) {
++		if (!arvif->is_started)
++			continue;
++
++		if (!arvif->is_up)
++			continue;
++
++		if (arvif->vdev_type != type)
++			continue;
++
++		count++;
++	}
++	return count;
++}
++
++static int ath10k_thermal_get_max_dutycycle(struct thermal_cooling_device *cdev,
++					    unsigned long *state)
++{
++	*state = ATH10K_QUIET_DUTY_CYCLE_MAX;
++
++	return 0;
++}
++
++static int ath10k_thermal_get_cur_dutycycle(struct thermal_cooling_device *cdev,
++					    unsigned long *state)
++{
++	struct ath10k *ar = cdev->devdata;
++
++	mutex_lock(&ar->conf_mutex);
++	*state = ar->thermal.duty_cycle;
++	mutex_unlock(&ar->conf_mutex);
++
++	return 0;
++}
++
++static int ath10k_thermal_set_cur_dutycycle(struct thermal_cooling_device *cdev,
++					    unsigned long duty_cycle)
++{
++	struct ath10k *ar = cdev->devdata;
++	u32 period, duration, enabled;
++	int num_bss, ret = 0;
++
++	mutex_lock(&ar->conf_mutex);
++	if (ar->state != ATH10K_STATE_ON) {
++		ret = -ENETDOWN;
++		goto out;
++	}
++
++	if (duty_cycle > ATH10K_QUIET_DUTY_CYCLE_MAX) {
++		ath10k_warn(ar, "duty cycle %ld is exceeding the limit %d\n",
++			    duty_cycle, ATH10K_QUIET_DUTY_CYCLE_MAX);
++		ret = -EINVAL;
++		goto out;
++	}
++	/* TODO: Right now, thermal mitigation is handled only for single/multi
++	 * vif AP mode. Since quiet param is not validated in STA mode, it needs
++	 * to be investigated further to handle multi STA and multi-vif (AP+STA)
++	 * mode properly.
++	 */
++	num_bss = ath10k_thermal_get_active_vifs(ar, WMI_VDEV_TYPE_AP);
++	if (!num_bss) {
++		ath10k_warn(ar, "no active AP interfaces\n");
++		ret = -ENETDOWN;
++		goto out;
++	}
++	period = max(ATH10K_QUIET_PERIOD_MIN,
++		     (ATH10K_QUIET_PERIOD_DEFAULT / num_bss));
++	duration = (period * duty_cycle) / 100;
++	enabled = duration ? 1 : 0;
++
++	ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
++					     ATH10K_QUIET_START_OFFSET,
++					     enabled);
++	if (ret) {
++		ath10k_warn(ar, "failed to set quiet mode period %u duration %u enabled %u ret %d\n",
++			    period, duration, enabled, ret);
++		goto out;
++	}
++	ar->thermal.duty_cycle = duty_cycle;
++out:
++	mutex_unlock(&ar->conf_mutex);
++	return ret;
++}
++
++static struct thermal_cooling_device_ops ath10k_thermal_ops = {
++	.get_max_state = ath10k_thermal_get_max_dutycycle,
++	.get_cur_state = ath10k_thermal_get_cur_dutycycle,
++	.set_cur_state = ath10k_thermal_set_cur_dutycycle,
++};
++
++static ssize_t ath10k_thermal_show_temp(struct device *dev,
++					struct device_attribute *attr,
++					char *buf)
++{
++	struct ath10k *ar = dev_get_drvdata(dev);
++	int ret, temperature;
++
++	mutex_lock(&ar->conf_mutex);
++
++	/* Can't get temperature when the card is off */
++	if (ar->state != ATH10K_STATE_ON) {
++		ret = -ENETDOWN;
++		goto out;
++	}
++
++	reinit_completion(&ar->thermal.wmi_sync);
++	ret = ath10k_wmi_pdev_get_temperature(ar);
++	if (ret) {
++		ath10k_warn(ar, "failed to read temperature %d\n", ret);
++		goto out;
++	}
++
++	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
++		ret = -ESHUTDOWN;
++		goto out;
++	}
++
++	ret = wait_for_completion_timeout(&ar->thermal.wmi_sync,
++					  ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
++	if (ret == 0) {
++		ath10k_warn(ar, "failed to synchronize thermal read\n");
++		ret = -ETIMEDOUT;
++		goto out;
++	}
++
++	spin_lock_bh(&ar->data_lock);
++	temperature = ar->thermal.temperature;
++	spin_unlock_bh(&ar->data_lock);
++
++	/* display in millidegree celsius */
++	ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
++out:
++	mutex_unlock(&ar->conf_mutex);
++	return ret;
++}
++
++void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature)
++{
++	spin_lock_bh(&ar->data_lock);
++	ar->thermal.temperature = temperature;
++	spin_unlock_bh(&ar->data_lock);
++	complete(&ar->thermal.wmi_sync);
++}
++
++static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ath10k_thermal_show_temp,
++			  NULL, 0);
++
++static struct attribute *ath10k_hwmon_attrs[] = {
++	&sensor_dev_attr_temp1_input.dev_attr.attr,
++	NULL,
++};
++ATTRIBUTE_GROUPS(ath10k_hwmon);
++
++int ath10k_thermal_register(struct ath10k *ar)
++{
++	struct thermal_cooling_device *cdev;
++	struct device *hwmon_dev;
++	int ret;
++
++	cdev = thermal_cooling_device_register("ath10k_thermal", ar,
++					       &ath10k_thermal_ops);
++
++	if (IS_ERR(cdev)) {
++		ath10k_err(ar, "failed to setup thermal device result: %ld\n",
++			   PTR_ERR(cdev));
++		return -EINVAL;
++	}
++
++	ret = sysfs_create_link(&ar->dev->kobj, &cdev->device.kobj,
++				"cooling_device");
++	if (ret) {
++		ath10k_err(ar, "failed to create thermal symlink\n");
++		goto err_cooling_destroy;
++	}
++
++	ar->thermal.cdev = cdev;
++
++	/* Do not register hwmon device when temperature reading is not
++	 * supported by firmware
++	 */
++	if (ar->wmi.op_version != ATH10K_FW_WMI_OP_VERSION_10_2_4)
++		return 0;
++
++	/* Avoid linking error on devm_hwmon_device_register_with_groups, I
++	 * guess linux/hwmon.h is missing proper stubs. */
++	if (!config_enabled(CONFIG_HWMON))
++		return 0;
++
++	hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev,
++							   "ath10k_hwmon", ar,
++							   ath10k_hwmon_groups);
++	if (IS_ERR(hwmon_dev)) {
++		ath10k_err(ar, "failed to register hwmon device: %ld\n",
++			   PTR_ERR(hwmon_dev));
++		ret = -EINVAL;
++		goto err_remove_link;
++	}
++	return 0;
++
++err_remove_link:
++	sysfs_remove_link(&ar->dev->kobj, "cooling_device");
++err_cooling_destroy:
++	thermal_cooling_device_unregister(cdev);
++	return ret;
++}
++
++void ath10k_thermal_unregister(struct ath10k *ar)
++{
++	thermal_cooling_device_unregister(ar->thermal.cdev);
++	sysfs_remove_link(&ar->dev->kobj, "cooling_device");
++}
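[editor's note] The cooling state maps onto a WMI quiet-time configuration: the default period is split across active AP vdevs (floored at the minimum) and the duty cycle picks what fraction of each period the radio stays quiet. A worked example of the arithmetic in ath10k_thermal_set_cur_dutycycle(), using the defaults from the header below with illustrative inputs:

	#include <stdio.h>

	#define QUIET_PERIOD_DEFAULT	100	/* ms */
	#define QUIET_PERIOD_MIN	25	/* ms */

	int main(void)
	{
		unsigned int num_bss = 2, duty_cycle = 40; /* example inputs */
		unsigned int period, duration;

		period = QUIET_PERIOD_DEFAULT / num_bss;	/* 50 ms */
		if (period < QUIET_PERIOD_MIN)
			period = QUIET_PERIOD_MIN;
		duration = (period * duty_cycle) / 100;		/* 20 ms */

		printf("period %u ms, quiet %u ms per period\n",
		       period, duration);
		return 0;
	}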
+--- /dev/null
++++ b/drivers/net/wireless/ath/ath10k/thermal.h
+@@ -0,0 +1,58 @@
++/*
++ * Copyright (c) 2014 Qualcomm Atheros, Inc.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++#ifndef _THERMAL_
++#define _THERMAL_
++
++#define ATH10K_QUIET_PERIOD_DEFAULT	100
++#define ATH10K_QUIET_PERIOD_MIN		25
++#define ATH10K_QUIET_START_OFFSET	10
++#define ATH10K_QUIET_DUTY_CYCLE_MAX	70
++#define ATH10K_HWMON_NAME_LEN		15
++#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ)
++
++struct ath10k_thermal {
++	struct thermal_cooling_device *cdev;
++	struct completion wmi_sync;
++
++	/* protected by conf_mutex */
++	u32 duty_cycle;
++	/* temperature value in degrees Celsius
++	 * protected by data_lock
++	 */
++	int temperature;
++};
++
++#ifdef CONFIG_THERMAL
++int ath10k_thermal_register(struct ath10k *ar);
++void ath10k_thermal_unregister(struct ath10k *ar);
++void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature);
++#else
++static inline int ath10k_thermal_register(struct ath10k *ar)
++{
++	return 0;
++}
++
++static inline void ath10k_thermal_unregister(struct ath10k *ar)
++{
++}
++
++static inline void ath10k_thermal_event_temperature(struct ath10k *ar,
++						    int temperature)
++{
++}
++
++#endif
++#endif /* _THERMAL_ */
+--- /dev/null
|
|
++++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
|
|
+@@ -0,0 +1,1063 @@
|
|
++/*
|
|
++ * Copyright (c) 2005-2011 Atheros Communications Inc.
|
|
++ * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
|
|
++ *
|
|
++ * Permission to use, copy, modify, and/or distribute this software for any
|
|
++ * purpose with or without fee is hereby granted, provided that the above
|
|
++ * copyright notice and this permission notice appear in all copies.
|
|
++ *
|
|
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
|
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
|
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
|
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
|
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
|
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
|
++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
++ */
|
|
++
|
|
++#ifndef _WMI_OPS_H_
|
|
++#define _WMI_OPS_H_
|
|
++
|
|
++struct ath10k;
|
|
++struct sk_buff;
|
|
++
|
|
++struct wmi_ops {
|
|
++ void (*rx)(struct ath10k *ar, struct sk_buff *skb);
|
|
++ void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
|
|
++
|
|
++ int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct wmi_scan_ev_arg *arg);
|
|
++ int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct wmi_mgmt_rx_ev_arg *arg);
|
|
++ int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct wmi_ch_info_ev_arg *arg);
|
|
++ int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct wmi_vdev_start_ev_arg *arg);
|
|
++ int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct wmi_peer_kick_ev_arg *arg);
|
|
++ int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct wmi_swba_ev_arg *arg);
|
|
++ int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct wmi_phyerr_ev_arg *arg);
|
|
++ int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct wmi_svc_rdy_ev_arg *arg);
|
|
++ int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct wmi_rdy_ev_arg *arg);
|
|
++ int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct ath10k_fw_stats *stats);
|
|
++
|
|
++ struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
|
|
++ struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
|
|
++ struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
|
|
++ u16 rd5g, u16 ctl2g, u16 ctl5g,
|
|
++ enum wmi_dfs_region dfs_reg);
|
|
++ struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
|
|
++ u32 value);
|
|
++ struct sk_buff *(*gen_init)(struct ath10k *ar);
|
|
++ struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
|
|
++ const struct wmi_start_scan_arg *arg);
|
|
++ struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
|
|
++ const struct wmi_stop_scan_arg *arg);
|
|
++ struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
|
|
++ enum wmi_vdev_type type,
|
|
++ enum wmi_vdev_subtype subtype,
|
|
++ const u8 macaddr[ETH_ALEN]);
|
|
++ struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
|
|
++ struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
|
|
++ const struct wmi_vdev_start_request_arg *arg,
|
|
++ bool restart);
|
|
++ struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
|
|
++ struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
|
|
++ const u8 *bssid);
|
|
++ struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
|
|
++ struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
|
|
++ u32 param_id, u32 param_value);
|
|
++ struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
|
|
++ const struct wmi_vdev_install_key_arg *arg);
|
|
++ struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
|
|
++ const struct wmi_vdev_spectral_conf_arg *arg);
|
|
++ struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
|
|
++ u32 trigger, u32 enable);
|
|
++ struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
|
|
++ const struct wmi_wmm_params_all_arg *arg);
|
|
++ struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
|
|
++ const u8 peer_addr[ETH_ALEN]);
|
|
++ struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
|
|
++ const u8 peer_addr[ETH_ALEN]);
|
|
++ struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
|
|
++ const u8 peer_addr[ETH_ALEN],
|
|
++ u32 tid_bitmap);
|
|
++ struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
|
|
++ const u8 *peer_addr,
|
|
++ enum wmi_peer_param param_id,
|
|
++ u32 param_value);
|
|
++ struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
|
|
++ const struct wmi_peer_assoc_complete_arg *arg);
|
|
++ struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
|
|
++ enum wmi_sta_ps_mode psmode);
|
|
++ struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
|
|
++ enum wmi_sta_powersave_param param_id,
|
|
++ u32 value);
|
|
++ struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
|
|
++ const u8 *mac,
|
|
++ enum wmi_ap_ps_peer_param param_id,
|
|
++ u32 value);
|
|
++ struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
|
|
++ const struct wmi_scan_chan_list_arg *arg);
|
|
++ struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
|
|
++ const void *bcn, size_t bcn_len,
|
|
++ u32 bcn_paddr, bool dtim_zero,
|
|
++ bool deliver_cab);
|
|
++ struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
|
|
++ const struct wmi_wmm_params_all_arg *arg);
|
|
++ struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
|
|
++ struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
|
|
++ enum wmi_force_fw_hang_type type,
|
|
++ u32 delay_ms);
|
|
++ struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
|
|
++ struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
|
|
++ u32 log_level);
|
|
++ struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
|
|
++ struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
|
|
++ struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
|
|
++ u32 period, u32 duration,
|
|
++ u32 next_offset,
|
|
++ u32 enabled);
|
|
++ struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
|
|
++ struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
|
|
++ const u8 *mac);
|
|
++ struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
|
|
++ const u8 *mac, u32 tid, u32 buf_size);
|
|
++ struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
|
|
++ const u8 *mac, u32 tid,
|
|
++ u32 status);
|
|
++ struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
|
|
++ const u8 *mac, u32 tid, u32 initiator,
|
|
++ u32 reason);
|
|
++ struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
|
|
++ u32 tim_ie_offset, struct sk_buff *bcn,
|
|
++ u32 prb_caps, u32 prb_erp,
|
|
++ void *prb_ies, size_t prb_ies_len);
|
|
++ struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
|
|
++ struct sk_buff *bcn);
|
|
++ struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
|
|
++ const u8 *p2p_ie);
|
|
++ struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
|
|
++ const u8 peer_addr[ETH_ALEN],
|
|
++ const struct wmi_sta_uapsd_auto_trig_arg *args,
|
|
++ u32 num_ac);
|
|
++ struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
|
|
++ const struct wmi_sta_keepalive_arg *arg);
|
|
++};
|
|
++
|
|
++int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
|
|
++
|
|
++static inline int
|
|
++ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
|
|
++{
|
|
++ if (WARN_ON_ONCE(!ar->wmi.ops->rx))
|
|
++ return -EOPNOTSUPP;
|
|
++
|
|
++ ar->wmi.ops->rx(ar, skb);
|
|
++ return 0;
|
|
++}
|
|
++
|
|
++static inline int
|
|
++ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
|
|
++ size_t len)
|
|
++{
|
|
++ if (!ar->wmi.ops->map_svc)
|
|
++ return -EOPNOTSUPP;
|
|
++
|
|
++ ar->wmi.ops->map_svc(in, out, len);
|
|
++ return 0;
|
|
++}
|
|
++
|
|
++static inline int
|
|
++ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct wmi_scan_ev_arg *arg)
|
|
++{
|
|
++ if (!ar->wmi.ops->pull_scan)
|
|
++ return -EOPNOTSUPP;
|
|
++
|
|
++ return ar->wmi.ops->pull_scan(ar, skb, arg);
|
|
++}
|
|
++
|
|
++static inline int
|
|
++ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct wmi_mgmt_rx_ev_arg *arg)
|
|
++{
|
|
++ if (!ar->wmi.ops->pull_mgmt_rx)
|
|
++ return -EOPNOTSUPP;
|
|
++
|
|
++ return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
|
|
++}
|
|
++
|
|
++static inline int
|
|
++ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct wmi_ch_info_ev_arg *arg)
|
|
++{
|
|
++ if (!ar->wmi.ops->pull_ch_info)
|
|
++ return -EOPNOTSUPP;
|
|
++
|
|
++ return ar->wmi.ops->pull_ch_info(ar, skb, arg);
|
|
++}
|
|
++
|
|
++static inline int
|
|
++ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct wmi_vdev_start_ev_arg *arg)
|
|
++{
|
|
++ if (!ar->wmi.ops->pull_vdev_start)
|
|
++ return -EOPNOTSUPP;
|
|
++
|
|
++ return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
|
|
++}
|
|
++
|
|
++static inline int
|
|
++ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct wmi_peer_kick_ev_arg *arg)
|
|
++{
|
|
++ if (!ar->wmi.ops->pull_peer_kick)
|
|
++ return -EOPNOTSUPP;
|
|
++
|
|
++ return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
|
|
++}
|
|
++
|
|
++static inline int
|
|
++ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct wmi_swba_ev_arg *arg)
|
|
++{
|
|
++ if (!ar->wmi.ops->pull_swba)
|
|
++ return -EOPNOTSUPP;
|
|
++
|
|
++ return ar->wmi.ops->pull_swba(ar, skb, arg);
|
|
++}
|
|
++
|
|
++static inline int
|
|
++ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct wmi_phyerr_ev_arg *arg)
|
|
++{
|
|
++ if (!ar->wmi.ops->pull_phyerr)
|
|
++ return -EOPNOTSUPP;
|
|
++
|
|
++ return ar->wmi.ops->pull_phyerr(ar, skb, arg);
|
|
++}
|
|
++
|
|
++static inline int
|
|
++ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct wmi_svc_rdy_ev_arg *arg)
|
|
++{
|
|
++ if (!ar->wmi.ops->pull_svc_rdy)
|
|
++ return -EOPNOTSUPP;
|
|
++
|
|
++ return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
|
|
++}
|
|
++
|
|
++static inline int
|
|
++ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct wmi_rdy_ev_arg *arg)
|
|
++{
|
|
++ if (!ar->wmi.ops->pull_rdy)
|
|
++ return -EOPNOTSUPP;
|
|
++
|
|
++ return ar->wmi.ops->pull_rdy(ar, skb, arg);
|
|
++}
|
|
++
|
|
++static inline int
|
|
++ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
|
|
++ struct ath10k_fw_stats *stats)
|
|
++{
|
|
++ if (!ar->wmi.ops->pull_fw_stats)
|
|
++ return -EOPNOTSUPP;
|
|
++
|
|
++ return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
|
|
++}
|
|
++
|
|
++static inline int
++ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
++{
++	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
++	struct sk_buff *skb;
++	int ret;
++
++	if (!ar->wmi.ops->gen_mgmt_tx)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
++	if (ret)
++		return ret;
++
++	/* FIXME There's no ACK event for Management Tx. This probably
++	 * shouldn't be called here either. */
++	info->flags |= IEEE80211_TX_STAT_ACK;
++	ieee80211_tx_status_irqsafe(ar->hw, msdu);
++
++	return 0;
++}
++
++static inline int
++ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
++			      u16 ctl2g, u16 ctl5g,
++			      enum wmi_dfs_region dfs_reg)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_pdev_set_rd)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
++					   dfs_reg);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb,
++				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
++}
++
++static inline int
++ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_pdev_suspend)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
++}
++
++static inline int
++ath10k_wmi_pdev_resume_target(struct ath10k *ar)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_pdev_resume)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_pdev_resume(ar);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
++}
++
++static inline int
++ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_pdev_set_param)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
++}
++
++static inline int
++ath10k_wmi_cmd_init(struct ath10k *ar)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_init)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_init(ar);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
++}
++
++static inline int
++ath10k_wmi_start_scan(struct ath10k *ar,
++		      const struct wmi_start_scan_arg *arg)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_start_scan)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_start_scan(ar, arg);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
++}
++
++static inline int
++ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_stop_scan)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
++}
++
++static inline int
++ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
++		       enum wmi_vdev_type type,
++		       enum wmi_vdev_subtype subtype,
++		       const u8 macaddr[ETH_ALEN])
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_vdev_create)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
++}
++
++static inline int
++ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_vdev_delete)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
++}
++
++static inline int
++ath10k_wmi_vdev_start(struct ath10k *ar,
++		      const struct wmi_vdev_start_request_arg *arg)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_vdev_start)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb,
++				   ar->wmi.cmd->vdev_start_request_cmdid);
++}
++
++static inline int
++ath10k_wmi_vdev_restart(struct ath10k *ar,
++			const struct wmi_vdev_start_request_arg *arg)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_vdev_start)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb,
++				   ar->wmi.cmd->vdev_restart_request_cmdid);
++}
++
++static inline int
++ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_vdev_stop)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
++}
++
++static inline int
++ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_vdev_up)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
++}
++
++static inline int
++ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_vdev_down)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
++}
++
++static inline int
++ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
++			  u32 param_value)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_vdev_set_param)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
++					      param_value);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
++}
++
++static inline int
++ath10k_wmi_vdev_install_key(struct ath10k *ar,
++			    const struct wmi_vdev_install_key_arg *arg)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_vdev_install_key)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb,
++				   ar->wmi.cmd->vdev_install_key_cmdid);
++}
++
++static inline int
++ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
++			      const struct wmi_vdev_spectral_conf_arg *arg)
++{
++	struct sk_buff *skb;
++	u32 cmd_id;
++
++	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
++	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
++}
++
++static inline int
++ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
++				u32 enable)
++{
++	struct sk_buff *skb;
++	u32 cmd_id;
++
++	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
++						    enable);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
++	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
++}
++
++static inline int
++ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
++			  const u8 peer_addr[ETH_ALEN],
++			  const struct wmi_sta_uapsd_auto_trig_arg *args,
++			  u32 num_ac)
++{
++	struct sk_buff *skb;
++	u32 cmd_id;
++
++	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
++					      num_ac);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
++	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
++}
++
++static inline int
++ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
++			 const struct wmi_wmm_params_all_arg *arg)
++{
++	struct sk_buff *skb;
++	u32 cmd_id;
++
++	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
++	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
++}
++
++static inline int
++ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
++		       const u8 peer_addr[ETH_ALEN])
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_peer_create)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
++}
++
++static inline int
++ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
++		       const u8 peer_addr[ETH_ALEN])
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_peer_delete)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
++}
++
++static inline int
++ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
++		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_peer_flush)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
++}
++
++static inline int
++ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
++			  enum wmi_peer_param param_id, u32 param_value)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_peer_set_param)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
++					      param_value);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
++}
++
++static inline int
++ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
++		      enum wmi_sta_ps_mode psmode)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_set_psmode)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb,
++				   ar->wmi.cmd->sta_powersave_mode_cmdid);
++}
++
++static inline int
++ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
++			    enum wmi_sta_powersave_param param_id, u32 value)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_set_sta_ps)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb,
++				   ar->wmi.cmd->sta_powersave_param_cmdid);
++}
++
++static inline int
++ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
++			   enum wmi_ap_ps_peer_param param_id, u32 value)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_set_ap_ps)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb,
++				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
++}
++
++static inline int
++ath10k_wmi_scan_chan_list(struct ath10k *ar,
++			  const struct wmi_scan_chan_list_arg *arg)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_scan_chan_list)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
++}
++
++static inline int
++ath10k_wmi_peer_assoc(struct ath10k *ar,
++		      const struct wmi_peer_assoc_complete_arg *arg)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_peer_assoc)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
++}
++
++static inline int
++ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
++				  const void *bcn, size_t bcn_len,
++				  u32 bcn_paddr, bool dtim_zero,
++				  bool deliver_cab)
++{
++	struct sk_buff *skb;
++	int ret;
++
++	if (!ar->wmi.ops->gen_beacon_dma)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
++					  dtim_zero, deliver_cab);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
++					 ar->wmi.cmd->pdev_send_bcn_cmdid);
++	if (ret) {
++		dev_kfree_skb(skb);
++		return ret;
++	}
++
++	return 0;
++}
++
++static inline int
++ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
++			       const struct wmi_wmm_params_all_arg *arg)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_pdev_set_wmm)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb,
++				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
++}
++
++static inline int
++ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_request_stats)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
++}
++
++static inline int
++ath10k_wmi_force_fw_hang(struct ath10k *ar,
++			 enum wmi_force_fw_hang_type type, u32 delay_ms)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_force_fw_hang)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
++}
++
++static inline int
++ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_dbglog_cfg)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
++}
++
++static inline int
++ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_pktlog_enable)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
++}
++
++static inline int
++ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_pktlog_disable)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_pktlog_disable(ar);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb,
++				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
++}
++
++static inline int
++ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
++			       u32 next_offset, u32 enabled)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
++						   next_offset, enabled);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb,
++				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
++}
++
++static inline int
++ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_pdev_get_temperature)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb,
++				   ar->wmi.cmd->pdev_get_temperature_cmdid);
++}
++
++static inline int
++ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_addba_clear_resp)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb,
++				   ar->wmi.cmd->addba_clear_resp_cmdid);
++}
++
++static inline int
++ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
++		      u32 tid, u32 buf_size)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_addba_send)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb,
++				   ar->wmi.cmd->addba_send_cmdid);
++}
++
++static inline int
++ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
++			  u32 tid, u32 status)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_addba_set_resp)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb,
++				   ar->wmi.cmd->addba_set_resp_cmdid);
++}
++
++static inline int
++ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
++		      u32 tid, u32 initiator, u32 reason)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_delba_send)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
++					  reason);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb,
++				   ar->wmi.cmd->delba_send_cmdid);
++}
++
++static inline int
++ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
++		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
++		    void *prb_ies, size_t prb_ies_len)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_bcn_tmpl)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
++					prb_caps, prb_erp, prb_ies,
++					prb_ies_len);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
++}
++
++static inline int
++ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_prb_tmpl)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
++}
++
++static inline int
++ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
++{
++	struct sk_buff *skb;
++
++	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
++}
++
++static inline int
++ath10k_wmi_sta_keepalive(struct ath10k *ar,
++			 const struct wmi_sta_keepalive_arg *arg)
++{
++	struct sk_buff *skb;
++	u32 cmd_id;
++
++	if (!ar->wmi.ops->gen_sta_keepalive)
++		return -EOPNOTSUPP;
++
++	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
++	if (IS_ERR(skb))
++		return PTR_ERR(skb);
++
++	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
++	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
++}
++
++#endif
+--- /dev/null
++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+@@ -0,0 +1,2796 @@
++/*
++ * Copyright (c) 2005-2011 Atheros Communications Inc.
++ * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++#include "core.h"
++#include "debug.h"
++#include "hw.h"
++#include "wmi.h"
++#include "wmi-ops.h"
++#include "wmi-tlv.h"
++
++/***************/
++/* TLV helpers */
++/**************/
++
++struct wmi_tlv_policy {
++	size_t min_len;
++};
++
++static const struct wmi_tlv_policy wmi_tlv_policies[] = {
++	[WMI_TLV_TAG_ARRAY_BYTE]
++		= { .min_len = sizeof(u8) },
++	[WMI_TLV_TAG_ARRAY_UINT32]
++		= { .min_len = sizeof(u32) },
++	[WMI_TLV_TAG_STRUCT_SCAN_EVENT]
++		= { .min_len = sizeof(struct wmi_scan_event) },
++	[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
++		= { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) },
++	[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT]
++		= { .min_len = sizeof(struct wmi_chan_info_event) },
++	[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT]
++		= { .min_len = sizeof(struct wmi_vdev_start_response_event) },
++	[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT]
++		= { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
++	[WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT]
++		= { .min_len = sizeof(struct wmi_host_swba_event) },
++	[WMI_TLV_TAG_STRUCT_TIM_INFO]
++		= { .min_len = sizeof(struct wmi_tim_info) },
++	[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO]
++		= { .min_len = sizeof(struct wmi_p2p_noa_info) },
++	[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]
++		= { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) },
++	[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]
++		= { .min_len = sizeof(struct hal_reg_capabilities) },
++	[WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ]
++		= { .min_len = sizeof(struct wlan_host_mem_req) },
++	[WMI_TLV_TAG_STRUCT_READY_EVENT]
++		= { .min_len = sizeof(struct wmi_tlv_rdy_ev) },
++	[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]
++		= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
++	[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
++		= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
++};
++
++static int
++ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
++		    int (*iter)(struct ath10k *ar, u16 tag, u16 len,
++				const void *ptr, void *data),
++		    void *data)
++{
++	const void *begin = ptr;
++	const struct wmi_tlv *tlv;
++	u16 tlv_tag, tlv_len;
++	int ret;
++
++	while (len > 0) {
++		if (len < sizeof(*tlv)) {
++			ath10k_dbg(ar, ATH10K_DBG_WMI,
++				   "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
++				   ptr - begin, len, sizeof(*tlv));
++			return -EINVAL;
++		}
++
++		tlv = ptr;
++		tlv_tag = __le16_to_cpu(tlv->tag);
++		tlv_len = __le16_to_cpu(tlv->len);
++		ptr += sizeof(*tlv);
++		len -= sizeof(*tlv);
++
++		if (tlv_len > len) {
++			ath10k_dbg(ar, ATH10K_DBG_WMI,
++				   "wmi tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
++				   tlv_tag, ptr - begin, len, tlv_len);
++			return -EINVAL;
++		}
++
++		if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
++		    wmi_tlv_policies[tlv_tag].min_len &&
++		    wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
++			ath10k_dbg(ar, ATH10K_DBG_WMI,
++				   "wmi tlv parse failure of tag %hhu at byte %zd (%hhu bytes is less than min length %zu)\n",
++				   tlv_tag, ptr - begin, tlv_len,
++				   wmi_tlv_policies[tlv_tag].min_len);
++			return -EINVAL;
++		}
++
++		ret = iter(ar, tlv_tag, tlv_len, ptr, data);
++		if (ret)
++			return ret;
++
++		ptr += tlv_len;
++		len -= tlv_len;
++	}
++
++	return 0;
++}
++
++static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len,
++				     const void *ptr, void *data)
++{
++	const void **tb = data;
++
++	if (tag < WMI_TLV_TAG_MAX)
++		tb[tag] = ptr;
++
++	return 0;
++}
++
++static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb,
++				const void *ptr, size_t len)
++{
++	return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse,
++				   (void *)tb);
++}
++
++static const void **
++ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr,
++			   size_t len, gfp_t gfp)
++{
++	const void **tb;
++	int ret;
++
++	tb = kzalloc(sizeof(*tb) * WMI_TLV_TAG_MAX, gfp);
++	if (!tb)
++		return ERR_PTR(-ENOMEM);
++
++	ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len);
++	if (ret) {
++		kfree(tb);
++		return ERR_PTR(ret);
++	}
++
++	return tb;
++}
++
++static u16 ath10k_wmi_tlv_len(const void *ptr)
++{
++	return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len);
++}
++
++/**************/
++/* TLV events */
++/**************/
++static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
++					      struct sk_buff *skb)
++{
++	const void **tb;
++	const struct wmi_tlv_bcn_tx_status_ev *ev;
++	u32 vdev_id, tx_status;
++	int ret;
++
++	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
++	if (IS_ERR(tb)) {
++		ret = PTR_ERR(tb);
++		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
++		return ret;
++	}
++
++	ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT];
++	if (!ev) {
++		kfree(tb);
++		return -EPROTO;
++	}
++
++	tx_status = __le32_to_cpu(ev->tx_status);
++	vdev_id = __le32_to_cpu(ev->vdev_id);
++
++	switch (tx_status) {
++	case WMI_TLV_BCN_TX_STATUS_OK:
++		break;
++	case WMI_TLV_BCN_TX_STATUS_XRETRY:
++	case WMI_TLV_BCN_TX_STATUS_DROP:
++	case WMI_TLV_BCN_TX_STATUS_FILTERED:
++		/* FIXME: It's probably worth telling mac80211 to stop the
++		 * interface as it is crippled.
++		 */
++		ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d",
++			    vdev_id, tx_status);
++		break;
++	}
++
++	kfree(tb);
++	return 0;
++}
++
++static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
++					  struct sk_buff *skb)
++{
++	const void **tb;
++	const struct wmi_tlv_diag_data_ev *ev;
++	const struct wmi_tlv_diag_item *item;
++	const void *data;
++	int ret, num_items, len;
++
++	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
++	if (IS_ERR(tb)) {
++		ret = PTR_ERR(tb);
++		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
++		return ret;
++	}
++
++	ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
++	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
++	if (!ev || !data) {
++		kfree(tb);
++		return -EPROTO;
++	}
++
++	num_items = __le32_to_cpu(ev->num_items);
++	len = ath10k_wmi_tlv_len(data);
++
++	while (num_items--) {
++		if (len == 0)
++			break;
++		if (len < sizeof(*item)) {
++			ath10k_warn(ar, "failed to parse diag data: can't fit item header\n");
++			break;
++		}
++
++		item = data;
++
++		if (len < sizeof(*item) + __le16_to_cpu(item->len)) {
++			ath10k_warn(ar, "failed to parse diag data: item is too long\n");
++			break;
++		}
++
++		trace_ath10k_wmi_diag_container(ar,
++						item->type,
++						__le32_to_cpu(item->timestamp),
++						__le32_to_cpu(item->code),
++						__le16_to_cpu(item->len),
++						item->payload);
++
++		len -= sizeof(*item);
++		len -= roundup(__le16_to_cpu(item->len), 4);
++
++		data += sizeof(*item);
++		data += roundup(__le16_to_cpu(item->len), 4);
++	}
++
++	if (num_items != -1 || len != 0)
++		ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n",
++			    num_items, len);
++
++	kfree(tb);
++	return 0;
++}
++
++static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
++				     struct sk_buff *skb)
++{
++	const void **tb;
++	const void *data;
++	int ret, len;
++
++	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
++	if (IS_ERR(tb)) {
++		ret = PTR_ERR(tb);
++		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
++		return ret;
++	}
++
++	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
++	if (!data) {
++		kfree(tb);
++		return -EPROTO;
++	}
++	len = ath10k_wmi_tlv_len(data);
++
++	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len);
++	trace_ath10k_wmi_diag(ar, data, len);
++
++	kfree(tb);
++	return 0;
++}
++
++/***********/
++/* TLV ops */
++/***********/
++
++static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
++{
++	struct wmi_cmd_hdr *cmd_hdr;
++	enum wmi_tlv_event_id id;
++
++	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
++	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
++
++	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
++		return;
++
++	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
++
++	switch (id) {
++	case WMI_TLV_MGMT_RX_EVENTID:
++		ath10k_wmi_event_mgmt_rx(ar, skb);
++		/* mgmt_rx() owns the skb now! */
++		return;
++	case WMI_TLV_SCAN_EVENTID:
++		ath10k_wmi_event_scan(ar, skb);
++		break;
++	case WMI_TLV_CHAN_INFO_EVENTID:
++		ath10k_wmi_event_chan_info(ar, skb);
++		break;
++	case WMI_TLV_ECHO_EVENTID:
++		ath10k_wmi_event_echo(ar, skb);
++		break;
++	case WMI_TLV_DEBUG_MESG_EVENTID:
++		ath10k_wmi_event_debug_mesg(ar, skb);
++		break;
++	case WMI_TLV_UPDATE_STATS_EVENTID:
++		ath10k_wmi_event_update_stats(ar, skb);
++		break;
++	case WMI_TLV_VDEV_START_RESP_EVENTID:
++		ath10k_wmi_event_vdev_start_resp(ar, skb);
++		break;
++	case WMI_TLV_VDEV_STOPPED_EVENTID:
++		ath10k_wmi_event_vdev_stopped(ar, skb);
++		break;
++	case WMI_TLV_PEER_STA_KICKOUT_EVENTID:
++		ath10k_wmi_event_peer_sta_kickout(ar, skb);
++		break;
++	case WMI_TLV_HOST_SWBA_EVENTID:
++		ath10k_wmi_event_host_swba(ar, skb);
++		break;
++	case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID:
++		ath10k_wmi_event_tbttoffset_update(ar, skb);
++		break;
++	case WMI_TLV_PHYERR_EVENTID:
++		ath10k_wmi_event_phyerr(ar, skb);
++		break;
++	case WMI_TLV_ROAM_EVENTID:
++		ath10k_wmi_event_roam(ar, skb);
++		break;
++	case WMI_TLV_PROFILE_MATCH:
++		ath10k_wmi_event_profile_match(ar, skb);
++		break;
++	case WMI_TLV_DEBUG_PRINT_EVENTID:
++		ath10k_wmi_event_debug_print(ar, skb);
++		break;
++	case WMI_TLV_PDEV_QVIT_EVENTID:
++		ath10k_wmi_event_pdev_qvit(ar, skb);
++		break;
++	case WMI_TLV_WLAN_PROFILE_DATA_EVENTID:
++		ath10k_wmi_event_wlan_profile_data(ar, skb);
++		break;
++	case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID:
++		ath10k_wmi_event_rtt_measurement_report(ar, skb);
++		break;
++	case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID:
++		ath10k_wmi_event_tsf_measurement_report(ar, skb);
++		break;
++	case WMI_TLV_RTT_ERROR_REPORT_EVENTID:
++		ath10k_wmi_event_rtt_error_report(ar, skb);
++		break;
++	case WMI_TLV_WOW_WAKEUP_HOST_EVENTID:
++		ath10k_wmi_event_wow_wakeup_host(ar, skb);
++		break;
++	case WMI_TLV_DCS_INTERFERENCE_EVENTID:
++		ath10k_wmi_event_dcs_interference(ar, skb);
++		break;
++	case WMI_TLV_PDEV_TPC_CONFIG_EVENTID:
++		ath10k_wmi_event_pdev_tpc_config(ar, skb);
++		break;
++	case WMI_TLV_PDEV_FTM_INTG_EVENTID:
++		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
++		break;
++	case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID:
++		ath10k_wmi_event_gtk_offload_status(ar, skb);
++		break;
++	case WMI_TLV_GTK_REKEY_FAIL_EVENTID:
++		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
++		break;
++	case WMI_TLV_TX_DELBA_COMPLETE_EVENTID:
++		ath10k_wmi_event_delba_complete(ar, skb);
++		break;
++	case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID:
++		ath10k_wmi_event_addba_complete(ar, skb);
++		break;
++	case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
++		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
++		break;
++	case WMI_TLV_SERVICE_READY_EVENTID:
++		ath10k_wmi_event_service_ready(ar, skb);
++		break;
++	case WMI_TLV_READY_EVENTID:
++		ath10k_wmi_event_ready(ar, skb);
++		break;
++	case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
++		ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
++		break;
++	case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID:
++		ath10k_wmi_tlv_event_diag_data(ar, skb);
++		break;
++	case WMI_TLV_DIAG_EVENTID:
++		ath10k_wmi_tlv_event_diag(ar, skb);
++		break;
++	default:
++		ath10k_warn(ar, "Unknown eventid: %d\n", id);
++		break;
++	}
++
++	dev_kfree_skb(skb);
++}
++
++static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
++					  struct sk_buff *skb,
++					  struct wmi_scan_ev_arg *arg)
++{
++	const void **tb;
++	const struct wmi_scan_event *ev;
++	int ret;
++
++	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
++	if (IS_ERR(tb)) {
++		ret = PTR_ERR(tb);
++		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
++		return ret;
++	}
++
++	ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT];
++	if (!ev) {
++		kfree(tb);
++		return -EPROTO;
++	}
++
++	arg->event_type = ev->event_type;
++	arg->reason = ev->reason;
++	arg->channel_freq = ev->channel_freq;
++	arg->scan_req_id = ev->scan_req_id;
++	arg->scan_id = ev->scan_id;
++	arg->vdev_id = ev->vdev_id;
++
++	kfree(tb);
++	return 0;
++}
++
++static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
++					     struct sk_buff *skb,
++					     struct wmi_mgmt_rx_ev_arg *arg)
++{
++	const void **tb;
++	const struct wmi_tlv_mgmt_rx_ev *ev;
++	const u8 *frame;
++	u32 msdu_len;
++	int ret;
++
++	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
++	if (IS_ERR(tb)) {
++		ret = PTR_ERR(tb);
++		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
++		return ret;
++	}
++
++	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR];
++	frame = tb[WMI_TLV_TAG_ARRAY_BYTE];
++
++	if (!ev || !frame) {
++		kfree(tb);
++		return -EPROTO;
++	}
++
++	arg->channel = ev->channel;
++	arg->buf_len = ev->buf_len;
++	arg->status = ev->status;
++	arg->snr = ev->snr;
++	arg->phy_mode = ev->phy_mode;
++	arg->rate = ev->rate;
++
++	msdu_len = __le32_to_cpu(arg->buf_len);
++
++	if (skb->len < (frame - skb->data) + msdu_len) {
++		kfree(tb);
++		return -EPROTO;
++	}
++
++	/* shift the sk_buff to point to `frame` */
++	skb_trim(skb, 0);
++	skb_put(skb, frame - skb->data);
++	skb_pull(skb, frame - skb->data);
++	skb_put(skb, msdu_len);
++
++	kfree(tb);
++	return 0;
++}
++
++static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
++					     struct sk_buff *skb,
++					     struct wmi_ch_info_ev_arg *arg)
++{
++	const void **tb;
++	const struct wmi_chan_info_event *ev;
++	int ret;
++
++	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
++	if (IS_ERR(tb)) {
++		ret = PTR_ERR(tb);
++		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
++		return ret;
++	}
++
++	ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT];
++	if (!ev) {
++		kfree(tb);
++		return -EPROTO;
++	}
++
++	arg->err_code = ev->err_code;
++	arg->freq = ev->freq;
++	arg->cmd_flags = ev->cmd_flags;
++	arg->noise_floor = ev->noise_floor;
++	arg->rx_clear_count = ev->rx_clear_count;
++	arg->cycle_count = ev->cycle_count;
++
++	kfree(tb);
++	return 0;
++}
++
++static int
++ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
++				     struct wmi_vdev_start_ev_arg *arg)
++{
++	const void **tb;
++	const struct wmi_vdev_start_response_event *ev;
++	int ret;
++
++	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
++	if (IS_ERR(tb)) {
++		ret = PTR_ERR(tb);
++		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
++		return ret;
++	}
++
++	ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT];
++	if (!ev) {
++		kfree(tb);
++		return -EPROTO;
++	}
++
++	skb_pull(skb, sizeof(*ev));
++	arg->vdev_id = ev->vdev_id;
++	arg->req_id = ev->req_id;
++	arg->resp_type = ev->resp_type;
++	arg->status = ev->status;
++
++	kfree(tb);
++	return 0;
++}
++
++static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar,
++					       struct sk_buff *skb,
++					       struct wmi_peer_kick_ev_arg *arg)
++{
++	const void **tb;
++	const struct wmi_peer_sta_kickout_event *ev;
++	int ret;
++
++	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
++	if (IS_ERR(tb)) {
++		ret = PTR_ERR(tb);
++		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
++		return ret;
++	}
++
++	ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT];
++	if (!ev) {
++		kfree(tb);
++		return -EPROTO;
++	}
++
++	arg->mac_addr = ev->peer_macaddr.addr;
++
++	kfree(tb);
++	return 0;
++}
++
++struct wmi_tlv_swba_parse {
++	const struct wmi_host_swba_event *ev;
++	bool tim_done;
++	bool noa_done;
++	size_t n_tim;
++	size_t n_noa;
++	struct wmi_swba_ev_arg *arg;
++};
++
++static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
++					 const void *ptr, void *data)
++{
++	struct wmi_tlv_swba_parse *swba = data;
++
++	if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
++		return -EPROTO;
++
++	if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
++		return -ENOBUFS;
++
++	swba->arg->tim_info[swba->n_tim++] = ptr;
++	return 0;
++}
++
++static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len,
++					 const void *ptr, void *data)
++{
++	struct wmi_tlv_swba_parse *swba = data;
++
++	if (tag != WMI_TLV_TAG_STRUCT_P2P_NOA_INFO)
++		return -EPROTO;
++
++	if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info))
++		return -ENOBUFS;
++
++	swba->arg->noa_info[swba->n_noa++] = ptr;
++	return 0;
++}
++
++static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len,
++				     const void *ptr, void *data)
++{
++	struct wmi_tlv_swba_parse *swba = data;
++	int ret;
++
++	switch (tag) {
++	case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT:
++		swba->ev = ptr;
++		break;
++	case WMI_TLV_TAG_ARRAY_STRUCT:
++		if (!swba->tim_done) {
++			swba->tim_done = true;
++			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
++						  ath10k_wmi_tlv_swba_tim_parse,
++						  swba);
++			if (ret)
++				return ret;
++		} else if (!swba->noa_done) {
++			swba->noa_done = true;
++			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
++						  ath10k_wmi_tlv_swba_noa_parse,
++						  swba);
++			if (ret)
++				return ret;
++		}
++		break;
++	default:
++		break;
++	}
++	return 0;
++}
++
++static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar,
++					  struct sk_buff *skb,
++					  struct wmi_swba_ev_arg *arg)
++{
++	struct wmi_tlv_swba_parse swba = { .arg = arg };
++	u32 map;
++	size_t n_vdevs;
++	int ret;
++
++	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
++				  ath10k_wmi_tlv_swba_parse, &swba);
++	if (ret) {
++		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
++		return ret;
++	}
++
++	if (!swba.ev)
++		return -EPROTO;
++
++	arg->vdev_map = swba.ev->vdev_map;
++
++	for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1)
++		if (map & BIT(0))
++			n_vdevs++;
++
++	if (n_vdevs != swba.n_tim ||
++	    n_vdevs != swba.n_noa)
++		return -EPROTO;
++
++	return 0;
++}
++
++static int ath10k_wmi_tlv_op_pull_phyerr_ev(struct ath10k *ar,
++					    struct sk_buff *skb,
++					    struct wmi_phyerr_ev_arg *arg)
++{
++	const void **tb;
++	const struct wmi_tlv_phyerr_ev *ev;
++	const void *phyerrs;
++	int ret;
++
++	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
++	if (IS_ERR(tb)) {
++		ret = PTR_ERR(tb);
++		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
++		return ret;
++	}
++
++	ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR];
++	phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE];
++
++	if (!ev || !phyerrs) {
++		kfree(tb);
++		return -EPROTO;
++	}
++
++	arg->num_phyerrs = ev->num_phyerrs;
++	arg->tsf_l32 = ev->tsf_l32;
++	arg->tsf_u32 = ev->tsf_u32;
++	arg->buf_len = ev->buf_len;
++	arg->phyerrs = phyerrs;
++
++	kfree(tb);
++	return 0;
++}
++
++#define WMI_TLV_ABI_VER_NS0 0x5F414351
++#define WMI_TLV_ABI_VER_NS1 0x00004C4D
++#define WMI_TLV_ABI_VER_NS2 0x00000000
++#define WMI_TLV_ABI_VER_NS3 0x00000000
++
++#define WMI_TLV_ABI_VER0_MAJOR 1
++#define WMI_TLV_ABI_VER0_MINOR 0
++#define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \
++			  (((WMI_TLV_ABI_VER0_MINOR) << 0) & 0x00FFFFFF))
++#define WMI_TLV_ABI_VER1 53
++
++static int
++ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
++			      const void *ptr, void *data)
++{
++	struct wmi_svc_rdy_ev_arg *arg = data;
++	int i;
++
++	if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ)
++		return -EPROTO;
++
++	for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) {
++		if (!arg->mem_reqs[i]) {
++			arg->mem_reqs[i] = ptr;
++			return 0;
++		}
++	}
++
++	return -ENOMEM;
++}
++
++static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
++					     struct sk_buff *skb,
++					     struct wmi_svc_rdy_ev_arg *arg)
++{
++	const void **tb;
++	const struct hal_reg_capabilities *reg;
++	const struct wmi_tlv_svc_rdy_ev *ev;
++	const __le32 *svc_bmap;
++	const struct wlan_host_mem_req *mem_reqs;
++	int ret;
++
++	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
++	if (IS_ERR(tb)) {
++		ret = PTR_ERR(tb);
++		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
++		return ret;
++	}
++
++	ev = tb[WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT];
++	reg = tb[WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES];
++	svc_bmap = tb[WMI_TLV_TAG_ARRAY_UINT32];
++	mem_reqs = tb[WMI_TLV_TAG_ARRAY_STRUCT];
++
++	if (!ev || !reg || !svc_bmap || !mem_reqs) {
++		kfree(tb);
++		return -EPROTO;
++	}
++
++	/* This is an internal ABI compatibility check for WMI TLV so check it
++	 * here instead of the generic WMI code.
++	 */
++	ath10k_dbg(ar, ATH10K_DBG_WMI,
++		   "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n",
++		   __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0,
++		   __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0,
++		   __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1,
++		   __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2,
++		   __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3);
++
++	if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 ||
++	    __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 ||
++	    __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
++	    __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
++	    __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
++		kfree(tb);
++		return -ENOTSUPP;
++	}
++
++	arg->min_tx_power = ev->hw_min_tx_power;
++	arg->max_tx_power = ev->hw_max_tx_power;
++	arg->ht_cap = ev->ht_cap_info;
++	arg->vht_cap = ev->vht_cap_info;
++	arg->sw_ver0 = ev->abi.abi_ver0;
++	arg->sw_ver1 = ev->abi.abi_ver1;
++	arg->fw_build = ev->fw_build_vers;
++	arg->phy_capab = ev->phy_capability;
++	arg->num_rf_chains = ev->num_rf_chains;
++	arg->eeprom_rd = reg->eeprom_rd;
++	arg->num_mem_reqs = ev->num_mem_reqs;
++	arg->service_map = svc_bmap;
++	arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
++
++	ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
++				  ath10k_wmi_tlv_parse_mem_reqs, arg);
++	if (ret) {
++		kfree(tb);
++		ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
++		return ret;
++	}
++
++	kfree(tb);
++	return 0;
++}
++
++static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
++					 struct sk_buff *skb,
++					 struct wmi_rdy_ev_arg *arg)
++{
++	const void **tb;
++	const struct wmi_tlv_rdy_ev *ev;
++	int ret;
++
++	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
++	if (IS_ERR(tb)) {
++		ret = PTR_ERR(tb);
++		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
++		return ret;
++	}
++
++	ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT];
++	if (!ev) {
++		kfree(tb);
++		return -EPROTO;
++	}
++
++	arg->sw_version = ev->abi.abi_ver0;
++	arg->abi_version = ev->abi.abi_ver1;
++	arg->status = ev->status;
++	arg->mac_addr = ev->mac_addr.addr;
++
++	kfree(tb);
++	return 0;
++}
++
++static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
++					   struct ath10k_fw_stats_vdev *dst)
++{
++	int i;
++
++	dst->vdev_id = __le32_to_cpu(src->vdev_id);
++	dst->beacon_snr = __le32_to_cpu(src->beacon_snr);
++	dst->data_snr = __le32_to_cpu(src->data_snr);
++	dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames);
++	dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail);
++	dst->num_rts_success = __le32_to_cpu(src->num_rts_success);
++	dst->num_rx_err = __le32_to_cpu(src->num_rx_err);
++	dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard);
++	dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked);
++
++	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
++		dst->num_tx_frames[i] =
++			__le32_to_cpu(src->num_tx_frames[i]);
++
++	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
++		dst->num_tx_frames_retries[i] =
++			__le32_to_cpu(src->num_tx_frames_retries[i]);
++
++	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
++		dst->num_tx_frames_failures[i] =
++			__le32_to_cpu(src->num_tx_frames_failures[i]);
++
++	for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
++		dst->tx_rate_history[i] =
++			__le32_to_cpu(src->tx_rate_history[i]);
++
++	for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
++		dst->beacon_rssi_history[i] =
++			__le32_to_cpu(src->beacon_rssi_history[i]);
++}
++
++static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
++					   struct sk_buff *skb,
++					   struct ath10k_fw_stats *stats)
++{
++	const void **tb;
++	const struct wmi_tlv_stats_ev *ev;
++	const void *data;
++	u32 num_pdev_stats;
++	u32 num_vdev_stats;
++	u32 num_peer_stats;
++	u32 num_bcnflt_stats;
++	u32 num_chan_stats;
++	size_t data_len;
++	int ret;
++	int i;
++
++	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
++	if (IS_ERR(tb)) {
++		ret = PTR_ERR(tb);
++		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
++		return ret;
++	}
++
++	ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT];
++	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
++
++	if (!ev || !data) {
++		kfree(tb);
++		return -EPROTO;
++	}
++
++	data_len = ath10k_wmi_tlv_len(data);
++	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
++	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
++	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
++	num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
++	num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
++
++	ath10k_dbg(ar, ATH10K_DBG_WMI,
++		   "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i\n",
++		   num_pdev_stats, num_vdev_stats, num_peer_stats,
++		   num_bcnflt_stats, num_chan_stats);
++
++	for (i = 0; i < num_pdev_stats; i++) {
++		const struct wmi_pdev_stats *src;
++		struct ath10k_fw_stats_pdev *dst;
++
++		src = data;
++		if (data_len < sizeof(*src))
++			return -EPROTO;
++
++		data += sizeof(*src);
++		data_len -= sizeof(*src);
++
++		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
++		if (!dst)
++			continue;
++
++		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
++		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
++		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
++		list_add_tail(&dst->list, &stats->pdevs);
++	}
++
++	for (i = 0; i < num_vdev_stats; i++) {
++		const struct wmi_tlv_vdev_stats *src;
++		struct ath10k_fw_stats_vdev *dst;
++
++		src = data;
++		if (data_len < sizeof(*src))
++			return -EPROTO;
++
++		data += sizeof(*src);
++		data_len -= sizeof(*src);
++
++		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
++		if (!dst)
++			continue;
++
++		ath10k_wmi_tlv_pull_vdev_stats(src, dst);
++		list_add_tail(&dst->list, &stats->vdevs);
++	}
++
++	for (i = 0; i < num_peer_stats; i++) {
++		const struct wmi_10x_peer_stats *src;
++		struct ath10k_fw_stats_peer *dst;
++
++		src = data;
++		if (data_len < sizeof(*src))
++			return -EPROTO;
++
++		data += sizeof(*src);
++		data_len -= sizeof(*src);
++
++		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
++		if (!dst)
++			continue;
++
++		ath10k_wmi_pull_peer_stats(&src->old, dst);
++		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
++		list_add_tail(&dst->list, &stats->peers);
++	}
++
++	kfree(tb);
++	return 0;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
++{
++	struct wmi_tlv_pdev_suspend *cmd;
++	struct wmi_tlv *tlv;
++	struct sk_buff *skb;
++
++	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++	if (!skb)
++		return ERR_PTR(-ENOMEM);
++
++	tlv = (void *)skb->data;
++	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD);
++	tlv->len = __cpu_to_le16(sizeof(*cmd));
++	cmd = (void *)tlv->value;
++	cmd->opt = __cpu_to_le32(opt);
++
++	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n");
++	return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar)
++{
++	struct wmi_tlv_resume_cmd *cmd;
++	struct wmi_tlv *tlv;
++	struct sk_buff *skb;
++
++	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++	if (!skb)
++		return ERR_PTR(-ENOMEM);
++
++	tlv = (void *)skb->data;
++	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD);
++	tlv->len = __cpu_to_le16(sizeof(*cmd));
++	cmd = (void *)tlv->value;
++	cmd->reserved = __cpu_to_le32(0);
++
++	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n");
++	return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
++				  u16 rd, u16 rd2g, u16 rd5g,
++				  u16 ctl2g, u16 ctl5g,
++				  enum wmi_dfs_region dfs_reg)
++{
++	struct wmi_tlv_pdev_set_rd_cmd *cmd;
++	struct wmi_tlv *tlv;
++	struct sk_buff *skb;
++
++	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++	if (!skb)
++		return ERR_PTR(-ENOMEM);
++
++	tlv = (void *)skb->data;
++	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD);
++	tlv->len = __cpu_to_le16(sizeof(*cmd));
++	cmd = (void *)tlv->value;
++	cmd->regd = __cpu_to_le32(rd);
++	cmd->regd_2ghz = __cpu_to_le32(rd2g);
++	cmd->regd_5ghz = __cpu_to_le32(rd5g);
++	cmd->conform_limit_2ghz = __cpu_to_le32(rd2g);
++	cmd->conform_limit_5ghz = __cpu_to_le32(rd5g);
++
++	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
++	return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
++				     u32 param_value)
++{
++	struct wmi_tlv_pdev_set_param_cmd *cmd;
++	struct wmi_tlv *tlv;
++	struct sk_buff *skb;
++
++	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++	if (!skb)
++		return ERR_PTR(-ENOMEM);
++
++	tlv = (void *)skb->data;
++	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD);
++	tlv->len = __cpu_to_le16(sizeof(*cmd));
++	cmd = (void *)tlv->value;
++	cmd->param_id = __cpu_to_le32(param_id);
++	cmd->param_value = __cpu_to_le32(param_value);
++
++	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param\n");
++	return skb;
++}
++
++static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
++{
++	struct sk_buff *skb;
++	struct wmi_tlv *tlv;
++	struct wmi_tlv_init_cmd *cmd;
++	struct wmi_tlv_resource_config *cfg;
++	struct wmi_host_mem_chunks *chunks;
++	size_t len, chunks_len;
++	void *ptr;
++
++	chunks_len = ar->wmi.num_mem_chunks * sizeof(struct host_memory_chunk);
++	len = (sizeof(*tlv) + sizeof(*cmd)) +
++	      (sizeof(*tlv) + sizeof(*cfg)) +
++	      (sizeof(*tlv) + chunks_len);
++
++	skb = ath10k_wmi_alloc_skb(ar, len);
++	if (!skb)
++		return ERR_PTR(-ENOMEM);
++
++	ptr = skb->data;
++
++	tlv = ptr;
++	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD);
++	tlv->len = __cpu_to_le16(sizeof(*cmd));
++	cmd = (void *)tlv->value;
++	ptr += sizeof(*tlv);
++	ptr += sizeof(*cmd);
++
++	tlv = ptr;
++	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG);
++	tlv->len = __cpu_to_le16(sizeof(*cfg));
++	cfg = (void *)tlv->value;
++	ptr += sizeof(*tlv);
++	ptr += sizeof(*cfg);
++
++	tlv = ptr;
++	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
++	tlv->len = __cpu_to_le16(chunks_len);
++	chunks = (void *)tlv->value;
++
++	ptr += sizeof(*tlv);
++	ptr += chunks_len;
++
++	cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0);
++	cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1);
++	cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0);
++	cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1);
++	cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2);
++	cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3);
++	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
++
++	cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
++	cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
++
++	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
++		cfg->num_offload_peers = __cpu_to_le32(3);
++		cfg->num_offload_reorder_bufs = __cpu_to_le32(3);
++	} else {
++		cfg->num_offload_peers = __cpu_to_le32(0);
++		cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
++	}
++
++	cfg->num_peer_keys = __cpu_to_le32(2);
++	cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
++	cfg->ast_skid_limit = __cpu_to_le32(0x10);
++	cfg->tx_chain_mask = __cpu_to_le32(0x7);
++	cfg->rx_chain_mask = __cpu_to_le32(0x7);
++	cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
++	cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
++	cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
++	cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
++	cfg->rx_decap_mode = __cpu_to_le32(1);
++	cfg->scan_max_pending_reqs = __cpu_to_le32(4);
++	cfg->bmiss_offload_max_vdev = __cpu_to_le32(3);
++	cfg->roam_offload_max_vdev = __cpu_to_le32(3);
++	cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
++	cfg->num_mcast_groups = __cpu_to_le32(0);
++	cfg->num_mcast_table_elems = __cpu_to_le32(0);
++	cfg->mcast2ucast_mode = __cpu_to_le32(0);
++	cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
++	cfg->num_wds_entries = __cpu_to_le32(0x20);
++	cfg->dma_burst_size = __cpu_to_le32(0);
++	cfg->mac_aggr_delim = __cpu_to_le32(0);
++	cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
++	cfg->vow_config = __cpu_to_le32(0);
++	cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
++	cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC);
++	cfg->max_frag_entries = __cpu_to_le32(2);
++	cfg->num_tdls_vdevs = __cpu_to_le32(1);
++	cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
++	cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
++	cfg->num_multicast_filter_entries = __cpu_to_le32(5);
++	cfg->num_wow_filters = __cpu_to_le32(0x16);
++	cfg->num_keep_alive_pattern = __cpu_to_le32(6);
++	cfg->keep_alive_pattern_size = __cpu_to_le32(0);
++	cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
++	cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
++
++	ath10k_wmi_put_host_mem_chunks(ar, chunks);
++
++	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
++	return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
++ const struct wmi_start_scan_arg *arg)
++{
++ struct wmi_tlv_start_scan_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++ size_t len, chan_len, ssid_len, bssid_len, ie_len;
++ __le32 *chans;
++ struct wmi_ssid *ssids;
++ struct wmi_mac_addr *addrs;
++ void *ptr;
++ int i, ret;
++
++ ret = ath10k_wmi_start_scan_verify(arg);
++ if (ret)
++ return ERR_PTR(ret);
++
++ chan_len = arg->n_channels * sizeof(__le32);
++ ssid_len = arg->n_ssids * sizeof(struct wmi_ssid);
++ bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
++ ie_len = roundup(arg->ie_len, 4);
++ len = (sizeof(*tlv) + sizeof(*cmd)) +
++ (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
++ (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
++ (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
++ (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
++
++ skb = ath10k_wmi_alloc_skb(ar, len);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ ptr = (void *)skb->data;
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++
++ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
++ cmd->burst_duration_ms = __cpu_to_le32(0);
++ cmd->num_channels = __cpu_to_le32(arg->n_channels);
++ cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
++ cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
++ cmd->ie_len = __cpu_to_le32(arg->ie_len);
++ cmd->num_probes = __cpu_to_le32(3);
++
++ /* FIXME: There are some scan flag inconsistencies across firmwares,
++ * e.g. WMI-TLV inverts the logic behind the following flag.
++ */
++ cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
++
++ ptr += sizeof(*tlv);
++ ptr += sizeof(*cmd);
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
++ tlv->len = __cpu_to_le16(chan_len);
++ chans = (void *)tlv->value;
++ for (i = 0; i < arg->n_channels; i++)
++ chans[i] = __cpu_to_le32(arg->channels[i]);
++
++ ptr += sizeof(*tlv);
++ ptr += chan_len;
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
++ tlv->len = __cpu_to_le16(ssid_len);
++ ssids = (void *)tlv->value;
++ for (i = 0; i < arg->n_ssids; i++) {
++ ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len);
++ memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len);
++ }
++
++ ptr += sizeof(*tlv);
++ ptr += ssid_len;
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
++ tlv->len = __cpu_to_le16(bssid_len);
++ addrs = (void *)tlv->value;
++ for (i = 0; i < arg->n_bssids; i++)
++ ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid);
++
++ ptr += sizeof(*tlv);
++ ptr += bssid_len;
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
++ tlv->len = __cpu_to_le16(ie_len);
++ memcpy(tlv->value, arg->ie, arg->ie_len);
++
++ ptr += sizeof(*tlv);
++ ptr += ie_len;
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar,
++ const struct wmi_stop_scan_arg *arg)
++{
++ struct wmi_stop_scan_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++ u32 scan_id;
++ u32 req_id;
++
++ if (arg->req_id > 0xFFF)
++ return ERR_PTR(-EINVAL);
++ if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
++ return ERR_PTR(-EINVAL);
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ scan_id = arg->u.scan_id;
++ scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
++
++ req_id = arg->req_id;
++ req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
++
++ tlv = (void *)skb->data;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->req_type = __cpu_to_le32(arg->req_type);
++ cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
++ cmd->scan_id = __cpu_to_le32(scan_id);
++ cmd->scan_req_id = __cpu_to_le32(req_id);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar,
++ u32 vdev_id,
++ enum wmi_vdev_type vdev_type,
++ enum wmi_vdev_subtype vdev_subtype,
++ const u8 mac_addr[ETH_ALEN])
++{
++ struct wmi_vdev_create_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ tlv = (void *)skb->data;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ cmd->vdev_type = __cpu_to_le32(vdev_type);
++ cmd->vdev_subtype = __cpu_to_le32(vdev_subtype);
++ ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
++{
++ struct wmi_vdev_delete_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ tlv = (void *)skb->data;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
++ const struct wmi_vdev_start_request_arg *arg,
++ bool restart)
++{
++ struct wmi_tlv_vdev_start_cmd *cmd;
++ struct wmi_channel *ch;
++ struct wmi_p2p_noa_descriptor *noa;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++ size_t len;
++ void *ptr;
++ u32 flags = 0;
++
++ if (WARN_ON(arg->ssid && arg->ssid_len == 0))
++ return ERR_PTR(-EINVAL);
++ if (WARN_ON(arg->hidden_ssid && !arg->ssid))
++ return ERR_PTR(-EINVAL);
++ if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
++ return ERR_PTR(-EINVAL);
++
++ len = (sizeof(*tlv) + sizeof(*cmd)) +
++ (sizeof(*tlv) + sizeof(*ch)) +
++ (sizeof(*tlv) + 0);
++ skb = ath10k_wmi_alloc_skb(ar, len);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ if (arg->hidden_ssid)
++ flags |= WMI_VDEV_START_HIDDEN_SSID;
++ if (arg->pmf_enabled)
++ flags |= WMI_VDEV_START_PMF_ENABLED;
++
++ ptr = (void *)skb->data;
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
++ cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval);
++ cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
++ cmd->flags = __cpu_to_le32(flags);
++ cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
++ cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
++ cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
++
++ if (arg->ssid) {
++ cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
++ memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
++ }
++
++ ptr += sizeof(*tlv);
++ ptr += sizeof(*cmd);
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
++ tlv->len = __cpu_to_le16(sizeof(*ch));
++ ch = (void *)tlv->value;
++ ath10k_wmi_put_wmi_channel(ch, &arg->channel);
++
++ ptr += sizeof(*tlv);
++ ptr += sizeof(*ch);
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
++ tlv->len = 0;
++ noa = (void *)tlv->value;
++
++ /* Note: This is a nested TLV containing:
++ * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
++ */
++
++ ptr += sizeof(*tlv);
++ ptr += 0;
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
++{
++ struct wmi_vdev_stop_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ tlv = (void *)skb->data;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
++ const u8 *bssid)
++
++{
++ struct wmi_vdev_up_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ tlv = (void *)skb->data;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ cmd->vdev_assoc_id = __cpu_to_le32(aid);
++ ether_addr_copy(cmd->vdev_bssid.addr, bssid);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
++{
++ struct wmi_vdev_down_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ tlv = (void *)skb->data;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
++ u32 param_id, u32 param_value)
++{
++ struct wmi_vdev_set_param_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ tlv = (void *)skb->data;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ cmd->param_id = __cpu_to_le32(param_id);
++ cmd->param_value = __cpu_to_le32(param_value);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev set param\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
++ const struct wmi_vdev_install_key_arg *arg)
++{
++ struct wmi_vdev_install_key_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++ size_t len;
++ void *ptr;
++
++ if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
++ return ERR_PTR(-EINVAL);
++ if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
++ return ERR_PTR(-EINVAL);
++
++ len = sizeof(*tlv) + sizeof(*cmd) +
++ sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32));
++ skb = ath10k_wmi_alloc_skb(ar, len);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ ptr = (void *)skb->data;
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
++ cmd->key_idx = __cpu_to_le32(arg->key_idx);
++ cmd->key_flags = __cpu_to_le32(arg->key_flags);
++ cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
++ cmd->key_len = __cpu_to_le32(arg->key_len);
++ cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
++ cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
++
++ if (arg->macaddr)
++ ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
++
++ ptr += sizeof(*tlv);
++ ptr += sizeof(*cmd);
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
++ tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32)));
++ if (arg->key_data)
++ memcpy(tlv->value, arg->key_data, arg->key_len);
++
++ ptr += sizeof(*tlv);
++ ptr += roundup(arg->key_len, sizeof(__le32));
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n");
++ return skb;
++}
++
++static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr,
++ const struct wmi_sta_uapsd_auto_trig_arg *arg)
++{
++ struct wmi_sta_uapsd_auto_trig_param *ac;
++ struct wmi_tlv *tlv;
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM);
++ tlv->len = __cpu_to_le16(sizeof(*ac));
++ ac = (void *)tlv->value;
++
++ ac->wmm_ac = __cpu_to_le32(arg->wmm_ac);
++ ac->user_priority = __cpu_to_le32(arg->user_priority);
++ ac->service_interval = __cpu_to_le32(arg->service_interval);
++ ac->suspend_interval = __cpu_to_le32(arg->suspend_interval);
++ ac->delay_interval = __cpu_to_le32(arg->delay_interval);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI,
++ "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n",
++ ac->wmm_ac, ac->user_priority, ac->service_interval,
++ ac->suspend_interval, ac->delay_interval);
++
++ return ptr + sizeof(*tlv) + sizeof(*ac);
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
++ const u8 peer_addr[ETH_ALEN],
++ const struct wmi_sta_uapsd_auto_trig_arg *args,
++ u32 num_ac)
++{
++ struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd;
++ struct wmi_sta_uapsd_auto_trig_param *ac;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++ size_t len;
++ size_t ac_tlv_len;
++ void *ptr;
++ int i;
++
++ ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac));
++ len = sizeof(*tlv) + sizeof(*cmd) +
++ sizeof(*tlv) + ac_tlv_len;
++ skb = ath10k_wmi_alloc_skb(ar, len);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ ptr = (void *)skb->data;
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ cmd->num_ac = __cpu_to_le32(num_ac);
++ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
++
++ ptr += sizeof(*tlv);
++ ptr += sizeof(*cmd);
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
++ tlv->len = __cpu_to_le16(ac_tlv_len);
++ ac = (void *)tlv->value;
++
++ ptr += sizeof(*tlv);
++ for (i = 0; i < num_ac; i++)
++ ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n");
++ return skb;
++}
++
++static void *ath10k_wmi_tlv_put_wmm(void *ptr,
++ const struct wmi_wmm_params_arg *arg)
++{
++ struct wmi_wmm_params *wmm;
++ struct wmi_tlv *tlv;
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
++ tlv->len = __cpu_to_le16(sizeof(*wmm));
++ wmm = (void *)tlv->value;
++ ath10k_wmi_set_wmm_param(wmm, arg);
++
++ return ptr + sizeof(*tlv) + sizeof(*wmm);
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
++ const struct wmi_wmm_params_all_arg *arg)
++{
++ struct wmi_tlv_vdev_set_wmm_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++ size_t len;
++ void *ptr;
++
++ len = sizeof(*tlv) + sizeof(*cmd);
++ skb = ath10k_wmi_alloc_skb(ar, len);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ ptr = (void *)skb->data;
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++
++ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be);
++ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk);
++ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi);
++ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
++ const struct wmi_sta_keepalive_arg *arg)
++{
++ struct wmi_tlv_sta_keepalive_cmd *cmd;
++ struct wmi_sta_keepalive_arp_resp *arp;
++ struct sk_buff *skb;
++ struct wmi_tlv *tlv;
++ void *ptr;
++ size_t len;
++
++ len = sizeof(*tlv) + sizeof(*cmd) +
++ sizeof(*tlv) + sizeof(*arp);
++ skb = ath10k_wmi_alloc_skb(ar, len);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ ptr = (void *)skb->data;
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
++ cmd->enabled = __cpu_to_le32(arg->enabled);
++ cmd->method = __cpu_to_le32(arg->method);
++ cmd->interval = __cpu_to_le32(arg->interval);
++
++ ptr += sizeof(*tlv);
++ ptr += sizeof(*cmd);
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE);
++ tlv->len = __cpu_to_le16(sizeof(*arp));
++ arp = (void *)tlv->value;
++
++ arp->src_ip4_addr = arg->src_ip4_addr;
++ arp->dest_ip4_addr = arg->dest_ip4_addr;
++ ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d interval %d\n",
++ arg->vdev_id, arg->enabled, arg->method, arg->interval);
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
++ const u8 peer_addr[ETH_ALEN])
++{
++ struct wmi_tlv_peer_create_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ tlv = (void *)skb->data;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ cmd->peer_type = __cpu_to_le32(WMI_TLV_PEER_TYPE_DEFAULT); /* FIXME */
++ ether_addr_copy(cmd->peer_addr.addr, peer_addr);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
++ const u8 peer_addr[ETH_ALEN])
++{
++ struct wmi_peer_delete_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ tlv = (void *)skb->data;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
++ const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
++{
++ struct wmi_peer_flush_tids_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ tlv = (void *)skb->data;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
++ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
++ const u8 *peer_addr,
++ enum wmi_peer_param param_id,
++ u32 param_value)
++{
++ struct wmi_peer_set_param_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ tlv = (void *)skb->data;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ cmd->param_id = __cpu_to_le32(param_id);
++ cmd->param_value = __cpu_to_le32(param_value);
++ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer set param\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar,
++ const struct wmi_peer_assoc_complete_arg *arg)
++{
++ struct wmi_tlv_peer_assoc_cmd *cmd;
++ struct wmi_vht_rate_set *vht_rate;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++ size_t len, legacy_rate_len, ht_rate_len;
++ void *ptr;
++
++ if (arg->peer_mpdu_density > 16)
++ return ERR_PTR(-EINVAL);
++ if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
++ return ERR_PTR(-EINVAL);
++ if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
++ return ERR_PTR(-EINVAL);
++
++ legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates,
++ sizeof(__le32));
++ ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32));
++ len = (sizeof(*tlv) + sizeof(*cmd)) +
++ (sizeof(*tlv) + legacy_rate_len) +
++ (sizeof(*tlv) + ht_rate_len) +
++ (sizeof(*tlv) + sizeof(*vht_rate));
++ skb = ath10k_wmi_alloc_skb(ar, len);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ ptr = (void *)skb->data;
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++
++ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
++ cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
++ cmd->assoc_id = __cpu_to_le32(arg->peer_aid);
++ cmd->flags = __cpu_to_le32(arg->peer_flags);
++ cmd->caps = __cpu_to_le32(arg->peer_caps);
++ cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval);
++ cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps);
++ cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
++ cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
++ cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps);
++ cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams);
++ cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps);
++ cmd->phy_mode = __cpu_to_le32(arg->peer_phymode);
++ cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates);
++ cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates);
++ ether_addr_copy(cmd->mac_addr.addr, arg->addr);
++
++ ptr += sizeof(*tlv);
++ ptr += sizeof(*cmd);
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
++ tlv->len = __cpu_to_le16(legacy_rate_len);
++ memcpy(tlv->value, arg->peer_legacy_rates.rates,
++ arg->peer_legacy_rates.num_rates);
++
++ ptr += sizeof(*tlv);
++ ptr += legacy_rate_len;
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
++ tlv->len = __cpu_to_le16(ht_rate_len);
++ memcpy(tlv->value, arg->peer_ht_rates.rates,
++ arg->peer_ht_rates.num_rates);
++
++ ptr += sizeof(*tlv);
++ ptr += ht_rate_len;
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET);
++ tlv->len = __cpu_to_le16(sizeof(*vht_rate));
++ vht_rate = (void *)tlv->value;
++
++ vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
++ vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
++ vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
++ vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
++
++ ptr += sizeof(*tlv);
++ ptr += sizeof(*vht_rate);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
++ enum wmi_sta_ps_mode psmode)
++{
++ struct wmi_sta_powersave_mode_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ tlv = (void *)skb->data;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ cmd->sta_ps_mode = __cpu_to_le32(psmode);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
++ enum wmi_sta_powersave_param param_id,
++ u32 param_value)
++{
++ struct wmi_sta_powersave_param_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ tlv = (void *)skb->data;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ cmd->param_id = __cpu_to_le32(param_id);
++ cmd->param_value = __cpu_to_le32(param_value);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
++ enum wmi_ap_ps_peer_param param_id, u32 value)
++{
++ struct wmi_ap_ps_peer_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++
++ if (!mac)
++ return ERR_PTR(-EINVAL);
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ tlv = (void *)skb->data;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ cmd->param_id = __cpu_to_le32(param_id);
++ cmd->param_value = __cpu_to_le32(value);
++ ether_addr_copy(cmd->peer_macaddr.addr, mac);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
++ const struct wmi_scan_chan_list_arg *arg)
++{
++ struct wmi_tlv_scan_chan_list_cmd *cmd;
++ struct wmi_channel *ci;
++ struct wmi_channel_arg *ch;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++ size_t chans_len, len;
++ int i;
++ void *ptr, *chans;
++
++ chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci));
++ len = (sizeof(*tlv) + sizeof(*cmd)) +
++ (sizeof(*tlv) + chans_len);
++
++ skb = ath10k_wmi_alloc_skb(ar, len);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ ptr = (void *)skb->data;
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
++
++ ptr += sizeof(*tlv);
++ ptr += sizeof(*cmd);
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
++ tlv->len = __cpu_to_le16(chans_len);
++ chans = (void *)tlv->value;
++
++ for (i = 0; i < arg->n_channels; i++) {
++ ch = &arg->channels[i];
++
++ tlv = chans;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
++ tlv->len = __cpu_to_le16(sizeof(*ci));
++ ci = (void *)tlv->value;
++
++ ath10k_wmi_put_wmi_channel(ci, ch);
++
++ chans += sizeof(*tlv);
++ chans += sizeof(*ci);
++ }
++
++ ptr += sizeof(*tlv);
++ ptr += chans_len;
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
++ const void *bcn, size_t bcn_len,
++ u32 bcn_paddr, bool dtim_zero,
++ bool deliver_cab)
++
++{
++ struct wmi_bcn_tx_ref_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++ struct ieee80211_hdr *hdr;
++ u16 fc;
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ hdr = (struct ieee80211_hdr *)bcn;
++ fc = le16_to_cpu(hdr->frame_control);
++
++ tlv = (void *)skb->data;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ cmd->data_len = __cpu_to_le32(bcn_len);
++ cmd->data_ptr = __cpu_to_le32(bcn_paddr);
++ cmd->msdu_id = 0;
++ cmd->frame_control = __cpu_to_le32(fc);
++ cmd->flags = 0;
++
++ if (dtim_zero)
++ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
++
++ if (deliver_cab)
++ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
++ const struct wmi_wmm_params_all_arg *arg)
++{
++ struct wmi_tlv_pdev_set_wmm_cmd *cmd;
++ struct wmi_wmm_params *wmm;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++ size_t len;
++ void *ptr;
++
++ len = (sizeof(*tlv) + sizeof(*cmd)) +
++ (4 * (sizeof(*tlv) + sizeof(*wmm)));
++ skb = ath10k_wmi_alloc_skb(ar, len);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ ptr = (void *)skb->data;
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++
++ /* nothing to set here */
++
++ ptr += sizeof(*tlv);
++ ptr += sizeof(*cmd);
++
++ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
++ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
++ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
++ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
++{
++ struct wmi_request_stats_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ tlv = (void *)skb->data;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->stats_id = __cpu_to_le32(stats_mask);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
++ enum wmi_force_fw_hang_type type,
++ u32 delay_ms)
++{
++ struct wmi_force_fw_hang_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++
++ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ tlv = (void *)skb->data;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->type = __cpu_to_le32(type);
++ cmd->delay_ms = __cpu_to_le32(delay_ms);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
++ u32 log_level) {
++ struct wmi_tlv_dbglog_cmd *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++ size_t len, bmap_len;
++ u32 value;
++ void *ptr;
++
++ if (module_enable) {
++ value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
++ module_enable,
++ WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE);
++ } else {
++ value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
++ WMI_TLV_DBGLOG_ALL_MODULES,
++ WMI_TLV_DBGLOG_LOG_LEVEL_WARN);
++ }
++
++ bmap_len = 0;
++ len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len;
++ skb = ath10k_wmi_alloc_skb(ar, len);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ ptr = (void *)skb->data;
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL);
++ cmd->value = __cpu_to_le32(value);
++
++ ptr += sizeof(*tlv);
++ ptr += sizeof(*cmd);
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
++ tlv->len = __cpu_to_le16(bmap_len);
++
++ /* nothing to do here */
++
++ ptr += sizeof(*tlv);
++ ptr += sizeof(bmap_len);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value);
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter)
++{
++ struct wmi_tlv_pktlog_enable *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++ void *ptr;
++ size_t len;
++
++ len = sizeof(*tlv) + sizeof(*cmd);
++ skb = ath10k_wmi_alloc_skb(ar, len);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ ptr = (void *)skb->data;
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->filter = __cpu_to_le32(filter);
++
++ ptr += sizeof(*tlv);
++ ptr += sizeof(*cmd);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n",
++ filter);
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
++{
++ struct wmi_tlv_pktlog_disable *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++ void *ptr;
++ size_t len;
++
++ len = sizeof(*tlv) + sizeof(*cmd);
++ skb = ath10k_wmi_alloc_skb(ar, len);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ ptr = (void *)skb->data;
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++
++ ptr += sizeof(*tlv);
++ ptr += sizeof(*cmd);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n");
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
++ u32 tim_ie_offset, struct sk_buff *bcn,
++ u32 prb_caps, u32 prb_erp, void *prb_ies,
++ size_t prb_ies_len)
++{
++ struct wmi_tlv_bcn_tmpl_cmd *cmd;
++ struct wmi_tlv_bcn_prb_info *info;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++ void *ptr;
++ size_t len;
++
++ if (WARN_ON(prb_ies_len > 0 && !prb_ies))
++ return ERR_PTR(-EINVAL);
++
++ len = sizeof(*tlv) + sizeof(*cmd) +
++ sizeof(*tlv) + sizeof(*info) + prb_ies_len +
++ sizeof(*tlv) + roundup(bcn->len, 4);
++ skb = ath10k_wmi_alloc_skb(ar, len);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ ptr = (void *)skb->data;
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
++ cmd->buf_len = __cpu_to_le32(bcn->len);
++
++ ptr += sizeof(*tlv);
++ ptr += sizeof(*cmd);
++
++ /* FIXME: prb_ies_len should probably be aligned to a 4 byte boundary
++ * but then it is impossible to pass the original ie len.
++ * This chunk is not used yet so if setting probe resp template yields
++ * problems with beaconing or crashes firmware look here.
++ */
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
++ tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
++ info = (void *)tlv->value;
++ info->caps = __cpu_to_le32(prb_caps);
++ info->erp = __cpu_to_le32(prb_erp);
++ memcpy(info->ies, prb_ies, prb_ies_len);
++
++ ptr += sizeof(*tlv);
++ ptr += sizeof(*info);
++ ptr += prb_ies_len;
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
++ tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
++ memcpy(tlv->value, bcn->data, bcn->len);
++
++ /* FIXME: Adjust TSF? */
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
++ vdev_id);
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
++ struct sk_buff *prb)
++{
++ struct wmi_tlv_prb_tmpl_cmd *cmd;
++ struct wmi_tlv_bcn_prb_info *info;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++ void *ptr;
++ size_t len;
++
++ len = sizeof(*tlv) + sizeof(*cmd) +
++ sizeof(*tlv) + sizeof(*info) +
++ sizeof(*tlv) + roundup(prb->len, 4);
++ skb = ath10k_wmi_alloc_skb(ar, len);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ ptr = (void *)skb->data;
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ cmd->buf_len = __cpu_to_le32(prb->len);
++
++ ptr += sizeof(*tlv);
++ ptr += sizeof(*cmd);
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
++ tlv->len = __cpu_to_le16(sizeof(*info));
++ info = (void *)tlv->value;
++ info->caps = 0;
++ info->erp = 0;
++
++ ptr += sizeof(*tlv);
++ ptr += sizeof(*info);
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
++ tlv->len = __cpu_to_le16(roundup(prb->len, 4));
++ memcpy(tlv->value, prb->data, prb->len);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
++ vdev_id);
++ return skb;
++}
++
++static struct sk_buff *
++ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
++ const u8 *p2p_ie)
++{
++ struct wmi_tlv_p2p_go_bcn_ie *cmd;
++ struct wmi_tlv *tlv;
++ struct sk_buff *skb;
++ void *ptr;
++ size_t len;
++
++ len = sizeof(*tlv) + sizeof(*cmd) +
++ sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
++ skb = ath10k_wmi_alloc_skb(ar, len);
++ if (!skb)
++ return ERR_PTR(-ENOMEM);
++
++ ptr = (void *)skb->data;
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
++ tlv->len = __cpu_to_le16(sizeof(*cmd));
++ cmd = (void *)tlv->value;
++ cmd->vdev_id = __cpu_to_le32(vdev_id);
++ cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);
++
++ ptr += sizeof(*tlv);
++ ptr += sizeof(*cmd);
++
++ tlv = ptr;
++ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
++ tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
++ memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);
++
++ ptr += sizeof(*tlv);
++ ptr += roundup(p2p_ie[1] + 2, 4);
++
++ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
++ vdev_id);
++ return skb;
++}
++
++/****************/
++/* TLV mappings */
++/****************/
++
++static struct wmi_cmd_map wmi_tlv_cmd_map = {
++ .init_cmdid = WMI_TLV_INIT_CMDID,
++ .start_scan_cmdid = WMI_TLV_START_SCAN_CMDID,
++ .stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
++ .scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
++ .scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
++ .pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
++ .pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
++ .pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
++ .pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
++ .pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
++ .pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
++ .pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
++ .pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
++ .pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
++ .pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
++ .pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
++ .pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
++ .pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
++ .vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID,
++ .vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID,
++ .vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID,
++ .vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
++ .vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID,
++ .vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID,
++ .vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID,
++ .vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID,
++ .vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID,
++ .peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID,
++ .peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID,
++ .peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID,
++ .peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID,
++ .peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID,
++ .peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
++ .peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
++ .peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID,
++ .bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID,
++ .pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID,
++ .bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID,
++ .bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
++ .prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
++ .mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
++ .prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
++ .addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
++ .addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
++ .addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID,
++ .delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID,
++ .addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID,
++ .send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID,
++ .sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID,
++ .sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
++ .sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID,
++ .pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID,
++ .pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID,
++ .roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE,
++ .roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
++ .roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD,
++ .roam_scan_rssi_change_threshold =
++ WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
++ .roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
++ .ofl_scan_add_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
++ .ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
++ .ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD,
++ .p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO,
++ .p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
++ .p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE,
++ .p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
++ .p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
++ .ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID,
++ .ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
++ .peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID,
++ .wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID,
++ .wlan_profile_set_hist_intvl_cmdid =
++ WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
++ .wlan_profile_get_profile_data_cmdid =
++ WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
++ .wlan_profile_enable_profile_id_cmdid =
++ WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
++ .wlan_profile_list_profile_id_cmdid =
++ WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
++ .pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID,
++ .pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID,
++ .add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID,
++ .rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID,
++ .wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID,
++ .wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
++ .wow_enable_disable_wake_event_cmdid =
++ WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
++ .wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID,
++ .wow_hostwakeup_from_sleep_cmdid =
++ WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
++ .rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID,
++ .rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID,
++ .vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
++ .vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
++ .request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
++ .set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
++ .network_list_offload_config_cmdid =
++ WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
++ .gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID,
++ .csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID,
++ .csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
++ .chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID,
++ .peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID,
++ .peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID,
++ .sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
++ .sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
++ .sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID,
++ .echo_cmdid = WMI_TLV_ECHO_CMDID,
++ .pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID,
++ .dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID,
++ .pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID,
++ .pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID,
++ .vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
++ .vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
++ .force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID,
++ .gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
++ .gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
++ .pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED,
++ .vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
++};
++
++static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
++ .tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK,
++ .rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
++ .txpower_limit2g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
++ .txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
++ .txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
++ .beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
++ .beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
++ .resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
++ .protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
++ .dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
++ .non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
++ .agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
++ .sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
++ .ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
++ .ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE,
++ .ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
++ .ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
++ .ltr_ac_latency_vi = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
++ .ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
++ .ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
++ .ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
++ .ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
++ .ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
++ .l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
++ .dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
++ .pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
++ .pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
++ .pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
++ .pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
++ .pdev_stats_update_period = WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
++ .vdev_stats_update_period = WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
++ .peer_stats_update_period = WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
++ .bcnflt_stats_update_period =
++ WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
++ .pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS,
++ .arp_ac_override = WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
++ .dcs = WMI_TLV_PDEV_PARAM_DCS,
++ .ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE,
++ .ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
++ .ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
++ .ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
++ .ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
++ .dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
++ .proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA,
++ .idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
++ .power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
++ .fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
++ .burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
++ .burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
++ .cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
++};
++
++static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
|
|
++ .rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD,
|
|
++ .fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
|
|
++ .beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
|
|
++ .listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
|
|
++ .multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
|
|
++ .mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
|
|
++ .slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME,
|
|
++ .preamble = WMI_TLV_VDEV_PARAM_PREAMBLE,
|
|
++ .swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME,
|
|
++ .wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
|
|
++ .wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
|
|
++ .wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
|
|
++ .dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
|
|
++ .wmi_vdev_oc_scheduler_air_time_limit =
|
|
++ WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
|
|
++ .wds = WMI_TLV_VDEV_PARAM_WDS,
|
|
++ .atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
|
|
++ .bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
|
|
++ .bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
|
|
++ .bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
|
|
++ .feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM,
|
|
++ .chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH,
|
|
++ .chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
|
|
++ .disable_htprotection = WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
|
|
++ .sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
|
|
++ .mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE,
|
|
++ .protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
|
|
++ .fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE,
|
|
++ .sgi = WMI_TLV_VDEV_PARAM_SGI,
|
|
++ .ldpc = WMI_TLV_VDEV_PARAM_LDPC,
|
|
++ .tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC,
|
|
++ .rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC,
|
|
++ .intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
|
|
++ .def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID,
|
|
++ .nss = WMI_TLV_VDEV_PARAM_NSS,
|
|
++ .bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
|
|
++ .mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
|
|
++ .mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
|
|
++ .dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
|
|
++ .unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
|
|
++ .ap_keepalive_min_idle_inactive_time_secs =
|
|
++ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
|
|
++ .ap_keepalive_max_idle_inactive_time_secs =
|
|
++ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
|
|
++ .ap_keepalive_max_unresponsive_time_secs =
|
|
++ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
|
|
++ .ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
|
|
++ .mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED,
|
|
++ .enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
|
|
++ .txbf = WMI_TLV_VDEV_PARAM_TXBF,
|
|
++ .packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
|
|
++ .drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
|
|
++ .tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
|
|
++ .ap_detect_out_of_sync_sleeping_sta_time_secs =
|
|
++ WMI_TLV_VDEV_PARAM_UNSUPPORTED,
|
|
++};
|
|
++
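++/* Dispatch table wiring the generic ath10k WMI calls to their TLV
++ * encoders and decoders; ops intentionally left unset (see the comments
++ * below) are treated as unsupported by the common WMI glue when a
++ * caller tries to use them. */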
++static const struct wmi_ops wmi_tlv_ops = {
++ .rx = ath10k_wmi_tlv_op_rx,
++ .map_svc = wmi_tlv_svc_map,
++
++ .pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
++ .pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
++ .pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
++ .pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
++ .pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
++ .pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
++ .pull_phyerr = ath10k_wmi_tlv_op_pull_phyerr_ev,
++ .pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
++ .pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
++ .pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
++
++ .gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
++ .gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
++ .gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd,
++ .gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param,
++ .gen_init = ath10k_wmi_tlv_op_gen_init,
++ .gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan,
++ .gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan,
++ .gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create,
++ .gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete,
++ .gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start,
++ .gen_vdev_stop = ath10k_wmi_tlv_op_gen_vdev_stop,
++ .gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up,
++ .gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down,
++ .gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param,
++ .gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key,
++ .gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
++ .gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create,
++ .gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete,
++ .gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush,
++ .gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param,
++ .gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc,
++ .gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode,
++ .gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps,
++ .gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps,
++ .gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list,
++ .gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
++ .gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
++ .gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
++ .gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
++ /* .gen_mgmt_tx = not implemented; HTT is used */
++ .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
++ .gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
++ .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
++ /* .gen_pdev_set_quiet_mode not implemented */
++ /* .gen_pdev_get_temperature not implemented */
++ /* .gen_addba_clear_resp not implemented */
++ /* .gen_addba_send not implemented */
++ /* .gen_addba_set_resp not implemented */
++ /* .gen_delba_send not implemented */
++ .gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl,
++ .gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl,
++ .gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
++ .gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
++ .gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
++};
++
++/************/
++/* TLV init */
++/************/
++
++void ath10k_wmi_tlv_attach(struct ath10k *ar)
++{
++ ar->wmi.cmd = &wmi_tlv_cmd_map;
++ ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
++ ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
++ ar->wmi.ops = &wmi_tlv_ops;
++}
+--- /dev/null
++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+@@ -0,0 +1,1459 @@
++/*
++ * Copyright (c) 2005-2011 Atheros Communications Inc.
++ * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
++ *
++ * Permission to use, copy, modify, and/or distribute this software for any
++ * purpose with or without fee is hereby granted, provided that the above
++ * copyright notice and this permission notice appear in all copies.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
++ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
++ */
++#ifndef _WMI_TLV_H
++#define _WMI_TLV_H
++
++#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1)
++#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1)
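++/* Command and event IDs are grouped: the first member of a group is
++ * ((grp_id << 12) | 0x1) and the remaining members of that group simply
++ * follow by enum auto-increment, as in the enums below. */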
++#define WMI_TLV_CMD_UNSUPPORTED 0
++#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
++#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
++
++enum wmi_tlv_grp_id {
++ WMI_TLV_GRP_START = 0x3,
++ WMI_TLV_GRP_SCAN = WMI_TLV_GRP_START,
++ WMI_TLV_GRP_PDEV,
++ WMI_TLV_GRP_VDEV,
++ WMI_TLV_GRP_PEER,
++ WMI_TLV_GRP_MGMT,
++ WMI_TLV_GRP_BA_NEG,
++ WMI_TLV_GRP_STA_PS,
++ WMI_TLV_GRP_DFS,
++ WMI_TLV_GRP_ROAM,
++ WMI_TLV_GRP_OFL_SCAN,
++ WMI_TLV_GRP_P2P,
++ WMI_TLV_GRP_AP_PS,
++ WMI_TLV_GRP_RATECTL,
++ WMI_TLV_GRP_PROFILE,
++ WMI_TLV_GRP_SUSPEND,
++ WMI_TLV_GRP_BCN_FILTER,
++ WMI_TLV_GRP_WOW,
++ WMI_TLV_GRP_RTT,
++ WMI_TLV_GRP_SPECTRAL,
++ WMI_TLV_GRP_STATS,
++ WMI_TLV_GRP_ARP_NS_OFL,
++ WMI_TLV_GRP_NLO_OFL,
++ WMI_TLV_GRP_GTK_OFL,
++ WMI_TLV_GRP_CSA_OFL,
++ WMI_TLV_GRP_CHATTER,
++ WMI_TLV_GRP_TID_ADDBA,
++ WMI_TLV_GRP_MISC,
++ WMI_TLV_GRP_GPIO,
++ WMI_TLV_GRP_FWTEST,
++ WMI_TLV_GRP_TDLS,
++ WMI_TLV_GRP_RESMGR,
++ WMI_TLV_GRP_STA_SMPS,
++ WMI_TLV_GRP_WLAN_HB,
++ WMI_TLV_GRP_RMC,
++ WMI_TLV_GRP_MHF_OFL,
++ WMI_TLV_GRP_LOCATION_SCAN,
++ WMI_TLV_GRP_OEM,
++ WMI_TLV_GRP_NAN,
++ WMI_TLV_GRP_COEX,
++ WMI_TLV_GRP_OBSS_OFL,
++ WMI_TLV_GRP_LPI,
++ WMI_TLV_GRP_EXTSCAN,
++ WMI_TLV_GRP_DHCP_OFL,
++ WMI_TLV_GRP_IPA,
++ WMI_TLV_GRP_MDNS_OFL,
++ WMI_TLV_GRP_SAP_OFL,
++};
++
++enum wmi_tlv_cmd_id {
++ WMI_TLV_INIT_CMDID = 0x1,
++ WMI_TLV_START_SCAN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SCAN),
++ WMI_TLV_STOP_SCAN_CMDID,
++ WMI_TLV_SCAN_CHAN_LIST_CMDID,
++ WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
++ WMI_TLV_SCAN_UPDATE_REQUEST_CMDID,
++ WMI_TLV_SCAN_PROB_REQ_OUI_CMDID,
++ WMI_TLV_PDEV_SET_REGDOMAIN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PDEV),
++ WMI_TLV_PDEV_SET_CHANNEL_CMDID,
++ WMI_TLV_PDEV_SET_PARAM_CMDID,
++ WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
++ WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
++ WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
++ WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
++ WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
++ WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
++ WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
++ WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
++ WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
++ WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
++ WMI_TLV_PDEV_DUMP_CMDID,
++ WMI_TLV_PDEV_SET_LED_CONFIG_CMDID,
++ WMI_TLV_PDEV_GET_TEMPERATURE_CMDID,
++ WMI_TLV_PDEV_SET_LED_FLASHING_CMDID,
++ WMI_TLV_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_VDEV),
++ WMI_TLV_VDEV_DELETE_CMDID,
++ WMI_TLV_VDEV_START_REQUEST_CMDID,
++ WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
++ WMI_TLV_VDEV_UP_CMDID,
++ WMI_TLV_VDEV_STOP_CMDID,
++ WMI_TLV_VDEV_DOWN_CMDID,
++ WMI_TLV_VDEV_SET_PARAM_CMDID,
++ WMI_TLV_VDEV_INSTALL_KEY_CMDID,
++ WMI_TLV_VDEV_WNM_SLEEPMODE_CMDID,
++ WMI_TLV_VDEV_WMM_ADDTS_CMDID,
++ WMI_TLV_VDEV_WMM_DELTS_CMDID,
++ WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
++ WMI_TLV_VDEV_SET_GTX_PARAMS_CMDID,
++ WMI_TLV_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID,
++ WMI_TLV_VDEV_PLMREQ_START_CMDID,
++ WMI_TLV_VDEV_PLMREQ_STOP_CMDID,
++ WMI_TLV_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PEER),
++ WMI_TLV_PEER_DELETE_CMDID,
++ WMI_TLV_PEER_FLUSH_TIDS_CMDID,
++ WMI_TLV_PEER_SET_PARAM_CMDID,
++ WMI_TLV_PEER_ASSOC_CMDID,
++ WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
++ WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
++ WMI_TLV_PEER_MCAST_GROUP_CMDID,
++ WMI_TLV_PEER_INFO_REQ_CMDID,
++ WMI_TLV_PEER_GET_ESTIMATED_LINKSPEED_CMDID,
++ WMI_TLV_BCN_TX_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MGMT),
++ WMI_TLV_PDEV_SEND_BCN_CMDID,
++ WMI_TLV_BCN_TMPL_CMDID,
++ WMI_TLV_BCN_FILTER_RX_CMDID,
++ WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
++ WMI_TLV_MGMT_TX_CMDID,
++ WMI_TLV_PRB_TMPL_CMDID,
++ WMI_TLV_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BA_NEG),
++ WMI_TLV_ADDBA_SEND_CMDID,
++ WMI_TLV_ADDBA_STATUS_CMDID,
++ WMI_TLV_DELBA_SEND_CMDID,
++ WMI_TLV_ADDBA_SET_RESP_CMDID,
++ WMI_TLV_SEND_SINGLEAMSDU_CMDID,
++ WMI_TLV_STA_POWERSAVE_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STA_PS),
++ WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
++ WMI_TLV_STA_MIMO_PS_MODE_CMDID,
++ WMI_TLV_PDEV_DFS_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_DFS),
++ WMI_TLV_PDEV_DFS_DISABLE_CMDID,
++ WMI_TLV_DFS_PHYERR_FILTER_ENA_CMDID,
++ WMI_TLV_DFS_PHYERR_FILTER_DIS_CMDID,
++ WMI_TLV_ROAM_SCAN_MODE = WMI_TLV_CMD(WMI_TLV_GRP_ROAM),
++ WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
++ WMI_TLV_ROAM_SCAN_PERIOD,
++ WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
++ WMI_TLV_ROAM_AP_PROFILE,
++ WMI_TLV_ROAM_CHAN_LIST,
++ WMI_TLV_ROAM_SCAN_CMD,
++ WMI_TLV_ROAM_SYNCH_COMPLETE,
++ WMI_TLV_ROAM_SET_RIC_REQUEST_CMDID,
++ WMI_TLV_ROAM_INVOKE_CMDID,
++ WMI_TLV_OFL_SCAN_ADD_AP_PROFILE = WMI_TLV_CMD(WMI_TLV_GRP_OFL_SCAN),
++ WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
++ WMI_TLV_OFL_SCAN_PERIOD,
++ WMI_TLV_P2P_DEV_SET_DEVICE_INFO = WMI_TLV_CMD(WMI_TLV_GRP_P2P),
++ WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
++ WMI_TLV_P2P_GO_SET_BEACON_IE,
++ WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
++ WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
++ WMI_TLV_P2P_DISC_OFFLOAD_CONFIG_CMDID,
++ WMI_TLV_P2P_DISC_OFFLOAD_APPIE_CMDID,
++ WMI_TLV_P2P_DISC_OFFLOAD_PATTERN_CMDID,
++ WMI_TLV_P2P_SET_OPPPS_PARAM_CMDID,
++ WMI_TLV_AP_PS_PEER_PARAM_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_AP_PS),
++ WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
++ WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RATECTL),
++ WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PROFILE),
++ WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
++ WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
++ WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
++ WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
++ WMI_TLV_PDEV_SUSPEND_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SUSPEND),
++ WMI_TLV_PDEV_RESUME_CMDID,
++ WMI_TLV_ADD_BCN_FILTER_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BCN_FILTER),
++ WMI_TLV_RMV_BCN_FILTER_CMDID,
++ WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_WOW),
++ WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
++ WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
++ WMI_TLV_WOW_ENABLE_CMDID,
++ WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
++ WMI_TLV_WOW_ACER_IOAC_ADD_KEEPALIVE_CMDID,
++ WMI_TLV_WOW_ACER_IOAC_DEL_KEEPALIVE_CMDID,
++ WMI_TLV_WOW_ACER_IOAC_ADD_WAKE_PATTERN_CMDID,
++ WMI_TLV_WOW_ACER_IOAC_DEL_WAKE_PATTERN_CMDID,
++ WMI_TLV_D0_WOW_ENABLE_DISABLE_CMDID,
++ WMI_TLV_EXTWOW_ENABLE_CMDID,
++ WMI_TLV_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID,
++ WMI_TLV_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID,
++ WMI_TLV_RTT_MEASREQ_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RTT),
++ WMI_TLV_RTT_TSF_CMDID,
++ WMI_TLV_SPECTRAL_SCAN_CONF_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SPECTRAL),
++ WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
++ WMI_TLV_REQUEST_STATS_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STATS),
++ WMI_TLV_MCC_SCHED_TRAFFIC_STATS_CMDID,
++ WMI_TLV_REQUEST_STATS_EXT_CMDID,
++ WMI_TLV_REQUEST_LINK_STATS_CMDID,
++ WMI_TLV_START_LINK_STATS_CMDID,
++ WMI_TLV_CLEAR_LINK_STATS_CMDID,
++ WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_ARP_NS_OFL),
++ WMI_TLV_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
++ WMI_TLV_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
++ WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID =
++ WMI_TLV_CMD(WMI_TLV_GRP_NLO_OFL),
++ WMI_TLV_APFIND_CMDID,
++ WMI_TLV_GTK_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_GTK_OFL),
++ WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_CSA_OFL),
++ WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
++ WMI_TLV_CHATTER_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_CHATTER),
++ WMI_TLV_CHATTER_ADD_COALESCING_FILTER_CMDID,
++ WMI_TLV_CHATTER_DELETE_COALESCING_FILTER_CMDID,
++ WMI_TLV_CHATTER_COALESCING_QUERY_CMDID,
++ WMI_TLV_PEER_TID_ADDBA_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_TID_ADDBA),
++ WMI_TLV_PEER_TID_DELBA_CMDID,
++ WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
++ WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
++ WMI_TLV_STA_KEEPALIVE_CMDID,
++ WMI_TLV_BA_REQ_SSN_CMDID,
++ WMI_TLV_ECHO_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MISC),
++ WMI_TLV_PDEV_UTF_CMDID,
++ WMI_TLV_DBGLOG_CFG_CMDID,
++ WMI_TLV_PDEV_QVIT_CMDID,
++ WMI_TLV_PDEV_FTM_INTG_CMDID,
++ WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
++ WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
++ WMI_TLV_FORCE_FW_HANG_CMDID,
++ WMI_TLV_SET_MCASTBCAST_FILTER_CMDID,
++ WMI_TLV_THERMAL_MGMT_CMDID,
++ WMI_TLV_HOST_AUTO_SHUTDOWN_CFG_CMDID,
++ WMI_TLV_TPC_CHAINMASK_CONFIG_CMDID,
++ WMI_TLV_SET_ANTENNA_DIVERSITY_CMDID,
++ WMI_TLV_GPIO_CONFIG_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_GPIO),
++ WMI_TLV_GPIO_OUTPUT_CMDID,
++ WMI_TLV_TXBF_CMDID,
++ WMI_TLV_FWTEST_VDEV_MCC_SET_TBTT_MODE_CMDID =
++ WMI_TLV_CMD(WMI_TLV_GRP_FWTEST),
++ WMI_TLV_FWTEST_P2P_SET_NOA_PARAM_CMDID,
++ WMI_TLV_UNIT_TEST_CMDID,
++ WMI_TLV_TDLS_SET_STATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_TDLS),
++ WMI_TLV_TDLS_PEER_UPDATE_CMDID,
++ WMI_TLV_TDLS_SET_OFFCHAN_MODE_CMDID,
++ WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RESMGR),
++ WMI_TLV_RESMGR_SET_CHAN_TIME_QUOTA_CMDID,
++ WMI_TLV_RESMGR_SET_CHAN_LATENCY_CMDID,
++ WMI_TLV_STA_SMPS_FORCE_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STA_SMPS),
++ WMI_TLV_STA_SMPS_PARAM_CMDID,
++ WMI_TLV_HB_SET_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_WLAN_HB),
++ WMI_TLV_HB_SET_TCP_PARAMS_CMDID,
++ WMI_TLV_HB_SET_TCP_PKT_FILTER_CMDID,
++ WMI_TLV_HB_SET_UDP_PARAMS_CMDID,
++ WMI_TLV_HB_SET_UDP_PKT_FILTER_CMDID,
++ WMI_TLV_RMC_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RMC),
++ WMI_TLV_RMC_SET_ACTION_PERIOD_CMDID,
++ WMI_TLV_RMC_CONFIG_CMDID,
++ WMI_TLV_MHF_OFFLOAD_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MHF_OFL),
++ WMI_TLV_MHF_OFFLOAD_PLUMB_ROUTING_TBL_CMDID,
++ WMI_TLV_BATCH_SCAN_ENABLE_CMDID =
++ WMI_TLV_CMD(WMI_TLV_GRP_LOCATION_SCAN),
++ WMI_TLV_BATCH_SCAN_DISABLE_CMDID,
++ WMI_TLV_BATCH_SCAN_TRIGGER_RESULT_CMDID,
++ WMI_TLV_OEM_REQ_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_OEM),
++ WMI_TLV_NAN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_NAN),
++ WMI_TLV_MODEM_POWER_STATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_COEX),
++ WMI_TLV_CHAN_AVOID_UPDATE_CMDID,
++ WMI_TLV_OBSS_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_OBSS_OFL),
++ WMI_TLV_OBSS_SCAN_DISABLE_CMDID,
++ WMI_TLV_LPI_MGMT_SNOOPING_CONFIG_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_LPI),
++ WMI_TLV_LPI_START_SCAN_CMDID,
++ WMI_TLV_LPI_STOP_SCAN_CMDID,
++ WMI_TLV_EXTSCAN_START_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_EXTSCAN),
++ WMI_TLV_EXTSCAN_STOP_CMDID,
++ WMI_TLV_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID,
++ WMI_TLV_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID,
++ WMI_TLV_EXTSCAN_GET_CACHED_RESULTS_CMDID,
++ WMI_TLV_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMDID,
++ WMI_TLV_EXTSCAN_SET_CAPABILITIES_CMDID,
++ WMI_TLV_EXTSCAN_GET_CAPABILITIES_CMDID,
++ WMI_TLV_SET_DHCP_SERVER_OFFLOAD_CMDID =
++ WMI_TLV_CMD(WMI_TLV_GRP_DHCP_OFL),
++ WMI_TLV_IPA_OFFLOAD_ENABLE_DISABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_IPA),
++ WMI_TLV_MDNS_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MDNS_OFL),
++ WMI_TLV_MDNS_SET_FQDN_CMDID,
++ WMI_TLV_MDNS_SET_RESPONSE_CMDID,
++ WMI_TLV_MDNS_GET_STATS_CMDID,
++ WMI_TLV_SAP_OFL_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SAP_OFL),
++};
++
++enum wmi_tlv_event_id {
++ WMI_TLV_SERVICE_READY_EVENTID = 0x1,
++ WMI_TLV_READY_EVENTID,
++ WMI_TLV_SCAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SCAN),
++ WMI_TLV_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PDEV),
++ WMI_TLV_CHAN_INFO_EVENTID,
++ WMI_TLV_PHYERR_EVENTID,
++ WMI_TLV_PDEV_DUMP_EVENTID,
++ WMI_TLV_TX_PAUSE_EVENTID,
++ WMI_TLV_DFS_RADAR_EVENTID,
++ WMI_TLV_PDEV_L1SS_TRACK_EVENTID,
++ WMI_TLV_PDEV_TEMPERATURE_EVENTID,
++ WMI_TLV_VDEV_START_RESP_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_VDEV),
++ WMI_TLV_VDEV_STOPPED_EVENTID,
++ WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
++ WMI_TLV_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID,
++ WMI_TLV_PEER_STA_KICKOUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PEER),
++ WMI_TLV_PEER_INFO_EVENTID,
++ WMI_TLV_PEER_TX_FAIL_CNT_THR_EVENTID,
++ WMI_TLV_PEER_ESTIMATED_LINKSPEED_EVENTID,
++ WMI_TLV_PEER_STATE_EVENTID,
++ WMI_TLV_MGMT_RX_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MGMT),
++ WMI_TLV_HOST_SWBA_EVENTID,
++ WMI_TLV_TBTTOFFSET_UPDATE_EVENTID,
++ WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID,
++ WMI_TLV_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
++ WMI_TLV_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_BA_NEG),
++ WMI_TLV_TX_ADDBA_COMPLETE_EVENTID,
++ WMI_TLV_BA_RSP_SSN_EVENTID,
++ WMI_TLV_AGGR_STATE_TRIG_EVENTID,
++ WMI_TLV_ROAM_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_ROAM),
++ WMI_TLV_PROFILE_MATCH,
++ WMI_TLV_ROAM_SYNCH_EVENTID,
++ WMI_TLV_P2P_DISC_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_P2P),
++ WMI_TLV_P2P_NOA_EVENTID,
++ WMI_TLV_PDEV_RESUME_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SUSPEND),
++ WMI_TLV_WOW_WAKEUP_HOST_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_WOW),
++ WMI_TLV_D0_WOW_DISABLE_ACK_EVENTID,
++ WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_RTT),
++ WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID,
++ WMI_TLV_RTT_ERROR_REPORT_EVENTID,
++ WMI_TLV_STATS_EXT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_STATS),
++ WMI_TLV_IFACE_LINK_STATS_EVENTID,
++ WMI_TLV_PEER_LINK_STATS_EVENTID,
++ WMI_TLV_RADIO_LINK_STATS_EVENTID,
++ WMI_TLV_NLO_MATCH_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NLO_OFL),
++ WMI_TLV_NLO_SCAN_COMPLETE_EVENTID,
++ WMI_TLV_APFIND_EVENTID,
++ WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_GTK_OFL),
++ WMI_TLV_GTK_REKEY_FAIL_EVENTID,
++ WMI_TLV_CSA_HANDLING_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_CSA_OFL),
++ WMI_TLV_CHATTER_PC_QUERY_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_CHATTER),
++ WMI_TLV_ECHO_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MISC),
++ WMI_TLV_PDEV_UTF_EVENTID,
++ WMI_TLV_DEBUG_MESG_EVENTID,
++ WMI_TLV_UPDATE_STATS_EVENTID,
++ WMI_TLV_DEBUG_PRINT_EVENTID,
++ WMI_TLV_DCS_INTERFERENCE_EVENTID,
++ WMI_TLV_PDEV_QVIT_EVENTID,
++ WMI_TLV_WLAN_PROFILE_DATA_EVENTID,
++ WMI_TLV_PDEV_FTM_INTG_EVENTID,
++ WMI_TLV_WLAN_FREQ_AVOID_EVENTID,
++ WMI_TLV_VDEV_GET_KEEPALIVE_EVENTID,
++ WMI_TLV_THERMAL_MGMT_EVENTID,
++ WMI_TLV_DIAG_DATA_CONTAINER_EVENTID,
++ WMI_TLV_HOST_AUTO_SHUTDOWN_EVENTID,
++ WMI_TLV_UPDATE_WHAL_MIB_STATS_EVENTID,
++ WMI_TLV_UPDATE_VDEV_RATE_STATS_EVENTID,
++ WMI_TLV_DIAG_EVENTID,
++ WMI_TLV_GPIO_INPUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_GPIO),
++ WMI_TLV_UPLOADH_EVENTID,
++ WMI_TLV_CAPTUREH_EVENTID,
++ WMI_TLV_RFKILL_STATE_CHANGE_EVENTID,
++ WMI_TLV_TDLS_PEER_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_TDLS),
++ WMI_TLV_BATCH_SCAN_ENABLED_EVENTID =
++ WMI_TLV_EV(WMI_TLV_GRP_LOCATION_SCAN),
++ WMI_TLV_BATCH_SCAN_RESULT_EVENTID,
++ WMI_TLV_OEM_CAPABILITY_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_OEM),
++ WMI_TLV_OEM_MEASUREMENT_REPORT_EVENTID,
++ WMI_TLV_OEM_ERROR_REPORT_EVENTID,
++ WMI_TLV_NAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NAN),
++ WMI_TLV_LPI_RESULT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_LPI),
++ WMI_TLV_LPI_STATUS_EVENTID,
++ WMI_TLV_LPI_HANDOFF_EVENTID,
++ WMI_TLV_EXTSCAN_START_STOP_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_EXTSCAN),
++ WMI_TLV_EXTSCAN_OPERATION_EVENTID,
++ WMI_TLV_EXTSCAN_TABLE_USAGE_EVENTID,
++ WMI_TLV_EXTSCAN_CACHED_RESULTS_EVENTID,
++ WMI_TLV_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID,
++ WMI_TLV_EXTSCAN_HOTLIST_MATCH_EVENTID,
++ WMI_TLV_EXTSCAN_CAPABILITIES_EVENTID,
++ WMI_TLV_MDNS_STATS_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MDNS_OFL),
++ WMI_TLV_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SAP_OFL),
++ WMI_TLV_SAP_OFL_DEL_STA_EVENTID,
++};
++
++enum wmi_tlv_pdev_param {
++ WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
++ WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
++ WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
++ WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
++ WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
++ WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
++ WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
++ WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
++ WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
++ WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
++ WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
++ WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
++ WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
++ WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
++ WMI_TLV_PDEV_PARAM_LTR_ENABLE,
++ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
++ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
++ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
++ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
++ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
++ WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
++ WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
++ WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
++ WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
++ WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
++ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
++ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
++ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
++ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
++ WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
++ WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
++ WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
++ WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
++ WMI_TLV_PDEV_PARAM_PMF_QOS,
++ WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
++ WMI_TLV_PDEV_PARAM_DCS,
++ WMI_TLV_PDEV_PARAM_ANI_ENABLE,
++ WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
++ WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
++ WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
++ WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
++ WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
++ WMI_TLV_PDEV_PARAM_PROXY_STA,
++ WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
++ WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
++ WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
++ WMI_TLV_PDEV_PARAM_BURST_DUR,
++ WMI_TLV_PDEV_PARAM_BURST_ENABLE,
++ WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
++ WMI_TLV_PDEV_PARAM_LOW_POWER_RF_ENABLE,
++ WMI_TLV_PDEV_PARAM_L1SS_TRACK,
++ WMI_TLV_PDEV_PARAM_HYST_EN,
++ WMI_TLV_PDEV_PARAM_POWER_COLLAPSE_ENABLE,
++ WMI_TLV_PDEV_PARAM_LED_SYS_STATE,
++ WMI_TLV_PDEV_PARAM_LED_ENABLE,
++ WMI_TLV_PDEV_PARAM_AUDIO_OVER_WLAN_LATENCY,
++ WMI_TLV_PDEV_PARAM_AUDIO_OVER_WLAN_ENABLE,
++ WMI_TLV_PDEV_PARAM_WHAL_MIB_STATS_UPDATE_ENABLE,
++ WMI_TLV_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
++ WMI_TLV_PDEV_PARAM_TXPOWER_REASON_NONE,
++ WMI_TLV_PDEV_PARAM_TXPOWER_REASON_SAR,
++ WMI_TLV_PDEV_PARAM_TXPOWER_REASON_MAX,
++};
++
++enum wmi_tlv_vdev_param {
++ WMI_TLV_VDEV_PARAM_RTS_THRESHOLD = 0x1,
++ WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
++ WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
++ WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
++ WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
++ WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
++ WMI_TLV_VDEV_PARAM_SLOT_TIME,
++ WMI_TLV_VDEV_PARAM_PREAMBLE,
++ WMI_TLV_VDEV_PARAM_SWBA_TIME,
++ WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
++ WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
++ WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
++ WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
++ WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
++ WMI_TLV_VDEV_PARAM_WDS,
++ WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
++ WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
++ WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
++ WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
++ WMI_TLV_VDEV_PARAM_FEATURE_WMM,
++ WMI_TLV_VDEV_PARAM_CHWIDTH,
++ WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
++ WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
++ WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
++ WMI_TLV_VDEV_PARAM_MGMT_RATE,
++ WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
++ WMI_TLV_VDEV_PARAM_FIXED_RATE,
++ WMI_TLV_VDEV_PARAM_SGI,
++ WMI_TLV_VDEV_PARAM_LDPC,
++ WMI_TLV_VDEV_PARAM_TX_STBC,
++ WMI_TLV_VDEV_PARAM_RX_STBC,
++ WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
++ WMI_TLV_VDEV_PARAM_DEF_KEYID,
++ WMI_TLV_VDEV_PARAM_NSS,
++ WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
++ WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
++ WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
++ WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
++ WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
++ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
++ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
++ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
++ WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
++ WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
++ WMI_TLV_VDEV_PARAM_TXBF,
++ WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
++ WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
++ WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
++ WMI_TLV_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
++ WMI_TLV_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
++ WMI_TLV_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
++ WMI_TLV_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
++ WMI_TLV_VDEV_PARAM_EARLY_RX_SLOP_STEP,
++ WMI_TLV_VDEV_PARAM_EARLY_RX_INIT_SLOP,
++ WMI_TLV_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
++ WMI_TLV_VDEV_PARAM_TX_PWRLIMIT,
++ WMI_TLV_VDEV_PARAM_SNR_NUM_FOR_CAL,
++ WMI_TLV_VDEV_PARAM_ROAM_FW_OFFLOAD,
++ WMI_TLV_VDEV_PARAM_ENABLE_RMC,
++ WMI_TLV_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS,
++ WMI_TLV_VDEV_PARAM_MAX_RATE,
++ WMI_TLV_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE,
++ WMI_TLV_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR,
++ WMI_TLV_VDEV_PARAM_EBT_RESYNC_TIMEOUT,
++ WMI_TLV_VDEV_PARAM_AGGR_TRIG_EVENT_ENABLE,
++ WMI_TLV_VDEV_PARAM_IS_IBSS_POWER_SAVE_ALLOWED,
++ WMI_TLV_VDEV_PARAM_IS_POWER_COLLAPSE_ALLOWED,
++ WMI_TLV_VDEV_PARAM_IS_AWAKE_ON_TXRX_ENABLED,
++ WMI_TLV_VDEV_PARAM_INACTIVITY_CNT,
++ WMI_TLV_VDEV_PARAM_TXSP_END_INACTIVITY_TIME_MS,
++ WMI_TLV_VDEV_PARAM_DTIM_POLICY,
++ WMI_TLV_VDEV_PARAM_IBSS_PS_WARMUP_TIME_SECS,
++ WMI_TLV_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
++};
++
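++/* TLV tags identify the payload type of each TLV element: values up to
++ * 15 are reserved, 16-31 are the generic array tags, and the
++ * per-command/event struct tags follow from there. */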
++enum wmi_tlv_tag {
++ WMI_TLV_TAG_LAST_RESERVED = 15,
++
++ WMI_TLV_TAG_FIRST_ARRAY_ENUM,
++ WMI_TLV_TAG_ARRAY_UINT32 = WMI_TLV_TAG_FIRST_ARRAY_ENUM,
++ WMI_TLV_TAG_ARRAY_BYTE,
++ WMI_TLV_TAG_ARRAY_STRUCT,
++ WMI_TLV_TAG_ARRAY_FIXED_STRUCT,
++ WMI_TLV_TAG_LAST_ARRAY_ENUM = 31,
++
++ WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT,
++ WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES,
++ WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ,
++ WMI_TLV_TAG_STRUCT_READY_EVENT,
++ WMI_TLV_TAG_STRUCT_SCAN_EVENT,
++ WMI_TLV_TAG_STRUCT_PDEV_TPC_CONFIG_EVENT,
++ WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT,
++ WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR,
++ WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT,
++ WMI_TLV_TAG_STRUCT_VDEV_STOPPED_EVENT,
++ WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_COMPLETE_EVENT,
++ WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT,
++ WMI_TLV_TAG_STRUCT_MGMT_RX_HDR,
++ WMI_TLV_TAG_STRUCT_TBTT_OFFSET_EVENT,
++ WMI_TLV_TAG_STRUCT_TX_DELBA_COMPLETE_EVENT,
++ WMI_TLV_TAG_STRUCT_TX_ADDBA_COMPLETE_EVENT,
++ WMI_TLV_TAG_STRUCT_ROAM_EVENT,
++ WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO,
++ WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO_SECTION_BITMAP,
++ WMI_TLV_TAG_STRUCT_RTT_EVENT_HEADER,
++ WMI_TLV_TAG_STRUCT_RTT_ERROR_REPORT_EVENT,
++ WMI_TLV_TAG_STRUCT_RTT_MEAS_EVENT,
++ WMI_TLV_TAG_STRUCT_ECHO_EVENT,
++ WMI_TLV_TAG_STRUCT_FTM_INTG_EVENT,
++ WMI_TLV_TAG_STRUCT_VDEV_GET_KEEPALIVE_EVENT,
++ WMI_TLV_TAG_STRUCT_GPIO_INPUT_EVENT,
++ WMI_TLV_TAG_STRUCT_CSA_EVENT,
++ WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_STATUS_EVENT,
++ WMI_TLV_TAG_STRUCT_IGTK_INFO,
++ WMI_TLV_TAG_STRUCT_DCS_INTERFERENCE_EVENT,
++ WMI_TLV_TAG_STRUCT_ATH_DCS_CW_INT,
++ WMI_TLV_TAG_STRUCT_ATH_DCS_WLAN_INT_STAT,
++ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_CTX_T,
++ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_T,
++ WMI_TLV_TAG_STRUCT_PDEV_QVIT_EVENT,
++ WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT,
++ WMI_TLV_TAG_STRUCT_TIM_INFO,
++ WMI_TLV_TAG_STRUCT_P2P_NOA_INFO,
++ WMI_TLV_TAG_STRUCT_STATS_EVENT,
++ WMI_TLV_TAG_STRUCT_AVOID_FREQ_RANGES_EVENT,
++ WMI_TLV_TAG_STRUCT_AVOID_FREQ_RANGE_DESC,
++ WMI_TLV_TAG_STRUCT_GTK_REKEY_FAIL_EVENT,
++ WMI_TLV_TAG_STRUCT_INIT_CMD,
++ WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG,
++ WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK,
++ WMI_TLV_TAG_STRUCT_START_SCAN_CMD,
++ WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD,
++ WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD,
++ WMI_TLV_TAG_STRUCT_CHANNEL,
++ WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD,
++ WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD,
++ WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD,
++ WMI_TLV_TAG_STRUCT_WMM_PARAMS,
++ WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD,
++ WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD,
++ WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD,
++ WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD,
++ WMI_TLV_TAG_STRUCT_P2P_NOA_DESCRIPTOR,
++ WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE,
++ WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_CMD,
++ WMI_TLV_TAG_STRUCT_VDEV_UP_CMD,
++ WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD,
++ WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD,
++ WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD,
++ WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD,
++ WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD,
++ WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD,
++ WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD,
++ WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD,
++ WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD,
++ WMI_TLV_TAG_STRUCT_VHT_RATE_SET,
++ WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD,
++ WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD,
++ WMI_TLV_TAG_STRUCT_BCN_PRB_INFO,
++ WMI_TLV_TAG_STRUCT_PEER_TID_ADDBA_CMD,
++ WMI_TLV_TAG_STRUCT_PEER_TID_DELBA_CMD,
++ WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD,
++ WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD,
++ WMI_TLV_TAG_STRUCT_STA_DTIM_PS_METHOD_CMD,
++ WMI_TLV_TAG_STRUCT_ROAM_SCAN_MODE,
++ WMI_TLV_TAG_STRUCT_ROAM_SCAN_RSSI_THRESHOLD,
++ WMI_TLV_TAG_STRUCT_ROAM_SCAN_PERIOD,
++ WMI_TLV_TAG_STRUCT_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
++ WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD,
++ WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD,
++ WMI_TLV_TAG_STRUCT_ADD_BCN_FILTER_CMD,
++ WMI_TLV_TAG_STRUCT_RMV_BCN_FILTER_CMD,
++ WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD,
++ WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
++ WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD,
++ WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM,
++ WMI_TLV_TAG_STRUCT_SET_ARP_NS_OFFLOAD_CMD,
++ WMI_TLV_TAG_STRUCT_ARP_OFFLOAD_TUPLE,
++ WMI_TLV_TAG_STRUCT_NS_OFFLOAD_TUPLE,
++ WMI_TLV_TAG_STRUCT_FTM_INTG_CMD,
++ WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD,
++ WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE,
++ WMI_TLV_TAG_STRUCT_P2P_SET_VENDOR_IE_DATA_CMD,
++ WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD,
++ WMI_TLV_TAG_STRUCT_PEER_RATE_RETRY_SCHED_CMD,
++ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_TRIGGER_CMD,
++ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_SET_HIST_INTVL_CMD,
++ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_GET_PROF_DATA_CMD,
++ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_ENABLE_PROFILE_ID_CMD,
++ WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD,
++ WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD,
++ WMI_TLV_TAG_STRUCT_RTT_MEASREQ_HEAD,
++ WMI_TLV_TAG_STRUCT_RTT_MEASREQ_BODY,
++ WMI_TLV_TAG_STRUCT_RTT_TSF_CMD,
++ WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD,
++ WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD,
++ WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD,
++ WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD,
++ WMI_TLV_TAG_STRUCT_NLO_CONFIGURED_PARAMETERS,
++ WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_ENABLE_CMD,
++ WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_CHANSWITCH_CMD,
++ WMI_TLV_TAG_STRUCT_CHATTER_SET_MODE_CMD,
++ WMI_TLV_TAG_STRUCT_ECHO_CMD,
++ WMI_TLV_TAG_STRUCT_VDEV_SET_KEEPALIVE_CMD,
++ WMI_TLV_TAG_STRUCT_VDEV_GET_KEEPALIVE_CMD,
++ WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD,
++ WMI_TLV_TAG_STRUCT_GPIO_CONFIG_CMD,
++ WMI_TLV_TAG_STRUCT_GPIO_OUTPUT_CMD,
++ WMI_TLV_TAG_STRUCT_PEER_ADD_WDS_ENTRY_CMD,
++ WMI_TLV_TAG_STRUCT_PEER_REMOVE_WDS_ENTRY_CMD,
++ WMI_TLV_TAG_STRUCT_BCN_TX_HDR,
++ WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD,
++ WMI_TLV_TAG_STRUCT_MGMT_TX_HDR,
++ WMI_TLV_TAG_STRUCT_ADDBA_CLEAR_RESP_CMD,
++ WMI_TLV_TAG_STRUCT_ADDBA_SEND_CMD,
++ WMI_TLV_TAG_STRUCT_DELBA_SEND_CMD,
++ WMI_TLV_TAG_STRUCT_ADDBA_SETRESPONSE_CMD,
++ WMI_TLV_TAG_STRUCT_SEND_SINGLEAMSDU_CMD,
++ WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD,
++ WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD,
++ WMI_TLV_TAG_STRUCT_PDEV_SET_HT_IE_CMD,
++ WMI_TLV_TAG_STRUCT_PDEV_SET_VHT_IE_CMD,
++ WMI_TLV_TAG_STRUCT_PDEV_SET_DSCP_TID_MAP_CMD,
++ WMI_TLV_TAG_STRUCT_PDEV_GREEN_AP_PS_ENABLE_CMD,
++ WMI_TLV_TAG_STRUCT_PDEV_GET_TPC_CONFIG_CMD,
++ WMI_TLV_TAG_STRUCT_PDEV_SET_BASE_MACADDR_CMD,
++ WMI_TLV_TAG_STRUCT_PEER_MCAST_GROUP_CMD,
++ WMI_TLV_TAG_STRUCT_ROAM_AP_PROFILE,
++ WMI_TLV_TAG_STRUCT_AP_PROFILE,
++ WMI_TLV_TAG_STRUCT_SCAN_SCH_PRIORITY_TABLE_CMD,
++ WMI_TLV_TAG_STRUCT_PDEV_DFS_ENABLE_CMD,
++ WMI_TLV_TAG_STRUCT_PDEV_DFS_DISABLE_CMD,
++ WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD,
++ WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T,
++ WMI_TLV_TAG_STRUCT_WOW_IPV4_SYNC_PATTERN_T,
++ WMI_TLV_TAG_STRUCT_WOW_IPV6_SYNC_PATTERN_T,
++ WMI_TLV_TAG_STRUCT_WOW_MAGIC_PATTERN_CMD,
++ WMI_TLV_TAG_STRUCT_SCAN_UPDATE_REQUEST_CMD,
++ WMI_TLV_TAG_STRUCT_CHATTER_PKT_COALESCING_FILTER,
++ WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_ADD_FILTER_CMD,
++ WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_DELETE_FILTER_CMD,
++ WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_QUERY_CMD,
++ WMI_TLV_TAG_STRUCT_TXBF_CMD,
++ WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD,
++ WMI_TLV_TAG_STRUCT_NLO_EVENT,
++ WMI_TLV_TAG_STRUCT_CHATTER_QUERY_REPLY_EVENT,
++ WMI_TLV_TAG_STRUCT_UPLOAD_H_HDR,
++ WMI_TLV_TAG_STRUCT_CAPTURE_H_EVENT_HDR,
++ WMI_TLV_TAG_STRUCT_VDEV_WNM_SLEEPMODE_CMD,
++ WMI_TLV_TAG_STRUCT_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD,
++ WMI_TLV_TAG_STRUCT_VDEV_WMM_ADDTS_CMD,
++ WMI_TLV_TAG_STRUCT_VDEV_WMM_DELTS_CMD,
++ WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD,
++ WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD,
++ WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD,
++ WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT,
++ WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES,
++ WMI_TLV_TAG_STRUCT_VDEV_MCC_SET_TBTT_MODE_CMD,
++ WMI_TLV_TAG_STRUCT_ROAM_CHAN_LIST,
++ WMI_TLV_TAG_STRUCT_VDEV_MCC_BCN_INTVL_CHANGE_EVENT,
++ WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD,
++ WMI_TLV_TAG_STRUCT_RESMGR_SET_CHAN_TIME_QUOTA_CMD,
++ WMI_TLV_TAG_STRUCT_RESMGR_SET_CHAN_LATENCY_CMD,
++ WMI_TLV_TAG_STRUCT_BA_REQ_SSN_CMD,
++ WMI_TLV_TAG_STRUCT_BA_RSP_SSN_EVENT,
++ WMI_TLV_TAG_STRUCT_STA_SMPS_FORCE_MODE_CMD,
++ WMI_TLV_TAG_STRUCT_SET_MCASTBCAST_FILTER_CMD,
++ WMI_TLV_TAG_STRUCT_P2P_SET_OPPPS_CMD,
++ WMI_TLV_TAG_STRUCT_P2P_SET_NOA_CMD,
++ WMI_TLV_TAG_STRUCT_BA_REQ_SSN_CMD_SUB_STRUCT_PARAM,
++ WMI_TLV_TAG_STRUCT_BA_REQ_SSN_EVENT_SUB_STRUCT_PARAM,
++ WMI_TLV_TAG_STRUCT_STA_SMPS_PARAM_CMD,
++ WMI_TLV_TAG_STRUCT_VDEV_SET_GTX_PARAMS_CMD,
++ WMI_TLV_TAG_STRUCT_MCC_SCHED_TRAFFIC_STATS_CMD,
++ WMI_TLV_TAG_STRUCT_MCC_SCHED_STA_TRAFFIC_STATS,
++ WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT,
++ WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT,
++ WMI_TLV_TAG_STRUCT_HB_SET_ENABLE_CMD,
++ WMI_TLV_TAG_STRUCT_HB_SET_TCP_PARAMS_CMD,
++ WMI_TLV_TAG_STRUCT_HB_SET_TCP_PKT_FILTER_CMD,
++ WMI_TLV_TAG_STRUCT_HB_SET_UDP_PARAMS_CMD,
++ WMI_TLV_TAG_STRUCT_HB_SET_UDP_PKT_FILTER_CMD,
++ WMI_TLV_TAG_STRUCT_HB_IND_EVENT,
++ WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT,
++ WMI_TLV_TAG_STRUCT_RFKILL_EVENT,
++ WMI_TLV_TAG_STRUCT_DFS_RADAR_EVENT,
++ WMI_TLV_TAG_STRUCT_DFS_PHYERR_FILTER_ENA_CMD,
++ WMI_TLV_TAG_STRUCT_DFS_PHYERR_FILTER_DIS_CMD,
++ WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_SCAN_LIST,
++ WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_NETWORK_INFO,
++ WMI_TLV_TAG_STRUCT_BATCH_SCAN_ENABLE_CMD,
++ WMI_TLV_TAG_STRUCT_BATCH_SCAN_DISABLE_CMD,
++ WMI_TLV_TAG_STRUCT_BATCH_SCAN_TRIGGER_RESULT_CMD,
++ WMI_TLV_TAG_STRUCT_BATCH_SCAN_ENABLED_EVENT,
++ WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_EVENT,
++ WMI_TLV_TAG_STRUCT_VDEV_PLMREQ_START_CMD,
++ WMI_TLV_TAG_STRUCT_VDEV_PLMREQ_STOP_CMD,
++ WMI_TLV_TAG_STRUCT_THERMAL_MGMT_CMD,
++ WMI_TLV_TAG_STRUCT_THERMAL_MGMT_EVENT,
++ WMI_TLV_TAG_STRUCT_PEER_INFO_REQ_CMD,
++ WMI_TLV_TAG_STRUCT_PEER_INFO_EVENT,
++ WMI_TLV_TAG_STRUCT_PEER_INFO,
++ WMI_TLV_TAG_STRUCT_PEER_TX_FAIL_CNT_THR_EVENT,
++ WMI_TLV_TAG_STRUCT_RMC_SET_MODE_CMD,
++ WMI_TLV_TAG_STRUCT_RMC_SET_ACTION_PERIOD_CMD,
++ WMI_TLV_TAG_STRUCT_RMC_CONFIG_CMD,
++ WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_SET_MODE_CMD,
++ WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_PLUMB_ROUTING_TABLE_CMD,
++ WMI_TLV_TAG_STRUCT_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD,
++ WMI_TLV_TAG_STRUCT_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD,
++ WMI_TLV_TAG_STRUCT_NAN_CMD_PARAM,
++ WMI_TLV_TAG_STRUCT_NAN_EVENT_HDR,
++ WMI_TLV_TAG_STRUCT_PDEV_L1SS_TRACK_EVENT,
++ WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT,
++ WMI_TLV_TAG_STRUCT_MODEM_POWER_STATE_CMD_PARAM,
++ WMI_TLV_TAG_STRUCT_PEER_GET_ESTIMATED_LINKSPEED_CMD,
++ WMI_TLV_TAG_STRUCT_PEER_ESTIMATED_LINKSPEED_EVENT,
++ WMI_TLV_TAG_STRUCT_AGGR_STATE_TRIG_EVENT,
++ WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_ROUTING_TABLE_ENTRY,
++ WMI_TLV_TAG_STRUCT_ROAM_SCAN_CMD,
++ WMI_TLV_TAG_STRUCT_REQ_STATS_EXT_CMD,
++ WMI_TLV_TAG_STRUCT_STATS_EXT_EVENT,
++ WMI_TLV_TAG_STRUCT_OBSS_SCAN_ENABLE_CMD,
++ WMI_TLV_TAG_STRUCT_OBSS_SCAN_DISABLE_CMD,
++ WMI_TLV_TAG_STRUCT_OFFLOAD_PRB_RSP_TX_STATUS_EVENT,
++ WMI_TLV_TAG_STRUCT_PDEV_SET_LED_CONFIG_CMD,
++ WMI_TLV_TAG_STRUCT_HOST_AUTO_SHUTDOWN_CFG_CMD,
++ WMI_TLV_TAG_STRUCT_HOST_AUTO_SHUTDOWN_EVENT,
++ WMI_TLV_TAG_STRUCT_UPDATE_WHAL_MIB_STATS_EVENT,
++ WMI_TLV_TAG_STRUCT_CHAN_AVOID_UPDATE_CMD_PARAM,
++ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_PKT_PATTERN_T,
++ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_TMR_PATTERN_T,
++ WMI_TLV_TAG_STRUCT_WOW_IOAC_ADD_KEEPALIVE_CMD,
++ WMI_TLV_TAG_STRUCT_WOW_IOAC_DEL_KEEPALIVE_CMD,
++ WMI_TLV_TAG_STRUCT_WOW_IOAC_KEEPALIVE_T,
++ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_ADD_PATTERN_CMD,
++ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_DEL_PATTERN_CMD,
++ WMI_TLV_TAG_STRUCT_START_LINK_STATS_CMD,
++ WMI_TLV_TAG_STRUCT_CLEAR_LINK_STATS_CMD,
++ WMI_TLV_TAG_STRUCT_REQUEST_LINK_STATS_CMD,
++ WMI_TLV_TAG_STRUCT_IFACE_LINK_STATS_EVENT,
++ WMI_TLV_TAG_STRUCT_RADIO_LINK_STATS_EVENT,
++ WMI_TLV_TAG_STRUCT_PEER_STATS_EVENT,
++ WMI_TLV_TAG_STRUCT_CHANNEL_STATS,
++ WMI_TLV_TAG_STRUCT_RADIO_LINK_STATS,
++ WMI_TLV_TAG_STRUCT_RATE_STATS,
++ WMI_TLV_TAG_STRUCT_PEER_LINK_STATS,
++ WMI_TLV_TAG_STRUCT_WMM_AC_STATS,
++ WMI_TLV_TAG_STRUCT_IFACE_LINK_STATS,
++ WMI_TLV_TAG_STRUCT_LPI_MGMT_SNOOPING_CONFIG_CMD,
++ WMI_TLV_TAG_STRUCT_LPI_START_SCAN_CMD,
++ WMI_TLV_TAG_STRUCT_LPI_STOP_SCAN_CMD,
++ WMI_TLV_TAG_STRUCT_LPI_RESULT_EVENT,
++ WMI_TLV_TAG_STRUCT_PEER_STATE_EVENT,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_BUCKET_CMD,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_BUCKET_CHANNEL_EVENT,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_START_CMD,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_STOP_CMD,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMD,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_BSSID_PARAM_CMD,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMD,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_GET_CACHED_RESULTS_CMD,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMD,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_SET_CAPABILITIES_CMD,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_GET_CAPABILITIES_CMD,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_OPERATION_EVENT,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_START_STOP_EVENT,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_TABLE_USAGE_EVENT,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_DESCRIPTOR_EVENT,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_RSSI_INFO_EVENT,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_CACHED_RESULTS_EVENT,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_RESULTS_EVENT,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_RESULT_BSSID_EVENT,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_MATCH_EVENT,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_CAPABILITIES_EVENT,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_CACHE_CAPABILITIES_EVENT,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_MONITOR_CAPABILITIES_EVENT,
++ WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_MONITOR_CAPABILITIES_EVENT,
++ WMI_TLV_TAG_STRUCT_D0_WOW_ENABLE_DISABLE_CMD,
++ WMI_TLV_TAG_STRUCT_D0_WOW_DISABLE_ACK_EVENT,
++ WMI_TLV_TAG_STRUCT_UNIT_TEST_CMD,
++ WMI_TLV_TAG_STRUCT_ROAM_OFFLOAD_TLV_PARAM,
++ WMI_TLV_TAG_STRUCT_ROAM_11I_OFFLOAD_TLV_PARAM,
++ WMI_TLV_TAG_STRUCT_ROAM_11R_OFFLOAD_TLV_PARAM,
++ WMI_TLV_TAG_STRUCT_ROAM_ESE_OFFLOAD_TLV_PARAM,
++ WMI_TLV_TAG_STRUCT_ROAM_SYNCH_EVENT,
++ WMI_TLV_TAG_STRUCT_ROAM_SYNCH_COMPLETE,
++ WMI_TLV_TAG_STRUCT_EXTWOW_ENABLE_CMD,
++ WMI_TLV_TAG_STRUCT_EXTWOW_SET_APP_TYPE1_PARAMS_CMD,
++ WMI_TLV_TAG_STRUCT_EXTWOW_SET_APP_TYPE2_PARAMS_CMD,
++ WMI_TLV_TAG_STRUCT_LPI_STATUS_EVENT,
++ WMI_TLV_TAG_STRUCT_LPI_HANDOFF_EVENT,
++ WMI_TLV_TAG_STRUCT_VDEV_RATE_STATS_EVENT,
++ WMI_TLV_TAG_STRUCT_VDEV_RATE_HT_INFO,
++ WMI_TLV_TAG_STRUCT_RIC_REQUEST,
++ WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD,
++ WMI_TLV_TAG_STRUCT_PDEV_TEMPERATURE_EVENT,
++ WMI_TLV_TAG_STRUCT_SET_DHCP_SERVER_OFFLOAD_CMD,
++ WMI_TLV_TAG_STRUCT_TPC_CHAINMASK_CONFIG_CMD,
++ WMI_TLV_TAG_STRUCT_RIC_TSPEC,
++ WMI_TLV_TAG_STRUCT_TPC_CHAINMASK_CONFIG,
++ WMI_TLV_TAG_STRUCT_IPA_OFFLOAD_CMD,
++ WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD,
++ WMI_TLV_TAG_STRUCT_KEY_MATERIAL,
++ WMI_TLV_TAG_STRUCT_TDLS_SET_OFFCHAN_MODE_CMD,
++ WMI_TLV_TAG_STRUCT_SET_LED_FLASHING_CMD,
++ WMI_TLV_TAG_STRUCT_MDNS_OFFLOAD_CMD,
++ WMI_TLV_TAG_STRUCT_MDNS_SET_FQDN_CMD,
++ WMI_TLV_TAG_STRUCT_MDNS_SET_RESP_CMD,
++ WMI_TLV_TAG_STRUCT_MDNS_GET_STATS_CMD,
++ WMI_TLV_TAG_STRUCT_MDNS_STATS_EVENT,
++ WMI_TLV_TAG_STRUCT_ROAM_INVOKE_CMD,
++ WMI_TLV_TAG_STRUCT_PDEV_RESUME_EVENT,
++ WMI_TLV_TAG_STRUCT_PDEV_SET_ANTENNA_DIVERSITY_CMD,
++ WMI_TLV_TAG_STRUCT_SAP_OFL_ENABLE_CMD,
++ WMI_TLV_TAG_STRUCT_SAP_OFL_ADD_STA_EVENT,
++ WMI_TLV_TAG_STRUCT_SAP_OFL_DEL_STA_EVENT,
++ WMI_TLV_TAG_STRUCT_APFIND_CMD_PARAM,
++ WMI_TLV_TAG_STRUCT_APFIND_EVENT_HDR,
++
++ WMI_TLV_TAG_MAX
++};
++
++enum wmi_tlv_service {
++ WMI_TLV_SERVICE_BEACON_OFFLOAD = 0,
++ WMI_TLV_SERVICE_SCAN_OFFLOAD,
++ WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD,
++ WMI_TLV_SERVICE_BCN_MISS_OFFLOAD,
++ WMI_TLV_SERVICE_STA_PWRSAVE,
++ WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE,
++ WMI_TLV_SERVICE_AP_UAPSD,
++ WMI_TLV_SERVICE_AP_DFS,
++ WMI_TLV_SERVICE_11AC,
++ WMI_TLV_SERVICE_BLOCKACK,
++ WMI_TLV_SERVICE_PHYERR,
++ WMI_TLV_SERVICE_BCN_FILTER,
++ WMI_TLV_SERVICE_RTT,
++ WMI_TLV_SERVICE_WOW,
++ WMI_TLV_SERVICE_RATECTRL_CACHE,
++ WMI_TLV_SERVICE_IRAM_TIDS,
++ WMI_TLV_SERVICE_ARPNS_OFFLOAD,
++ WMI_TLV_SERVICE_NLO,
++ WMI_TLV_SERVICE_GTK_OFFLOAD,
++ WMI_TLV_SERVICE_SCAN_SCH,
++ WMI_TLV_SERVICE_CSA_OFFLOAD,
++ WMI_TLV_SERVICE_CHATTER,
++ WMI_TLV_SERVICE_COEX_FREQAVOID,
++ WMI_TLV_SERVICE_PACKET_POWER_SAVE,
++ WMI_TLV_SERVICE_FORCE_FW_HANG,
++ WMI_TLV_SERVICE_GPIO,
++ WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
++ WMI_TLV_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
++ WMI_TLV_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
++ WMI_TLV_SERVICE_STA_KEEP_ALIVE,
++ WMI_TLV_SERVICE_TX_ENCAP,
++ WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
++ WMI_TLV_SERVICE_EARLY_RX,
++ WMI_TLV_SERVICE_STA_SMPS,
++ WMI_TLV_SERVICE_FWTEST,
++ WMI_TLV_SERVICE_STA_WMMAC,
++ WMI_TLV_SERVICE_TDLS,
++ WMI_TLV_SERVICE_BURST,
++ WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE,
++ WMI_TLV_SERVICE_ADAPTIVE_OCS,
++ WMI_TLV_SERVICE_BA_SSN_SUPPORT,
++ WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
++ WMI_TLV_SERVICE_WLAN_HB,
++ WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT,
++ WMI_TLV_SERVICE_BATCH_SCAN,
++ WMI_TLV_SERVICE_QPOWER,
++ WMI_TLV_SERVICE_PLMREQ,
++ WMI_TLV_SERVICE_THERMAL_MGMT,
++ WMI_TLV_SERVICE_RMC,
++ WMI_TLV_SERVICE_MHF_OFFLOAD,
++ WMI_TLV_SERVICE_COEX_SAR,
++ WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE,
++ WMI_TLV_SERVICE_NAN,
++ WMI_TLV_SERVICE_L1SS_STAT,
++ WMI_TLV_SERVICE_ESTIMATE_LINKSPEED,
++ WMI_TLV_SERVICE_OBSS_SCAN,
++ WMI_TLV_SERVICE_TDLS_OFFCHAN,
++ WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA,
++ WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA,
++ WMI_TLV_SERVICE_IBSS_PWRSAVE,
++ WMI_TLV_SERVICE_LPASS,
++ WMI_TLV_SERVICE_EXTSCAN,
++ WMI_TLV_SERVICE_D0WOW,
++ WMI_TLV_SERVICE_HSOFFLOAD,
++ WMI_TLV_SERVICE_ROAM_HO_OFFLOAD,
++ WMI_TLV_SERVICE_RX_FULL_REORDER,
++ WMI_TLV_SERVICE_DHCP_OFFLOAD,
++ WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
++ WMI_TLV_SERVICE_MDNS_OFFLOAD,
++ WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
++};
++
++#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
++ ((svc_id) < (len) && \
++ __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
++ BIT((svc_id)%(sizeof(u32))))
++
++#define SVCMAP(x, y, len) \
++ do { \
++ if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \
++ __set_bit(y, out); \
++ } while (0)
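++/* SVCMAP deliberately expands against the 'in' and 'out' parameters of
++ * the enclosing wmi_tlv_svc_map() below, copying each advertised
++ * firmware service bit into the driver's generic service bitmap. */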
++
++static inline void
++wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len)
++{
++ SVCMAP(WMI_TLV_SERVICE_BEACON_OFFLOAD,
++ WMI_SERVICE_BEACON_OFFLOAD, len);
++ SVCMAP(WMI_TLV_SERVICE_SCAN_OFFLOAD,
++ WMI_SERVICE_SCAN_OFFLOAD, len);
++ SVCMAP(WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD,
++ WMI_SERVICE_ROAM_SCAN_OFFLOAD, len);
++ SVCMAP(WMI_TLV_SERVICE_BCN_MISS_OFFLOAD,
++ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
++ SVCMAP(WMI_TLV_SERVICE_STA_PWRSAVE,
++ WMI_SERVICE_STA_PWRSAVE, len);
++ SVCMAP(WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE,
++ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
++ SVCMAP(WMI_TLV_SERVICE_AP_UAPSD,
++ WMI_SERVICE_AP_UAPSD, len);
++ SVCMAP(WMI_TLV_SERVICE_AP_DFS,
++ WMI_SERVICE_AP_DFS, len);
++ SVCMAP(WMI_TLV_SERVICE_11AC,
++ WMI_SERVICE_11AC, len);
++ SVCMAP(WMI_TLV_SERVICE_BLOCKACK,
++ WMI_SERVICE_BLOCKACK, len);
++ SVCMAP(WMI_TLV_SERVICE_PHYERR,
++ WMI_SERVICE_PHYERR, len);
++ SVCMAP(WMI_TLV_SERVICE_BCN_FILTER,
++ WMI_SERVICE_BCN_FILTER, len);
++ SVCMAP(WMI_TLV_SERVICE_RTT,
++ WMI_SERVICE_RTT, len);
++ SVCMAP(WMI_TLV_SERVICE_WOW,
++ WMI_SERVICE_WOW, len);
++ SVCMAP(WMI_TLV_SERVICE_RATECTRL_CACHE,
++ WMI_SERVICE_RATECTRL_CACHE, len);
++ SVCMAP(WMI_TLV_SERVICE_IRAM_TIDS,
++ WMI_SERVICE_IRAM_TIDS, len);
++ SVCMAP(WMI_TLV_SERVICE_ARPNS_OFFLOAD,
++ WMI_SERVICE_ARPNS_OFFLOAD, len);
++ SVCMAP(WMI_TLV_SERVICE_NLO,
++ WMI_SERVICE_NLO, len);
++ SVCMAP(WMI_TLV_SERVICE_GTK_OFFLOAD,
++ WMI_SERVICE_GTK_OFFLOAD, len);
++ SVCMAP(WMI_TLV_SERVICE_SCAN_SCH,
++ WMI_SERVICE_SCAN_SCH, len);
++ SVCMAP(WMI_TLV_SERVICE_CSA_OFFLOAD,
++ WMI_SERVICE_CSA_OFFLOAD, len);
++ SVCMAP(WMI_TLV_SERVICE_CHATTER,
++ WMI_SERVICE_CHATTER, len);
++ SVCMAP(WMI_TLV_SERVICE_COEX_FREQAVOID,
++ WMI_SERVICE_COEX_FREQAVOID, len);
++ SVCMAP(WMI_TLV_SERVICE_PACKET_POWER_SAVE,
++ WMI_SERVICE_PACKET_POWER_SAVE, len);
++ SVCMAP(WMI_TLV_SERVICE_FORCE_FW_HANG,
++ WMI_SERVICE_FORCE_FW_HANG, len);
++ SVCMAP(WMI_TLV_SERVICE_GPIO,
++ WMI_SERVICE_GPIO, len);
++ SVCMAP(WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
++ WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, len);
++ SVCMAP(WMI_TLV_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
++ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
++ SVCMAP(WMI_TLV_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
++ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
++ SVCMAP(WMI_TLV_SERVICE_STA_KEEP_ALIVE,
++ WMI_SERVICE_STA_KEEP_ALIVE, len);
++ SVCMAP(WMI_TLV_SERVICE_TX_ENCAP,
++ WMI_SERVICE_TX_ENCAP, len);
++ SVCMAP(WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
++ WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len);
++ SVCMAP(WMI_TLV_SERVICE_EARLY_RX,
++ WMI_SERVICE_EARLY_RX, len);
++ SVCMAP(WMI_TLV_SERVICE_STA_SMPS,
++ WMI_SERVICE_STA_SMPS, len);
++ SVCMAP(WMI_TLV_SERVICE_FWTEST,
++ WMI_SERVICE_FWTEST, len);
++ SVCMAP(WMI_TLV_SERVICE_STA_WMMAC,
++ WMI_SERVICE_STA_WMMAC, len);
++ SVCMAP(WMI_TLV_SERVICE_TDLS,
++ WMI_SERVICE_TDLS, len);
++ SVCMAP(WMI_TLV_SERVICE_BURST,
++ WMI_SERVICE_BURST, len);
++ SVCMAP(WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE,
++ WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE, len);
++ SVCMAP(WMI_TLV_SERVICE_ADAPTIVE_OCS,
++ WMI_SERVICE_ADAPTIVE_OCS, len);
++ SVCMAP(WMI_TLV_SERVICE_BA_SSN_SUPPORT,
++ WMI_SERVICE_BA_SSN_SUPPORT, len);
++ SVCMAP(WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
++ WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE, len);
++ SVCMAP(WMI_TLV_SERVICE_WLAN_HB,
++ WMI_SERVICE_WLAN_HB, len);
++ SVCMAP(WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT,
++ WMI_SERVICE_LTE_ANT_SHARE_SUPPORT, len);
++ SVCMAP(WMI_TLV_SERVICE_BATCH_SCAN,
++ WMI_SERVICE_BATCH_SCAN, len);
++ SVCMAP(WMI_TLV_SERVICE_QPOWER,
++ WMI_SERVICE_QPOWER, len);
++ SVCMAP(WMI_TLV_SERVICE_PLMREQ,
++ WMI_SERVICE_PLMREQ, len);
++ SVCMAP(WMI_TLV_SERVICE_THERMAL_MGMT,
++ WMI_SERVICE_THERMAL_MGMT, len);
++ SVCMAP(WMI_TLV_SERVICE_RMC,
++ WMI_SERVICE_RMC, len);
++ SVCMAP(WMI_TLV_SERVICE_MHF_OFFLOAD,
++ WMI_SERVICE_MHF_OFFLOAD, len);
++ SVCMAP(WMI_TLV_SERVICE_COEX_SAR,
++ WMI_SERVICE_COEX_SAR, len);
++ SVCMAP(WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE,
++ WMI_SERVICE_BCN_TXRATE_OVERRIDE, len);
++ SVCMAP(WMI_TLV_SERVICE_NAN,
++ WMI_SERVICE_NAN, len);
++ SVCMAP(WMI_TLV_SERVICE_L1SS_STAT,
++ WMI_SERVICE_L1SS_STAT, len);
++ SVCMAP(WMI_TLV_SERVICE_ESTIMATE_LINKSPEED,
++ WMI_SERVICE_ESTIMATE_LINKSPEED, len);
++ SVCMAP(WMI_TLV_SERVICE_OBSS_SCAN,
++ WMI_SERVICE_OBSS_SCAN, len);
++ SVCMAP(WMI_TLV_SERVICE_TDLS_OFFCHAN,
++ WMI_SERVICE_TDLS_OFFCHAN, len);
++ SVCMAP(WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA,
++ WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, len);
++ SVCMAP(WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA,
++ WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, len);
++ SVCMAP(WMI_TLV_SERVICE_IBSS_PWRSAVE,
++ WMI_SERVICE_IBSS_PWRSAVE, len);
++ SVCMAP(WMI_TLV_SERVICE_LPASS,
++ WMI_SERVICE_LPASS, len);
++ SVCMAP(WMI_TLV_SERVICE_EXTSCAN,
++ WMI_SERVICE_EXTSCAN, len);
++ SVCMAP(WMI_TLV_SERVICE_D0WOW,
++ WMI_SERVICE_D0WOW, len);
++ SVCMAP(WMI_TLV_SERVICE_HSOFFLOAD,
++ WMI_SERVICE_HSOFFLOAD, len);
++ SVCMAP(WMI_TLV_SERVICE_ROAM_HO_OFFLOAD,
++ WMI_SERVICE_ROAM_HO_OFFLOAD, len);
++ SVCMAP(WMI_TLV_SERVICE_RX_FULL_REORDER,
++ WMI_SERVICE_RX_FULL_REORDER, len);
++ SVCMAP(WMI_TLV_SERVICE_DHCP_OFFLOAD,
++ WMI_SERVICE_DHCP_OFFLOAD, len);
++ SVCMAP(WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
++ WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT, len);
++ SVCMAP(WMI_TLV_SERVICE_MDNS_OFFLOAD,
++ WMI_SERVICE_MDNS_OFFLOAD, len);
++ SVCMAP(WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
++ WMI_SERVICE_SAP_AUTH_OFFLOAD, len);
++}
++
++#undef SVCMAP
++
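++/* Every WMI TLV element on the wire starts with this little-endian
++ * header; 'len' counts only the value bytes that follow it, not the
++ * four header bytes themselves. */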
++struct wmi_tlv {
++ __le16 len;
++ __le16 tag;
++ u8 value[0];
++} __packed;
++
++#define WMI_TLV_MGMT_RX_NUM_RSSI 4
++
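++/* Fixed part of the management-rx event; the received frame body itself
++ * is carried separately, in a following byte-array TLV. */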
|
|
++struct wmi_tlv_mgmt_rx_ev {
|
|
++ __le32 channel;
|
|
++ __le32 snr;
|
|
++ __le32 rate;
|
|
++ __le32 phy_mode;
|
|
++ __le32 buf_len;
|
|
++ __le32 status;
|
|
++ __le32 rssi[WMI_TLV_MGMT_RX_NUM_RSSI];
|
|
++} __packed;
|
|
++
|
|
++struct wmi_tlv_abi_version {
|
|
++ __le32 abi_ver0;
|
|
++ __le32 abi_ver1;
|
|
++ __le32 abi_ver_ns0;
|
|
++ __le32 abi_ver_ns1;
|
|
++ __le32 abi_ver_ns2;
|
|
++ __le32 abi_ver_ns3;
|
|
++} __packed;
|
|
++
|
|
++enum wmi_tlv_hw_bd_id {
|
|
++ WMI_TLV_HW_BD_LEGACY = 0,
|
|
++ WMI_TLV_HW_BD_QCA6174 = 1,
|
|
++ WMI_TLV_HW_BD_QCA2582 = 2,
|
|
++};
|
|
++
|
|
++struct wmi_tlv_hw_bd_info {
|
|
++ u8 rev;
|
|
++ u8 project_id;
|
|
++ u8 custom_id;
|
|
++ u8 reference_design_id;
|
|
++} __packed;
|
|
++
|
|
++struct wmi_tlv_svc_rdy_ev {
|
|
++ __le32 fw_build_vers;
|
|
++ struct wmi_tlv_abi_version abi;
|
|
++ __le32 phy_capability;
|
|
++ __le32 max_frag_entry;
|
|
++ __le32 num_rf_chains;
|
|
++ __le32 ht_cap_info;
|
|
++ __le32 vht_cap_info;
|
|
++ __le32 vht_supp_mcs;
|
|
++ __le32 hw_min_tx_power;
|
|
++ __le32 hw_max_tx_power;
|
|
++ __le32 sys_cap_info;
|
|
++ __le32 min_pkt_size_enable;
|
|
++ __le32 max_bcn_ie_size;
|
|
++ __le32 num_mem_reqs;
|
|
++ __le32 max_num_scan_chans;
|
|
++ __le32 hw_bd_id; /* 0 means hw_bd_info is invalid */
|
|
++ struct wmi_tlv_hw_bd_info hw_bd_info[5];
|
|
++} __packed;
|
|
++
|
|
++struct wmi_tlv_rdy_ev {
|
|
++ struct wmi_tlv_abi_version abi;
|
|
++ struct wmi_mac_addr mac_addr;
|
|
++ __le32 status;
|
|
++} __packed;
|
|
++
|
|
++struct wmi_tlv_resource_config {
|
|
++ __le32 num_vdevs;
|
|
++ __le32 num_peers;
|
|
++ __le32 num_offload_peers;
|
|
++ __le32 num_offload_reorder_bufs;
|
|
++ __le32 num_peer_keys;
|
|
++ __le32 num_tids;
|
|
++ __le32 ast_skid_limit;
|
|
++ __le32 tx_chain_mask;
|
|
++ __le32 rx_chain_mask;
|
|
++ __le32 rx_timeout_pri[4];
|
|
++ __le32 rx_decap_mode;
|
|
++ __le32 scan_max_pending_reqs;
|
|
++ __le32 bmiss_offload_max_vdev;
|
|
++ __le32 roam_offload_max_vdev;
|
|
++ __le32 roam_offload_max_ap_profiles;
|
|
++ __le32 num_mcast_groups;
|
|
++ __le32 num_mcast_table_elems;
|
|
++ __le32 mcast2ucast_mode;
|
|
++ __le32 tx_dbg_log_size;
|
|
++ __le32 num_wds_entries;
|
|
++ __le32 dma_burst_size;
|
|
++ __le32 mac_aggr_delim;
|
|
++ __le32 rx_skip_defrag_timeout_dup_detection_check;
|
|
++ __le32 vow_config;
|
|
++ __le32 gtk_offload_max_vdev;
|
|
++ __le32 num_msdu_desc;
|
|
++ __le32 max_frag_entries;
|
|
++ __le32 num_tdls_vdevs;
|
|
++ __le32 num_tdls_conn_table_entries;
|
|
++ __le32 beacon_tx_offload_max_vdev;
|
|
++ __le32 num_multicast_filter_entries;
|
|
++ __le32 num_wow_filters;
|
|
++ __le32 num_keep_alive_pattern;
|
|
++ __le32 keep_alive_pattern_size;
|
|
++ __le32 max_tdls_concurrent_sleep_sta;
|
|
++ __le32 max_tdls_concurrent_buffer_sta;
|
|
++} __packed;
|
|
++
++struct wmi_tlv_init_cmd {
++ struct wmi_tlv_abi_version abi;
++ __le32 num_host_mem_chunks;
++} __packed;
++
++struct wmi_tlv_pdev_set_param_cmd {
++ __le32 pdev_id; /* not used yet */
++ __le32 param_id;
++ __le32 param_value;
++} __packed;
++
++struct wmi_tlv_pdev_set_rd_cmd {
++ __le32 pdev_id; /* not used yet */
++ __le32 regd;
++ __le32 regd_2ghz;
++ __le32 regd_5ghz;
++ __le32 conform_limit_2ghz;
++ __le32 conform_limit_5ghz;
++} __packed;
++
++struct wmi_tlv_scan_chan_list_cmd {
++ __le32 num_scan_chans;
++} __packed;
++
++struct wmi_tlv_start_scan_cmd {
++ struct wmi_start_scan_common common;
++ __le32 burst_duration_ms;
++ __le32 num_channels;
++ __le32 num_bssids;
++ __le32 num_ssids;
++ __le32 ie_len;
++ __le32 num_probes;
++} __packed;
++
++struct wmi_tlv_vdev_start_cmd {
++ __le32 vdev_id;
++ __le32 requestor_id;
++ __le32 bcn_intval;
++ __le32 dtim_period;
++ __le32 flags;
++ struct wmi_ssid ssid;
++ __le32 bcn_tx_rate;
++ __le32 bcn_tx_power;
++ __le32 num_noa_descr;
++ __le32 disable_hw_ack;
++} __packed;
++
++enum {
++ WMI_TLV_PEER_TYPE_DEFAULT = 0, /* generic / non-BSS / self-peer */
++ WMI_TLV_PEER_TYPE_BSS = 1,
++ WMI_TLV_PEER_TYPE_TDLS = 2,
++ WMI_TLV_PEER_TYPE_HOST_MAX = 127,
++ WMI_TLV_PEER_TYPE_ROAMOFFLOAD_TMP = 128,
++};
++
++struct wmi_tlv_peer_create_cmd {
++ __le32 vdev_id;
++ struct wmi_mac_addr peer_addr;
++ __le32 peer_type;
++} __packed;
++
++struct wmi_tlv_peer_assoc_cmd {
++ struct wmi_mac_addr mac_addr;
++ __le32 vdev_id;
++ __le32 new_assoc;
++ __le32 assoc_id;
++ __le32 flags;
++ __le32 caps;
++ __le32 listen_intval;
++ __le32 ht_caps;
++ __le32 max_mpdu;
++ __le32 mpdu_density;
++ __le32 rate_caps;
++ __le32 nss;
++ __le32 vht_caps;
++ __le32 phy_mode;
++ __le32 ht_info[2];
++ __le32 num_legacy_rates;
++ __le32 num_ht_rates;
++} __packed;
++
++struct wmi_tlv_pdev_suspend {
++ __le32 pdev_id; /* not used yet */
++ __le32 opt;
++} __packed;
++
++struct wmi_tlv_pdev_set_wmm_cmd {
++ __le32 pdev_id; /* not used yet */
++ __le32 dg_type; /* no idea.. */
++} __packed;
++
++struct wmi_tlv_vdev_wmm_params {
++ __le32 dummy;
++ struct wmi_wmm_params params;
++} __packed;
++
++struct wmi_tlv_vdev_set_wmm_cmd {
++ __le32 vdev_id;
++ struct wmi_tlv_vdev_wmm_params vdev_wmm_params[4];
++} __packed;
++
++struct wmi_tlv_phyerr_ev {
++ __le32 num_phyerrs;
++ __le32 tsf_l32;
++ __le32 tsf_u32;
++ __le32 buf_len;
++} __packed;
++
++enum wmi_tlv_dbglog_param {
++ WMI_TLV_DBGLOG_PARAM_LOG_LEVEL = 1,
++ WMI_TLV_DBGLOG_PARAM_VDEV_ENABLE,
++ WMI_TLV_DBGLOG_PARAM_VDEV_DISABLE,
++ WMI_TLV_DBGLOG_PARAM_VDEV_ENABLE_BITMAP,
++ WMI_TLV_DBGLOG_PARAM_VDEV_DISABLE_BITMAP,
++};
++
++enum wmi_tlv_dbglog_log_level {
++ WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE = 0,
++ WMI_TLV_DBGLOG_LOG_LEVEL_INFO,
++ WMI_TLV_DBGLOG_LOG_LEVEL_INFO_LVL_1,
++ WMI_TLV_DBGLOG_LOG_LEVEL_INFO_LVL_2,
++ WMI_TLV_DBGLOG_LOG_LEVEL_WARN,
++ WMI_TLV_DBGLOG_LOG_LEVEL_ERR,
++};
++
++#define WMI_TLV_DBGLOG_BITMAP_MAX_IDS 512
++#define WMI_TLV_DBGLOG_BITMAP_MAX_WORDS (WMI_TLV_DBGLOG_BITMAP_MAX_IDS / \
++ sizeof(__le32))
++#define WMI_TLV_DBGLOG_ALL_MODULES 0xffff
++#define WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(module_id, log_level) \
++ (((module_id << 16) & 0xffff0000) | \
++ ((log_level << 0) & 0x000000ff))
++
++struct wmi_tlv_dbglog_cmd {
++ __le32 param;
++ __le32 value;
++} __packed;
++
++struct wmi_tlv_resume_cmd {
++ __le32 reserved;
++} __packed;
++
++struct wmi_tlv_req_stats_cmd {
++ __le32 stats_id; /* wmi_stats_id */
++ __le32 vdev_id;
++ struct wmi_mac_addr peer_macaddr;
++} __packed;
++
++struct wmi_tlv_vdev_stats {
++ __le32 vdev_id;
++ __le32 beacon_snr;
++ __le32 data_snr;
++ __le32 num_tx_frames[4]; /* per-AC */
++ __le32 num_rx_frames;
++ __le32 num_tx_frames_retries[4];
++ __le32 num_tx_frames_failures[4];
++ __le32 num_rts_fail;
++ __le32 num_rts_success;
++ __le32 num_rx_err;
++ __le32 num_rx_discard;
++ __le32 num_tx_not_acked;
++ __le32 tx_rate_history[10];
++ __le32 beacon_rssi_history[10];
++} __packed;
++
++struct wmi_tlv_pktlog_enable {
++ __le32 reserved;
++ __le32 filter;
++} __packed;
++
++struct wmi_tlv_pktlog_disable {
++ __le32 reserved;
++} __packed;
++
++enum wmi_tlv_bcn_tx_status {
++ WMI_TLV_BCN_TX_STATUS_OK,
++ WMI_TLV_BCN_TX_STATUS_XRETRY,
++ WMI_TLV_BCN_TX_STATUS_DROP,
++ WMI_TLV_BCN_TX_STATUS_FILTERED,
++};
++
++struct wmi_tlv_bcn_tx_status_ev {
++ __le32 vdev_id;
++ __le32 tx_status;
++} __packed;
++
++struct wmi_tlv_bcn_prb_info {
++ __le32 caps;
++ __le32 erp;
++ u8 ies[0];
++} __packed;
++
++struct wmi_tlv_bcn_tmpl_cmd {
++ __le32 vdev_id;
++ __le32 tim_ie_offset;
++ __le32 buf_len;
++} __packed;
++
++struct wmi_tlv_prb_tmpl_cmd {
++ __le32 vdev_id;
++ __le32 buf_len;
++} __packed;
++
++struct wmi_tlv_p2p_go_bcn_ie {
++ __le32 vdev_id;
++ __le32 ie_len;
++} __packed;
++
++enum wmi_tlv_diag_item_type {
++ WMI_TLV_DIAG_ITEM_TYPE_FW_EVENT,
++ WMI_TLV_DIAG_ITEM_TYPE_FW_LOG,
++ WMI_TLV_DIAG_ITEM_TYPE_FW_DEBUG_MSG,
++};
++
++struct wmi_tlv_diag_item {
++ u8 type;
++ u8 reserved;
++ __le16 len;
++ __le32 timestamp;
++ __le32 code;
++ u8 payload[0];
++} __packed;
++
++struct wmi_tlv_diag_data_ev {
++ __le32 num_items;
++} __packed;
++
++struct wmi_tlv_sta_keepalive_cmd {
++ __le32 vdev_id;
++ __le32 enabled;
++ __le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */
++ __le32 interval; /* in seconds */
++} __packed;
++
++struct wmi_tlv_stats_ev {
++ __le32 stats_id; /* WMI_STAT_ */
++ __le32 num_pdev_stats;
++ __le32 num_vdev_stats;
++ __le32 num_peer_stats;
++ __le32 num_bcnflt_stats;
++ __le32 num_chan_stats;
++} __packed;
++
++void ath10k_wmi_tlv_attach(struct ath10k *ar);
++
++#endif
+--- a/backport-include/linux/etherdevice.h
++++ b/backport-include/linux/etherdevice.h
+@@ -148,6 +148,29 @@ static inline bool ether_addr_equal_unal
+ return memcmp(addr1, addr2, ETH_ALEN) == 0;
+ #endif
+ }
++
++/**
++ * ether_addr_copy - Copy an Ethernet address
++ * @dst: Pointer to a six-byte array Ethernet address destination
++ * @src: Pointer to a six-byte array Ethernet address source
++ *
++ * Please note: dst & src must both be aligned to u16.
++ */
++#define ether_addr_copy LINUX_BACKPORT(ether_addr_copy)
++static inline void ether_addr_copy(u8 *dst, const u8 *src)
++{
++#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
++ *(u32 *)dst = *(const u32 *)src;
++ *(u16 *)(dst + 4) = *(const u16 *)(src + 4);
++#else
++ u16 *a = (u16 *)dst;
++ const u16 *b = (const u16 *)src;
++
++ a[0] = b[0];
++ a[1] = b[1];
++ a[2] = b[2];
++#endif
++}
+ #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) */
+
+ #endif /* _BACKPORT_LINUX_ETHERDEVICE_H */
+--- a/drivers/net/wireless/ath/spectral_common.h
++++ b/drivers/net/wireless/ath/spectral_common.h
+@@ -20,6 +20,11 @@
+ #define SPECTRAL_HT20_NUM_BINS 56
+ #define SPECTRAL_HT20_40_NUM_BINS 128
+
++/* TODO: could possibly be 512, but no samples this large
++ * could be acquired so far.
++ */
++#define SPECTRAL_ATH10K_MAX_NUM_BINS 256
++
+ /* FFT sample format given to userspace via debugfs.
+ *
+ * Please keep the type/length at the front position and change
+@@ -31,6 +36,7 @@
+ enum ath_fft_sample_type {
+ ATH_FFT_SAMPLE_HT20 = 1,
+ ATH_FFT_SAMPLE_HT20_40,
++ ATH_FFT_SAMPLE_ATH10K,
+ };
+
+ struct fft_sample_tlv {
+@@ -85,4 +91,23 @@ struct fft_sample_ht20_40 {
+ u8 data[SPECTRAL_HT20_40_NUM_BINS];
+ } __packed;
+
++struct fft_sample_ath10k {
++ struct fft_sample_tlv tlv;
++ u8 chan_width_mhz;
++ __be16 freq1;
++ __be16 freq2;
++ __be16 noise;
++ __be16 max_magnitude;
++ __be16 total_gain_db;
++ __be16 base_pwr_db;
++ __be64 tsf;
++ s8 max_index;
++ u8 rssi;
++ u8 relpwr_db;
++ u8 avgpwr_db;
++ u8 max_exp;
++
++ u8 data[0];
++} __packed;
++
+ #endif /* SPECTRAL_COMMON_H */
diff --git a/package/kernel/mac80211/patches/920-ath10k_allow_fallback_to_board_bin_on_empty_otp_stream.patch b/package/kernel/mac80211/patches/920-ath10k_allow_fallback_to_board_bin_on_empty_otp_stream.patch
index 6a5c766..6a3d2a4 100644
--- a/package/kernel/mac80211/patches/920-ath10k_allow_fallback_to_board_bin_on_empty_otp_stream.patch
+++ b/package/kernel/mac80211/patches/920-ath10k_allow_fallback_to_board_bin_on_empty_otp_stream.patch
@@ -1,14 +1,14 @@
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
-@@ -277,7 +277,10 @@ static int ath10k_download_and_run_otp(s
+@@ -387,7 +387,10 @@ static int ath10k_download_and_run_otp(s

- ath10k_dbg(ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);

-- if (result != 0) {
+- if (!skip_otp && result != 0) {
+ if (result == 2) {
-+ ath10k_warn("otp stream is empty, using board.bin contents");
++ ath10k_warn(ar, "otp stream is empty, using board.bin contents");
+ return 0;
-+ } else if (result != 0) {
- ath10k_err("otp calibration failed: %d", result);
++ } else if (!skip_otp && result != 0) {
+ ath10k_err(ar, "otp calibration failed: %d", result);
return -EINVAL;
}
diff --git a/package/kernel/mac80211/patches/921-ath10k_init_devices_synchronously.patch b/package/kernel/mac80211/patches/921-ath10k_init_devices_synchronously.patch
new file mode 100644
index 0000000..c664faa
--- /dev/null
+++ b/package/kernel/mac80211/patches/921-ath10k_init_devices_synchronously.patch
@@ -0,0 +1,33 @@
+From: Sven Eckelmann <sven@open-mesh.com>
+Date: Tue, 18 Nov 2014 12:29:28 +0100
+Subject: [PATCH] ath10k: Don't initialize devices asynchronously
+
+OpenWrt requires all PHYs to be initialized to create the configuration files
+during bootup. ath10k violates this because it delays the creation of the PHY
+to a not well defined point in the future.
+
+Forcing the work to be done immediately works around this problem but may also
+delay the boot when firmware images cannot be found.
+
+Signed-off-by: Sven Eckelmann <sven@open-mesh.com>
+---
+
+--- a/drivers/net/wireless/ath/ath10k/core.c
++++ b/drivers/net/wireless/ath/ath10k/core.c
+@@ -1321,6 +1321,16 @@ int ath10k_core_register(struct ath10k *
+ ar->chip_id = chip_id;
+ queue_work(ar->workqueue, &ar->register_work);
+
++ /* OpenWrt requires all PHYs to be initialized to create the
++ * configuration files during bootup. ath10k violates this
++ * because it delays the creation of the PHY to a not well defined
++ * point in the future.
++ *
++ * Forcing the work to be done immediately works around this problem
++ * but may also delay the boot when firmware images cannot be found.
++ */
++ flush_workqueue(ar->workqueue);
++
+ return 0;
+ }
+ EXPORT_SYMBOL(ath10k_core_register);
diff --git a/package/kernel/mac80211/patches/930-ath10k_add_tpt_led_trigger.patch b/package/kernel/mac80211/patches/930-ath10k_add_tpt_led_trigger.patch
new file mode 100644
index 0000000..54174b1
--- /dev/null
+++ b/package/kernel/mac80211/patches/930-ath10k_add_tpt_led_trigger.patch
@@ -0,0 +1,37 @@
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -5405,6 +5405,21 @@ struct ath10k_vif *ath10k_get_arvif(stru
+ return arvif_iter.arvif;
+ }
+
++#ifdef CPTCFG_MAC80211_LEDS
++static const struct ieee80211_tpt_blink ath10k_tpt_blink[] = {
++ { .throughput = 0 * 1024, .blink_time = 334 },
++ { .throughput = 1 * 1024, .blink_time = 260 },
++ { .throughput = 2 * 1024, .blink_time = 220 },
++ { .throughput = 5 * 1024, .blink_time = 190 },
++ { .throughput = 10 * 1024, .blink_time = 170 },
++ { .throughput = 25 * 1024, .blink_time = 150 },
++ { .throughput = 54 * 1024, .blink_time = 130 },
++ { .throughput = 120 * 1024, .blink_time = 110 },
++ { .throughput = 265 * 1024, .blink_time = 80 },
++ { .throughput = 586 * 1024, .blink_time = 50 },
++};
++#endif
++
+ int ath10k_mac_register(struct ath10k *ar)
+ {
+ struct ieee80211_supported_band *band;
+@@ -5567,6 +5582,12 @@ int ath10k_mac_register(struct ath10k *a
+ goto err_free;
+ }
+
++#ifdef CPTCFG_MAC80211_LEDS
++ ieee80211_create_tpt_led_trigger(ar->hw,
++ IEEE80211_TPT_LEDTRIG_FL_RADIO, ath10k_tpt_blink,
++ ARRAY_SIZE(ath10k_tpt_blink));
++#endif
++
+ ret = ieee80211_register_hw(ar->hw);
+ if (ret) {
+ ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
diff --git a/package/kernel/mac80211/patches/950-ath10k_AP_IBSS.patch b/package/kernel/mac80211/patches/950-ath10k_AP_IBSS.patch
new file mode 100644
index 0000000..0011b5d
--- /dev/null
+++ b/package/kernel/mac80211/patches/950-ath10k_AP_IBSS.patch
@@ -0,0 +1,32 @@
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -5253,6 +5253,10 @@ static const struct ieee80211_iface_limi
+ .max = 7,
+ .types = BIT(NL80211_IFTYPE_AP)
+ },
++ {
++ .max = 1,
++ .types = BIT(NL80211_IFTYPE_ADHOC)
++ },
+ };
+
+ static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
+@@ -5260,6 +5264,10 @@ static const struct ieee80211_iface_limi
+ .max = 8,
+ .types = BIT(NL80211_IFTYPE_AP)
+ },
++ {
++ .max = 1,
++ .types = BIT(NL80211_IFTYPE_ADHOC)
++ },
+ };
+
+ static const struct ieee80211_iface_combination ath10k_if_comb[] = {
+@@ -5555,6 +5563,7 @@ int ath10k_mac_register(struct ath10k *a
+ ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_10x_if_comb);
++ ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_UNSET:
+ case ATH10K_FW_WMI_OP_VERSION_MAX: