iwlwifi patches for v5.14
* Some robustness improvements in the PCI code;
* Remove some duplicate and unused declarations;
* Improve PNVM load robustness by increasing the timeout a bit;
* Support for a new HW;
* Support for BIOS control of 11ax enablement in Russia;
* Support UNII4 enablement from BIOS;
* Support LMR feedback;
* Fix in TWT;
* Some fixes in IML (image loader) DMA handling;
* Fixes in WoWLAN;
* Updates in the WoWLAN FW commands;
* Add one new device to the PCI ID lists;
* Support reading PNVM from a UEFI variable;
* Bump the supported FW API version;
* Some other small fixes, clean-ups and improvements.

Merge tag 'iwlwifi-next-for-kalle-2021-06-22' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next
commit 559c664751
@@ -16,9 +16,10 @@ iwlwifi-objs += iwl-trans.o
iwlwifi-objs += queue/tx.o

iwlwifi-objs += fw/img.o fw/notif-wait.o
iwlwifi-objs += fw/dbg.o fw/pnvm.o
iwlwifi-objs += fw/dbg.o fw/pnvm.o fw/dump.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_EFI) += fw/uefi.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o

iwlwifi-objs += $(iwlwifi-m)

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2020 Intel Corporation
* Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -9,7 +9,7 @@
#include "iwl-prph.h"

/* Highest firmware API version supported */
#define IWL_22000_UCODE_API_MAX 63
#define IWL_22000_UCODE_API_MAX 64

/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -47,6 +47,7 @@
#define IWL_MA_A_GF_A_FW_PRE "iwlwifi-ma-a0-gf-a0-"
#define IWL_MA_A_GF4_A_FW_PRE "iwlwifi-ma-a0-gf4-a0-"
#define IWL_MA_A_MR_A_FW_PRE "iwlwifi-ma-a0-mr-a0-"
#define IWL_MA_A_FM_A_FW_PRE "iwlwifi-ma-a0-fm-a0-"
#define IWL_SNJ_A_MR_A_FW_PRE "iwlwifi-SoSnj-a0-mr-a0-"
#define IWL_BZ_A_HR_B_FW_PRE "iwlwifi-bz-a0-hr-b0-"
#define IWL_BZ_A_GF_A_FW_PRE "iwlwifi-bz-a0-gf-a0-"
@@ -93,6 +94,8 @@
IWL_MA_A_GF4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(api) \
IWL_MA_A_MR_A_FW_PRE __stringify(api) ".ucode"
#define IWL_MA_A_FM_A_FW_MODULE_FIRMWARE(api) \
IWL_MA_A_FM_A_FW_PRE __stringify(api) ".ucode"
#define IWL_SNJ_A_MR_A_MODULE_FIRMWARE(api) \
IWL_SNJ_A_MR_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \
@@ -389,6 +392,7 @@ const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz";
const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz";
const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz";

const char iwl_ax200_killer_1650w_name[] =
@@ -724,6 +728,13 @@ const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};

const struct iwl_cfg iwl_cfg_ma_a0_fm_a0 = {
.fw_name_pre = IWL_MA_A_FM_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};

const struct iwl_cfg iwl_cfg_snj_a0_mr_a0 = {
.fw_name_pre = IWL_SNJ_A_MR_A_FW_PRE,
.uhb_supported = true,
@@ -797,6 +808,7 @@ MODULE_FIRMWARE(IWL_MA_A_HR_B_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_GF4_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_MA_A_FM_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2020 Intel Corporation
* Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -171,8 +171,12 @@ const char iwl9260_killer_1550_name[] =
"Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW) 160MHz";
const char iwl9560_killer_1550i_name[] =
"Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)";
const char iwl9560_killer_1550i_160_name[] =
"Killer(R) Wireless-AC 1550i Wireless Network Adapter (9560NGW) 160MHz";
const char iwl9560_killer_1550s_name[] =
"Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)";
const char iwl9560_killer_1550s_160_name[] =
"Killer(R) Wireless-AC 1550s Wireless Network Adapter (9560D2W) 160MHz";

const struct iwl_cfg iwl9260_2ac_cfg = {
.fw_name_pre = IWL9260_FW_PRE,

@@ -163,6 +163,27 @@ int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);

/*
* Evaluate a DSM with no arguments and a u32 return value,
*/
int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
const guid_t *guid, u32 *value)
{
int ret;
u64 val;

ret = iwl_acpi_get_dsm_integer(dev, rev, func,
guid, &val, sizeof(u32));

if (ret < 0)
return ret;

/* cast val (u64) to be u32 */
*value = (u32)val;
return 0;
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u32);

union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
int data_size, int *tbl_rev)
@@ -734,30 +755,35 @@ out:

__le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
{
u32 ret;
int ret;
u8 value;
__le32 config_bitmap = 0;

/*
** Evaluate func 'DSM_FUNC_ENABLE_INDONESIA_5G2'
*/
ret = iwl_acpi_eval_dsm_func(fwrt->dev, DSM_FUNC_ENABLE_INDONESIA_5G2);
ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
DSM_FUNC_ENABLE_INDONESIA_5G2,
&iwl_guid, &value);

if (ret == DSM_VALUE_INDONESIA_ENABLE)
if (!ret && value == DSM_VALUE_INDONESIA_ENABLE)
config_bitmap |=
cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);

/*
** Evaluate func 'DSM_FUNC_DISABLE_SRD'
*/
ret = iwl_acpi_eval_dsm_func(fwrt->dev, DSM_FUNC_DISABLE_SRD);

if (ret == DSM_VALUE_SRD_PASSIVE)
config_bitmap |=
cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);

else if (ret == DSM_VALUE_SRD_DISABLE)
config_bitmap |=
cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
ret = iwl_acpi_get_dsm_u8(fwrt->dev, 0,
DSM_FUNC_DISABLE_SRD,
&iwl_guid, &value);
if (!ret) {
if (value == DSM_VALUE_SRD_PASSIVE)
config_bitmap |=
cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
else if (value == DSM_VALUE_SRD_DISABLE)
config_bitmap |=
cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
}

return config_bitmap;
}

@@ -78,6 +78,7 @@ enum iwl_dsm_funcs_rev_0 {
DSM_FUNC_DISABLE_SRD = 1,
DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
DSM_FUNC_11AX_ENABLEMENT = 6,
DSM_FUNC_ENABLE_UNII4_CHAN = 7
};

enum iwl_dsm_values_srd {
@@ -116,6 +117,9 @@ void *iwl_acpi_get_object(struct device *dev, acpi_string method);
int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
const guid_t *guid, u8 *value);

int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
const guid_t *guid, u32 *value);

union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
int data_size, int *tbl_rev);
@@ -182,6 +186,12 @@ static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
return -ENOENT;
}

static inline int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
const guid_t *guid, u32 *value)
{
return -ENOENT;
}

static inline union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
union acpi_object *data,
int data_size,

@@ -534,11 +534,6 @@ enum iwl_legacy_cmds {
*/
OFFLOADS_QUERY_CMD = 0xd5,

/**
* @REMOTE_WAKE_CONFIG_CMD: &struct iwl_wowlan_remote_wake_config
*/
REMOTE_WAKE_CONFIG_CMD = 0xd6,

/**
* @D0I3_END_CMD: End D0i3/D3 state, no command data
*/

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -159,6 +159,22 @@ struct iwl_proto_offload_cmd_v3_large {
struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */

/**
* struct iwl_proto_offload_cmd_v4 - ARP/NS offload configuration
* @sta_id: station id
* @common: common/IPv4 configuration
* @num_valid_ipv6_addrs: number of valid IPv6 addresses
* @targ_addrs: target IPv6 addresses
* @ns_config: NS offload configurations
*/
struct iwl_proto_offload_cmd_v4 {
__le32 sta_id;
struct iwl_proto_offload_cmd_common common;
__le32 num_valid_ipv6_addrs;
struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L];
struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_4 */

/*
* WOWLAN_PATTERNS
*/
@@ -302,13 +318,23 @@ struct iwl_wowlan_patterns_cmd {
/**
* @n_patterns: number of patterns
*/
__le32 n_patterns;
u8 n_patterns;

/**
* @n_patterns: sta_id
*/
u8 sta_id;

/**
* @reserved: reserved for alignment
*/
__le16 reserved;

/**
* @patterns: the patterns, array length in @n_patterns
*/
struct iwl_wowlan_pattern_v2 patterns[];
} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_2 */
} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_3 */

enum iwl_wowlan_wakeup_filters {
IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0),
@@ -339,9 +365,10 @@ enum iwl_wowlan_flags {
};

/**
* struct iwl_wowlan_config_cmd - WoWLAN configuration
* struct iwl_wowlan_config_cmd - WoWLAN configuration (versions 5 and 6)
* @wakeup_filter: filter from &enum iwl_wowlan_wakeup_filters
* @non_qos_seq: non-QoS sequence counter to use next
* @non_qos_seq: non-QoS sequence counter to use next.
* Reserved if the struct has version >= 6.
* @qos_seq: QoS sequence counters to use next
* @wowlan_ba_teardown_tids: bitmap of BA sessions to tear down
* @is_11n_connection: indicates HT connection
@@ -456,6 +483,23 @@ struct iwl_wowlan_kek_kck_material_cmd_v3 {
__le32 bigtk_cipher;
} __packed; /* KEK_KCK_MATERIAL_API_S_VER_3 */

struct iwl_wowlan_kek_kck_material_cmd_v4 {
__le32 sta_id;
u8 kck[IWL_KCK_MAX_SIZE];
u8 kek[IWL_KEK_MAX_SIZE];
__le16 kck_len;
__le16 kek_len;
__le64 replay_ctr;
__le32 akm;
__le32 gtk_cipher;
__le32 igtk_cipher;
__le32 bigtk_cipher;
} __packed; /* KEK_KCK_MATERIAL_API_S_VER_4 */

struct iwl_wowlan_get_status_cmd {
__le32 sta_id;
} __packed; /* WOWLAN_GET_STATUSES_CMD_API_S_VER_1 */

#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87

enum iwl_wowlan_rekey_status {
@@ -604,12 +648,13 @@ struct iwl_wowlan_status_v7 {
} __packed; /* WOWLAN_STATUSES_API_S_VER_7 */

/**
* struct iwl_wowlan_status_v9 - WoWLAN status (version 9)
* struct iwl_wowlan_status_v9 - WoWLAN status (versions 9 and 10)
* @gtk: GTK data
* @igtk: IGTK data
* @replay_ctr: GTK rekey replay counter
* @pattern_number: number of the matched pattern
* @non_qos_seq_ctr: non-QoS sequence counter to use next
* @non_qos_seq_ctr: non-QoS sequence counter to use next.
* Reserved if the struct has version >= 10.
* @qos_seq_ctr: QoS sequence counters to use next
* @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
* @num_of_gtk_rekeys: number of GTK rekeys
@@ -638,7 +683,7 @@ struct iwl_wowlan_status_v9 {
u8 tid_tear_down;
u8 reserved[3];
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_API_S_VER_9 */
} __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_9 */

/**
* struct iwl_wowlan_status - WoWLAN status
@@ -683,55 +728,6 @@ static inline u8 iwlmvm_wowlan_gtk_idx(struct iwl_wowlan_gtk_status *gtk)
return gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK;
}

#define IWL_WOWLAN_TCP_MAX_PACKET_LEN 64
#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN 128
#define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS 2048

struct iwl_tcp_packet_info {
__le16 tcp_pseudo_header_checksum;
__le16 tcp_payload_length;
} __packed; /* TCP_PACKET_INFO_API_S_VER_2 */

struct iwl_tcp_packet {
struct iwl_tcp_packet_info info;
u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
u8 data[IWL_WOWLAN_TCP_MAX_PACKET_LEN];
} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */

struct iwl_remote_wake_packet {
struct iwl_tcp_packet_info info;
u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
u8 data[IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN];
} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */

struct iwl_wowlan_remote_wake_config {
__le32 connection_max_time; /* unused */
/* TCP_PROTOCOL_CONFIG_API_S_VER_1 */
u8 max_syn_retries;
u8 max_data_retries;
u8 tcp_syn_ack_timeout;
u8 tcp_ack_timeout;

struct iwl_tcp_packet syn_tx;
struct iwl_tcp_packet synack_rx;
struct iwl_tcp_packet keepalive_ack_rx;
struct iwl_tcp_packet fin_tx;

struct iwl_remote_wake_packet keepalive_tx;
struct iwl_remote_wake_packet wake_rx;

/* REMOTE_WAKE_OFFSET_INFO_API_S_VER_1 */
u8 sequence_number_offset;
u8 sequence_number_length;
u8 token_offset;
u8 token_length;
/* REMOTE_WAKE_PROTOCOL_PARAMS_API_S_VER_1 */
__le32 initial_sequence_number;
__le16 keepalive_interval;
__le16 num_tokens;
u8 tokens[IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS];
} __packed; /* REMOTE_WAKE_CONFIG_API_S_VER_2 */

/* TODO: NetDetect API */

#endif /* __iwl_fw_api_d3_h__ */

@@ -63,6 +63,12 @@ enum iwl_data_path_subcmd_ids {
*/
RX_NO_DATA_NOTIF = 0xF5,

/**
* @THERMAL_DUAL_CHAIN_DISABLE_REQ: firmware request for SMPS mode,
* &struct iwl_thermal_dual_chain_request
*/
THERMAL_DUAL_CHAIN_REQUEST = 0xF6,

/**
* @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
*/
@@ -169,4 +175,24 @@ struct iwl_datapath_monitor_notif {
u8 reserved[3];
} __packed; /* MONITOR_NTF_API_S_VER_1 */

/**
* enum iwl_thermal_dual_chain_req_events - firmware SMPS request event
* @THERMAL_DUAL_CHAIN_REQ_ENABLE: (re-)enable dual-chain operation
* (subject to other constraints)
* @THERMAL_DUAL_CHAIN_REQ_DISABLE: disable dual-chain operation
* (static SMPS)
*/
enum iwl_thermal_dual_chain_req_events {
THERMAL_DUAL_CHAIN_REQ_ENABLE,
THERMAL_DUAL_CHAIN_REQ_DISABLE,
}; /* THERMAL_DUAL_CHAIN_DISABLE_STATE_API_E_VER_1 */

/**
* struct iwl_thermal_dual_chain_request - SMPS request
* @event: the type of request, see &enum iwl_thermal_dual_chain_req_events
*/
struct iwl_thermal_dual_chain_request {
__le32 event;
} __packed; /* THERMAL_DUAL_CHAIN_DISABLE_REQ_NTFY_API_S_VER_1 */

#endif /* __iwl_fw_api_datapath_h__ */

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2018-2020 Intel Corporation
* Copyright (C) 2018-2021 Intel Corporation
*/
#ifndef __iwl_fw_dbg_tlv_h__
#define __iwl_fw_dbg_tlv_h__
@@ -11,6 +11,7 @@
#define IWL_FW_INI_MAX_NAME 32
#define IWL_FW_INI_MAX_CFG_NAME 64
#define IWL_FW_INI_DOMAIN_ALWAYS_ON 0
#define IWL_FW_INI_REGION_V2_MASK 0x0000FFFF

/**
* struct iwl_fw_ini_hcmd

@@ -452,6 +452,25 @@ struct iwl_lari_config_change_cmd_v3 {
__le32 oem_11ax_allow_bitmap;
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_3 */

/**
* struct iwl_lari_config_change_cmd_v4 - change LARI configuration
* @config_bitmap: Bitmap of the config commands. Each bit will trigger a
* different predefined FW config operation.
* @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
* @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
* per country, one to indicate whether to override and the other to
* indicate the value to use.
* @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits
* per country, one to indicate whether to override and the other to
* indicate allow/disallow unii4 channels.
*/
struct iwl_lari_config_change_cmd_v4 {
__le32 config_bitmap;
__le32 oem_uhb_allow_bitmap;
__le32 oem_11ax_allow_bitmap;
__le32 oem_unii4_allow_bitmap;
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_4 */

/**
* struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
* @status: PNVM image loading status

@@ -1933,6 +1933,13 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
u32 num_of_ranges, i, size;
void *range;

/*
* The higher part of the ID in version 2 is irrelevant for
* us, so mask it out.
*/
if (le32_to_cpu(reg->hdr.version) == 2)
id &= IWL_FW_INI_REGION_V2_MASK;

if (!ops->get_num_of_ranges || !ops->get_size || !ops->fill_mem_hdr ||
!ops->fill_range)
return 0;
@@ -1957,7 +1964,7 @@ static u32 iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, struct list_head *list,
num_of_ranges = ops->get_num_of_ranges(fwrt, reg_data);

header = (void *)tlv->data;
header->region_id = reg->id;
header->region_id = cpu_to_le32(id);
header->num_of_ranges = cpu_to_le32(num_of_ranges);
header->name_len = cpu_to_le32(IWL_FW_INI_MAX_NAME);
memcpy(header->name, reg->name, IWL_FW_INI_MAX_NAME);
@@ -2752,44 +2759,6 @@ void iwl_fw_dbg_stop_sync(struct iwl_fw_runtime *fwrt)
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_sync);

#define FSEQ_REG(x) { .addr = (x), .str = #x, }

void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt)
{
struct iwl_trans *trans = fwrt->trans;
int i;
struct {
u32 addr;
const char *str;
} fseq_regs[] = {
FSEQ_REG(FSEQ_ERROR_CODE),
FSEQ_REG(FSEQ_TOP_INIT_VERSION),
FSEQ_REG(FSEQ_CNVIO_INIT_VERSION),
FSEQ_REG(FSEQ_OTP_VERSION),
FSEQ_REG(FSEQ_TOP_CONTENT_VERSION),
FSEQ_REG(FSEQ_ALIVE_TOKEN),
FSEQ_REG(FSEQ_CNVI_ID),
FSEQ_REG(FSEQ_CNVR_ID),
FSEQ_REG(CNVI_AUX_MISC_CHIP),
FSEQ_REG(CNVR_AUX_MISC_CHIP),
FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM),
FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR),
};

if (!iwl_trans_grab_nic_access(trans))
return;

IWL_ERR(fwrt, "Fseq Registers:\n");

for (i = 0; i < ARRAY_SIZE(fseq_regs); i++)
IWL_ERR(fwrt, "0x%08X | %s\n",
iwl_read_prph_no_grab(trans, fseq_regs[i].addr),
fseq_regs[i].str);

iwl_trans_release_nic_access(trans);
}
IWL_EXPORT_SYMBOL(iwl_fw_error_print_fseq_regs);

static int iwl_fw_dbg_suspend_resume_hcmd(struct iwl_trans *trans, bool suspend)
{
struct iwl_dbg_suspend_resume_cmd cmd = {

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018-2019 Intel Corporation
* Copyright (C) 2005-2014, 2018-2019, 2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -321,4 +321,6 @@ static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
fwrt->dump.fw_ver.umac_minor = le32_to_cpu(umac->umac_minor);
}
}

void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt);
#endif /* __iwl_fw_dbg_h__ */

@ -0,0 +1,418 @@
|
|||
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
/*
|
||||
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
|
||||
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
|
||||
* Copyright (C) 2015-2017 Intel Deutschland GmbH
|
||||
*/
|
||||
#include <linux/devcoredump.h>
|
||||
#include "iwl-drv.h"
|
||||
#include "runtime.h"
|
||||
#include "dbg.h"
|
||||
#include "debugfs.h"
|
||||
#include "iwl-io.h"
|
||||
#include "iwl-prph.h"
|
||||
#include "iwl-csr.h"
|
||||
|
||||
/*
|
||||
* Note: This structure is read from the device with IO accesses,
|
||||
* and the reading already does the endian conversion. As it is
|
||||
* read with u32-sized accesses, any members with a different size
|
||||
* need to be ordered correctly though!
|
||||
*/
|
||||
struct iwl_error_event_table_v1 {
|
||||
u32 valid; /* (nonzero) valid, (0) log is empty */
|
||||
u32 error_id; /* type of error */
|
||||
u32 pc; /* program counter */
|
||||
u32 blink1; /* branch link */
|
||||
u32 blink2; /* branch link */
|
||||
u32 ilink1; /* interrupt link */
|
||||
u32 ilink2; /* interrupt link */
|
||||
u32 data1; /* error-specific data */
|
||||
u32 data2; /* error-specific data */
|
||||
u32 data3; /* error-specific data */
|
||||
u32 bcon_time; /* beacon timer */
|
||||
u32 tsf_low; /* network timestamp function timer */
|
||||
u32 tsf_hi; /* network timestamp function timer */
|
||||
u32 gp1; /* GP1 timer register */
|
||||
u32 gp2; /* GP2 timer register */
|
||||
u32 gp3; /* GP3 timer register */
|
||||
u32 ucode_ver; /* uCode version */
|
||||
u32 hw_ver; /* HW Silicon version */
|
||||
u32 brd_ver; /* HW board version */
|
||||
u32 log_pc; /* log program counter */
|
||||
u32 frame_ptr; /* frame pointer */
|
||||
u32 stack_ptr; /* stack pointer */
|
||||
u32 hcmd; /* last host command header */
|
||||
u32 isr0; /* isr status register LMPM_NIC_ISR0:
|
||||
* rxtx_flag */
|
||||
u32 isr1; /* isr status register LMPM_NIC_ISR1:
|
||||
* host_flag */
|
||||
u32 isr2; /* isr status register LMPM_NIC_ISR2:
|
||||
* enc_flag */
|
||||
u32 isr3; /* isr status register LMPM_NIC_ISR3:
|
||||
* time_flag */
|
||||
u32 isr4; /* isr status register LMPM_NIC_ISR4:
|
||||
* wico interrupt */
|
||||
u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
|
||||
u32 wait_event; /* wait event() caller address */
|
||||
u32 l2p_control; /* L2pControlField */
|
||||
u32 l2p_duration; /* L2pDurationField */
|
||||
u32 l2p_mhvalid; /* L2pMhValidBits */
|
||||
u32 l2p_addr_match; /* L2pAddrMatchStat */
|
||||
u32 lmpm_pmg_sel; /* indicate which clocks are turned on
|
||||
* (LMPM_PMG_SEL) */
|
||||
u32 u_timestamp; /* indicate when the date and time of the
|
||||
* compilation */
|
||||
u32 flow_handler; /* FH read/write pointers, RX credit */
|
||||
} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
|
||||
|
||||
struct iwl_error_event_table {
|
||||
u32 valid; /* (nonzero) valid, (0) log is empty */
|
||||
u32 error_id; /* type of error */
|
||||
u32 trm_hw_status0; /* TRM HW status */
|
||||
u32 trm_hw_status1; /* TRM HW status */
|
||||
u32 blink2; /* branch link */
|
||||
u32 ilink1; /* interrupt link */
|
||||
u32 ilink2; /* interrupt link */
|
||||
u32 data1; /* error-specific data */
|
||||
u32 data2; /* error-specific data */
|
||||
u32 data3; /* error-specific data */
|
||||
u32 bcon_time; /* beacon timer */
|
||||
u32 tsf_low; /* network timestamp function timer */
|
||||
u32 tsf_hi; /* network timestamp function timer */
|
||||
u32 gp1; /* GP1 timer register */
|
||||
u32 gp2; /* GP2 timer register */
|
||||
u32 fw_rev_type; /* firmware revision type */
|
||||
u32 major; /* uCode version major */
|
||||
u32 minor; /* uCode version minor */
|
||||
u32 hw_ver; /* HW Silicon version */
|
||||
u32 brd_ver; /* HW board version */
|
||||
u32 log_pc; /* log program counter */
|
||||
u32 frame_ptr; /* frame pointer */
|
||||
u32 stack_ptr; /* stack pointer */
|
||||
u32 hcmd; /* last host command header */
|
||||
u32 isr0; /* isr status register LMPM_NIC_ISR0:
|
||||
* rxtx_flag */
|
||||
u32 isr1; /* isr status register LMPM_NIC_ISR1:
|
||||
* host_flag */
|
||||
u32 isr2; /* isr status register LMPM_NIC_ISR2:
|
||||
* enc_flag */
|
||||
u32 isr3; /* isr status register LMPM_NIC_ISR3:
|
||||
* time_flag */
|
||||
u32 isr4; /* isr status register LMPM_NIC_ISR4:
|
||||
* wico interrupt */
|
||||
u32 last_cmd_id; /* last HCMD id handled by the firmware */
|
||||
u32 wait_event; /* wait event() caller address */
|
||||
u32 l2p_control; /* L2pControlField */
|
||||
u32 l2p_duration; /* L2pDurationField */
|
||||
u32 l2p_mhvalid; /* L2pMhValidBits */
|
||||
u32 l2p_addr_match; /* L2pAddrMatchStat */
|
||||
u32 lmpm_pmg_sel; /* indicate which clocks are turned on
|
||||
* (LMPM_PMG_SEL) */
|
||||
u32 u_timestamp; /* indicate when the date and time of the
|
||||
* compilation */
|
||||
u32 flow_handler; /* FH read/write pointers, RX credit */
|
||||
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
|
||||
|
||||
/*
|
||||
* UMAC error struct - relevant starting from family 8000 chip.
|
||||
* Note: This structure is read from the device with IO accesses,
|
||||
* and the reading already does the endian conversion. As it is
|
||||
* read with u32-sized accesses, any members with a different size
|
||||
* need to be ordered correctly though!
|
||||
*/
|
||||
struct iwl_umac_error_event_table {
|
||||
u32 valid; /* (nonzero) valid, (0) log is empty */
|
||||
u32 error_id; /* type of error */
|
||||
u32 blink1; /* branch link */
|
||||
u32 blink2; /* branch link */
|
||||
u32 ilink1; /* interrupt link */
|
||||
u32 ilink2; /* interrupt link */
|
||||
u32 data1; /* error-specific data */
|
||||
u32 data2; /* error-specific data */
|
||||
u32 data3; /* error-specific data */
|
||||
u32 umac_major;
|
||||
u32 umac_minor;
|
||||
u32 frame_pointer; /* core register 27*/
|
||||
u32 stack_pointer; /* core register 28 */
|
||||
u32 cmd_header; /* latest host cmd sent to UMAC */
|
||||
u32 nic_isr_pref; /* ISR status register */
|
||||
} __packed;
|
||||
|
||||
#define ERROR_START_OFFSET (1 * sizeof(u32))
|
||||
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
|
||||
|
||||
static void iwl_fwrt_dump_umac_error_log(struct iwl_fw_runtime *fwrt)
|
||||
{
|
||||
struct iwl_trans *trans = fwrt->trans;
|
||||
struct iwl_umac_error_event_table table = {};
|
||||
u32 base = fwrt->trans->dbg.umac_error_event_table;
|
||||
|
||||
if (!base &&
|
||||
!(fwrt->trans->dbg.error_event_table_tlv_status &
|
||||
IWL_ERROR_EVENT_TABLE_UMAC))
|
||||
return;
|
||||
|
||||
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
|
||||
|
||||
if (table.valid)
|
||||
fwrt->dump.umac_err_id = table.error_id;
|
||||
|
||||
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
|
||||
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
|
||||
IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n",
|
||||
fwrt->trans->status, table.valid);
|
||||
}
|
||||
|
||||
IWL_ERR(fwrt, "0x%08X | %s\n", table.error_id,
|
||||
iwl_fw_lookup_assert_desc(table.error_id));
|
||||
IWL_ERR(fwrt, "0x%08X | umac branchlink1\n", table.blink1);
|
||||
IWL_ERR(fwrt, "0x%08X | umac branchlink2\n", table.blink2);
|
||||
IWL_ERR(fwrt, "0x%08X | umac interruptlink1\n", table.ilink1);
|
||||
IWL_ERR(fwrt, "0x%08X | umac interruptlink2\n", table.ilink2);
|
||||
IWL_ERR(fwrt, "0x%08X | umac data1\n", table.data1);
|
||||
IWL_ERR(fwrt, "0x%08X | umac data2\n", table.data2);
|
||||
IWL_ERR(fwrt, "0x%08X | umac data3\n", table.data3);
|
||||
IWL_ERR(fwrt, "0x%08X | umac major\n", table.umac_major);
|
||||
IWL_ERR(fwrt, "0x%08X | umac minor\n", table.umac_minor);
|
||||
IWL_ERR(fwrt, "0x%08X | frame pointer\n", table.frame_pointer);
|
||||
IWL_ERR(fwrt, "0x%08X | stack pointer\n", table.stack_pointer);
|
||||
IWL_ERR(fwrt, "0x%08X | last host cmd\n", table.cmd_header);
|
||||
IWL_ERR(fwrt, "0x%08X | isr status reg\n", table.nic_isr_pref);
|
||||
}
|
||||
|
||||
static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_num)
|
||||
{
|
||||
struct iwl_trans *trans = fwrt->trans;
|
||||
struct iwl_error_event_table table = {};
|
||||
u32 val, base = fwrt->trans->dbg.lmac_error_event_table[lmac_num];
|
||||
|
||||
if (fwrt->cur_fw_img == IWL_UCODE_INIT) {
|
||||
if (!base)
|
||||
base = fwrt->fw->init_errlog_ptr;
|
||||
} else {
|
||||
if (!base)
|
||||
base = fwrt->fw->inst_errlog_ptr;
|
||||
}
|
||||
|
||||
if (base < 0x400000) {
|
||||
IWL_ERR(fwrt,
|
||||
"Not valid error log pointer 0x%08X for %s uCode\n",
|
||||
base,
|
||||
(fwrt->cur_fw_img == IWL_UCODE_INIT)
|
||||
? "Init" : "RT");
|
||||
return;
|
||||
}
|
||||
|
||||
/* check if there is a HW error */
|
||||
val = iwl_trans_read_mem32(trans, base);
|
||||
if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
|
||||
int err;
|
||||
|
||||
IWL_ERR(trans, "HW error, resetting before reading\n");
|
||||
|
||||
/* reset the device */
|
||||
iwl_trans_sw_reset(trans);
|
||||
|
||||
err = iwl_finish_nic_init(trans, trans->trans_cfg);
|
||||
if (err)
|
||||
return;
|
||||
}
|
||||
|
||||
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
|
||||
|
||||
if (table.valid)
|
||||
fwrt->dump.lmac_err_id[lmac_num] = table.error_id;
|
||||
|
||||
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
|
||||
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
|
||||
IWL_ERR(trans, "Transport status: 0x%08lX, valid: %d\n",
|
||||
fwrt->trans->status, table.valid);
|
||||
}
|
||||
|
||||
/* Do not change this output - scripts rely on it */
|
||||
|
||||
IWL_ERR(fwrt, "Loaded firmware version: %s\n", fwrt->fw->fw_version);
|
||||
|
||||
IWL_ERR(fwrt, "0x%08X | %-28s\n", table.error_id,
|
||||
iwl_fw_lookup_assert_desc(table.error_id));
|
||||
IWL_ERR(fwrt, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
|
||||
IWL_ERR(fwrt, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
|
||||
IWL_ERR(fwrt, "0x%08X | branchlink2\n", table.blink2);
|
||||
IWL_ERR(fwrt, "0x%08X | interruptlink1\n", table.ilink1);
|
||||
IWL_ERR(fwrt, "0x%08X | interruptlink2\n", table.ilink2);
|
||||
IWL_ERR(fwrt, "0x%08X | data1\n", table.data1);
|
||||
IWL_ERR(fwrt, "0x%08X | data2\n", table.data2);
|
||||
IWL_ERR(fwrt, "0x%08X | data3\n", table.data3);
|
||||
IWL_ERR(fwrt, "0x%08X | beacon time\n", table.bcon_time);
|
||||
IWL_ERR(fwrt, "0x%08X | tsf low\n", table.tsf_low);
|
||||
IWL_ERR(fwrt, "0x%08X | tsf hi\n", table.tsf_hi);
|
||||
IWL_ERR(fwrt, "0x%08X | time gp1\n", table.gp1);
|
||||
IWL_ERR(fwrt, "0x%08X | time gp2\n", table.gp2);
|
||||
IWL_ERR(fwrt, "0x%08X | uCode revision type\n", table.fw_rev_type);
|
||||
IWL_ERR(fwrt, "0x%08X | uCode version major\n", table.major);
|
||||
IWL_ERR(fwrt, "0x%08X | uCode version minor\n", table.minor);
|
||||
IWL_ERR(fwrt, "0x%08X | hw version\n", table.hw_ver);
|
||||
IWL_ERR(fwrt, "0x%08X | board version\n", table.brd_ver);
|
||||
IWL_ERR(fwrt, "0x%08X | hcmd\n", table.hcmd);
|
||||
IWL_ERR(fwrt, "0x%08X | isr0\n", table.isr0);
|
||||
IWL_ERR(fwrt, "0x%08X | isr1\n", table.isr1);
|
||||
IWL_ERR(fwrt, "0x%08X | isr2\n", table.isr2);
|
||||
IWL_ERR(fwrt, "0x%08X | isr3\n", table.isr3);
|
||||
IWL_ERR(fwrt, "0x%08X | isr4\n", table.isr4);
|
||||
IWL_ERR(fwrt, "0x%08X | last cmd Id\n", table.last_cmd_id);
|
||||
IWL_ERR(fwrt, "0x%08X | wait_event\n", table.wait_event);
|
||||
IWL_ERR(fwrt, "0x%08X | l2p_control\n", table.l2p_control);
|
||||
IWL_ERR(fwrt, "0x%08X | l2p_duration\n", table.l2p_duration);
|
||||
IWL_ERR(fwrt, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
|
||||
IWL_ERR(fwrt, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
|
||||
IWL_ERR(fwrt, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
|
||||
IWL_ERR(fwrt, "0x%08X | timestamp\n", table.u_timestamp);
|
||||
IWL_ERR(fwrt, "0x%08X | flow_handler\n", table.flow_handler);
|
||||
}
|
||||
|
||||
/*
|
||||
* TCM error struct.
|
||||
* Note: This structure is read from the device with IO accesses,
|
||||
* and the reading already does the endian conversion. As it is
|
||||
* read with u32-sized accesses, any members with a different size
|
||||
* need to be ordered correctly though!
|
||||
*/
|
||||
struct iwl_tcm_error_event_table {
|
||||
u32 valid;
|
||||
u32 error_id;
|
||||
u32 blink2;
|
||||
u32 ilink1;
|
||||
u32 ilink2;
|
||||
u32 data1, data2, data3;
|
||||
u32 logpc;
|
||||
u32 frame_pointer;
|
||||
u32 stack_pointer;
|
||||
u32 msgid;
|
||||
u32 isr;
|
||||
u32 hw_status[5];
|
||||
u32 sw_status[1];
|
||||
u32 reserved[4];
|
||||
} __packed; /* TCM_LOG_ERROR_TABLE_API_S_VER_1 */
|
||||
|
||||
static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt)
|
||||
{
|
||||
struct iwl_trans *trans = fwrt->trans;
|
||||
struct iwl_tcm_error_event_table table = {};
|
||||
u32 base = fwrt->trans->dbg.tcm_error_event_table;
|
||||
int i;
|
||||
|
||||
if (!base ||
|
||||
!(fwrt->trans->dbg.error_event_table_tlv_status &
|
||||
IWL_ERROR_EVENT_TABLE_TCM))
|
||||
return;
|
||||
|
||||
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
|
||||
|
||||
IWL_ERR(fwrt, "TCM status:\n");
|
||||
IWL_ERR(fwrt, "0x%08X | error ID\n", table.error_id);
|
||||
IWL_ERR(fwrt, "0x%08X | tcm branchlink2\n", table.blink2);
|
||||
IWL_ERR(fwrt, "0x%08X | tcm interruptlink1\n", table.ilink1);
|
||||
IWL_ERR(fwrt, "0x%08X | tcm interruptlink2\n", table.ilink2);
|
||||
IWL_ERR(fwrt, "0x%08X | tcm data1\n", table.data1);
|
||||
IWL_ERR(fwrt, "0x%08X | tcm data2\n", table.data2);
|
||||
IWL_ERR(fwrt, "0x%08X | tcm data3\n", table.data3);
|
||||
IWL_ERR(fwrt, "0x%08X | tcm log PC\n", table.logpc);
|
||||
IWL_ERR(fwrt, "0x%08X | tcm frame pointer\n", table.frame_pointer);
|
||||
IWL_ERR(fwrt, "0x%08X | tcm stack pointer\n", table.stack_pointer);
|
||||
IWL_ERR(fwrt, "0x%08X | tcm msg ID\n", table.msgid);
|
||||
IWL_ERR(fwrt, "0x%08X | tcm ISR status\n", table.isr);
|
||||
for (i = 0; i < ARRAY_SIZE(table.hw_status); i++)
|
||||
IWL_ERR(fwrt, "0x%08X | tcm HW status[%d]\n",
|
||||
table.hw_status[i], i);
|
||||
for (i = 0; i < ARRAY_SIZE(table.sw_status); i++)
|
||||
IWL_ERR(fwrt, "0x%08X | tcm SW status[%d]\n",
|
||||
table.sw_status[i], i);
|
||||
}
|
||||
|
||||
static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt)
|
||||
{
|
||||
struct iwl_trans *trans = fwrt->trans;
|
||||
u32 error, data1;
|
||||
|
||||
if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
|
||||
error = UMAG_SB_CPU_2_STATUS;
|
||||
data1 = UMAG_SB_CPU_1_STATUS;
|
||||
} else if (fwrt->trans->trans_cfg->device_family >=
|
||||
IWL_DEVICE_FAMILY_8000) {
|
||||
error = SB_CPU_2_STATUS;
|
||||
data1 = SB_CPU_1_STATUS;
|
||||
} else {
|
||||
return;
|
||||
}
|
||||
|
||||
error = iwl_read_umac_prph(trans, UMAG_SB_CPU_2_STATUS);
|
||||
|
||||
IWL_ERR(trans, "IML/ROM dump:\n");
|
||||
|
||||
if (error & 0xFFFF0000)
|
||||
IWL_ERR(trans, "0x%04X | IML/ROM SYSASSERT\n", error >> 16);
|
||||
|
||||
IWL_ERR(fwrt, "0x%08X | IML/ROM error/state\n", error);
|
||||
IWL_ERR(fwrt, "0x%08X | IML/ROM data1\n",
|
||||
iwl_read_umac_prph(trans, data1));
|
||||
|
||||
if (fwrt->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
|
||||
IWL_ERR(fwrt, "0x%08X | IML/ROM WFPM_AUTH_KEY_0\n",
|
||||
iwl_read_umac_prph(trans, SB_MODIFY_CFG_FLAG));
|
||||
}
|
||||
|
||||
#define FSEQ_REG(x) { .addr = (x), .str = #x, }
|
||||
|
||||
static void iwl_fwrt_dump_fseq_regs(struct iwl_fw_runtime *fwrt)
|
||||
{
|
||||
struct iwl_trans *trans = fwrt->trans;
|
||||
int i;
|
||||
struct {
|
||||
u32 addr;
|
||||
const char *str;
|
||||
} fseq_regs[] = {
|
||||
FSEQ_REG(FSEQ_ERROR_CODE),
|
||||
FSEQ_REG(FSEQ_TOP_INIT_VERSION),
|
||||
FSEQ_REG(FSEQ_CNVIO_INIT_VERSION),
|
||||
FSEQ_REG(FSEQ_OTP_VERSION),
|
||||
FSEQ_REG(FSEQ_TOP_CONTENT_VERSION),
|
||||
FSEQ_REG(FSEQ_ALIVE_TOKEN),
|
||||
FSEQ_REG(FSEQ_CNVI_ID),
|
||||
FSEQ_REG(FSEQ_CNVR_ID),
|
||||
FSEQ_REG(CNVI_AUX_MISC_CHIP),
|
||||
FSEQ_REG(CNVR_AUX_MISC_CHIP),
|
||||
FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM),
|
||||
FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR),
|
||||
};
|
||||
|
||||
if (!iwl_trans_grab_nic_access(trans))
|
||||
return;
|
||||
|
||||
IWL_ERR(fwrt, "Fseq Registers:\n");
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(fseq_regs); i++)
|
||||
IWL_ERR(fwrt, "0x%08X | %s\n",
|
||||
iwl_read_prph_no_grab(trans, fseq_regs[i].addr),
|
||||
fseq_regs[i].str);
|
||||
|
||||
iwl_trans_release_nic_access(trans);
|
||||
}
|
||||
|
||||
void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt)
|
||||
{
|
||||
if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) {
|
||||
IWL_ERR(fwrt,
|
||||
"DEVICE_ENABLED bit is not set. Aborting dump.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
iwl_fwrt_dump_lmac_error_log(fwrt, 0);
|
||||
if (fwrt->trans->dbg.lmac_error_event_table[1])
|
||||
iwl_fwrt_dump_lmac_error_log(fwrt, 1);
|
||||
iwl_fwrt_dump_umac_error_log(fwrt);
|
||||
iwl_fwrt_dump_tcm_error_log(fwrt);
|
||||
iwl_fwrt_dump_iml_error_log(fwrt);
|
||||
iwl_fwrt_dump_fseq_regs(fwrt);
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_fwrt_dump_error_logs);
|
||||
|
|
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2008-2014, 2018-2020 Intel Corporation
* Copyright (C) 2008-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -52,7 +52,8 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_INIT_DATA = 4,
IWL_UCODE_TLV_BOOT = 5,
IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
IWL_UCODE_TLV_PAN = 7,
IWL_UCODE_TLV_PAN = 7, /* deprecated -- only used in DVM */
IWL_UCODE_TLV_MEM_DESC = 7, /* replaces PAN in non-DVM */
IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8,
IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9,
IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10,
@@ -97,6 +98,7 @@ enum iwl_ucode_tlv_type {

IWL_UCODE_TLV_PNVM_VERSION = 62,
IWL_UCODE_TLV_PNVM_SKU = 64,
IWL_UCODE_TLV_TCM_DEBUG_ADDRS = 65,

IWL_UCODE_TLV_FW_NUM_STATIONS = IWL_UCODE_TLV_CONST_BASE + 0,

@@ -277,10 +279,11 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_BAND_IN_RX_DATA = (__force iwl_ucode_tlv_api_t)59,


NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
/* sparse says it cannot increment the previous enum member */
= 128
/* sparse says it cannot increment the previous enum member */
#define NUM_IWL_UCODE_TLV_API 128
#else
NUM_IWL_UCODE_TLV_API
#endif
};

@@ -411,6 +414,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_PROTECTED_TWT = (__force iwl_ucode_tlv_capa_t)56,
IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE = (__force iwl_ucode_tlv_capa_t)57,
IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)58,
IWL_UCODE_TLV_CAPA_BROADCAST_TWT = (__force iwl_ucode_tlv_capa_t)60,

/* set 2 */
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
@@ -446,10 +450,11 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100,
IWL_UCODE_TLV_CAPA_RFIM_SUPPORT = (__force iwl_ucode_tlv_capa_t)102,

NUM_IWL_UCODE_TLV_CAPA
#ifdef __CHECKER__
/* sparse says it cannot increment the previous enum member */
= 128
/* sparse says it cannot increment the previous enum member */
#define NUM_IWL_UCODE_TLV_CAPA 128
#else
NUM_IWL_UCODE_TLV_CAPA
#endif
};

@@ -946,6 +951,10 @@ struct iwl_fw_cmd_version {
u8 notif_ver;
} __packed;

struct iwl_fw_tcm_error_addr {
__le32 addr;
}; /* FW_TLV_TCM_ERROR_INFO_ADDRS_S */

static inline size_t _iwl_tlv_array_len(const struct iwl_ucode_tlv *tlv,
size_t fixed_size, size_t var_size)
{

@@ -10,7 +10,7 @@
#include "fw/api/commands.h"
#include "fw/api/nvm-reg.h"
#include "fw/api/alive.h"
#include <linux/efi.h>
#include "fw/uefi.h"

struct iwl_pnvm_section {
__le32 offset;
@@ -220,83 +220,6 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
return -ENOENT;
}

#if defined(CONFIG_EFI)

#define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b, \
0xb2, 0xec, 0xf5, 0xa3, \
0x59, 0x4f, 0x4a, 0xea)

#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"

#define IWL_HARDCODED_PNVM_SIZE 4096

struct pnvm_sku_package {
u8 rev;
u8 reserved1[3];
u32 total_size;
u8 n_skus;
u8 reserved2[11];
u8 data[];
};

static int iwl_pnvm_get_from_efi(struct iwl_trans *trans,
u8 **data, size_t *len)
{
struct efivar_entry *pnvm_efivar;
struct pnvm_sku_package *package;
unsigned long package_size;
int err;

pnvm_efivar = kzalloc(sizeof(*pnvm_efivar), GFP_KERNEL);
if (!pnvm_efivar)
return -ENOMEM;

memcpy(&pnvm_efivar->var.VariableName, IWL_UEFI_OEM_PNVM_NAME,
sizeof(IWL_UEFI_OEM_PNVM_NAME));
pnvm_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;

/*
* TODO: we hardcode a maximum length here, because reading
* from the UEFI is not working. To implement this properly,
* we have to call efivar_entry_size().
*/
package_size = IWL_HARDCODED_PNVM_SIZE;

package = kmalloc(package_size, GFP_KERNEL);
if (!package) {
err = -ENOMEM;
goto out;
}

err = efivar_entry_get(pnvm_efivar, NULL, &package_size, package);
if (err) {
IWL_DEBUG_FW(trans,
"PNVM UEFI variable not found %d (len %lu)\n",
err, package_size);
goto out;
}

IWL_DEBUG_FW(trans, "Read PNVM fro UEFI with size %lu\n", package_size);

*data = kmemdup(package->data, *len, GFP_KERNEL);
if (!*data)
err = -ENOMEM;
*len = package_size - sizeof(*package);

out:
kfree(package);
kfree(pnvm_efivar);

return err;
}
#else /* CONFIG_EFI */
static inline int iwl_pnvm_get_from_efi(struct iwl_trans *trans,
u8 **data, size_t *len)
{
return -EOPNOTSUPP;
}
#endif /* CONFIG_EFI */

static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
{
const struct firmware *pnvm;
@@ -335,6 +258,7 @@ int iwl_pnvm_load(struct iwl_trans *trans,
{
u8 *data;
size_t len;
struct pnvm_sku_package *package;
struct iwl_notification_wait pnvm_wait;
static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
PNVM_INIT_COMPLETE_NTFY) };
@@ -356,9 +280,19 @@ int iwl_pnvm_load(struct iwl_trans *trans,
}

/* First attempt to get the PNVM from BIOS */
ret = iwl_pnvm_get_from_efi(trans, &data, &len);
if (!ret)
goto parse;
package = iwl_uefi_get_pnvm(trans, &len);
if (!IS_ERR_OR_NULL(package)) {
data = kmemdup(package->data, len, GFP_KERNEL);

/* free package regardless of whether kmemdup succeeded */
kfree(package);

if (data) {
/* we need only the data size */
len -= sizeof(*package);
goto parse;
}
}

/* If it's not available, try from the filesystem */
ret = iwl_pnvm_get_from_fs(trans, &data, &len);
@@ -379,6 +313,30 @@ parse:
kfree(data);

skip_parse:
data = NULL;
/* now try to get the reduce power table, if not loaded yet */
if (!trans->reduce_power_loaded) {
data = iwl_uefi_get_reduced_power(trans, &len);
if (IS_ERR_OR_NULL(data)) {
/*
* Pretend we've loaded it - at least we've tried and
* couldn't load it at all, so there's no point in
* trying again over and over.
*/
trans->reduce_power_loaded = true;

goto skip_reduce_power;
}
}

ret = iwl_trans_set_reduce_power(trans, data, len);
if (ret)
IWL_DEBUG_FW(trans,
"Failed to set reduce power table %d\n",
ret);
kfree(data);

skip_reduce_power:
iwl_init_notification_wait(notif_wait, &pnvm_wait,
ntf_cmds, ARRAY_SIZE(ntf_cmds),
iwl_pnvm_complete_fn, trans);

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/******************************************************************************
*
* Copyright(c) 2020 Intel Corporation
* Copyright(c) 2020-2021 Intel Corporation
*
*****************************************************************************/

@@ -10,7 +10,7 @@

#include "fw/notif-wait.h"

#define MVM_UCODE_PNVM_TIMEOUT (HZ / 10)
#define MVM_UCODE_PNVM_TIMEOUT (HZ / 4)

int iwl_pnvm_load(struct iwl_trans *trans,
struct iwl_notif_wait_data *notif_wait);

@@ -0,0 +1,262 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright(c) 2021 Intel Corporation
*/

#include "iwl-drv.h"
#include "pnvm.h"
#include "iwl-prph.h"
#include "iwl-io.h"

#include "fw/uefi.h"
#include "fw/api/alive.h"
#include <linux/efi.h>

#define IWL_EFI_VAR_GUID EFI_GUID(0x92daaf2f, 0xc02b, 0x455b, \
0xb2, 0xec, 0xf5, 0xa3, \
0x59, 0x4f, 0x4a, 0xea)

void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
{
struct efivar_entry *pnvm_efivar;
void *data;
unsigned long package_size;
int err;

*len = 0;

pnvm_efivar = kzalloc(sizeof(*pnvm_efivar), GFP_KERNEL);
if (!pnvm_efivar)
return ERR_PTR(-ENOMEM);

memcpy(&pnvm_efivar->var.VariableName, IWL_UEFI_OEM_PNVM_NAME,
sizeof(IWL_UEFI_OEM_PNVM_NAME));
pnvm_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;

/*
* TODO: we hardcode a maximum length here, because reading
* from the UEFI is not working. To implement this properly,
* we have to call efivar_entry_size().
*/
package_size = IWL_HARDCODED_PNVM_SIZE;

data = kmalloc(package_size, GFP_KERNEL);
if (!data) {
data = ERR_PTR(-ENOMEM);
goto out;
}

err = efivar_entry_get(pnvm_efivar, NULL, &package_size, data);
if (err) {
IWL_DEBUG_FW(trans,
"PNVM UEFI variable not found %d (len %zd)\n",
err, package_size);
kfree(data);
data = ERR_PTR(err);
goto out;
}

IWL_DEBUG_FW(trans, "Read PNVM from UEFI with size %zd\n", package_size);
*len = package_size;

out:
kfree(pnvm_efivar);

return data;
}

static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
const u8 *data, size_t len)
{
struct iwl_ucode_tlv *tlv;
u8 *reduce_power_data = NULL, *tmp;
u32 size = 0;

IWL_DEBUG_FW(trans, "Handling REDUCE_POWER section\n");

while (len >= sizeof(*tlv)) {
u32 tlv_len, tlv_type;

len -= sizeof(*tlv);
tlv = (void *)data;

tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);

if (len < tlv_len) {
IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
len, tlv_len);
reduce_power_data = ERR_PTR(-EINVAL);
goto out;
}

data += sizeof(*tlv);

switch (tlv_type) {
case IWL_UCODE_TLV_MEM_DESC: {
IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_MEM_DESC len %d\n",
tlv_len);

IWL_DEBUG_FW(trans, "Adding data (size %d)\n", tlv_len);

tmp = krealloc(reduce_power_data, size + tlv_len, GFP_KERNEL);
if (!tmp) {
IWL_DEBUG_FW(trans,
"Couldn't allocate (more) reduce_power_data\n");

reduce_power_data = ERR_PTR(-ENOMEM);
goto out;
}

reduce_power_data = tmp;

memcpy(reduce_power_data + size, data, tlv_len);

size += tlv_len;

break;
}
case IWL_UCODE_TLV_PNVM_SKU:
IWL_DEBUG_FW(trans,
"New REDUCE_POWER section started, stop parsing.\n");
goto done;
default:
IWL_DEBUG_FW(trans, "Found TLV 0x%0x, len %d\n",
tlv_type, tlv_len);
break;
}

len -= ALIGN(tlv_len, 4);
data += ALIGN(tlv_len, 4);
}

done:
if (!size) {
IWL_DEBUG_FW(trans, "Empty REDUCE_POWER, skipping.\n");
reduce_power_data = ERR_PTR(-ENOENT);
goto out;
}

IWL_INFO(trans, "loaded REDUCE_POWER\n");

out:
return reduce_power_data;
}

static void *iwl_uefi_reduce_power_parse(struct iwl_trans *trans,
const u8 *data, size_t len)
{
struct iwl_ucode_tlv *tlv;
void *sec_data;

IWL_DEBUG_FW(trans, "Parsing REDUCE_POWER data\n");

while (len >= sizeof(*tlv)) {
u32 tlv_len, tlv_type;

len -= sizeof(*tlv);
tlv = (void *)data;

tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);

if (len < tlv_len) {
IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
len, tlv_len);
return ERR_PTR(-EINVAL);
}

if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
struct iwl_sku_id *sku_id =
(void *)(data + sizeof(*tlv));

IWL_DEBUG_FW(trans,
"Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
tlv_len);
IWL_DEBUG_FW(trans, "sku_id 0x%0x 0x%0x 0x%0x\n",
le32_to_cpu(sku_id->data[0]),
le32_to_cpu(sku_id->data[1]),
le32_to_cpu(sku_id->data[2]));

data += sizeof(*tlv) + ALIGN(tlv_len, 4);
len -= ALIGN(tlv_len, 4);

if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
sec_data = iwl_uefi_reduce_power_section(trans,
data,
len);
if (!IS_ERR(sec_data))
return sec_data;
} else {
IWL_DEBUG_FW(trans, "SKU ID didn't match!\n");
}
} else {
data += sizeof(*tlv) + ALIGN(tlv_len, 4);
len -= ALIGN(tlv_len, 4);
}
}

return ERR_PTR(-ENOENT);
}

void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
{
struct efivar_entry *reduce_power_efivar;
struct pnvm_sku_package *package;
void *data = NULL;
unsigned long package_size;
int err;

*len = 0;

reduce_power_efivar = kzalloc(sizeof(*reduce_power_efivar), GFP_KERNEL);
if (!reduce_power_efivar)
return ERR_PTR(-ENOMEM);

memcpy(&reduce_power_efivar->var.VariableName, IWL_UEFI_REDUCED_POWER_NAME,
sizeof(IWL_UEFI_REDUCED_POWER_NAME));
reduce_power_efivar->var.VendorGuid = IWL_EFI_VAR_GUID;

/*
* TODO: we hardcode a maximum length here, because reading
* from the UEFI is not working. To implement this properly,
* we have to call efivar_entry_size().
*/
package_size = IWL_HARDCODED_REDUCE_POWER_SIZE;

package = kmalloc(package_size, GFP_KERNEL);
if (!package) {
package = ERR_PTR(-ENOMEM);
goto out;
}

err = efivar_entry_get(reduce_power_efivar, NULL, &package_size, package);
if (err) {
IWL_DEBUG_FW(trans,
"Reduced Power UEFI variable not found %d (len %lu)\n",
err, package_size);
kfree(package);
data = ERR_PTR(err);
goto out;
}

IWL_DEBUG_FW(trans, "Read reduced power from UEFI with size %lu\n",
package_size);
*len = package_size;

IWL_DEBUG_FW(trans, "rev %d, total_size %d, n_skus %d\n",
package->rev, package->total_size, package->n_skus);

data = iwl_uefi_reduce_power_parse(trans, package->data,
*len - sizeof(*package));

kfree(package);

out:
kfree(reduce_power_efivar);

return data;
}

@ -0,0 +1,42 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
|
||||
/*
|
||||
* Copyright(c) 2021 Intel Corporation
|
||||
*/
|
||||
|
||||
|
||||
#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"
|
||||
#define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower"
|
||||
|
||||
/*
|
||||
* TODO: we have these hardcoded values that the caller must pass,
|
||||
* because reading from the UEFI is not working. To implement this
|
||||
* properly, we have to change iwl_pnvm_get_from_uefi() to call
|
||||
* efivar_entry_size() and return the value to the caller instead.
|
||||
*/
|
||||
#define IWL_HARDCODED_PNVM_SIZE 4096
|
||||
#define IWL_HARDCODED_REDUCE_POWER_SIZE 32768
|
||||
|
||||
struct pnvm_sku_package {
|
||||
u8 rev;
|
||||
u32 total_size;
|
||||
u8 n_skus;
|
||||
u32 reserved[2];
|
||||
u8 data[];
|
||||
} __packed;
|
||||
|
||||
#ifdef CONFIG_EFI
|
||||
void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len);
|
||||
void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len);
|
||||
#else /* CONFIG_EFI */
|
||||
static inline
|
||||
void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
|
||||
{
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
}
|
||||
|
||||
static inline
|
||||
void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
|
||||
{
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
}
|
||||
#endif /* CONFIG_EFI */
|
||||

@@ -426,6 +426,7 @@ struct iwl_cfg {
#define IWL_CFG_RF_TYPE_HR1 0x10C
#define IWL_CFG_RF_TYPE_GF 0x10D
#define IWL_CFG_RF_TYPE_MR 0x110
#define IWL_CFG_RF_TYPE_FM 0x112

#define IWL_CFG_RF_ID_TH 0x1
#define IWL_CFG_RF_ID_TH1 0x1
@@ -505,8 +506,11 @@ extern const char iwl_ax201_killer_1650s_name[];
extern const char iwl_ax201_killer_1650i_name[];
extern const char iwl_ax210_killer_1675w_name[];
extern const char iwl_ax210_killer_1675x_name[];
extern const char iwl9560_killer_1550i_160_name[];
extern const char iwl9560_killer_1550s_160_name[];
extern const char iwl_ax211_name[];
extern const char iwl_ax221_name[];
extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
@@ -586,7 +590,6 @@ extern const struct iwl_cfg iwl_qu_b0_hr_b0;
extern const struct iwl_cfg iwl_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax200_cfg_cc;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
extern const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0;
extern const struct iwl_cfg iwl_ax201_cfg_quz_hr;
extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr;
@@ -613,6 +616,7 @@ extern const struct iwl_cfg iwl_cfg_ma_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_ma_a0_fm_a0;
extern const struct iwl_cfg iwl_cfg_snj_a0_mr_a0;
extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0;
extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0;

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2018, 2020 Intel Corporation
 * Copyright (C) 2018, 2020-2021 Intel Corporation
 */
#ifndef __iwl_context_info_file_gen3_h__
#define __iwl_context_info_file_gen3_h__
@@ -127,6 +127,17 @@ struct iwl_prph_scratch_rbd_cfg {
        __le32 reserved;
} __packed; /* PERIPH_SCRATCH_RBD_CFG_S */

/*
 * struct iwl_prph_scratch_uefi_cfg - prph scratch reduce power table
 * @base_addr: reduce power table address
 * @size: table size in dwords
 */
struct iwl_prph_scratch_uefi_cfg {
        __le64 base_addr;
        __le32 size;
        __le32 reserved;
} __packed; /* PERIPH_SCRATCH_UEFI_CFG_S */

/*
 * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
 * @version: version information of context info and HW
@@ -141,6 +152,7 @@ struct iwl_prph_scratch_ctrl_cfg {
        struct iwl_prph_scratch_pnvm_cfg pnvm_cfg;
        struct iwl_prph_scratch_hwm_cfg hwm_cfg;
        struct iwl_prph_scratch_rbd_cfg rbd_cfg;
        struct iwl_prph_scratch_uefi_cfg reduce_power_cfg;
} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */

/*
@@ -151,7 +163,7 @@ struct iwl_prph_scratch_ctrl_cfg {
 */
struct iwl_prph_scratch {
        struct iwl_prph_scratch_ctrl_cfg ctrl_cfg;
        __le32 reserved[16];
        __le32 reserved[12];
        struct iwl_context_info_dram dram;
} __packed; /* PERIPH_SCRATCH_S */

@@ -245,9 +257,11 @@ struct iwl_context_info_gen3 {

int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
                                 const struct fw_img *fw);
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans);
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive);

int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
                                          const void *data, u32 len);
int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
                                                  const void *data, u32 len);

#endif /* __iwl_context_info_file_gen3_h__ */

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
 * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2016 Intel Deutschland GmbH
 */
@@ -325,9 +325,6 @@ enum {
#define CSR_HW_RF_ID_TYPE_GF (0x0010D000)
#define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000)

/* HW_RF CHIP ID */
#define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF)

/* HW_RF CHIP STEP */
#define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF)


@@ -57,7 +57,7 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
        [IWL_DBG_TLV_TYPE_DEBUG_INFO] = {.min_ver = 1, .max_ver = 1,},
        [IWL_DBG_TLV_TYPE_BUF_ALLOC] = {.min_ver = 1, .max_ver = 1,},
        [IWL_DBG_TLV_TYPE_HCMD] = {.min_ver = 1, .max_ver = 1,},
        [IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 1,},
        [IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 2,},
        [IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,},
};

@@ -178,9 +178,20 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
        u32 type = le32_to_cpu(reg->type);
        u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);

        /*
         * The higher part of the ID in version 2 is irrelevant for
         * us, so mask it out.
         */
        if (le32_to_cpu(reg->hdr.version) == 2)
                id &= IWL_FW_INI_REGION_V2_MASK;

        if (le32_to_cpu(tlv->length) < sizeof(*reg))
                return -EINVAL;

        /* for safe use of a string from FW, limit it to IWL_FW_INI_MAX_NAME */
        IWL_DEBUG_FW(trans, "WRT: parsing region: %.*s\n",
                     IWL_FW_INI_MAX_NAME, reg->name);

        if (id >= IWL_FW_INI_MAX_REGION_ID) {
                IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
                return -EINVAL;

@@ -1117,6 +1117,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                                IWL_ERROR_EVENT_TABLE_LMAC1;
                        break;
                        }
                case IWL_UCODE_TLV_TCM_DEBUG_ADDRS: {
                        struct iwl_fw_tcm_error_addr *ptr = (void *)tlv_data;

                        if (tlv_len != sizeof(*ptr))
                                goto invalid_tlv_len;
                        drv->trans->dbg.tcm_error_event_table =
                                le32_to_cpu(ptr->addr) & ~FW_ADDR_CACHE_CONTROL;
                        drv->trans->dbg.error_event_table_tlv_status |=
                                IWL_ERROR_EVENT_TABLE_TCM;
                        break;
                        }
                case IWL_UCODE_TLV_TYPE_DEBUG_INFO:
                case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
                case IWL_UCODE_TLV_TYPE_HCMD:
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
/*
|
||||
* Copyright (C) 2005-2014, 2018-2020 Intel Corporation
|
||||
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
|
||||
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
|
||||
* Copyright (C) 2016-2017 Intel Deutschland GmbH
|
||||
*/
|
||||
|
|
@ -549,8 +549,7 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
|
|||
.mac_cap_info[2] =
|
||||
IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP,
|
||||
.mac_cap_info[3] =
|
||||
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
|
||||
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2,
|
||||
IEEE80211_HE_MAC_CAP3_OMI_CONTROL,
|
||||
.mac_cap_info[4] =
|
||||
IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU |
|
||||
IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39,
|
||||
|
|
@ -579,25 +578,20 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
|
|||
IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
|
||||
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
|
||||
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
|
||||
.phy_cap_info[5] =
|
||||
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
|
||||
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
|
||||
.phy_cap_info[6] =
|
||||
IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
|
||||
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB |
|
||||
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
|
||||
.phy_cap_info[7] =
|
||||
IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
|
||||
IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
|
||||
IEEE80211_HE_PHY_CAP7_MAX_NC_1,
|
||||
IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI,
|
||||
.phy_cap_info[8] =
|
||||
IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
|
||||
IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
|
||||
IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
|
||||
IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
|
||||
IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996,
|
||||
IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242,
|
||||
.phy_cap_info[9] =
|
||||
IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
|
||||
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
|
||||
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
|
||||
IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED,
|
||||
|
|
@ -632,19 +626,11 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
|
|||
.mac_cap_info[1] =
|
||||
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
|
||||
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
|
||||
.mac_cap_info[2] =
|
||||
IEEE80211_HE_MAC_CAP2_BSR,
|
||||
.mac_cap_info[3] =
|
||||
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
|
||||
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2,
|
||||
.mac_cap_info[4] =
|
||||
IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
|
||||
.mac_cap_info[5] =
|
||||
IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU,
|
||||
IEEE80211_HE_MAC_CAP3_OMI_CONTROL,
|
||||
.phy_cap_info[0] =
|
||||
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
|
||||
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
|
||||
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
|
||||
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G,
|
||||
.phy_cap_info[1] =
|
||||
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD,
|
||||
.phy_cap_info[2] =
|
||||
|
|
@ -654,27 +640,14 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
|
|||
IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
|
||||
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
|
||||
IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
|
||||
.phy_cap_info[4] =
|
||||
IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
|
||||
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
|
||||
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
|
||||
.phy_cap_info[5] =
|
||||
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
|
||||
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
|
||||
.phy_cap_info[6] =
|
||||
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
|
||||
.phy_cap_info[7] =
|
||||
IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
|
||||
IEEE80211_HE_PHY_CAP7_MAX_NC_1,
|
||||
IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI,
|
||||
.phy_cap_info[8] =
|
||||
IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
|
||||
IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
|
||||
IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
|
||||
IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
|
||||
IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996,
|
||||
IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242,
|
||||
.phy_cap_info[9] =
|
||||
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
|
||||
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
|
||||
IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED,
|
||||
},
|
||||
/*
|
||||
|
|
@ -745,12 +718,72 @@ static void iwl_init_he_6ghz_capa(struct iwl_trans *trans,
|
|||
iftype_data[i].he_6ghz_capa.capa = cpu_to_le16(he_6ghz_capa);
|
||||
}
|
||||
|
||||
static void
|
||||
iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
|
||||
struct ieee80211_supported_band *sband,
|
||||
struct ieee80211_sband_iftype_data *iftype_data,
|
||||
u8 tx_chains, u8 rx_chains,
|
||||
const struct iwl_fw *fw)
|
||||
{
|
||||
bool is_ap = iftype_data->types_mask & BIT(NL80211_IFTYPE_AP);
|
||||
|
||||
/* Advertise an A-MPDU exponent extension based on
|
||||
* operating band
|
||||
*/
|
||||
if (sband->band != NL80211_BAND_2GHZ)
|
||||
iftype_data->he_cap.he_cap_elem.mac_cap_info[3] |=
|
||||
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_1;
|
||||
else
|
||||
iftype_data->he_cap.he_cap_elem.mac_cap_info[3] |=
|
||||
IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3;
|
||||
|
||||
if (is_ap && iwlwifi_mod_params.nvm_file)
|
||||
iftype_data->he_cap.he_cap_elem.phy_cap_info[0] |=
|
||||
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
|
||||
|
||||
if ((tx_chains & rx_chains) == ANT_AB) {
|
||||
iftype_data->he_cap.he_cap_elem.phy_cap_info[5] |=
|
||||
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
|
||||
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2;
|
||||
if (!is_ap)
|
||||
iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |=
|
||||
IEEE80211_HE_PHY_CAP7_MAX_NC_2;
|
||||
} else if (!is_ap) {
|
||||
/* If not 2x2, we need to indicate 1x1 in the
|
||||
* Midamble RX Max NSTS - but not for AP mode
|
||||
*/
|
||||
iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &=
|
||||
~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
|
||||
iftype_data->he_cap.he_cap_elem.phy_cap_info[2] &=
|
||||
~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS;
|
||||
iftype_data->he_cap.he_cap_elem.phy_cap_info[7] |=
|
||||
IEEE80211_HE_PHY_CAP7_MAX_NC_1;
|
||||
}
|
||||
|
||||
switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
|
||||
case IWL_CFG_RF_TYPE_GF:
|
||||
case IWL_CFG_RF_TYPE_MR:
|
||||
iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |=
|
||||
IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
|
||||
if (!is_ap)
|
||||
iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |=
|
||||
IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
|
||||
break;
|
||||
}
|
||||
|
||||
if (fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_BROADCAST_TWT))
|
||||
iftype_data->he_cap.he_cap_elem.mac_cap_info[2] |=
|
||||
IEEE80211_HE_MAC_CAP2_BCAST_TWT;
|
||||
}
|
||||
|
||||
static void iwl_init_he_hw_capab(struct iwl_trans *trans,
|
||||
struct iwl_nvm_data *data,
|
||||
struct ieee80211_supported_band *sband,
|
||||
u8 tx_chains, u8 rx_chains)
|
||||
u8 tx_chains, u8 rx_chains,
|
||||
const struct iwl_fw *fw)
|
||||
{
|
||||
struct ieee80211_sband_iftype_data *iftype_data;
|
||||
int i;
|
||||
|
||||
/* should only initialize once */
|
||||
if (WARN_ON(sband->iftype_data))
|
||||
|
|
@ -777,26 +810,18 @@ static void iwl_init_he_hw_capab(struct iwl_trans *trans,
|
|||
sband->iftype_data = iftype_data;
|
||||
sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa);
|
||||
|
||||
/* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
|
||||
if ((tx_chains & rx_chains) != ANT_AB) {
|
||||
int i;
|
||||
for (i = 0; i < sband->n_iftype_data; i++)
|
||||
iwl_nvm_fixup_sband_iftd(trans, sband, &iftype_data[i],
|
||||
tx_chains, rx_chains, fw);
|
||||
|
||||
for (i = 0; i < sband->n_iftype_data; i++) {
|
||||
iftype_data[i].he_cap.he_cap_elem.phy_cap_info[1] &=
|
||||
~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
|
||||
iftype_data[i].he_cap.he_cap_elem.phy_cap_info[2] &=
|
||||
~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS;
|
||||
iftype_data[i].he_cap.he_cap_elem.phy_cap_info[7] &=
|
||||
~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK;
|
||||
}
|
||||
}
|
||||
iwl_init_he_6ghz_capa(trans, data, sband, tx_chains, rx_chains);
|
||||
}
|
||||
|
||||
static void iwl_init_sbands(struct iwl_trans *trans,
|
||||
struct iwl_nvm_data *data,
|
||||
const void *nvm_ch_flags, u8 tx_chains,
|
||||
u8 rx_chains, u32 sbands_flags, bool v4)
|
||||
u8 rx_chains, u32 sbands_flags, bool v4,
|
||||
const struct iwl_fw *fw)
|
||||
{
|
||||
struct device *dev = trans->dev;
|
||||
const struct iwl_cfg *cfg = trans->cfg;
|
||||
|
|
@ -816,7 +841,8 @@ static void iwl_init_sbands(struct iwl_trans *trans,
|
|||
tx_chains, rx_chains);
|
||||
|
||||
if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
|
||||
iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains);
|
||||
iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains,
|
||||
fw);
|
||||
|
||||
sband = &data->bands[NL80211_BAND_5GHZ];
|
||||
sband->band = NL80211_BAND_5GHZ;
|
||||
|
|
@ -831,7 +857,8 @@ static void iwl_init_sbands(struct iwl_trans *trans,
|
|||
tx_chains, rx_chains);
|
||||
|
||||
if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
|
||||
iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains);
|
||||
iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains,
|
||||
fw);
|
||||
|
||||
/* 6GHz band. */
|
||||
sband = &data->bands[NL80211_BAND_6GHZ];
|
||||
|
|
@ -843,7 +870,8 @@ static void iwl_init_sbands(struct iwl_trans *trans,
|
|||
NL80211_BAND_6GHZ);
|
||||
|
||||
if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
|
||||
iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains);
|
||||
iwl_init_he_hw_capab(trans, data, sband, tx_chains, rx_chains,
|
||||
fw);
|
||||
else
|
||||
sband->n_channels = 0;
|
||||
if (n_channels != n_used)
|
||||
|
|
@ -1154,7 +1182,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
|||
sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ;
|
||||
|
||||
iwl_init_sbands(trans, data, ch_section, tx_chains, rx_chains,
|
||||
sbands_flags, false);
|
||||
sbands_flags, false, fw);
|
||||
data->calib_version = 255;
|
||||
|
||||
return data;
|
||||
|
|
@ -1661,7 +1689,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
|
|||
channel_profile,
|
||||
nvm->valid_tx_ant & fw->valid_tx_ant,
|
||||
nvm->valid_rx_ant & fw->valid_rx_ant,
|
||||
sbands_flags, v4);
|
||||
sbands_flags, v4, fw);
|
||||
|
||||
iwl_free_resp(&hcmd);
|
||||
return nvm;
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
|
||||
/*
|
||||
* Copyright (C) 2005-2014, 2018-2020 Intel Corporation
|
||||
* Copyright (C) 2005-2014, 2018-2021 Intel Corporation
|
||||
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
|
||||
* Copyright (C) 2016 Intel Deutschland GmbH
|
||||
*/
|
||||
|
|
@ -412,6 +412,8 @@ enum {
|
|||
#define UREG_DOORBELL_TO_ISR6_RESUME BIT(19)
|
||||
#define UREG_DOORBELL_TO_ISR6_PNVM BIT(20)
|
||||
|
||||
#define CNVI_MBOX_C 0xA3400C
|
||||
|
||||
#define FSEQ_ERROR_CODE 0xA340C8
|
||||
#define FSEQ_TOP_INIT_VERSION 0xA34038
|
||||
#define FSEQ_CNVIO_INIT_VERSION 0xA3403C
|
||||
|
|
|
|||
|
|
@ -193,6 +193,7 @@ enum iwl_error_event_table_status {
|
|||
IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
|
||||
IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
|
||||
IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
|
||||
IWL_ERROR_EVENT_TABLE_TCM = BIT(3),
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
@ -589,6 +590,8 @@ struct iwl_trans_ops {
|
|||
void (*debugfs_cleanup)(struct iwl_trans *trans);
|
||||
void (*sync_nmi)(struct iwl_trans *trans);
|
||||
int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
|
||||
int (*set_reduce_power)(struct iwl_trans *trans,
|
||||
const void *data, u32 len);
|
||||
void (*interrupts)(struct iwl_trans *trans, bool enable);
|
||||
};
|
||||
|
||||
|
|
@ -706,6 +709,7 @@ struct iwl_self_init_dram {
|
|||
* @trigger_tlv: array of pointers to triggers TLVs for debug
|
||||
* @lmac_error_event_table: addrs of lmacs error tables
|
||||
* @umac_error_event_table: addr of umac error table
|
||||
* @tcm_error_event_table: address of TCM error table
|
||||
* @error_event_table_tlv_status: bitmap that indicates what error table
|
||||
* pointers was recevied via TLV. uses enum &iwl_error_event_table_status
|
||||
* @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
|
||||
|
|
@ -732,6 +736,7 @@ struct iwl_trans_debug {
|
|||
|
||||
u32 lmac_error_event_table[2];
|
||||
u32 umac_error_event_table;
|
||||
u32 tcm_error_event_table;
|
||||
unsigned int error_event_table_tlv_status;
|
||||
|
||||
enum iwl_ini_cfg_state internal_ini_cfg;
|
||||
|
|
@ -957,6 +962,7 @@ struct iwl_trans {
|
|||
bool pm_support;
|
||||
bool ltr_enabled;
|
||||
u8 pnvm_loaded:1;
|
||||
u8 reduce_power_loaded:1;
|
||||
|
||||
const struct iwl_hcmd_arr *command_groups;
|
||||
int command_groups_size;
|
||||
|
|
@ -1420,6 +1426,20 @@ static inline int iwl_trans_set_pnvm(struct iwl_trans *trans,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline int iwl_trans_set_reduce_power(struct iwl_trans *trans,
|
||||
const void *data, u32 len)
|
||||
{
|
||||
if (trans->ops->set_reduce_power) {
|
||||
int ret = trans->ops->set_reduce_power(trans, data, len);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
trans->reduce_power_loaded = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
|
||||
{
|
||||
return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
|
||||
|
|
|
|||
|
|
@ -104,7 +104,7 @@ static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key,
|
|||
struct wowlan_key_data {
|
||||
struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
|
||||
struct iwl_wowlan_tkip_params_cmd *tkip;
|
||||
struct iwl_wowlan_kek_kck_material_cmd_v3 *kek_kck_cmd;
|
||||
struct iwl_wowlan_kek_kck_material_cmd_v4 *kek_kck_cmd;
|
||||
bool error, use_rsc_tsc, use_tkip, configure_keys;
|
||||
int wep_key_idx;
|
||||
};
|
||||
|
|
@ -393,14 +393,19 @@ static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm,
|
|||
}
|
||||
|
||||
static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif,
|
||||
struct cfg80211_wowlan *wowlan)
|
||||
{
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
struct iwl_wowlan_patterns_cmd *pattern_cmd;
|
||||
struct iwl_host_cmd cmd = {
|
||||
.id = WOWLAN_PATTERNS,
|
||||
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
|
||||
};
|
||||
int i, err;
|
||||
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
|
||||
WOWLAN_PATTERNS,
|
||||
IWL_FW_CMD_VER_UNKNOWN);
|
||||
|
||||
if (!wowlan->n_patterns)
|
||||
return 0;
|
||||
|
|
@ -408,11 +413,13 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
|
|||
cmd.len[0] = sizeof(*pattern_cmd) +
|
||||
wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v2);
|
||||
|
||||
pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
|
||||
pattern_cmd = kzalloc(cmd.len[0], GFP_KERNEL);
|
||||
if (!pattern_cmd)
|
||||
return -ENOMEM;
|
||||
|
||||
pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
|
||||
pattern_cmd->n_patterns = wowlan->n_patterns;
|
||||
if (ver >= 3)
|
||||
pattern_cmd->sta_id = mvmvif->ap_sta_id;
|
||||
|
||||
for (i = 0; i < wowlan->n_patterns; i++) {
|
||||
int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
|
||||
|
|
@ -636,7 +643,6 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
|
|||
struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
|
||||
struct ieee80211_sta *ap_sta)
|
||||
{
|
||||
int ret;
|
||||
struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
|
||||
|
||||
/* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
|
||||
|
|
@ -646,12 +652,16 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
|
|||
wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
|
||||
ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
|
||||
|
||||
/* Query the last used seqno and set it */
|
||||
ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
|
||||
WOWLAN_CONFIGURATION, 0) < 6) {
|
||||
/* Query the last used seqno and set it */
|
||||
int ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
|
||||
|
||||
wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
|
||||
}
|
||||
|
||||
iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
|
||||
|
||||
|
|
@ -706,7 +716,8 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
|
|||
struct ieee80211_vif *vif,
|
||||
u32 cmd_flags)
|
||||
{
|
||||
struct iwl_wowlan_kek_kck_material_cmd_v3 kek_kck_cmd = {};
|
||||
struct iwl_wowlan_kek_kck_material_cmd_v4 kek_kck_cmd = {};
|
||||
struct iwl_wowlan_kek_kck_material_cmd_v4 *_kek_kck_cmd = &kek_kck_cmd;
|
||||
struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
|
||||
bool unified = fw_has_capa(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
|
||||
|
|
@ -715,7 +726,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
|
|||
.use_rsc_tsc = false,
|
||||
.tkip = &tkip_cmd,
|
||||
.use_tkip = false,
|
||||
.kek_kck_cmd = &kek_kck_cmd,
|
||||
.kek_kck_cmd = _kek_kck_cmd,
|
||||
};
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
int ret;
|
||||
|
|
@ -809,13 +820,9 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
|
|||
IWL_ALWAYS_LONG_GROUP,
|
||||
WOWLAN_KEK_KCK_MATERIAL,
|
||||
IWL_FW_CMD_VER_UNKNOWN);
|
||||
if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 &&
|
||||
if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 && cmd_ver != 4 &&
|
||||
cmd_ver != IWL_FW_CMD_VER_UNKNOWN))
|
||||
return -EINVAL;
|
||||
if (cmd_ver == 3)
|
||||
cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3);
|
||||
else
|
||||
cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
|
||||
|
||||
memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
|
||||
mvmvif->rekey_data.kck_len);
|
||||
|
|
@ -825,6 +832,21 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
|
|||
kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len);
|
||||
kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
|
||||
kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm);
|
||||
kek_kck_cmd.sta_id = cpu_to_le32(mvmvif->ap_sta_id);
|
||||
|
||||
if (cmd_ver == 4) {
|
||||
cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v4);
|
||||
} else {
|
||||
if (cmd_ver == 3)
|
||||
cmd_size =
|
||||
sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3);
|
||||
else
|
||||
cmd_size =
|
||||
sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
|
||||
/* skip the sta_id at the beginning */
|
||||
_kek_kck_cmd = (void *)
|
||||
((u8 *)_kek_kck_cmd) + sizeof(kek_kck_cmd.sta_id);
|
||||
}
|
||||
|
||||
IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n",
|
||||
mvmvif->rekey_data.akm);
|
||||
|
|
@ -832,7 +854,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
|
|||
ret = iwl_mvm_send_cmd_pdu(mvm,
|
||||
WOWLAN_KEK_KCK_MATERIAL, cmd_flags,
|
||||
cmd_size,
|
||||
&kek_kck_cmd);
|
||||
_kek_kck_cmd);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
|
@ -884,7 +906,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
|
|||
|
||||
if (fw_has_api(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE))
|
||||
ret = iwl_mvm_send_patterns(mvm, wowlan);
|
||||
ret = iwl_mvm_send_patterns(mvm, vif, wowlan);
|
||||
else
|
||||
ret = iwl_mvm_send_patterns_v1(mvm, wowlan);
|
||||
if (ret)
|
||||
|
|
@ -1534,9 +1556,12 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
|
|||
}
|
||||
|
||||
out:
|
||||
mvmvif->seqno_valid = true;
|
||||
/* +0x10 because the set API expects next-to-use, not last-used */
|
||||
mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
|
||||
if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
|
||||
WOWLAN_GET_STATUSES, 0) < 10) {
|
||||
mvmvif->seqno_valid = true;
|
||||
/* +0x10 because the set API expects next-to-use, not last-used */
|
||||
mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
|
@ -1587,15 +1612,27 @@ iwl_mvm_parse_wowlan_status_common(v6)
|
|||
iwl_mvm_parse_wowlan_status_common(v7)
|
||||
iwl_mvm_parse_wowlan_status_common(v9)
|
||||
|
||||
struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
|
||||
static struct iwl_wowlan_status *
|
||||
iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
|
||||
{
|
||||
struct iwl_wowlan_status *status;
|
||||
struct iwl_wowlan_get_status_cmd get_status_cmd = {
|
||||
.sta_id = cpu_to_le32(sta_id),
|
||||
};
|
||||
struct iwl_host_cmd cmd = {
|
||||
.id = WOWLAN_GET_STATUSES,
|
||||
.flags = CMD_WANT_SKB,
|
||||
.data = { &get_status_cmd, },
|
||||
.len = { sizeof(get_status_cmd), },
|
||||
};
|
||||
int ret, len;
|
||||
u8 notif_ver;
|
||||
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
|
||||
WOWLAN_GET_STATUSES,
|
||||
IWL_FW_CMD_VER_UNKNOWN);
|
||||
|
||||
if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN)
|
||||
cmd.len[0] = 0;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
|
|
@ -1608,8 +1645,11 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
|
|||
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
|
||||
|
||||
/* default to 7 (when we have IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL) */
|
||||
notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
|
||||
WOWLAN_GET_STATUSES, 7);
|
||||
notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
|
||||
WOWLAN_GET_STATUSES, 0);
|
||||
if (!notif_ver)
|
||||
notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
|
||||
WOWLAN_GET_STATUSES, 7);
|
||||
|
||||
if (!fw_has_api(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) {
|
||||
|
|
@ -1654,7 +1694,7 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
|
|||
|
||||
status->gtk[0] = v7->gtk[0];
|
||||
status->igtk[0] = v7->igtk[0];
|
||||
} else if (notif_ver == 9) {
|
||||
} else if (notif_ver == 9 || notif_ver == 10) {
|
||||
struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data;
|
||||
|
||||
status = iwl_mvm_parse_wowlan_status_common_v9(mvm,
|
||||
|
|
@ -1680,29 +1720,37 @@ out_free_resp:
|
|||
}
|
||||
|
||||
static struct iwl_wowlan_status *
|
||||
iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm)
|
||||
iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, u8 sta_id)
|
||||
{
|
||||
int ret;
|
||||
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
|
||||
OFFLOADS_QUERY_CMD,
|
||||
IWL_FW_CMD_VER_UNKNOWN);
|
||||
__le32 station_id = cpu_to_le32(sta_id);
|
||||
u32 cmd_size = cmd_ver != IWL_FW_CMD_VER_UNKNOWN ? sizeof(station_id) : 0;
|
||||
|
||||
/* only for tracing for now */
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
|
||||
if (ret)
|
||||
IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
|
||||
if (!mvm->net_detect) {
|
||||
/* only for tracing for now */
|
||||
int ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0,
|
||||
cmd_size, &station_id);
|
||||
if (ret)
|
||||
IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
|
||||
}
|
||||
|
||||
return iwl_mvm_send_wowlan_get_status(mvm);
|
||||
return iwl_mvm_send_wowlan_get_status(mvm, sta_id);
|
||||
}
|
||||
|
||||
/* releases the MVM mutex */
|
||||
static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
struct iwl_wowlan_status_data status;
|
||||
struct iwl_wowlan_status *fw_status;
|
||||
int i;
|
||||
bool keep;
|
||||
struct iwl_mvm_sta *mvm_ap_sta;
|
||||
|
||||
fw_status = iwl_mvm_get_wakeup_status(mvm);
|
||||
fw_status = iwl_mvm_get_wakeup_status(mvm, mvmvif->ap_sta_id);
|
||||
if (IS_ERR_OR_NULL(fw_status))
|
||||
goto out_unlock;
|
||||
|
||||
|
|
@ -1880,7 +1928,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
|
|||
u32 reasons = 0;
|
||||
int i, n_matches, ret;
|
||||
|
||||
fw_status = iwl_mvm_get_wakeup_status(mvm);
|
||||
fw_status = iwl_mvm_get_wakeup_status(mvm, IWL_MVM_INVALID_STA);
|
||||
if (!IS_ERR_OR_NULL(fw_status)) {
|
||||
reasons = le32_to_cpu(fw_status->wakeup_reasons);
|
||||
kfree(fw_status);
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
/*
|
||||
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
|
||||
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
|
||||
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
|
||||
* Copyright (C) 2016-2017 Intel Deutschland GmbH
|
||||
*/
|
||||
|
|
@ -460,7 +460,7 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
|
|||
int pos = 0;
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
|
||||
iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2, &curr_os, NULL);
|
||||
mutex_unlock(&mvm->mutex);
|
||||
|
||||
do_div(curr_os, NSEC_PER_USEC);
|
||||
|
|
|
|||
|
|
@ -1023,7 +1023,9 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
|
|||
mvm->fw_restart++;
|
||||
|
||||
/* take the return value to make compiler happy - it will fail anyway */
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, 0, 0, NULL);
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm,
|
||||
WIDE_ID(LONG_GROUP, REPLY_ERROR),
|
||||
0, 0, NULL);
|
||||
|
||||
mutex_unlock(&mvm->mutex);
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
/*
|
||||
* Copyright (C) 2015-2017 Intel Deutschland GmbH
|
||||
* Copyright (C) 2018-2020 Intel Corporation
|
||||
* Copyright (C) 2018-2021 Intel Corporation
|
||||
*/
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/math64.h>
|
||||
|
|
@ -430,6 +430,10 @@ iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
|
|||
FTM_PUT_FLAG(TB);
|
||||
else if (peer->ftm.non_trigger_based)
|
||||
FTM_PUT_FLAG(NON_TB);
|
||||
|
||||
if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
|
||||
peer->ftm.lmr_feedback)
|
||||
FTM_PUT_FLAG(LMR_FEEDBACK);
|
||||
}
|
||||
|
||||
static int
|
||||
|
|
@ -879,7 +883,8 @@ static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts)
|
|||
u32 curr_gp2, diff;
|
||||
u64 now_from_boot_ns;
|
||||
|
||||
iwl_mvm_get_sync_time(mvm, &curr_gp2, &now_from_boot_ns);
|
||||
iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2,
|
||||
&now_from_boot_ns, NULL);
|
||||
|
||||
if (curr_gp2 >= gp2_ts)
|
||||
diff = curr_gp2 - gp2_ts;
|
||||
|
|
|
|||
|
|
@ -1139,19 +1139,34 @@ static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
|
|||
|
||||
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
|
||||
{
|
||||
int cmd_ret;
|
||||
struct iwl_lari_config_change_cmd_v3 cmd = {};
|
||||
int ret;
|
||||
u32 value;
|
||||
struct iwl_lari_config_change_cmd_v4 cmd = {};
|
||||
|
||||
cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);
|
||||
|
||||
ret = iwl_acpi_get_dsm_u32((&mvm->fwrt)->dev, 0, DSM_FUNC_11AX_ENABLEMENT,
|
||||
&iwl_guid, &value);
|
||||
if (!ret)
|
||||
cmd.oem_11ax_allow_bitmap = cpu_to_le32(value);
|
||||
/* apply more config masks here */
|
||||
|
||||
if (cmd.config_bitmap) {
|
||||
ret = iwl_acpi_get_dsm_u32((&mvm->fwrt)->dev, 0,
|
||||
DSM_FUNC_ENABLE_UNII4_CHAN,
|
||||
&iwl_guid, &value);
|
||||
if (!ret)
|
||||
cmd.oem_unii4_allow_bitmap = cpu_to_le32(value);
|
||||
|
||||
if (cmd.config_bitmap ||
|
||||
cmd.oem_11ax_allow_bitmap ||
|
||||
cmd.oem_unii4_allow_bitmap) {
|
||||
size_t cmd_size;
|
||||
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
|
||||
REGULATORY_AND_NVM_GROUP,
|
||||
LARI_CONFIG_CHANGE, 1);
|
||||
if (cmd_ver == 3)
|
||||
if (cmd_ver == 4)
|
||||
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
|
||||
else if (cmd_ver == 3)
|
||||
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
|
||||
else if (cmd_ver == 2)
|
||||
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
|
||||
|
|
@ -1159,16 +1174,21 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
|
|||
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
|
||||
|
||||
IWL_DEBUG_RADIO(mvm,
|
||||
"sending LARI_CONFIG_CHANGE, config_bitmap=0x%x\n",
|
||||
le32_to_cpu(cmd.config_bitmap));
|
||||
cmd_ret = iwl_mvm_send_cmd_pdu(mvm,
|
||||
WIDE_ID(REGULATORY_AND_NVM_GROUP,
|
||||
LARI_CONFIG_CHANGE),
|
||||
0, cmd_size, &cmd);
|
||||
if (cmd_ret < 0)
|
||||
"sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
|
||||
le32_to_cpu(cmd.config_bitmap),
|
||||
le32_to_cpu(cmd.oem_11ax_allow_bitmap));
|
||||
IWL_DEBUG_RADIO(mvm,
|
||||
"sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, cmd_ver=%d\n",
|
||||
le32_to_cpu(cmd.oem_unii4_allow_bitmap),
|
||||
cmd_ver);
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm,
|
||||
WIDE_ID(REGULATORY_AND_NVM_GROUP,
|
||||
LARI_CONFIG_CHANGE),
|
||||
0, cmd_size, &cmd);
|
||||
if (ret < 0)
|
||||
IWL_DEBUG_RADIO(mvm,
|
||||
"Failed to send LARI_CONFIG_CHANGE (%d)\n",
|
||||
cmd_ret);
|
||||
ret);
|
||||
}
|
||||
}
|
||||
#else /* CONFIG_ACPI */
|
||||
|
|
|
|||
|
|
@ -3800,6 +3800,7 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
|
|||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
struct cfg80211_chan_def chandef;
|
||||
struct iwl_mvm_phy_ctxt *phy_ctxt;
|
||||
bool band_change_removal;
|
||||
int ret, i;
|
||||
|
||||
IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
|
||||
|
|
@ -3880,19 +3881,30 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
|
|||
cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
|
||||
|
||||
/*
|
||||
* Change the PHY context configuration as it is currently referenced
|
||||
* only by the P2P Device MAC
|
||||
* Check if the remain-on-channel is on a different band and that
|
||||
* requires context removal, see iwl_mvm_phy_ctxt_changed(). If
|
||||
* so, we'll need to release and then re-configure here, since we
|
||||
* must not remove a PHY context that's part of a binding.
|
||||
*/
|
||||
if (mvmvif->phy_ctxt->ref == 1) {
|
||||
band_change_removal =
|
||||
fw_has_capa(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
|
||||
mvmvif->phy_ctxt->channel->band != chandef.chan->band;
|
||||
|
||||
if (mvmvif->phy_ctxt->ref == 1 && !band_change_removal) {
|
||||
/*
|
||||
* Change the PHY context configuration as it is currently
|
||||
* referenced only by the P2P Device MAC (and we can modify it)
|
||||
*/
|
||||
ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
|
||||
&chandef, 1, 1);
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
} else {
|
||||
/*
|
||||
* The PHY context is shared with other MACs. Need to remove the
|
||||
* P2P Device from the binding, allocate an new PHY context and
|
||||
* create a new binding
|
||||
* The PHY context is shared with other MACs (or we're trying to
|
||||
* switch bands), so remove the P2P Device from the binding,
|
||||
* allocate an new PHY context and create a new binding.
|
||||
*/
|
||||
phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
|
||||
if (!phy_ctxt) {
|
||||
|
|
@ -4211,7 +4223,6 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
|
|||
struct ieee80211_vif *disabled_vif = NULL;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
|
||||
|
||||
switch (vif->type) {
|
||||
|
|
|
|||
|
|
@ -16,6 +16,8 @@
|
|||
#include <linux/thermal.h>
|
||||
#endif
|
||||
|
||||
#include <linux/ktime.h>
|
||||
|
||||
#include "iwl-op-mode.h"
|
||||
#include "iwl-trans.h"
|
||||
#include "fw/notif-wait.h"
|
||||
|
|
@ -195,6 +197,7 @@ enum iwl_mvm_smps_type_request {
|
|||
IWL_MVM_SMPS_REQ_BT_COEX,
|
||||
IWL_MVM_SMPS_REQ_TT,
|
||||
IWL_MVM_SMPS_REQ_PROT,
|
||||
IWL_MVM_SMPS_REQ_FW,
|
||||
NUM_IWL_MVM_SMPS_REQ,
|
||||
};
|
||||
|
||||
|
|
@ -991,6 +994,8 @@ struct iwl_mvm {
|
|||
*/
|
||||
bool temperature_test; /* Debug test temperature is enabled */
|
||||
|
||||
bool fw_static_smps_request;
|
||||
|
||||
unsigned long bt_coex_last_tcm_ts;
|
||||
struct iwl_mvm_tcm tcm;
|
||||
|
||||
|
|
@ -1447,10 +1452,16 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
|
|||
struct ieee80211_tx_rate *r);
|
||||
u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
|
||||
u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac);
|
||||
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
|
||||
|
||||
static inline void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
|
||||
{
|
||||
iwl_fwrt_dump_error_logs(&mvm->fwrt);
|
||||
}
|
||||
|
||||
u8 first_antenna(u8 mask);
|
||||
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
|
||||
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime);
|
||||
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2,
|
||||
u64 *boottime, ktime_t *realtime);
|
||||
u32 iwl_mvm_get_systime(struct iwl_mvm *mvm);
|
||||
|
||||
/* Tx / Host Commands */
|
||||
|
|
@ -1769,7 +1780,6 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
|
|||
void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif, int idx);
|
||||
extern const struct file_operations iwl_dbgfs_d3_test_ops;
|
||||
struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm);
|
||||
#ifdef CONFIG_PM
|
||||
void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif);
|
||||
|
|
@ -1827,7 +1837,9 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
|
|||
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
enum iwl_mvm_smps_type_request req_type,
|
||||
enum ieee80211_smps_mode smps_request);
|
||||
bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm);
|
||||
bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_phy_ctxt *ctxt);
|
||||
void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif);
|
||||
|
||||
/* Low latency */
|
||||
int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
/*
|
||||
* Copyright (C) 2012-2014 Intel Corporation
|
||||
* Copyright (C) 2012-2014, 2021 Intel Corporation
|
||||
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
|
||||
* Copyright (C) 2015 Intel Deutschland GmbH
|
||||
*/
|
||||
|
|
@ -36,7 +36,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
|
|||
struct iwl_proto_offload_cmd_v1 v1;
|
||||
struct iwl_proto_offload_cmd_v2 v2;
|
||||
struct iwl_proto_offload_cmd_v3_small v3s;
|
||||
struct iwl_proto_offload_cmd_v3_large v3l;
|
||||
struct iwl_proto_offload_cmd_v4 v4;
|
||||
} cmd = {};
|
||||
struct iwl_host_cmd hcmd = {
|
||||
.id = PROT_OFFLOAD_CONFIG_CMD,
|
||||
|
|
@ -47,6 +47,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
|
|||
struct iwl_proto_offload_cmd_common *common;
|
||||
u32 enabled = 0, size;
|
||||
u32 capa_flags = mvm->fw->ucode_capa.flags;
|
||||
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
|
||||
PROT_OFFLOAD_CONFIG_CMD, 0);
|
||||
|
||||
#if IS_ENABLED(CONFIG_IPV6)
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
int i;
|
||||
|
|
@ -72,9 +75,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
|
|||
addrs = cmd.v3s.targ_addrs;
|
||||
n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
|
||||
} else {
|
||||
nsc = cmd.v3l.ns_config;
|
||||
nsc = cmd.v4.ns_config;
|
||||
n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
|
||||
addrs = cmd.v3l.targ_addrs;
|
||||
addrs = cmd.v4.targ_addrs;
|
||||
n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
|
||||
}
|
||||
|
||||
|
|
@ -116,7 +119,7 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
|
|||
cmd.v3s.num_valid_ipv6_addrs =
|
||||
cpu_to_le32(i - num_skipped);
|
||||
else
|
||||
cmd.v3l.num_valid_ipv6_addrs =
|
||||
cmd.v4.num_valid_ipv6_addrs =
|
||||
cpu_to_le32(i - num_skipped);
|
||||
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
|
||||
bool found = false;
|
||||
|
|
@ -171,8 +174,17 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
|
|||
common = &cmd.v3s.common;
|
||||
size = sizeof(cmd.v3s);
|
||||
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
|
||||
common = &cmd.v3l.common;
|
||||
size = sizeof(cmd.v3l);
|
||||
common = &cmd.v4.common;
|
||||
size = sizeof(cmd.v4);
|
||||
if (ver < 4) {
|
||||
/*
|
||||
* This basically uses iwl_proto_offload_cmd_v3_large
|
||||
* which doesn't have the sta_id parameter before the
|
||||
* common part.
|
||||
*/
|
||||
size -= sizeof(cmd.v4.sta_id);
|
||||
hcmd.data[0] = common;
|
||||
}
|
||||
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
|
||||
common = &cmd.v2.common;
|
||||
size = sizeof(cmd.v2);
|
||||
|
|
|
|||
|
|
@ -210,6 +210,39 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
|
|||
ieee80211_disconnect(vif, true);
|
||||
}
|
||||
|
||||
void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif)
|
||||
{
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
struct iwl_mvm *mvm = mvmvif->mvm;
|
||||
|
||||
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW,
|
||||
mvm->fw_static_smps_request ?
|
||||
IEEE80211_SMPS_STATIC :
|
||||
IEEE80211_SMPS_AUTOMATIC);
|
||||
}
|
||||
|
||||
static void iwl_mvm_intf_dual_chain_req(void *data, u8 *mac,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
iwl_mvm_apply_fw_smps_request(vif);
|
||||
}
|
||||
|
||||
static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
|
||||
struct iwl_rx_cmd_buffer *rxb)
|
||||
{
|
||||
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
||||
struct iwl_thermal_dual_chain_request *req = (void *)pkt->data;
|
||||
|
||||
/*
|
||||
* We could pass it to the iterator data, but also need to remember
|
||||
* it for new interfaces that are added while in this state.
|
||||
*/
|
||||
mvm->fw_static_smps_request =
|
||||
req->event == cpu_to_le32(THERMAL_DUAL_CHAIN_REQ_DISABLE);
|
||||
ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
|
||||
iwl_mvm_intf_dual_chain_req, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* enum iwl_rx_handler_context context for Rx handler
|
||||
* @RX_HANDLER_SYNC : this means that it will be called in the Rx path
|
||||
|
|
@ -358,6 +391,11 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
|
|||
RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
|
||||
iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
|
||||
struct iwl_datapath_monitor_notif),
|
||||
|
||||
RX_HANDLER_GRP(DATA_PATH_GROUP, THERMAL_DUAL_CHAIN_REQUEST,
|
||||
iwl_mvm_rx_thermal_dual_chain_req,
|
||||
RX_HANDLER_ASYNC_LOCKED,
|
||||
struct iwl_thermal_dual_chain_request),
|
||||
};
|
||||
#undef RX_HANDLER
|
||||
#undef RX_HANDLER_GRP
|
||||
|
|
@ -445,7 +483,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
|
|||
HCMD_NAME(D3_CONFIG_CMD),
|
||||
HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
|
||||
HCMD_NAME(OFFLOADS_QUERY_CMD),
|
||||
HCMD_NAME(REMOTE_WAKE_CONFIG_CMD),
|
||||
HCMD_NAME(MATCH_FOUND_NOTIFICATION),
|
||||
HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
|
||||
HCMD_NAME(WOWLAN_PATTERNS),
|
||||
|
|
@ -503,6 +540,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
|
|||
HCMD_NAME(TLC_MNG_CONFIG_CMD),
|
||||
HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
|
||||
HCMD_NAME(MONITOR_NOTIF),
|
||||
HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
|
||||
HCMD_NAME(STA_PM_NOTIF),
|
||||
HCMD_NAME(MU_GROUP_MGMT_NOTIF),
|
||||
HCMD_NAME(RX_QUEUES_NOTIFICATION),
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
/*
|
||||
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
|
||||
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
|
||||
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
|
||||
* Copyright (C) 2017 Intel Deutschland GmbH
|
||||
*/
|
||||
|
|
@ -76,6 +76,7 @@ static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,
|
|||
}
|
||||
|
||||
static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_phy_ctxt *ctxt,
|
||||
__le32 *rxchain_info,
|
||||
u8 chains_static,
|
||||
u8 chains_dynamic)
|
||||
|
|
@ -93,11 +94,22 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
|
|||
* between the two antennas is sufficiently different to impact
|
||||
* performance.
|
||||
*/
|
||||
if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) {
|
||||
if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm, ctxt)) {
|
||||
idle_cnt = 2;
|
||||
active_cnt = 2;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the firmware requested it, then we know that it supports
|
||||
* getting zero for the values to indicate "use one, but pick
|
||||
* which one yourself", which means it can dynamically pick one
|
||||
* that e.g. has better RSSI.
|
||||
*/
|
||||
if (mvm->fw_static_smps_request && active_cnt == 1 && idle_cnt == 1) {
|
||||
idle_cnt = 0;
|
||||
active_cnt = 0;
|
||||
}
|
||||
|
||||
*rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
|
||||
PHY_RX_CHAIN_VALID_POS);
|
||||
*rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
|
||||
|
|
@ -113,6 +125,7 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
|
|||
* Add the phy configuration to the PHY context command
|
||||
*/
|
||||
static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_phy_ctxt *ctxt,
|
||||
struct iwl_phy_context_cmd_v1 *cmd,
|
||||
struct cfg80211_chan_def *chandef,
|
||||
u8 chains_static, u8 chains_dynamic)
|
||||
|
|
@ -123,7 +136,7 @@ static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
|
|||
/* Set the channel info data */
|
||||
iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
|
||||
|
||||
iwl_mvm_phy_ctxt_set_rxchain(mvm, &tail->rxchain_info,
|
||||
iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &tail->rxchain_info,
|
||||
chains_static, chains_dynamic);
|
||||
|
||||
tail->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
|
||||
|
|
@ -133,6 +146,7 @@ static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
|
|||
* Add the phy configuration to the PHY context command
|
||||
*/
|
||||
static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_phy_ctxt *ctxt,
|
||||
struct iwl_phy_context_cmd *cmd,
|
||||
struct cfg80211_chan_def *chandef,
|
||||
u8 chains_static, u8 chains_dynamic)
|
||||
|
|
@ -143,7 +157,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
|
|||
/* Set the channel info data */
|
||||
iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
|
||||
|
||||
iwl_mvm_phy_ctxt_set_rxchain(mvm, &cmd->rxchain_info,
|
||||
iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info,
|
||||
chains_static, chains_dynamic);
|
||||
}
|
||||
|
||||
|
|
@ -170,7 +184,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
|
|||
iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action);
|
||||
|
||||
/* Set the command data */
|
||||
iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
|
||||
iwl_mvm_phy_ctxt_cmd_data(mvm, ctxt, &cmd, chandef,
|
||||
chains_static,
|
||||
chains_dynamic);
|
||||
|
||||
|
|
@ -186,7 +200,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
|
|||
action);
|
||||
|
||||
/* Set the command data */
|
||||
iwl_mvm_phy_ctxt_cmd_data_v1(mvm, &cmd, chandef,
|
||||
iwl_mvm_phy_ctxt_cmd_data_v1(mvm, ctxt, &cmd, chandef,
|
||||
chains_static,
|
||||
chains_dynamic);
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD,
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
/*
|
||||
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
|
||||
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
|
||||
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
|
||||
* Copyright (C) 2015-2017 Intel Deutschland GmbH
|
||||
*/
|
||||
|
|
@ -2001,8 +2001,10 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
|
|||
struct sk_buff *skb;
|
||||
u8 channel, energy_a, energy_b;
|
||||
struct iwl_mvm_rx_phy_data phy_data = {
|
||||
.info_type = le32_get_bits(desc->phy_info[1],
|
||||
IWL_RX_PHY_DATA1_INFO_TYPE_MASK),
|
||||
.d0 = desc->phy_info[0],
|
||||
.info_type = IWL_RX_PHY_INFO_TYPE_NONE,
|
||||
.d1 = desc->phy_info[1],
|
||||
};
|
||||
|
||||
if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*desc)))
|
||||
|
|
@ -2015,10 +2017,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
|
|||
energy_b = (rssi & RX_NO_DATA_CHAIN_B_MSK) >> RX_NO_DATA_CHAIN_B_POS;
|
||||
channel = (rssi & RX_NO_DATA_CHANNEL_MSK) >> RX_NO_DATA_CHANNEL_POS;
|
||||
|
||||
phy_data.info_type =
|
||||
le32_get_bits(desc->phy_info[1],
|
||||
IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
|
||||
|
||||
/* Dont use dev_alloc_skb(), we'll have enough headroom once
|
||||
* ieee80211_hdr pulled.
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
/*
|
||||
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
|
||||
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
|
||||
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
|
||||
* Copyright (C) 2016-2017 Intel Deutschland GmbH
|
||||
*/
|
||||
|
|
@ -2327,9 +2327,9 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
&scan_p->general_params,
|
||||
gen_flags);
|
||||
|
||||
ret = iwl_mvm_fill_scan_sched_params(params,
|
||||
scan_p->periodic_params.schedule,
|
||||
&scan_p->periodic_params.delay);
|
||||
ret = iwl_mvm_fill_scan_sched_params(params,
|
||||
scan_p->periodic_params.schedule,
|
||||
&scan_p->periodic_params.delay);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
|
@ -2362,9 +2362,9 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
&scan_p->general_params,
|
||||
gen_flags);
|
||||
|
||||
ret = iwl_mvm_fill_scan_sched_params(params,
|
||||
scan_p->periodic_params.schedule,
|
||||
&scan_p->periodic_params.delay);
|
||||
ret = iwl_mvm_fill_scan_sched_params(params,
|
||||
scan_p->periodic_params.schedule,
|
||||
&scan_p->periodic_params.delay);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
|
|
|||
|
|
@ -3794,8 +3794,12 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
|
|||
|
||||
mvm_sta->disable_tx = disable;
|
||||
|
||||
/* Tell mac80211 to start/stop queuing tx for this station */
|
||||
ieee80211_sta_block_awake(mvm->hw, sta, disable);
|
||||
/*
|
||||
* If sta PS state is handled by mac80211, tell it to start/stop
|
||||
* queuing tx for this station.
|
||||
*/
|
||||
if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
|
||||
ieee80211_sta_block_awake(mvm->hw, sta, disable);
|
||||
|
||||
iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
|
||||
|
||||
|
|
|
|||
|
|
@@ -31,6 +31,13 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
		return;

	list_del(&te_data->list);

	/*
	 * the list is only used for AUX ROC events so make sure it is always
	 * initialized
	 */
	INIT_LIST_HEAD(&te_data->list);

	te_data->running = false;
	te_data->uid = 0;
	te_data->id = TE_MAX;

@@ -310,6 +317,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
		 * and know the dtim period.
		 */
		iwl_mvm_te_check_disconnect(mvm, te_data->vif,
			!te_data->vif->bss_conf.assoc ?
			"Not associated and the time event is over already..." :
			"No beacon heard and the time event is over already...");
		break;
	default:

@@ -607,14 +616,15 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
}

static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif)
					      struct iwl_mvm_vif *mvmvif,
					      u32 id)
{
	struct iwl_mvm_session_prot_cmd cmd = {
		.id_and_color =
			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							mvmvif->color)),
		.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
		.conf_id = cpu_to_le32(mvmvif->time_event_data.id),
		.conf_id = cpu_to_le32(id),
	};
	int ret;

@@ -632,6 +642,12 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
{
	u32 id;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
	enum nl80211_iftype iftype;

	if (!te_data->vif)
		return false;

	iftype = te_data->vif->type;

	/*
	 * It is possible that by the time we got to this point the time

@@ -656,8 +672,8 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
			 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
		if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
			/* Session protection is still ongoing. Cancel it */
			iwl_mvm_cancel_session_protection(mvm, mvmvif);
			if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			iwl_mvm_cancel_session_protection(mvm, mvmvif, id);
			if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
				set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
				iwl_mvm_roc_finished(mvm);
			}

@@ -738,11 +754,6 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
		IWL_ERR(mvm, "Couldn't remove the time event\n");
}

/*
 * When the firmware supports the session protection API,
 * this is not needed since it'll automatically remove the
 * session protection after association + beacon reception.
 */
void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif)
{

@@ -756,7 +767,15 @@ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
	id = te_data->id;
	spin_unlock_bh(&mvm->time_event_lock);

	if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
		if (id != SESSION_PROTECT_CONF_ASSOC) {
			IWL_DEBUG_TE(mvm,
				     "don't remove session protection id=%u\n",
				     id);
			return;
		}
	} else if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
		IWL_DEBUG_TE(mvm,
			     "don't remove TE with id=%u (not session protection)\n",
			     id);

@@ -808,6 +827,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
		 * and know the dtim period.
		 */
		iwl_mvm_te_check_disconnect(mvm, vif,
			!vif->bss_conf.assoc ?
			"Not associated and the session protection is over already..." :
			"No beacon heard and the session protection is over already...");
		spin_lock_bh(&mvm->time_event_lock);
		iwl_mvm_te_clear_data(mvm, te_data);

@@ -981,7 +1002,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
	mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		iwl_mvm_cancel_session_protection(mvm, mvmvif);
		iwl_mvm_cancel_session_protection(mvm, mvmvif,
						  mvmvif->time_event_data.id);
		set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
	} else {
		iwl_mvm_remove_aux_roc_te(mvm, mvmvif,

@@ -1141,6 +1163,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,

	iwl_mvm_te_clear_data(mvm, te_data);
	te_data->duration = le32_to_cpu(cmd.duration_tu);
	te_data->vif = vif;
	spin_unlock_bh(&mvm->time_event_lock);

	IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
 * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 */

@@ -238,316 +238,6 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
	return last_idx;
}

/*
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwl_error_event_table_v1 {
	u32 valid; /* (nonzero) valid, (0) log is empty */
	u32 error_id; /* type of error */
	u32 pc; /* program counter */
	u32 blink1; /* branch link */
	u32 blink2; /* branch link */
	u32 ilink1; /* interrupt link */
	u32 ilink2; /* interrupt link */
	u32 data1; /* error-specific data */
	u32 data2; /* error-specific data */
	u32 data3; /* error-specific data */
	u32 bcon_time; /* beacon timer */
	u32 tsf_low; /* network timestamp function timer */
	u32 tsf_hi; /* network timestamp function timer */
	u32 gp1; /* GP1 timer register */
	u32 gp2; /* GP2 timer register */
	u32 gp3; /* GP3 timer register */
	u32 ucode_ver; /* uCode version */
	u32 hw_ver; /* HW Silicon version */
	u32 brd_ver; /* HW board version */
	u32 log_pc; /* log program counter */
	u32 frame_ptr; /* frame pointer */
	u32 stack_ptr; /* stack pointer */
	u32 hcmd; /* last host command header */
	u32 isr0; /* isr status register LMPM_NIC_ISR0:
		   * rxtx_flag */
	u32 isr1; /* isr status register LMPM_NIC_ISR1:
		   * host_flag */
	u32 isr2; /* isr status register LMPM_NIC_ISR2:
		   * enc_flag */
	u32 isr3; /* isr status register LMPM_NIC_ISR3:
		   * time_flag */
	u32 isr4; /* isr status register LMPM_NIC_ISR4:
		   * wico interrupt */
	u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
	u32 wait_event; /* wait event() caller address */
	u32 l2p_control; /* L2pControlField */
	u32 l2p_duration; /* L2pDurationField */
	u32 l2p_mhvalid; /* L2pMhValidBits */
	u32 l2p_addr_match; /* L2pAddrMatchStat */
	u32 lmpm_pmg_sel; /* indicate which clocks are turned on
			   * (LMPM_PMG_SEL) */
	u32 u_timestamp; /* indicate when the date and time of the
			  * compilation */
	u32 flow_handler; /* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;

struct iwl_error_event_table {
	u32 valid; /* (nonzero) valid, (0) log is empty */
	u32 error_id; /* type of error */
	u32 trm_hw_status0; /* TRM HW status */
	u32 trm_hw_status1; /* TRM HW status */
	u32 blink2; /* branch link */
	u32 ilink1; /* interrupt link */
	u32 ilink2; /* interrupt link */
	u32 data1; /* error-specific data */
	u32 data2; /* error-specific data */
	u32 data3; /* error-specific data */
	u32 bcon_time; /* beacon timer */
	u32 tsf_low; /* network timestamp function timer */
	u32 tsf_hi; /* network timestamp function timer */
	u32 gp1; /* GP1 timer register */
	u32 gp2; /* GP2 timer register */
	u32 fw_rev_type; /* firmware revision type */
	u32 major; /* uCode version major */
	u32 minor; /* uCode version minor */
	u32 hw_ver; /* HW Silicon version */
	u32 brd_ver; /* HW board version */
	u32 log_pc; /* log program counter */
	u32 frame_ptr; /* frame pointer */
	u32 stack_ptr; /* stack pointer */
	u32 hcmd; /* last host command header */
	u32 isr0; /* isr status register LMPM_NIC_ISR0:
		   * rxtx_flag */
	u32 isr1; /* isr status register LMPM_NIC_ISR1:
		   * host_flag */
	u32 isr2; /* isr status register LMPM_NIC_ISR2:
		   * enc_flag */
	u32 isr3; /* isr status register LMPM_NIC_ISR3:
		   * time_flag */
	u32 isr4; /* isr status register LMPM_NIC_ISR4:
		   * wico interrupt */
	u32 last_cmd_id; /* last HCMD id handled by the firmware */
	u32 wait_event; /* wait event() caller address */
	u32 l2p_control; /* L2pControlField */
	u32 l2p_duration; /* L2pDurationField */
	u32 l2p_mhvalid; /* L2pMhValidBits */
	u32 l2p_addr_match; /* L2pAddrMatchStat */
	u32 lmpm_pmg_sel; /* indicate which clocks are turned on
			   * (LMPM_PMG_SEL) */
	u32 u_timestamp; /* indicate when the date and time of the
			  * compilation */
	u32 flow_handler; /* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;

/*
 * UMAC error struct - relevant starting from family 8000 chip.
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with u32-sized accesses, any members with a different size
 * need to be ordered correctly though!
 */
struct iwl_umac_error_event_table {
	u32 valid; /* (nonzero) valid, (0) log is empty */
	u32 error_id; /* type of error */
	u32 blink1; /* branch link */
	u32 blink2; /* branch link */
	u32 ilink1; /* interrupt link */
	u32 ilink2; /* interrupt link */
	u32 data1; /* error-specific data */
	u32 data2; /* error-specific data */
	u32 data3; /* error-specific data */
	u32 umac_major;
	u32 umac_minor;
	u32 frame_pointer; /* core register 27*/
	u32 stack_pointer; /* core register 28 */
	u32 cmd_header; /* latest host cmd sent to UMAC */
	u32 nic_isr_pref; /* ISR status register */
} __packed;

#define ERROR_START_OFFSET  (1 * sizeof(u32))
#define ERROR_ELEM_SIZE     (7 * sizeof(u32))

static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_umac_error_event_table table = {};
	u32 base = mvm->trans->dbg.umac_error_event_table;

	if (!base &&
	    !(mvm->trans->dbg.error_event_table_tlv_status &
	      IWL_ERROR_EVENT_TABLE_UMAC))
		return;

	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

	if (table.valid)
		mvm->fwrt.dump.umac_err_id = table.error_id;

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			mvm->status, table.valid);
	}

	IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
		iwl_fw_lookup_assert_desc(table.error_id));
	IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
	IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
	IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
	IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
	IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
	IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
	IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
	IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
	IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
	IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
	IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
	IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
	IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
}

static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num)
{
	struct iwl_trans *trans = mvm->trans;
	struct iwl_error_event_table table = {};
	u32 val, base = mvm->trans->dbg.lmac_error_event_table[lmac_num];

	if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) {
		if (!base)
			base = mvm->fw->init_errlog_ptr;
	} else {
		if (!base)
			base = mvm->fw->inst_errlog_ptr;
	}

	if (base < 0x400000) {
		IWL_ERR(mvm,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base,
			(mvm->fwrt.cur_fw_img == IWL_UCODE_INIT)
			? "Init" : "RT");
		return;
	}

	/* check if there is a HW error */
	val = iwl_trans_read_mem32(trans, base);
	if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
		int err;

		IWL_ERR(trans, "HW error, resetting before reading\n");

		/* reset the device */
		iwl_trans_sw_reset(trans);

		err = iwl_finish_nic_init(trans, trans->trans_cfg);
		if (err)
			return;
	}

	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

	if (table.valid)
		mvm->fwrt.dump.lmac_err_id[lmac_num] = table.error_id;

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
			mvm->status, table.valid);
	}

	/* Do not change this output - scripts rely on it */

	IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);

	IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
		iwl_fw_lookup_assert_desc(table.error_id));
	IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
	IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
	IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
	IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
	IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
	IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
	IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
	IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
	IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
	IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
	IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
	IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
	IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
	IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type);
	IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
	IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
	IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
	IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
	IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
	IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
	IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
	IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
	IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
	IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
	IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id);
	IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
	IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
	IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
	IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
	IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
	IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
	IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
}

static void iwl_mvm_dump_iml_error_log(struct iwl_mvm *mvm)
{
	struct iwl_trans *trans = mvm->trans;
	u32 error, data1;

	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
		error = UMAG_SB_CPU_2_STATUS;
		data1 = UMAG_SB_CPU_1_STATUS;
	} else if (mvm->trans->trans_cfg->device_family >=
		   IWL_DEVICE_FAMILY_8000) {
		error = SB_CPU_2_STATUS;
		data1 = SB_CPU_1_STATUS;
	} else {
		return;
	}

	error = iwl_read_umac_prph(trans, UMAG_SB_CPU_2_STATUS);

	IWL_ERR(trans, "IML/ROM dump:\n");

	if (error & 0xFFFF0000)
		IWL_ERR(trans, "0x%04X | IML/ROM SYSASSERT\n", error >> 16);

	IWL_ERR(mvm, "0x%08X | IML/ROM error/state\n", error);
	IWL_ERR(mvm, "0x%08X | IML/ROM data1\n",
		iwl_read_umac_prph(trans, data1));

	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000)
		IWL_ERR(mvm, "0x%08X | IML/ROM WFPM_AUTH_KEY_0\n",
			iwl_read_umac_prph(trans, SB_MODIFY_CFG_FLAG));
}

void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
	if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
		IWL_ERR(mvm,
			"DEVICE_ENABLED bit is not set. Aborting dump.\n");
		return;
	}

	iwl_mvm_dump_lmac_error_log(mvm, 0);

	if (mvm->trans->dbg.lmac_error_event_table[1])
		iwl_mvm_dump_lmac_error_log(mvm, 1);

	iwl_mvm_dump_umac_error_log(mvm);

	iwl_mvm_dump_iml_error_log(mvm);

	iwl_fw_error_print_fseq_regs(&mvm->fwrt);
}

int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
			 int tid, int frame_limit, u16 ssn)
{

@@ -621,7 +311,7 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			 enum ieee80211_smps_mode smps_request)
{
	struct iwl_mvm_vif *mvmvif;
	enum ieee80211_smps_mode smps_mode;
	enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
	int i;

	lockdep_assert_held(&mvm->mutex);

@@ -630,10 +320,8 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
		return;

	if (vif->type == NL80211_IFTYPE_AP)
		smps_mode = IEEE80211_SMPS_OFF;
	else
		smps_mode = IEEE80211_SMPS_AUTOMATIC;
	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	mvmvif = iwl_mvm_vif_from_mac80211(vif);
	mvmvif->smps_requests[req_type] = smps_request;

@@ -683,23 +371,37 @@ void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
	mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
}

struct iwl_mvm_diversity_iter_data {
	struct iwl_mvm_phy_ctxt *ctxt;
	bool result;
};

static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
				   struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	bool *result = _data;
	struct iwl_mvm_diversity_iter_data *data = _data;
	int i;

	if (mvmvif->phy_ctxt != data->ctxt)
		return;

	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
		    mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
			*result = false;
		    mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC) {
			data->result = false;
			break;
		}
	}
}

bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
				  struct iwl_mvm_phy_ctxt *ctxt)
{
	bool result = true;
	struct iwl_mvm_diversity_iter_data data = {
		.ctxt = ctxt,
		.result = true,
	};

	lockdep_assert_held(&mvm->mutex);

@@ -711,9 +413,9 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)

	ieee80211_iterate_active_interfaces_atomic(
			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_diversity_iter, &result);
			iwl_mvm_diversity_iter, &data);

	return result;
	return data.result;
}

void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,

@@ -1398,7 +1100,8 @@ u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
	return iwl_read_prph(mvm->trans, reg_addr);
}

void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type,
			   u32 *gp2, u64 *boottime, ktime_t *realtime)
{
	bool ps_disabled;

@@ -1412,7 +1115,11 @@ void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
	}

	*gp2 = iwl_mvm_get_systime(mvm);
	*boottime = ktime_get_boottime_ns();

	if (clock_type == CLOCK_BOOTTIME && boottime)
		*boottime = ktime_get_boottime_ns();
	else if (clock_type == CLOCK_REALTIME && realtime)
		*realtime = ktime_get_real();

	if (!ps_disabled) {
		mvm->ps_disabled = ps_disabled;

@@ -79,7 +79,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
	struct iwl_prph_scratch *prph_scratch;
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
	struct iwl_prph_info *prph_info;
	void *iml_img;
	u32 control_flags = 0;
	int ret;
	int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,

@@ -138,8 +137,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,

	/* Allocate prph information
	 * currently we don't assign to the prph info anything, but it would get
	 * assigned later */
	prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
	 * assigned later
	 *
	 * We also use the second half of this page to give the device some
	 * dummy TR/CR tail pointers - which shouldn't be necessary as we don't
	 * use this, but the hardware still reads/writes there and we can't let
	 * it go do that with a NULL pointer.
	 */
	BUILD_BUG_ON(sizeof(*prph_info) > PAGE_SIZE / 2);
	prph_info = dma_alloc_coherent(trans->dev, PAGE_SIZE,
				       &trans_pcie->prph_info_dma_addr,
				       GFP_KERNEL);
	if (!prph_info) {

@@ -166,13 +172,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
	ctxt_info_gen3->cr_head_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->rxq->tr_tail_dma);
		cpu_to_le64(trans_pcie->prph_info_dma_addr + PAGE_SIZE / 2);
	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
		cpu_to_le64(trans_pcie->rxq->cr_tail_dma);
	ctxt_info_gen3->cr_idx_arr_size =
		cpu_to_le16(IWL_NUM_OF_COMPLETION_RINGS);
	ctxt_info_gen3->tr_idx_arr_size =
		cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
		cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
	ctxt_info_gen3->mtr_base_addr =
		cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
	ctxt_info_gen3->mcr_base_addr =

@@ -187,14 +189,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
	trans_pcie->prph_scratch = prph_scratch;

	/* Allocate IML */
	iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
				     &trans_pcie->iml_dma_addr, GFP_KERNEL);
	if (!iml_img) {
	trans_pcie->iml = dma_alloc_coherent(trans->dev, trans->iml_len,
					     &trans_pcie->iml_dma_addr,
					     GFP_KERNEL);
	if (!trans_pcie->iml) {
		ret = -ENOMEM;
		goto err_free_ctxt_info;
	}

	memcpy(iml_img, trans->iml, trans->iml_len);
	memcpy(trans_pcie->iml, trans->iml, trans->iml_len);

	iwl_enable_fw_load_int_ctx_info(trans);

@@ -216,10 +219,8 @@ err_free_ctxt_info:
			  trans_pcie->ctxt_info_dma_addr);
	trans_pcie->ctxt_info_gen3 = NULL;
err_free_prph_info:
	dma_free_coherent(trans->dev,
			  sizeof(*prph_info),
			  prph_info,
			  trans_pcie->prph_info_dma_addr);
	dma_free_coherent(trans->dev, PAGE_SIZE, prph_info,
			  trans_pcie->prph_info_dma_addr);

err_free_prph_scratch:
	dma_free_coherent(trans->dev,

@@ -230,29 +231,40 @@ err_free_prph_scratch:

}

void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans, bool alive)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->iml) {
		dma_free_coherent(trans->dev, trans->iml_len, trans_pcie->iml,
				  trans_pcie->iml_dma_addr);
		trans_pcie->iml_dma_addr = 0;
		trans_pcie->iml = NULL;
	}

	iwl_pcie_ctxt_info_free_fw_img(trans);

	if (alive)
		return;

	if (!trans_pcie->ctxt_info_gen3)
		return;

	/* ctxt_info_gen3 and prph_scratch are still needed for PNVM load */
	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
			  trans_pcie->ctxt_info_gen3,
			  trans_pcie->ctxt_info_dma_addr);
	trans_pcie->ctxt_info_dma_addr = 0;
	trans_pcie->ctxt_info_gen3 = NULL;

	iwl_pcie_ctxt_info_free_fw_img(trans);

	dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
			  trans_pcie->prph_scratch,
			  trans_pcie->prph_scratch_dma_addr);
	trans_pcie->prph_scratch_dma_addr = 0;
	trans_pcie->prph_scratch = NULL;

	dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_info),
			  trans_pcie->prph_info,
	/* this is needed for the entire lifetime */
	dma_free_coherent(trans->dev, PAGE_SIZE, trans_pcie->prph_info,
			  trans_pcie->prph_info_dma_addr);
	trans_pcie->prph_info_dma_addr = 0;
	trans_pcie->prph_info = NULL;

@@ -290,3 +302,37 @@ int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,

	return 0;
}

int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
						  const void *data, u32 len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
		&trans_pcie->prph_scratch->ctrl_cfg;
	int ret;

	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return 0;

	/* only allocate the DRAM if not allocated yet */
	if (!trans->reduce_power_loaded) {
		if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size))
			return -EBUSY;

		ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
						   &trans_pcie->reduce_power_dram);
		if (ret < 0) {
			IWL_DEBUG_FW(trans,
				     "Failed to allocate reduce power DMA %d.\n",
				     ret);
			return ret;
		}
	}

	prph_sc_ctrl->reduce_power_cfg.base_addr =
		cpu_to_le64(trans_pcie->reduce_power_dram.physical);
	prph_sc_ctrl->reduce_power_cfg.size =
		cpu_to_le32(trans_pcie->reduce_power_dram.size);

	return 0;
}

@@ -532,6 +532,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
	IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
	IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
	IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
	IWL_DEV_INFO(0x51F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
	IWL_DEV_INFO(0x51F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name),

	IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name),

@@ -1029,6 +1031,11 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
		      IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
		      iwl_cfg_ma_a0_mr_a0, iwl_ax221_name),
	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
		      IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
		      IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
		      IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
		      iwl_cfg_ma_a0_fm_a0, iwl_ax231_name),
	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
		      IWL_CFG_MAC_TYPE_SNJ, IWL_CFG_ANY,
		      IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,

@@ -1209,14 +1216,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	if (cfg == &iwlax210_2ax_cfg_so_hr_a0) {
		if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_TY) {
			iwl_trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0;
		} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
			   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
		} else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
			   CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF)) {
			iwl_trans->cfg = &iwlax210_2ax_cfg_so_jf_b0;
		} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
			   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
		} else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
			   CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF)) {
			iwl_trans->cfg = &iwlax211_2ax_cfg_so_gf_a0;
		} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
			   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
		} else if (CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) ==
			   CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF4)) {
			iwl_trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
		}
	}

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2003-2015, 2018-2020 Intel Corporation
 * Copyright (C) 2003-2015, 2018-2021 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */

@@ -109,12 +109,8 @@ struct iwl_rx_completion_desc {
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free

@@ -142,10 +138,6 @@ struct iwl_rxq {
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;

@@ -279,6 +271,8 @@ struct cont_rec {
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @iml: image loader image virtual address
 * @iml_dma_addr: image loader image DMA address
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @kw: keep warm address

@@ -317,6 +311,7 @@ struct cont_rec {
 * @alloc_page_lock: spinlock for the page allocator
 * @alloc_page: allocated page to still use parts of
 * @alloc_page_used: how much of the allocated page was already used (bytes)
 * @rf_name: name/version of the CRF, if any
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;

@@ -329,6 +324,7 @@ struct iwl_trans_pcie {
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	void *iml;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;

@@ -353,6 +349,7 @@ struct iwl_trans_pcie {
	struct iwl_dma_ptr kw;

	struct iwl_dram_data pnvm_dram;
	struct iwl_dram_data reduce_power_dram;

	struct iwl_txq *txq_memory;

@@ -409,6 +406,8 @@ struct iwl_trans_pcie {
	bool fw_reset_handshake;
	bool fw_reset_done;
	wait_queue_head_t fw_reset_waitq;

	char rf_name[32];
};

static inline struct iwl_trans_pcie *

@@ -530,9 +529,6 @@ static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	527

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{

@@ -663,7 +663,6 @@ static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct device *dev = trans->dev;
	bool use_rx_td = (trans->trans_cfg->device_family >=
			  IWL_DEVICE_FAMILY_AX210);
	int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

@@ -685,21 +684,6 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
				  rxq->used_bd, rxq->used_bd_dma);
	rxq->used_bd_dma = 0;
	rxq->used_bd = NULL;

	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		return;

	if (rxq->tr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->tr_tail, rxq->tr_tail_dma);
	rxq->tr_tail_dma = 0;
	rxq->tr_tail = NULL;

	if (rxq->cr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->cr_tail, rxq->cr_tail_dma);
	rxq->cr_tail_dma = 0;
	rxq->cr_tail = NULL;
}

static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,

@@ -744,21 +728,6 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
	rxq->rb_stts_dma =
		trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;

	if (!use_rx_td)
		return 0;

	/* Allocate the driver's pointer to TR tail */
	rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
					  &rxq->tr_tail_dma, GFP_KERNEL);
	if (!rxq->tr_tail)
		goto err;

	/* Allocate the driver's pointer to CR tail */
	rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
					  &rxq->cr_tail_dma, GFP_KERNEL);
	if (!rxq->cr_tail)
		goto err;

	return 0;

err:

@@ -1590,9 +1559,6 @@ restart:
out:
	/* Backtrack one entry */
	rxq->read = i;
	/* update cr tail with the rxq read pointer */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		*rxq->cr_tail = cpu_to_le16(r);
	spin_unlock(&rxq->lock);

	/*

@@ -149,7 +149,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)

	iwl_pcie_ctxt_info_free_paging(trans);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		iwl_pcie_ctxt_info_gen3_free(trans);
		iwl_pcie_ctxt_info_gen3_free(trans, false);
	else
		iwl_pcie_ctxt_info_free(trans);

@@ -240,6 +240,75 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
	return 0;
}

static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	char *buf = trans_pcie->rf_name;
	size_t buflen = sizeof(trans_pcie->rf_name);
	size_t pos;
	u32 version;

	if (buf[0])
		return;

	switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
	case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF):
		pos = scnprintf(buf, buflen, "JF");
		break;
	case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF):
		pos = scnprintf(buf, buflen, "GF");
		break;
	case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_GF4):
		pos = scnprintf(buf, buflen, "GF4");
		break;
	case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR):
		pos = scnprintf(buf, buflen, "HR");
		break;
	case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1):
		pos = scnprintf(buf, buflen, "HR1");
		break;
	case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):
		pos = scnprintf(buf, buflen, "HRCDB");
		break;
	default:
		return;
	}

	switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
	case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR):
	case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HR1):
	case CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_HRCDB):
		version = iwl_read_prph(trans, CNVI_MBOX_C);
		switch (version) {
		case 0x20000:
			pos += scnprintf(buf + pos, buflen - pos, " B3");
			break;
		case 0x120000:
			pos += scnprintf(buf + pos, buflen - pos, " B5");
			break;
		default:
			pos += scnprintf(buf + pos, buflen - pos,
					 " (0x%x)", version);
			break;
		}
		break;
	default:
		break;
	}

	pos += scnprintf(buf + pos, buflen - pos, ", rfid=0x%x",
			 trans->hw_rf_id);

	IWL_INFO(trans, "Detected RF %s\n", buf);

	/*
	 * also add a \n for debugfs - need to do it after printing
	 * since our IWL_INFO machinery wants to see a static \n at
	 * the end of the string
	 */
	pos += scnprintf(buf + pos, buflen - pos, "\n");
}

void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

@@ -254,7 +323,10 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
	/* now that we got alive we can free the fw image & the context info.
	 * paging memory cannot be freed included since FW will still use it
	 */
	iwl_pcie_ctxt_info_free(trans);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		iwl_pcie_ctxt_info_gen3_free(trans, true);
	else
		iwl_pcie_ctxt_info_free(trans);

	/*
	 * Re-enable all the interrupts, including the RF-Kill one, now that

@@ -263,6 +335,8 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
	iwl_enable_interrupts(trans);
	mutex_lock(&trans_pcie->mutex);
	iwl_pcie_check_hw_rf_kill(trans);

	iwl_pcie_get_rf_name(trans);
	mutex_unlock(&trans_pcie->mutex);
}

@@ -1648,7 +1648,7 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
		if (ret)
			IWL_ERR(trans_pcie->trans,
				"Failed to set affinity mask for IRQ %d\n",
				i);
				trans_pcie->msix_entries[i].vector);
	}
}

@@ -1943,6 +1943,12 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
				  trans_pcie->pnvm_dram.block,
				  trans_pcie->pnvm_dram.physical);

	if (trans_pcie->reduce_power_dram.size)
		dma_free_coherent(trans->dev,
				  trans_pcie->reduce_power_dram.size,
				  trans_pcie->reduce_power_dram.block,
				  trans_pcie->reduce_power_dram.physical);

	mutex_destroy(&trans_pcie->mutex);
	iwl_trans_free(trans);
}

@@ -2848,11 +2854,28 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
	return bytes_copied;
}

static ssize_t iwl_dbgfs_rf_read(struct file *file,
				 char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->rf_name[0])
		return -ENODEV;

	return simple_read_from_buffer(user_buf, count, ppos,
				       trans_pcie->rf_name,
				       strlen(trans_pcie->rf_name));
}

DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
DEBUGFS_READ_FILE_OPS(rf);

static const struct file_operations iwl_dbgfs_tx_queue_ops = {
	.owner = THIS_MODULE,
	.open = iwl_dbgfs_tx_queue_open,

@@ -2879,6 +2902,7 @@ void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
	DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
	DEBUGFS_ADD_FILE(rfkill, dir, 0600);
	DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
	DEBUGFS_ADD_FILE(rf, dir, 0400);
}

static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)

@@ -3400,6 +3424,7 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
	.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
	.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
	.set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
	.set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power,
#ifdef CONFIG_IWLWIFI_DEBUGFS
	.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
#endif

@@ -3413,6 +3438,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
	struct iwl_trans *trans;
	int ret, addr_size;
	const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
	void __iomem * const *table;

	if (!cfg_trans->gen2)
		ops = &trans_ops_pcie;

@@ -3485,9 +3511,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
		goto out_no_pci;
	}

	trans_pcie->hw_base = pcim_iomap_table(pdev)[0];
	if (!trans_pcie->hw_base) {
	table = pcim_iomap_table(pdev);
	if (!table) {
		dev_err(&pdev->dev, "pcim_iomap_table failed\n");
		ret = -ENOMEM;
		goto out_no_pci;
	}

	trans_pcie->hw_base = table[0];
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n");
		ret = -ENODEV;
		goto out_no_pci;
	}