Merge: Wireless core and drivers rebase to v6.15

MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/6948

JIRA: https://issues.redhat.com/browse/RHEL-89168

ARK MR: cki-project/kernel-ark!3897

Tested: basic testing with several supported WiFi cards (Intel, Qualcomm, Mediatek and Realtek).

Wireless core and drivers update to v6.15.
MHI will be updated separately.

Upstream status: linux.git

Not necessary because SKB_DROP_REASON_SUBSYS_OPENVSWITCH is still the same:
Omitted-fix: 1cc3462159bab selftests: openvswitch: don't hardcode the drop reason subsys

The following fix was already included:
Omitted-fix: 175e69e33c669 wifi: iwlwifi: restore missing initialization of async_handlers_list

Not important fixes (postponed):
Omitted-fix: 9a353a4a11a4 wifi: ath12k: avoid burning CPU while waiting for firmware stats
Omitted-fix: ac7b8ff7839d wifi: ath12k: don't use static variables in ath12k_wmi_fw_stats_process()
Omitted-fix: ad5e9178cec5 wifi: ath12k: don't wait when there is no vdev started
Omitted-fix: 54c350055b1d wifi: ath12k: Fix double budget decrement while reaping monitor ring

Fixes for AP capabilities (postponed):
Omitted-fix: 64cbf0d7ce9a wifi: mt76: mt7996: Fix possible OOB access in mt7996_tx()
Omitted-fix: a0c5eac91810 wifi: mt76: Assume __mt76_connac_mcu_alloc_sta_req runs in atomic context
Omitted-fix: c772cd726eea wifi: mt76: Move RCU section in mt7996_mcu_set_fixed_field()
Omitted-fix: 28d519d0d493 wifi: mt76: Move RCU section in mt7996_mcu_add_rate_ctrl_fixed()
Omitted-fix: 3dd6f67c669c wifi: mt76: Move RCU section in mt7996_mcu_add_rate_ctrl()
Omitted-fix: 71532576f41e wifi: mt76: Remove RCU section in mt7996_mac_sta_rc_work()
Omitted-fix: e8d7eef07199 wifi: mt76: mt7996: Fix secondary link lookup in mt7996_mcu_sta_mld_setup_tlv()
Omitted-fix: a59650a22701 wifi: mt76: mt7996: Fix valid_links bitmask in mt7996_mac_sta_{add,remove}

Signed-off-by: Jose Ignacio Tornos Martinez <jtornosm@redhat.com>

Approved-by: Jan Stancek <jstancek@redhat.com>
Approved-by: Davide Caratti <dcaratti@redhat.com>
Approved-by: mheib <mheib@redhat.com>
Approved-by: Michal Schmidt <mschmidt@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>

Merged-by: Augusto Caringi <acaringi@redhat.com>
This commit is contained in:
Augusto Caringi 2025-07-15 15:53:43 -03:00
commit 8010469081
364 changed files with 74493 additions and 7056 deletions

View File

@ -0,0 +1,56 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/net/rfkill-gpio.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: GPIO controlled rfkill switch

maintainers:
  - Johannes Berg <johannes@sipsolutions.net>
  - Philipp Zabel <p.zabel@pengutronix.de>

properties:
  compatible:
    const: rfkill-gpio

  label:
    description: rfkill switch name, defaults to node name

  radio-type:
    description: rfkill radio type
    enum:
      - bluetooth
      - fm
      - gps
      - nfc
      - ultrawideband
      - wimax
      - wlan
      - wwan

  shutdown-gpios:
    maxItems: 1

  default-blocked:
    $ref: /schemas/types.yaml#/definitions/flag
    description: configure rfkill state as blocked at boot

required:
  - compatible
  - radio-type
  - shutdown-gpios

additionalProperties: false

examples:
  - |
    #include <dt-bindings/gpio/gpio.h>

    rfkill {
        compatible = "rfkill-gpio";
        label = "rfkill-pcie-wlan";
        radio-type = "wlan";
        shutdown-gpios = <&gpio2 25 GPIO_ACTIVE_HIGH>;
        default-blocked;
    };

View File

@ -93,20 +93,41 @@ properties:
ieee80211-freq-limit: true
qcom,ath10k-calibration-data:
qcom,calibration-data:
$ref: /schemas/types.yaml#/definitions/uint8-array
description:
Calibration data + board-specific data as a byte array. The length
can vary between hardware versions.
qcom,ath10k-calibration-variant:
qcom,ath10k-calibration-data:
$ref: /schemas/types.yaml#/definitions/uint8-array
deprecated: true
description:
Calibration data + board-specific data as a byte array. The length
can vary between hardware versions.
qcom,calibration-variant:
$ref: /schemas/types.yaml#/definitions/string
description:
Unique variant identifier of the calibration data in board-2.bin
for designs with colliding bus and device specific ids
qcom,ath10k-calibration-variant:
$ref: /schemas/types.yaml#/definitions/string
deprecated: true
description:
Unique variant identifier of the calibration data in board-2.bin
for designs with colliding bus and device specific ids
qcom,pre-calibration-data:
$ref: /schemas/types.yaml#/definitions/uint8-array
description:
Pre-calibration data as a byte array. The length can vary between
hardware versions.
qcom,ath10k-pre-calibration-data:
$ref: /schemas/types.yaml#/definitions/uint8-array
deprecated: true
description:
Pre-calibration data as a byte array. The length can vary between
hardware versions.

View File

@ -23,8 +23,15 @@ properties:
reg:
maxItems: 1
qcom,calibration-variant:
$ref: /schemas/types.yaml#/definitions/string
description: |
string to uniquely identify variant of the calibration data for designs
with colliding bus and device ids
qcom,ath11k-calibration-variant:
$ref: /schemas/types.yaml#/definitions/string
deprecated: true
description: |
string to uniquely identify variant of the calibration data for designs
with colliding bus and device ids
@ -50,6 +57,9 @@ properties:
vddrfa1p7-supply:
description: VDD_RFA_1P7 supply regulator handle
vddrfa1p8-supply:
description: VDD_RFA_1P8 supply regulator handle
vddpcie0p9-supply:
description: VDD_PCIE_0P9 supply regulator handle
@ -77,6 +87,22 @@ allOf:
- vddrfa1p7-supply
- vddpcie0p9-supply
- vddpcie1p8-supply
- if:
properties:
compatible:
contains:
const: pci17cb,1103
then:
required:
- vddrfacmn-supply
- vddaon-supply
- vddwlcx-supply
- vddwlmx-supply
- vddrfa0p8-supply
- vddrfa1p2-supply
- vddrfa1p8-supply
- vddpcie0p9-supply
- vddpcie1p8-supply
additionalProperties: false
@ -99,7 +125,17 @@ examples:
compatible = "pci17cb,1103";
reg = <0x10000 0x0 0x0 0x0 0x0>;
qcom,ath11k-calibration-variant = "LE_X13S";
vddrfacmn-supply = <&vreg_pmu_rfa_cmn_0p8>;
vddaon-supply = <&vreg_pmu_aon_0p8>;
vddwlcx-supply = <&vreg_pmu_wlcx_0p8>;
vddwlmx-supply = <&vreg_pmu_wlmx_0p8>;
vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>;
vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>;
vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
vddrfa1p8-supply = <&vreg_pmu_rfa_1p7>;
qcom,calibration-variant = "LE_X13S";
};
};
};

View File

@ -42,8 +42,15 @@ properties:
* reg
* reg-names
qcom,calibration-variant:
$ref: /schemas/types.yaml#/definitions/string
description:
string to uniquely identify variant of the calibration data in the
board-2.bin for designs with colliding bus and device specific ids
qcom,ath11k-calibration-variant:
$ref: /schemas/types.yaml#/definitions/string
deprecated: true
description:
string to uniquely identify variant of the calibration data in the
board-2.bin for designs with colliding bus and device specific ids

View File

@ -53,8 +53,15 @@ properties:
reg:
maxItems: 1
qcom,calibration-variant:
$ref: /schemas/types.yaml#/definitions/string
description:
String to uniquely identify variant of the calibration data for designs
with colliding bus and device ids
qcom,ath12k-calibration-variant:
$ref: /schemas/types.yaml#/definitions/string
deprecated: true
description:
String to uniquely identify variant of the calibration data for designs
with colliding bus and device ids
@ -104,7 +111,7 @@ examples:
compatible = "pci17cb,1109";
reg = <0x0 0x0 0x0 0x0 0x0>;
qcom,ath12k-calibration-variant = "RDP433_1";
qcom,calibration-variant = "RDP433_1";
ports {
#address-cells = <1>;
@ -140,7 +147,7 @@ examples:
compatible = "pci17cb,1109";
reg = <0x0 0x0 0x0 0x0 0x0>;
qcom,ath12k-calibration-variant = "RDP433_2";
qcom,calibration-variant = "RDP433_2";
qcom,wsi-controller;
ports {
@ -177,7 +184,7 @@ examples:
compatible = "pci17cb,1109";
reg = <0x0 0x0 0x0 0x0 0x0>;
qcom,ath12k-calibration-variant = "RDP433_3";
qcom,calibration-variant = "RDP433_3";
ports {
#address-cells = <1>;

View File

@ -1163,8 +1163,11 @@ int ath10k_core_check_dt(struct ath10k *ar)
if (!node)
return -ENOENT;
of_property_read_string(node, "qcom,ath10k-calibration-variant",
of_property_read_string(node, "qcom,calibration-variant",
&variant);
if (!variant)
of_property_read_string(node, "qcom,ath10k-calibration-variant",
&variant);
if (!variant)
return -ENODATA;
@ -2259,7 +2262,9 @@ static int ath10k_core_pre_cal_download(struct ath10k *ar)
"boot did not find a pre calibration file, try DT next: %d\n",
ret);
ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data");
ret = ath10k_download_cal_dt(ar, "qcom,pre-calibration-data");
if (ret == -ENOENT)
ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data");
if (ret) {
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"unable to load pre cal data from DT: %d\n", ret);
@ -2337,7 +2342,9 @@ static int ath10k_download_cal_data(struct ath10k *ar)
"boot did not find a calibration file, try DT next: %d\n",
ret);
ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data");
ret = ath10k_download_cal_dt(ar, "qcom,calibration-data");
if (ret == -ENOENT)
ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data");
if (ret == 0) {
ar->cal_mode = ATH10K_CAL_MODE_DT;
goto done;

View File

@ -27,6 +27,7 @@ ath11k-$(CONFIG_ATH11K_TRACING) += trace.o
ath11k-$(CONFIG_THERMAL) += thermal.o
ath11k-$(CONFIG_ATH11K_SPECTRAL) += spectral.o
ath11k-$(CONFIG_PM) += wow.o
ath11k-$(CONFIG_DEV_COREDUMP) += coredump.o
obj-$(CONFIG_ATH11K_AHB) += ath11k_ahb.o
ath11k_ahb-y += ahb.o

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@ -1290,6 +1290,7 @@ static void ath11k_ahb_remove(struct platform_device *pdev)
ath11k_core_deinit(ab);
qmi_fail:
ath11k_fw_destroy(ab);
ath11k_ahb_free_resources(ab);
}
@ -1309,6 +1310,7 @@ static void ath11k_ahb_shutdown(struct platform_device *pdev)
ath11k_core_deinit(ab);
free_resources:
ath11k_fw_destroy(ab);
ath11k_ahb_free_resources(ab);
}

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@ -1175,8 +1175,11 @@ int ath11k_core_check_dt(struct ath11k_base *ab)
if (!node)
return -ENOENT;
of_property_read_string(node, "qcom,ath11k-calibration-variant",
of_property_read_string(node, "qcom,calibration-variant",
&variant);
if (!variant)
of_property_read_string(node, "qcom,ath11k-calibration-variant",
&variant);
if (!variant)
return -ENODATA;
@ -2056,6 +2059,7 @@ void ath11k_core_halt(struct ath11k *ar)
ath11k_mac_scan_finish(ar);
ath11k_mac_peer_cleanup_all(ar);
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->channel_update_work);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ab->update_11d_work);
@ -2257,6 +2261,7 @@ static void ath11k_core_reset(struct work_struct *work)
reinit_completion(&ab->recovery_start);
atomic_set(&ab->recovery_start_count, 0);
ath11k_coredump_collect(ab);
ath11k_core_pre_reconfigure_recovery(ab);
reinit_completion(&ab->reconfigure_complete);
@ -2346,7 +2351,6 @@ void ath11k_core_deinit(struct ath11k_base *ab)
ath11k_hif_power_down(ab);
ath11k_mac_destroy(ab);
ath11k_core_soc_destroy(ab);
ath11k_fw_destroy(ab);
}
EXPORT_SYMBOL(ath11k_core_deinit);
@ -2393,6 +2397,7 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
INIT_WORK(&ab->restart_work, ath11k_core_restart);
INIT_WORK(&ab->update_11d_work, ath11k_update_11d);
INIT_WORK(&ab->reset_work, ath11k_core_reset);
INIT_WORK(&ab->dump_work, ath11k_coredump_upload);
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
init_completion(&ab->wow.wakeup_completed);

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_CORE_H
@ -32,6 +32,7 @@
#include "spectral.h"
#include "wow.h"
#include "fw.h"
#include "coredump.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@ -370,6 +371,7 @@ struct ath11k_vif {
struct ieee80211_vif *vif;
struct wmi_wmm_params_all_arg wmm_params;
struct wmi_wmm_params_all_arg muedca_params;
struct list_head list;
union {
struct {
@ -685,7 +687,7 @@ struct ath11k {
struct mutex conf_mutex;
/* protects the radio specific data like debug stats, ppdu_stats_info stats,
* vdev_stop_status info, scan data, ath11k_sta info, ath11k_vif info,
* channel context data, survey info, test mode data.
* channel context data, survey info, test mode data, channel_update_queue.
*/
spinlock_t data_lock;
@ -743,6 +745,9 @@ struct ath11k {
struct completion bss_survey_done;
struct work_struct regd_update_work;
struct work_struct channel_update_work;
/* protected with data_lock */
struct list_head channel_update_queue;
struct work_struct wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
@ -900,6 +905,10 @@ struct ath11k_base {
/* HW channel counters frequency value in hertz common to all MACs */
u32 cc_freq_hz;
struct ath11k_dump_file_data *dump_data;
size_t ath11k_coredump_len;
struct work_struct dump_work;
struct ath11k_htc htc;
struct ath11k_dp dp;

View File

@ -0,0 +1,52 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/devcoredump.h>
#include "hif.h"
#include "coredump.h"
#include "debug.h"
/* Translate a QMI memory region type into the TLV type recorded in the
 * firmware crash dump file.  Unknown region types yield the
 * FW_CRASH_DUMP_TYPE_MAX sentinel so callers can detect "no mapping".
 */
enum
ath11k_fw_crash_dump_type ath11k_coredump_get_dump_type(int type)
{
	switch (type) {
	case HOST_DDR_REGION_TYPE:
		return FW_CRASH_DUMP_REMOTE_MEM_DATA;
	case M3_DUMP_REGION_TYPE:
		return FW_CRASH_DUMP_M3_DUMP;
	case PAGEABLE_MEM_REGION_TYPE:
		return FW_CRASH_DUMP_PAGEABLE_DATA;
	case BDF_MEM_REGION_TYPE:
	case CALDB_MEM_REGION_TYPE:
		/* these regions are deliberately not captured in the dump */
		return FW_CRASH_DUMP_NONE;
	default:
		return FW_CRASH_DUMP_TYPE_MAX;
	}
}
EXPORT_SYMBOL(ath11k_coredump_get_dump_type);
/* Work handler (ab->dump_work): hand the previously collected dump buffer
 * over to the devcoredump framework.
 */
void ath11k_coredump_upload(struct work_struct *work)
{
	struct ath11k_base *ab;

	ab = container_of(work, struct ath11k_base, dump_work);

	ath11k_info(ab, "Uploading coredump\n");

	/* dev_coredumpv() takes ownership of the buffer */
	dev_coredumpv(ab->dev, ab->dump_data, ab->ath11k_coredump_len, GFP_KERNEL);

	/* the buffer now belongs to the devcoredump core; drop our reference */
	ab->dump_data = NULL;
}
/* Ask the bus layer to download the firmware crash dump.  The underlying
 * HIF op is optional, so this is a no-op on buses that do not implement
 * coredump_download.
 */
void ath11k_coredump_collect(struct ath11k_base *ab)
{
	ath11k_hif_coredump_download(ab);
}

View File

@ -0,0 +1,79 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
 * Copyright (c) 2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#ifndef _ATH11K_COREDUMP_H_
#define _ATH11K_COREDUMP_H_

/* version written into struct ath11k_dump_file_data::version */
#define ATH11K_FW_CRASH_DUMP_V2 2

/* TLV types carried in struct ath11k_tlv_dump_data::type */
enum ath11k_fw_crash_dump_type {
	FW_CRASH_DUMP_PAGING_DATA,
	FW_CRASH_DUMP_RDDM_DATA,
	FW_CRASH_DUMP_REMOTE_MEM_DATA,
	FW_CRASH_DUMP_PAGEABLE_DATA,
	FW_CRASH_DUMP_M3_DUMP,
	FW_CRASH_DUMP_NONE,

	/* keep last */
	FW_CRASH_DUMP_TYPE_MAX,
};

#define COREDUMP_TLV_HDR_SIZE 8

/* one TLV entry inside the dump file payload */
struct ath11k_tlv_dump_data {
	/* see ath11k_fw_crash_dump_type above */
	__le32 type;

	/* in bytes */
	__le32 tlv_len;

	/* pad to 32-bit boundaries as needed */
	u8 tlv_data[];
} __packed;

/* on-disk header of the coredump handed to dev_coredumpv() */
struct ath11k_dump_file_data {
	/* "ATH11K-FW-DUMP" */
	char df_magic[16];

	/* total dump len in bytes */
	__le32 len;

	/* file dump version */
	__le32 version;

	/* pci device id */
	__le32 chip_id;

	/* qrtr instance id */
	__le32 qrtr_id;

	/* pci domain id */
	__le32 bus_id;

	guid_t guid;

	/* time-of-day stamp */
	__le64 tv_sec;

	/* time-of-day stamp, nano-seconds */
	__le64 tv_nsec;

	/* room for growth w/out changing binary format */
	u8 unused[128];

	u8 data[];
} __packed;

#ifdef CONFIG_DEV_COREDUMP
enum ath11k_fw_crash_dump_type ath11k_coredump_get_dump_type(int type);
void ath11k_coredump_upload(struct work_struct *work);
void ath11k_coredump_collect(struct ath11k_base *ab);
#else
/* stubs so callers need no #ifdef when devcoredump support is disabled */
static inline enum
ath11k_fw_crash_dump_type ath11k_coredump_get_dump_type(int type)
{
	return FW_CRASH_DUMP_TYPE_MAX;
}

static inline void ath11k_coredump_upload(struct work_struct *work)
{
}

static inline void ath11k_coredump_collect(struct ath11k_base *ab)
{
}
#endif

#endif

View File

@ -178,7 +178,7 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
* received 'update stats' event, we keep a 3 seconds timeout in case,
* fw_stats_done is not marked yet
*/
timeout = jiffies + msecs_to_jiffies(3 * 1000);
timeout = jiffies + secs_to_jiffies(3);
ath11k_debugfs_fw_stats_reset(ar);

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
@ -104,14 +104,12 @@ void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
if (!ring->vaddr_unaligned)
return;
if (ring->cached) {
dma_unmap_single(ab->dev, ring->paddr_unaligned, ring->size,
DMA_FROM_DEVICE);
kfree(ring->vaddr_unaligned);
} else {
if (ring->cached)
dma_free_noncoherent(ab->dev, ring->size, ring->vaddr_unaligned,
ring->paddr_unaligned, DMA_FROM_DEVICE);
else
dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
ring->paddr_unaligned);
}
ring->vaddr_unaligned = NULL;
}
@ -249,25 +247,14 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
default:
cached = false;
}
if (cached) {
ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
if (!ring->vaddr_unaligned)
return -ENOMEM;
ring->paddr_unaligned = dma_map_single(ab->dev,
ring->vaddr_unaligned,
ring->size,
DMA_FROM_DEVICE);
if (dma_mapping_error(ab->dev, ring->paddr_unaligned)) {
kfree(ring->vaddr_unaligned);
ring->vaddr_unaligned = NULL;
return -ENOMEM;
}
}
}
if (!cached)
if (cached)
ring->vaddr_unaligned = dma_alloc_noncoherent(ab->dev, ring->size,
&ring->paddr_unaligned,
DMA_FROM_DEVICE,
GFP_KERNEL);
else
ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
&ring->paddr_unaligned,
GFP_KERNEL);

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_DP_H
@ -20,7 +20,6 @@ struct ath11k_ext_irq_grp;
struct dp_rx_tid {
u8 tid;
u32 *vaddr;
dma_addr_t paddr;
u32 size;
u32 ba_win_sz;
@ -37,6 +36,9 @@ struct dp_rx_tid {
/* Timer info related to fragments */
struct timer_list frag_timer;
struct ath11k_base *ab;
u32 *vaddr_unaligned;
dma_addr_t paddr_unaligned;
u32 unaligned_size;
};
#define DP_REO_DESC_FREE_THRESHOLD 64

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ieee80211.h>
@ -675,11 +675,11 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
list_del(&cmd->list);
rx_tid = &cmd->data;
if (rx_tid->vaddr) {
dma_unmap_single(ab->dev, rx_tid->paddr,
rx_tid->size, DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
if (rx_tid->vaddr_unaligned) {
dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
rx_tid->vaddr_unaligned,
rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
rx_tid->vaddr_unaligned = NULL;
}
kfree(cmd);
}
@ -689,11 +689,11 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
list_del(&cmd_cache->list);
dp->reo_cmd_cache_flush_count--;
rx_tid = &cmd_cache->data;
if (rx_tid->vaddr) {
dma_unmap_single(ab->dev, rx_tid->paddr,
rx_tid->size, DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
if (rx_tid->vaddr_unaligned) {
dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
rx_tid->vaddr_unaligned,
rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
rx_tid->vaddr_unaligned = NULL;
}
kfree(cmd_cache);
}
@ -708,11 +708,11 @@ static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
if (status != HAL_REO_CMD_SUCCESS)
ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
rx_tid->tid, status);
if (rx_tid->vaddr) {
dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
if (rx_tid->vaddr_unaligned) {
dma_free_noncoherent(dp->ab->dev, rx_tid->unaligned_size,
rx_tid->vaddr_unaligned,
rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
rx_tid->vaddr_unaligned = NULL;
}
}
@ -749,10 +749,10 @@ static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
if (ret) {
ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
rx_tid->tid, ret);
dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
rx_tid->vaddr_unaligned,
rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
rx_tid->vaddr_unaligned = NULL;
}
}
@ -802,10 +802,10 @@ static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
return;
free_desc:
dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
rx_tid->vaddr_unaligned,
rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
rx_tid->vaddr_unaligned = NULL;
}
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
@ -831,14 +831,16 @@ void ath11k_peer_rx_tid_delete(struct ath11k *ar,
if (ret != -ESHUTDOWN)
ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
tid, ret);
dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
dma_free_noncoherent(ar->ab->dev, rx_tid->unaligned_size,
rx_tid->vaddr_unaligned,
rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
rx_tid->vaddr_unaligned = NULL;
}
rx_tid->paddr = 0;
rx_tid->paddr_unaligned = 0;
rx_tid->size = 0;
rx_tid->unaligned_size = 0;
}
static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
@ -982,10 +984,9 @@ static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
if (!rx_tid->active)
goto unlock_exit;
dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
DMA_BIDIRECTIONAL);
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned,
rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
rx_tid->vaddr_unaligned = NULL;
rx_tid->active = false;
@ -1000,9 +1001,8 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
struct ath11k_base *ab = ar->ab;
struct ath11k_peer *peer;
struct dp_rx_tid *rx_tid;
u32 hw_desc_sz;
u32 *addr_aligned;
void *vaddr;
u32 hw_desc_sz, *vaddr;
void *vaddr_unaligned;
dma_addr_t paddr;
int ret;
@ -1050,49 +1050,40 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
else
hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
if (!vaddr) {
rx_tid->unaligned_size = hw_desc_sz + HAL_LINK_DESC_ALIGN - 1;
vaddr_unaligned = dma_alloc_noncoherent(ab->dev, rx_tid->unaligned_size, &paddr,
DMA_BIDIRECTIONAL, GFP_ATOMIC);
if (!vaddr_unaligned) {
spin_unlock_bh(&ab->base_lock);
return -ENOMEM;
}
addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
ssn, pn_type);
paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
DMA_BIDIRECTIONAL);
ret = dma_mapping_error(ab->dev, paddr);
if (ret) {
spin_unlock_bh(&ab->base_lock);
ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n",
peer_mac, tid, ret);
goto err_mem_free;
}
rx_tid->vaddr = vaddr;
rx_tid->paddr = paddr;
rx_tid->vaddr_unaligned = vaddr_unaligned;
vaddr = PTR_ALIGN(vaddr_unaligned, HAL_LINK_DESC_ALIGN);
rx_tid->paddr_unaligned = paddr;
rx_tid->paddr = rx_tid->paddr_unaligned + ((unsigned long)vaddr -
(unsigned long)rx_tid->vaddr_unaligned);
ath11k_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type);
rx_tid->size = hw_desc_sz;
rx_tid->active = true;
/* After dma_alloc_noncoherent, vaddr is being modified for reo qdesc setup.
* Since these changes are not reflected in the device, driver now needs to
* explicitly call dma_sync_single_for_device.
*/
dma_sync_single_for_device(ab->dev, rx_tid->paddr,
rx_tid->size,
DMA_TO_DEVICE);
spin_unlock_bh(&ab->base_lock);
ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
paddr, tid, 1, ba_win_sz);
ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, rx_tid->paddr,
tid, 1, ba_win_sz);
if (ret) {
ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
peer_mac, tid, ret);
ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
}
return ret;
err_mem_free:
kfree(rx_tid->vaddr);
rx_tid->vaddr = NULL;
return ret;
}
@ -2831,8 +2822,6 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
rx_stats->dcm_count += ppdu_info->dcm;
rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
arsta->rssi_comb = ppdu_info->rssi_comb;
BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
ARRAY_SIZE(ppdu_info->rssi_chain_pri20));
@ -4783,7 +4772,7 @@ u32 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
if (!msdu) {
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
"msdu_pop: invalid buf_id %d\n", buf_id);
break;
goto next_msdu;
}
rxcb = ATH11K_SKB_RXCB(msdu);
if (!rxcb->unmapped) {
@ -5148,7 +5137,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
const struct ath11k_hw_hal_params *hal_params;
void *ring_entry;
void *mon_dst_srng;
struct hal_srng *mon_dst_srng;
u32 ppdu_id;
u32 rx_bufs_used;
u32 ring_id;
@ -5165,6 +5154,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
spin_lock_bh(&pmon->mon_lock);
spin_lock_bh(&mon_dst_srng->lock);
ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
ppdu_id = pmon->mon_ppdu_info.ppdu_id;
@ -5223,6 +5213,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
mon_dst_srng);
}
ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
spin_unlock_bh(&mon_dst_srng->lock);
spin_unlock_bh(&pmon->mon_lock);
@ -5410,7 +5401,7 @@ ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
"full mon msdu_pop: invalid buf_id %d\n",
buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
break;
goto next_msdu;
}
idr_remove(&rx_ring->bufs_idr, buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
@ -5612,7 +5603,7 @@ static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
struct hal_sw_mon_ring_entries *sw_mon_entries;
struct ath11k_pdev_mon_stats *rx_mon_stats;
struct sk_buff *head_msdu, *tail_msdu;
void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
struct hal_srng *mon_dst_srng;
void *ring_entry;
u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
int quota = 0, ret;
@ -5628,6 +5619,9 @@ static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
goto reap_status_ring;
}
mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
spin_lock_bh(&mon_dst_srng->lock);
ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
head_msdu = NULL;
@ -5671,6 +5665,7 @@ next_entry:
}
ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
spin_unlock_bh(&mon_dst_srng->lock);
spin_unlock_bh(&pmon->mon_lock);
if (rx_bufs_used) {

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@ -166,3 +166,4 @@ void ath11k_fw_destroy(struct ath11k_base *ab)
{
release_firmware(ab->fw.fw);
}
EXPORT_SYMBOL(ath11k_fw_destroy);

View File

@ -31,6 +31,7 @@ struct ath11k_hif_ops {
void (*ce_irq_enable)(struct ath11k_base *ab);
void (*ce_irq_disable)(struct ath11k_base *ab);
void (*get_ce_msi_idx)(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx);
void (*coredump_download)(struct ath11k_base *ab);
};
static inline void ath11k_hif_ce_irq_enable(struct ath11k_base *ab)
@ -146,4 +147,10 @@ static inline void ath11k_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id,
*msi_data_idx = ce_id;
}
static inline void ath11k_hif_coredump_download(struct ath11k_base *ab)
{
if (ab->hif.ops->coredump_download)
ab->hif.ops->coredump_download(ab);
}
#endif /* _HIF_H_ */

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
@ -1529,17 +1529,23 @@ static int ath11k_mac_set_vif_params(struct ath11k_vif *arvif,
return ret;
}
static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif)
static struct ath11k_vif *ath11k_mac_get_tx_arvif(struct ath11k_vif *arvif)
{
if (arvif->vif->mbssid_tx_vif)
return ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
return NULL;
}
static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif,
struct ath11k_vif *tx_arvif)
{
struct ath11k_vif *tx_arvif;
struct ieee80211_ema_beacons *beacons;
int ret = 0;
bool nontx_vif_params_set = false;
u32 params = 0;
u8 i = 0;
tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
beacons = ieee80211_beacon_get_template_ema_list(tx_arvif->ar->hw,
tx_arvif->vif, 0);
if (!beacons || !beacons->cnt) {
@ -1585,25 +1591,22 @@ static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif)
return ret;
}
static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif)
static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif,
struct ath11k_vif *tx_arvif)
{
struct ath11k *ar = arvif->ar;
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *tx_arvif = arvif;
struct ieee80211_hw *hw = ar->hw;
struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_mutable_offsets offs = {};
struct sk_buff *bcn;
int ret;
if (vif->mbssid_tx_vif) {
tx_arvif = ath11k_vif_to_arvif(vif->mbssid_tx_vif);
if (tx_arvif != arvif) {
ar = tx_arvif->ar;
ab = ar->ab;
hw = ar->hw;
vif = tx_arvif->vif;
}
if (tx_arvif != arvif) {
ar = tx_arvif->ar;
ab = ar->ab;
hw = ar->hw;
vif = tx_arvif->vif;
}
bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
@ -1632,6 +1635,7 @@ static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif)
static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
{
struct ieee80211_vif *vif = arvif->vif;
struct ath11k_vif *tx_arvif;
if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
return 0;
@ -1639,14 +1643,18 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
/* Target does not expect beacon templates for the already up
* non-transmitting interfaces, and results in a crash if sent.
*/
if (vif->mbssid_tx_vif &&
arvif != ath11k_vif_to_arvif(vif->mbssid_tx_vif) && arvif->is_up)
return 0;
tx_arvif = ath11k_mac_get_tx_arvif(arvif);
if (tx_arvif) {
if (arvif != tx_arvif && arvif->is_up)
return 0;
if (vif->bss_conf.ema_ap && vif->mbssid_tx_vif)
return ath11k_mac_setup_bcn_tmpl_ema(arvif);
if (vif->bss_conf.ema_ap)
return ath11k_mac_setup_bcn_tmpl_ema(arvif, tx_arvif);
} else {
tx_arvif = arvif;
}
return ath11k_mac_setup_bcn_tmpl_mbssid(arvif);
return ath11k_mac_setup_bcn_tmpl_mbssid(arvif, tx_arvif);
}
void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
@ -1674,7 +1682,7 @@ static void ath11k_control_beaconing(struct ath11k_vif *arvif,
struct ieee80211_bss_conf *info)
{
struct ath11k *ar = arvif->ar;
struct ath11k_vif *tx_arvif = NULL;
struct ath11k_vif *tx_arvif;
int ret = 0;
lockdep_assert_held(&arvif->ar->conf_mutex);
@ -1701,9 +1709,7 @@ static void ath11k_control_beaconing(struct ath11k_vif *arvif,
ether_addr_copy(arvif->bssid, info->bssid);
if (arvif->vif->mbssid_tx_vif)
tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
tx_arvif = ath11k_mac_get_tx_arvif(arvif);
ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid,
tx_arvif ? tx_arvif->bssid : NULL,
@ -5204,6 +5210,45 @@ exit:
return ret;
}
static int ath11k_mac_op_conf_tx_mu_edca(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
unsigned int link_id, u16 ac,
const struct ieee80211_tx_queue_params *params)
{
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k *ar = hw->priv;
struct wmi_wmm_params_arg *p;
int ret;
switch (ac) {
case IEEE80211_AC_VO:
p = &arvif->muedca_params.ac_vo;
break;
case IEEE80211_AC_VI:
p = &arvif->muedca_params.ac_vi;
break;
case IEEE80211_AC_BE:
p = &arvif->muedca_params.ac_be;
break;
case IEEE80211_AC_BK:
p = &arvif->muedca_params.ac_bk;
break;
default:
ath11k_warn(ar->ab, "error ac: %d", ac);
return -EINVAL;
}
p->cwmin = u8_get_bits(params->mu_edca_param_rec.ecw_min_max, GENMASK(3, 0));
p->cwmax = u8_get_bits(params->mu_edca_param_rec.ecw_min_max, GENMASK(7, 4));
p->aifs = u8_get_bits(params->mu_edca_param_rec.aifsn, GENMASK(3, 0));
p->txop = params->mu_edca_param_rec.mu_edca_timer;
ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id,
&arvif->muedca_params,
WMI_WMM_PARAM_TYPE_11AX_MU_EDCA);
return ret;
}
static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
unsigned int link_id, u16 ac,
@ -5242,12 +5287,22 @@ static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw,
p->txop = params->txop;
ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id,
&arvif->wmm_params);
&arvif->wmm_params,
WMI_WMM_PARAM_TYPE_LEGACY);
if (ret) {
ath11k_warn(ar->ab, "failed to set wmm params: %d\n", ret);
goto exit;
}
if (params->mu_edca) {
ret = ath11k_mac_op_conf_tx_mu_edca(hw, vif, link_id, ac,
params);
if (ret) {
ath11k_warn(ar->ab, "failed to set mu_edca params: %d\n", ret);
goto exit;
}
}
ret = ath11k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
if (ret)
@ -5336,8 +5391,6 @@ static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif)
if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) {
nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
if (nsts > (ar->num_rx_chains - 1))
nsts = ar->num_rx_chains - 1;
value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
}
@ -5421,9 +5474,6 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
/* Enable Beamformee STS Field only if SU BF is enabled */
if (subfee) {
if (nsts > (ar->num_rx_chains - 1))
nsts = ar->num_rx_chains - 1;
nsts <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
*vht_cap |= nsts;
@ -6288,6 +6338,7 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath11k *ar = hw->priv;
struct htt_ppdu_stats_info *ppdu_stats, *tmp;
struct scan_chan_list_params *params;
int ret;
ath11k_mac_drain_tx(ar);
@ -6303,6 +6354,7 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
mutex_unlock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
cancel_work_sync(&ar->channel_update_work);
cancel_work_sync(&ar->regd_update_work);
cancel_work_sync(&ar->ab->update_11d_work);
@ -6312,10 +6364,19 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
}
spin_lock_bh(&ar->data_lock);
list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
list_del(&ppdu_stats->list);
kfree(ppdu_stats);
}
while ((params = list_first_entry_or_null(&ar->channel_update_queue,
struct scan_chan_list_params,
list))) {
list_del(&params->list);
kfree(params);
}
spin_unlock_bh(&ar->data_lock);
rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL);
@ -6330,23 +6391,20 @@ static int ath11k_mac_setup_vdev_params_mbssid(struct ath11k_vif *arvif,
{
struct ath11k *ar = arvif->ar;
struct ath11k_vif *tx_arvif;
struct ieee80211_vif *tx_vif;
*tx_vdev_id = 0;
tx_vif = arvif->vif->mbssid_tx_vif;
if (!tx_vif) {
tx_arvif = ath11k_mac_get_tx_arvif(arvif);
if (!tx_arvif) {
*flags = WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP;
return 0;
}
tx_arvif = ath11k_vif_to_arvif(tx_vif);
if (arvif->vif->bss_conf.nontransmitted) {
if (ar->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy)
if (ar->hw->wiphy != tx_arvif->ar->hw->wiphy)
return -EINVAL;
*flags = WMI_HOST_VDEV_FLAGS_NON_TRANSMIT_AP;
*tx_vdev_id = ath11k_vif_to_arvif(tx_vif)->vdev_id;
*tx_vdev_id = tx_arvif->vdev_id;
} else if (tx_arvif == arvif) {
*flags = WMI_HOST_VDEV_FLAGS_TRANSMIT_AP;
} else {
@ -7306,8 +7364,7 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
int n_vifs)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif, *tx_arvif = NULL;
struct ieee80211_vif *mbssid_tx_vif;
struct ath11k_vif *arvif, *tx_arvif;
int ret;
int i;
bool monitor_vif = false;
@ -7361,10 +7418,7 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
ret);
mbssid_tx_vif = arvif->vif->mbssid_tx_vif;
if (mbssid_tx_vif)
tx_arvif = ath11k_vif_to_arvif(mbssid_tx_vif);
tx_arvif = ath11k_mac_get_tx_arvif(arvif);
ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid,
tx_arvif ? tx_arvif->bssid : NULL,
@ -10019,6 +10073,7 @@ static const struct wiphy_iftype_ext_capab ath11k_iftypes_ext_capa[] = {
static void __ath11k_mac_unregister(struct ath11k *ar)
{
cancel_work_sync(&ar->channel_update_work);
cancel_work_sync(&ar->regd_update_work);
ieee80211_unregister_hw(ar->hw);
@ -10418,6 +10473,8 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
init_completion(&ar->thermal.wmi_sync);
INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work);
INIT_WORK(&ar->channel_update_work, ath11k_regd_update_chan_list_work);
INIT_LIST_HEAD(&ar->channel_update_queue);
INIT_WORK(&ar->regd_update_work, ath11k_regd_update_work);
INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work);

View File

@ -491,3 +491,8 @@ int ath11k_mhi_resume(struct ath11k_pci *ab_pci)
return 0;
}
void ath11k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic)
{
mhi_download_rddm_image(mhi_ctrl, in_panic);
}

View File

@ -26,5 +26,6 @@ void ath11k_mhi_clear_vector(struct ath11k_base *ab);
int ath11k_mhi_suspend(struct ath11k_pci *ar_pci);
int ath11k_mhi_resume(struct ath11k_pci *ar_pci);
void ath11k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic);
#endif

View File

@ -1,13 +1,15 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include "pci.h"
#include "core.h"
@ -610,6 +612,187 @@ static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci)
PCI_EXP_LNKCTL_ASPMC);
}
#ifdef CONFIG_DEV_COREDUMP
static int ath11k_pci_coredump_calculate_size(struct ath11k_base *ab, u32 *dump_seg_sz)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
struct image_info *rddm_img, *fw_img;
struct ath11k_tlv_dump_data *dump_tlv;
enum ath11k_fw_crash_dump_type mem_type;
u32 len = 0, rddm_tlv_sz = 0, paging_tlv_sz = 0;
struct ath11k_dump_file_data *file_data;
int i;
rddm_img = mhi_ctrl->rddm_image;
if (!rddm_img) {
ath11k_err(ab, "No RDDM dump found\n");
return 0;
}
fw_img = mhi_ctrl->fbc_image;
for (i = 0; i < fw_img->entries ; i++) {
if (!fw_img->mhi_buf[i].buf)
continue;
paging_tlv_sz += fw_img->mhi_buf[i].len;
}
dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA] = paging_tlv_sz;
for (i = 0; i < rddm_img->entries; i++) {
if (!rddm_img->mhi_buf[i].buf)
continue;
rddm_tlv_sz += rddm_img->mhi_buf[i].len;
}
dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA] = rddm_tlv_sz;
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
mem_type = ath11k_coredump_get_dump_type(ab->qmi.target_mem[i].type);
if (mem_type == FW_CRASH_DUMP_NONE)
continue;
if (mem_type == FW_CRASH_DUMP_TYPE_MAX) {
ath11k_dbg(ab, ATH11K_DBG_PCI,
"target mem region type %d not supported",
ab->qmi.target_mem[i].type);
continue;
}
if (!ab->qmi.target_mem[i].anyaddr)
continue;
dump_seg_sz[mem_type] += ab->qmi.target_mem[i].size;
}
for (i = 0; i < FW_CRASH_DUMP_TYPE_MAX; i++) {
if (!dump_seg_sz[i])
continue;
len += sizeof(*dump_tlv) + dump_seg_sz[i];
}
if (len)
len += sizeof(*file_data);
return len;
}
static void ath11k_pci_coredump_download(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
struct image_info *rddm_img, *fw_img;
struct timespec64 timestamp;
int i, len, mem_idx;
enum ath11k_fw_crash_dump_type mem_type;
struct ath11k_dump_file_data *file_data;
struct ath11k_tlv_dump_data *dump_tlv;
size_t hdr_len = sizeof(*file_data);
void *buf;
u32 dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = { 0 };
ath11k_mhi_coredump(mhi_ctrl, false);
len = ath11k_pci_coredump_calculate_size(ab, dump_seg_sz);
if (!len) {
ath11k_warn(ab, "No crash dump data found for devcoredump");
return;
}
rddm_img = mhi_ctrl->rddm_image;
fw_img = mhi_ctrl->fbc_image;
/* dev_coredumpv() requires vmalloc data */
buf = vzalloc(len);
if (!buf)
return;
ab->dump_data = buf;
ab->ath11k_coredump_len = len;
file_data = ab->dump_data;
strscpy(file_data->df_magic, "ATH11K-FW-DUMP", sizeof(file_data->df_magic));
file_data->len = cpu_to_le32(len);
file_data->version = cpu_to_le32(ATH11K_FW_CRASH_DUMP_V2);
file_data->chip_id = cpu_to_le32(ab_pci->dev_id);
file_data->qrtr_id = cpu_to_le32(ab_pci->ab->qmi.service_ins_id);
file_data->bus_id = cpu_to_le32(pci_domain_nr(ab_pci->pdev->bus));
guid_gen(&file_data->guid);
ktime_get_real_ts64(&timestamp);
file_data->tv_sec = cpu_to_le64(timestamp.tv_sec);
file_data->tv_nsec = cpu_to_le64(timestamp.tv_nsec);
buf += hdr_len;
dump_tlv = buf;
dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_PAGING_DATA);
dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA]);
buf += COREDUMP_TLV_HDR_SIZE;
/* append all segments together as they are all part of a single contiguous
* block of memory
*/
for (i = 0; i < fw_img->entries ; i++) {
if (!fw_img->mhi_buf[i].buf)
continue;
memcpy_fromio(buf, (void const __iomem *)fw_img->mhi_buf[i].buf,
fw_img->mhi_buf[i].len);
buf += fw_img->mhi_buf[i].len;
}
dump_tlv = buf;
dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_RDDM_DATA);
dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA]);
buf += COREDUMP_TLV_HDR_SIZE;
/* append all segments together as they are all part of a single contiguous
* block of memory
*/
for (i = 0; i < rddm_img->entries; i++) {
if (!rddm_img->mhi_buf[i].buf)
continue;
memcpy_fromio(buf, (void const __iomem *)rddm_img->mhi_buf[i].buf,
rddm_img->mhi_buf[i].len);
buf += rddm_img->mhi_buf[i].len;
}
mem_idx = FW_CRASH_DUMP_REMOTE_MEM_DATA;
for (; mem_idx < FW_CRASH_DUMP_TYPE_MAX; mem_idx++) {
if (mem_idx == FW_CRASH_DUMP_NONE)
continue;
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
mem_type = ath11k_coredump_get_dump_type
(ab->qmi.target_mem[i].type);
if (mem_type != mem_idx)
continue;
if (!ab->qmi.target_mem[i].anyaddr) {
ath11k_dbg(ab, ATH11K_DBG_PCI,
"Skipping mem region type %d",
ab->qmi.target_mem[i].type);
continue;
}
dump_tlv = buf;
dump_tlv->type = cpu_to_le32(mem_idx);
dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[mem_idx]);
buf += COREDUMP_TLV_HDR_SIZE;
memcpy_fromio(buf, ab->qmi.target_mem[i].iaddr,
ab->qmi.target_mem[i].size);
buf += ab->qmi.target_mem[i].size;
}
}
queue_work(ab->workqueue, &ab->dump_work);
}
#endif
static int ath11k_pci_power_up(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@ -713,6 +896,9 @@ static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
.ce_irq_enable = ath11k_pci_hif_ce_irq_enable,
.ce_irq_disable = ath11k_pci_hif_ce_irq_disable,
.get_ce_msi_idx = ath11k_pcic_get_ce_msi_idx,
#ifdef CONFIG_DEV_COREDUMP
.coredump_download = ath11k_pci_coredump_download,
#endif
};
static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *minor)
@ -735,7 +921,7 @@ static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci,
if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab_pci->ab->dev_flags))
return 0;
return irq_set_affinity_hint(ab_pci->pdev->irq, m);
return irq_set_affinity_and_hint(ab_pci->pdev->irq, m);
}
static int ath11k_pci_probe(struct pci_dev *pdev,
@ -939,6 +1125,8 @@ unsupported_wcn6855_soc:
return 0;
err_free_irq:
/* __free_irq() expects the caller to have cleared the affinity hint */
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
ath11k_pcic_free_irq(ab);
err_ce_free:
@ -981,9 +1169,12 @@ static void ath11k_pci_remove(struct pci_dev *pdev)
set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
cancel_work_sync(&ab->reset_work);
cancel_work_sync(&ab->dump_work);
ath11k_core_deinit(ab);
qmi_fail:
ath11k_fw_destroy(ab);
ath11k_mhi_unregister(ab_pci);
ath11k_pcic_free_irq(ab);

View File

@ -1957,14 +1957,16 @@ static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab)
int i;
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
if ((ab->hw_params.fixed_mem_region ||
test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) &&
ab->qmi.target_mem[i].iaddr)
iounmap(ab->qmi.target_mem[i].iaddr);
if (!ab->qmi.target_mem[i].vaddr)
if (!ab->qmi.target_mem[i].anyaddr)
continue;
if (ab->hw_params.fixed_mem_region ||
test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
iounmap(ab->qmi.target_mem[i].iaddr);
ab->qmi.target_mem[i].iaddr = NULL;
continue;
}
dma_free_coherent(ab->dev,
ab->qmi.target_mem[i].prev_size,
ab->qmi.target_mem[i].vaddr,
@ -2070,7 +2072,7 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
break;
case BDF_MEM_REGION_TYPE:
ab->qmi.target_mem[idx].paddr = ab->hw_params.bdf_addr;
ab->qmi.target_mem[idx].vaddr = NULL;
ab->qmi.target_mem[idx].iaddr = NULL;
ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
idx++;
@ -2093,10 +2095,11 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
} else {
ab->qmi.target_mem[idx].paddr =
ATH11K_QMI_CALDB_ADDRESS;
ab->qmi.target_mem[idx].iaddr = NULL;
}
} else {
ab->qmi.target_mem[idx].paddr = 0;
ab->qmi.target_mem[idx].vaddr = NULL;
ab->qmi.target_mem[idx].iaddr = NULL;
}
ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_QMI_H
@ -102,8 +102,11 @@ struct target_mem_chunk {
u32 prev_size;
u32 prev_type;
dma_addr_t paddr;
u32 *vaddr;
void __iomem *iaddr;
union {
u32 *vaddr;
void __iomem *iaddr;
void *anyaddr;
};
};
struct target_info {
@ -154,6 +157,7 @@ struct ath11k_qmi {
#define BDF_MEM_REGION_TYPE 0x2
#define M3_DUMP_REGION_TYPE 0x3
#define CALDB_MEM_REGION_TYPE 0x4
#define PAGEABLE_MEM_REGION_TYPE 0x9
struct qmi_wlanfw_host_cap_req_msg_v01 {
u8 num_clients_valid;

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/rtnetlink.h>
@ -55,6 +55,19 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"Regulatory Notification received for %s\n", wiphy_name(wiphy));
if (request->initiator == NL80211_REGDOM_SET_BY_DRIVER) {
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"driver initiated regd update\n");
if (ar->state != ATH11K_STATE_ON)
return;
ret = ath11k_reg_update_chan_list(ar, true);
if (ret)
ath11k_warn(ar->ab, "failed to update channel list: %d\n", ret);
return;
}
/* Currently supporting only General User Hints. Cell base user
* hints to be handled later.
* Hints from other sources like Core, Beacons are not expected for
@ -111,32 +124,7 @@ int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
struct channel_param *ch;
enum nl80211_band band;
int num_channels = 0;
int i, ret, left;
if (wait && ar->state_11d != ATH11K_11D_IDLE) {
left = wait_for_completion_timeout(&ar->completed_11d_scan,
ATH11K_SCAN_TIMEOUT_HZ);
if (!left) {
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"failed to receive 11d scan complete: timed out\n");
ar->state_11d = ATH11K_11D_IDLE;
}
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"11d scan wait left time %d\n", left);
}
if (wait &&
(ar->scan.state == ATH11K_SCAN_STARTING ||
ar->scan.state == ATH11K_SCAN_RUNNING)) {
left = wait_for_completion_timeout(&ar->scan.completed,
ATH11K_SCAN_TIMEOUT_HZ);
if (!left)
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"failed to receive hw scan complete: timed out\n");
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"hw scan wait left time %d\n", left);
}
int i, ret = 0;
if (ar->state == ATH11K_STATE_RESTARTING)
return 0;
@ -218,6 +206,16 @@ int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
}
}
if (wait) {
spin_lock_bh(&ar->data_lock);
list_add_tail(&params->list, &ar->channel_update_queue);
spin_unlock_bh(&ar->data_lock);
queue_work(ar->ab->workqueue, &ar->channel_update_work);
return 0;
}
ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
kfree(params);
@ -293,12 +291,6 @@ int ath11k_regd_update(struct ath11k *ar)
if (ret)
goto err;
if (ar->state == ATH11K_STATE_ON) {
ret = ath11k_reg_update_chan_list(ar, true);
if (ret)
goto err;
}
return 0;
err:
ath11k_warn(ab, "failed to perform regd update : %d\n", ret);
@ -804,6 +796,54 @@ ret:
return new_regd;
}
void ath11k_regd_update_chan_list_work(struct work_struct *work)
{
struct ath11k *ar = container_of(work, struct ath11k,
channel_update_work);
struct scan_chan_list_params *params;
struct list_head local_update_list;
int left;
INIT_LIST_HEAD(&local_update_list);
spin_lock_bh(&ar->data_lock);
list_splice_tail_init(&ar->channel_update_queue, &local_update_list);
spin_unlock_bh(&ar->data_lock);
while ((params = list_first_entry_or_null(&local_update_list,
struct scan_chan_list_params,
list))) {
if (ar->state_11d != ATH11K_11D_IDLE) {
left = wait_for_completion_timeout(&ar->completed_11d_scan,
ATH11K_SCAN_TIMEOUT_HZ);
if (!left) {
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"failed to receive 11d scan complete: timed out\n");
ar->state_11d = ATH11K_11D_IDLE;
}
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"reg 11d scan wait left time %d\n", left);
}
if ((ar->scan.state == ATH11K_SCAN_STARTING ||
ar->scan.state == ATH11K_SCAN_RUNNING)) {
left = wait_for_completion_timeout(&ar->scan.completed,
ATH11K_SCAN_TIMEOUT_HZ);
if (!left)
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"failed to receive hw scan complete: timed out\n");
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"reg hw scan wait left time %d\n", left);
}
ath11k_wmi_send_scan_chan_list_cmd(ar, params);
list_del(&params->list);
kfree(params);
}
}
static bool ath11k_reg_is_world_alpha(char *alpha)
{
if (alpha[0] == '0' && alpha[1] == '0')
@ -977,6 +1017,7 @@ void ath11k_regd_update_work(struct work_struct *work)
void ath11k_reg_init(struct ath11k *ar)
{
ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
ar->hw->wiphy->flags |= WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER;
ar->hw->wiphy->reg_notifier = ath11k_reg_notifier;
}

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_REG_H
@ -33,6 +33,7 @@ void ath11k_reg_init(struct ath11k *ar);
void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info);
void ath11k_reg_free(struct ath11k_base *ab);
void ath11k_regd_update_work(struct work_struct *work);
void ath11k_regd_update_chan_list_work(struct work_struct *work);
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
struct cur_regulatory_info *reg_info, bool intersect,

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "testmode.h"
@ -10,18 +10,18 @@
#include "wmi.h"
#include "hw.h"
#include "core.h"
#include "testmode_i.h"
#include "../testmode_i.h"
#define ATH11K_FTM_SEGHDR_CURRENT_SEQ GENMASK(3, 0)
#define ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS GENMASK(7, 4)
static const struct nla_policy ath11k_tm_policy[ATH11K_TM_ATTR_MAX + 1] = {
[ATH11K_TM_ATTR_CMD] = { .type = NLA_U32 },
[ATH11K_TM_ATTR_DATA] = { .type = NLA_BINARY,
.len = ATH11K_TM_DATA_MAX_LEN },
[ATH11K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
[ATH11K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
[ATH11K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
static const struct nla_policy ath11k_tm_policy[ATH_TM_ATTR_MAX + 1] = {
[ATH_TM_ATTR_CMD] = { .type = NLA_U32 },
[ATH_TM_ATTR_DATA] = { .type = NLA_BINARY,
.len = ATH_TM_DATA_MAX_LEN },
[ATH_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
[ATH_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
[ATH_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
};
static struct ath11k *ath11k_tm_get_ar(struct ath11k_base *ab)
@ -73,9 +73,9 @@ static void ath11k_tm_wmi_event_unsegmented(struct ath11k_base *ab, u32 cmd_id,
goto out;
}
if (nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_WMI) ||
nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id) ||
nla_put(nl_skb, ATH11K_TM_ATTR_DATA, skb->len, skb->data)) {
if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD, ATH_TM_CMD_WMI) ||
nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
nla_put(nl_skb, ATH_TM_ATTR_DATA, skb->len, skb->data)) {
ath11k_warn(ab, "failed to populate testmode unsegmented event\n");
kfree_skb(nl_skb);
goto out;
@ -140,7 +140,7 @@ static int ath11k_tm_process_event(struct ath11k_base *ab, u32 cmd_id,
data_pos = ab->testmode.data_pos;
if ((data_pos + datalen) > ATH11K_FTM_EVENT_MAX_BUF_LENGTH) {
if ((data_pos + datalen) > ATH_FTM_EVENT_MAX_BUF_LENGTH) {
ath11k_warn(ab, "Invalid ftm event length at %d: %d\n",
data_pos, datalen);
ret = -EINVAL;
@ -172,10 +172,10 @@ static int ath11k_tm_process_event(struct ath11k_base *ab, u32 cmd_id,
goto out;
}
if (nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD,
ATH11K_TM_CMD_WMI_FTM) ||
nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id) ||
nla_put(nl_skb, ATH11K_TM_ATTR_DATA, data_pos,
if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD,
ATH_TM_CMD_WMI_FTM) ||
nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
nla_put(nl_skb, ATH_TM_ATTR_DATA, data_pos,
&ab->testmode.eventdata[0])) {
ath11k_warn(ab, "failed to populate segmented testmode event");
kfree_skb(nl_skb);
@ -235,23 +235,23 @@ static int ath11k_tm_cmd_get_version(struct ath11k *ar, struct nlattr *tb[])
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
"cmd get version_major %d version_minor %d\n",
ATH11K_TESTMODE_VERSION_MAJOR,
ATH11K_TESTMODE_VERSION_MINOR);
ATH_TESTMODE_VERSION_MAJOR,
ATH_TESTMODE_VERSION_MINOR);
skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
nla_total_size(sizeof(u32)));
if (!skb)
return -ENOMEM;
ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MAJOR,
ATH11K_TESTMODE_VERSION_MAJOR);
ret = nla_put_u32(skb, ATH_TM_ATTR_VERSION_MAJOR,
ATH_TESTMODE_VERSION_MAJOR);
if (ret) {
kfree_skb(skb);
return ret;
}
ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MINOR,
ATH11K_TESTMODE_VERSION_MINOR);
ret = nla_put_u32(skb, ATH_TM_ATTR_VERSION_MINOR,
ATH_TESTMODE_VERSION_MINOR);
if (ret) {
kfree_skb(skb);
return ret;
@ -277,7 +277,7 @@ static int ath11k_tm_cmd_testmode_start(struct ath11k *ar, struct nlattr *tb[])
goto err;
}
ar->ab->testmode.eventdata = kzalloc(ATH11K_FTM_EVENT_MAX_BUF_LENGTH,
ar->ab->testmode.eventdata = kzalloc(ATH_FTM_EVENT_MAX_BUF_LENGTH,
GFP_KERNEL);
if (!ar->ab->testmode.eventdata) {
ret = -ENOMEM;
@ -310,25 +310,25 @@ static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[],
mutex_lock(&ar->conf_mutex);
if (!tb[ATH11K_TM_ATTR_DATA]) {
if (!tb[ATH_TM_ATTR_DATA]) {
ret = -EINVAL;
goto out;
}
if (!tb[ATH11K_TM_ATTR_WMI_CMDID]) {
if (!tb[ATH_TM_ATTR_WMI_CMDID]) {
ret = -EINVAL;
goto out;
}
buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
buf = nla_data(tb[ATH_TM_ATTR_DATA]);
buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
if (!buf_len) {
ath11k_warn(ar->ab, "No data present in testmode wmi command\n");
ret = -EINVAL;
goto out;
}
cmd_id = nla_get_u32(tb[ATH11K_TM_ATTR_WMI_CMDID]);
cmd_id = nla_get_u32(tb[ATH_TM_ATTR_WMI_CMDID]);
/* Make sure that the buffer length is long enough to
* hold TLV and pdev/vdev id.
@ -409,13 +409,13 @@ static int ath11k_tm_cmd_wmi_ftm(struct ath11k *ar, struct nlattr *tb[])
goto out;
}
if (!tb[ATH11K_TM_ATTR_DATA]) {
if (!tb[ATH_TM_ATTR_DATA]) {
ret = -EINVAL;
goto out;
}
buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
buf = nla_data(tb[ATH_TM_ATTR_DATA]);
buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
cmd_id = WMI_PDEV_UTF_CMDID;
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
@ -476,25 +476,25 @@ int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len)
{
struct ath11k *ar = hw->priv;
struct nlattr *tb[ATH11K_TM_ATTR_MAX + 1];
struct nlattr *tb[ATH_TM_ATTR_MAX + 1];
int ret;
ret = nla_parse(tb, ATH11K_TM_ATTR_MAX, data, len, ath11k_tm_policy,
ret = nla_parse(tb, ATH_TM_ATTR_MAX, data, len, ath11k_tm_policy,
NULL);
if (ret)
return ret;
if (!tb[ATH11K_TM_ATTR_CMD])
if (!tb[ATH_TM_ATTR_CMD])
return -EINVAL;
switch (nla_get_u32(tb[ATH11K_TM_ATTR_CMD])) {
case ATH11K_TM_CMD_GET_VERSION:
switch (nla_get_u32(tb[ATH_TM_ATTR_CMD])) {
case ATH_TM_CMD_GET_VERSION:
return ath11k_tm_cmd_get_version(ar, tb);
case ATH11K_TM_CMD_WMI:
case ATH_TM_CMD_WMI:
return ath11k_tm_cmd_wmi(ar, tb, vif);
case ATH11K_TM_CMD_TESTMODE_START:
case ATH_TM_CMD_TESTMODE_START:
return ath11k_tm_cmd_testmode_start(ar, tb);
case ATH11K_TM_CMD_WMI_FTM:
case ATH_TM_CMD_WMI_FTM:
return ath11k_tm_cmd_wmi_ftm(ar, tb);
default:
return -EOPNOTSUPP;

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@ -2662,7 +2662,8 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
}
int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
struct wmi_wmm_params_all_arg *param)
struct wmi_wmm_params_all_arg *param,
enum wmi_wmm_params_type wmm_param_type)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_vdev_set_wmm_params_cmd *cmd;
@ -2681,7 +2682,7 @@ int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->wmm_param_type = 0;
cmd->wmm_param_type = wmm_param_type;
for (ac = 0; ac < WME_NUM_AC; ac++) {
switch (ac) {
@ -2714,8 +2715,8 @@ int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
wmm_param->no_ack = wmi_wmm_arg->no_ack;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
ac, wmm_param->aifs, wmm_param->cwmin,
"wmm set type %d ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
wmm_param_type, ac, wmm_param->aifs, wmm_param->cwmin,
wmm_param->cwmax, wmm_param->txoplimit,
wmm_param->acm, wmm_param->no_ack);
}

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_WMI_H
@ -3817,6 +3817,7 @@ struct wmi_stop_scan_cmd {
};
struct scan_chan_list_params {
struct list_head list;
u32 pdev_id;
u16 nallchans;
struct channel_param ch_param[];
@ -6346,6 +6347,11 @@ enum wmi_sta_keepalive_method {
#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30
#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
enum wmi_wmm_params_type {
WMI_WMM_PARAM_TYPE_LEGACY = 0,
WMI_WMM_PARAM_TYPE_11AX_MU_EDCA = 1,
};
const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab,
struct sk_buff *skb, gfp_t gfp);
int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
@ -6402,7 +6408,8 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
struct scan_cancel_param *param);
int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
struct wmi_wmm_params_all_arg *param);
struct wmi_wmm_params_all_arg *param,
enum wmi_wmm_params_type wmm_param_type);
int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt,
u32 pdev_id);
int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id);

View File

@ -23,11 +23,12 @@ ath12k-y += core.o \
fw.o \
p2p.o
ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o
ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o
ath12k-$(CONFIG_ACPI) += acpi.o
ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
ath12k-$(CONFIG_PM) += wow.o
ath12k-$(CONFIG_ATH12K_COREDUMP) += coredump.o
ath12k-$(CONFIG_NL80211_TESTMODE) += testmode.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@ -12,7 +12,7 @@ static int ath12k_acpi_dsm_get_data(struct ath12k_base *ab, int func)
{
union acpi_object *obj;
acpi_handle root_handle;
int ret;
int ret, i;
root_handle = ACPI_HANDLE(ab->dev);
if (!root_handle) {
@ -29,9 +29,48 @@ static int ath12k_acpi_dsm_get_data(struct ath12k_base *ab, int func)
}
if (obj->type == ACPI_TYPE_INTEGER) {
ab->acpi.func_bit = obj->integer.value;
switch (func) {
case ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS:
ab->acpi.func_bit = obj->integer.value;
break;
case ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG:
ab->acpi.bit_flag = obj->integer.value;
break;
}
} else if (obj->type == ACPI_TYPE_STRING) {
switch (func) {
case ATH12K_ACPI_DSM_FUNC_BDF_EXT:
if (obj->string.length <= ATH12K_ACPI_BDF_ANCHOR_STRING_LEN ||
obj->string.length > ATH12K_ACPI_BDF_MAX_LEN ||
memcmp(obj->string.pointer, ATH12K_ACPI_BDF_ANCHOR_STRING,
ATH12K_ACPI_BDF_ANCHOR_STRING_LEN)) {
ath12k_warn(ab, "invalid ACPI DSM BDF size: %d\n",
obj->string.length);
ret = -EINVAL;
goto out;
}
memcpy(ab->acpi.bdf_string, obj->string.pointer,
obj->buffer.length);
break;
}
} else if (obj->type == ACPI_TYPE_BUFFER) {
switch (func) {
case ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS:
if (obj->buffer.length < ATH12K_ACPI_DSM_FUNC_MIN_BITMAP_SIZE ||
obj->buffer.length > ATH12K_ACPI_DSM_FUNC_MAX_BITMAP_SIZE) {
ath12k_warn(ab, "invalid ACPI DSM func size: %d\n",
obj->buffer.length);
ret = -EINVAL;
goto out;
}
ab->acpi.func_bit = 0;
for (i = 0; i < obj->buffer.length; i++)
ab->acpi.func_bit += obj->buffer.pointer[i] << (i * 8);
break;
case ATH12K_ACPI_DSM_FUNC_TAS_CFG:
if (obj->buffer.length != ATH12K_ACPI_DSM_TAS_CFG_SIZE) {
ath12k_warn(ab, "invalid ACPI DSM TAS config size: %d\n",
@ -247,24 +286,118 @@ static int ath12k_acpi_set_tas_params(struct ath12k_base *ab)
return 0;
}
/* Report whether ACPI DSM data asked the driver to skip rfkill handling
 * for this device (set while parsing the DISABLE_FLAG function).
 */
bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab)
{
	bool rfkill_disabled = ab->acpi.acpi_disable_rfkill;

	return rfkill_disabled;
}
/* Report whether ACPI DSM data asked the driver to disable 802.11be
 * (EHT) support (set while parsing the DISABLE_FLAG function).
 */
bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab)
{
	bool disable_11be = ab->acpi.acpi_disable_11be;

	return disable_11be;
}
/* Push the ACPI-derived configuration to firmware: TAS parameters, BIOS
 * SAR, CCA threshold and band edge power. Each item is sent only when the
 * corresponding acpi_*_enable flag was set earlier while parsing the DSM
 * data. A failure is logged and aborts the remaining transfers; no error
 * is returned because ACPI configuration is optional.
 */
void ath12k_acpi_set_dsm_func(struct ath12k_base *ab)
{
	int ret;
	u8 *buf;

	if (!ab->hw_params->acpi_guid)
		/* not supported with this hardware */
		return;

	if (ab->acpi.acpi_tas_enable) {
		ret = ath12k_acpi_set_tas_params(ab);
		if (ret) {
			ath12k_warn(ab, "failed to send ACPI TAS parameters: %d\n", ret);
			return;
		}
	}

	if (ab->acpi.acpi_bios_sar_enable) {
		ret = ath12k_acpi_set_bios_sar_params(ab);
		if (ret) {
			ath12k_warn(ab, "failed to send ACPI BIOS SAR: %d\n", ret);
			return;
		}
	}

	if (ab->acpi.acpi_cca_enable) {
		/* CCA threshold payload starts at the data offset inside cca_data */
		buf = ab->acpi.cca_data + ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET;
		ret = ath12k_wmi_set_bios_cmd(ab,
					      WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE,
					      buf,
					      ATH12K_ACPI_CCA_THR_OFFSET_LEN);
		if (ret) {
			ath12k_warn(ab, "failed to set ACPI DSM CCA threshold: %d\n",
				    ret);
			return;
		}
	}

	if (ab->acpi.acpi_band_edge_enable) {
		ret = ath12k_wmi_set_bios_cmd(ab,
					      WMI_BIOS_PARAM_TYPE_BANDEDGE,
					      ab->acpi.band_edge_power,
					      sizeof(ab->acpi.band_edge_power));
		if (ret) {
			ath12k_warn(ab,
				    "failed to set ACPI DSM band edge channel power: %d\n",
				    ret);
			return;
		}
	}
}
int ath12k_acpi_start(struct ath12k_base *ab)
{
acpi_status status;
u8 *buf;
int ret;
ab->acpi.acpi_tas_enable = false;
ab->acpi.acpi_disable_11be = false;
ab->acpi.acpi_disable_rfkill = false;
ab->acpi.acpi_bios_sar_enable = false;
ab->acpi.acpi_cca_enable = false;
ab->acpi.acpi_band_edge_enable = false;
ab->acpi.acpi_enable_bdf = false;
ab->acpi.bdf_string[0] = '\0';
if (!ab->hw_params->acpi_guid)
/* not supported with this hardware */
return 0;
ab->acpi.acpi_tas_enable = false;
ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS);
if (ret) {
ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to get ACPI DSM data: %d\n", ret);
return ret;
}
if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_DISABLE_FLAG)) {
ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG);
if (ret) {
ath12k_warn(ab, "failed to get ACPI DISABLE FLAG: %d\n", ret);
return ret;
}
if (ATH12K_ACPI_CHEK_BIT_VALID(ab->acpi,
ATH12K_ACPI_DSM_DISABLE_11BE_BIT))
ab->acpi.acpi_disable_11be = true;
if (!ATH12K_ACPI_CHEK_BIT_VALID(ab->acpi,
ATH12K_ACPI_DSM_DISABLE_RFKILL_BIT))
ab->acpi.acpi_disable_rfkill = true;
}
if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_BDF_EXT)) {
ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_BDF_EXT);
if (ret || ab->acpi.bdf_string[0] == '\0') {
ath12k_warn(ab, "failed to get ACPI BDF EXT: %d\n", ret);
return ret;
}
ab->acpi.acpi_enable_bdf = true;
}
if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_CFG)) {
ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_CFG);
if (ret) {
@ -308,20 +441,6 @@ int ath12k_acpi_start(struct ath12k_base *ab)
ab->acpi.acpi_bios_sar_enable = true;
}
if (ab->acpi.acpi_tas_enable) {
ret = ath12k_acpi_set_tas_params(ab);
if (ret) {
ath12k_warn(ab, "failed to send ACPI parameters: %d\n", ret);
return ret;
}
}
if (ab->acpi.acpi_bios_sar_enable) {
ret = ath12k_acpi_set_bios_sar_params(ab);
if (ret)
return ret;
}
if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_CCA)) {
ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_INDEX_CCA);
if (ret) {
@ -332,18 +451,8 @@ int ath12k_acpi_start(struct ath12k_base *ab)
if (ab->acpi.cca_data[0] == ATH12K_ACPI_CCA_THR_VERSION &&
ab->acpi.cca_data[ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET] ==
ATH12K_ACPI_CCA_THR_ENABLE_FLAG) {
buf = ab->acpi.cca_data + ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET;
ret = ath12k_wmi_set_bios_cmd(ab,
WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE,
buf,
ATH12K_ACPI_CCA_THR_OFFSET_LEN);
if (ret) {
ath12k_warn(ab, "failed to set ACPI DSM CCA threshold: %d\n",
ret);
return ret;
}
}
ATH12K_ACPI_CCA_THR_ENABLE_FLAG)
ab->acpi.acpi_cca_enable = true;
}
if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi,
@ -356,18 +465,8 @@ int ath12k_acpi_start(struct ath12k_base *ab)
}
if (ab->acpi.band_edge_power[0] == ATH12K_ACPI_BAND_EDGE_VERSION &&
ab->acpi.band_edge_power[1] == ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG) {
ret = ath12k_wmi_set_bios_cmd(ab,
WMI_BIOS_PARAM_TYPE_BANDEDGE,
ab->acpi.band_edge_power,
sizeof(ab->acpi.band_edge_power));
if (ret) {
ath12k_warn(ab,
"failed to set ACPI DSM band edge channel power: %d\n",
ret);
return ret;
}
}
ab->acpi.band_edge_power[1] == ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG)
ab->acpi.acpi_band_edge_enable = true;
}
status = acpi_install_notify_handler(ACPI_HANDLE(ab->dev),
@ -383,6 +482,21 @@ int ath12k_acpi_start(struct ath12k_base *ab)
return 0;
}
int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab)
{
size_t max_len = sizeof(ab->qmi.target.bdf_ext);
if (!ab->acpi.acpi_enable_bdf)
return -ENODATA;
if (strscpy(ab->qmi.target.bdf_ext, ab->acpi.bdf_string + 4, max_len) < 0)
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"acpi bdf variant longer than the buffer (variant: %s)\n",
ab->acpi.bdf_string);
return 0;
}
void ath12k_acpi_stop(struct ath12k_base *ab)
{
if (!ab->acpi.started)

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_ACPI_H
#define ATH12K_ACPI_H
@ -9,6 +9,8 @@
#include <linux/acpi.h>
#define ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS 0
#define ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG 2
#define ATH12K_ACPI_DSM_FUNC_BDF_EXT 3
#define ATH12K_ACPI_DSM_FUNC_BIOS_SAR 4
#define ATH12K_ACPI_DSM_FUNC_GEO_OFFSET 5
#define ATH12K_ACPI_DSM_FUNC_INDEX_CCA 6
@ -16,6 +18,8 @@
#define ATH12K_ACPI_DSM_FUNC_TAS_DATA 9
#define ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE 10
#define ATH12K_ACPI_FUNC_BIT_DISABLE_FLAG BIT(1)
#define ATH12K_ACPI_FUNC_BIT_BDF_EXT BIT(2)
#define ATH12K_ACPI_FUNC_BIT_BIOS_SAR BIT(3)
#define ATH12K_ACPI_FUNC_BIT_GEO_OFFSET BIT(4)
#define ATH12K_ACPI_FUNC_BIT_CCA BIT(5)
@ -25,6 +29,7 @@
#define ATH12K_ACPI_NOTIFY_EVENT 0x86
#define ATH12K_ACPI_FUNC_BIT_VALID(_acdata, _func) (((_acdata).func_bit) & (_func))
#define ATH12K_ACPI_CHEK_BIT_VALID(_acdata, _func) (((_acdata).bit_flag) & (_func))
#define ATH12K_ACPI_TAS_DATA_VERSION 0x1
#define ATH12K_ACPI_TAS_DATA_ENABLE 0x1
@ -48,6 +53,16 @@
#define ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE 100
#define ATH12K_ACPI_DSM_TAS_CFG_SIZE 108
#define ATH12K_ACPI_DSM_FUNC_MIN_BITMAP_SIZE 1
#define ATH12K_ACPI_DSM_FUNC_MAX_BITMAP_SIZE 4
#define ATH12K_ACPI_DSM_DISABLE_11BE_BIT BIT(0)
#define ATH12K_ACPI_DSM_DISABLE_RFKILL_BIT BIT(2)
#define ATH12K_ACPI_BDF_ANCHOR_STRING_LEN 3
#define ATH12K_ACPI_BDF_ANCHOR_STRING "BDF"
#define ATH12K_ACPI_BDF_MAX_LEN 100
#define ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE (ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET + \
ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN)
#define ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE (ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET + \
@ -59,6 +74,10 @@
int ath12k_acpi_start(struct ath12k_base *ab);
void ath12k_acpi_stop(struct ath12k_base *ab);
bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab);
bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab);
void ath12k_acpi_set_dsm_func(struct ath12k_base *ab);
int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab);
#else
@ -71,6 +90,25 @@ static inline void ath12k_acpi_stop(struct ath12k_base *ab)
{
}
/* CONFIG_ACPI=n stub: rfkill is never ACPI-inhibited */
static inline bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab)
{
	return false;
}
/* CONFIG_ACPI=n stub: 802.11be is never ACPI-disabled */
static inline bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab)
{
	return false;
}
/* CONFIG_ACPI=n stub: nothing to push to firmware */
static inline void ath12k_acpi_set_dsm_func(struct ath12k_base *ab)
{
}
/* CONFIG_ACPI=n stub: no ACPI BDF variant, report success */
static inline int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab)
{
	return 0;
}
#endif /* CONFIG_ACPI */
#endif /* ATH12K_ACPI_H */

View File

@ -23,6 +23,10 @@ unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
bool ath12k_ftm_mode;
module_param_named(ftm_mode, ath12k_ftm_mode, bool, 0444);
MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");
/* protected with ath12k_hw_group_mutex */
static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);
@ -36,6 +40,9 @@ static int ath12k_core_rfkill_config(struct ath12k_base *ab)
if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
return 0;
if (ath12k_acpi_get_disable_rfkill(ab))
return 0;
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
@ -173,7 +180,7 @@ EXPORT_SYMBOL(ath12k_core_resume);
static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
size_t name_len, bool with_variant,
bool bus_type_mode)
bool bus_type_mode, bool with_default)
{
/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
@ -204,7 +211,9 @@ static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
"bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
ath12k_bus_str(ab->hif.bus),
ab->qmi.target.chip_id,
ab->qmi.target.board_id, variant);
with_default ?
ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id,
variant);
break;
}
@ -216,19 +225,19 @@ static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
size_t name_len)
{
return __ath12k_core_create_board_name(ab, name, name_len, true, false);
return __ath12k_core_create_board_name(ab, name, name_len, true, false, false);
}
static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
size_t name_len)
{
return __ath12k_core_create_board_name(ab, name, name_len, false, false);
return __ath12k_core_create_board_name(ab, name, name_len, false, false, true);
}
static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
size_t name_len)
{
return __ath12k_core_create_board_name(ab, name, name_len, false, true);
return __ath12k_core_create_board_name(ab, name, name_len, false, true, true);
}
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
@ -693,6 +702,11 @@ static int ath12k_core_soc_create(struct ath12k_base *ab)
{
int ret;
if (ath12k_ftm_mode) {
ab->fw_mode = ATH12K_FIRMWARE_MODE_FTM;
ath12k_info(ab, "Booting in ftm mode\n");
}
ret = ath12k_qmi_init_service(ab);
if (ret) {
ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
@ -741,8 +755,7 @@ static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
ath12k_dp_pdev_free(ab);
}
static int ath12k_core_start(struct ath12k_base *ab,
enum ath12k_firmware_mode mode)
static int ath12k_core_start(struct ath12k_base *ab)
{
int ret;
@ -836,10 +849,7 @@ static int ath12k_core_start(struct ath12k_base *ab,
goto err_reo_cleanup;
}
ret = ath12k_acpi_start(ab);
if (ret)
/* ACPI is optional so continue in case of an error */
ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", ret);
ath12k_acpi_set_dsm_func(ab);
if (!test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
/* Indicate the core start in the appropriate group */
@ -887,10 +897,41 @@ static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
ath12k_mac_destroy(ag);
}
/* Count the radios (pdevs) across every device in @ar's hw group that can
 * act as MLO partner links for @ar, i.e. all pdevs except @ar itself.
 *
 * Caller must hold the group mutex (checked via lockdep).
 */
u8 ath12k_get_num_partner_link(struct ath12k *ar)
{
	struct ath12k_hw_group *ag = ar->ab->ag;
	u8 links = 0;
	int dev_idx, pdev_idx;

	lockdep_assert_held(&ag->mutex);

	for (dev_idx = 0; dev_idx < ag->num_devices; dev_idx++) {
		struct ath12k_base *partner_ab = ag->ab[dev_idx];

		for (pdev_idx = 0; pdev_idx < partner_ab->num_radios; pdev_idx++) {
			/* Avoid the self link */
			if (partner_ab->pdevs[pdev_idx].ar == ar)
				continue;

			links++;
		}
	}

	return links;
}
static int __ath12k_mac_mlo_ready(struct ath12k *ar)
{
u8 num_link = ath12k_get_num_partner_link(ar);
int ret;
if (num_link == 0)
return 0;
ret = ath12k_wmi_mlo_ready(ar);
if (ret) {
ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
@ -920,19 +961,18 @@ int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
ar = &ah->radio[j];
ret = __ath12k_mac_mlo_ready(ar);
if (ret)
goto out;
return ret;
}
}
out:
return ret;
return 0;
}
static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
{
int ret, i;
if (!ag->mlo_capable || ag->num_devices == 1)
if (!ag->mlo_capable)
return 0;
ret = ath12k_mac_mlo_setup(ag);
@ -1068,7 +1108,7 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
int ret, i;
ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL);
ret = ath12k_core_start_firmware(ab, ab->fw_mode);
if (ret) {
ath12k_err(ab, "failed to start firmware: %d\n", ret);
return ret;
@ -1089,7 +1129,7 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
mutex_lock(&ag->mutex);
mutex_lock(&ab->core_lock);
ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL);
ret = ath12k_core_start(ab);
if (ret) {
ath12k_err(ab, "failed to start core: %d\n", ret);
goto err_dp_free;
@ -1122,16 +1162,18 @@ err_core_stop:
ath12k_core_stop(ab);
mutex_unlock(&ab->core_lock);
}
mutex_unlock(&ag->mutex);
goto exit;
err_dp_free:
ath12k_dp_free(ab);
mutex_unlock(&ab->core_lock);
mutex_unlock(&ag->mutex);
err_firmware_stop:
ath12k_qmi_firmware_stop(ab);
exit:
mutex_unlock(&ag->mutex);
return ret;
}
@ -1239,7 +1281,8 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
for (i = 0; i < ag->num_hw; i++) {
ah = ath12k_ag_to_ah(ag, i);
if (!ah || ah->state == ATH12K_HW_STATE_OFF)
if (!ah || ah->state == ATH12K_HW_STATE_OFF ||
ah->state == ATH12K_HW_STATE_TM)
continue;
ieee80211_stop_queues(ah->hw);
@ -1309,6 +1352,9 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
ath12k_warn(ab,
"device is wedged, will not restart hw %d\n", i);
break;
case ATH12K_HW_STATE_TM:
ath12k_warn(ab, "fw mode reset done radio %d\n", i);
break;
}
mutex_unlock(&ah->hw_mutex);
@ -1602,6 +1648,9 @@ static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *a
lockdep_assert_held(&ath12k_hw_group_mutex);
if (ath12k_ftm_mode)
goto invalid_group;
/* The grouping of multiple devices will be done based on device tree file.
* The platforms that do not have any valid group information would have
* each device to be part of its own invalid group.
@ -1789,19 +1838,19 @@ void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
struct ath12k_base *ab;
int i;
if (ath12k_ftm_mode)
return;
lockdep_assert_held(&ag->mutex);
/* If more than one devices are grouped, then inter MLO
* functionality can work still independent of whether internally
* each device supports single_chip_mlo or not.
* Only when there is one device, then it depends whether the
* device can support intra chip MLO or not
* Only when there is one device, then disable for WCN chipsets
* till the required driver implementation is in place.
*/
if (ag->num_devices > 1) {
ag->mlo_capable = true;
} else {
if (ag->num_devices == 1) {
ab = ag->ab[0];
ag->mlo_capable = ab->single_chip_mlo_supp;
/* WCN chipsets does not advertise in firmware features
* hence skip checking
@ -1810,8 +1859,7 @@ void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
return;
}
if (!ag->mlo_capable)
return;
ag->mlo_capable = true;
for (i = 0; i < ag->num_devices; i++) {
ab = ag->ab[i];
@ -1927,7 +1975,6 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
ab->dev = dev;
ab->hif.bus = bus;
ab->qmi.num_radios = U8_MAX;
ab->single_chip_mlo_supp = false;
/* Device index used to identify the devices in a group.
*

View File

@ -15,6 +15,7 @@
#include <linux/ctype.h>
#include <linux/firmware.h>
#include <linux/panic_notifier.h>
#include <linux/average.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
@ -52,8 +53,6 @@
#define ATH12K_INVALID_HW_MAC_ID 0xFF
#define ATH12K_CONNECTION_LOSS_HZ (3 * HZ)
#define ATH12K_RX_RATE_TABLE_NUM 320
#define ATH12K_RX_RATE_TABLE_11AX_NUM 576
#define ATH12K_MON_TIMER_INTERVAL 10
#define ATH12K_RESET_TIMEOUT_HZ (20 * HZ)
@ -87,6 +86,7 @@ enum wme_ac {
#define ATH12K_HT_MCS_MAX 7
#define ATH12K_VHT_MCS_MAX 9
#define ATH12K_HE_MCS_MAX 11
#define ATH12K_EHT_MCS_MAX 15
enum ath12k_crypt_mode {
/* Only use hardware crypto engine */
@ -141,6 +141,7 @@ struct ath12k_skb_rxcb {
u8 is_frag;
u8 tid;
u16 peer_id;
bool is_end_of_ppdu;
};
enum ath12k_hw_rev {
@ -166,6 +167,7 @@ struct ath12k_ext_irq_grp {
u32 num_irq;
u32 grp_id;
u64 timestamp;
bool napi_enabled;
struct napi_struct napi;
struct net_device *napi_ndev;
};
@ -235,6 +237,7 @@ enum ath12k_dev_flags {
ATH12K_FLAG_CE_IRQ_ENABLED,
ATH12K_FLAG_EXT_IRQ_ENABLED,
ATH12K_FLAG_QMI_FW_READY_COMPLETE,
ATH12K_FLAG_FTM_SEGMENTED,
};
struct ath12k_tx_conf {
@ -298,6 +301,8 @@ struct ath12k_link_vif {
u8 link_id;
struct ath12k_vif *ahvif;
struct ath12k_rekey_data rekey_data;
u8 current_cntdown_counter;
};
struct ath12k_vif {
@ -327,6 +332,7 @@ struct ath12k_vif {
u32 key_cipher;
u8 tx_encap_type;
bool ps;
atomic_t mcbc_gsn;
struct ath12k_link_vif deflink;
struct ath12k_link_vif __rcu *link[ATH12K_NUM_MAX_LINKS];
@ -354,20 +360,20 @@ struct ath12k_vif_iter {
#define HAL_RX_MAX_MCS_HT 31
#define HAL_RX_MAX_MCS_VHT 9
#define HAL_RX_MAX_MCS_HE 11
#define HAL_RX_MAX_MCS_BE 15
#define HAL_RX_MAX_NSS 8
#define HAL_RX_MAX_NUM_LEGACY_RATES 12
#define ATH12K_RX_RATE_TABLE_11AX_NUM 576
#define ATH12K_RX_RATE_TABLE_NUM 320
struct ath12k_rx_peer_rate_stats {
u64 ht_mcs_count[HAL_RX_MAX_MCS_HT + 1];
u64 vht_mcs_count[HAL_RX_MAX_MCS_VHT + 1];
u64 he_mcs_count[HAL_RX_MAX_MCS_HE + 1];
u64 be_mcs_count[HAL_RX_MAX_MCS_BE + 1];
u64 nss_count[HAL_RX_MAX_NSS];
u64 bw_count[HAL_RX_BW_MAX];
u64 gi_count[HAL_RX_GI_MAX];
u64 legacy_count[HAL_RX_MAX_NUM_LEGACY_RATES];
u64 rx_rate[ATH12K_RX_RATE_TABLE_11AX_NUM];
u64 rx_rate[HAL_RX_BW_MAX][HAL_RX_GI_MAX][HAL_RX_MAX_NSS][HAL_RX_MAX_MCS_HT + 1];
};
struct ath12k_rx_peer_stats {
@ -477,6 +483,8 @@ struct ath12k_wbm_tx_stats {
u64 wbm_tx_comp_stats[HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX];
};
DECLARE_EWMA(avg_rssi, 10, 8)
struct ath12k_link_sta {
struct ath12k_link_vif *arvif;
struct ath12k_sta *ahsta;
@ -496,10 +504,13 @@ struct ath12k_link_sta {
u64 rx_duration;
u64 tx_duration;
u8 rssi_comb;
struct ewma_avg_rssi avg_rssi;
u8 link_id;
struct ath12k_rx_peer_stats *rx_stats;
struct ath12k_wbm_tx_stats *wbm_tx_stats;
u32 bw_prev;
u32 peer_nss;
s8 rssi_beacon;
/* For now the assoc link will be considered primary */
bool is_assoc_link;
@ -534,18 +545,26 @@ enum ath12k_hw_state {
ATH12K_HW_STATE_RESTARTING,
ATH12K_HW_STATE_RESTARTED,
ATH12K_HW_STATE_WEDGED,
ATH12K_HW_STATE_TM,
/* Add other states as required */
};
/* Antenna noise floor */
#define ATH12K_DEFAULT_NOISE_FLOOR -95
/* State for reassembling segmented factory-test-mode (FTM) event data;
 * see ATH12K_FLAG_FTM_SEGMENTED. NOTE(review): field roles inferred from
 * names — confirm against the FTM event handler.
 */
struct ath12k_ftm_event_obj {
	u32 data_pos;		/* presumably current write offset into eventdata */
	u32 expected_seq;	/* presumably next expected segment sequence number */
	u8 *eventdata;		/* reassembly buffer */
};
/* Container for firmware statistics collected for one request; the lists
 * hold parsed per-pdev/per-vdev/beacon entries and fw_stats_done flags
 * completion of the transfer.
 */
struct ath12k_fw_stats {
	u32 pdev_id;
	u32 stats_id;
	struct list_head pdevs;	/* presumably struct ath12k_fw_stats_pdev entries */
	struct list_head vdevs;	/* presumably struct ath12k_fw_stats_vdev entries */
	struct list_head bcn;	/* presumably struct ath12k_fw_stats_bcn entries */
	bool fw_stats_done;
};
struct ath12k_dbg_htt_stats {
@ -559,6 +578,12 @@ struct ath12k_debug {
struct dentry *debugfs_pdev;
struct dentry *debugfs_pdev_symlink;
struct ath12k_dbg_htt_stats htt_stats;
enum wmi_halphy_ctrl_path_stats_id tpc_stats_type;
bool tpc_request;
struct completion tpc_complete;
struct wmi_tpc_stats_arg *tpc_stats;
u32 rx_filter;
bool extd_rx_stats;
};
struct ath12k_per_peer_tx_stats {
@ -712,8 +737,12 @@ struct ath12k {
bool nlo_enabled;
struct completion fw_stats_complete;
struct completion mlo_setup_done;
u32 mlo_setup_status;
u8 ftm_msgref;
struct ath12k_fw_stats fw_stats;
};
struct ath12k_hw {
@ -1026,9 +1055,6 @@ struct ath12k_base {
const struct hal_rx_ops *hal_rx_ops;
/* Denotes the whether MLO is possible within the chip */
bool single_chip_mlo_supp;
struct completion restart_completed;
#ifdef CONFIG_ACPI
@ -1038,6 +1064,13 @@ struct ath12k_base {
u32 func_bit;
bool acpi_tas_enable;
bool acpi_bios_sar_enable;
bool acpi_disable_11be;
bool acpi_disable_rfkill;
bool acpi_cca_enable;
bool acpi_band_edge_enable;
bool acpi_enable_bdf;
u32 bit_flag;
char bdf_string[ATH12K_ACPI_BDF_MAX_LEN];
u8 tas_cfg[ATH12K_ACPI_DSM_TAS_CFG_SIZE];
u8 tas_sar_power_table[ATH12K_ACPI_DSM_TAS_DATA_SIZE];
u8 bios_sar_data[ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE];
@ -1052,6 +1085,8 @@ struct ath12k_base {
struct ath12k_hw_group *ag;
struct ath12k_wsi_info wsi_info;
enum ath12k_firmware_mode fw_mode;
struct ath12k_ftm_event_obj ftm_event_obj;
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
@ -1062,6 +1097,93 @@ struct ath12k_pdev_map {
u8 pdev_idx;
};
/* Per-vdev statistics reported by firmware; list node, presumably linked
 * into struct ath12k_fw_stats::vdevs.
 */
struct ath12k_fw_stats_vdev {
	struct list_head list;
	u32 vdev_id;
	u32 beacon_snr;
	u32 data_snr;
	u32 num_tx_frames[WLAN_MAX_AC];		/* per WMM access category */
	u32 num_rx_frames;
	u32 num_tx_frames_retries[WLAN_MAX_AC];
	u32 num_tx_frames_failures[WLAN_MAX_AC];
	u32 num_rts_fail;
	u32 num_rts_success;
	u32 num_rx_err;
	u32 num_rx_discard;
	u32 num_tx_not_acked;
	u32 tx_rate_history[MAX_TX_RATE_VALUES];
	u32 beacon_rssi_history[MAX_TX_RATE_VALUES];
};
/* Per-vdev beacon transmission counters reported by firmware; list node,
 * presumably linked into struct ath12k_fw_stats::bcn.
 */
struct ath12k_fw_stats_bcn {
	struct list_head list;
	u32 vdev_id;
	u32 tx_bcn_succ_cnt;	/* beacons transmitted successfully */
	u32 tx_bcn_outage_cnt;	/* beacon transmission failures */
};
/* Per-pdev statistics reported by firmware; list node, presumably linked
 * into struct ath12k_fw_stats::pdevs. Grouped into general PDEV counters,
 * TX-path counters and RX-path counters, mirroring the firmware layout.
 */
struct ath12k_fw_stats_pdev {
	struct list_head list;
	/* PDEV stats */
	s32 ch_noise_floor;
	u32 tx_frame_count;
	u32 rx_frame_count;
	u32 rx_clear_count;
	u32 cycle_count;
	u32 phy_err_count;
	u32 chan_tx_power;
	u32 ack_rx_bad;
	u32 rts_bad;
	u32 rts_good;
	u32 fcs_bad;
	u32 no_beacons;
	u32 mib_int_count;
	/* PDEV TX stats */
	s32 comp_queued;
	s32 comp_delivered;
	s32 msdu_enqued;
	s32 mpdu_enqued;
	s32 wmm_drop;
	s32 local_enqued;
	s32 local_freed;
	s32 hw_queued;
	s32 hw_reaped;
	s32 underrun;
	s32 tx_abort;
	s32 mpdus_requed;
	u32 tx_ko;
	u32 data_rc;
	u32 self_triggers;
	u32 sw_retry_failure;
	u32 illgl_rate_phy_err;
	u32 pdev_cont_xretry;
	u32 pdev_tx_timeout;
	u32 pdev_resets;
	u32 stateless_tid_alloc_failure;
	u32 phy_underrun;
	u32 txop_ovf;
	/* PDEV RX stats */
	s32 mid_ppdu_route_change;
	s32 status_rcvd;
	s32 r0_frags;
	s32 r1_frags;
	s32 r2_frags;
	s32 r3_frags;
	s32 htt_msdus;
	s32 htt_mpdus;
	s32 loc_msdus;
	s32 loc_mpdus;
	s32 oversize_amsdu;
	s32 phy_errs;
	s32 phy_err_drop;
	s32 mpdu_errs;
};
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab);
int ath12k_core_pre_init(struct ath12k_base *ab);
int ath12k_core_init(struct ath12k_base *ath12k);
@ -1084,6 +1206,7 @@ int ath12k_core_resume(struct ath12k_base *ab);
int ath12k_core_suspend(struct ath12k_base *ab);
int ath12k_core_suspend_late(struct ath12k_base *ab);
void ath12k_core_hw_group_unassign(struct ath12k_base *ab);
u8 ath12k_get_num_partner_link(struct ath12k *ar);
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *filename);

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@ -63,8 +63,10 @@ void __ath12k_dbg(struct ath12k_base *ab, enum ath12k_debug_mask mask,
vaf.fmt = fmt;
vaf.va = &args;
if (ath12k_debug_mask & mask)
if (likely(ab))
dev_printk(KERN_DEBUG, ab->dev, "%pV", &vaf);
else
printk(KERN_DEBUG "ath12k: %pV", &vaf);
/* TODO: trace log */

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH12K_DEBUG_H_
@ -37,6 +37,7 @@ __printf(2, 3) void __ath12k_warn(struct device *dev, const char *fmt, ...);
#define ath12k_hw_warn(ah, fmt, ...) __ath12k_warn((ah)->dev, fmt, ##__VA_ARGS__)
extern unsigned int ath12k_debug_mask;
extern bool ath12k_ftm_mode;
#ifdef CONFIG_ATH12K_DEBUG
__printf(3, 4) void __ath12k_dbg(struct ath12k_base *ab,
@ -61,11 +62,14 @@ static inline void ath12k_dbg_dump(struct ath12k_base *ab,
}
#endif /* CONFIG_ATH12K_DEBUG */
#define ath12k_dbg(ar, dbg_mask, fmt, ...) \
#define ath12k_dbg(ab, dbg_mask, fmt, ...) \
do { \
typeof(dbg_mask) mask = (dbg_mask); \
if (ath12k_debug_mask & mask) \
__ath12k_dbg(ar, mask, fmt, ##__VA_ARGS__); \
__ath12k_dbg(ab, mask, fmt, ##__VA_ARGS__); \
} while (0)
#define ath12k_generic_dbg(dbg_mask, fmt, ...) \
ath12k_dbg(NULL, dbg_mask, fmt, ##__VA_ARGS__)
#endif /* _ATH12K_DEBUG_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH12K_DEBUGFS_H_
@ -12,6 +12,101 @@ void ath12k_debugfs_soc_create(struct ath12k_base *ab);
void ath12k_debugfs_soc_destroy(struct ath12k_base *ab);
void ath12k_debugfs_register(struct ath12k *ar);
void ath12k_debugfs_unregister(struct ath12k *ar);
void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
struct ath12k_fw_stats *stats);
void ath12k_debugfs_fw_stats_reset(struct ath12k *ar);
/* True when extended rx stats collection was enabled via debugfs */
static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
{
	return ar->debug.extd_rx_stats;
}
/* Return the rx filter value configured via debugfs */
static inline int ath12k_debugfs_rx_filter(struct ath12k *ar)
{
	return ar->debug.rx_filter;
}
/* Rate counts per preamble family used when sizing TPC stats tables */
#define ATH12K_CCK_RATES			4
#define ATH12K_OFDM_RATES			8
#define ATH12K_HT_RATES				8
#define ATH12K_VHT_RATES			12
#define ATH12K_HE_RATES				12
#define ATH12K_HE_RATES_WITH_EXTRA_MCS		14
#define ATH12K_EHT_RATES			16
#define HE_EXTRA_MCS_SUPPORT			GENMASK(31, 16)
#define ATH12K_NSS_1				1
#define ATH12K_NSS_4				4
#define ATH12K_NSS_8				8
/* Extract the NSS field (bits 7:5) from a hardware rate code */
#define ATH12K_HW_NSS(_rcode)			(((_rcode) >> 5) & 0x7)
/* Timeout waiting for the TPC stats WMI response */
#define TPC_STATS_WAIT_TIME			(1 * HZ)
#define MAX_TPC_PREAM_STR_LEN			7
#define TPC_INVAL				-128
#define TPC_MAX					127
/* note: a second, identical definition of TPC_STATS_WAIT_TIME was removed */
#define TPC_STATS_TOT_ROW			700
#define TPC_STATS_TOT_COLUMN			100
#define MODULATION_LIMIT			126

#define ATH12K_TPC_STATS_BUF_SIZE	(TPC_STATS_TOT_ROW * TPC_STATS_TOT_COLUMN)
/* Preamble/bandwidth combinations used to index TPC stats tables.
 * NOTE(review): the non-power-of-two EHT widths (60/120/140/200/240/280)
 * presumably correspond to punctured-bandwidth modes — confirm against
 * the firmware interface.
 */
enum wmi_tpc_pream_bw {
	WMI_TPC_PREAM_CCK,
	WMI_TPC_PREAM_OFDM,
	WMI_TPC_PREAM_HT20,
	WMI_TPC_PREAM_HT40,
	WMI_TPC_PREAM_VHT20,
	WMI_TPC_PREAM_VHT40,
	WMI_TPC_PREAM_VHT80,
	WMI_TPC_PREAM_VHT160,
	WMI_TPC_PREAM_HE20,
	WMI_TPC_PREAM_HE40,
	WMI_TPC_PREAM_HE80,
	WMI_TPC_PREAM_HE160,
	WMI_TPC_PREAM_EHT20,
	WMI_TPC_PREAM_EHT40,
	WMI_TPC_PREAM_EHT60,
	WMI_TPC_PREAM_EHT80,
	WMI_TPC_PREAM_EHT120,
	WMI_TPC_PREAM_EHT140,
	WMI_TPC_PREAM_EHT160,
	WMI_TPC_PREAM_EHT200,
	WMI_TPC_PREAM_EHT240,
	WMI_TPC_PREAM_EHT280,
	WMI_TPC_PREAM_EHT320,

	/* keep this last as the table-size sentinel */
	WMI_TPC_PREAM_MAX
};
/* CTL (conformance test limit) mode indices used when decoding TPC stats.
 * NOTE(review): values 14-22 are skipped and the EHT punctured modes
 * restart at 23 — presumably matching firmware numbering; confirm before
 * relying on contiguity.
 */
enum ath12k_debug_tpc_stats_ctl_mode {
	ATH12K_TPC_STATS_CTL_MODE_LEGACY_5GHZ_6GHZ,
	ATH12K_TPC_STATS_CTL_MODE_HT_VHT20_5GHZ_6GHZ,
	ATH12K_TPC_STATS_CTL_MODE_HE_EHT20_5GHZ_6GHZ,
	ATH12K_TPC_STATS_CTL_MODE_HT_VHT40_5GHZ_6GHZ,
	ATH12K_TPC_STATS_CTL_MODE_HE_EHT40_5GHZ_6GHZ,
	ATH12K_TPC_STATS_CTL_MODE_VHT80_5GHZ_6GHZ,
	ATH12K_TPC_STATS_CTL_MODE_HE_EHT80_5GHZ_6GHZ,
	ATH12K_TPC_STATS_CTL_MODE_VHT160_5GHZ_6GHZ,
	ATH12K_TPC_STATS_CTL_MODE_HE_EHT160_5GHZ_6GHZ,
	ATH12K_TPC_STATS_CTL_MODE_HE_EHT320_5GHZ_6GHZ,
	ATH12K_TPC_STATS_CTL_MODE_CCK_2GHZ,
	ATH12K_TPC_STATS_CTL_MODE_LEGACY_2GHZ,
	ATH12K_TPC_STATS_CTL_MODE_HT20_2GHZ,
	ATH12K_TPC_STATS_CTL_MODE_HT40_2GHZ,

	/* EHT single-user punctured-bandwidth modes */
	ATH12K_TPC_STATS_CTL_MODE_EHT80_SU_PUNC20 = 23,
	ATH12K_TPC_STATS_CTL_MODE_EHT160_SU_PUNC20,
	ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC40,
	ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC80,
	ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC120
};
/* Indices distinguishing which rate families / bandwidths the TPC stats
 * output must cover (presumably derived from device capabilities).
 */
enum ath12k_debug_tpc_stats_support_modes {
	ATH12K_TPC_STATS_SUPPORT_160 = 0,
	ATH12K_TPC_STATS_SUPPORT_320,
	ATH12K_TPC_STATS_SUPPORT_AX,
	ATH12K_TPC_STATS_SUPPORT_AX_EXTRA_MCS,
	ATH12K_TPC_STATS_SUPPORT_BE,
	ATH12K_TPC_STATS_SUPPORT_BE_PUNC,
};
#else
static inline void ath12k_debugfs_soc_create(struct ath12k_base *ab)
{
@ -29,6 +124,24 @@ static inline void ath12k_debugfs_unregister(struct ath12k *ar)
{
}
/* CONFIG_ATH12K_DEBUGFS=n stub: fw stats are not processed */
static inline void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
						   struct ath12k_fw_stats *stats)
{
}
/* CONFIG_ATH12K_DEBUGFS=n stub: nothing to reset */
static inline void ath12k_debugfs_fw_stats_reset(struct ath12k *ar)
{
}
/* CONFIG_ATH12K_DEBUGFS=n stub: extended rx stats never enabled */
static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
{
	return false;
}
/* CONFIG_ATH12K_DEBUGFS=n stub: no rx filter configured */
static inline int ath12k_debugfs_rx_filter(struct ath12k *ar)
{
	return 0;
}
#endif /* CONFIG_ATH12K_DEBUGFS */
#endif /* _ATH12K_DEBUGFS_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef DEBUG_HTT_STATS_H
@ -123,30 +123,38 @@ struct ath12k_htt_extd_stats_msg {
/* htt_dbg_ext_stats_type */
enum ath12k_dbg_htt_ext_stats_type {
ATH12K_DBG_HTT_EXT_STATS_RESET = 0,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
ATH12K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12,
ATH12K_DBG_HTT_EXT_STATS_SRNG_INFO = 15,
ATH12K_DBG_HTT_EXT_STATS_SFM_INFO = 16,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF = 31,
ATH12K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32,
ATH12K_DBG_HTT_EXT_STATS_DLPAGER_STATS = 36,
ATH12K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37,
ATH12K_DBG_HTT_EXT_VDEVS_TXRX_STATS = 38,
ATH12K_DBG_HTT_EXT_PDEV_PER_STATS = 40,
ATH12K_DBG_HTT_EXT_AST_ENTRIES = 41,
ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR = 45,
ATH12K_DBG_HTT_DBG_PDEV_PUNCTURE_STATS = 46,
ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49,
ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51,
ATH12K_DGB_HTT_EXT_STATS_PDEV_MBSSID_CTRL_FRAME = 54,
ATH12K_DBG_HTT_EXT_STATS_RESET = 0,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE = 9,
ATH12K_DBG_HTT_EXT_STATS_PDEV_RX_RATE = 10,
ATH12K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12,
ATH12K_DBG_HTT_EXT_STATS_SRNG_INFO = 15,
ATH12K_DBG_HTT_EXT_STATS_SFM_INFO = 16,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
ATH12K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22,
ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
ATH12K_DBG_HTT_EXT_STATS_LATENCY_PROF_STATS = 25,
ATH12K_DBG_HTT_EXT_STATS_PDEV_UL_TRIG_STATS = 26,
ATH12K_DBG_HTT_EXT_STATS_PDEV_UL_MUMIMO_TRIG_STATS = 27,
ATH12K_DBG_HTT_EXT_STATS_FSE_RX = 28,
ATH12K_DBG_HTT_EXT_STATS_PDEV_RX_RATE_EXT = 30,
ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF = 31,
ATH12K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32,
ATH12K_DBG_HTT_EXT_STATS_DLPAGER_STATS = 36,
ATH12K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37,
ATH12K_DBG_HTT_EXT_VDEVS_TXRX_STATS = 38,
ATH12K_DBG_HTT_EXT_PDEV_PER_STATS = 40,
ATH12K_DBG_HTT_EXT_AST_ENTRIES = 41,
ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR = 45,
ATH12K_DBG_HTT_DBG_PDEV_PUNCTURE_STATS = 46,
ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49,
ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51,
ATH12K_DGB_HTT_EXT_STATS_PDEV_MBSSID_CTRL_FRAME = 54,
/* keep this last */
ATH12K_DBG_HTT_NUM_EXT_STATS,
@ -173,6 +181,8 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG = 25,
HTT_STATS_SFM_CMN_TAG = 26,
HTT_STATS_SRING_STATS_TAG = 27,
HTT_STATS_TX_PDEV_RATE_STATS_TAG = 34,
HTT_STATS_RX_PDEV_RATE_STATS_TAG = 35,
HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36,
HTT_STATS_TX_SCHED_CMN_TAG = 37,
HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39,
@ -195,12 +205,21 @@ enum ath12k_dbg_htt_tlv_tag {
HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG = 72,
HTT_STATS_PDEV_CCA_COUNTERS_TAG = 73,
HTT_STATS_TX_PDEV_MPDU_STATS_TAG = 74,
HTT_STATS_TX_SOUNDING_STATS_TAG = 80,
HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86,
HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87,
HTT_STATS_PDEV_OBSS_PD_TAG = 88,
HTT_STATS_HW_WAR_TAG = 89,
HTT_STATS_LATENCY_PROF_STATS_TAG = 91,
HTT_STATS_LATENCY_CTX_TAG = 92,
HTT_STATS_LATENCY_CNT_TAG = 93,
HTT_STATS_RX_PDEV_UL_TRIG_STATS_TAG = 94,
HTT_STATS_RX_PDEV_UL_OFDMA_USER_STATS_TAG = 95,
HTT_STATS_RX_PDEV_UL_MUMIMO_TRIG_STATS_TAG = 97,
HTT_STATS_RX_FSE_STATS_TAG = 98,
HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG = 100,
HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG = 102,
HTT_STATS_RX_PDEV_RATE_EXT_STATS_TAG = 103,
HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG = 108,
HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG = 111,
HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG = 112,
@ -387,6 +406,182 @@ struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv {
__le32 num_ppdu_posted_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS];
} __packed;
#define ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS 12
#define ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS 4
#define ATH12K_HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS 5
#define ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS 4
#define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
#define ATH12K_HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES 7
#define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4
#define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
#define ATH12K_HTT_TX_PDEV_STATS_NUM_LTF 4
#define ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS 2
#define ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS 2
#define ATH12K_HTT_TX_PDEV_STATS_NUM_11AX_TRIGGER_TYPES 6
/* TX pdev rate statistics TLV (HTT_STATS_TX_PDEV_RATE_STATS_TAG = 34).
 * Per-rate transmit counters reported by firmware: MCS/NSS/BW/GI histograms
 * for SU, AC/AX MU-MIMO and OFDMA transmissions.  All counters are
 * firmware-endian __le32; field order mirrors the firmware HTT layout and
 * must not be changed.
 */
struct ath12k_htt_tx_pdev_rate_stats_tlv {
__le32 mac_id_word;
__le32 tx_ldpc;
__le32 rts_cnt;
__le32 ack_rssi;
__le32 tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
__le32 tx_su_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
__le32 tx_mu_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
__le32 tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
__le32 tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
__le32 tx_stbc[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
__le32 tx_pream[ATH12K_HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES];
/* tx_gi is indexed [GI][MCS] */
__le32 tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
__le32 tx_dcm[ATH12K_HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS];
__le32 rts_success;
__le32 tx_legacy_cck_rate[ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
__le32 tx_legacy_ofdm_rate[ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
__le32 ac_mu_mimo_tx_ldpc;
__le32 ax_mu_mimo_tx_ldpc;
__le32 ofdma_tx_ldpc;
__le32 tx_he_ltf[ATH12K_HTT_TX_PDEV_STATS_NUM_LTF];
__le32 ac_mu_mimo_tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
__le32 ax_mu_mimo_tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
__le32 ofdma_tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
__le32 ac_mu_mimo_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
__le32 ax_mu_mimo_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
__le32 ofdma_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
__le32 ac_mu_mimo_tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
__le32 ax_mu_mimo_tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
__le32 ofdma_tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
__le32 ac_mu_mimo_tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
__le32 ax_mimo_tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
__le32 ofdma_tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
__le32 trigger_type_11ax[ATH12K_HTT_TX_PDEV_STATS_NUM_11AX_TRIGGER_TYPES];
__le32 tx_11ax_su_ext;
__le32 tx_mcs_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
__le32 tx_stbc_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
__le32 tx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
__le32 ax_mu_mimo_tx_mcs_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
__le32 ofdma_tx_mcs_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
__le32 ax_tx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
__le32 ofd_tx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
__le32 tx_mcs_ext_2[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
__le32 tx_bw_320mhz;
/* NOTE(review): unlike neighboring HTT TLV structs this one is not
 * marked __packed — all members are __le32 so the layout is naturally
 * packed, but confirm this matches upstream intent.
 */
};
#define ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4
#define ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
#define ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS 12
#define ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS 4
#define ATH12K_HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS 5
#define ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS 4
#define ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
#define ATH12K_HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES 7
#define ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER 8
#define ATH12K_HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_NSS 16
#define ATH12K_HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS 6
#define ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER 8
#define ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS 2
/* RX pdev rate statistics TLV (HTT_STATS_RX_PDEV_RATE_STATS_TAG = 35).
 * Per-rate receive counters: MCS/NSS/BW/GI histograms, per-chain RSSI,
 * UL OFDMA/MU-MIMO per-user counters and pilot EVM data.  Field order
 * mirrors the firmware HTT layout and must not be changed.
 */
struct ath12k_htt_rx_pdev_rate_stats_tlv {
__le32 mac_id_word;
__le32 nsts;
__le32 rx_ldpc;
__le32 rts_cnt;
__le32 rssi_mgmt;
__le32 rssi_data;
__le32 rssi_comb;
__le32 rx_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
__le32 rx_nss[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
__le32 rx_dcm[ATH12K_HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS];
__le32 rx_stbc[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
__le32 rx_bw[ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
__le32 rx_pream[ATH12K_HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES];
/* per-chain RSSI in dB, indexed [NSS][BW]; byte-sized on purpose */
u8 rssi_chain_in_db[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
[ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
__le32 rx_gi[ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]
[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
__le32 rssi_in_dbm;
__le32 rx_11ax_su_ext;
__le32 rx_11ac_mumimo;
__le32 rx_11ax_mumimo;
__le32 rx_11ax_ofdma;
__le32 txbf;
__le32 rx_legacy_cck_rate[ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
__le32 rx_legacy_ofdm_rate[ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
__le32 rx_active_dur_us_low;
__le32 rx_active_dur_us_high;
__le32 rx_11ax_ul_ofdma;
__le32 ul_ofdma_rx_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
/* NOTE(review): the next three fields use TX_PDEV_* array-size macros
 * inside an RX struct; the TX and RX values are currently equal (4/8/4)
 * so the layout is unaffected — confirm against upstream before touching.
 */
__le32 ul_ofdma_rx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
__le32 ul_ofdma_rx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
__le32 ul_ofdma_rx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
__le32 ul_ofdma_rx_stbc;
__le32 ul_ofdma_rx_ldpc;
__le32 rx_ulofdma_non_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
__le32 rx_ulofdma_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
__le32 rx_ulofdma_mpdu_ok[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
__le32 rx_ulofdma_mpdu_fail[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
__le32 nss_count;
__le32 pilot_count;
__le32 rx_pil_evm_db[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
[ATH12K_HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_NSS];
__le32 rx_pilot_evm_db_mean[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
/* signed per-user full-duplex RSSI in dBm */
s8 rx_ul_fd_rssi[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
__le32 per_chain_rssi_pkt_type;
s8 rx_per_chain_rssi_in_dbm[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
[ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
__le32 rx_su_ndpa;
__le32 rx_11ax_su_txbf_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
__le32 rx_mu_ndpa;
__le32 rx_11ax_mu_txbf_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
__le32 rx_br_poll;
__le32 rx_11ax_dl_ofdma_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
__le32 rx_11ax_dl_ofdma_ru[ATH12K_HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS];
__le32 rx_ulmumimo_non_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
__le32 rx_ulmumimo_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
__le32 rx_ulmumimo_mpdu_ok[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
__le32 rx_ulmumimo_mpdu_fail[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
__le32 rx_ulofdma_non_data_nusers[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
__le32 rx_ulofdma_data_nusers[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
__le32 rx_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
};
#define ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS 4
#define ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT 14
#define ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS 2
#define ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT2_COUNTERS 5
#define ATH12K_HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS 5
/* Extended RX pdev rate statistics TLV
 * (HTT_STATS_RX_PDEV_RATE_EXT_STATS_TAG = 103).
 * Extends the base rate TLV with EHT/320 MHz counters, extra MCS indexes
 * and punctured-mode stats.  Field order mirrors the firmware HTT layout.
 */
struct ath12k_htt_rx_pdev_rate_ext_stats_tlv {
u8 rssi_chain_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
[ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS];
s8 rx_per_chain_rssi_ext_in_dbm[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
[ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS];
__le32 rssi_mcast_in_dbm;
__le32 rssi_mgmt_in_dbm;
__le32 rx_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
__le32 rx_stbc_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
__le32 rx_gi_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]
[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
__le32 ul_ofdma_rx_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
/* NOTE(review): TX_PDEV GI-count macro reused here; equal value (4),
 * layout unaffected — confirm against upstream.
 */
__le32 ul_ofdma_rx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
__le32 rx_11ax_su_txbf_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
__le32 rx_11ax_mu_txbf_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
__le32 rx_11ax_dl_ofdma_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
__le32 rx_mcs_ext_2[ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
__le32 rx_bw_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT2_COUNTERS];
__le32 rx_gi_ext_2[ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]
[ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
__le32 rx_su_punctured_mode[ATH12K_HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS];
};
#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0)
#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID GENMASK(15, 8)
@ -1058,6 +1253,82 @@ struct ath12k_htt_pdev_cca_stats_hist_v1_tlv {
__le32 collection_interval;
} __packed;
#define ATH12K_HTT_TX_CV_CORR_MAX_NUM_COLUMNS 8
#define ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS 4
#define ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS 8
#define ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS 8
#define ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS 4
#define ATH12K_HTT_TX_NUM_MCS_CNTRS 12
#define ATH12K_HTT_TX_NUM_EXTRA_MCS_CNTRS 2
#define ATH12K_HTT_TX_NUM_OF_SOUNDING_STATS_WORDS \
(ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS * \
ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS)
/* TXBF steering modes used to index the cbf_* arrays in the sounding
 * stats TLV: implicit, explicit SU (SIFS/RBO) and explicit MU (SIFS/RBO).
 */
enum ath12k_htt_txbf_sound_steer_modes {
ATH12K_HTT_IMPL_STEER_STATS = 0,
ATH12K_HTT_EXPL_SUSIFS_STEER_STATS = 1,
ATH12K_HTT_EXPL_SURBO_STEER_STATS = 2,
ATH12K_HTT_EXPL_MUSIFS_STEER_STATS = 3,
ATH12K_HTT_EXPL_MURBO_STEER_STATS = 4,
ATH12K_HTT_TXBF_MAX_NUM_OF_MODES = 5 /* array-size sentinel */
};
/* Value of tx_sounding_mode in the sounding stats TLV; selects which
 * generation (AC/AX/BE) or the common set the counters refer to.
 */
enum ath12k_htt_stats_sounding_tx_mode {
ATH12K_HTT_TX_AC_SOUNDING_MODE = 0,
ATH12K_HTT_TX_AX_SOUNDING_MODE = 1,
ATH12K_HTT_TX_BE_SOUNDING_MODE = 2,
ATH12K_HTT_TX_CMN_SOUNDING_MODE = 3,
};
/* TX sounding statistics TLV (HTT_STATS_TX_SOUNDING_STATS_TAG = 80).
 * CBF counts per bandwidth/steer mode plus channel-vector (CV) bookkeeping
 * and error counters.  tx_sounding_mode holds an
 * ath12k_htt_stats_sounding_tx_mode value.
 */
struct ath12k_htt_tx_sounding_stats_tlv {
__le32 tx_sounding_mode;
/* cbf_* are indexed by ath12k_htt_txbf_sound_steer_modes */
__le32 cbf_20[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES];
__le32 cbf_40[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES];
__le32 cbf_80[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES];
__le32 cbf_160[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES];
/* [BW][user] flattened: NUM_BW_COUNTERS * NUM_AX_MUMIMO_USER_STATS */
__le32 sounding[ATH12K_HTT_TX_NUM_OF_SOUNDING_STATS_WORDS];
__le32 cv_nc_mismatch_err;
__le32 cv_fcs_err;
__le32 cv_frag_idx_mismatch;
__le32 cv_invalid_peer_id;
__le32 cv_no_txbf_setup;
__le32 cv_expiry_in_update;
__le32 cv_pkt_bw_exceed;
__le32 cv_dma_not_done_err;
__le32 cv_update_failed;
__le32 cv_total_query;
__le32 cv_total_pattern_query;
__le32 cv_total_bw_query;
__le32 cv_invalid_bw_coding;
__le32 cv_forced_sounding;
__le32 cv_standalone_sounding;
__le32 cv_nc_mismatch;
__le32 cv_fb_type_mismatch;
__le32 cv_ofdma_bw_mismatch;
__le32 cv_bw_mismatch;
__le32 cv_pattern_mismatch;
__le32 cv_preamble_mismatch;
__le32 cv_nr_mismatch;
__le32 cv_in_use_cnt_exceeded;
__le32 cv_found;
__le32 cv_not_found;
/* 320 MHz (11be) additions */
__le32 sounding_320[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS];
__le32 cbf_320[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES];
__le32 cv_ntbr_sounding;
__le32 cv_found_upload_in_progress;
__le32 cv_expired_during_query;
__le32 cv_dma_timeout_error;
__le32 cv_buf_ibf_uploads;
__le32 cv_buf_ebf_uploads;
__le32 cv_buf_received;
__le32 cv_buf_fed_back;
__le32 cv_total_query_ibf;
__le32 cv_found_ibf;
__le32 cv_not_found_ibf;
__le32 cv_expired_during_query_ibf;
} __packed;
struct ath12k_htt_pdev_obss_pd_stats_tlv {
__le32 num_obss_tx_ppdu_success;
__le32 num_obss_tx_ppdu_failure;
@ -1080,6 +1351,127 @@ struct ath12k_htt_pdev_obss_pd_stats_tlv {
__le32 num_sr_ppdu_abort_flush_cnt;
} __packed;
#define ATH12K_HTT_STATS_MAX_PROF_STATS_NAME_LEN 32
#define ATH12K_HTT_LATENCY_PROFILE_NUM_MAX_HIST 3
#define ATH12K_HTT_INTERRUPTS_LATENCY_PROFILE_MAX_HIST 3
/* Firmware latency-profile sample TLV (HTT_STATS_LATENCY_PROF_STATS_TAG = 91).
 * One named profiling point with min/max/avg latency and a small histogram.
 */
struct ath12k_htt_latency_prof_stats_tlv {
__le32 print_header;
/* NUL-padded profile name from firmware; may not be NUL-terminated
 * when exactly 32 bytes long — treat as fixed-width.
 */
s8 latency_prof_name[ATH12K_HTT_STATS_MAX_PROF_STATS_NAME_LEN];
__le32 cnt;
__le32 min;
__le32 max;
__le32 last;
__le32 tot;
__le32 avg;
__le32 hist_intvl; /* histogram bucket width */
__le32 hist[ATH12K_HTT_LATENCY_PROFILE_NUM_MAX_HIST];
} __packed;
/* Context for the latency-profile TLVs (HTT_STATS_LATENCY_CTX_TAG = 92):
 * duration of the sampling window and traffic counters during it.
 */
struct ath12k_htt_latency_prof_ctx_tlv {
__le32 duration;
__le32 tx_msdu_cnt;
__le32 tx_mpdu_cnt;
__le32 tx_ppdu_cnt;
__le32 rx_msdu_cnt;
__le32 rx_mpdu_cnt;
} __packed;
/* Number of enabled latency profiles (HTT_STATS_LATENCY_CNT_TAG = 93). */
struct ath12k_htt_latency_prof_cnt_tlv {
__le32 prof_enable_cnt;
} __packed;
#define ATH12K_HTT_RX_NUM_MCS_CNTRS 12
#define ATH12K_HTT_RX_NUM_GI_CNTRS 4
#define ATH12K_HTT_RX_NUM_SPATIAL_STREAMS 8
#define ATH12K_HTT_RX_NUM_BW_CNTRS 4
#define ATH12K_HTT_RX_NUM_RU_SIZE_CNTRS 6
#define ATH12K_HTT_RX_NUM_RU_SIZE_160MHZ_CNTRS 7
#define ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK 5
#define ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES 2
#define ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS 2
/* 11ax (HE) resource-unit sizes, used to index data/non-data RU PPDU
 * counters in the UL trigger stats TLV.
 * NOTE(review): the all-caps enum tag is unconventional but referenced
 * elsewhere, so it must not be renamed here.
 */
enum ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE {
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26,
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52,
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106,
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242,
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484,
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996,
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2,
ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS, /* sentinel */
};
/* Per-user UL OFDMA counters
 * (HTT_STATS_RX_PDEV_UL_OFDMA_USER_STATS_TAG = 95); one TLV per user_index.
 */
struct ath12k_htt_rx_pdev_ul_ofdma_user_stats_tlv {
__le32 user_index;
__le32 rx_ulofdma_non_data_ppdu;
__le32 rx_ulofdma_data_ppdu;
__le32 rx_ulofdma_mpdu_ok;
__le32 rx_ulofdma_mpdu_fail;
__le32 rx_ulofdma_non_data_nusers;
__le32 rx_ulofdma_data_nusers;
} __packed;
/* UL OFDMA trigger statistics TLV
 * (HTT_STATS_RX_PDEV_UL_TRIG_STATS_TAG = 94): rate histograms for
 * trigger-based uplink receptions plus per-STA uplink power tracking.
 */
struct ath12k_htt_rx_pdev_ul_trigger_stats_tlv {
__le32 mac_id__word;
__le32 rx_11ax_ul_ofdma;
__le32 ul_ofdma_rx_mcs[ATH12K_HTT_RX_NUM_MCS_CNTRS];
__le32 ul_ofdma_rx_gi[ATH12K_HTT_RX_NUM_GI_CNTRS][ATH12K_HTT_RX_NUM_MCS_CNTRS];
__le32 ul_ofdma_rx_nss[ATH12K_HTT_RX_NUM_SPATIAL_STREAMS];
__le32 ul_ofdma_rx_bw[ATH12K_HTT_RX_NUM_BW_CNTRS];
__le32 ul_ofdma_rx_stbc;
__le32 ul_ofdma_rx_ldpc;
/* indexed by ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE */
__le32 data_ru_size_ppdu[ATH12K_HTT_RX_NUM_RU_SIZE_160MHZ_CNTRS];
__le32 non_data_ru_size_ppdu[ATH12K_HTT_RX_NUM_RU_SIZE_160MHZ_CNTRS];
__le32 uplink_sta_aid[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
__le32 uplink_sta_target_rssi[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
__le32 uplink_sta_fd_rssi[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
__le32 uplink_sta_power_headroom[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
__le32 red_bw[ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_RX_NUM_BW_CNTRS];
__le32 ul_ofdma_bsc_trig_rx_qos_null_only;
} __packed;
#define ATH12K_HTT_TX_UL_MUMIMO_USER_STATS 8
/* UL MU-MIMO trigger statistics TLV
 * (HTT_STATS_RX_PDEV_UL_MUMIMO_TRIG_STATS_TAG = 97): rate histograms plus
 * per-user signed RSSI/EVM tracking for trigger-based UL MU-MIMO.
 */
struct ath12k_htt_rx_ul_mumimo_trig_stats_tlv {
__le32 mac_id__word;
__le32 rx_11ax_ul_mumimo;
__le32 ul_mumimo_rx_mcs[ATH12K_HTT_RX_NUM_MCS_CNTRS];
__le32 ul_rx_gi[ATH12K_HTT_RX_NUM_GI_CNTRS][ATH12K_HTT_RX_NUM_MCS_CNTRS];
__le32 ul_mumimo_rx_nss[ATH12K_HTT_RX_NUM_SPATIAL_STREAMS];
__le32 ul_mumimo_rx_bw[ATH12K_HTT_RX_NUM_BW_CNTRS];
__le32 ul_mumimo_rx_stbc;
__le32 ul_mumimo_rx_ldpc;
__le32 ul_mumimo_rx_mcs_ext[ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS];
__le32 ul_gi_ext[ATH12K_HTT_RX_NUM_GI_CNTRS][ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS];
/* signed dBm/dB values from firmware */
s8 ul_rssi[ATH12K_HTT_RX_NUM_SPATIAL_STREAMS][ATH12K_HTT_RX_NUM_BW_CNTRS];
s8 tgt_rssi[ATH12K_HTT_TX_UL_MUMIMO_USER_STATS][ATH12K_HTT_RX_NUM_BW_CNTRS];
s8 fd[ATH12K_HTT_TX_UL_MUMIMO_USER_STATS][ATH12K_HTT_RX_NUM_SPATIAL_STREAMS];
s8 db[ATH12K_HTT_TX_UL_MUMIMO_USER_STATS][ATH12K_HTT_RX_NUM_SPATIAL_STREAMS];
__le32 red_bw[ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_RX_NUM_BW_CNTRS];
__le32 mumimo_bsc_trig_rx_qos_null_only;
} __packed;
#define ATH12K_HTT_RX_NUM_MAX_PEAK_OCCUPANCY_INDEX 10
#define ATH12K_HTT_RX_NUM_MAX_CURR_OCCUPANCY_INDEX 10
#define ATH12K_HTT_RX_NUM_SQUARE_INDEX 6
#define ATH12K_HTT_RX_NUM_MAX_PEAK_SEARCH_INDEX 4
#define ATH12K_HTT_RX_NUM_MAX_PENDING_SEARCH_INDEX 4
/* RX flow search engine statistics TLV (HTT_STATS_RX_FSE_STATS_TAG = 98):
 * FSE enable/disable and cache hit/occupancy/search counters.
 */
struct ath12k_htt_rx_fse_stats_tlv {
__le32 fse_enable_cnt;
__le32 fse_disable_cnt;
__le32 fse_cache_invalidate_entry_cnt;
__le32 fse_full_cache_invalidate_cnt;
__le32 fse_num_cache_hits_cnt;
__le32 fse_num_searches_cnt;
__le32 fse_cache_occupancy_peak_cnt[ATH12K_HTT_RX_NUM_MAX_PEAK_OCCUPANCY_INDEX];
__le32 fse_cache_occupancy_curr_cnt[ATH12K_HTT_RX_NUM_MAX_CURR_OCCUPANCY_INDEX];
__le32 fse_search_stat_square_cnt[ATH12K_HTT_RX_NUM_SQUARE_INDEX];
__le32 fse_search_stat_peak_cnt[ATH12K_HTT_RX_NUM_MAX_PEAK_SEARCH_INDEX];
__le32 fse_search_stat_pending_cnt[ATH12K_HTT_RX_NUM_MAX_PENDING_SEARCH_INDEX];
} __packed;
#define ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS 14
#define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
#define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
@ -1417,17 +1809,6 @@ enum ATH12K_HTT_RC_MODE {
ATH12K_HTT_RC_MODE_2D_COUNT
};
enum ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE {
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26,
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52,
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106,
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242,
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484,
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996,
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2,
ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS
};
enum ath12k_htt_stats_rc_mode {
ATH12K_HTT_STATS_RC_MODE_DLSU = 0,
ATH12K_HTT_STATS_RC_MODE_DLMUMIMO = 1,

View File

@ -0,0 +1,337 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
#include "debugfs_sta.h"
#include "core.h"
#include "peer.h"
#include "debug.h"
#include "debugfs_htt_stats.h"
#include "debugfs.h"
/* ath12k_dbg_sta_dump_rate_stats - format one rx rate-stats table into @buf
 *
 * Appends, starting at @offset, per-MCS histograms for EHT/HE/VHT/HT,
 * legacy rates, NSS, GI and BW, followed by a per-(bw, gi, nss) MCS
 * breakdown of stats->rx_rate.  Returns the number of bytes written
 * (i.e. new length minus @offset).
 *
 * NOTE(review): @he_rates_avail is currently unused in this body —
 * presumably kept for interface parity with the caller; confirm.
 */
static
u32 ath12k_dbg_sta_dump_rate_stats(u8 *buf, u32 offset, const int size,
bool he_rates_avail,
const struct ath12k_rx_peer_rate_stats *stats)
{
static const char *legacy_rate_str[HAL_RX_MAX_NUM_LEGACY_RATES] = {
"1 Mbps", "2 Mbps", "5.5 Mbps", "6 Mbps",
"9 Mbps", "11 Mbps", "12 Mbps", "18 Mbps",
"24 Mbps", "36 Mbps", "48 Mbps", "54 Mbps"};
/* NOTE(review): max_mcs is initialized from HAL_RX_MAX_NSS and used as
 * the NSS wrap bound below — misleading name, but behavior is intended.
 */
u8 max_bw = HAL_RX_BW_MAX, max_gi = HAL_RX_GI_MAX, max_mcs = HAL_RX_MAX_NSS;
int mcs = 0, bw = 0, nss = 0, gi = 0, bw_num = 0;
u32 i, len = offset, max = max_bw * max_gi * max_mcs;
bool found;
/* Per-PHY-generation MCS histograms; the modulus controls how many
 * entries are printed per row.
 */
len += scnprintf(buf + len, size - len, "\nEHT stats:\n");
for (i = 0; i <= HAL_RX_MAX_MCS_BE; i++)
len += scnprintf(buf + len, size - len,
"MCS %d: %llu%s", i, stats->be_mcs_count[i],
(i + 1) % 8 ? "\t" : "\n");
len += scnprintf(buf + len, size - len, "\nHE stats:\n");
for (i = 0; i <= HAL_RX_MAX_MCS_HE; i++)
len += scnprintf(buf + len, size - len,
"MCS %d: %llu%s", i, stats->he_mcs_count[i],
(i + 1) % 6 ? "\t" : "\n");
len += scnprintf(buf + len, size - len, "\nVHT stats:\n");
for (i = 0; i <= HAL_RX_MAX_MCS_VHT; i++)
len += scnprintf(buf + len, size - len,
"MCS %d: %llu%s", i, stats->vht_mcs_count[i],
(i + 1) % 5 ? "\t" : "\n");
len += scnprintf(buf + len, size - len, "\nHT stats:\n");
for (i = 0; i <= HAL_RX_MAX_MCS_HT; i++)
len += scnprintf(buf + len, size - len,
"MCS %d: %llu%s", i, stats->ht_mcs_count[i],
(i + 1) % 8 ? "\t" : "\n");
len += scnprintf(buf + len, size - len, "\nLegacy stats:\n");
for (i = 0; i < HAL_RX_MAX_NUM_LEGACY_RATES; i++)
len += scnprintf(buf + len, size - len,
"%s: %llu%s", legacy_rate_str[i],
stats->legacy_count[i],
(i + 1) % 4 ? "\t" : "\n");
len += scnprintf(buf + len, size - len, "\nNSS stats:\n");
for (i = 0; i < HAL_RX_MAX_NSS; i++)
len += scnprintf(buf + len, size - len,
"%dx%d: %llu ", i + 1, i + 1,
stats->nss_count[i]);
len += scnprintf(buf + len, size - len,
"\n\nGI: 0.8 us %llu 0.4 us %llu 1.6 us %llu 3.2 us %llu\n",
stats->gi_count[0],
stats->gi_count[1],
stats->gi_count[2],
stats->gi_count[3]);
len += scnprintf(buf + len, size - len,
"BW: 20 MHz %llu 40 MHz %llu 80 MHz %llu 160 MHz %llu 320 MHz %llu\n",
stats->bw_count[0],
stats->bw_count[1],
stats->bw_count[2],
stats->bw_count[3],
stats->bw_count[4]);
/* Walk every (bw, gi, nss) combination (nss varies fastest; see the
 * odometer-style increment at skip_report) and print only combinations
 * that have at least one non-zero MCS counter.
 */
for (i = 0; i < max; i++) {
found = false;
for (mcs = 0; mcs <= HAL_RX_MAX_MCS_HT; mcs++) {
if (stats->rx_rate[bw][gi][nss][mcs]) {
found = true;
break;
}
}
if (!found)
goto skip_report;
switch (bw) {
case HAL_RX_BW_20MHZ:
bw_num = 20;
break;
case HAL_RX_BW_40MHZ:
bw_num = 40;
break;
case HAL_RX_BW_80MHZ:
bw_num = 80;
break;
case HAL_RX_BW_160MHZ:
bw_num = 160;
break;
case HAL_RX_BW_320MHZ:
bw_num = 320;
break;
}
len += scnprintf(buf + len, size - len, "\n%d Mhz gi %d us %dx%d : ",
bw_num, gi, nss + 1, nss + 1);
for (mcs = 0; mcs <= HAL_RX_MAX_MCS_HT; mcs++) {
if (stats->rx_rate[bw][gi][nss][mcs])
len += scnprintf(buf + len, size - len,
" %d:%llu", mcs,
stats->rx_rate[bw][gi][nss][mcs]);
}
skip_report:
/* advance nss, carrying into gi and then bw */
if (nss++ >= max_mcs - 1) {
nss = 0;
if (gi++ >= max_gi - 1) {
gi = 0;
if (bw < max_bw - 1)
bw++;
}
}
}
len += scnprintf(buf + len, size - len, "\n");
/* bytes appended by this call */
return len - offset;
}
/* ath12k_dbg_sta_dump_rx_stats - debugfs read handler for per-link "rx_stats"
 *
 * Resolves the ath12k_link_sta behind the opened ieee80211_link_sta,
 * formats the accumulated extended rx peer statistics into a temporary
 * buffer and copies them to userspace.
 *
 * Locking: holds the wiphy lock across the whole operation to keep the
 * link/arsta stable, and ar->ab->base_lock (bh) while reading
 * arsta->rx_stats.
 *
 * Returns bytes copied, 0 when nothing was formatted, or a negative errno
 * (-ENOENT if the link or its stats are gone, -ENOMEM on allocation
 * failure).
 */
static ssize_t ath12k_dbg_sta_dump_rx_stats(struct file *file,
					    char __user *user_buf,
					    size_t count, loff_t *ppos)
{
	struct ieee80211_link_sta *link_sta = file->private_data;
	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(link_sta->sta);
	const int size = ATH12K_STA_RX_STATS_BUF_SIZE;
	struct ath12k_hw *ah = ahsta->ahvif->ah;
	struct ath12k_rx_peer_stats *rx_stats;
	struct ath12k_link_sta *arsta;
	u8 link_id = link_sta->link_id;
	int len = 0, i, ret = 0;
	bool he_rates_avail;
	struct ath12k *ar;

	wiphy_lock(ah->hw->wiphy);

	/* The link may have gone away since the file was opened */
	if (!(BIT(link_id) & ahsta->links_map)) {
		wiphy_unlock(ah->hw->wiphy);
		return -ENOENT;
	}

	arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
	if (!arsta || !arsta->arvif->ar) {
		wiphy_unlock(ah->hw->wiphy);
		return -ENOENT;
	}

	ar = arsta->arvif->ar;

	/* scope-based cleanup: freed automatically on every return path */
	u8 *buf __free(kfree) = kzalloc(size, GFP_KERNEL);
	if (!buf) {
		/* allocation failure, not a missing object (was -ENOENT) */
		ret = -ENOMEM;
		goto out;
	}

	spin_lock_bh(&ar->ab->base_lock);

	rx_stats = arsta->rx_stats;
	if (!rx_stats) {
		ret = -ENOENT;
		goto unlock;
	}

	len += scnprintf(buf + len, size - len, "RX peer stats:\n\n");
	len += scnprintf(buf + len, size - len, "Num of MSDUs: %llu\n",
			 rx_stats->num_msdu);
	len += scnprintf(buf + len, size - len, "Num of MSDUs with TCP L4: %llu\n",
			 rx_stats->tcp_msdu_count);
	len += scnprintf(buf + len, size - len, "Num of MSDUs with UDP L4: %llu\n",
			 rx_stats->udp_msdu_count);
	len += scnprintf(buf + len, size - len, "Num of other MSDUs: %llu\n",
			 rx_stats->other_msdu_count);
	len += scnprintf(buf + len, size - len, "Num of MSDUs part of AMPDU: %llu\n",
			 rx_stats->ampdu_msdu_count);
	len += scnprintf(buf + len, size - len, "Num of MSDUs not part of AMPDU: %llu\n",
			 rx_stats->non_ampdu_msdu_count);
	len += scnprintf(buf + len, size - len, "Num of MSDUs using STBC: %llu\n",
			 rx_stats->stbc_count);
	len += scnprintf(buf + len, size - len, "Num of MSDUs beamformed: %llu\n",
			 rx_stats->beamformed_count);
	len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS ok: %llu\n",
			 rx_stats->num_mpdu_fcs_ok);
	len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS error: %llu\n",
			 rx_stats->num_mpdu_fcs_err);
	/* more than one 11ax preamble seen => HE rates were in use */
	he_rates_avail = (rx_stats->pream_cnt[HAL_RX_PREAMBLE_11AX] > 1) ? true : false;
	len += scnprintf(buf + len, size - len,
			 "preamble: 11A %llu 11B %llu 11N %llu 11AC %llu 11AX %llu 11BE %llu\n",
			 rx_stats->pream_cnt[0], rx_stats->pream_cnt[1],
			 rx_stats->pream_cnt[2], rx_stats->pream_cnt[3],
			 rx_stats->pream_cnt[4], rx_stats->pream_cnt[6]);
	len += scnprintf(buf + len, size - len,
			 "reception type: SU %llu MU_MIMO %llu MU_OFDMA %llu MU_OFDMA_MIMO %llu\n",
			 rx_stats->reception_type[0], rx_stats->reception_type[1],
			 rx_stats->reception_type[2], rx_stats->reception_type[3]);
	len += scnprintf(buf + len, size - len, "TID(0-15) Legacy TID(16):");
	for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
		len += scnprintf(buf + len, size - len, "%llu ", rx_stats->tid_count[i]);
	len += scnprintf(buf + len, size - len, "\nRX Duration:%llu\n",
			 rx_stats->rx_duration);
	len += scnprintf(buf + len, size - len,
			 "\nDCM: %llu\nRU26: %llu\nRU52: %llu\nRU106: %llu\nRU242: %llu\nRU484: %llu\nRU996: %llu\nRU996x2: %llu\n",
			 rx_stats->dcm_count, rx_stats->ru_alloc_cnt[0],
			 rx_stats->ru_alloc_cnt[1], rx_stats->ru_alloc_cnt[2],
			 rx_stats->ru_alloc_cnt[3], rx_stats->ru_alloc_cnt[4],
			 rx_stats->ru_alloc_cnt[5], rx_stats->ru_alloc_cnt[6]);
	len += scnprintf(buf + len, size - len, "\nRX success packet stats:\n");
	len += ath12k_dbg_sta_dump_rate_stats(buf, len, size, he_rates_avail,
					      &rx_stats->pkt_stats);
	len += scnprintf(buf + len, size - len, "\n");
	len += scnprintf(buf + len, size - len, "\nRX success byte stats:\n");
	len += ath12k_dbg_sta_dump_rate_stats(buf, len, size, he_rates_avail,
					      &rx_stats->byte_stats);

unlock:
	spin_unlock_bh(&ar->ab->base_lock);

	if (len)
		ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
out:
	wiphy_unlock(ah->hw->wiphy);
	return ret;
}
/* read-only debugfs file exposing accumulated rx peer stats */
static const struct file_operations fops_rx_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ath12k_dbg_sta_dump_rx_stats,
	.llseek = default_llseek,
};
/* ath12k_dbg_sta_reset_rx_stats - debugfs write handler for "reset_rx_stats"
 *
 * Accepts a boolean from userspace; only a "true" value is allowed and
 * zeroes the link's rx_stats.  Returns @count on success, -EINVAL for a
 * false value, -ENOENT if the link or its stats are gone, or the
 * kstrtobool_from_user() parse error.
 *
 * Locking: wiphy lock to stabilize the link lookup, ar->ab->base_lock (bh)
 * while clearing the stats (same order as the read path).
 */
static ssize_t ath12k_dbg_sta_reset_rx_stats(struct file *file,
const char __user *buf,
size_t count, loff_t *ppos)
{
struct ieee80211_link_sta *link_sta = file->private_data;
struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(link_sta->sta);
struct ath12k_hw *ah = ahsta->ahvif->ah;
struct ath12k_rx_peer_stats *rx_stats;
struct ath12k_link_sta *arsta;
u8 link_id = link_sta->link_id;
struct ath12k *ar;
bool reset;
int ret;
ret = kstrtobool_from_user(buf, count, &reset);
if (ret)
return ret;
/* writing "0"/"false" is rejected rather than silently ignored */
if (!reset)
return -EINVAL;
wiphy_lock(ah->hw->wiphy);
/* the link may have gone away since the file was opened */
if (!(BIT(link_id) & ahsta->links_map)) {
ret = -ENOENT;
goto out;
}
arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
if (!arsta || !arsta->arvif->ar) {
ret = -ENOENT;
goto out;
}
ar = arsta->arvif->ar;
spin_lock_bh(&ar->ab->base_lock);
rx_stats = arsta->rx_stats;
if (!rx_stats) {
spin_unlock_bh(&ar->ab->base_lock);
ret = -ENOENT;
goto out;
}
memset(rx_stats, 0, sizeof(*rx_stats));
spin_unlock_bh(&ar->ab->base_lock);
ret = count;
out:
wiphy_unlock(ah->hw->wiphy);
return ret;
}
/* write-only debugfs file: write "1" to clear the rx peer stats */
static const struct file_operations fops_reset_rx_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = ath12k_dbg_sta_reset_rx_stats,
	.llseek = default_llseek,
};
/* ath12k_debugfs_link_sta_op_add - mac80211 link_sta_add_debugfs hook
 *
 * Creates the per-link "rx_stats" (0400) and "reset_rx_stats" (0200)
 * debugfs entries under @dir, but only when the radio owning this link
 * has extended rx stats collection enabled.  Caller holds the wiphy lock.
 */
void ath12k_debugfs_link_sta_op_add(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_link_sta *link_sta,
				    struct dentry *dir)
{
	struct ath12k *ar;

	lockdep_assert_wiphy(hw->wiphy);

	ar = ath12k_get_ar_by_vif(hw, vif, link_sta->link_id);
	if (!ar || !ath12k_debugfs_is_extd_rx_stats_enabled(ar))
		return;

	debugfs_create_file("rx_stats", 0400, dir, link_sta,
			    &fops_rx_stats);
	debugfs_create_file("reset_rx_stats", 0200, dir, link_sta,
			    &fops_reset_rx_stats);
}

View File

@ -0,0 +1,24 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH12K_DEBUGFS_STA_H_
#define _ATH12K_DEBUGFS_STA_H_
#include <net/mac80211.h>
#include "core.h"
/* formatting buffer size for the per-link rx_stats debugfs dump */
#define ATH12K_STA_RX_STATS_BUF_SIZE (1024 * 16)
#ifdef CONFIG_ATH12K_DEBUGFS
/* mac80211 link_sta_add_debugfs hook; creates the per-link rx-stats files */
void ath12k_debugfs_link_sta_op_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta,
struct dentry *dir);
/* NOTE(review): no static-inline stub for !CONFIG_ATH12K_DEBUGFS —
 * presumably the only reference is itself ifdef-guarded; confirm callers.
 */
#endif /* CONFIG_ATH12K_DEBUGFS */
#endif /* _ATH12K_DEBUGFS_STA_H_ */

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
@ -1315,6 +1315,9 @@ void ath12k_dp_cc_config(struct ath12k_base *ab)
u32 wbm_base = HAL_SEQ_WCSS_UMAC_WBM_REG;
u32 val = 0;
if (ath12k_ftm_mode)
return;
ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG0(ab), cmem_base);
val |= u32_encode_bits(ATH12K_CMEM_ADDR_MSB,

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_H
@ -125,7 +125,6 @@ struct ath12k_mon_data {
struct sk_buff_head rx_status_q;
struct dp_mon_mpdu *mon_mpdu;
struct list_head dp_rx_mon_mpdu_list;
struct sk_buff *dest_skb_q[DP_MON_MAX_STATUS_BUF];
struct dp_mon_tx_ppdu_info *tx_prot_ppdu_info;
struct dp_mon_tx_ppdu_info *tx_data_ppdu_info;
};
@ -176,7 +175,7 @@ struct ath12k_pdev_dp {
#define DP_RXDMA_ERR_DST_RING_SIZE 1024
#define DP_RXDMA_MON_STATUS_RING_SIZE 1024
#define DP_RXDMA_MONITOR_BUF_RING_SIZE 4096
#define DP_RXDMA_MONITOR_DST_RING_SIZE 2048
#define DP_RXDMA_MONITOR_DST_RING_SIZE 8092
#define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096
#define DP_TX_MONITOR_BUF_RING_SIZE 4096
#define DP_TX_MONITOR_DEST_RING_SIZE 2048
@ -373,17 +372,24 @@ struct ath12k_dp {
};
/* HTT definitions */
#define HTT_TAG_TCL_METADATA_VERSION 5
#define HTT_TCL_META_DATA_TYPE BIT(0)
#define HTT_TCL_META_DATA_VALID_HTT BIT(1)
#define HTT_TCL_META_DATA_TYPE GENMASK(1, 0)
#define HTT_TCL_META_DATA_VALID_HTT BIT(2)
/* vdev meta data */
#define HTT_TCL_META_DATA_VDEV_ID GENMASK(9, 2)
#define HTT_TCL_META_DATA_PDEV_ID GENMASK(11, 10)
#define HTT_TCL_META_DATA_HOST_INSPECTED BIT(12)
#define HTT_TCL_META_DATA_VDEV_ID GENMASK(10, 3)
#define HTT_TCL_META_DATA_PDEV_ID GENMASK(12, 11)
#define HTT_TCL_META_DATA_HOST_INSPECTED_MISSION BIT(13)
/* peer meta data */
#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 2)
#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 3)
/* Global sequence number */
#define HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM 3
#define HTT_TCL_META_DATA_GLOBAL_SEQ_HOST_INSPECTED BIT(2)
#define HTT_TCL_META_DATA_GLOBAL_SEQ_NUM GENMASK(14, 3)
#define HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID 128
/* HTT tx completion is overlaid in wbm_release_ring */
#define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(16, 13)
@ -414,9 +420,15 @@ enum htt_h2t_msg_type {
};
#define HTT_VER_REQ_INFO_MSG_ID GENMASK(7, 0)
#define HTT_OPTION_TCL_METADATA_VER_V2 2
#define HTT_OPTION_TAG GENMASK(7, 0)
#define HTT_OPTION_LEN GENMASK(15, 8)
#define HTT_OPTION_VALUE GENMASK(31, 16)
#define HTT_TCL_METADATA_VER_SZ 4
/* HTT version request (host -> target): message id in ver_reg_info and
 * the requested TCL metadata version encoded via the HTT_OPTION_* fields.
 */
struct htt_ver_req_cmd {
__le32 ver_reg_info;
__le32 tcl_metadata_version;
} __packed;
enum htt_srng_ring_type {
@ -434,8 +446,11 @@ enum htt_srng_ring_id {
HTT_HOST1_TO_FW_RXBUF_RING,
HTT_HOST2_TO_FW_RXBUF_RING,
HTT_RXDMA_NON_MONITOR_DEST_RING,
HTT_RXDMA_HOST_BUF_RING2,
HTT_TX_MON_HOST2MON_BUF_RING,
HTT_TX_MON_MON2HOST_DEST_RING,
HTT_RX_MON_HOST2MON_BUF_RING,
HTT_RX_MON_MON2HOST_DEST_RING,
};
/* host -> target HTT_SRING_SETUP message
@ -767,8 +782,22 @@ enum htt_stats_internal_ppdu_frametype {
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16)
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24)
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25)
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0)
#define HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID BIT(26)
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID BIT(26)
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL BIT(27)
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON BIT(28)
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0)
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT GENMASK(18, 16)
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL GENMASK(21, 19)
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA GENMASK(24, 22)
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD GENMASK(9, 0)
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE BIT(17)
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE BIT(18)
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE BIT(19)
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET BIT(0)
#define HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET GENMASK(14, 1)
#define HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET GENMASK(15, 0)
#define HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET GENMASK(31, 16)
@ -797,6 +826,7 @@ enum htt_rx_filter_tlv_flags {
HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS = BIT(10),
HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT = BIT(11),
HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE = BIT(12),
HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO = BIT(13),
};
enum htt_rx_mgmt_pkt_filter_tlv_flags0 {
@ -1085,6 +1115,21 @@ enum htt_rx_data_pkt_filter_tlv_flasg3 {
HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
HTT_RX_FILTER_TLV_FLAGS_ATTENTION)
#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING \
(HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \
HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \
HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \
HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \
HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \
HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO)
/* msdu start. mpdu end, attention, rx hdr tlv's are not subscribed */
#define HTT_RX_TLV_FLAGS_RXDMA_RING \
(HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
@ -1113,6 +1158,10 @@ struct htt_rx_ring_selection_cfg_cmd {
__le32 info3;
} __packed;
#define HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE 32
#define HTT_RX_RING_DEFAULT_DMA_LENGTH 0x7
#define HTT_RX_RING_PKT_TLV_OFFSET 0x1
struct htt_rx_ring_tlv_filter {
u32 rx_filter; /* see htt_rx_filter_tlv_flags */
u32 pkt_filter_flags0; /* MGMT */
@ -1130,6 +1179,17 @@ struct htt_rx_ring_tlv_filter {
u16 rx_mpdu_start_wmask;
u16 rx_mpdu_end_wmask;
u32 rx_msdu_end_wmask;
u32 conf_len_ctrl;
u32 conf_len_mgmt;
u32 conf_len_data;
u16 rx_drop_threshold;
bool enable_log_mgmt_type;
bool enable_log_ctrl_type;
bool enable_log_data_type;
bool enable_rx_tlv_offset;
u16 rx_tlv_offset;
bool drop_threshold_valid;
bool rxmon_disable;
};
#define HTT_STATS_FRAME_CTRL_TYPE_MGMT 0x0

File diff suppressed because it is too large Load Diff

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_MON_H
@ -77,14 +77,11 @@ struct dp_mon_tx_ppdu_info {
enum hal_rx_mon_status
ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
struct ath12k_mon_data *pmon,
int mac_id, struct sk_buff *skb,
struct sk_buff *skb,
struct napi_struct *napi);
int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab,
struct dp_rxdma_mon_ring *buf_ring,
int req_entries);
int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id,
int *budget, enum dp_monitor_mode monitor_mode,
struct napi_struct *napi);
int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
struct napi_struct *napi, int budget,
enum dp_monitor_mode monitor_mode);
@ -96,11 +93,9 @@ ath12k_dp_mon_tx_status_get_num_user(u16 tlv_tag,
enum hal_rx_mon_status
ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar,
struct ath12k_mon_data *pmon,
int mac_id,
struct sk_buff *skb,
struct napi_struct *napi,
u32 ppdu_id);
void ath12k_dp_mon_rx_process_ulofdma(struct hal_rx_mon_ppdu_info *ppdu_info);
int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
struct napi_struct *napi, int *budget);
int ath12k_dp_mon_srng_process(struct ath12k *ar, int *budget, struct napi_struct *napi);
#endif

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ieee80211.h>
@ -2392,6 +2392,23 @@ static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
break;
case RX_MSDU_START_PKT_TYPE_11BE:
rx_status->rate_idx = rate_mcs;
if (rate_mcs > ATH12K_EHT_MCS_MAX) {
ath12k_warn(ar->ab,
"Received with invalid mcs in EHT mode %d\n",
rate_mcs);
break;
}
rx_status->encoding = RX_ENC_EHT;
rx_status->nss = nss;
rx_status->eht.gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi);
rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
break;
default:
break;
}
}
@ -2486,7 +2503,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
spin_unlock_bh(&ab->base_lock);
ath12k_dbg(ab, ATH12K_DBG_DATA,
"rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
"rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
peer ? peer->addr : NULL,
@ -2497,6 +2514,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
(status->encoding == RX_ENC_HT) ? "ht" : "",
(status->encoding == RX_ENC_VHT) ? "vht" : "",
(status->encoding == RX_ENC_HE) ? "he" : "",
(status->encoding == RX_ENC_EHT) ? "eht" : "",
(status->bw == RATE_INFO_BW_40) ? "40" : "",
(status->bw == RATE_INFO_BW_80) ? "80" : "",
(status->bw == RATE_INFO_BW_160) ? "160" : "",
@ -4070,7 +4088,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
hw_links[hw_link_id].pdev_idx);
ar = partner_ab->pdevs[pdev_id].ar;
if (!ar || !rcu_dereference(ar->ab->pdevs_active[hw_link_id])) {
if (!ar || !rcu_dereference(ar->ab->pdevs_active[pdev_id])) {
dev_kfree_skb_any(msdu);
continue;
}

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_RX_H
#define ATH12K_DP_RX_H
@ -79,6 +79,9 @@ static inline u32 ath12k_he_gi_to_nl80211_he_gi(u8 sgi)
case RX_MSDU_START_SGI_3_2_US:
ret = NL80211_RATE_INFO_HE_GI_3_2;
break;
default:
ret = NL80211_RATE_INFO_HE_GI_0_8;
break;
}
return ret;
@ -135,9 +138,6 @@ u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
struct hal_rx_desc *desc);
void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
struct ieee80211_rx_status *rx_status);
struct ath12k_peer *
ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu);
int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab);
int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab);

View File

@ -1,13 +1,15 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "dp_tx.h"
#include "debug.h"
#include "hw.h"
#include "peer.h"
#include "mac.h"
static enum hal_tcl_encap_type
ath12k_dp_tx_get_encap_type(struct ath12k_link_vif *arvif, struct sk_buff *skb)
@ -117,7 +119,7 @@ static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab,
le32_encode_bits(ti->data_len,
HAL_TX_MSDU_EXT_INFO1_BUF_LEN);
tcl_ext_cmd->info1 = le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
tcl_ext_cmd->info1 |= le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
le32_encode_bits(ti->encap_type,
HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) |
le32_encode_bits(ti->encrypt_type,
@ -217,7 +219,7 @@ out:
}
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
struct sk_buff *skb)
struct sk_buff *skb, bool gsn_valid, int mcbc_gsn)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp;
@ -290,13 +292,27 @@ tcl_ring_sel:
msdu_ext_desc = true;
}
if (gsn_valid) {
/* Reset and Initialize meta_data_flags with Global Sequence
* Number (GSN) info.
*/
ti.meta_data_flags =
u32_encode_bits(HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM,
HTT_TCL_META_DATA_TYPE) |
u32_encode_bits(mcbc_gsn, HTT_TCL_META_DATA_GLOBAL_SEQ_NUM);
}
ti.encap_type = ath12k_dp_tx_get_encap_type(arvif, skb);
ti.addr_search_flags = arvif->hal_addr_search_flags;
ti.search_type = arvif->search_type;
ti.type = HAL_TCL_DESC_TYPE_BUFFER;
ti.pkt_offset = 0;
ti.lmac_id = ar->lmac_id;
ti.vdev_id = arvif->vdev_id;
if (gsn_valid)
ti.vdev_id += HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID;
ti.bss_ast_hash = arvif->ast_hash;
ti.bss_ast_idx = arvif->ast_idx;
ti.dscp_tid_tbl_idx = 0;
@ -368,6 +384,7 @@ map:
add_htt_metadata = true;
msdu_ext_desc = true;
ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW);
ti.meta_data_flags |= HTT_TCL_META_DATA_VALID_HTT;
ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
}
@ -398,6 +415,7 @@ map:
if (ret < 0) {
ath12k_dbg(ab, ATH12K_DBG_DP_TX,
"Failed to add HTT meta data, dropping packet\n");
kfree_skb(skb_ext_desc);
goto fail_unmap_dma;
}
}
@ -558,13 +576,13 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
switch (wbm_status) {
case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
ts.ack_rssi = le32_get_bits(status_desc->info2,
HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts);
break;
case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring);
@ -580,6 +598,124 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
}
}
static void ath12k_dp_tx_update_txcompl(struct ath12k *ar, struct hal_tx_status *ts)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_peer *peer;
struct ieee80211_sta *sta;
struct ath12k_sta *ahsta;
struct ath12k_link_sta *arsta;
struct rate_info txrate = {0};
u16 rate, ru_tones;
u8 rate_idx = 0;
int ret;
spin_lock_bh(&ab->base_lock);
peer = ath12k_peer_find_by_id(ab, ts->peer_id);
if (!peer || !peer->sta) {
ath12k_dbg(ab, ATH12K_DBG_DP_TX,
"failed to find the peer by id %u\n", ts->peer_id);
spin_unlock_bh(&ab->base_lock);
return;
}
sta = peer->sta;
ahsta = ath12k_sta_to_ahsta(sta);
arsta = &ahsta->deflink;
/* This is to prefer choose the real NSS value arsta->last_txrate.nss,
* if it is invalid, then choose the NSS value while assoc.
*/
if (arsta->last_txrate.nss)
txrate.nss = arsta->last_txrate.nss;
else
txrate.nss = arsta->peer_nss;
spin_unlock_bh(&ab->base_lock);
switch (ts->pkt_type) {
case HAL_TX_RATE_STATS_PKT_TYPE_11A:
case HAL_TX_RATE_STATS_PKT_TYPE_11B:
ret = ath12k_mac_hw_ratecode_to_legacy_rate(ts->mcs,
ts->pkt_type,
&rate_idx,
&rate);
if (ret < 0) {
ath12k_warn(ab, "Invalid tx legacy rate %d\n", ret);
return;
}
txrate.legacy = rate;
break;
case HAL_TX_RATE_STATS_PKT_TYPE_11N:
if (ts->mcs > ATH12K_HT_MCS_MAX) {
ath12k_warn(ab, "Invalid HT mcs index %d\n", ts->mcs);
return;
}
if (txrate.nss != 0)
txrate.mcs = ts->mcs + 8 * (txrate.nss - 1);
txrate.flags = RATE_INFO_FLAGS_MCS;
if (ts->sgi)
txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
break;
case HAL_TX_RATE_STATS_PKT_TYPE_11AC:
if (ts->mcs > ATH12K_VHT_MCS_MAX) {
ath12k_warn(ab, "Invalid VHT mcs index %d\n", ts->mcs);
return;
}
txrate.mcs = ts->mcs;
txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
if (ts->sgi)
txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
break;
case HAL_TX_RATE_STATS_PKT_TYPE_11AX:
if (ts->mcs > ATH12K_HE_MCS_MAX) {
ath12k_warn(ab, "Invalid HE mcs index %d\n", ts->mcs);
return;
}
txrate.mcs = ts->mcs;
txrate.flags = RATE_INFO_FLAGS_HE_MCS;
txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(ts->sgi);
break;
case HAL_TX_RATE_STATS_PKT_TYPE_11BE:
if (ts->mcs > ATH12K_EHT_MCS_MAX) {
ath12k_warn(ab, "Invalid EHT mcs index %d\n", ts->mcs);
return;
}
txrate.mcs = ts->mcs;
txrate.flags = RATE_INFO_FLAGS_EHT_MCS;
txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(ts->sgi);
break;
default:
ath12k_warn(ab, "Invalid tx pkt type: %d\n", ts->pkt_type);
return;
}
txrate.bw = ath12k_mac_bw_to_mac80211_bw(ts->bw);
if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
txrate.bw = RATE_INFO_BW_HE_RU;
ru_tones = ath12k_mac_he_convert_tones_to_ru_tones(ts->tones);
txrate.he_ru_alloc =
ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
}
if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11BE) {
txrate.bw = RATE_INFO_BW_EHT_RU;
txrate.eht_ru_alloc =
ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(ts->tones);
}
spin_lock_bh(&ab->base_lock);
arsta->txrate = txrate;
spin_unlock_bh(&ab->base_lock);
}
static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
struct sk_buff *msdu,
struct hal_tx_status *ts)
@ -658,6 +794,8 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
* Might end up reporting it out-of-band from HTT stats.
*/
ath12k_dp_tx_update_txcompl(ar, ts);
ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);
exit:
@ -668,6 +806,8 @@ static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
struct hal_wbm_completion_ring_tx *desc,
struct hal_tx_status *ts)
{
u32 info0 = le32_to_cpu(desc->rate_stats.info0);
ts->buf_rel_source =
le32_get_bits(desc->info0, HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE);
if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
@ -682,10 +822,17 @@ static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
ts->ppdu_id = le32_get_bits(desc->info1,
HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER);
if (le32_to_cpu(desc->rate_stats.info0) & HAL_TX_RATE_STATS_INFO0_VALID)
ts->rate_stats = le32_to_cpu(desc->rate_stats.info0);
else
ts->rate_stats = 0;
ts->peer_id = le32_get_bits(desc->info3, HAL_WBM_COMPL_TX_INFO3_PEER_ID);
if (info0 & HAL_TX_RATE_STATS_INFO0_VALID) {
ts->pkt_type = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_PKT_TYPE);
ts->mcs = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_MCS);
ts->sgi = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_SGI);
ts->bw = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_BW);
ts->tones = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_TONES_IN_RU);
ts->ofdma = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_OFDMA_TX);
}
}
void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
@ -814,7 +961,7 @@ ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
*htt_ring_type = HTT_HW_TO_SW_RING;
break;
case HAL_RXDMA_MONITOR_BUF:
*htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
*htt_ring_id = HTT_RX_MON_HOST2MON_BUF_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
case HAL_RXDMA_MONITOR_STATUS:
@ -822,7 +969,7 @@ ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
case HAL_RXDMA_MONITOR_DST:
*htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
*htt_ring_id = HTT_RX_MON_MON2HOST_DEST_RING;
*htt_ring_type = HTT_HW_TO_SW_RING;
break;
case HAL_RXDMA_MONITOR_DESC:
@ -971,7 +1118,14 @@ int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
skb_put(skb, len);
cmd = (struct htt_ver_req_cmd *)skb->data;
cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ,
HTT_VER_REQ_INFO_MSG_ID);
HTT_OPTION_TAG);
cmd->tcl_metadata_version = le32_encode_bits(HTT_TAG_TCL_METADATA_VERSION,
HTT_OPTION_TAG) |
le32_encode_bits(HTT_TCL_METADATA_VER_SZ,
HTT_OPTION_LEN) |
le32_encode_bits(HTT_OPTION_TCL_METADATA_VER_V2,
HTT_OPTION_VALUE);
ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
if (ret) {
@ -1077,15 +1231,46 @@ int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS);
cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid,
HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID);
HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID);
cmd->info0 |=
le32_encode_bits(tlv_filter->drop_threshold_valid,
HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL);
cmd->info0 |= le32_encode_bits(!tlv_filter->rxmon_disable,
HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON);
cmd->info1 = le32_encode_bits(rx_buf_size,
HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE);
cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_mgmt,
HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_ctrl,
HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_data,
HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0);
cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1);
cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2);
cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3);
cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter);
cmd->info2 = le32_encode_bits(tlv_filter->rx_drop_threshold,
HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD);
cmd->info2 |=
le32_encode_bits(tlv_filter->enable_log_mgmt_type,
HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE);
cmd->info2 |=
le32_encode_bits(tlv_filter->enable_log_ctrl_type,
HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE);
cmd->info2 |=
le32_encode_bits(tlv_filter->enable_log_data_type,
HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE);
cmd->info3 =
le32_encode_bits(tlv_filter->enable_rx_tlv_offset,
HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET);
cmd->info3 |=
le32_encode_bits(tlv_filter->rx_tlv_offset,
HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET);
if (tlv_filter->offset_valid) {
cmd->rx_packet_offset =
le32_encode_bits(tlv_filter->rx_packet_offset,
@ -1210,15 +1395,28 @@ int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
struct ath12k_base *ab = ar->ab;
struct ath12k_dp *dp = &ab->dp;
struct htt_rx_ring_tlv_filter tlv_filter = {0};
int ret, ring_id;
int ret, ring_id, i;
ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
tlv_filter.offset_valid = false;
if (!reset) {
tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING;
tlv_filter.drop_threshold_valid = true;
tlv_filter.rx_drop_threshold = HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE;
tlv_filter.enable_log_mgmt_type = true;
tlv_filter.enable_log_ctrl_type = true;
tlv_filter.enable_log_data_type = true;
tlv_filter.conf_len_ctrl = HTT_RX_RING_DEFAULT_DMA_LENGTH;
tlv_filter.conf_len_mgmt = HTT_RX_RING_DEFAULT_DMA_LENGTH;
tlv_filter.conf_len_data = HTT_RX_RING_DEFAULT_DMA_LENGTH;
tlv_filter.enable_rx_tlv_offset = true;
tlv_filter.rx_tlv_offset = HTT_RX_RING_PKT_TLV_OFFSET;
tlv_filter.pkt_filter_flags0 =
HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
@ -1236,14 +1434,19 @@ int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
}
if (ab->hw_params->rxdma1_enable) {
ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, 0,
HAL_RXDMA_MONITOR_BUF,
DP_RXDMA_REFILL_RING_SIZE,
&tlv_filter);
if (ret) {
ath12k_err(ab,
"failed to setup filter for monitor buf %d\n", ret);
return ret;
for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
ar->dp.mac_id + i,
HAL_RXDMA_MONITOR_DST,
DP_RXDMA_REFILL_RING_SIZE,
&tlv_filter);
if (ret) {
ath12k_err(ab,
"failed to setup filter for monitor buf %d\n",
ret);
return ret;
}
}
}

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_TX_H
@ -17,7 +17,7 @@ struct ath12k_dp_htt_wbm_tx_status {
int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab);
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
struct sk_buff *skb);
struct sk_buff *skb, bool gsn_valid, int mcbc_gsn);
void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id);
int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask);

View File

@ -2968,9 +2968,8 @@ struct hal_mon_buf_ring {
#define HAL_MON_DEST_COOKIE_BUF_ID GENMASK(17, 0)
#define HAL_MON_DEST_INFO0_END_OFFSET GENMASK(15, 0)
#define HAL_MON_DEST_INFO0_FLUSH_DETECTED BIT(16)
#define HAL_MON_DEST_INFO0_END_OF_PPDU BIT(17)
#define HAL_MON_DEST_INFO0_END_OFFSET GENMASK(11, 0)
#define HAL_MON_DEST_INFO0_END_REASON GENMASK(17, 16)
#define HAL_MON_DEST_INFO0_INITIATOR BIT(18)
#define HAL_MON_DEST_INFO0_EMPTY_DESC BIT(19)
#define HAL_MON_DEST_INFO0_RING_ID GENMASK(27, 20)

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HAL_RX_H
@ -22,9 +22,6 @@ struct hal_rx_wbm_rel_info {
#define HAL_INVALID_PEERID 0x3fff
#define VHT_SIG_SU_NSS_MASK 0x7
#define HAL_RX_MAX_MCS 12
#define HAL_RX_MAX_NSS 8
#define HAL_RX_MPDU_INFO_PN_GET_BYTE1(__val) \
le32_get_bits((__val), GENMASK(7, 0))
@ -71,6 +68,8 @@ enum hal_rx_preamble {
HAL_RX_PREAMBLE_11N,
HAL_RX_PREAMBLE_11AC,
HAL_RX_PREAMBLE_11AX,
HAL_RX_PREAMBLE_11BA,
HAL_RX_PREAMBLE_11BE,
HAL_RX_PREAMBLE_MAX,
};
@ -108,6 +107,9 @@ enum hal_rx_mon_status {
HAL_RX_MON_STATUS_PPDU_NOT_DONE,
HAL_RX_MON_STATUS_PPDU_DONE,
HAL_RX_MON_STATUS_BUF_DONE,
HAL_RX_MON_STATUS_BUF_ADDR,
HAL_RX_MON_STATUS_MPDU_END,
HAL_RX_MON_STATUS_MSDU_END,
};
#define HAL_RX_MAX_MPDU 256
@ -143,10 +145,43 @@ struct hal_rx_user_status {
u32 mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP];
u32 mpdu_ok_byte_count;
u32 mpdu_err_byte_count;
bool ampdu_present;
u16 ampdu_id;
};
#define HAL_MAX_UL_MU_USERS 37
struct hal_rx_u_sig_info {
bool ul_dl;
u8 bw;
u8 ppdu_type_comp_mode;
u8 eht_sig_mcs;
u8 num_eht_sig_sym;
struct ieee80211_radiotap_eht_usig usig;
};
#define HAL_RX_MON_MAX_AGGR_SIZE 128
struct hal_rx_tlv_aggr_info {
bool in_progress;
u16 cur_len;
u16 tlv_tag;
u8 buf[HAL_RX_MON_MAX_AGGR_SIZE];
};
struct hal_rx_radiotap_eht {
__le32 known;
__le32 data[9];
};
#define EHT_MAX_USER_INFO 4
struct hal_rx_eht_info {
u8 num_user_info;
struct hal_rx_radiotap_eht eht;
u32 user_info[EHT_MAX_USER_INFO];
};
struct hal_rx_mon_ppdu_info {
u32 ppdu_id;
u32 last_ppdu_id;
@ -227,10 +262,15 @@ struct hal_rx_mon_ppdu_info {
u8 addr4[ETH_ALEN];
struct hal_rx_user_status userstats[HAL_MAX_UL_MU_USERS];
u8 userid;
u16 ampdu_id[HAL_MAX_UL_MU_USERS];
bool first_msdu_in_mpdu;
bool is_ampdu;
u8 medium_prot_type;
bool ppdu_continuation;
bool eht_usig;
struct hal_rx_u_sig_info u_sig_info;
bool is_eht;
struct hal_rx_eht_info eht_info;
struct hal_rx_tlv_aggr_info tlv_aggr;
};
#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
@ -641,6 +681,395 @@ struct hal_rx_resp_req_info {
#define HAL_RX_MPDU_ERR_MPDU_LEN BIT(6)
#define HAL_RX_MPDU_ERR_UNENCRYPTED_FRAME BIT(7)
#define HAL_RX_PHY_CMN_USER_INFO0_GI GENMASK(17, 16)
struct hal_phyrx_common_user_info {
__le32 rsvd[2];
__le32 info0;
__le32 rsvd1;
} __packed;
#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_SPATIAL_REUSE GENMASK(3, 0)
#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_GI_LTF GENMASK(5, 4)
#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_NUM_LTF_SYM GENMASK(8, 6)
#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_NSS GENMASK(10, 7)
#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_BEAMFORMED BIT(11)
#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_DISREGARD GENMASK(13, 12)
#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_CRC GENMASK(17, 14)
struct hal_eht_sig_ndp_cmn_eb {
__le32 info0;
} __packed;
#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_SPATIAL_REUSE GENMASK(3, 0)
#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_GI_LTF GENMASK(5, 4)
#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_NUM_LTF_SYM GENMASK(8, 6)
#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_LDPC_EXTA_SYM BIT(9)
#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_PRE_FEC_PAD_FACTOR GENMASK(11, 10)
#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISAMBIGUITY BIT(12)
#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISREGARD GENMASK(16, 13)
struct hal_eht_sig_usig_overflow {
__le32 info0;
} __packed;
#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_STA_ID GENMASK(10, 0)
#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS GENMASK(14, 11)
#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_VALIDATE BIT(15)
#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS GENMASK(19, 16)
#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_BEAMFORMED BIT(20)
#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CODING BIT(21)
#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CRC GENMASK(25, 22)
struct hal_eht_sig_non_mu_mimo {
__le32 info0;
} __packed;
#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_STA_ID GENMASK(10, 0)
#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS GENMASK(14, 11)
#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CODING BIT(15)
#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_SPATIAL_CODING GENMASK(22, 16)
#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CRC GENMASK(26, 23)
struct hal_eht_sig_mu_mimo {
__le32 info0;
} __packed;
union hal_eht_sig_user_field {
struct hal_eht_sig_mu_mimo mu_mimo;
struct hal_eht_sig_non_mu_mimo n_mu_mimo;
};
#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_SPATIAL_REUSE GENMASK(3, 0)
#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_GI_LTF GENMASK(5, 4)
#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_LTF_SYM GENMASK(8, 6)
#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_LDPC_EXTA_SYM BIT(9)
#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_PRE_FEC_PAD_FACTOR GENMASK(11, 10)
#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_DISAMBIGUITY BIT(12)
#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_DISREGARD GENMASK(16, 13)
#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_USERS GENMASK(19, 17)
struct hal_eht_sig_non_ofdma_cmn_eb {
__le32 info0;
union hal_eht_sig_user_field user_field;
} __packed;
#define HAL_RX_EHT_SIG_OFDMA_EB1_SPATIAL_REUSE GENMASK_ULL(3, 0)
#define HAL_RX_EHT_SIG_OFDMA_EB1_GI_LTF GENMASK_ULL(5, 4)
#define HAL_RX_EHT_SIG_OFDMA_EB1_NUM_LFT_SYM GENMASK_ULL(8, 6)
#define HAL_RX_EHT_SIG_OFDMA_EB1_LDPC_EXTRA_SYM BIT(9)
#define HAL_RX_EHT_SIG_OFDMA_EB1_PRE_FEC_PAD_FACTOR GENMASK_ULL(11, 10)
#define HAL_RX_EHT_SIG_OFDMA_EB1_PRE_DISAMBIGUITY BIT(12)
#define HAL_RX_EHT_SIG_OFDMA_EB1_DISREGARD GENMASK_ULL(16, 13)
#define HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_1 GENMASK_ULL(25, 17)
#define HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_2 GENMASK_ULL(34, 26)
#define HAL_RX_EHT_SIG_OFDMA_EB1_CRC GENMASK_ULL(30, 27)
struct hal_eht_sig_ofdma_cmn_eb1 {
__le64 info0;
} __packed;
#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_1 GENMASK_ULL(8, 0)
#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_2 GENMASK_ULL(17, 9)
#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_3 GENMASK_ULL(26, 18)
#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_4 GENMASK_ULL(35, 27)
#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_5 GENMASK_ULL(44, 36)
#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_6 GENMASK_ULL(53, 45)
#define HAL_RX_EHT_SIG_OFDMA_EB2_MCS GNEMASK_ULL(57, 54)
struct hal_eht_sig_ofdma_cmn_eb2 {
__le64 info0;
} __packed;
struct hal_eht_sig_ofdma_cmn_eb {
struct hal_eht_sig_ofdma_cmn_eb1 eb1;
struct hal_eht_sig_ofdma_cmn_eb2 eb2;
union hal_eht_sig_user_field user_field;
} __packed;
enum hal_eht_bw {
HAL_EHT_BW_20,
HAL_EHT_BW_40,
HAL_EHT_BW_80,
HAL_EHT_BW_160,
HAL_EHT_BW_320_1,
HAL_EHT_BW_320_2,
};
#define HAL_RX_USIG_CMN_INFO0_PHY_VERSION GENMASK(2, 0)
#define HAL_RX_USIG_CMN_INFO0_BW GENMASK(5, 3)
#define HAL_RX_USIG_CMN_INFO0_UL_DL BIT(6)
#define HAL_RX_USIG_CMN_INFO0_BSS_COLOR GENMASK(12, 7)
#define HAL_RX_USIG_CMN_INFO0_TXOP GENMASK(19, 13)
#define HAL_RX_USIG_CMN_INFO0_DISREGARD GENMASK(25, 20)
#define HAL_RX_USIG_CMN_INFO0_VALIDATE BIT(26)
struct hal_mon_usig_cmn {
__le32 info0;
} __packed;
#define HAL_RX_USIG_TB_INFO0_PPDU_TYPE_COMP_MODE GENMASK(1, 0)
#define HAL_RX_USIG_TB_INFO0_VALIDATE BIT(2)
#define HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_1 GENMASK(6, 3)
#define HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_2 GENMASK(10, 7)
#define HAL_RX_USIG_TB_INFO0_DISREGARD_1 GENMASK(15, 11)
#define HAL_RX_USIG_TB_INFO0_CRC GENMASK(19, 16)
#define HAL_RX_USIG_TB_INFO0_TAIL GENMASK(25, 20)
#define HAL_RX_USIG_TB_INFO0_RX_INTEG_CHECK_PASS BIT(31)
struct hal_mon_usig_tb {
__le32 info0;
} __packed;
#define HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE GENMASK(1, 0)
#define HAL_RX_USIG_MU_INFO0_VALIDATE_1 BIT(2)
#define HAL_RX_USIG_MU_INFO0_PUNC_CH_INFO GENMASK(7, 3)
#define HAL_RX_USIG_MU_INFO0_VALIDATE_2 BIT(8)
#define HAL_RX_USIG_MU_INFO0_EHT_SIG_MCS GENMASK(10, 9)
#define HAL_RX_USIG_MU_INFO0_NUM_EHT_SIG_SYM GENMASK(15, 11)
#define HAL_RX_USIG_MU_INFO0_CRC GENMASK(20, 16)
#define HAL_RX_USIG_MU_INFO0_TAIL GENMASK(26, 21)
#define HAL_RX_USIG_MU_INFO0_RX_INTEG_CHECK_PASS BIT(31)
struct hal_mon_usig_mu {
__le32 info0;
} __packed;
union hal_mon_usig_non_cmn {
struct hal_mon_usig_tb tb;
struct hal_mon_usig_mu mu;
};
struct hal_mon_usig_hdr {
struct hal_mon_usig_cmn cmn;
union hal_mon_usig_non_cmn non_cmn;
} __packed;
#define HAL_RX_USR_INFO0_PHY_PPDU_ID GENMASK(15, 0)
#define HAL_RX_USR_INFO0_USR_RSSI GENMASK(23, 16)
#define HAL_RX_USR_INFO0_PKT_TYPE GENMASK(27, 24)
#define HAL_RX_USR_INFO0_STBC BIT(28)
#define HAL_RX_USR_INFO0_RECEPTION_TYPE GENMASK(31, 29)
#define HAL_RX_USR_INFO1_MCS GENMASK(3, 0)
#define HAL_RX_USR_INFO1_SGI GENMASK(5, 4)
#define HAL_RX_USR_INFO1_HE_RANGING_NDP BIT(6)
#define HAL_RX_USR_INFO1_MIMO_SS_BITMAP GENMASK(15, 8)
#define HAL_RX_USR_INFO1_RX_BW GENMASK(18, 16)
#define HAL_RX_USR_INFO1_DL_OFMDA_USR_IDX GENMASK(31, 24)
#define HAL_RX_USR_INFO2_DL_OFDMA_CONTENT_CHAN BIT(0)
#define HAL_RX_USR_INFO2_NSS GENMASK(10, 8)
#define HAL_RX_USR_INFO2_STREAM_OFFSET GENMASK(13, 11)
#define HAL_RX_USR_INFO2_STA_DCM BIT(14)
#define HAL_RX_USR_INFO2_LDPC BIT(15)
#define HAL_RX_USR_INFO2_RU_TYPE_80_0 GENMASK(19, 16)
#define HAL_RX_USR_INFO2_RU_TYPE_80_1 GENMASK(23, 20)
#define HAL_RX_USR_INFO2_RU_TYPE_80_2 GENMASK(27, 24)
#define HAL_RX_USR_INFO2_RU_TYPE_80_3 GENMASK(31, 28)
#define HAL_RX_USR_INFO3_RU_START_IDX_80_0 GENMASK(5, 0)
#define HAL_RX_USR_INFO3_RU_START_IDX_80_1 GENMASK(13, 8)
#define HAL_RX_USR_INFO3_RU_START_IDX_80_2 GENMASK(21, 16)
#define HAL_RX_USR_INFO3_RU_START_IDX_80_3 GENMASK(29, 24)
struct hal_receive_user_info {
__le32 info0;
__le32 info1;
__le32 info2;
__le32 info3;
__le32 user_fd_rssi_seg0;
__le32 user_fd_rssi_seg1;
__le32 user_fd_rssi_seg2;
__le32 user_fd_rssi_seg3;
} __packed;
enum hal_mon_reception_type {
HAL_RECEPTION_TYPE_SU,
HAL_RECEPTION_TYPE_DL_MU_MIMO,
HAL_RECEPTION_TYPE_DL_MU_OFMA,
HAL_RECEPTION_TYPE_DL_MU_OFDMA_MIMO,
HAL_RECEPTION_TYPE_UL_MU_MIMO,
HAL_RECEPTION_TYPE_UL_MU_OFDMA,
HAL_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
};
/* Different allowed RU in 11BE */
#define HAL_EHT_RU_26 0ULL
#define HAL_EHT_RU_52 1ULL
#define HAL_EHT_RU_78 2ULL
#define HAL_EHT_RU_106 3ULL
#define HAL_EHT_RU_132 4ULL
#define HAL_EHT_RU_242 5ULL
#define HAL_EHT_RU_484 6ULL
#define HAL_EHT_RU_726 7ULL
#define HAL_EHT_RU_996 8ULL
#define HAL_EHT_RU_996x2 9ULL
#define HAL_EHT_RU_996x3 10ULL
#define HAL_EHT_RU_996x4 11ULL
#define HAL_EHT_RU_NONE 15ULL
#define HAL_EHT_RU_INVALID 31ULL
/* MRUs spanning above 80Mhz
* HAL_EHT_RU_996_484 = HAL_EHT_RU_484 + HAL_EHT_RU_996 + 4 (reserved)
*/
#define HAL_EHT_RU_996_484 18ULL
#define HAL_EHT_RU_996x2_484 28ULL
#define HAL_EHT_RU_996x3_484 40ULL
#define HAL_EHT_RU_996_484_242 23ULL
#define NUM_RU_BITS_PER80 16
#define NUM_RU_BITS_PER20 4
/* Different per_80Mhz band in 320Mhz bandwidth */
#define HAL_80_0 0
#define HAL_80_1 1
#define HAL_80_2 2
#define HAL_80_3 3
#define HAL_RU_80MHZ(num_band) ((num_band) * NUM_RU_BITS_PER80)
#define HAL_RU_20MHZ(idx_per_80) ((idx_per_80) * NUM_RU_BITS_PER20)
#define HAL_RU_SHIFT(num_band, idx_per_80) \
(HAL_RU_80MHZ(num_band) + HAL_RU_20MHZ(idx_per_80))
#define HAL_RU(ru, num_band, idx_per_80) \
((u64)(ru) << HAL_RU_SHIFT(num_band, idx_per_80))
/* MRU-996+484 */
#define HAL_EHT_RU_996_484_0 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 1) | \
HAL_RU(HAL_EHT_RU_996, HAL_80_1, 0))
#define HAL_EHT_RU_996_484_1 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 0) | \
HAL_RU(HAL_EHT_RU_996, HAL_80_1, 0))
#define HAL_EHT_RU_996_484_2 (HAL_RU(HAL_EHT_RU_996, HAL_80_0, 0) | \
HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1))
#define HAL_EHT_RU_996_484_3 (HAL_RU(HAL_EHT_RU_996, HAL_80_0, 0) | \
HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0))
#define HAL_EHT_RU_996_484_4 (HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1) | \
HAL_RU(HAL_EHT_RU_996, HAL_80_3, 0))
#define HAL_EHT_RU_996_484_5 (HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0) | \
HAL_RU(HAL_EHT_RU_996, HAL_80_3, 0))
#define HAL_EHT_RU_996_484_6 (HAL_RU(HAL_EHT_RU_996, HAL_80_2, 0) | \
HAL_RU(HAL_EHT_RU_484, HAL_80_3, 1))
#define HAL_EHT_RU_996_484_7 (HAL_RU(HAL_EHT_RU_996, HAL_80_2, 0) | \
HAL_RU(HAL_EHT_RU_484, HAL_80_3, 0))
/* MRU-996x2+484 */
#define HAL_EHT_RU_996x2_484_0 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 1) | \
HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0))
#define HAL_EHT_RU_996x2_484_1 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 0) | \
HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0))
#define HAL_EHT_RU_996x2_484_2 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) | \
HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1) | \
HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0))
#define HAL_EHT_RU_996x2_484_3 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) | \
HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0) | \
HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0))
#define HAL_EHT_RU_996x2_484_4 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) | \
HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1))
#define HAL_EHT_RU_996x2_484_5 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) | \
HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0))
#define HAL_EHT_RU_996x2_484_6 (HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1) | \
HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) | \
HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0))
#define HAL_EHT_RU_996x2_484_7 (HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0) | \
HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) | \
HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0))
#define HAL_EHT_RU_996x2_484_8 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1) | \
HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0))
#define HAL_EHT_RU_996x2_484_9 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0) | \
HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0))
#define HAL_EHT_RU_996x2_484_10 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) | \
HAL_RU(HAL_EHT_RU_484, HAL_80_3, 1))
#define HAL_EHT_RU_996x2_484_11 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \
HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) | \
HAL_RU(HAL_EHT_RU_484, HAL_80_3, 0))
/* MRU-996x3+484 */
#define HAL_EHT_RU_996x3_484_0 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 1) | \
HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \
HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \
HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
#define HAL_EHT_RU_996x3_484_1 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 0) | \
HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \
HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \
HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
#define HAL_EHT_RU_996x3_484_2 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \
HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1) | \
HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \
HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
#define HAL_EHT_RU_996x3_484_3 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \
HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0) | \
HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \
HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
#define HAL_EHT_RU_996x3_484_4 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \
HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \
HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1) | \
HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
#define HAL_EHT_RU_996x3_484_5 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \
HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \
HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0) | \
HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
#define HAL_EHT_RU_996x3_484_6 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \
HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \
HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \
HAL_RU(HAL_EHT_RU_484, HAL_80_3, 1))
#define HAL_EHT_RU_996x3_484_7 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \
HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \
HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \
HAL_RU(HAL_EHT_RU_484, HAL_80_3, 0))
#define HAL_RU_PER80(ru_per80, num_80mhz, ru_idx_per80mhz) \
(HAL_RU(ru_per80, num_80mhz, ru_idx_per80mhz))
#define RU_INVALID 0
#define RU_26 1
#define RU_52 2
#define RU_106 4
#define RU_242 9
#define RU_484 18
#define RU_996 37
#define RU_2X996 74
#define RU_3X996 111
#define RU_4X996 148
#define RU_52_26 (RU_52 + RU_26)
#define RU_106_26 (RU_106 + RU_26)
#define RU_484_242 (RU_484 + RU_242)
#define RU_996_484 (RU_996 + RU_484)
#define RU_996_484_242 (RU_996 + RU_484_242)
#define RU_2X996_484 (RU_2X996 + RU_484)
#define RU_3X996_484 (RU_3X996 + RU_484)
enum ath12k_eht_ru_size {
ATH12K_EHT_RU_26,
ATH12K_EHT_RU_52,
ATH12K_EHT_RU_106,
ATH12K_EHT_RU_242,
ATH12K_EHT_RU_484,
ATH12K_EHT_RU_996,
ATH12K_EHT_RU_996x2,
ATH12K_EHT_RU_996x4,
ATH12K_EHT_RU_52_26,
ATH12K_EHT_RU_106_26,
ATH12K_EHT_RU_484_242,
ATH12K_EHT_RU_996_484,
ATH12K_EHT_RU_996_484_242,
ATH12K_EHT_RU_996x2_484,
ATH12K_EHT_RU_996x3,
ATH12K_EHT_RU_996x3_484,
/* Keep last */
ATH12K_EHT_RU_INVALID,
};
#define HAL_RX_RU_ALLOC_TYPE_MAX ATH12K_EHT_RU_INVALID
static inline
enum nl80211_he_ru_alloc ath12k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
{
@ -662,6 +1091,9 @@ enum nl80211_he_ru_alloc ath12k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
case RU_996:
ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
break;
case RU_2X996:
ret = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
break;
case RU_26:
fallthrough;
default:

View File

@ -1,7 +1,8 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc.
* All rights reserved.
*/
#ifndef ATH12K_HAL_TX_H
@ -63,7 +64,12 @@ struct hal_tx_status {
u8 try_cnt;
u8 tid;
u16 peer_id;
u32 rate_stats;
enum hal_tx_rate_stats_pkt_type pkt_type;
enum hal_tx_rate_stats_sgi sgi;
enum ath12k_supported_bw bw;
u8 mcs;
u16 tones;
u8 ofdma;
};
#define HAL_TX_PHY_DESC_INFO0_BF_TYPE GENMASK(17, 16)

View File

@ -543,7 +543,11 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
ATH12K_TX_RING_MASK_3,
},
.rx_mon_dest = {
0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
ATH12K_RX_MON_RING_MASK_0,
ATH12K_RX_MON_RING_MASK_1,
ATH12K_RX_MON_RING_MASK_2,
},
.rx = {
0, 0, 0, 0,
@ -1035,7 +1039,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.hal_params = &ath12k_hw_hal_params_qcn9274,
.rxdma1_enable = false,
.rxdma1_enable = true,
.num_rxdma_per_pdev = 1,
.num_rxdma_dst_ring = 0,
.rx_mac_buf_ring = false,

File diff suppressed because it is too large Load Diff

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_MAC_H
@ -108,5 +108,11 @@ int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif);
void ath12k_mac_get_any_chanctx_conf_iter(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *conf,
void *data);
u16 ath12k_mac_he_convert_tones_to_ru_tones(u16 tones);
enum nl80211_eht_ru_alloc ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(u16 ru_tones);
enum nl80211_eht_gi ath12k_mac_eht_gi_to_nl80211_eht_gi(u8 sgi);
struct ieee80211_bss_conf *ath12k_mac_get_link_bss_conf(struct ath12k_link_vif *arvif);
struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u8 link_id);
#endif

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@ -483,8 +483,11 @@ static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
ath12k_pci_ext_grp_disable(irq_grp);
napi_synchronize(&irq_grp->napi);
napi_disable(&irq_grp->napi);
if (irq_grp->napi_enabled) {
napi_synchronize(&irq_grp->napi);
napi_disable(&irq_grp->napi);
irq_grp->napi_enabled = false;
}
}
}
@ -646,7 +649,7 @@ static int ath12k_pci_set_irq_affinity_hint(struct ath12k_pci *ab_pci,
if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
return 0;
return irq_set_affinity_hint(ab_pci->pdev->irq, m);
return irq_set_affinity_and_hint(ab_pci->pdev->irq, m);
}
static int ath12k_pci_config_irq(struct ath12k_base *ab)
@ -1114,7 +1117,11 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
napi_enable(&irq_grp->napi);
if (!irq_grp->napi_enabled) {
napi_enable(&irq_grp->napi);
irq_grp->napi_enabled = true;
}
ath12k_pci_ext_grp_enable(irq_grp);
}
@ -1561,6 +1568,7 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
ab_pci->ab = ab;
ab_pci->pdev = pdev;
ab->hif.ops = &ath12k_pci_hif_ops;
ab->fw_mode = ATH12K_FIRMWARE_MODE_NORMAL;
pci_set_drvdata(pdev, ab);
spin_lock_init(&ab_pci->window_lock);
@ -1689,6 +1697,8 @@ static int ath12k_pci_probe(struct pci_dev *pdev,
return 0;
err_free_irq:
/* __free_irq() expects the caller to have cleared the affinity hint */
ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
ath12k_pci_free_irq(ab);
err_ce_free:
@ -1734,9 +1744,9 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
cancel_work_sync(&ab->reset_work);
cancel_work_sync(&ab->dump_work);
ath12k_core_deinit(ab);
ath12k_fw_unmap(ab);
qmi_fail:
ath12k_fw_unmap(ab);
ath12k_mhi_unregister(ab_pci);
ath12k_pci_free_irq(ab);

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/elf.h>
@ -2056,8 +2056,7 @@ static int ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
}
if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) {
ab->single_chip_mlo_supp = false;
ag->mlo_capable = false;
ath12k_dbg(ab, ATH12K_DBG_QMI,
"skip QMI MLO cap due to invalid num_radio %d\n",
ab->qmi.num_radios);
@ -2265,10 +2264,6 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
goto out;
}
if (resp.single_chip_mlo_support_valid &&
resp.single_chip_mlo_support)
ab->single_chip_mlo_supp = true;
if (!resp.num_phy_valid) {
ret = -ENODATA;
goto out;
@ -2277,10 +2272,9 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
ab->qmi.num_radios = resp.num_phy;
ath12k_dbg(ab, ATH12K_DBG_QMI,
"phy capability resp valid %d num_phy %d valid %d board_id %d valid %d single_chip_mlo_support %d\n",
"phy capability resp valid %d num_phy %d valid %d board_id %d\n",
resp.num_phy_valid, resp.num_phy,
resp.board_id_valid, resp.board_id,
resp.single_chip_mlo_support_valid, resp.single_chip_mlo_support);
resp.board_id_valid, resp.board_id);
return;
@ -2740,6 +2734,15 @@ int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
if (r)
ath12k_dbg(ab, ATH12K_DBG_QMI, "SMBIOS bdf variant name not set.\n");
r = ath12k_acpi_start(ab);
if (r)
/* ACPI is optional so continue in case of an error */
ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", r);
r = ath12k_acpi_check_bdf_variant_name(ab);
if (r)
ath12k_dbg(ab, ATH12K_DBG_BOOT, "ACPI bdf variant name not set.\n");
out:
return ret;
}

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_REG_H
@ -13,6 +13,9 @@
struct ath12k_base;
struct ath12k;
#define ATH12K_2GHZ_MAX_FREQUENCY 2495
#define ATH12K_5GHZ_MAX_FREQUENCY 5920
/* DFS regdomains supported by Firmware */
enum ath12k_dfs_region {
ATH12K_DFS_REG_UNSET,

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_RX_DESC_H
#define ATH12K_RX_DESC_H
@ -637,6 +637,8 @@ enum rx_msdu_start_pkt_type {
RX_MSDU_START_PKT_TYPE_11N,
RX_MSDU_START_PKT_TYPE_11AC,
RX_MSDU_START_PKT_TYPE_11AX,
RX_MSDU_START_PKT_TYPE_11BA,
RX_MSDU_START_PKT_TYPE_11BE,
};
enum rx_msdu_start_sgi {
@ -1539,12 +1541,4 @@ struct hal_rx_desc {
#define MAX_MU_GROUP_SHOW 16
#define MAX_MU_GROUP_LENGTH (6 * MAX_MU_GROUP_SHOW)
#define HAL_RX_RU_ALLOC_TYPE_MAX 6
#define RU_26 1
#define RU_52 2
#define RU_106 4
#define RU_242 9
#define RU_484 18
#define RU_996 37
#endif /* ATH12K_RX_DESC_H */

View File

@ -0,0 +1,395 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "testmode.h"
#include <net/netlink.h>
#include "debug.h"
#include "wmi.h"
#include "hw.h"
#include "core.h"
#include "hif.h"
#include "../testmode_i.h"
#define ATH12K_FTM_SEGHDR_CURRENT_SEQ GENMASK(3, 0)
#define ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS GENMASK(7, 4)
static const struct nla_policy ath12k_tm_policy[ATH_TM_ATTR_MAX + 1] = {
[ATH_TM_ATTR_CMD] = { .type = NLA_U32 },
[ATH_TM_ATTR_DATA] = { .type = NLA_BINARY,
.len = ATH_TM_DATA_MAX_LEN },
[ATH_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
[ATH_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
[ATH_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
};
static struct ath12k *ath12k_tm_get_ar(struct ath12k_base *ab)
{
struct ath12k_pdev *pdev;
struct ath12k *ar;
int i;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (ar && ar->ah->state == ATH12K_HW_STATE_TM)
return ar;
}
return NULL;
}
void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id,
struct sk_buff *skb)
{
struct sk_buff *nl_skb;
struct ath12k *ar;
ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
"testmode event wmi cmd_id %d skb length %d\n",
cmd_id, skb->len);
ath12k_dbg_dump(ab, ATH12K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
ar = ath12k_tm_get_ar(ab);
if (!ar) {
ath12k_warn(ab, "testmode event not handled due to invalid pdev\n");
return;
}
spin_lock_bh(&ar->data_lock);
nl_skb = cfg80211_testmode_alloc_event_skb(ar->ah->hw->wiphy,
2 * nla_total_size(sizeof(u32)) +
nla_total_size(skb->len),
GFP_ATOMIC);
spin_unlock_bh(&ar->data_lock);
if (!nl_skb) {
ath12k_warn(ab,
"failed to allocate skb for unsegmented testmode wmi event\n");
return;
}
if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD, ATH_TM_CMD_WMI) ||
nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
nla_put(nl_skb, ATH_TM_ATTR_DATA, skb->len, skb->data)) {
ath12k_warn(ab, "failed to populate testmode unsegmented event\n");
kfree_skb(nl_skb);
return;
}
cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
}
void ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id,
const struct ath12k_wmi_ftm_event *ftm_msg,
u16 length)
{
struct sk_buff *nl_skb;
struct ath12k *ar;
u32 data_pos, pdev_id;
u16 datalen;
u8 total_segments, current_seq;
u8 const *buf_pos;
ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
"testmode event wmi cmd_id %d ftm event msg %pK datalen %d\n",
cmd_id, ftm_msg, length);
ath12k_dbg_dump(ab, ATH12K_DBG_TESTMODE, NULL, "", ftm_msg, length);
pdev_id = DP_HW2SW_MACID(le32_to_cpu(ftm_msg->seg_hdr.pdev_id));
if (pdev_id >= ab->num_radios) {
ath12k_warn(ab, "testmode event not handled due to invalid pdev id\n");
return;
}
ar = ab->pdevs[pdev_id].ar;
if (!ar) {
ath12k_warn(ab, "testmode event not handled due to absence of pdev\n");
return;
}
current_seq = le32_get_bits(ftm_msg->seg_hdr.segmentinfo,
ATH12K_FTM_SEGHDR_CURRENT_SEQ);
total_segments = le32_get_bits(ftm_msg->seg_hdr.segmentinfo,
ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS);
datalen = length - (sizeof(struct ath12k_wmi_ftm_seg_hdr_params));
buf_pos = ftm_msg->data;
if (current_seq == 0) {
ab->ftm_event_obj.expected_seq = 0;
ab->ftm_event_obj.data_pos = 0;
}
data_pos = ab->ftm_event_obj.data_pos;
if ((data_pos + datalen) > ATH_FTM_EVENT_MAX_BUF_LENGTH) {
ath12k_warn(ab,
"Invalid event length date_pos[%d] datalen[%d]\n",
data_pos, datalen);
return;
}
memcpy(&ab->ftm_event_obj.eventdata[data_pos], buf_pos, datalen);
data_pos += datalen;
if (++ab->ftm_event_obj.expected_seq != total_segments) {
ab->ftm_event_obj.data_pos = data_pos;
ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
"partial data received current_seq[%d], total_seg[%d]\n",
current_seq, total_segments);
return;
}
ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
"total data length[%d] = [%d]\n",
data_pos, ftm_msg->seg_hdr.len);
spin_lock_bh(&ar->data_lock);
nl_skb = cfg80211_testmode_alloc_event_skb(ar->ah->hw->wiphy,
2 * nla_total_size(sizeof(u32)) +
nla_total_size(data_pos),
GFP_ATOMIC);
spin_unlock_bh(&ar->data_lock);
if (!nl_skb) {
ath12k_warn(ab,
"failed to allocate skb for testmode wmi event\n");
return;
}
if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD,
ATH_TM_CMD_WMI_FTM) ||
nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
nla_put(nl_skb, ATH_TM_ATTR_DATA, data_pos,
&ab->ftm_event_obj.eventdata[0])) {
ath12k_warn(ab, "failed to populate testmode event");
kfree_skb(nl_skb);
return;
}
cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
}
static int ath12k_tm_cmd_get_version(struct ath12k *ar, struct nlattr *tb[])
{
struct sk_buff *skb;
ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE,
"testmode cmd get version_major %d version_minor %d\n",
ATH_TESTMODE_VERSION_MAJOR,
ATH_TESTMODE_VERSION_MINOR);
spin_lock_bh(&ar->data_lock);
skb = cfg80211_testmode_alloc_reply_skb(ar->ah->hw->wiphy,
2 * nla_total_size(sizeof(u32)));
spin_unlock_bh(&ar->data_lock);
if (!skb)
return -ENOMEM;
if (nla_put_u32(skb, ATH_TM_ATTR_VERSION_MAJOR,
ATH_TESTMODE_VERSION_MAJOR) ||
nla_put_u32(skb, ATH_TM_ATTR_VERSION_MINOR,
ATH_TESTMODE_VERSION_MINOR)) {
kfree_skb(skb);
return -ENOBUFS;
}
return cfg80211_testmode_reply(skb);
}
static int ath12k_tm_cmd_process_ftm(struct ath12k *ar, struct nlattr *tb[])
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct sk_buff *skb;
struct ath12k_wmi_ftm_cmd *ftm_cmd;
int ret = 0;
void *buf;
size_t aligned_len;
u32 cmd_id, buf_len;
u16 chunk_len, total_bytes, num_segments;
u8 segnumber = 0, *bufpos;
ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE, "ah->state %d\n", ar->ah->state);
if (ar->ah->state != ATH12K_HW_STATE_TM)
return -ENETDOWN;
if (!tb[ATH_TM_ATTR_DATA])
return -EINVAL;
buf = nla_data(tb[ATH_TM_ATTR_DATA]);
buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
cmd_id = WMI_PDEV_UTF_CMDID;
ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE,
"testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
cmd_id, buf, buf_len);
ath12k_dbg_dump(ar->ab, ATH12K_DBG_TESTMODE, NULL, "", buf, buf_len);
bufpos = buf;
total_bytes = buf_len;
num_segments = total_bytes / MAX_WMI_UTF_LEN;
if (buf_len - (num_segments * MAX_WMI_UTF_LEN))
num_segments++;
while (buf_len) {
if (buf_len > MAX_WMI_UTF_LEN)
chunk_len = MAX_WMI_UTF_LEN; /* MAX message */
else
chunk_len = buf_len;
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, (chunk_len +
sizeof(struct ath12k_wmi_ftm_cmd)));
if (!skb)
return -ENOMEM;
ftm_cmd = (struct ath12k_wmi_ftm_cmd *)skb->data;
aligned_len = chunk_len + sizeof(struct ath12k_wmi_ftm_seg_hdr_params);
ftm_cmd->tlv_header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
ftm_cmd->seg_hdr.len = cpu_to_le32(total_bytes);
ftm_cmd->seg_hdr.msgref = cpu_to_le32(ar->ftm_msgref);
ftm_cmd->seg_hdr.segmentinfo =
le32_encode_bits(num_segments,
ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS) |
le32_encode_bits(segnumber,
ATH12K_FTM_SEGHDR_CURRENT_SEQ);
ftm_cmd->seg_hdr.pdev_id = cpu_to_le32(ar->pdev->pdev_id);
segnumber++;
memcpy(&ftm_cmd->data, bufpos, chunk_len);
ret = ath12k_wmi_cmd_send(wmi, skb, cmd_id);
if (ret) {
ath12k_warn(ar->ab, "ftm wmi command fail: %d\n", ret);
kfree_skb(skb);
return ret;
}
buf_len -= chunk_len;
bufpos += chunk_len;
}
++ar->ftm_msgref;
return ret;
}
static int ath12k_tm_cmd_testmode_start(struct ath12k *ar, struct nlattr *tb[])
{
if (ar->ah->state == ATH12K_HW_STATE_TM)
return -EALREADY;
if (ar->ah->state != ATH12K_HW_STATE_OFF)
return -EBUSY;
ar->ab->ftm_event_obj.eventdata = kzalloc(ATH_FTM_EVENT_MAX_BUF_LENGTH,
GFP_KERNEL);
if (!ar->ab->ftm_event_obj.eventdata)
return -ENOMEM;
ar->ah->state = ATH12K_HW_STATE_TM;
ar->ftm_msgref = 0;
return 0;
}
static int ath12k_tm_cmd_wmi(struct ath12k *ar, struct nlattr *tb[])
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct sk_buff *skb;
struct wmi_pdev_set_param_cmd *cmd;
int ret = 0, tag;
void *buf;
u32 cmd_id, buf_len;
if (!tb[ATH_TM_ATTR_DATA])
return -EINVAL;
if (!tb[ATH_TM_ATTR_WMI_CMDID])
return -EINVAL;
buf = nla_data(tb[ATH_TM_ATTR_DATA]);
buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
if (!buf_len) {
ath12k_warn(ar->ab, "No data present in testmode command\n");
return -EINVAL;
}
cmd_id = nla_get_u32(tb[ATH_TM_ATTR_WMI_CMDID]);
cmd = buf;
tag = le32_get_bits(cmd->tlv_header, WMI_TLV_TAG);
if (tag == WMI_TAG_PDEV_SET_PARAM_CMD)
cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE,
"testmode cmd wmi cmd_id %d buf length %d\n",
cmd_id, buf_len);
ath12k_dbg_dump(ar->ab, ATH12K_DBG_TESTMODE, NULL, "", buf, buf_len);
skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
if (!skb)
return -ENOMEM;
memcpy(skb->data, buf, buf_len);
ret = ath12k_wmi_cmd_send(wmi, skb, cmd_id);
if (ret) {
dev_kfree_skb(skb);
ath12k_warn(ar->ab, "failed to transmit wmi command (testmode): %d\n",
ret);
}
return ret;
}
int ath12k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len)
{
struct ath12k_hw *ah = hw->priv;
struct ath12k *ar = NULL;
struct nlattr *tb[ATH_TM_ATTR_MAX + 1];
struct ath12k_base *ab;
struct wiphy *wiphy = hw->wiphy;
int ret;
lockdep_assert_held(&wiphy->mtx);
ret = nla_parse(tb, ATH_TM_ATTR_MAX, data, len, ath12k_tm_policy,
NULL);
if (ret)
return ret;
if (!tb[ATH_TM_ATTR_CMD])
return -EINVAL;
/* TODO: have to handle ar for MLO case */
if (ah->num_radio)
ar = ah->radio;
if (!ar)
return -EINVAL;
ab = ar->ab;
switch (nla_get_u32(tb[ATH_TM_ATTR_CMD])) {
case ATH_TM_CMD_WMI:
return ath12k_tm_cmd_wmi(ar, tb);
case ATH_TM_CMD_TESTMODE_START:
return ath12k_tm_cmd_testmode_start(ar, tb);
case ATH_TM_CMD_GET_VERSION:
return ath12k_tm_cmd_get_version(ar, tb);
case ATH_TM_CMD_WMI_FTM:
set_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags);
return ath12k_tm_cmd_process_ftm(ar, tb);
default:
return -EOPNOTSUPP;
}
}

View File

@ -0,0 +1,40 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "hif.h"
#ifdef CONFIG_NL80211_TESTMODE
void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id,
struct sk_buff *skb);
void ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id,
const struct ath12k_wmi_ftm_event *ftm_msg,
u16 length);
int ath12k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
#else
static inline void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id,
struct sk_buff *skb)
{
}
static inline void ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id,
const struct ath12k_wmi_ftm_event *msg,
u16 length)
{
}
static inline int ath12k_tm_cmd(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
void *data, int len)
{
return 0;
}
#endif

File diff suppressed because it is too large Load Diff

View File

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_WMI_H
@ -25,6 +25,7 @@
struct ath12k_base;
struct ath12k;
struct ath12k_link_vif;
struct ath12k_fw_stats;
/* There is no signed version of __le32, so for a temporary solution come
* up with our own version. The idea is from fs/ntfs/endian.h.
@ -516,6 +517,9 @@ enum wmi_tlv_cmd_id {
WMI_REQUEST_RCPI_CMDID,
WMI_REQUEST_PEER_STATS_INFO_CMDID,
WMI_REQUEST_RADIO_CHAN_STATS_CMDID,
WMI_REQUEST_WLM_STATS_CMDID,
WMI_REQUEST_CTRL_PATH_STATS_CMDID,
WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID = WMI_REQUEST_CTRL_PATH_STATS_CMDID + 3,
WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_ARP_NS_OFL),
WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
@ -785,6 +789,9 @@ enum wmi_tlv_event_id {
WMI_UPDATE_RCPI_EVENTID,
WMI_PEER_STATS_INFO_EVENTID,
WMI_RADIO_CHAN_STATS_EVENTID,
WMI_WLM_STATS_EVENTID,
WMI_CTRL_PATH_STATS_EVENTID,
WMI_HALPHY_STATS_CTRL_PATH_EVENTID,
WMI_NLO_MATCH_EVENTID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
WMI_NLO_SCAN_COMPLETE_EVENTID,
WMI_APFIND_EVENTID,
@ -1191,6 +1198,7 @@ enum wmi_tlv_tag {
WMI_TAG_ARRAY_BYTE,
WMI_TAG_ARRAY_STRUCT,
WMI_TAG_ARRAY_FIXED_STRUCT,
WMI_TAG_ARRAY_INT16,
WMI_TAG_LAST_ARRAY_ENUM = 31,
WMI_TAG_SERVICE_READY_EVENT,
WMI_TAG_HAL_REG_CAPABILITIES,
@ -1941,6 +1949,12 @@ enum wmi_tlv_tag {
WMI_TAG_MAC_PHY_CAPABILITIES_EXT = 0x36F,
WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
WMI_TAG_TPC_STATS_GET_CMD = 0x38B,
WMI_TAG_TPC_STATS_EVENT_FIXED_PARAM,
WMI_TAG_TPC_STATS_CONFIG_EVENT,
WMI_TAG_TPC_STATS_REG_PWR_ALLOWED,
WMI_TAG_TPC_STATS_RATES_ARRAY,
WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT,
WMI_TAG_EHT_RATE_SET = 0x3C4,
WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5,
WMI_TAG_MLO_TX_SEND_PARAMS,
@ -1958,6 +1972,8 @@ enum wmi_tlv_tag {
WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD = 0x3D9,
WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD = 0x3FB,
WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM = 0x442,
WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM,
WMI_TAG_MAX
};
@ -3624,6 +3640,26 @@ struct ath12k_wmi_p2p_noa_info {
struct ath12k_wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
} __packed;
#define MAX_WMI_UTF_LEN 252
struct ath12k_wmi_ftm_seg_hdr_params {
__le32 len;
__le32 msgref;
__le32 segmentinfo;
__le32 pdev_id;
} __packed;
struct ath12k_wmi_ftm_cmd {
__le32 tlv_header;
struct ath12k_wmi_ftm_seg_hdr_params seg_hdr;
u8 data[];
} __packed;
struct ath12k_wmi_ftm_event {
struct ath12k_wmi_ftm_seg_hdr_params seg_hdr;
u8 data[];
} __packed;
#define WMI_BEACON_TX_BUFFER_SIZE 512
#define WMI_EMA_BEACON_CNT GENMASK(7, 0)
@ -4604,6 +4640,7 @@ enum wmi_rate_preamble {
WMI_RATE_PREAMBLE_HT,
WMI_RATE_PREAMBLE_VHT,
WMI_RATE_PREAMBLE_HE,
WMI_RATE_PREAMBLE_EHT,
};
/**
@ -5628,6 +5665,245 @@ enum wmi_sta_keepalive_method {
#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30
#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
struct wmi_stats_event {
__le32 stats_id;
__le32 num_pdev_stats;
__le32 num_vdev_stats;
__le32 num_peer_stats;
__le32 num_bcnflt_stats;
__le32 num_chan_stats;
__le32 num_mib_stats;
__le32 pdev_id;
__le32 num_bcn_stats;
__le32 num_peer_extd_stats;
__le32 num_peer_extd2_stats;
} __packed;
enum wmi_stats_id {
WMI_REQUEST_PDEV_STAT = BIT(2),
WMI_REQUEST_VDEV_STAT = BIT(3),
WMI_REQUEST_BCN_STAT = BIT(11),
};
struct wmi_request_stats_cmd {
__le32 tlv_header;
__le32 stats_id;
__le32 vdev_id;
struct ath12k_wmi_mac_addr_params peer_macaddr;
__le32 pdev_id;
} __packed;
#define WLAN_MAX_AC 4
#define MAX_TX_RATE_VALUES 10
struct wmi_vdev_stats_params {
__le32 vdev_id;
__le32 beacon_snr;
__le32 data_snr;
__le32 num_tx_frames[WLAN_MAX_AC];
__le32 num_rx_frames;
__le32 num_tx_frames_retries[WLAN_MAX_AC];
__le32 num_tx_frames_failures[WLAN_MAX_AC];
__le32 num_rts_fail;
__le32 num_rts_success;
__le32 num_rx_err;
__le32 num_rx_discard;
__le32 num_tx_not_acked;
__le32 tx_rate_history[MAX_TX_RATE_VALUES];
__le32 beacon_rssi_history[MAX_TX_RATE_VALUES];
} __packed;
struct ath12k_wmi_bcn_stats_params {
__le32 vdev_id;
__le32 tx_bcn_succ_cnt;
__le32 tx_bcn_outage_cnt;
} __packed;
struct ath12k_wmi_pdev_base_stats_params {
a_sle32 chan_nf;
__le32 tx_frame_count; /* Cycles spent transmitting frames */
__le32 rx_frame_count; /* Cycles spent receiving frames */
__le32 rx_clear_count; /* Total channel busy time, evidently */
__le32 cycle_count; /* Total on-channel time */
__le32 phy_err_count;
__le32 chan_tx_pwr;
} __packed;
struct ath12k_wmi_pdev_tx_stats_params {
a_sle32 comp_queued;
a_sle32 comp_delivered;
a_sle32 msdu_enqued;
a_sle32 mpdu_enqued;
a_sle32 wmm_drop;
a_sle32 local_enqued;
a_sle32 local_freed;
a_sle32 hw_queued;
a_sle32 hw_reaped;
a_sle32 underrun;
a_sle32 tx_abort;
a_sle32 mpdus_requed;
__le32 tx_ko;
__le32 data_rc;
__le32 self_triggers;
__le32 sw_retry_failure;
__le32 illgl_rate_phy_err;
__le32 pdev_cont_xretry;
__le32 pdev_tx_timeout;
__le32 pdev_resets;
__le32 stateless_tid_alloc_failure;
__le32 phy_underrun;
__le32 txop_ovf;
} __packed;
struct ath12k_wmi_pdev_rx_stats_params {
a_sle32 mid_ppdu_route_change;
a_sle32 status_rcvd;
a_sle32 r0_frags;
a_sle32 r1_frags;
a_sle32 r2_frags;
a_sle32 r3_frags;
a_sle32 htt_msdus;
a_sle32 htt_mpdus;
a_sle32 loc_msdus;
a_sle32 loc_mpdus;
a_sle32 oversize_amsdu;
a_sle32 phy_errs;
a_sle32 phy_err_drop;
a_sle32 mpdu_errs;
} __packed;
struct ath12k_wmi_pdev_stats_params {
struct ath12k_wmi_pdev_base_stats_params base;
struct ath12k_wmi_pdev_tx_stats_params tx;
struct ath12k_wmi_pdev_rx_stats_params rx;
} __packed;
struct ath12k_fw_stats_req_params {
u32 stats_id;
u32 vdev_id;
u32 pdev_id;
};
#define WMI_REQ_CTRL_PATH_PDEV_TX_STAT 1
#define WMI_REQUEST_CTRL_PATH_STAT_GET 1
#define WMI_TPC_CONFIG BIT(1)
#define WMI_TPC_REG_PWR_ALLOWED BIT(2)
#define WMI_TPC_RATES_ARRAY1 BIT(3)
#define WMI_TPC_RATES_ARRAY2 BIT(4)
#define WMI_TPC_RATES_DL_OFDMA_ARRAY BIT(5)
#define WMI_TPC_CTL_PWR_ARRAY BIT(6)
#define WMI_TPC_CONFIG_PARAM 0x1
#define ATH12K_TPC_RATE_ARRAY_MU GENMASK(15, 8)
#define ATH12K_TPC_RATE_ARRAY_SU GENMASK(7, 0)
#define TPC_STATS_REG_PWR_ALLOWED_TYPE 0
enum wmi_halphy_ctrl_path_stats_id {
WMI_HALPHY_PDEV_TX_SU_STATS = 0,
WMI_HALPHY_PDEV_TX_SUTXBF_STATS,
WMI_HALPHY_PDEV_TX_MU_STATS,
WMI_HALPHY_PDEV_TX_MUTXBF_STATS,
WMI_HALPHY_PDEV_TX_STATS_MAX,
};
enum ath12k_wmi_tpc_stats_rates_array {
ATH12K_TPC_STATS_RATES_ARRAY1,
ATH12K_TPC_STATS_RATES_ARRAY2,
};
enum ath12k_wmi_tpc_stats_ctl_array {
ATH12K_TPC_STATS_CTL_ARRAY,
ATH12K_TPC_STATS_CTL_160ARRAY,
};
enum ath12k_wmi_tpc_stats_events {
ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT,
ATH12K_TPC_STATS_RATES_EVENT1,
ATH12K_TPC_STATS_RATES_EVENT2,
ATH12K_TPC_STATS_CTL_TABLE_EVENT
};
struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params {
__le32 tlv_header;
__le32 stats_id_mask;
__le32 request_id;
__le32 action;
__le32 subid;
} __packed;
struct ath12k_wmi_pdev_tpc_stats_event_fixed_params {
__le32 pdev_id;
__le32 end_of_event;
__le32 event_count;
} __packed;
struct wmi_tpc_config_params {
__le32 reg_domain;
__le32 chan_freq;
__le32 phy_mode;
__le32 twice_antenna_reduction;
__le32 twice_max_reg_power;
__le32 twice_antenna_gain;
__le32 power_limit;
__le32 rate_max;
__le32 num_tx_chain;
__le32 ctl;
__le32 flags;
__le32 caps;
} __packed;
struct wmi_max_reg_power_fixed_params {
__le32 reg_power_type;
__le32 reg_array_len;
__le32 d1;
__le32 d2;
__le32 d3;
__le32 d4;
} __packed;
struct wmi_max_reg_power_allowed_arg {
struct wmi_max_reg_power_fixed_params tpc_reg_pwr;
s16 *reg_pwr_array;
};
struct wmi_tpc_rates_array_fixed_params {
__le32 rate_array_type;
__le32 rate_array_len;
} __packed;
struct wmi_tpc_rates_array_arg {
struct wmi_tpc_rates_array_fixed_params tpc_rates_array;
s16 *rate_array;
};
struct wmi_tpc_ctl_pwr_fixed_params {
__le32 ctl_array_type;
__le32 ctl_array_len;
__le32 end_of_ctl_pwr;
__le32 ctl_pwr_count;
__le32 d1;
__le32 d2;
__le32 d3;
__le32 d4;
} __packed;
struct wmi_tpc_ctl_pwr_table_arg {
struct wmi_tpc_ctl_pwr_fixed_params tpc_ctl_pwr;
s8 *ctl_pwr_table;
};
struct wmi_tpc_stats_arg {
u32 pdev_id;
u32 event_count;
u32 end_of_event;
u32 tlvs_rcvd;
struct wmi_max_reg_power_allowed_arg max_reg_allowed_power;
struct wmi_tpc_rates_array_arg rates_array1;
struct wmi_tpc_rates_array_arg rates_array2;
struct wmi_tpc_config_params tpc_config;
struct wmi_tpc_ctl_pwr_table_arg ctl_array;
};
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config);
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@ -5639,7 +5915,7 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
struct sk_buff *frame);
int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
const u8 *p2p_ie);
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn,
struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args);
@ -5753,6 +6029,13 @@ int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
const u8 *buf, size_t buf_len);
int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table);
int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table);
int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
u32 vdev_id, u32 pdev_id);
__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len);
int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar,
enum wmi_halphy_ctrl_path_stats_id tpc_stats_type);
void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar);
static inline u32
ath12k_wmi_caps_ext_get_pdev_id(const struct ath12k_wmi_caps_ext_params *param)
@ -5806,5 +6089,8 @@ int ath12k_wmi_sta_keepalive(struct ath12k *ar,
int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params);
int ath12k_wmi_mlo_ready(struct ath12k *ar);
int ath12k_wmi_mlo_teardown(struct ath12k *ar);
void ath12k_wmi_fw_stats_dump(struct ath12k *ar,
struct ath12k_fw_stats *fw_stats, u32 stats_id,
char *buf);
#endif

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
@ -990,6 +990,7 @@ exit:
case ATH12K_HW_STATE_RESTARTING:
case ATH12K_HW_STATE_RESTARTED:
case ATH12K_HW_STATE_WEDGED:
case ATH12K_HW_STATE_TM:
ath12k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
ah->state);
ret = -EIO;

View File

@ -274,7 +274,6 @@ struct ath_node {
struct ath_tx_control {
struct ath_txq *txq;
struct ath_node *an;
struct ieee80211_sta *sta;
u8 paprd;
};
@ -1018,7 +1017,7 @@ struct ath_softc {
u8 gtt_cnt;
u32 intrstatus;
u32 rx_active_check_time;
unsigned long rx_active_check_time;
u32 rx_active_count;
u16 ps_flags; /* PS_* */
bool ps_enabled;

View File

@ -628,12 +628,12 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
else
RX_STAT_INC(sc, rx_spectral_sample_err);
memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN);
/* Mix the received bins to the /dev/random
* pool
*/
add_device_randomness(sample_buf, num_bins);
memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN);
}
/* Process a normal frame */

View File

@ -647,7 +647,9 @@ static int ath9k_of_init(struct ath_softc *sc)
ah->ah_flags |= AH_NO_EEP_SWAP;
}
of_get_mac_address(np, common->macaddr);
ret = of_get_mac_address(np, common->macaddr);
if (ret == -EPROBE_DEFER)
return ret;
return 0;
}

View File

@ -2291,19 +2291,10 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = txctl->sta;
struct ieee80211_vif *vif = info->control.vif;
struct ath_vif *avp;
struct ath_softc *sc = hw->priv;
int frmlen = skb->len + FCS_LEN;
int padpos, padsize;
/* NOTE: sta can be NULL according to net/mac80211.h */
if (sta)
txctl->an = (struct ath_node *)sta->drv_priv;
else if (vif && ieee80211_is_data(hdr->frame_control)) {
avp = (void *)vif->drv_priv;
txctl->an = &avp->mcast_node;
}
if (info->control.hw_key)
frmlen += info->control.hw_key->icv_len;

View File

@ -1,59 +1,59 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/* "API" level of the ath11k testmode interface. Bump it after every
/* "API" level of the ath testmode interface. Bump it after every
* incompatible interface change.
*/
#define ATH11K_TESTMODE_VERSION_MAJOR 1
#define ATH_TESTMODE_VERSION_MAJOR 1
/* Bump this after every _compatible_ interface change, for example
* addition of a new command or an attribute.
*/
#define ATH11K_TESTMODE_VERSION_MINOR 1
#define ATH_TESTMODE_VERSION_MINOR 1
#define ATH11K_TM_DATA_MAX_LEN 5000
#define ATH11K_FTM_EVENT_MAX_BUF_LENGTH 2048
#define ATH_TM_DATA_MAX_LEN 5000
#define ATH_FTM_EVENT_MAX_BUF_LENGTH 2048
enum ath11k_tm_attr {
__ATH11K_TM_ATTR_INVALID = 0,
ATH11K_TM_ATTR_CMD = 1,
ATH11K_TM_ATTR_DATA = 2,
ATH11K_TM_ATTR_WMI_CMDID = 3,
ATH11K_TM_ATTR_VERSION_MAJOR = 4,
ATH11K_TM_ATTR_VERSION_MINOR = 5,
ATH11K_TM_ATTR_WMI_OP_VERSION = 6,
enum ath_tm_attr {
__ATH_TM_ATTR_INVALID = 0,
ATH_TM_ATTR_CMD = 1,
ATH_TM_ATTR_DATA = 2,
ATH_TM_ATTR_WMI_CMDID = 3,
ATH_TM_ATTR_VERSION_MAJOR = 4,
ATH_TM_ATTR_VERSION_MINOR = 5,
ATH_TM_ATTR_WMI_OP_VERSION = 6,
/* keep last */
__ATH11K_TM_ATTR_AFTER_LAST,
ATH11K_TM_ATTR_MAX = __ATH11K_TM_ATTR_AFTER_LAST - 1,
__ATH_TM_ATTR_AFTER_LAST,
ATH_TM_ATTR_MAX = __ATH_TM_ATTR_AFTER_LAST - 1,
};
/* All ath11k testmode interface commands specified in
* ATH11K_TM_ATTR_CMD
/* All ath testmode interface commands specified in
* ATH_TM_ATTR_CMD
*/
enum ath11k_tm_cmd {
/* Returns the supported ath11k testmode interface version in
* ATH11K_TM_ATTR_VERSION. Always guaranteed to work. User space
enum ath_tm_cmd {
/* Returns the supported ath testmode interface version in
* ATH_TM_ATTR_VERSION. Always guaranteed to work. User space
* uses this to verify it's using the correct version of the
* testmode interface
*/
ATH11K_TM_CMD_GET_VERSION = 0,
ATH_TM_CMD_GET_VERSION = 0,
/* The command used to transmit a WMI command to the firmware and
* the event to receive WMI events from the firmware. Without
* struct wmi_cmd_hdr header, only the WMI payload. Command id is
* provided with ATH11K_TM_ATTR_WMI_CMDID and payload in
* ATH11K_TM_ATTR_DATA.
* provided with ATH_TM_ATTR_WMI_CMDID and payload in
* ATH_TM_ATTR_DATA.
*/
ATH11K_TM_CMD_WMI = 1,
ATH_TM_CMD_WMI = 1,
/* Boots the UTF firmware, the netdev interface must be down at the
* time.
*/
ATH11K_TM_CMD_TESTMODE_START = 2,
ATH_TM_CMD_TESTMODE_START = 2,
/* The command used to transmit a FTM WMI command to the firmware
* and the event to receive WMI events from the firmware. The data
@ -62,5 +62,5 @@ enum ath11k_tm_cmd {
* The data payload size could be large and the driver needs to
* send segmented data to firmware.
*/
ATH11K_TM_CMD_WMI_FTM = 3,
ATH_TM_CMD_WMI_FTM = 3,
};

View File

@ -561,8 +561,10 @@ struct brcmf_mp_device *brcmf_get_module_param(struct device *dev,
if (!found) {
/* No platform data for this device, try OF and DMI data */
brcmf_dmi_probe(settings, chip, chiprev);
if (brcmf_of_probe(dev, bus_type, settings) == -EPROBE_DEFER)
if (brcmf_of_probe(dev, bus_type, settings) == -EPROBE_DEFER) {
kfree(settings);
return ERR_PTR(-EPROBE_DEFER);
}
brcmf_acpi_probe(dev, bus_type, settings);
}
return settings;

View File

@ -896,14 +896,16 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
}
/* 1) Prepare USB boot loader for runtime image */
brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state));
err = brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state));
if (err)
goto fail;
rdlstate = le32_to_cpu(state.state);
rdlbytes = le32_to_cpu(state.bytes);
/* 2) Check we are in the Waiting state */
if (rdlstate != DL_WAITING) {
brcmf_err("Failed to DL_START\n");
brcmf_err("Invalid DL state: %u\n", rdlstate);
err = -EINVAL;
goto fail;
}

View File

@ -81,14 +81,25 @@ config IWLMVM
of the devices that use this firmware is available here:
https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
config IWLMLD
tristate "Intel Wireless WiFi MLD Firmware support"
select WANT_DEV_COREDUMP
depends on MAC80211
depends on PTP_1588_CLOCK_OPTIONAL
help
This is the driver that supports firmwares of MLD capable devices.
The list of the devices that use this firmware is available here:
https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
# don't call it _MODULE -- will confuse Kconfig/fixdep/...
config IWLWIFI_OPMODE_MODULAR
bool
default y if IWLDVM=m
default y if IWLMVM=m
default y if IWLMLD=m
comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
depends on IWLDVM=n && IWLMVM=n
comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM or IWLMLD"
depends on IWLDVM=n && IWLMVM=n && IWLMLD=n
menu "Debugging Options"

View File

@ -12,7 +12,8 @@ iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o
iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o
iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o cfg/bz.o cfg/sc.o cfg/dr.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o
iwlwifi-$(CONFIG_IWLMLD) += cfg/bz.o cfg/sc.o cfg/dr.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
@ -20,6 +21,7 @@ iwlwifi-objs += fw/img.o fw/notif-wait.o fw/rs.o
iwlwifi-objs += fw/dbg.o fw/pnvm.o fw/dump.o
iwlwifi-objs += fw/regulatory.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
iwlwifi-$(CONFIG_IWLMLD) += fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_EFI) += fw/uefi.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
@ -33,6 +35,7 @@ ccflags-y += -I$(src)
obj-$(CONFIG_IWLDVM) += dvm/
obj-$(CONFIG_IWLMVM) += mvm/
obj-$(CONFIG_IWLMEI) += mei/
obj-$(CONFIG_IWLMLD) += mld/
obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/

View File

@ -205,7 +205,6 @@ const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
const char iwl_ax204_name[] = "Intel(R) Wi-Fi 6 AX204 160MHz";
const char iwl_ax200_killer_1650w_name[] =
"Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)";

View File

@ -180,7 +180,6 @@ const struct iwl_cfg_trans_params iwl_ma_trans_cfg = {
};
const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz";
const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz";
const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz";

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2024 Intel Corporation
* Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@ -10,10 +10,10 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
#define IWL_BZ_UCODE_API_MAX 96
#define IWL_BZ_UCODE_API_MAX 98
/* Lowest firmware API version supported */
#define IWL_BZ_UCODE_API_MIN 92
#define IWL_BZ_UCODE_API_MIN 93
/* NVM versions */
#define IWL_BZ_NVM_VERSION 0x0a1d
@ -38,6 +38,11 @@
#define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_BZ_A_HR_B_FW_PRE "-" __stringify(api) ".ucode"
#if !IS_ENABLED(CONFIG_IWLMVM)
const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
#endif
static const struct iwl_base_params iwl_bz_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
.num_of_queues = 512,
@ -50,6 +55,13 @@ static const struct iwl_base_params iwl_bz_base_params = {
.pcie_l1_allowed = true,
};
const struct iwl_ht_params iwl_bz_ht_params = {
.stbc = true,
.ldpc = true,
.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ) |
BIT(NL80211_BAND_6GHZ),
};
#define IWL_DEVICE_BZ_COMMON \
.ucode_api_max = IWL_BZ_UCODE_API_MAX, \
.ucode_api_min = IWL_BZ_UCODE_API_MIN, \
@ -113,7 +125,7 @@ static const struct iwl_base_params iwl_bz_base_params = {
#define IWL_DEVICE_BZ \
IWL_DEVICE_BZ_COMMON, \
.ht_params = &iwl_22000_ht_params
.ht_params = &iwl_bz_ht_params
/*
* This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
@ -145,7 +157,6 @@ const struct iwl_cfg_trans_params iwl_gl_trans_cfg = {
.low_latency_xtal = true,
};
const char iwl_bz_name[] = "Intel(R) TBD Bz device";
const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz";
const char iwl_wh_name[] = "Intel(R) Wi-Fi 7 BE211 320MHz";
const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz";

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2024 Intel Corporation
* Copyright (C) 2024-2025 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@ -9,7 +9,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
#define IWL_DR_UCODE_API_MAX 96
#define IWL_DR_UCODE_API_MAX 98
/* Lowest firmware API version supported */
#define IWL_DR_UCODE_API_MIN 96
@ -114,7 +114,7 @@ static const struct iwl_base_params iwl_dr_base_params = {
.uhb_supported = true, \
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
.num_rbds = IWL_NUM_RBDS_DR_EHT, \
.ht_params = &iwl_22000_ht_params
.ht_params = &iwl_bz_ht_params
/*
* This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
@ -148,11 +148,9 @@ const struct iwl_cfg_trans_params iwl_br_trans_cfg = {
.mq_rx_supported = true,
.rf_id = true,
.gen2 = true,
.integrated = true,
.umac_prph_offset = 0x300000,
.xtal_latency = 12000,
.low_latency_xtal = true,
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
const char iwl_br_name[] = "Intel(R) TBD Br device";

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2024 Intel Corporation
* Copyright (C) 2018-2025 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@ -10,10 +10,10 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
#define IWL_SC_UCODE_API_MAX 96
#define IWL_SC_UCODE_API_MAX 98
/* Lowest firmware API version supported */
#define IWL_SC_UCODE_API_MIN 92
#define IWL_SC_UCODE_API_MIN 93
/* NVM versions */
#define IWL_SC_NVM_VERSION 0x0a1d
@ -121,7 +121,7 @@ static const struct iwl_base_params iwl_sc_base_params = {
.uhb_supported = true, \
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
.num_rbds = IWL_NUM_RBDS_SC_EHT, \
.ht_params = &iwl_22000_ht_params
.ht_params = &iwl_bz_ht_params
/*
* This size was picked according to 8 MSDUs inside 512 A-MSDUs in an
@ -142,22 +142,16 @@ const struct iwl_cfg_trans_params iwl_sc_trans_cfg = {
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
const char iwl_sc_name[] = "Intel(R) TBD Sc device";
const struct iwl_cfg iwl_cfg_sc = {
.fw_name_mac = "sc",
IWL_DEVICE_SC,
};
const char iwl_sc2_name[] = "Intel(R) TBD Sc2 device";
const struct iwl_cfg iwl_cfg_sc2 = {
.fw_name_mac = "sc2",
IWL_DEVICE_SC,
};
const char iwl_sc2f_name[] = "Intel(R) TBD Sc2f device";
const struct iwl_cfg iwl_cfg_sc2f = {
.fw_name_mac = "sc2f",
IWL_DEVICE_SC,

View File

@ -1180,85 +1180,87 @@ struct iwl_dram_scratch {
} __packed;
struct iwl_tx_cmd {
/*
* MPDU byte count:
* MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
* + 8 byte IV for CCM or TKIP (not used for WEP)
* + Data payload
* + 8-byte MIC (not used for CCM/WEP)
* NOTE: Does not include Tx command bytes, post-MAC pad bytes,
* MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i
* Range: 14-2342 bytes.
*/
__le16 len;
/* New members MUST be added within the __struct_group() macro below. */
__struct_group(iwl_tx_cmd_hdr, __hdr, __packed,
/*
* MPDU byte count:
* MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
* + 8 byte IV for CCM or TKIP (not used for WEP)
* + Data payload
* + 8-byte MIC (not used for CCM/WEP)
* NOTE: Does not include Tx command bytes, post-MAC pad bytes,
* MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i
* Range: 14-2342 bytes.
*/
__le16 len;
/*
* MPDU or MSDU byte count for next frame.
* Used for fragmentation and bursting, but not 11n aggregation.
* Same as "len", but for next frame. Set to 0 if not applicable.
*/
__le16 next_frame_len;
/*
* MPDU or MSDU byte count for next frame.
* Used for fragmentation and bursting, but not 11n aggregation.
* Same as "len", but for next frame. Set to 0 if not applicable.
*/
__le16 next_frame_len;
__le32 tx_flags; /* TX_CMD_FLG_* */
__le32 tx_flags; /* TX_CMD_FLG_* */
/* uCode may modify this field of the Tx command (in host DRAM!).
* Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
struct iwl_dram_scratch scratch;
/* uCode may modify this field of the Tx command (in host DRAM!).
* Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
struct iwl_dram_scratch scratch;
/* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
__le32 rate_n_flags; /* RATE_MCS_* */
/* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
__le32 rate_n_flags; /* RATE_MCS_* */
/* Index of destination station in uCode's station table */
u8 sta_id;
/* Index of destination station in uCode's station table */
u8 sta_id;
/* Type of security encryption: CCM or TKIP */
u8 sec_ctl; /* TX_CMD_SEC_* */
/* Type of security encryption: CCM or TKIP */
u8 sec_ctl; /* TX_CMD_SEC_* */
/*
* Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
* Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
* data frames, this field may be used to selectively reduce initial
* rate (via non-0 value) for special frames (e.g. management), while
* still supporting rate scaling for all frames.
*/
u8 initial_rate_index;
u8 reserved;
u8 key[16];
__le16 next_frame_flags;
__le16 reserved2;
union {
__le32 life_time;
__le32 attempt;
} stop_time;
/*
* Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
* Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
* data frames, this field may be used to selectively reduce initial
* rate (via non-0 value) for special frames (e.g. management), while
* still supporting rate scaling for all frames.
*/
u8 initial_rate_index;
u8 reserved;
u8 key[16];
__le16 next_frame_flags;
__le16 reserved2;
union {
__le32 life_time;
__le32 attempt;
} stop_time;
/* Host DRAM physical address pointer to "scratch" in this command.
* Must be dword aligned. "0" in dram_lsb_ptr disables usage. */
__le32 dram_lsb_ptr;
u8 dram_msb_ptr;
/* Host DRAM physical address pointer to "scratch" in this command.
* Must be dword aligned. "0" in dram_lsb_ptr disables usage. */
__le32 dram_lsb_ptr;
u8 dram_msb_ptr;
u8 rts_retry_limit; /*byte 50 */
u8 data_retry_limit; /*byte 51 */
u8 tid_tspec;
union {
__le16 pm_frame_timeout;
__le16 attempt_duration;
} timeout;
u8 rts_retry_limit; /*byte 50 */
u8 data_retry_limit; /*byte 51 */
u8 tid_tspec;
union {
__le16 pm_frame_timeout;
__le16 attempt_duration;
} timeout;
/*
* Duration of EDCA burst Tx Opportunity, in 32-usec units.
* Set this if txop time is not specified by HCCA protocol (e.g. by AP).
*/
__le16 driver_txop;
/*
* Duration of EDCA burst Tx Opportunity, in 32-usec units.
* Set this if txop time is not specified by HCCA protocol (e.g. by AP).
*/
__le16 driver_txop;
);
/*
* MAC header goes here, followed by 2 bytes padding if MAC header
* length is 26 or 30 bytes, followed by payload data
*/
union {
DECLARE_FLEX_ARRAY(u8, payload);
DECLARE_FLEX_ARRAY(struct ieee80211_hdr, hdr);
};
struct ieee80211_hdr hdr[];
} __packed;
static_assert(offsetof(struct iwl_tx_cmd, hdr) == sizeof(struct iwl_tx_cmd_hdr),
"struct member likely outside of __struct_group()");
/*
* TX command response is sent after *agn* transmission attempts.
@ -2312,7 +2314,7 @@ struct iwl_scan_cmd {
/* For active scans (set to all-0s for passive scans).
* Does not include payload. Must specify Tx rate; no rate scaling. */
struct iwl_tx_cmd tx_cmd;
struct iwl_tx_cmd_hdr tx_cmd;
/* For directed active scans (set to all-0s otherwise) */
struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
@ -2423,7 +2425,7 @@ struct iwlagn_beacon_notif {
*/
struct iwl_tx_beacon_cmd {
struct iwl_tx_cmd tx;
struct iwl_tx_cmd_hdr tx;
__le16 tim_idx;
u8 tim_size;
u8 reserved1;

View File

@ -124,17 +124,6 @@ enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv)
return restriction->tx_stream;
}
enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
{
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
struct iwl_tt_restriction *restriction;
if (!priv->thermal_throttle.advanced_tt)
return IWL_ANT_OK_MULTI;
restriction = tt->restriction + tt->state;
return restriction->rx_stream;
}
#define CT_KILL_EXIT_DURATION (5) /* 5 seconds duration */
#define CT_KILL_WAITING_DURATION (300) /* 300ms duration */

View File

@ -100,7 +100,6 @@ u8 iwl_tt_current_power_mode(struct iwl_priv *priv);
bool iwl_tt_is_low_power_state(struct iwl_priv *priv);
bool iwl_ht_enabled(struct iwl_priv *priv);
enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv);
enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv);
void iwl_tt_enter_ct_kill(struct iwl_priv *priv);
void iwl_tt_exit_ct_kill(struct iwl_priv *priv);
void iwl_tt_handler(struct iwl_priv *priv);

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018, 2020-2021, 2024 Intel Corporation
* Copyright (C) 2012-2014, 2018, 2020-2021, 2024-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -117,7 +117,7 @@ struct iwl_alive_ntf_v6 {
* finishing init flow
* @IWL_INIT_DEBUG_CFG: driver is going to send debug config command
* @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands
* @IWL_INIT_PHY: driver is going to send PHY_DB commands
* @IWL_INIT_PHY: driver is going to send the PHY_CONFIGURATION_CMD
*/
enum iwl_extended_cfg_flags {
IWL_INIT_DEBUG_CFG,

View File

@ -502,10 +502,15 @@ enum iwl_legacy_cmds {
/**
* @DTS_MEASUREMENT_NOTIFICATION:
* &struct iwl_dts_measurement_notif_v1 or
* &struct iwl_dts_measurement_notif_v2
* &struct iwl_dts_measurement_notif
*/
DTS_MEASUREMENT_NOTIFICATION = 0xdd,
/**
* @DEBUG_HOST_COMMAND: &struct iwl_dhc_cmd
*/
DEBUG_HOST_COMMAND = 0xf1,
/**
* @LDBG_CONFIG_CMD: configure continuous trace recording
*/

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2022 Intel Corporation
* Copyright (C) 2012-2014, 2022, 2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -14,6 +14,9 @@
* @FW_CTXT_COLOR_POS: position of the color
* @FW_CTXT_COLOR_MSK: mask of the color
* @FW_CTXT_INVALID: value used to indicate unused/invalid
* @FW_CTXT_ID_INVALID: value used to indicate unused/invalid. This can be
* used with newer firmware which no longer use the color. Typically,
* firmware versions supported by iwlmld can use this value.
*/
enum iwl_ctxt_id_and_color {
FW_CTXT_ID_POS = 0,
@ -21,6 +24,7 @@ enum iwl_ctxt_id_and_color {
FW_CTXT_COLOR_POS = 8,
FW_CTXT_COLOR_MSK = 0xff << FW_CTXT_COLOR_POS,
FW_CTXT_INVALID = 0xffffffff,
FW_CTXT_ID_INVALID = 0xff,
};
#define FW_CMD_ID_AND_COLOR(_id, _color) (((_id) << FW_CTXT_ID_POS) |\

View File

@ -1006,7 +1006,7 @@ struct iwl_wowlan_wake_pkt_notif {
* struct iwl_mvm_d3_end_notif - d3 end notification
* @flags: See &enum iwl_d0i3_flags
*/
struct iwl_mvm_d3_end_notif {
struct iwl_d3_end_notif {
__le32 flags;
} __packed;

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2024 Intel Corporation
* Copyright (C) 2024-2025 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
@ -90,9 +90,15 @@ enum iwl_data_path_subcmd_ids {
*/
SEC_KEY_CMD = 0x18,
/**
* @OMI_SEND_STATUS_NOTIF: notification after OMI was sent
* uses &struct iwl_omi_send_status_notif
*/
OMI_SEND_STATUS_NOTIF = 0xF2,
/**
* @ESR_MODE_NOTIF: notification to recommend/force a wanted esr mode,
* uses &struct iwl_mvm_esr_mode_notif
* uses &struct iwl_esr_mode_notif
*/
ESR_MODE_NOTIF = 0xF3,
@ -688,4 +694,13 @@ struct iwl_sec_key_cmd {
} __packed u; /* SEC_KEY_OPERATION_API_U_VER_1 */
} __packed; /* SEC_KEY_CMD_API_S_VER_1 */
/**
* struct iwl_omi_send_status_notif - OMI status notification
* @success: indicates that the OMI was sent successfully
* (currently always set)
*/
struct iwl_omi_send_status_notif {
__le32 success;
} __packed; /* OMI_SEND_STATUS_NTFY_API_S_VER_1 */
#endif /* __iwl_fw_api_datapath_h__ */

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2018-2024 Intel Corporation
* Copyright (C) 2018-2025 Intel Corporation
*/
#ifndef __iwl_fw_dbg_tlv_h__
#define __iwl_fw_dbg_tlv_h__
@ -527,6 +527,8 @@ enum iwl_fw_ini_time_point {
* @IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA: override trigger data.
* Append otherwise
* @IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD: send cmd once dump collected
* @IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE: perform reset handshake and
* split dump to before/after with region marking
*/
enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT = BIT(0),
@ -535,6 +537,7 @@ enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9),
IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10),
IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD = BIT(16),
IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE = BIT(17),
};
/**
@ -556,12 +559,14 @@ enum iwl_fw_ini_trigger_reset_fw_policy {
* @IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT: OS has no limit of dump size
* @IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB: mini dump only 600KB region dump
* @IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB: mini dump 5MB size dump
* @IWL_FW_IWL_DEBUG_DUMP_POLICY_BEFORE_RESET: dump this region before reset
* handshake (if requested by %IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE)
*/
enum iwl_fw_ini_dump_policy {
IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT = BIT(0),
IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB = BIT(1),
IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB = BIT(2),
IWL_FW_IWL_DEBUG_DUMP_POLICY_BEFORE_RESET = BIT(3),
};
/**

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2005-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -51,7 +51,7 @@ enum iwl_debug_cmds {
/**
* @GET_TAS_STATUS:
* sends command to fw to get TAS status
* the response is &struct iwl_mvm_tas_status_resp
* the response is &struct iwl_tas_status_resp
*/
GET_TAS_STATUS = 0xA,
/**
@ -439,25 +439,20 @@ struct iwl_dbg_dump_complete_cmd {
__le32 tp_data;
} __packed; /* FW_DUMP_COMPLETE_CMD_API_S_VER_1 */
#define TAS_LMAC_BAND_HB 0
#define TAS_LMAC_BAND_LB 1
#define TAS_LMAC_BAND_UHB 2
#define TAS_LMAC_BAND_INVALID 3
/**
* struct iwl_mvm_tas_status_per_mac - tas status per lmac
* struct iwl_tas_status_per_mac - tas status per lmac
* @static_status: tas statically enabled or disabled per lmac - TRUE/FALSE
* @static_dis_reason: TAS static disable reason, uses
* &enum iwl_mvm_tas_statically_disabled_reason
* &enum iwl_tas_statically_disabled_reason
* @dynamic_status: Current TAS status. uses
* &enum iwl_mvm_tas_dyna_status
* &enum iwl_tas_dyna_status
* @near_disconnection: is TAS currently near disconnection per lmac? - TRUE/FALSE
* @max_reg_pwr_limit: Regulatory power limits in dBm
* @sar_limit: SAR limits per lmac in dBm
* @band: Band per lmac
* @reserved: reserved
*/
struct iwl_mvm_tas_status_per_mac {
struct iwl_tas_status_per_mac {
u8 static_status;
u8 static_dis_reason;
u8 dynamic_status;
@ -466,35 +461,35 @@ struct iwl_mvm_tas_status_per_mac {
__le16 sar_limit;
u8 band;
u8 reserved[3];
} __packed; /*DEBUG_GET_TAS_STATUS_PER_MAC_S_VER_1*/
} __packed; /* DEBUG_GET_TAS_STATUS_PER_MAC_S_VER_1 */
/**
* struct iwl_mvm_tas_status_resp - Response to GET_TAS_STATUS
* struct iwl_tas_status_resp - Response to GET_TAS_STATUS
* @tas_fw_version: TAS FW version
* @is_uhb_for_usa_enable: is UHB enabled in USA? - TRUE/FALSE
* @curr_mcc: current mcc
* @block_list: country block list
* @tas_status_mac: TAS status per lmac, uses
* &struct iwl_mvm_tas_status_per_mac
* &struct iwl_tas_status_per_mac
* @in_dual_radio: is TAS in dual radio? - TRUE/FALSE
* @uhb_allowed_flags: see &enum iwl_tas_uhb_allowed_flags.
* This member is valid only when fw has
* %IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT capability.
* @reserved: reserved
*/
struct iwl_mvm_tas_status_resp {
struct iwl_tas_status_resp {
u8 tas_fw_version;
u8 is_uhb_for_usa_enable;
__le16 curr_mcc;
__le16 block_list[16];
struct iwl_mvm_tas_status_per_mac tas_status_mac[2];
struct iwl_tas_status_per_mac tas_status_mac[2];
u8 in_dual_radio;
u8 uhb_allowed_flags;
u8 reserved[2];
} __packed; /*DEBUG_GET_TAS_STATUS_RSP_API_S_VER_3*/
} __packed; /* DEBUG_GET_TAS_STATUS_RSP_API_S_VER_3 */
/**
* enum iwl_mvm_tas_dyna_status - TAS current running status
* enum iwl_tas_dyna_status - TAS current running status
* @TAS_DYNA_INACTIVE: TAS status is inactive
* @TAS_DYNA_INACTIVE_MVM_MODE: TAS is disabled due because FW is in MVM mode
* or is in softap mode.
@ -507,7 +502,7 @@ struct iwl_mvm_tas_status_resp {
* @TAS_DYNA_ACTIVE: TAS is currently active
* @TAS_DYNA_STATUS_MAX: TAS status max value
*/
enum iwl_mvm_tas_dyna_status {
enum iwl_tas_dyna_status {
TAS_DYNA_INACTIVE,
TAS_DYNA_INACTIVE_MVM_MODE,
TAS_DYNA_INACTIVE_TRIGGER_MODE,
@ -516,19 +511,22 @@ enum iwl_mvm_tas_dyna_status {
TAS_DYNA_ACTIVE,
TAS_DYNA_STATUS_MAX,
}; /*_TAS_DYNA_STATUS_E*/
};
/**
* enum iwl_mvm_tas_statically_disabled_reason - TAS statically disabled reason
* enum iwl_tas_statically_disabled_reason - TAS statically disabled reason
* @TAS_DISABLED_DUE_TO_BIOS: TAS is disabled because TAS is disabled in BIOS
* @TAS_DISABLED_DUE_TO_SAR_6DBM: TAS is disabled because SAR limit is less than 6 Dbm
* @TAS_DISABLED_REASON_INVALID: TAS disable reason is invalid
* @TAS_DISABLED_DUE_TO_TABLE_SOURCE_INVALID: TAS is disabled due to
* table source invalid
* @TAS_DISABLED_REASON_MAX: TAS disable reason max value
*/
enum iwl_mvm_tas_statically_disabled_reason {
enum iwl_tas_statically_disabled_reason {
TAS_DISABLED_DUE_TO_BIOS,
TAS_DISABLED_DUE_TO_SAR_6DBM,
TAS_DISABLED_REASON_INVALID,
TAS_DISABLED_DUE_TO_TABLE_SOURCE_INVALID,
TAS_DISABLED_REASON_MAX,
}; /*_TAS_STATICALLY_DISABLED_REASON_E*/

View File

@ -0,0 +1,226 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2025 Intel Corporation
*/
#ifndef __iwl_fw_api_dhc_h__
#define __iwl_fw_api_dhc_h__
#define DHC_TABLE_MASK_POS (28)
/**
* enum iwl_dhc_table_id - DHC table operations index
*/
enum iwl_dhc_table_id {
/**
* @DHC_TABLE_INTEGRATION: select the integration table
*/
DHC_TABLE_INTEGRATION = 2 << DHC_TABLE_MASK_POS,
/**
* @DHC_TABLE_TOOLS: select the tools table
*/
DHC_TABLE_TOOLS = 0,
};
/**
* enum iwl_dhc_umac_tools_table - tools operations
* @DHC_TOOLS_UMAC_GET_TAS_STATUS: Get TAS status.
* See @struct iwl_dhc_tas_status_resp
*/
enum iwl_dhc_umac_tools_table {
DHC_TOOLS_UMAC_GET_TAS_STATUS = 0,
};
/**
* enum iwl_dhc_umac_integration_table - integration operations
*/
enum iwl_dhc_umac_integration_table {
/**
* @DHC_INT_UMAC_TWT_OPERATION: trigger a TWT operation
*/
DHC_INT_UMAC_TWT_OPERATION = 4,
/**
* @DHC_INTEGRATION_TLC_DEBUG_CONFIG: TLC debug
*/
DHC_INTEGRATION_TLC_DEBUG_CONFIG = 1,
/**
* @DHC_INTEGRATION_MAX: Maximum UMAC integration table entries
*/
DHC_INTEGRATION_MAX
};
#define DHC_TARGET_UMAC BIT(27)
/**
* struct iwl_dhc_cmd - debug host command
* @length: length in DWs of the data structure that is concatenated to the end
* of this struct
* @index_and_mask: bit 31 is 1 for data set operation else it's 0
* bits 28-30 is the index of the table of the operation -
* &enum iwl_dhc_table_id *
* bit 27 is 0 if the cmd targeted to LMAC and 1 if targeted to UMAC,
* (LMAC is 0 for backward compatibility)
* bit 26 is 0 if the cmd targeted to LMAC0 and 1 if targeted to LMAC1,
* relevant only if bit 27 set to 0
* bits 0-25 is a specific entry index in the table specified in bits 28-30
*
* @data: the concatenated data.
*/
struct iwl_dhc_cmd {
__le32 length; /* length of @data in dwords (see kernel-doc above) */
__le32 index_and_mask; /* set/get flag, table id, UMAC/LMAC target and entry index */
__le32 data[]; /* concatenated payload, @length dwords long */
} __packed; /* DHC_CMD_API_S */
/**
* struct iwl_dhc_payload_hdr - DHC payload header
* @version: a version of a payload
* @reserved: reserved for alignment
*/
struct iwl_dhc_payload_hdr {
u8 version;
u8 reserved[3];
} __packed; /* DHC_PAYLOAD_HDR_API_S_VER_1 */
/**
* struct iwl_dhc_tas_status_per_radio - TAS status per radio
* @band: &PHY_BAND_5 for high band, PHY_BAND_24 for low band and
* &PHY_BAND_6 for ultra high band.
* @static_status: TAS statically enabled or disabled
* @static_disable_reason: TAS static disable reason, uses
* &enum iwl_tas_statically_disabled_reason
* @near_disconnection: is TAS currently near disconnection per radio
* @dynamic_status_ant_a: Antenna A current TAS status.
* uses &enum iwl_tas_dyna_status
* @dynamic_status_ant_b: Antenna B current TAS status.
* uses &enum iwl_tas_dyna_status
* @max_reg_pwr_limit_ant_a: Antenna A regulatory power limits in dBm
* @max_reg_pwr_limit_ant_b: Antenna B regulatory power limits in dBm
* @sar_limit_ant_a: Antenna A SAR limit per radio in dBm
* @sar_limit_ant_b: Antenna B SAR limit per radio in dBm
* @reserved: reserved for alignment
*/
struct iwl_dhc_tas_status_per_radio {
u8 band;
u8 static_status;
u8 static_disable_reason;
u8 near_disconnection;
u8 dynamic_status_ant_a;
u8 dynamic_status_ant_b;
__le16 max_reg_pwr_limit_ant_a;
__le16 max_reg_pwr_limit_ant_b;
__le16 sar_limit_ant_a;
__le16 sar_limit_ant_b;
u8 reserved[2];
} __packed; /* DHC_TAS_STATUS_PER_RADIO_S_VER_1 */
/**
* struct iwl_dhc_tas_status_resp - Response to DHC_TOOLS_UMAC_GET_TAS_STATUS
* @header: DHC payload header, uses &struct iwl_dhc_payload_hdr
* @tas_config_info: see @struct bios_value_u32
* @mcc_block_list: block listed country codes
* @tas_status_radio: TAS status, uses &struct iwl_dhc_tas_status_per_radio
* @curr_mcc: current mcc
* @valid_radio_mask: represent entry in tas_status_per_radio is valid.
* @reserved: reserved for alignment
*/
struct iwl_dhc_tas_status_resp {
struct iwl_dhc_payload_hdr header;
struct bios_value_u32 tas_config_info;
__le16 mcc_block_list[IWL_WTAS_BLACK_LIST_MAX];
struct iwl_dhc_tas_status_per_radio tas_status_radio[2];
__le16 curr_mcc;
u8 valid_radio_mask;
u8 reserved;
} __packed; /* DHC_TAS_STATUS_RSP_API_S_VER_1 */
/**
* struct iwl_dhc_cmd_resp_v1 - debug host command response
* @status: status of the command
* @data: the response data
*/
struct iwl_dhc_cmd_resp_v1 {
__le32 status;
__le32 data[];
} __packed; /* DHC_RESP_API_S_VER_1 */
/**
* struct iwl_dhc_cmd_resp - debug host command response
* @status: status of the command
* @descriptor: command descriptor (index_and_mask) returned
* @data: the response data
*/
struct iwl_dhc_cmd_resp {
__le32 status;
__le32 descriptor;
__le32 data[];
} __packed; /* DHC_RESP_API_S_VER_2 and DHC_RESP_API_S_VER_3 */
/**
* enum iwl_dhc_twt_operation_type - describes the TWT operation type
*
* @DHC_TWT_REQUEST: Send a Request TWT command
* @DHC_TWT_SUGGEST: Send a Suggest TWT command
* @DHC_TWT_DEMAND: Send a Demand TWT command
* @DHC_TWT_GROUPING: Send a Grouping TWT command
* @DHC_TWT_ACCEPT: Send a Accept TWT command
* @DHC_TWT_ALTERNATE: Send a Alternate TWT command
* @DHC_TWT_DICTATE: Send a Dictate TWT command
* @DHC_TWT_REJECT: Send a Reject TWT command
* @DHC_TWT_TEARDOWN: Send a TearDown TWT command
*/
enum iwl_dhc_twt_operation_type {
DHC_TWT_REQUEST,
DHC_TWT_SUGGEST,
DHC_TWT_DEMAND,
DHC_TWT_GROUPING,
DHC_TWT_ACCEPT,
DHC_TWT_ALTERNATE,
DHC_TWT_DICTATE,
DHC_TWT_REJECT,
DHC_TWT_TEARDOWN,
}; /* DHC_TWT_OPERATION_TYPE_E */
/**
* struct iwl_dhc_twt_operation - trigger a TWT operation
*
* @mac_id: the mac Id on which to trigger TWT operation
* @twt_operation: see &enum iwl_dhc_twt_operation_type
* @target_wake_time: when should we be on channel
* @interval_exp: the exponent for the interval
* @interval_mantissa: the mantissa for the interval
* @min_wake_duration: the minimum duration for the wake period
* @trigger: is the TWT triggered or not
* @flow_type: is the TWT announced or not
* @flow_id: the TWT flow identifier from 0 to 7
* @protection: is the TWT protected
* @ndo_paging_indicator: is ndo_paging_indicator set
* @responder_pm_mode: is responder_pm_mode set
* @negotiation_type: if the responder wants to doze outside the TWT SP
* @twt_request: 1 for TWT request, 0 otherwise
* @implicit: is TWT implicit
* @twt_group_assignment: the TWT group assignment
* @twt_channel: the TWT channel
* @reserved: reserved
*/
struct iwl_dhc_twt_operation {
__le32 mac_id; /* MAC id on which to trigger the TWT operation */
__le32 twt_operation; /* see &enum iwl_dhc_twt_operation_type */
__le64 target_wake_time; /* when should we be on channel */
__le32 interval_exp; /* exponent for the interval */
__le32 interval_mantissa; /* mantissa for the interval */
__le32 min_wake_duration; /* minimum duration of the wake period */
u8 trigger; /* is the TWT triggered or not */
u8 flow_type; /* is the TWT announced or not */
u8 flow_id; /* TWT flow identifier, 0 - 7 */
u8 protection; /* is the TWT protected */
u8 ndo_paging_indicator; /* is ndo_paging_indicator set */
u8 responder_pm_mode; /* is responder_pm_mode set */
u8 negotiation_type; /* responder dozing outside the TWT SP */
u8 twt_request; /* 1 for TWT request, 0 otherwise */
u8 implicit; /* is TWT implicit */
u8 twt_group_assignment; /* the TWT group assignment */
u8 twt_channel; /* the TWT channel */
u8 reserved; /* reserved for alignment */
}; /* DHC_TWT_OPERATION_API_S */
#endif /* __iwl_fw_api_dhc_h__ */

View File

@ -2,7 +2,7 @@
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2022 Intel Corporation
* Copyright (C) 2024 Intel Corporation
* Copyright (C) 2024-2025 Intel Corporation
*/
#ifndef __iwl_fw_api_location_h__
#define __iwl_fw_api_location_h__
@ -1015,7 +1015,7 @@ struct iwl_tof_range_req_ap_entry_v9 {
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_9 */
/**
* struct iwl_tof_range_req_ap_entry_v10 - AP configuration parameters
* struct iwl_tof_range_req_ap_entry - AP configuration parameters
* @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
* @band: 0 for 5.2 GHz, 1 for 2.4 GHz, 2 for 6GHz
* @channel_num: AP Channel number
@ -1063,7 +1063,7 @@ struct iwl_tof_range_req_ap_entry_v9 {
* @min_time_between_msr: For non trigger based NDP ranging, the minimum time
* between measurements in units of milliseconds
*/
struct iwl_tof_range_req_ap_entry_v10 {
struct iwl_tof_range_req_ap_entry {
__le32 initiator_ap_flags;
u8 band;
u8 channel_num;
@ -1134,7 +1134,7 @@ enum iwl_tof_initiator_flags {
IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT = BIT(20),
}; /* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */
#define IWL_MVM_TOF_MAX_APS 5
#define IWL_TOF_MAX_APS 5
#define IWL_MVM_TOF_MAX_TWO_SIDED_APS 5
/**
@ -1153,7 +1153,7 @@ enum iwl_tof_initiator_flags {
* when the session is done (successfully / partially).
* one of iwl_tof_response_mode.
* @reserved0: reserved
* @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
* @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
* '1' Use MAC Address randomization according to the below
* @range_req_bssid: ranging request BSSID
@ -1183,7 +1183,7 @@ struct iwl_tof_range_req_cmd_v5 {
u8 ftm_tx_chains;
__le16 common_calib;
__le16 specific_calib;
struct iwl_tof_range_req_ap_entry_v2 ap[IWL_MVM_TOF_MAX_APS];
struct iwl_tof_range_req_ap_entry_v2 ap[IWL_TOF_MAX_APS];
} __packed;
/* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */
@ -1192,7 +1192,7 @@ struct iwl_tof_range_req_cmd_v5 {
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
* @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
* @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@ -1216,7 +1216,7 @@ struct iwl_tof_range_req_cmd_v7 {
__le32 tsf_mac_id;
__le16 common_calib;
__le16 specific_calib;
struct iwl_tof_range_req_ap_entry_v3 ap[IWL_MVM_TOF_MAX_APS];
struct iwl_tof_range_req_ap_entry_v3 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_7 */
/**
@ -1224,7 +1224,7 @@ struct iwl_tof_range_req_cmd_v7 {
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
* @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
* @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@ -1248,7 +1248,7 @@ struct iwl_tof_range_req_cmd_v8 {
__le32 tsf_mac_id;
__le16 common_calib;
__le16 specific_calib;
struct iwl_tof_range_req_ap_entry_v4 ap[IWL_MVM_TOF_MAX_APS];
struct iwl_tof_range_req_ap_entry_v4 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_8 */
/**
@ -1256,7 +1256,7 @@ struct iwl_tof_range_req_cmd_v8 {
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
* @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
* @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@ -1276,7 +1276,7 @@ struct iwl_tof_range_req_cmd_v9 {
u8 macaddr_template[ETH_ALEN];
__le32 req_timeout_ms;
__le32 tsf_mac_id;
struct iwl_tof_range_req_ap_entry_v6 ap[IWL_MVM_TOF_MAX_APS];
struct iwl_tof_range_req_ap_entry_v6 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_9 */
/**
@ -1284,7 +1284,7 @@ struct iwl_tof_range_req_cmd_v9 {
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
* @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
* @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@ -1304,7 +1304,7 @@ struct iwl_tof_range_req_cmd_v11 {
u8 macaddr_template[ETH_ALEN];
__le32 req_timeout_ms;
__le32 tsf_mac_id;
struct iwl_tof_range_req_ap_entry_v7 ap[IWL_MVM_TOF_MAX_APS];
struct iwl_tof_range_req_ap_entry_v7 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_11 */
/**
@ -1312,7 +1312,7 @@ struct iwl_tof_range_req_cmd_v11 {
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
* @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
* @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@ -1332,7 +1332,7 @@ struct iwl_tof_range_req_cmd_v12 {
u8 macaddr_template[ETH_ALEN];
__le32 req_timeout_ms;
__le32 tsf_mac_id;
struct iwl_tof_range_req_ap_entry_v8 ap[IWL_MVM_TOF_MAX_APS];
struct iwl_tof_range_req_ap_entry_v8 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_12 */
/**
@ -1340,7 +1340,7 @@ struct iwl_tof_range_req_cmd_v12 {
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
* @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
* @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@ -1360,15 +1360,15 @@ struct iwl_tof_range_req_cmd_v13 {
u8 macaddr_template[ETH_ALEN];
__le32 req_timeout_ms;
__le32 tsf_mac_id;
struct iwl_tof_range_req_ap_entry_v9 ap[IWL_MVM_TOF_MAX_APS];
struct iwl_tof_range_req_ap_entry_v9 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */
/**
* struct iwl_tof_range_req_cmd_v14 - start measurement cmd
* struct iwl_tof_range_req_cmd - start measurement cmd
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
* @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
* @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
@ -1377,9 +1377,9 @@ struct iwl_tof_range_req_cmd_v13 {
* This is the session time for completing the measurement.
* @tsf_mac_id: report the measurement start time for each ap in terms of the
* TSF of this mac id. 0xff to disable TSF reporting.
* @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v10.
* @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry.
*/
struct iwl_tof_range_req_cmd_v14 {
struct iwl_tof_range_req_cmd {
__le32 initiator_flags;
u8 request_id;
u8 num_of_ap;
@ -1388,8 +1388,8 @@ struct iwl_tof_range_req_cmd_v14 {
u8 macaddr_template[ETH_ALEN];
__le32 req_timeout_ms;
__le32 tsf_mac_id;
struct iwl_tof_range_req_ap_entry_v10 ap[IWL_MVM_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */
struct iwl_tof_range_req_ap_entry ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_15 */
/*
* enum iwl_tof_range_request_status - status of the sent request
@ -1609,7 +1609,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v5 {
} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_5 */
/**
* struct iwl_tof_range_rsp_ap_entry_ntfy_v6 - AP parameters (response)
* struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
* @bssid: BSSID of the AP
* @measure_status: current APs measurement status, one of
* &enum iwl_tof_entry_status.
@ -1645,7 +1645,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v5 {
* @tx_pn: the last PN used for this responder Tx in case PMF is configured in
* LE byte order.
*/
struct iwl_tof_range_rsp_ap_entry_ntfy_v6 {
struct iwl_tof_range_rsp_ap_entry_ntfy {
u8 bssid[ETH_ALEN];
u8 measure_status;
u8 measure_bw;
@ -1695,7 +1695,7 @@ enum iwl_tof_response_status {
* @request_status: status of current measurement session, one of
* &enum iwl_tof_response_status.
* @last_in_batch: report policy (when not all responses are uploaded at once)
* @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
* @num_of_aps: Number of APs to measure (error if > IWL_TOF_MAX_APS)
* @ap: per-AP data
*/
struct iwl_tof_range_rsp_ntfy_v5 {
@ -1703,7 +1703,7 @@ struct iwl_tof_range_rsp_ntfy_v5 {
u8 request_status;
u8 last_in_batch;
u8 num_of_aps;
struct iwl_tof_range_rsp_ap_entry_ntfy_v3 ap[IWL_MVM_TOF_MAX_APS];
struct iwl_tof_range_rsp_ap_entry_ntfy_v3 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_5 */
/**
@ -1719,7 +1719,7 @@ struct iwl_tof_range_rsp_ntfy_v6 {
u8 num_of_aps;
u8 last_report;
u8 reserved;
struct iwl_tof_range_rsp_ap_entry_ntfy_v4 ap[IWL_MVM_TOF_MAX_APS];
struct iwl_tof_range_rsp_ap_entry_ntfy_v4 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_6 */
/**
@ -1735,23 +1735,23 @@ struct iwl_tof_range_rsp_ntfy_v7 {
u8 num_of_aps;
u8 last_report;
u8 reserved;
struct iwl_tof_range_rsp_ap_entry_ntfy_v5 ap[IWL_MVM_TOF_MAX_APS];
struct iwl_tof_range_rsp_ap_entry_ntfy_v5 ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_7 */
/**
* struct iwl_tof_range_rsp_ntfy_v8 - ranging response notification
* struct iwl_tof_range_rsp_ntfy - ranging response notification
* @request_id: A Token ID of the corresponding Range request
* @num_of_aps: Number of APs results
* @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise.
* @reserved: reserved
* @ap: per-AP data
*/
struct iwl_tof_range_rsp_ntfy_v8 {
struct iwl_tof_range_rsp_ntfy {
u8 request_id;
u8 num_of_aps;
u8 last_report;
u8 reserved;
struct iwl_tof_range_rsp_ap_entry_ntfy_v6 ap[IWL_MVM_TOF_MAX_APS];
struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_8,
LOCATION_RANGE_RSP_NTFY_API_S_VER_9 */

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation
* Copyright (C) 2012-2014, 2018-2019, 2021-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -46,7 +46,7 @@ enum iwl_mac_conf_subcmd_ids {
*/
STA_CONFIG_CMD = 0xA,
/**
* @AUX_STA_CMD: &struct iwl_mvm_aux_sta_cmd
* @AUX_STA_CMD: &struct iwl_aux_sta_cmd
*/
AUX_STA_CMD = 0xB,
/**
@ -61,6 +61,10 @@ enum iwl_mac_conf_subcmd_ids {
* @ROC_CMD: &struct iwl_roc_req
*/
ROC_CMD = 0xE,
/**
* @TWT_OPERATION_CMD: &struct iwl_twt_operation_cmd
*/
TWT_OPERATION_CMD = 0x10,
/**
* @MISSED_BEACONS_NOTIF: &struct iwl_missed_beacons_notif
*/
@ -641,7 +645,7 @@ struct iwl_sta_cfg_cmd {
} __packed; /* STA_CMD_API_S_VER_1 */
/**
* struct iwl_mvm_aux_sta_cmd - command for AUX STA configuration
* struct iwl_aux_sta_cmd - command for AUX STA configuration
* ( AUX_STA_CMD = 0xB )
*
* @sta_id: index of aux sta to configure
@ -649,7 +653,7 @@ struct iwl_sta_cfg_cmd {
* @mac_addr: mac addr of the auxiliary sta
* @reserved_for_mac_addr: reserved
*/
struct iwl_mvm_aux_sta_cmd {
struct iwl_aux_sta_cmd {
__le32 sta_id;
__le32 lmac_id;
u8 mac_addr[ETH_ALEN];
@ -693,11 +697,11 @@ enum iwl_mvm_fw_esr_recommendation {
}; /* ESR_MODE_RECOMMENDATION_CODE_API_E_VER_1 */
/**
* struct iwl_mvm_esr_mode_notif - FWs recommendation/force for esr mode
* struct iwl_esr_mode_notif - FWs recommendation/force for esr mode
*
* @action: the action to apply on esr state. See &iwl_mvm_fw_esr_recommendation
*/
struct iwl_mvm_esr_mode_notif {
struct iwl_esr_mode_notif {
__le32 action;
} __packed; /* ESR_MODE_RECOMMENDATION_NTFY_API_S_VER_1 */
@ -748,4 +752,83 @@ struct iwl_esr_trans_fail_notif {
__le32 err_code;
} __packed; /* ESR_TRANSITION_FAILED_NTFY_API_S_VER_1 */
/*
* enum iwl_twt_operation_type: TWT operation in a TWT action frame
*
* @TWT_OPERATION_REQUEST: TWT Request
* @TWT_OPERATION_SUGGEST: TWT Suggest
* @TWT_OPERATION_DEMAND: TWT Demand
* @TWT_OPERATION_GROUPING: TWT Grouping
* @TWT_OPERATION_ACCEPT: TWT Accept
* @TWT_OPERATION_ALTERNATE: TWT Alternate
* @TWT_OPERATION_DICTATE: TWT Dictate
* @TWT_OPERATION_REJECT: TWT Reject
* @TWT_OPERATION_TEARDOWN: TWT Teardown
* @TWT_OPERATION_UNAVAILABILITY: TWT Unavailability
*/
enum iwl_twt_operation_type {
TWT_OPERATION_REQUEST,
TWT_OPERATION_SUGGEST,
TWT_OPERATION_DEMAND,
TWT_OPERATION_GROUPING,
TWT_OPERATION_ACCEPT,
TWT_OPERATION_ALTERNATE,
TWT_OPERATION_DICTATE,
TWT_OPERATION_REJECT,
TWT_OPERATION_TEARDOWN,
TWT_OPERATION_UNAVAILABILITY,
TWT_OPERATION_MAX,
}; /* TWT_OPERATION_TYPE_E_VER_1 */
/**
* struct iwl_twt_operation_cmd - initiate a TWT session from driver
*
* @link_id: FW link id to initiate the TWT
* @twt_operation: &enum iwl_twt_operation_type
* @target_wake_time: TSF time to start the TWT
* @interval_exponent: the exponent for the interval
* @interval_mantissa: the mantissa for the interval
* @minimum_wake_duration: the minimum duration for the wake period
* @trigger: is the TWT triggered or not
* @flow_type: is the TWT announced (0) or not (1)
* @flow_id: the TWT flow identifier 0 - 7
* @twt_protection: is the TWT protected
* @ndp_paging_indicator: is ndp paging indicator set
* @responder_pm_mode: is responder pm mode set
* @negotiation_type: if the responder wants to doze outside the TWT SP
* @twt_request: 1 for TWT request (STA), 0 for TWT response (AP)
* @implicit: is TWT implicit
* @twt_group_assignment: the TWT group assignment
* @twt_channel: the TWT channel
* @restricted_info_present: is this a restricted TWT
* @dl_bitmap_valid: is DL (download) bitmap valid (restricted TWT)
* @ul_bitmap_valid: is UL (upload) bitmap valid (restricted TWT)
* @dl_tid_bitmap: DL TID bitmap (restricted TWT)
* @ul_tid_bitmap: UL TID bitmap (restricted TWT)
*/
struct iwl_twt_operation_cmd {
__le32 link_id;
__le32 twt_operation;
__le64 target_wake_time;
__le32 interval_exponent;
__le32 interval_mantissa;
__le32 minimum_wake_duration;
u8 trigger;
u8 flow_type;
u8 flow_id;
u8 twt_protection;
u8 ndp_paging_indicator;
u8 responder_pm_mode;
u8 negotiation_type;
u8 twt_request;
u8 implicit;
u8 twt_group_assignment;
u8 twt_channel;
u8 restricted_info_present;
u8 dl_bitmap_valid;
u8 ul_bitmap_valid;
u8 dl_tid_bitmap;
u8 ul_tid_bitmap;
} __packed; /* TWT_OPERATION_API_S_VER_1 */
#endif /* __iwl_fw_api_mac_cfg_h__ */

View File

@ -1,11 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2019-2022, 2024 Intel Corporation
* Copyright (C) 2012-2014, 2019-2022, 2024-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_phy_h__
#define __iwl_fw_api_phy_h__
#include <linux/types.h>
#include <linux/bits.h>
/**
* enum iwl_phy_ops_subcmd_ids - PHY group commands
@ -19,7 +21,7 @@ enum iwl_phy_ops_subcmd_ids {
CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
/**
* @CTDP_CONFIG_CMD: &struct iwl_mvm_ctdp_cmd
* @CTDP_CONFIG_CMD: &struct iwl_ctdp_cmd
*/
CTDP_CONFIG_CMD = 0x03,
@ -55,7 +57,7 @@ enum iwl_phy_ops_subcmd_ids {
/**
* @DTS_MEASUREMENT_NOTIF_WIDE:
* &struct iwl_dts_measurement_notif_v1 or
* &struct iwl_dts_measurement_notif_v2
* &struct iwl_dts_measurement_notif
*/
DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
};
@ -152,13 +154,13 @@ struct iwl_dts_measurement_notif_v1 {
} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1*/
/**
* struct iwl_dts_measurement_notif_v2 - measurements notification
* struct iwl_dts_measurement_notif - measurements notification
*
* @temp: the measured temperature
* @voltage: the measured voltage
* @threshold_idx: the trip index that was crossed
*/
struct iwl_dts_measurement_notif_v2 {
struct iwl_dts_measurement_notif {
__le32 temp;
__le32 voltage;
__le32 threshold_idx;
@ -195,25 +197,25 @@ struct ct_kill_notif {
} __packed; /* CT_KILL_NOTIFICATION_API_S_VER_1, CT_KILL_NOTIFICATION_API_S_VER_2 */
/**
* enum iwl_mvm_ctdp_cmd_operation - CTDP command operations
* enum iwl_ctdp_cmd_operation - CTDP command operations
* @CTDP_CMD_OPERATION_START: update the current budget
* @CTDP_CMD_OPERATION_STOP: stop ctdp
* @CTDP_CMD_OPERATION_REPORT: get the average budget
*/
enum iwl_mvm_ctdp_cmd_operation {
enum iwl_ctdp_cmd_operation {
CTDP_CMD_OPERATION_START = 0x1,
CTDP_CMD_OPERATION_STOP = 0x2,
CTDP_CMD_OPERATION_REPORT = 0x4,
};/* CTDP_CMD_OPERATION_TYPE_E */
/**
* struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget
* struct iwl_ctdp_cmd - track and manage the FW power consumption budget
*
* @operation: see &enum iwl_mvm_ctdp_cmd_operation
* @operation: see &enum iwl_ctdp_cmd_operation
* @budget: the budget in milliwatt
* @window_size: defined in API but not used
*/
struct iwl_mvm_ctdp_cmd {
struct iwl_ctdp_cmd {
__le32 operation;
__le32 budget;
__le32 window_size;

View File

@ -226,6 +226,58 @@ struct iwl_tlc_update_notif {
__le32 amsdu_enabled;
} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */
/**
* enum iwl_tlc_debug_types - debug options
*/
enum iwl_tlc_debug_types {
/**
* @IWL_TLC_DEBUG_FIXED_RATE: set fixed rate for rate scaling
*/
IWL_TLC_DEBUG_FIXED_RATE,
/**
* @IWL_TLC_DEBUG_AGG_DURATION_LIM: time limit for a BA
* session, in usec
*/
IWL_TLC_DEBUG_AGG_DURATION_LIM,
/**
* @IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM: set max number of frames
* in an aggregation
*/
IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM,
/**
* @IWL_TLC_DEBUG_TPC_ENABLED: enable or disable tpc
*/
IWL_TLC_DEBUG_TPC_ENABLED,
/**
* @IWL_TLC_DEBUG_TPC_STATS: get number of frames Tx'ed in each
* tpc step
*/
IWL_TLC_DEBUG_TPC_STATS,
/**
* @IWL_TLC_DEBUG_RTS_DISABLE: disable RTS (bool true/false).
*/
IWL_TLC_DEBUG_RTS_DISABLE,
/**
* @IWL_TLC_DEBUG_PARTIAL_FIXED_RATE: set partial fixed rate to fw
*/
IWL_TLC_DEBUG_PARTIAL_FIXED_RATE,
}; /* TLC_MNG_DEBUG_TYPES_API_E */
#define MAX_DATA_IN_DHC_TLC_CMD 10
/**
* struct iwl_dhc_tlc_cmd - fixed debug config
* @sta_id: bit 0 - enable/disable, bits 1 - 7 hold station id
* @reserved1: reserved
* @type: type id of %enum iwl_tlc_debug_types
* @data: data to send
*/
struct iwl_dhc_tlc_cmd {
u8 sta_id;
u8 reserved1[3];
__le32 type;
__le32 data[MAX_DATA_IN_DHC_TLC_CMD];
} __packed; /* TLC_MNG_DEBUG_CMD_S */
#define IWL_MAX_MCS_DISPLAY_SIZE 12

View File

@ -191,6 +191,7 @@ enum iwl_sta_sleep_flag {
#define STA_KEY_IDX_INVALID (0xff)
#define STA_KEY_MAX_DATA_KEY_NUM (4)
#define IWL_MAX_GLOBAL_KEYS (4)
#define IWL_MAX_NUM_IGTKS 2
#define STA_KEY_LEN_WEP40 (5)
#define STA_KEY_LEN_WEP104 (13)

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2020, 2022-2024 Intel Corporation
* Copyright (C) 2012-2014, 2018-2020, 2022-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -351,7 +351,7 @@ enum iwl_roc_activity {
}; /* ROC_ACTIVITY_API_E_VER_1 */
/*
* ROC command
* ROC command v5
*
* Command requests the firmware to remain on a channel for a certain duration.
*
@ -366,7 +366,7 @@ enum iwl_roc_activity {
* @max_delay: max delay the ROC can start in TU
* @duration: remain on channel duration in TU
*/
struct iwl_roc_req {
struct iwl_roc_req_v5 {
__le32 action;
__le32 activity;
__le32 sta_id;
@ -375,7 +375,41 @@ struct iwl_roc_req {
__le16 reserved;
__le32 max_delay;
__le32 duration;
} __packed; /* ROC_CMD_API_S_VER_3 */
} __packed; /* ROC_CMD_API_S_VER_5 */
/*
* ROC command
*
* Command requests the firmware to remain on a channel for a certain duration.
*
* ( MAC_CONF_GROUP 0x3, ROC_CMD 0xE )
*
* @action: action to perform, see &enum iwl_ctxt_action
* @activity: type of activity, see &enum iwl_roc_activity
* @sta_id: station id, resumed during "Remain On Channel" activity.
* @channel_info: &struct iwl_fw_channel_info
* @node_addr: node MAC address for Rx filtering
* @reserved1: align to a dword
* @max_delay: max delay the ROC can start in TU
* @duration: remain on channel duration in TU
* @interval: interval between repetitions (when repetitions > 1).
* @repetitions: number of repetitions
* 0xFF: infinite repetitions. 0 or 1: single repetition.
* @reserved2: align to a dword
*/
struct iwl_roc_req {
__le32 action; /* see &enum iwl_ctxt_action */
__le32 activity; /* see &enum iwl_roc_activity */
__le32 sta_id; /* station id resumed during the ROC activity */
struct iwl_fw_channel_info channel_info;
u8 node_addr[ETH_ALEN]; /* node MAC address for Rx filtering */
__le16 reserved1; /* align to a dword */
__le32 max_delay; /* max delay the ROC can start, in TU */
__le32 duration; /* remain on channel duration, in TU */
__le32 interval; /* time between repetitions, when repetitions > 1 */
u8 repetitions; /* 0xFF: infinite; 0 or 1: single repetition */
u8 reserved2[3]; /* align to a dword */
} __packed; /* ROC_CMD_API_S_VER_6 */
/*
* ROC notification

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2005-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@ -2618,29 +2618,28 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
},
};
static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data,
struct list_head *list)
enum iwl_dump_ini_region_selector {
IWL_INI_DUMP_ALL_REGIONS,
IWL_INI_DUMP_EARLY_REGIONS,
IWL_INI_DUMP_LATE_REGIONS,
};
static u32
iwl_dump_ini_dump_regions(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data,
struct list_head *list,
enum iwl_fw_ini_time_point tp_id,
u64 regions_mask,
struct iwl_dump_ini_region_data *imr_reg_data,
enum iwl_dump_ini_region_selector which)
{
struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig;
enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trigger->time_point);
struct iwl_dump_ini_region_data reg_data = {
.dump_data = dump_data,
};
struct iwl_dump_ini_region_data imr_reg_data = {
.dump_data = dump_data,
};
int i;
u32 size = 0;
u64 regions_mask = le64_to_cpu(trigger->regions_mask) &
~(fwrt->trans->dbg.unsupported_region_msk);
BUILD_BUG_ON(sizeof(trigger->regions_mask) != sizeof(regions_mask));
BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) <
ARRAY_SIZE(fwrt->trans->dbg.active_regions));
for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions); i++) {
u32 reg_type;
for (int i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions); i++) {
struct iwl_dump_ini_region_data reg_data = {
.dump_data = dump_data,
};
u32 reg_type, dp;
struct iwl_fw_ini_region_tlv *reg;
if (!(BIT_ULL(i) & regions_mask))
@ -2658,6 +2657,8 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops))
continue;
dp = le32_get_bits(reg->id, IWL_FW_INI_REGION_DUMP_POLICY_MASK);
if ((reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY ||
reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE ||
reg_type == IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP) &&
@ -2667,6 +2668,20 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
tp_id);
continue;
}
switch (which) {
case IWL_INI_DUMP_ALL_REGIONS:
break;
case IWL_INI_DUMP_EARLY_REGIONS:
if (!(dp & IWL_FW_IWL_DEBUG_DUMP_POLICY_BEFORE_RESET))
continue;
break;
case IWL_INI_DUMP_LATE_REGIONS:
if (dp & IWL_FW_IWL_DEBUG_DUMP_POLICY_BEFORE_RESET)
continue;
break;
}
/*
* DRAM_IMR can be collected only for FW/HW error timepoint
* when fw is not alive. In addition, it must be collected
@ -2676,7 +2691,8 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
if (reg_type == IWL_FW_INI_REGION_DRAM_IMR) {
if (tp_id == IWL_FW_INI_TIME_POINT_FW_ASSERT ||
tp_id == IWL_FW_INI_TIME_POINT_FW_HW_ERROR)
imr_reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i];
imr_reg_data->reg_tlv =
fwrt->trans->dbg.active_regions[i];
else
IWL_INFO(fwrt,
"WRT: trying to collect DRAM_IMR at time point: %d, skipping\n",
@ -2689,9 +2705,44 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
size += iwl_dump_ini_mem(fwrt, list, &reg_data,
&iwl_dump_ini_region_ops[reg_type]);
}
return size;
}
static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data,
struct list_head *list)
{
struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig;
enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trigger->time_point);
struct iwl_dump_ini_region_data imr_reg_data = {
.dump_data = dump_data,
};
u32 size = 0;
u64 regions_mask = le64_to_cpu(trigger->regions_mask) &
~(fwrt->trans->dbg.unsupported_region_msk);
BUILD_BUG_ON(sizeof(trigger->regions_mask) != sizeof(regions_mask));
BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) <
ARRAY_SIZE(fwrt->trans->dbg.active_regions));
if (trigger->time_point &
cpu_to_le32(IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE)) {
size += iwl_dump_ini_dump_regions(fwrt, dump_data, list, tp_id,
regions_mask, &imr_reg_data,
IWL_INI_DUMP_EARLY_REGIONS);
iwl_trans_pcie_fw_reset_handshake(fwrt->trans);
size += iwl_dump_ini_dump_regions(fwrt, dump_data, list, tp_id,
regions_mask, &imr_reg_data,
IWL_INI_DUMP_LATE_REGIONS);
} else {
size += iwl_dump_ini_dump_regions(fwrt, dump_data, list, tp_id,
regions_mask, &imr_reg_data,
IWL_INI_DUMP_ALL_REGIONS);
}
/* collect DRAM_IMR region in the last */
if (imr_reg_data.reg_tlv)
size += iwl_dump_ini_mem(fwrt, list, &reg_data,
size += iwl_dump_ini_mem(fwrt, list, &imr_reg_data,
&iwl_dump_ini_region_ops[IWL_FW_INI_REGION_DRAM_IMR]);
if (size) {
@ -3072,9 +3123,8 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
}
IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
u32 timepoint,
u32 timepoint_data)
static void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
u32 timepoint, u32 timepoint_data)
{
struct iwl_dbg_dump_complete_cmd hcmd_data;
struct iwl_host_cmd hcmd = {
@ -3102,6 +3152,7 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
struct iwl_fw_dbg_params params = {0};
struct iwl_fwrt_dump_data *dump_data =
&fwrt->dump.wks[wk_idx].dump_data;
if (!test_bit(wk_idx, &fwrt->dump.active_wks))
return;
@ -3126,9 +3177,9 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection start\n");
if (iwl_trans_dbg_ini_valid(fwrt->trans))
iwl_fw_error_ini_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
iwl_fw_error_ini_dump(fwrt, dump_data);
else
iwl_fw_error_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
iwl_fw_error_dump(fwrt, dump_data);
IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection done\n");
iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
@ -3145,7 +3196,6 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
iwl_force_nmi(fwrt->trans);
out:
if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
iwl_fw_error_dump_data_free(dump_data);

View File

@ -324,9 +324,6 @@ static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt,
}
void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt);
void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt,
u32 timepoint,
u32 timepoint_data);
bool iwl_fwrt_read_err_table(struct iwl_trans *trans, u32 base, u32 *err_id);
void iwl_fw_disable_dbg_asserts(struct iwl_fw_runtime *fwrt);
void iwl_fw_dbg_clear_monitor_buf(struct iwl_fw_runtime *fwrt);

View File

@ -0,0 +1,75 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2021, 2025 Intel Corporation
*/
#ifndef __iwl_fw_dhc_utils_h__
#define __iwl_fw_dhc_utils_h__
#include <linux/types.h>
#include "fw/img.h"
#include "api/commands.h"
#include "api/dhc.h"
/**
 * iwl_dhc_resp_status - return status of DHC response
 * @fw: firmware image information (used to pick the response layout)
 * @pkt: response packet, must not be %NULL
 *
 * Returns: the status value of the DHC command or (u32)-1 if the
 * response was too short.
 */
static inline u32 iwl_dhc_resp_status(const struct iwl_fw *fw,
				      struct iwl_rx_packet *pkt)
{
	u32 pkt_len = iwl_rx_packet_payload_len(pkt);

	/* notification version selects v1 vs. current response layout */
	if (iwl_fw_lookup_notif_ver(fw, IWL_ALWAYS_LONG_GROUP,
				    DEBUG_HOST_COMMAND, 1) < 2) {
		struct iwl_dhc_cmd_resp_v1 *r = (void *)pkt->data;

		if (pkt_len < sizeof(*r))
			return (u32)-1;

		return le32_to_cpu(r->status);
	} else {
		struct iwl_dhc_cmd_resp *r = (void *)pkt->data;

		if (pkt_len < sizeof(*r))
			return (u32)-1;

		return le32_to_cpu(r->status);
	}
}
/**
* iwl_dhc_resp_data - return data pointer of DHC response
* @fw: firwmware image information
* @pkt: response packet, must not be %NULL
* @len: where to store the length
*
* Returns: The data pointer, or an ERR_PTR() if the data was
* not valid (too short).
*/
static inline void *iwl_dhc_resp_data(const struct iwl_fw *fw,
struct iwl_rx_packet *pkt,
unsigned int *len)
{
if (iwl_fw_lookup_notif_ver(fw, IWL_ALWAYS_LONG_GROUP,
DEBUG_HOST_COMMAND, 1) >= 2) {
struct iwl_dhc_cmd_resp *resp = (void *)pkt->data;
if (iwl_rx_packet_payload_len(pkt) < sizeof(*resp))
return ERR_PTR(-EINVAL);
*len = iwl_rx_packet_payload_len(pkt) - sizeof(*resp);
return (void *)&resp->data;
} else {
struct iwl_dhc_cmd_resp_v1 *resp = (void *)pkt->data;
if (iwl_rx_packet_payload_len(pkt) < sizeof(*resp))
return ERR_PTR(-EINVAL);
*len = iwl_rx_packet_payload_len(pkt) - sizeof(*resp);
return (void *)&resp->data;
}
}
#endif /* __iwl_fw_dhc_utils_h__ */

View File

@ -456,7 +456,25 @@ iwl_parse_tas_selection(const u32 tas_selection_in, const u8 tbl_rev)
}
IWL_EXPORT_SYMBOL(iwl_parse_tas_selection);
static __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
/*
 * iwl_add_mcc_to_tas_block_list - add an MCC to the TAS block list
 * @list: block list array (at least IWL_WTAS_BLACK_LIST_MAX entries)
 * @size: in/out count of valid entries in @list; incremented on insert
 * @mcc: mobile country code to add
 *
 * Returns: true if @mcc is in the list (already present or newly added),
 * false if the list is full and @mcc could not be added.
 */
bool iwl_add_mcc_to_tas_block_list(u16 *list, u8 *size, u16 mcc)
{
	/* already present: nothing to do */
	for (int i = 0; i < *size; i++) {
		if (list[i] == mcc)
			return true;
	}

	/* Verify that there is room for another country
	 * If *size == IWL_WTAS_BLACK_LIST_MAX, then the table is full.
	 */
	if (*size >= IWL_WTAS_BLACK_LIST_MAX)
		return false;

	/*
	 * Fix: "list[*size++]" parses as "list[*(size++)]" — it advanced
	 * the pointer instead of the stored count, so the caller's size was
	 * never incremented and each call overwrote the same slot.
	 */
	list[(*size)++] = mcc;
	return true;
}
IWL_EXPORT_SYMBOL(iwl_add_mcc_to_tas_block_list);
__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
{
int ret;
u32 val;
@ -503,6 +521,7 @@ static __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
return config_bitmap;
}
IWL_EXPORT_SYMBOL(iwl_get_lari_config_bitmap);
static size_t iwl_get_lari_config_cmd_size(u8 cmd_ver)
{
@ -553,6 +572,10 @@ int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE), 1);
if (WARN_ONCE(cmd_ver > 12,
"Don't add newer versions to this function\n"))
return -EINVAL;
memset(cmd, 0, sizeof(*cmd));
*cmd_size = iwl_get_lari_config_cmd_size(cmd_ver);
@ -674,3 +697,34 @@ bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc)
}
}
IWL_EXPORT_SYMBOL(iwl_puncturing_is_allowed_in_bios);
/*
 * iwl_rfi_is_enabled_in_bios - check whether BIOS enables RFI
 * @fwrt: firmware runtime
 *
 * Reads the DSM_FUNC_RFI_CONFIG BIOS method and evaluates it.
 * Only 0 (DDR and DLVR enabled) and DSM_VALUE_RFI_DISABLE (both
 * disabled) are valid BIOS configurations; anything else is treated
 * as disabled.
 *
 * Returns: true if RFI is enabled in BIOS, false otherwise (including
 * on read failure or invalid BIOS value).
 */
bool iwl_rfi_is_enabled_in_bios(struct iwl_fw_runtime *fwrt)
{
	u32 dsm_val = 0;	/* default behaviour is disabled */
	int ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_RFI_CONFIG, &dsm_val);

	if (ret < 0) {
		IWL_DEBUG_RADIO(fwrt, "Failed to get DSM RFI, ret=%d\n", ret);
		return false;
	}

	dsm_val &= DSM_VALUE_RFI_DISABLE;

	/* RFI BIOS CONFIG value can be 0 or 3 only.
	 * i.e 0 means DDR and DLVR enabled. 3 means DDR and DLVR disabled.
	 * 1 and 2 are invalid BIOS configurations, So, it's not possible to
	 * disable ddr/dlvr separately.
	 */
	if (dsm_val == DSM_VALUE_RFI_DISABLE) {
		IWL_DEBUG_RADIO(fwrt, "DSM RFI is evaluated to disable\n");
		return false;
	}

	if (dsm_val) {
		IWL_DEBUG_RADIO(fwrt,
				"DSM RFI got invalid value, value=%d\n", dsm_val);
		return false;
	}

	IWL_DEBUG_RADIO(fwrt, "DSM RFI is evaluated to enable\n");
	return true;
}
IWL_EXPORT_SYMBOL(iwl_rfi_is_enabled_in_bios);

Some files were not shown because too many files have changed in this diff Show More