* [PATCH wireless-next v2 01/31] wifi: mm81x: add bus.h
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 02/31] wifi: mm81x: add command.c Lachlan Hodges
` (30 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/bus.h | 99 +++++++++++++++++++++
1 file changed, 99 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/bus.h
diff --git a/drivers/net/wireless/morsemicro/mm81x/bus.h b/drivers/net/wireless/morsemicro/mm81x/bus.h
new file mode 100644
index 000000000000..d2ccabc037fb
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/bus.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+
+#ifndef _MM81X_BUS_H_
+#define _MM81X_BUS_H_
+
+#include <linux/skbuff.h>
+#include "core.h"
+
+/* Physical transports the chip can be attached over. */
+enum mm81x_bus_type {
+	MM81X_BUS_TYPE_USB,
+	MM81X_BUS_TYPE_SDIO,
+};
+
+/**
+ * struct mm81x_bus_ops - bus-specific operations implemented by each transport.
+ * @dm_read: read @len bytes of device memory at @addr into @data.
+ * @dm_write: write @len bytes from @data to device memory at @addr.
+ * @reg32_read: read a 32-bit register at @addr into @data.
+ * @reg32_write: write the 32-bit value @data to the register at @addr.
+ * @digital_reset: optional; reset the chip's digital core.
+ * @set_bus_enable: enable or disable the bus transport.
+ * @config_burst_mode: optional; toggle burst transfers on the bus.
+ * @claim: take exclusive ownership of the bus (paired with @release).
+ * @set_irq: enable or disable the bus interrupt.
+ * @release: drop the ownership taken by @claim.
+ * @bulk_alignment: required TX buffer alignment for bulk transfers.
+ */
+struct mm81x_bus_ops {
+	int (*dm_read)(struct mm81x *mors, u32 addr, u8 *data, int len);
+	int (*dm_write)(struct mm81x *mors, u32 addr, const u8 *data, int len);
+	int (*reg32_read)(struct mm81x *mors, u32 addr, u32 *data);
+	int (*reg32_write)(struct mm81x *mors, u32 addr, u32 data);
+	int (*digital_reset)(struct mm81x *mors);
+	void (*set_bus_enable)(struct mm81x *mors, bool enable);
+	void (*config_burst_mode)(struct mm81x *mors, bool enable_burst);
+	void (*claim)(struct mm81x *mors);
+	void (*set_irq)(struct mm81x *mors, bool enable);
+	void (*release)(struct mm81x *mors);
+	unsigned int bulk_alignment;
+};
+
+/*
+ * Default TX alignment for buses which don't care. mac80211 will give us
+ * SKBs aligned to the 2 byte boundary, so 2 is effectively a noop.
+ */
+#define MM81X_BUS_DEFAULT_BULK_ALIGNMENT (2)
+
+/*
+ * Read @len bytes of device memory at @addr into @data.
+ * Note: @len must be rounded up to the nearest 4-byte boundary.
+ */
+static inline int mm81x_dm_read(struct mm81x *mors, u32 addr, u8 *data, int len)
+{
+	const struct mm81x_bus_ops *ops = mors->bus_ops;
+
+	return ops->dm_read(mors, addr, data, len);
+}
+
+/* Write @len bytes from @data to device memory at @addr. */
+static inline int mm81x_dm_write(struct mm81x *mors, u32 addr, const u8 *data,
+				 int len)
+{
+	const struct mm81x_bus_ops *ops = mors->bus_ops;
+
+	return ops->dm_write(mors, addr, data, len);
+}
+
+/* Read the 32-bit register at @addr into @data. */
+static inline int mm81x_reg32_read(struct mm81x *mors, u32 addr, u32 *data)
+{
+	const struct mm81x_bus_ops *ops = mors->bus_ops;
+
+	return ops->reg32_read(mors, addr, data);
+}
+
+/* Write the 32-bit value @data to the register at @addr. */
+static inline int mm81x_reg32_write(struct mm81x *mors, u32 addr, u32 data)
+{
+	const struct mm81x_bus_ops *ops = mors->bus_ops;
+
+	return ops->reg32_write(mors, addr, data);
+}
+
+/* Reset the chip's digital core. Optional op; succeeds when unimplemented. */
+static inline int mm81x_bus_digital_reset(struct mm81x *mors)
+{
+	const struct mm81x_bus_ops *ops = mors->bus_ops;
+
+	return ops->digital_reset ? ops->digital_reset(mors) : 0;
+}
+
+/* Enable or disable the underlying bus transport. */
+static inline void mm81x_set_bus_enable(struct mm81x *mors, bool enable)
+{
+	const struct mm81x_bus_ops *ops = mors->bus_ops;
+
+	ops->set_bus_enable(mors, enable);
+}
+
+/* Toggle burst transfers. Optional op; a no-op when unimplemented. */
+static inline void mm81x_bus_config_burst_mode(struct mm81x *mors,
+					       bool enable_burst)
+{
+	const struct mm81x_bus_ops *ops = mors->bus_ops;
+
+	if (ops->config_burst_mode)
+		ops->config_burst_mode(mors, enable_burst);
+}
+
+/* Take exclusive ownership of the bus; pair with mm81x_release_bus(). */
+static inline void mm81x_claim_bus(struct mm81x *mors)
+{
+	const struct mm81x_bus_ops *ops = mors->bus_ops;
+
+	ops->claim(mors);
+}
+
+/* Enable or disable the bus interrupt. */
+static inline void mm81x_bus_set_irq(struct mm81x *mors, bool enable)
+{
+	const struct mm81x_bus_ops *ops = mors->bus_ops;
+
+	ops->set_irq(mors, enable);
+}
+
+/* Release the bus ownership taken by mm81x_claim_bus(). */
+static inline void mm81x_release_bus(struct mm81x *mors)
+{
+	const struct mm81x_bus_ops *ops = mors->bus_ops;
+
+	ops->release(mors);
+}
+
+/* Return the bus's required TX buffer alignment for bulk transfers. */
+static inline unsigned int mm81x_bus_get_alignment(struct mm81x *mors)
+{
+	const struct mm81x_bus_ops *ops = mors->bus_ops;
+
+	return ops->bulk_alignment;
+}
+
+#endif /* !_MM81X_BUS_H_ */
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 02/31] wifi: mm81x: add command.c
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 01/31] wifi: mm81x: add bus.h Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 03/31] wifi: mm81x: add command_defs.h Lachlan Hodges
` (29 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
.../net/wireless/morsemicro/mm81x/command.c | 569 ++++++++++++++++++
1 file changed, 569 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/command.c
diff --git a/drivers/net/wireless/morsemicro/mm81x/command.c b/drivers/net/wireless/morsemicro/mm81x/command.c
new file mode 100644
index 000000000000..704453c6f139
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/command.c
@@ -0,0 +1,569 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "command.h"
+#include "mac.h"
+#include "ps.h"
+#include "hif.h"
+
+/* Transmission attempts per command (initial send + one retry). */
+#define MM_MAX_COMMAND_RETRY 2
+/* Response wait when the caller passes timeout == 0. */
+#define HOST_CMD_DEFAULT_TIMEOUT_MS 600
+/* Longer wait for powersave config; see mm81x_cmd_set_ps(). */
+#define HOST_CMD_POWERSAVE_TIMEOUT_MS 2000
+
+/*
+ * Build a host_cmd_header for request _req with command id _cmd on interface
+ * _vif_id. .len is derived from sizeof(_req), so _req must be the full
+ * fixed-size request struct; variable-length commands overwrite hdr.len
+ * afterwards (see mm81x_cmd_hw_scan() / mm81x_cmd_cfg_multicast_filter()).
+ * flags and host_id are filled in later by mm81x_cmd_tx().
+ */
+#define INIT_CMD_HDR(_req, _cmd, _vif_id) \
+	((struct host_cmd_header){ \
+		.flags = cpu_to_le16(0), \
+		.message_id = cpu_to_le16(_cmd), \
+		.len = cpu_to_le16(sizeof(_req) - sizeof((_req).hdr)), \
+		.host_id = cpu_to_le16(0), \
+		.vif_id = cpu_to_le16(_vif_id), \
+		.pad = cpu_to_le16(0), \
+	})
+
+/*
+ * Per-command response context, stashed in the command skb's control block
+ * (IEEE80211_SKB_CB(...)->driver_data) so mm81x_cmd_resp_process() can hand
+ * the result back to the waiting mm81x_cmd_tx() caller.
+ */
+struct host_cmd_resp_cb {
+	int ret;			/* firmware status / errno for the caller */
+	u32 length;			/* size of the caller's response buffer */
+	struct host_cmd_resp *dest_resp;	/* caller's buffer, may be NULL */
+};
+
+/**
+ * mm81x_cmd_tx() - send a command request to firmware and wait for the reply.
+ * @mors: driver instance.
+ * @resp: buffer the response is copied into, or NULL when only the status
+ *        code is wanted.
+ * @req: fully built request; hdr.len must already hold the payload length.
+ * @length: size of @resp in bytes (0 when @resp is NULL).
+ * @timeout: response timeout in ms; 0 selects HOST_CMD_DEFAULT_TIMEOUT_MS.
+ *
+ * Commands are serialized under mors->cmd_wait so only one is in flight.
+ * The per-command sequence number lives in the upper bits of hdr.host_id
+ * and the retry counter in the lower bits. A timed-out command is resent
+ * up to MM_MAX_COMMAND_RETRY times in total.
+ *
+ * Return: 0 on success, negative errno on transport failure or timeout,
+ * otherwise the firmware status code.
+ */
+static int mm81x_cmd_tx(struct mm81x *mors, struct host_cmd_resp *resp,
+			struct host_cmd_req *req, u32 length, u32 timeout)
+{
+	int cmd_len;
+	int ret = 0;
+	u16 host_id;
+	int retry = 0;
+	unsigned long wait_ret = 0;
+	struct sk_buff *skb;
+	struct mm81x_skbq *cmd_q = mm81x_hif_get_tx_cmd_queue(mors);
+	struct host_cmd_resp_cb *resp_cb;
+	DECLARE_COMPLETION_ONSTACK(cmd_comp);
+
+	/* The response context must fit in the skb control block. */
+	BUILD_BUG_ON(sizeof(struct host_cmd_resp_cb) >
+		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+
+	if (!cmd_q)
+		/* No control pageset, not supported by FW */
+		return -ENODEV;
+
+	cmd_len = sizeof(*req) + le16_to_cpu(req->hdr.len);
+	req->hdr.flags = cpu_to_le16(HOST_CMD_TYPE_REQ);
+
+	/* One command in flight at a time; also protects cmd_seq. */
+	mutex_lock(&mors->cmd_wait);
+	mors->cmd_seq++;
+	if (mors->cmd_seq > HOST_CMD_HOST_ID_SEQ_MAX)
+		mors->cmd_seq = 1;
+	host_id = mors->cmd_seq << HOST_CMD_HOST_ID_SEQ_SHIFT;
+
+	/* Keep the chip awake for the whole exchange. */
+	mm81x_ps_disable(mors);
+
+	do {
+		/* Retry counter goes in the low host_id bits. */
+		req->hdr.host_id = cpu_to_le16(host_id | retry);
+
+		skb = mm81x_skbq_alloc_skb(cmd_q, cmd_len);
+		if (!skb) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		memcpy(skb->data, req, cmd_len);
+		resp_cb = (struct host_cmd_resp_cb *)IEEE80211_SKB_CB(skb)
+				  ->driver_data;
+		resp_cb->length = length;
+		resp_cb->dest_resp = resp;
+
+		dev_dbg(mors->dev, "CMD 0x%04x:%04x",
+			le16_to_cpu(req->hdr.message_id),
+			le16_to_cpu(req->hdr.host_id));
+
+		/* cmd_lock guards cmd_comp against the response path. */
+		mutex_lock(&mors->cmd_lock);
+		mors->cmd_comp = &cmd_comp;
+		if (retry > 0)
+			reinit_completion(&cmd_comp);
+		/* timeout == 0 means "use the default". */
+		timeout = timeout ? timeout : HOST_CMD_DEFAULT_TIMEOUT_MS;
+		ret = mm81x_skbq_skb_tx(cmd_q, &skb, NULL,
+					MM81X_SKB_CHAN_COMMAND);
+		mutex_unlock(&mors->cmd_lock);
+
+		if (ret) {
+			/*
+			 * NOTE(review): skb is not freed here — presumably
+			 * mm81x_skbq_skb_tx() consumes it on failure; verify.
+			 */
+			dev_err(mors->dev, "mm81x_skbq_tx fail: %d", ret);
+			break;
+		}
+
+		wait_ret = wait_for_completion_timeout(
+			&cmd_comp, msecs_to_jiffies(timeout));
+		/* Clear cmd_comp so a late response cannot complete us. */
+		mutex_lock(&mors->cmd_lock);
+		mors->cmd_comp = NULL;
+
+		if (!wait_ret) {
+			dev_err(mors->dev,
+				"Try:%d Command %04x:%04x timeout after %u ms",
+				retry, le16_to_cpu(req->hdr.message_id),
+				le16_to_cpu(req->hdr.host_id), timeout);
+			ret = -ETIMEDOUT;
+		} else {
+			/*
+			 * With a response buffer the status comes from the
+			 * copied response; otherwise from the context filled
+			 * in by mm81x_cmd_resp_process().
+			 */
+			ret = (length && resp) ? le32_to_cpu(resp->status) :
+						 resp_cb->ret;
+
+			dev_dbg(mors->dev, "Command 0x%04x:%04x status 0x%08x",
+				le16_to_cpu(req->hdr.message_id),
+				le16_to_cpu(req->hdr.host_id), ret);
+			if (ret) {
+				dev_err(mors->dev,
+					"Command 0x%04x:%04x error %d",
+					le16_to_cpu(req->hdr.message_id),
+					le16_to_cpu(req->hdr.host_id), ret);
+			}
+		}
+		/* Free the command request */
+		spin_lock_bh(&cmd_q->lock);
+		mm81x_skbq_skb_finish(cmd_q, skb, NULL);
+		spin_unlock_bh(&cmd_q->lock);
+		mutex_unlock(&mors->cmd_lock);
+
+		retry++;
+	} while ((ret == -ETIMEDOUT) && retry < MM_MAX_COMMAND_RETRY);
+
+	mm81x_ps_enable(mors);
+	mutex_unlock(&mors->cmd_wait);
+
+	if (ret == -ETIMEDOUT) {
+		dev_err(mors->dev, "Command %02x:%02x timed out",
+			le16_to_cpu(req->hdr.message_id),
+			le16_to_cpu(req->hdr.host_id));
+	} else if (ret != 0) {
+		dev_err(mors->dev,
+			"Command %02x:%02x failed with rc %d (0x%x)\n",
+			le16_to_cpu(req->hdr.message_id),
+			le16_to_cpu(req->hdr.host_id), ret, ret);
+	}
+
+	return ret;
+}
+
+/**
+ * mm81x_cmd_resp_process() - handle a response/event skb from firmware.
+ * @mors: driver instance.
+ * @skb: received message; freed before returning on the response path.
+ *
+ * Events are forwarded to mm81x_mac_event_recv(). Responses are matched
+ * against the pending command on the TX command queue by message id and the
+ * sequence bits of host_id; the result is written into the command skb's
+ * host_cmd_resp_cb and the waiter in mm81x_cmd_tx() is completed.
+ *
+ * Return: always 0.
+ */
+int mm81x_cmd_resp_process(struct mm81x *mors, struct sk_buff *skb)
+{
+	int length, ret = -ESRCH; /* No such process */
+	struct mm81x_skbq *cmd_q = mm81x_hif_get_tx_cmd_queue(mors);
+	struct host_cmd_resp *src_resp = (struct host_cmd_resp *)(skb->data);
+	struct sk_buff *cmd_skb = NULL;
+	struct host_cmd_resp_cb *resp_cb;
+	struct host_cmd_resp *dest_resp;
+	struct host_cmd_req *req;
+	u16 message_id = 0;
+	u16 host_id = 0;
+	u16 resp_message_id = le16_to_cpu(src_resp->hdr.message_id);
+	u16 resp_host_id = le16_to_cpu(src_resp->hdr.host_id);
+	bool is_late_response = false;
+
+	dev_dbg(mors->dev, "EVT 0x%04x:0x%04x", resp_message_id, resp_host_id);
+
+	if (!HOST_CMD_IS_RESP(src_resp)) {
+		/*
+		 * Unsolicited event, not a command response.
+		 * NOTE(review): skb is freed at exit_free — assumes
+		 * mm81x_mac_event_recv() does not consume it; verify.
+		 */
+		ret = mm81x_mac_event_recv(mors, skb);
+		goto exit_free;
+	}
+
+	mutex_lock(&mors->cmd_lock);
+
+	cmd_skb = mm81x_skbq_tx_pending(cmd_q);
+	if (cmd_skb) {
+		mm81x_skbq_pull_hdr_post_tx(cmd_skb);
+		req = (struct host_cmd_req *)cmd_skb->data;
+		message_id = le16_to_cpu(req->hdr.message_id);
+		host_id = le16_to_cpu(req->hdr.host_id);
+	}
+
+	/*
+	 * If there is no pending command or the sequence ID does not match,
+	 * this is a late response for a timed out command which has been
+	 * cleaned up, so just free up the response. If a command was retried,
+	 * the response may be from the retry or from the original command
+	 * (late response) but not from both because the firmware will silently
+	 * drop a retry if it received the initial request. So a mismatched
+	 * retry counter is treated as a matched command and response.
+	 */
+	if (!cmd_skb || message_id != resp_message_id ||
+	    (host_id & HOST_CMD_HOST_ID_SEQ_MASK) !=
+		    (resp_host_id & HOST_CMD_HOST_ID_SEQ_MASK)) {
+		dev_err(mors->dev,
+			"Late response for timed out req 0x%04x:%04x have 0x%04x:%04x 0x%04x",
+			resp_message_id, resp_host_id, message_id, host_id,
+			mors->cmd_seq);
+		is_late_response = true;
+		goto exit;
+	}
+	if ((host_id & HOST_CMD_HOST_ID_RETRY_MASK) !=
+	    (resp_host_id & HOST_CMD_HOST_ID_RETRY_MASK))
+		dev_dbg(mors->dev,
+			"Command retry mismatch 0x%04x:%04x 0x%04x:%04x",
+			message_id, host_id, resp_message_id, resp_host_id);
+
+	/* Hand the result back through the command skb's control block. */
+	resp_cb = (struct host_cmd_resp_cb *)IEEE80211_SKB_CB(cmd_skb)
+			  ->driver_data;
+	length = resp_cb->length;
+	dest_resp = resp_cb->dest_resp;
+	if (length >= sizeof(struct host_cmd_resp) && dest_resp) {
+		ret = 0;
+		/* Copy no more than the firmware actually sent. */
+		length = min_t(int, length,
+			       le16_to_cpu(src_resp->hdr.len) +
+				       sizeof(struct host_cmd_header));
+		memcpy(dest_resp, src_resp, length);
+	} else {
+		ret = le32_to_cpu(src_resp->status);
+	}
+
+	resp_cb->ret = ret;
+
+exit:
+	if (cmd_skb && !is_late_response) {
+		/* Complete if not already timed out */
+		if (mors->cmd_comp)
+			complete(mors->cmd_comp);
+	}
+
+	mutex_unlock(&mors->cmd_lock);
+exit_free:
+	dev_kfree_skb(skb);
+	return 0;
+}
+
+/**
+ * mm81x_cmd_sta_state() - notify firmware of a station state transition.
+ * @mors: driver instance.
+ * @mors_vif: interface the station belongs to.
+ * @aid: association ID of the station.
+ * @sta: mac80211 station entry (address and U-APSD queues are taken from it).
+ * @state: new mac80211 station state.
+ *
+ * Return: 0 on success or a mm81x_cmd_tx() error code.
+ */
+int mm81x_cmd_sta_state(struct mm81x *mors, struct mm81x_vif *mors_vif, u16 aid,
+			struct ieee80211_sta *sta,
+			enum ieee80211_sta_state state)
+{
+	struct host_cmd_req_set_sta_state req = {};
+
+	req.hdr = INIT_CMD_HDR(req, HOST_CMD_ID_SET_STA_STATE, mors_vif->id);
+	req.aid = cpu_to_le16(aid);
+	req.state = cpu_to_le16(state);
+	req.uapsd_queues = sta->uapsd_queues;
+	memcpy(req.sta_addr, sta->addr, sizeof(req.sta_addr));
+
+	return mm81x_cmd_tx(mors, NULL, (struct host_cmd_req *)&req, 0, 0);
+}
+
+/**
+ * mm81x_cmd_add_if() - create a firmware interface.
+ * @mors: driver instance.
+ * @vif_id: on success, filled with the firmware-assigned interface id.
+ * @addr: MAC address for the new interface.
+ * @type: mac80211 interface type; only STATION and AP are supported.
+ *
+ * Return: 0 on success, -EOPNOTSUPP for an unsupported @type, or a
+ * mm81x_cmd_tx() error code.
+ */
+int mm81x_cmd_add_if(struct mm81x *mors, u16 *vif_id, const u8 *addr,
+		     enum nl80211_iftype type)
+{
+	struct host_cmd_req_add_interface req = {};
+	struct host_cmd_resp_add_interface resp;
+	u32 iface_type;
+	int ret;
+
+	if (type == NL80211_IFTYPE_STATION)
+		iface_type = HOST_CMD_INTERFACE_TYPE_STA;
+	else if (type == NL80211_IFTYPE_AP)
+		iface_type = HOST_CMD_INTERFACE_TYPE_AP;
+	else
+		return -EOPNOTSUPP;
+
+	req.hdr = INIT_CMD_HDR(req, HOST_CMD_ID_ADD_INTERFACE, 0);
+	req.interface_type = cpu_to_le32(iface_type);
+	memcpy(req.addr.octet, addr, sizeof(req.addr.octet));
+
+	ret = mm81x_cmd_tx(mors, (struct host_cmd_resp *)&resp,
+			   (struct host_cmd_req *)&req, sizeof(resp), 0);
+	if (!ret)
+		*vif_id = le16_to_cpu(resp.hdr.vif_id);
+
+	return ret;
+}
+
+/**
+ * mm81x_cmd_get_capabilities() - query firmware capabilities.
+ * @mors: driver instance.
+ * @vif_id: interface to query.
+ * @capabilities: filled with the firmware's reported capabilities on success.
+ *
+ * Return: 0 on success or a mm81x_cmd_tx() error code.
+ */
+int mm81x_cmd_get_capabilities(struct mm81x *mors, u16 vif_id,
+			       struct mm81x_fw_caps *capabilities)
+{
+	struct host_cmd_req_get_capabilities req = {
+		.hdr = INIT_CMD_HDR(req, HOST_CMD_ID_GET_CAPABILITIES, vif_id),
+	};
+	struct host_cmd_resp_get_capabilities resp;
+	unsigned int i;
+	int ret;
+
+	ret = mm81x_cmd_tx(mors, (struct host_cmd_resp *)&resp,
+			   (struct host_cmd_req *)&req, sizeof(resp), 0);
+	if (ret)
+		return ret;
+
+	/* Flags are little-endian on the wire; the rest are single bytes. */
+	for (i = 0; i < FW_CAPABILITIES_FLAGS_WIDTH; i++)
+		capabilities->flags[i] =
+			le32_to_cpu(resp.capabilities.flags[i]);
+	capabilities->ampdu_mss = resp.capabilities.ampdu_mss;
+	capabilities->beamformee_sts_capability =
+		resp.capabilities.beamformee_sts_capability;
+	capabilities->maximum_ampdu_length_exponent =
+		resp.capabilities.maximum_ampdu_length_exponent;
+	capabilities->number_sounding_dimensions =
+		resp.capabilities.number_sounding_dimensions;
+	capabilities->mm81x_mmss_offset = resp.morse_mmss_offset;
+
+	return 0;
+}
+
+/**
+ * mm81x_cmd_get_max_txpower() - query the firmware's maximum TX power.
+ * @mors: driver instance.
+ * @out_power_mbm: on success, the maximum power in mBm.
+ *
+ * Return: 0 on success or a mm81x_cmd_tx() error code.
+ */
+int mm81x_cmd_get_max_txpower(struct mm81x *mors, s32 *out_power_mbm)
+{
+	struct host_cmd_req_get_max_txpower req = {};
+	struct host_cmd_resp_get_max_txpower resp;
+	int ret;
+
+	req.hdr = INIT_CMD_HDR(req, HOST_CMD_ID_GET_MAX_TXPOWER, 0);
+
+	ret = mm81x_cmd_tx(mors, (struct host_cmd_resp *)&resp,
+			   (struct host_cmd_req *)&req, sizeof(resp), 0);
+	if (ret)
+		return ret;
+
+	/* Firmware reports quarter-dBm; callers work in mBm. */
+	*out_power_mbm = QDBM_TO_MBM(le32_to_cpu(resp.power_qdbm));
+
+	return 0;
+}
+
+/**
+ * mm81x_cmd_hw_scan() - start, stop or store a hardware scan.
+ * @mors: driver instance.
+ * @params: scan parameters; params->operation selects start vs stop.
+ * @store: when true the scan is stored in firmware (STORE flag) instead of
+ *         started/aborted.
+ *
+ * The request is variable-length: TLVs are appended to req->variable when
+ * starting a scan, and hdr.len is recomputed from the final write pointer
+ * (overwriting the fixed-size value INIT_CMD_HDR filled in).
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure, or a mm81x_cmd_tx()
+ * error code.
+ */
+int mm81x_cmd_hw_scan(struct mm81x *mors, struct mm81x_hw_scan_params *params,
+		      bool store)
+{
+	int ret;
+	struct host_cmd_req_hw_scan *req;
+	size_t cmd_size;
+	u8 *buf;
+	u32 flags = 0;
+
+	/* Size the request for the TLVs, rounded to a 4-byte boundary. */
+	cmd_size = mm81x_hw_scan_h_get_cmd_size(params);
+	cmd_size = ROUND_BYTES_TO_WORD(cmd_size);
+
+	req = kzalloc(cmd_size, GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	buf = req->variable;
+
+	if (store)
+		flags = HOST_CMD_HW_SCAN_FLAGS_STORE;
+	else if (params->operation == MM81X_HW_SCAN_OP_START)
+		flags |= HOST_CMD_HW_SCAN_FLAGS_START;
+	else if (params->operation == MM81X_HW_SCAN_OP_STOP)
+		flags |= HOST_CMD_HW_SCAN_FLAGS_ABORT;
+
+	if (params->use_1mhz_probes)
+		flags |= HOST_CMD_HW_SCAN_FLAGS_1MHZ_PROBES;
+
+	if (params->operation == MM81X_HW_SCAN_OP_START) {
+		req->dwell_time_ms = cpu_to_le32(params->dwell_time_ms);
+		/* Helper advances buf past the TLVs it wrote. */
+		buf = mm81x_hw_scan_h_insert_tlvs(params, buf);
+	}
+
+	req->flags = cpu_to_le32(flags);
+	req->hdr = INIT_CMD_HDR((*req), HOST_CMD_ID_HW_SCAN, 0);
+	/* Actual payload length: fixed part plus TLVs actually written. */
+	req->hdr.len = cpu_to_le16((u16)((buf - (u8 *)req) - sizeof(req->hdr)));
+	ret = mm81x_cmd_tx(mors, NULL, (struct host_cmd_req *)req, 0, 0);
+	kfree(req);
+
+	return ret;
+}
+
+/**
+ * mm81x_cmd_set_txpower() - set the TX power.
+ * @mors: driver instance.
+ * @out_power_mbm: on success, the power actually applied, in mBm.
+ * @txpower_mbm: requested power in mBm.
+ *
+ * Return: 0 on success or a mm81x_cmd_tx() error code.
+ */
+int mm81x_cmd_set_txpower(struct mm81x *mors, s32 *out_power_mbm,
+			  int txpower_mbm)
+{
+	struct host_cmd_req_set_txpower req = {};
+	struct host_cmd_resp_set_txpower resp;
+	int ret;
+
+	req.hdr = INIT_CMD_HDR(req, HOST_CMD_ID_SET_TXPOWER, 0);
+	req.power_qdbm = cpu_to_le32(MBM_TO_QDBM(txpower_mbm));
+
+	ret = mm81x_cmd_tx(mors, (struct host_cmd_resp *)&resp,
+			   (struct host_cmd_req *)&req, sizeof(resp), 0);
+	if (ret)
+		return ret;
+
+	/* Firmware may clamp the request; report what was applied. */
+	*out_power_mbm = QDBM_TO_MBM(le32_to_cpu(resp.power_qdbm));
+
+	return 0;
+}
+
+/**
+ * mm81x_cmd_set_channel() - tune the radio to an S1G channel.
+ * @mors: driver instance.
+ * @op_chan_freq_hz: operating channel centre frequency in Hz.
+ * @pri_1mhz_chan_idx: index of the primary 1 MHz channel.
+ * @op_bw_mhz: operating bandwidth in MHz.
+ * @pri_bw_mhz: primary channel bandwidth in MHz.
+ * @power_mbm: on success, the resulting TX power in mBm.
+ *
+ * Return: 0 on success or a mm81x_cmd_tx() error code.
+ */
+int mm81x_cmd_set_channel(struct mm81x *mors, u32 op_chan_freq_hz,
+			  u8 pri_1mhz_chan_idx, u8 op_bw_mhz, u8 pri_bw_mhz,
+			  s32 *power_mbm)
+{
+	struct host_cmd_req_set_channel req = {};
+	struct host_cmd_resp_set_channel resp;
+	int ret;
+
+	req.hdr = INIT_CMD_HDR(req, HOST_CMD_ID_SET_CHANNEL, 0);
+	req.op_chan_freq_hz = cpu_to_le32(op_chan_freq_hz);
+	req.op_bw_mhz = op_bw_mhz;
+	req.pri_bw_mhz = pri_bw_mhz;
+	req.pri_1mhz_chan_idx = pri_1mhz_chan_idx;
+	req.dot11_mode = HOST_CMD_DOT11_PROTO_MODE_AH;
+
+	ret = mm81x_cmd_tx(mors, (struct host_cmd_resp *)&resp,
+			   (struct host_cmd_req *)&req, sizeof(resp), 0);
+	if (ret)
+		return ret;
+
+	*power_mbm = QDBM_TO_MBM(le32_to_cpu(resp.power_qdbm));
+
+	return 0;
+}
+
+/**
+ * mm81x_cmd_disable_key() - remove an installed key from firmware.
+ * @mors: driver instance.
+ * @mors_vif: interface the key belongs to.
+ * @aid: association ID the key is bound to.
+ * @key: mac80211 key; hw_key_idx identifies the firmware slot.
+ *
+ * Return: 0 on success or a mm81x_cmd_tx() error code.
+ */
+int mm81x_cmd_disable_key(struct mm81x *mors, struct mm81x_vif *mors_vif,
+			  u16 aid, struct ieee80211_key_conf *key)
+{
+	bool pairwise = key->flags & IEEE80211_KEY_FLAG_PAIRWISE;
+	struct host_cmd_req_disable_key req = {};
+
+	req.hdr = INIT_CMD_HDR(req, HOST_CMD_ID_DISABLE_KEY, mors_vif->id);
+	req.aid = cpu_to_le32(aid);
+	req.key_idx = key->hw_key_idx;
+	req.key_type = cpu_to_le32(pairwise ? HOST_CMD_TEMPORAL_KEY_TYPE_PTK :
+					      HOST_CMD_TEMPORAL_KEY_TYPE_GTK);
+
+	return mm81x_cmd_tx(mors, NULL, (struct host_cmd_req *)&req, 0, 0);
+}
+
+/**
+ * mm81x_cmd_install_key() - install a temporal key in firmware.
+ * @mors: driver instance.
+ * @mors_vif: interface the key belongs to.
+ * @aid: association ID the key is bound to.
+ * @key: mac80211 key material and flags; hw_key_idx is updated on success.
+ * @cipher: firmware cipher identifier.
+ * @length: firmware key-length identifier.
+ *
+ * Return: 0 on success, -EINVAL if the key is too large for the request,
+ * or a mm81x_cmd_tx() error code.
+ */
+int mm81x_cmd_install_key(struct mm81x *mors, struct mm81x_vif *mors_vif,
+			  u16 aid, struct ieee80211_key_conf *key,
+			  enum host_cmd_key_cipher cipher,
+			  enum host_cmd_aes_key_len length)
+{
+	bool pairwise = key->flags & IEEE80211_KEY_FLAG_PAIRWISE;
+	struct host_cmd_req_install_key req = {};
+	struct host_cmd_resp_install_key resp;
+	int ret;
+
+	if (key->keylen > sizeof(req.key))
+		return -EINVAL;
+
+	req.hdr = INIT_CMD_HDR(req, HOST_CMD_ID_INSTALL_KEY, mors_vif->id);
+	req.pn = cpu_to_le64(atomic64_read(&key->tx_pn));
+	req.aid = cpu_to_le32(aid);
+	req.cipher = cipher;
+	req.key_length = length;
+	req.key_idx = key->keyidx;
+	req.key_type = pairwise ? HOST_CMD_TEMPORAL_KEY_TYPE_PTK :
+				  HOST_CMD_TEMPORAL_KEY_TYPE_GTK;
+	memcpy(req.key, key->key, key->keylen);
+
+	ret = mm81x_cmd_tx(mors, (struct host_cmd_resp *)&resp,
+			   (struct host_cmd_req *)&req, sizeof(resp), 0);
+	if (ret)
+		return ret;
+
+	/* Remember the firmware slot so the key can be disabled later. */
+	key->hw_key_idx = resp.key_idx;
+	dev_dbg(mors->dev, "Installed key @ hw index: %d",
+		resp.key_idx);
+
+	return 0;
+}
+
+/**
+ * mm81x_cmd_cfg_multicast_filter() - push the multicast filter list to FW.
+ * @mors: driver instance.
+ * @mors_vif: interface to configure.
+ *
+ * Builds a variable-length request holding mors->mcast_filter's address
+ * list and recomputes hdr.len to cover it.
+ *
+ * NOTE(review): assumes mors->mcast_filter is non-NULL and that its
+ * contents are stable for the duration of this call (caller-side locking)
+ * — verify against the callers. Also assumes filter->count is small enough
+ * that alloc_len (u16) cannot wrap — confirm the bound on count.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure, or a mm81x_cmd_tx()
+ * error code.
+ */
+int mm81x_cmd_cfg_multicast_filter(struct mm81x *mors,
+				   struct mm81x_vif *mors_vif)
+{
+	struct host_cmd_req_mcast_filter *req;
+	struct mcast_filter *filter = mors->mcast_filter;
+	u16 filter_list_len = sizeof(filter->addr_list[0]) * filter->count;
+	u16 alloc_len = filter_list_len + sizeof(*req);
+	int ret = 0;
+
+	req = kzalloc(alloc_len, GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	req->hdr = INIT_CMD_HDR((*req), HOST_CMD_ID_MCAST_FILTER, mors_vif->id);
+	/* Fixed-size len from INIT_CMD_HDR is wrong here; include the list. */
+	req->hdr.len = cpu_to_le16(alloc_len - sizeof(req->hdr));
+	req->count = filter->count;
+	memcpy(req->hw_addr, filter->addr_list, filter_list_len);
+
+	ret = mm81x_cmd_tx(mors, NULL, (struct host_cmd_req *)req, 0, 0);
+	kfree(req);
+	return ret;
+}
+
+/**
+ * mm81x_cmd_cfg_bss() - configure BSS parameters in firmware.
+ * @mors: driver instance.
+ * @vif_id: interface to configure.
+ * @beacon_int: beacon interval in TUs.
+ * @dtim_period: DTIM period in beacon intervals.
+ * @cssid: compressed SSID value.
+ *
+ * Return: 0 on success or a mm81x_cmd_tx() error code.
+ */
+int mm81x_cmd_cfg_bss(struct mm81x *mors, u16 vif_id, u16 beacon_int,
+		      u16 dtim_period, u32 cssid)
+{
+	struct host_cmd_req_bss_config req = {};
+
+	req.hdr = INIT_CMD_HDR(req, HOST_CMD_ID_BSS_CONFIG, vif_id);
+	req.beacon_interval_tu = cpu_to_le16(beacon_int);
+	req.cssid = cpu_to_le32(cssid);
+	req.dtim_period = cpu_to_le16(dtim_period);
+
+	return mm81x_cmd_tx(mors, NULL, (struct host_cmd_req *)&req, 0, 0);
+}
+
+/**
+ * mm81x_cmd_config_beacon_timer() - enable/disable firmware beaconing timer.
+ * @mors: driver instance.
+ * @mm81x_vif: opaque pointer to the struct mm81x_vif to configure.
+ * @enabled: true to enable the beacon timer, false to disable it.
+ *
+ * Return: 0 on success or a mm81x_cmd_tx() error code.
+ */
+int mm81x_cmd_config_beacon_timer(struct mm81x *mors, void *mm81x_vif,
+				  bool enabled)
+{
+	struct mm81x_vif *mvif = mm81x_vif;
+	struct host_cmd_req_bss_beacon_config req = {};
+
+	req.hdr = INIT_CMD_HDR(req, HOST_CMD_ID_BSS_BEACON_CONFIG, mvif->id);
+	req.enable = enabled;
+
+	return mm81x_cmd_tx(mors, NULL, (struct host_cmd_req *)&req, 0, 0);
+}
+
+/**
+ * mm81x_cmd_set_ps() - enable or disable firmware powersave.
+ * @mors: driver instance.
+ * @enabled: desired powersave state.
+ *
+ * Uses the longer powersave timeout since the chip may be slow to respond
+ * around powersave transitions.
+ *
+ * Return: 0 on success or a mm81x_cmd_tx() error code.
+ */
+int mm81x_cmd_set_ps(struct mm81x *mors, bool enabled)
+{
+	struct host_cmd_req_config_ps req = {};
+
+	req.hdr = INIT_CMD_HDR(req, HOST_CMD_ID_CONFIG_PS, 0);
+	req.enabled = (u8)enabled;
+
+	return mm81x_cmd_tx(mors, NULL, (struct host_cmd_req *)&req, 0,
+			    HOST_CMD_POWERSAVE_TIMEOUT_MS);
+}
+
+/**
+ * mm81x_cmd_cfg_qos() - program EDCA/QoS parameters for one access category.
+ * @mors: driver instance.
+ * @params: queue parameters (ACI, AIFS, CWmin/CWmax, TXOP, U-APSD).
+ *
+ * Return: 0 on success or a mm81x_cmd_tx() error code.
+ */
+int mm81x_cmd_cfg_qos(struct mm81x *mors, struct mm81x_queue_params *params)
+{
+	struct host_cmd_req_set_qos_params req = {};
+
+	req.hdr = INIT_CMD_HDR(req, HOST_CMD_ID_SET_QOS_PARAMS, 0);
+	req.uapsd = params->uapsd;
+	req.queue_idx = params->aci;
+	req.aifs_slot_count = params->aifs;
+	req.contention_window_min = cpu_to_le16(params->cw_min);
+	req.contention_window_max = cpu_to_le16(params->cw_max);
+	req.max_txop_usec = cpu_to_le32(params->txop);
+
+	return mm81x_cmd_tx(mors, NULL, (struct host_cmd_req *)&req, 0, 0);
+}
+
+/**
+ * mm81x_cmd_rm_if() - remove a firmware interface.
+ * @mors: driver instance.
+ * @vif_id: interface to remove.
+ *
+ * Return: 0 on success or a mm81x_cmd_tx() error code.
+ */
+int mm81x_cmd_rm_if(struct mm81x *mors, u16 vif_id)
+{
+	struct host_cmd_req_remove_interface req = {};
+
+	req.hdr = INIT_CMD_HDR(req, HOST_CMD_ID_REMOVE_INTERFACE, vif_id);
+
+	return mm81x_cmd_tx(mors, NULL, (struct host_cmd_req *)&req, 0, 0);
+}
+
+/**
+ * mm81x_cmd_set_frag_threshold() - set the fragmentation threshold.
+ * @mors: driver instance.
+ * @frag_threshold: new threshold value.
+ *
+ * Implemented via the generic get/set parameter command.
+ *
+ * Return: 0 on success or a mm81x_cmd_tx() error code.
+ */
+int mm81x_cmd_set_frag_threshold(struct mm81x *mors, u32 frag_threshold)
+{
+	struct host_cmd_req_get_set_generic_param req = {};
+
+	req.hdr = INIT_CMD_HDR(req, HOST_CMD_ID_GET_SET_GENERIC_PARAM, 0);
+	req.param_id = cpu_to_le32(HOST_CMD_PARAM_ID_FRAGMENT_THRESHOLD);
+	req.action = cpu_to_le32(HOST_CMD_PARAM_ACTION_SET);
+	req.value = cpu_to_le32(frag_threshold);
+
+	return mm81x_cmd_tx(mors, NULL, (struct host_cmd_req *)&req, 0, 0);
+}
+
+/**
+ * mm81x_cmd_get_disabled_channels() - fetch the disabled-channel list.
+ * @mors: driver instance.
+ * @resp: caller-provided response buffer.
+ * @resp_len: size of @resp in bytes.
+ *
+ * Return: 0 on success or a mm81x_cmd_tx() error code.
+ */
+int mm81x_cmd_get_disabled_channels(
+	struct mm81x *mors, struct host_cmd_resp_get_disabled_channels *resp,
+	uint resp_len)
+{
+	struct host_cmd_req req = {};
+
+	req.hdr = INIT_CMD_HDR(req, HOST_CMD_ID_GET_DISABLED_CHANNELS, 0);
+
+	return mm81x_cmd_tx(mors, (struct host_cmd_resp *)resp, &req,
+			    resp_len, 0);
+}
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 03/31] wifi: mm81x: add command_defs.h
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 01/31] wifi: mm81x: add bus.h Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 02/31] wifi: mm81x: add command.c Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 04/31] wifi: mm81x: add command.h Lachlan Hodges
` (28 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
.../wireless/morsemicro/mm81x/command_defs.h | 1658 +++++++++++++++++
1 file changed, 1658 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/command_defs.h
diff --git a/drivers/net/wireless/morsemicro/mm81x/command_defs.h b/drivers/net/wireless/morsemicro/mm81x/command_defs.h
new file mode 100644
index 000000000000..91a4ac09ad80
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/command_defs.h
@@ -0,0 +1,1658 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+#ifndef _MM81X_COMMAND_DEFS_H_
+#define _MM81X_COMMAND_DEFS_H_
+
+#include <linux/types.h>
+
+/*
+ * Aliases for little-endian fields whose value is interpreted as signed
+ * (e.g. power_qdbm) — presumably kept distinct for readability; the
+ * underlying sparse type is the same.
+ */
+#define __sle16 __le16
+#define __sle32 __le32
+#define __sle64 __le64
+
+/* Version of the host/firmware command interface described by this file. */
+#define HOST_CMD_SEMVER_MAJOR 56
+#define HOST_CMD_SEMVER_MINOR 17
+#define HOST_CMD_SEMVER_PATCH 0
+
+/* host_cmd_header.flags: message kind (request, response or event). */
+#define HOST_CMD_TYPE_REQ BIT(0)
+#define HOST_CMD_TYPE_RESP BIT(1)
+#define HOST_CMD_TYPE_EVT BIT(2)
+
+#define HOST_CMD_SSID_MAX_LEN 32
+#define HOST_CMD_MAC_ADDR_LEN 6
+
+/*
+ * Firmware command, vendor-command and event identifiers
+ * (host_cmd_header.message_id). Values are part of the wire protocol and
+ * must not be renumbered. Rough ranges as used below: 0x0xxx standard
+ * commands, 0x1xxx RF/PHY tuning, 0x2xxx statistics, 0x4xxx events from
+ * firmware, 0x8xxx/0xAxxx vendor/debug commands.
+ */
+enum host_cmd_id {
+	HOST_CMD_ID_SET_CHANNEL = 0x0001,
+	HOST_CMD_ID_GET_CHANNEL = 0x001D,
+	HOST_CMD_ID_GET_CHANNEL_FULL = 0x0013,
+	HOST_CMD_ID_GET_CHANNEL_DTIM = 0x001C,
+	HOST_CMD_ID_GET_VERSION = 0x0002,
+	HOST_CMD_ID_SET_TXPOWER = 0x0003,
+	HOST_CMD_ID_GET_MAX_TXPOWER = 0x0024,
+	HOST_CMD_ID_ADD_INTERFACE = 0x0004,
+	HOST_CMD_ID_REMOVE_INTERFACE = 0x0005,
+	HOST_CMD_ID_BSS_CONFIG = 0x0006,
+	HOST_CMD_ID_SCAN_CONFIG = 0x0010,
+	HOST_CMD_ID_SET_QOS_PARAMS = 0x0011,
+	HOST_CMD_ID_GET_QOS_PARAMS = 0x0012,
+	HOST_CMD_ID_SET_STA_STATE = 0x0014,
+	HOST_CMD_ID_SET_BSS_COLOR = 0x0015,
+	HOST_CMD_ID_CONFIG_PS = 0x0016,
+	HOST_CMD_ID_HEALTH_CHECK = 0x0019,
+	HOST_CMD_ID_CTS_SELF_PS = 0x001A,
+	HOST_CMD_ID_DTIM_CHANNEL_ENABLE = 0x001B,
+	HOST_CMD_ID_ARP_OFFLOAD = 0x0020,
+	HOST_CMD_ID_SET_LONG_SLEEP_CONFIG = 0x0021,
+	HOST_CMD_ID_SET_DUTY_CYCLE = 0x0022,
+	HOST_CMD_ID_GET_DUTY_CYCLE = 0x0023,
+	HOST_CMD_ID_GET_CAPABILITIES = 0x0025,
+	HOST_CMD_ID_TWT_AGREEMENT_INSTALL = 0x0026,
+	HOST_CMD_ID_TWT_AGREEMENT_VALIDATE = 0x0036,
+	HOST_CMD_ID_TWT_AGREEMENT_REMOVE = 0x0027,
+	HOST_CMD_ID_GET_TSF = 0x0028,
+	HOST_CMD_ID_MAC_ADDR = 0x0029,
+	HOST_CMD_ID_MPSW_CONFIG = 0x0030,
+	HOST_CMD_ID_INSTALL_KEY = 0x000A,
+	HOST_CMD_ID_DISABLE_KEY = 0x000B,
+	HOST_CMD_ID_DHCP_OFFLOAD = 0x0032,
+	HOST_CMD_ID_SET_KEEP_ALIVE_OFFLOAD = 0x0033,
+	HOST_CMD_ID_UPDATE_OUI_FILTER = 0x0034,
+	HOST_CMD_ID_IBSS_CONFIG = 0x0035,
+	HOST_CMD_ID_OCS = 0x0038,
+	HOST_CMD_ID_MESH_CONFIG = 0x0039,
+	HOST_CMD_ID_SET_OFFSET_TSF = 0x003A,
+	HOST_CMD_ID_GET_CHANNEL_USAGE = 0x003B,
+	HOST_CMD_ID_MCAST_FILTER = 0x003C,
+	HOST_CMD_ID_BSS_BEACON_CONFIG = 0x003D,
+	HOST_CMD_ID_UAPSD_CONFIG = 0x0040,
+	HOST_CMD_ID_PAGE_SLICING_CONFIG = 0x0043,
+	HOST_CMD_ID_HW_SCAN = 0x0044,
+	HOST_CMD_ID_SET_WHITELIST = 0x0045,
+	HOST_CMD_ID_ARP_PERIODIC_REFRESH = 0x0046,
+	HOST_CMD_ID_SET_TCP_KEEPALIVE = 0x0047,
+	HOST_CMD_ID_FORCE_POWER_MODE = 0x0048,
+	HOST_CMD_ID_LI_SLEEP = 0x0049,
+	HOST_CMD_ID_GET_DISABLED_CHANNELS = 0x004A,
+	HOST_CMD_ID_SET_CQM_RSSI = 0x004F,
+	HOST_CMD_ID_GET_APF_CAPABILITIES = 0x0050,
+	HOST_CMD_ID_READ_WRITE_APF = 0x0051,
+	HOST_CMD_ID_BSSID_SET = 0x0052,
+	HOST_CMD_ID_BEACON_OFFLOAD = 0x0053,
+	HOST_CMD_ID_PROBE_RESPONSE_OFFLOAD = 0x0054,
+	/* Statistics commands. */
+	HOST_CMD_ID_HOST_STATS_LOG = 0x2007,
+	HOST_CMD_ID_HOST_STATS_RESET = 0x2008,
+	HOST_CMD_ID_MAC_STATS_LOG = 0x200C,
+	HOST_CMD_ID_MAC_STATS_RESET = 0x200D,
+	HOST_CMD_ID_UPHY_STATS_LOG = 0x200E,
+	HOST_CMD_ID_UPHY_STATS_RESET = 0x200F,
+	/* Vendor/debug commands. */
+	HOST_CMD_ID_SET_STA_TYPE = 0xA000,
+	HOST_CMD_ID_SET_ENC_MODE = 0xA001,
+	HOST_CMD_ID_TEST_BA = 0xA002,
+	HOST_CMD_ID_SET_LISTEN_INTERVAL = 0xA003,
+	HOST_CMD_ID_SET_AMPDU = 0xA004,
+	HOST_CMD_ID_COREDUMP = 0xA006,
+	HOST_CMD_ID_SET_S1G_OP_CLASS = 0xA007,
+	HOST_CMD_ID_SEND_WAKE_ACTION_FRAME = 0xA008,
+	HOST_CMD_ID_VENDOR_IE_CONFIG = 0xA009,
+	HOST_CMD_ID_SET_TWT_CONF = 0xA010,
+	HOST_CMD_ID_GET_AVAILABLE_CHANNELS = 0xA011,
+	HOST_CMD_ID_SET_ECSA_S1G_INFO = 0xA012,
+	HOST_CMD_ID_GET_HW_VERSION = 0xA013,
+	HOST_CMD_ID_CAC = 0xA014,
+	HOST_CMD_ID_DRIVER_SET_DUTY_CYCLE = 0xA015,
+	HOST_CMD_ID_OCS_DRIVER = 0xA017,
+	HOST_CMD_ID_MBSSID = 0xA016,
+	HOST_CMD_ID_SET_MESH_CONFIG = 0xA018,
+	HOST_CMD_ID_SET_MCBA_CONF = 0xA019,
+	HOST_CMD_ID_DYNAMIC_PEERING_CONFIG = 0xA020,
+	HOST_CMD_ID_CONFIG_RAW = 0xA021,
+	HOST_CMD_ID_CONFIG_BSS_STATS = 0xA022,
+	/* RF/PHY tuning commands. */
+	HOST_CMD_ID_GET_RSSI = 0x1002,
+	HOST_CMD_ID_SET_IFS = 0x1003,
+	HOST_CMD_ID_SET_FEM_SETTINGS = 0x1005,
+	HOST_CMD_ID_SET_TXOP = 0x1008,
+	HOST_CMD_ID_SET_CONTROL_RESPONSE = 0x1009,
+	HOST_CMD_ID_SET_PERIODIC_CAL = 0x100A,
+	HOST_CMD_ID_SET_BCN_RSSI_THRESHOLD = 0x100B,
+	HOST_CMD_ID_SET_TX_PKT_LIFETIME_USECS = 0x100C,
+	HOST_CMD_ID_SET_PHYSM_WATCHDOG = 0x100D,
+	HOST_CMD_ID_TX_POLAR = 0x100E,
+	/* Events sent by the firmware to the host. */
+	HOST_CMD_ID_EVT_STA_STATE = 0x4001,
+	HOST_CMD_ID_EVT_BEACON_LOSS = 0x4002,
+	HOST_CMD_ID_EVT_SIG_FIELD_ERROR = 0x4003,
+	HOST_CMD_ID_EVT_UMAC_TRAFFIC_CONTROL = 0x4004,
+	HOST_CMD_ID_EVT_DHCP_LEASE_UPDATE = 0x4005,
+	HOST_CMD_ID_EVT_OCS_DONE = 0x4006,
+	HOST_CMD_ID_EVT_HW_SCAN_DONE = 0x4011,
+	HOST_CMD_ID_EVT_CHANNEL_USAGE = 0x4012,
+	HOST_CMD_ID_EVT_CONNECTION_LOSS = 0x4013,
+	HOST_CMD_ID_EVT_SCHED_SCAN_RESULTS = 0x4014,
+	HOST_CMD_ID_EVT_CQM_RSSI_NOTIFY = 0x4015,
+	HOST_CMD_ID_EVT_SCAN_DONE = 0x4007,
+	HOST_CMD_ID_EVT_SCAN_RESULT = 0x4008,
+	HOST_CMD_ID_EVT_CONNECTED = 0x4009,
+	HOST_CMD_ID_EVT_DISCONNECTED = 0x4010,
+	HOST_CMD_ID_EVT_BEACON_FILTER_MATCH = 0x4016,
+	HOST_CMD_ID_SET_CAPABILITIES = 0x8118,
+	HOST_CMD_ID_SET_TRANSMISSION_RATE = 0x8009,
+	HOST_CMD_ID_FORCE_ASSERT = 0x800E,
+	HOST_CMD_ID_GET_SET_GENERIC_PARAM = 0x003E,
+};
+
+/* A MAC address as carried on the command interface. */
+struct host_cmd_mac_addr {
+	u8 octet[HOST_CMD_MAC_ADDR_LEN];
+};
+
+/* Sub-commands of HOST_CMD_ID_OCS (off-channel sounding). */
+enum host_cmd_ocs_subcmd {
+	HOST_CMD_OCS_SUBCMD_CONFIG = 1,
+	HOST_CMD_OCS_SUBCMD_STATUS = 2,
+};
+
+/* Option bits for headless (host-asleep) configuration. */
+enum host_cmd_headless_cfg_option {
+	HOST_CMD_HEADLESS_CFG_OPTION_KEEP_IFACES = BIT(0),
+	HOST_CMD_HEADLESS_CFG_OPTION_BUFFER_RX = BIT(1),
+	HOST_CMD_HEADLESS_CFG_OPTION_NOTIFY_ON_ANY_RX = BIT(2),
+};
+
+/*
+ * Header common to every request, response and event on the command
+ * interface. All fields are little-endian on the wire.
+ */
+struct host_cmd_header {
+	__le16 flags;		/* HOST_CMD_TYPE_* direction/kind bits */
+	__le16 message_id;	/* enum host_cmd_id */
+	__le16 len;		/* payload length, excluding this header */
+	__le16 host_id;		/* sequence + retry token set by the host */
+	__le16 vif_id;		/* target interface */
+	__le16 pad;
+};
+
+/* Sentinels for "field not set" in channel-related commands. */
+#define HOST_CMD_CHANNEL_BW_NOT_SET 0xFF
+#define HOST_CMD_CHANNEL_IDX_NOT_SET 0xFF
+#define HOST_CMD_CHANNEL_FREQ_NOT_SET 0xFFFFFFFF
+
+/* 802.11 protocol mode; only 802.11ah (S1G) is defined. */
+enum host_cmd_dot11_proto_mode {
+	HOST_CMD_DOT11_PROTO_MODE_AH = 0,
+};
+
+/* HOST_CMD_ID_SET_CHANNEL request: tune to an S1G channel. */
+struct host_cmd_req_set_channel {
+	struct host_cmd_header hdr;
+	__le32 op_chan_freq_hz;
+	u8 op_bw_mhz;
+	u8 pri_bw_mhz;
+	u8 pri_1mhz_chan_idx;
+	u8 dot11_mode;		/* enum host_cmd_dot11_proto_mode */
+	u8 __deprecated_reg_tx_power_set;
+	u8 is_off_channel;
+} __packed;
+
+struct host_cmd_resp_set_channel {
+	struct host_cmd_header hdr;
+	__le32 status;
+	__sle32 power_qdbm;	/* resulting TX power in quarter dBm */
+} __packed;
+
+/* HOST_CMD_ID_GET_CHANNEL request/response. */
+struct host_cmd_req_get_channel {
+	struct host_cmd_header hdr;
+} __packed;
+
+struct host_cmd_resp_get_channel {
+	struct host_cmd_header hdr;
+	__le32 status;
+	__le32 op_chan_freq_hz;
+	u8 op_chan_bw_mhz;
+	u8 pri_chan_bw_mhz;
+	u8 pri_1mhz_chan_idx;
+} __packed;
+
+#define HOST_CMD_MAX_VERSION_LEN 128
+
+/* HOST_CMD_ID_GET_VERSION request/response. */
+struct host_cmd_req_get_version {
+	struct host_cmd_header hdr;
+} __packed;
+
+struct host_cmd_resp_get_version {
+	struct host_cmd_header hdr;
+	__le32 status;
+	__sle32 length;		/* number of valid bytes in version[] */
+	u8 version[];
+} __packed;
+
+/* HOST_CMD_ID_SET_TXPOWER request/response (power in quarter dBm). */
+struct host_cmd_req_set_txpower {
+	struct host_cmd_header hdr;
+	__sle32 power_qdbm;
+} __packed;
+
+struct host_cmd_resp_set_txpower {
+	struct host_cmd_header hdr;
+	__le32 status;
+	__sle32 power_qdbm;	/* power actually applied */
+} __packed;
+
+/* HOST_CMD_ID_GET_MAX_TXPOWER request/response. */
+struct host_cmd_req_get_max_txpower {
+	struct host_cmd_header hdr;
+} __packed;
+
+struct host_cmd_resp_get_max_txpower {
+	struct host_cmd_header hdr;
+	__le32 status;
+	__sle32 power_qdbm;
+} __packed;
+
+/* Interface roles understood by the firmware. */
+enum host_cmd_interface_type {
+	HOST_CMD_INTERFACE_TYPE_INVALID = 0,
+	HOST_CMD_INTERFACE_TYPE_STA = 1,
+	HOST_CMD_INTERFACE_TYPE_AP = 2,
+	HOST_CMD_INTERFACE_TYPE_MON = 3,
+	HOST_CMD_INTERFACE_TYPE_ADHOC = 4,
+	HOST_CMD_INTERFACE_TYPE_MESH = 5,
+	HOST_CMD_INTERFACE_TYPE_LAST = HOST_CMD_INTERFACE_TYPE_MESH,
+};
+
+/* HOST_CMD_ID_ADD_INTERFACE request/response. */
+struct host_cmd_req_add_interface {
+	struct host_cmd_header hdr;
+	struct host_cmd_mac_addr addr;
+	__le32 interface_type;	/* enum host_cmd_interface_type */
+} __packed;
+
+/* The assigned interface id is returned in hdr.vif_id. */
+struct host_cmd_resp_add_interface {
+	struct host_cmd_header hdr;
+	__le32 status;
+} __packed;
+
+/* HOST_CMD_ID_REMOVE_INTERFACE request/response (target in hdr.vif_id). */
+struct host_cmd_req_remove_interface {
+	struct host_cmd_header hdr;
+} __packed;
+
+struct host_cmd_resp_remove_interface {
+	struct host_cmd_header hdr;
+	__le32 status;
+} __packed;
+
+/* HOST_CMD_ID_BSS_CONFIG request/response. */
+struct host_cmd_req_bss_config {
+	struct host_cmd_header hdr;
+	__le16 beacon_interval_tu;
+	__le16 dtim_period;
+	u8 __padding[2];
+	__le32 cssid;		/* compressed SSID */
+} __packed;
+
+struct host_cmd_resp_bss_config {
+	struct host_cmd_header hdr;
+	__le32 status;
+} __packed;
+
+/* HOST_CMD_ID_SCAN_CONFIG request/response. */
+struct host_cmd_req_scan_config {
+	struct host_cmd_header hdr;
+	u8 enabled;
+	u8 is_survey;
+} __packed;
+
+struct host_cmd_resp_scan_config {
+	struct host_cmd_header hdr;
+	__le32 status;
+} __packed;
+
+/* HOST_CMD_ID_SET_QOS_PARAMS request/response (one access category). */
+struct host_cmd_req_set_qos_params {
+	struct host_cmd_header hdr;
+	u8 uapsd;
+	u8 queue_idx;
+	u8 aifs_slot_count;
+	__le16 contention_window_min;
+	__le16 contention_window_max;
+	__le32 max_txop_usec;
+} __packed;
+
+struct host_cmd_resp_set_qos_params {
+	struct host_cmd_header hdr;
+	__le32 status;
+} __packed;
+
+/* HOST_CMD_ID_GET_QOS_PARAMS request/response. */
+struct host_cmd_req_get_qos_params {
+	struct host_cmd_header hdr;
+	u8 queue_idx;
+} __packed;
+
+struct host_cmd_resp_get_qos_params {
+	struct host_cmd_header hdr;
+	__le32 status;
+	u8 aifs_slot_count;
+	__le16 contention_window_min;
+	__le16 contention_window_max;
+	__le32 max_txop_usec;
+} __packed;
+
+/* HOST_CMD_ID_SET_STA_STATE request/response. */
+struct host_cmd_req_set_sta_state {
+	struct host_cmd_header hdr;
+	u8 sta_addr[HOST_CMD_MAC_ADDR_LEN];
+	__le16 aid;
+	__le16 state;		/* mac80211 ieee80211_sta_state value */
+	u8 uapsd_queues;
+	__le32 flags;
+} __packed;
+
+struct host_cmd_resp_set_sta_state {
+	struct host_cmd_header hdr;
+	__le32 status;
+} __packed;
+
+/* HOST_CMD_ID_SET_BSS_COLOR request/response. */
+struct host_cmd_req_set_bss_color {
+	struct host_cmd_header hdr;
+	u8 bss_color;
+} __packed;
+
+struct host_cmd_resp_set_bss_color {
+	struct host_cmd_header hdr;
+	__le32 status;
+} __packed;
+
+/* HOST_CMD_ID_CONFIG_PS request/response. */
+struct host_cmd_req_config_ps {
+	struct host_cmd_header hdr;
+	u8 enabled;
+	u8 dynamic_ps_offload;
+} __packed;
+
+struct host_cmd_resp_config_ps {
+	struct host_cmd_header hdr;
+	__le32 status;
+} __packed;
+
+/* HOST_CMD_ID_HEALTH_CHECK request/response (firmware liveness probe). */
+struct host_cmd_req_health_check {
+	struct host_cmd_header hdr;
+} __packed;
+
+struct host_cmd_resp_health_check {
+	struct host_cmd_header hdr;
+	__le32 status;
+} __packed;
+
+/* HOST_CMD_ID_CTS_SELF_PS request/response. */
+struct host_cmd_req_cts_self_ps {
+	struct host_cmd_header hdr;
+	u8 enable;
+} __packed;
+
+struct host_cmd_resp_cts_self_ps {
+	struct host_cmd_header hdr;
+	__le32 status;
+} __packed;
+
+/* HOST_CMD_ID_DTIM_CHANNEL_ENABLE request/response. */
+struct host_cmd_req_dtim_channel_enable {
+	struct host_cmd_header hdr;
+	u8 enable;
+} __packed;
+
+struct host_cmd_resp_dtim_channel_enable {
+	struct host_cmd_header hdr;
+	__le32 status;
+} __packed;
+
+#define HOST_CMD_ARP_OFFLOAD_MAX_IP_ADDRESSES 4
+
+struct host_cmd_req_arp_offload {
+ struct host_cmd_header hdr;
+ __be32 ip_table[HOST_CMD_ARP_OFFLOAD_MAX_IP_ADDRESSES];
+} __packed;
+
+struct host_cmd_resp_arp_offload {
+ struct host_cmd_header hdr;
+ __le32 status;
+} __packed;
+
+struct host_cmd_req_set_long_sleep_config {
+ struct host_cmd_header hdr;
+ u8 enabled;
+} __packed;
+
+struct host_cmd_resp_set_long_sleep_config {
+ struct host_cmd_header hdr;
+ __le32 status;
+} __packed;
+
+#define HOST_CMD_DUTY_CYCLE_SET_CFG_DUTY_CYCLE BIT(0)
+#define HOST_CMD_DUTY_CYCLE_SET_CFG_OMIT_CONTROL_RESP BIT(1)
+#define HOST_CMD_DUTY_CYCLE_SET_CFG_EXT BIT(2)
+#define HOST_CMD_DUTY_CYCLE_SET_CFG_BURST_RECORD_UNIT BIT(3)
+
+enum host_cmd_duty_cycle_mode {
+ HOST_CMD_DUTY_CYCLE_MODE_SPREAD = 0,
+ HOST_CMD_DUTY_CYCLE_MODE_BURST = 1,
+ HOST_CMD_DUTY_CYCLE_MODE_LAST = HOST_CMD_DUTY_CYCLE_MODE_BURST,
+};
+
+struct host_cmd_duty_cycle_configuration {
+ u8 omit_control_responses;
+ __le32 duty_cycle;
+} __packed;
+
+struct host_cmd_duty_cycle_set_configuration_ext {
+ __le32 burst_record_unit_us;
+ u8 mode;
+} __packed;
+
+struct host_cmd_duty_cycle_configuration_ext {
+ __le32 airtime_remaining_us;
+ __le32 burst_window_duration_us;
+ struct host_cmd_duty_cycle_set_configuration_ext set;
+} __packed;
+
+struct host_cmd_req_set_duty_cycle {
+ struct host_cmd_header hdr;
+ struct host_cmd_duty_cycle_configuration config;
+ u8 set_cfgs;
+ struct host_cmd_duty_cycle_set_configuration_ext config_ext;
+} __packed;
+
+struct host_cmd_resp_get_duty_cycle {
+ struct host_cmd_header hdr;
+ __le32 status;
+ struct host_cmd_duty_cycle_configuration config;
+ struct host_cmd_duty_cycle_configuration_ext config_ext;
+} __packed;
+
+#define HOST_CMD_SET_S1G_CAP_FLAGS BIT(0)
+#define HOST_CMD_SET_S1G_CAP_AMPDU_MSS BIT(1)
+#define HOST_CMD_SET_S1G_CAP_BEAM_STS BIT(2)
+#define HOST_CMD_SET_S1G_CAP_NUM_SOUND_DIMS BIT(3)
+#define HOST_CMD_SET_S1G_CAP_MAX_AMPDU_LEXP BIT(4)
+#define HOST_CMD_SET_MORSE_CAP_MMSS_OFFSET BIT(5)
+#define HOST_CMD_S1G_CAPABILITY_FLAGS_WIDTH 4
+
+struct host_cmd_mm_capabilities {
+ __le32 flags[HOST_CMD_S1G_CAPABILITY_FLAGS_WIDTH];
+ u8 ampdu_mss;
+ u8 beamformee_sts_capability;
+ u8 number_sounding_dimensions;
+ u8 maximum_ampdu_length_exponent;
+} __packed;
+
+struct host_cmd_req_get_capabilities {
+ struct host_cmd_header hdr;
+} __packed;
+
+struct host_cmd_resp_get_capabilities {
+ struct host_cmd_header hdr;
+ __le32 status;
+ struct host_cmd_mm_capabilities capabilities;
+ u8 morse_mmss_offset;
+} __packed;
+
+#define HOST_CMD_DOT11_TWT_AGREEMENT_MAX_LEN 20
+
+struct host_cmd_req_twt_agreement_install {
+ struct host_cmd_header hdr;
+ u8 flow_id;
+ u8 agreement_len;
+ u8 agreement[HOST_CMD_DOT11_TWT_AGREEMENT_MAX_LEN];
+} __packed;
+
+struct host_cmd_resp_twt_agreement_install {
+ struct host_cmd_header hdr;
+ __le32 status;
+} __packed;
+
+struct host_cmd_req_twt_agreement_validate {
+ struct host_cmd_header hdr;
+ u8 flow_id;
+ u8 agreement_len;
+ u8 agreement[HOST_CMD_DOT11_TWT_AGREEMENT_MAX_LEN];
+} __packed;
+
+struct host_cmd_resp_twt_agreement_validate {
+ struct host_cmd_header hdr;
+ __le32 status;
+} __packed;
+
+struct host_cmd_req_twt_agreement_remove {
+ struct host_cmd_header hdr;
+ u8 flow_id;
+} __packed;
+
+struct host_cmd_req_get_tsf {
+ struct host_cmd_header hdr;
+} __packed;
+
+struct host_cmd_resp_get_tsf {
+ struct host_cmd_header hdr;
+ __le32 status;
+ __le64 now_tsf;
+ __le64 now_chip_ts;
+} __packed;
+
+struct host_cmd_req_mac_addr {
+ struct host_cmd_header hdr;
+ u8 write;
+ u8 octet[HOST_CMD_MAC_ADDR_LEN];
+} __packed;
+
+struct host_cmd_resp_mac_addr {
+ struct host_cmd_header hdr;
+ __le32 status;
+ u8 octet[HOST_CMD_MAC_ADDR_LEN];
+} __packed;
+
+#define HOST_CMD_SET_MPSW_CFG_AIRTIME_BOUNDS BIT(0)
+#define HOST_CMD_SET_MPSW_CFG_PKT_SPC_WIN_LEN BIT(1)
+#define HOST_CMD_SET_MPSW_CFG_ENABLED BIT(2)
+
+struct host_cmd_mpsw_configuration {
+ __le32 airtime_max_us;
+ __le32 airtime_min_us;
+ __le32 packet_space_window_length_us;
+ u8 enable;
+} __packed;
+
+struct host_cmd_req_mpsw_config {
+ struct host_cmd_header hdr;
+ struct host_cmd_mpsw_configuration config;
+ u8 set_cfgs;
+} __packed;
+
+struct host_cmd_resp_mpsw_config {
+ struct host_cmd_header hdr;
+ __le32 status;
+ struct host_cmd_mpsw_configuration config;
+} __packed;
+
+#define HOST_CMD_MAX_KEY_LEN 32
+
+enum host_cmd_key_cipher {
+ HOST_CMD_KEY_CIPHER_INVALID = 0,
+ HOST_CMD_KEY_CIPHER_AES_CCM = 1,
+ HOST_CMD_KEY_CIPHER_AES_GCM = 2,
+ HOST_CMD_KEY_CIPHER_AES_CMAC = 3,
+ HOST_CMD_KEY_CIPHER_AES_GMAC = 4,
+ HOST_CMD_KEY_CIPHER_LAST = HOST_CMD_KEY_CIPHER_AES_GMAC,
+};
+
+enum host_cmd_aes_key_len {
+ HOST_CMD_AES_KEY_LEN_INVALID = 0,
+ HOST_CMD_AES_KEY_LEN_LENGTH_128 = 1,
+ HOST_CMD_AES_KEY_LEN_LENGTH_256 = 2,
+ HOST_CMD_AES_KEY_LEN_LENGTH_LAST = HOST_CMD_AES_KEY_LEN_LENGTH_256,
+};
+
+enum host_cmd_temporal_key_type {
+ HOST_CMD_TEMPORAL_KEY_TYPE_INVALID = 0,
+ HOST_CMD_TEMPORAL_KEY_TYPE_GTK = 1,
+ HOST_CMD_TEMPORAL_KEY_TYPE_PTK = 2,
+ HOST_CMD_TEMPORAL_KEY_TYPE_IGTK = 3,
+ HOST_CMD_TEMPORAL_KEY_TYPE_LAST = HOST_CMD_TEMPORAL_KEY_TYPE_IGTK,
+};
+
+struct host_cmd_req_install_key {
+ struct host_cmd_header hdr;
+ __le64 pn;
+ __le32 aid;
+ u8 key_idx;
+ u8 cipher;
+ u8 key_length;
+ u8 key_type;
+ u8 __padding[2];
+ u8 key[HOST_CMD_MAX_KEY_LEN];
+} __packed;
+
+struct host_cmd_resp_install_key {
+ struct host_cmd_header hdr;
+ __le32 status;
+ u8 key_idx;
+} __packed;
+
+struct host_cmd_req_disable_key {
+ struct host_cmd_header hdr;
+ __le32 key_type;
+ __le32 aid;
+ u8 key_idx;
+} __packed;
+
+struct host_cmd_resp_disable_key {
+ struct host_cmd_header hdr;
+ __le32 status;
+} __packed;
+
+enum host_cmd_dhcp_opcode {
+ HOST_CMD_DHCP_OPCODE_ENABLE = 0,
+ HOST_CMD_DHCP_OPCODE_DO_DISCOVERY = 1,
+ HOST_CMD_DHCP_OPCODE_GET_LEASE = 2,
+ HOST_CMD_DHCP_OPCODE_CLEAR_LEASE = 3,
+ HOST_CMD_DHCP_OPCODE_RENEW_LEASE = 4,
+ HOST_CMD_DHCP_OPCODE_REBIND_LEASE = 5,
+ HOST_CMD_DHCP_OPCODE_SEND_LEASE_UPDATE = 6,
+};
+
+enum host_cmd_dhcp_retcode {
+ HOST_CMD_DHCP_RETCODE_SUCCESS = 0,
+ HOST_CMD_DHCP_RETCODE_NOT_ENABLED = 1,
+ HOST_CMD_DHCP_RETCODE_ALREADY_ENABLED = 2,
+ HOST_CMD_DHCP_RETCODE_NO_LEASE = 3,
+ HOST_CMD_DHCP_RETCODE_HAVE_LEASE = 4,
+ HOST_CMD_DHCP_RETCODE_BUSY = 5,
+ HOST_CMD_DHCP_RETCODE_BAD_VIF = 6,
+};
+
+struct host_cmd_req_dhcp_offload {
+ struct host_cmd_header hdr;
+ __le32 opcode;
+} __packed;
+
+struct host_cmd_resp_dhcp_offload {
+ struct host_cmd_header hdr;
+ __le32 status;
+ __le32 retcode;
+ __le32 my_ip;
+ __le32 netmask;
+ __le32 router;
+ __le32 dns;
+} __packed;
+
+struct host_cmd_req_set_keep_alive_offload {
+ struct host_cmd_header hdr;
+ __le16 bss_max_idle_period;
+ u8 interpret_as_11ah;
+} __packed;
+
+#define HOST_CMD_MAX_OUI_FILTERS 5
+#define HOST_CMD_OUI_SIZE 3
+#define HOST_CMD_MAX_OUI_FILTER_ARRAY_SIZE 15
+
+struct host_cmd_req_update_oui_filter {
+ struct host_cmd_header hdr;
+ u8 n_ouis;
+ u8 ouis[HOST_CMD_MAX_OUI_FILTERS][HOST_CMD_OUI_SIZE];
+} __packed;
+
+enum host_cmd_ibss_config_opcode {
+ HOST_CMD_IBSS_CONFIG_OPCODE_CREATE = 0,
+ HOST_CMD_IBSS_CONFIG_OPCODE_JOIN = 1,
+ HOST_CMD_IBSS_CONFIG_OPCODE_STOP = 2,
+};
+
+struct host_cmd_req_ibss_config {
+ struct host_cmd_header hdr;
+ u8 ibss_bssid[HOST_CMD_MAC_ADDR_LEN];
+ u8 ibss_cfg_opcode;
+ u8 ibss_probe_filtering;
+} __packed;
+
+enum host_cmd_ocs_type {
+ HOST_CMD_OCS_TYPE_QNULL = 0,
+ HOST_CMD_OCS_TYPE_RAW = 1,
+};
+
+struct host_cmd_ocs_config_req {
+ __le32 op_channel_freq_hz;
+ u8 op_channel_bw_mhz;
+ u8 pri_channel_bw_mhz;
+ u8 pri_1mhz_channel_index;
+ __le16 aid;
+ u8 type;
+} __packed;
+
+struct host_cmd_ocs_status_resp {
+ u8 running;
+} __packed;
+
+/* OCS request: subcmd selects which union member is meaningful.
+ * Zero-length arrays (u8 opaque[0]) are deprecated in the kernel; a bare
+ * flexible array member is not legal inside a union, so use
+ * DECLARE_FLEX_ARRAY() from <linux/stddef.h> (verify the header is pulled
+ * in, directly or via <linux/types.h>).
+ */
+struct host_cmd_req_ocs {
+ struct host_cmd_header hdr;
+ __le32 subcmd;
+ union {
+ DECLARE_FLEX_ARRAY(u8, opaque);
+ struct host_cmd_ocs_config_req config;
+ };
+} __packed;
+
+/* OCS response: subcmd echoes the request and selects the union member.
+ * Replace deprecated zero-length array with DECLARE_FLEX_ARRAY(), the
+ * kernel idiom for a flexible array member inside a union.
+ */
+struct host_cmd_resp_ocs {
+ struct host_cmd_header hdr;
+ __le32 status;
+ __le32 subcmd;
+ union {
+ DECLARE_FLEX_ARRAY(u8, opaque);
+ struct host_cmd_ocs_status_resp ocs_status;
+ };
+} __packed;
+
+enum host_cmd_mesh_config_opcode {
+ HOST_CMD_MESH_CONFIG_OPCODE_START = 0,
+ HOST_CMD_MESH_CONFIG_OPCODE_STOP = 1,
+};
+
+struct host_cmd_req_mesh_config {
+ struct host_cmd_header hdr;
+ u8 mesh_cfg_opcode;
+ u8 enable_beaconing;
+ u8 mbca_config;
+ u8 min_beacon_gap_ms;
+ __le16 mbss_start_scan_duration_ms;
+ __le16 tbtt_adj_timer_interval_ms;
+} __packed;
+
+struct host_cmd_req_set_offset_tsf {
+ struct host_cmd_header hdr;
+ __sle64 offset_tsf;
+} __packed;
+
+struct host_cmd_req_get_channel_usage {
+ struct host_cmd_header hdr;
+} __packed;
+
+struct host_cmd_resp_get_channel_usage {
+ struct host_cmd_header hdr;
+ __le32 status;
+ __le64 time_listen;
+ __le64 busy_time;
+ __le32 freq_hz;
+ s8 noise;
+ u8 bw_mhz;
+} __packed;
+
+#define HOST_CMD_MAX_MCAST_FILTERS 12
+
+/* Download the multicast filter table (up to HOST_CMD_MAX_MCAST_FILTERS
+ * entries, per the define above).
+ * NOTE(review): hw_addr is an array of __le32 although multicast MAC
+ * addresses are 6 octets — presumably the firmware consumes a packed or
+ * hashed 32-bit form; confirm the on-wire entry layout, since nothing in
+ * this header establishes it.
+ */
+struct host_cmd_req_mcast_filter {
+ struct host_cmd_header hdr;
+ u8 count; /* number of valid entries in hw_addr[] */
+ __le32 hw_addr[];
+} __packed;
+
+struct host_cmd_req_bss_beacon_config {
+ struct host_cmd_header hdr;
+ u8 enable;
+} __packed;
+
+struct host_cmd_resp_bss_beacon_config {
+ struct host_cmd_header hdr;
+ __le32 status;
+ __le16 interface_id;
+} __packed;
+
+struct host_cmd_req_uapsd_config {
+ struct host_cmd_header hdr;
+ u8 auto_trigger_enabled;
+ __le32 auto_trigger_timeout;
+} __packed;
+
+struct host_cmd_resp_uapsd_config {
+ struct host_cmd_header hdr;
+ __le32 status;
+ u8 auto_trigger_enabled;
+} __packed;
+
+struct host_cmd_req_page_slicing_config {
+ struct host_cmd_header hdr;
+ u8 enable;
+} __packed;
+
+#define HOST_CMD_HW_SCAN_FLAGS_START BIT(0)
+#define HOST_CMD_HW_SCAN_FLAGS_ABORT BIT(1)
+#define HOST_CMD_HW_SCAN_FLAGS_SURVEY BIT(2)
+#define HOST_CMD_HW_SCAN_FLAGS_STORE BIT(3)
+#define HOST_CMD_HW_SCAN_FLAGS_1MHZ_PROBES BIT(4)
+#define HOST_CMD_HW_SCAN_FLAGS_SCHED_START BIT(5)
+#define HOST_CMD_HW_SCAN_FLAGS_SCHED_STOP BIT(6)
+#define HOST_CMD_HW_SCAN_FLAGS_PROBE_ON_DOZE_BEACON BIT(7)
+
+enum host_cmd_hw_scan_tlv_tag {
+ HOST_CMD_HW_SCAN_TLV_TAG_PAD = 0,
+ HOST_CMD_HW_SCAN_TLV_TAG_PROBE_REQ = 1,
+ HOST_CMD_HW_SCAN_TLV_TAG_CHAN_LIST = 2,
+ HOST_CMD_HW_SCAN_TLV_TAG_POWER_LIST = 3,
+ HOST_CMD_HW_SCAN_TLV_TAG_DWELL_ON_HOME = 4,
+ HOST_CMD_HW_SCAN_TLV_TAG_SCHED = 5,
+ HOST_CMD_HW_SCAN_TLV_TAG_FILTER = 6,
+ HOST_CMD_HW_SCAN_TLV_TAG_SCHED_PARAMS = 7,
+};
+
+struct host_cmd_hw_scan_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 value[];
+} __packed;
+
+struct host_cmd_req_hw_scan {
+ struct host_cmd_header hdr;
+ __le32 flags;
+ __le32 dwell_time_ms;
+ u8 variable[];
+} __packed;
+
+#define HOST_CMD_WHITELIST_FLAGS_CLEAR BIT(0)
+
+struct host_cmd_req_set_whitelist {
+ struct host_cmd_header hdr;
+ u8 flags;
+ u8 ip_protocol;
+ __be16 llc_protocol;
+ __be32 src_ip;
+ __be32 dest_ip;
+ __be32 netmask;
+ __be16 src_port;
+ __be16 dest_port;
+} __packed;
+
+struct host_cmd_arp_periodic_params {
+ __le32 refresh_period_s;
+ __le32 destination_ip;
+ u8 send_as_garp;
+} __packed;
+
+struct host_cmd_req_arp_periodic_refresh {
+ struct host_cmd_header hdr;
+ struct host_cmd_arp_periodic_params config;
+} __packed;
+
+#define HOST_CMD_TCP_KEEPALIVE_SET_CFG_PERIOD BIT(0)
+#define HOST_CMD_TCP_KEEPALIVE_SET_CFG_RETRY_COUNT BIT(1)
+#define HOST_CMD_TCP_KEEPALIVE_SET_CFG_RETRY_INTERVAL BIT(2)
+#define HOST_CMD_TCP_KEEPALIVE_SET_CFG_SRC_IP_ADDR BIT(3)
+#define HOST_CMD_TCP_KEEPALIVE_SET_CFG_DEST_IP_ADDR BIT(4)
+#define HOST_CMD_TCP_KEEPALIVE_SET_CFG_SRC_PORT BIT(5)
+#define HOST_CMD_TCP_KEEPALIVE_SET_CFG_DEST_PORT BIT(6)
+
+struct host_cmd_req_set_tcp_keepalive {
+ struct host_cmd_header hdr;
+ u8 enabled;
+ u8 retry_count;
+ u8 retry_interval_s;
+ u8 set_cfgs;
+ __be32 src_ip;
+ __be32 dest_ip;
+ __be16 src_port;
+ __be16 dest_port;
+ __le16 period_s;
+} __packed;
+
+enum host_cmd_power_mode {
+ HOST_CMD_POWER_MODE_SNOOZE = 0,
+ HOST_CMD_POWER_MODE_DEEP_SLEEP = 1,
+ HOST_CMD_POWER_MODE_HIBERNATE = 2,
+};
+
+struct host_cmd_req_force_power_mode {
+ struct host_cmd_header hdr;
+ __le32 mode;
+} __packed;
+
+struct host_cmd_req_li_sleep {
+ struct host_cmd_header hdr;
+ __le32 listen_interval;
+} __packed;
+
+struct host_cmd_disabled_channel_entry {
+ __le16 freq_100khz;
+ u8 bw_mhz;
+} __packed;
+
+struct host_cmd_resp_get_disabled_channels {
+ struct host_cmd_header hdr;
+ __le32 status;
+ __le32 n_channels;
+ struct host_cmd_disabled_channel_entry channels[];
+} __packed;
+
+struct host_cmd_req_set_cqm_rssi {
+ struct host_cmd_header hdr;
+ __sle32 threshold;
+ __le32 hysteresis;
+} __packed;
+
+struct host_cmd_req_get_apf_capabilities {
+ struct host_cmd_header hdr;
+} __packed;
+
+struct host_cmd_resp_get_apf_capabilities {
+ struct host_cmd_header hdr;
+ __le32 status;
+ __le32 max_length;
+ u8 version;
+} __packed;
+
+struct host_cmd_req_read_write_apf {
+ struct host_cmd_header hdr;
+ __le32 offset;
+ __le16 program_length;
+ u8 write;
+ u8 program[];
+} __packed;
+
+struct host_cmd_resp_read_write_apf {
+ struct host_cmd_header hdr;
+ __le32 status;
+ __le16 program_length;
+ u8 program[];
+} __packed;
+
+struct host_cmd_req_bssid_set {
+ struct host_cmd_header hdr;
+ struct host_cmd_mac_addr bssid;
+} __packed;
+
+#define HOST_CMD_BEACON_OFFLOAD_FLAGS_START BIT(0)
+#define HOST_CMD_BEACON_OFFLOAD_FLAGS_STOP BIT(1)
+#define HOST_CMD_BEACON_OFFLOAD_CSSID_LEN 4
+
+enum host_cmd_beacon_offload_tlv_tag {
+ HOST_CMD_BEACON_OFFLOAD_TLV_TAG_DTIM_CNT = 0,
+ HOST_CMD_BEACON_OFFLOAD_TLV_TAG_FRAME_CTRL = 1,
+ HOST_CMD_BEACON_OFFLOAD_TLV_TAG_CHANGE_SEQ = 2,
+ HOST_CMD_BEACON_OFFLOAD_TLV_TAG_CSSID = 3,
+ HOST_CMD_BEACON_OFFLOAD_TLV_TAG_IES = 4,
+ HOST_CMD_BEACON_OFFLOAD_TLV_TAG_TX_INFO = 5,
+};
+
+struct host_cmd_beacon_offload_tlv_hdr {
+ __le16 tag;
+ __le16 len;
+} __packed;
+
+struct host_cmd_beacon_offload_tlv_generic {
+ struct host_cmd_beacon_offload_tlv_hdr hdr;
+ u8 value[];
+} __packed;
+
+struct host_cmd_beacon_offload_tlv_dtim_cnt {
+ struct host_cmd_beacon_offload_tlv_hdr hdr;
+ __le16 dtim_cnt;
+} __packed;
+
+struct host_cmd_beacon_offload_tlv_frame_ctrl {
+ struct host_cmd_beacon_offload_tlv_hdr hdr;
+ u8 frame_ctrl[2];
+} __packed;
+
+struct host_cmd_beacon_offload_tlv_change_seq {
+ struct host_cmd_beacon_offload_tlv_hdr hdr;
+ __le16 change_seq;
+} __packed;
+
+struct host_cmd_beacon_offload_tlv_tx_info {
+ struct host_cmd_beacon_offload_tlv_hdr hdr;
+ u8 bw_mhz;
+} __packed;
+
+struct host_cmd_beacon_offload_tlv_cssid {
+ struct host_cmd_beacon_offload_tlv_hdr hdr;
+ u8 cssid[HOST_CMD_BEACON_OFFLOAD_CSSID_LEN];
+} __packed;
+
+struct host_cmd_beacon_offload_tlv_ies {
+ struct host_cmd_beacon_offload_tlv_hdr hdr;
+ u8 buf[];
+} __packed;
+
+struct host_cmd_req_beacon_offload {
+ struct host_cmd_header hdr;
+ __le32 flags;
+ u8 variable[];
+} __packed;
+
+struct host_cmd_resp_beacon_offload {
+ struct host_cmd_header hdr;
+ __le32 status;
+ __le16 dtim_count;
+} __packed;
+
+struct host_cmd_req_probe_response_offload {
+ struct host_cmd_header hdr;
+ u8 enable;
+ __le16 probe_resp_len;
+ u8 probe_resp_buf[];
+} __packed;
+
+struct host_cmd_resp_probe_response_offload {
+ struct host_cmd_header hdr;
+ __le32 status;
+} __packed;
+
+struct host_cmd_req_set_sta_type {
+ struct host_cmd_header hdr;
+ u8 sta_type;
+} __packed;
+
+struct host_cmd_req_set_enc_mode {
+ struct host_cmd_header hdr;
+ u8 enc_mode;
+} __packed;
+
+struct host_cmd_req_test_ba {
+ struct host_cmd_header hdr;
+ u8 addr[HOST_CMD_MAC_ADDR_LEN];
+ u8 start;
+ u8 tx;
+ __le32 tid;
+} __packed;
+
+struct host_cmd_req_set_listen_interval {
+ struct host_cmd_header hdr;
+ __le16 listen_interval;
+} __packed;
+
+struct host_cmd_req_set_ampdu {
+ struct host_cmd_header hdr;
+ u8 ampdu_enabled;
+} __packed;
+
+struct host_cmd_req_set_s1g_op_class {
+ struct host_cmd_header hdr;
+ u8 opclass;
+ u8 prim_opclass;
+} __packed;
+
+struct host_cmd_req_send_wake_action_frame {
+ struct host_cmd_header hdr;
+ u8 dest_addr[HOST_CMD_MAC_ADDR_LEN];
+ __le32 payload_size;
+ u8 payload[];
+} __packed;
+
+#define HOST_CMD_MAX_VENDOR_IE_LENGTH 255
+#define HOST_CMD_VENDOR_IE_TYPE_FLAG_BEACON BIT(0)
+#define HOST_CMD_VENDOR_IE_TYPE_FLAG_PROBE_REQ BIT(1)
+#define HOST_CMD_VENDOR_IE_TYPE_FLAG_PROBE_RESP BIT(2)
+#define HOST_CMD_VENDOR_IE_TYPE_FLAG_ASSOC_REQ BIT(3)
+#define HOST_CMD_VENDOR_IE_TYPE_FLAG_ASSOC_RESP BIT(4)
+
+enum host_cmd_vendor_ie_op {
+ HOST_CMD_VENDOR_IE_OP_ADD_ELEMENT = 0,
+ HOST_CMD_VENDOR_IE_OP_CLEAR_ELEMENTS = 1,
+ HOST_CMD_VENDOR_IE_OP_ADD_FILTER = 2,
+ HOST_CMD_VENDOR_IE_OP_CLEAR_FILTERS = 3,
+ HOST_CMD_VENDOR_IE_OP_INVALID = U16_MAX,
+};
+
+struct host_cmd_req_vendor_ie_config {
+ struct host_cmd_header hdr;
+ __le16 opcode;
+ __le16 mgmt_type_mask;
+ u8 data[HOST_CMD_MAX_VENDOR_IE_LENGTH];
+} __packed;
+
+struct host_cmd_resp_vendor_ie_config {
+ struct host_cmd_header hdr;
+ __le32 status;
+} __packed;
+
+enum host_cmd_twt_conf_op {
+ HOST_CMD_TWT_CONF_OP_CONFIGURE = 0,
+ HOST_CMD_TWT_CONF_OP_FORCE_INSTALL_AGREEMENT = 1,
+ HOST_CMD_TWT_CONF_OP_REMOVE_AGREEMENT = 2,
+ HOST_CMD_TWT_CONF_OP_CONFIGURE_EXPLICIT = 3,
+};
+
+struct host_cmd_explicit_twt_wake_interval {
+ __le16 wake_interval_mantissa;
+ u8 wake_interval_exponent;
+ u8 __padding[5];
+} __packed;
+
+/* Wake interval: either an absolute interval in microseconds or the
+ * explicit mantissa/exponent encoding above, per the TWT opcode in use
+ * (HOST_CMD_TWT_CONF_OP_CONFIGURE vs _CONFIGURE_EXPLICIT — presumably;
+ * the selection rule is not visible in this header, confirm in command.c).
+ * Both members are 8 bytes, so the union size is fixed on the wire.
+ */
+union host_cmd_wake_interval {
+ __le64 wake_interval_us;
+ struct host_cmd_explicit_twt_wake_interval explicit_twt;
+} __packed;
+
+/* Configure/install/remove a TWT agreement (opcode from
+ * enum host_cmd_twt_conf_op; flow_id identifies the agreement).
+ */
+struct host_cmd_req_set_twt_conf {
+ struct host_cmd_header hdr;
+ u8 opcode;
+ u8 flow_id;
+ __le64 target_wake_time;
+ union host_cmd_wake_interval wake_interval;
+ __le32 wake_duration_us;
+ u8 twt_setup_command;
+ u8 __padding[3]; /* explicit wire padding; keeps total size fixed */
+} __packed;
+
+#define HOST_CMD_MAX_AVAILABLE_CHANNELS 255
+
+struct host_cmd_channel_info {
+ __le32 frequency_khz;
+ u8 channel_5g;
+ u8 channel_s1g;
+ u8 bandwidth_mhz;
+} __packed;
+
+struct host_cmd_resp_get_available_channels {
+ struct host_cmd_header hdr;
+ __le32 status;
+ __le32 num_channels;
+ struct host_cmd_channel_info channels[HOST_CMD_MAX_AVAILABLE_CHANNELS];
+} __packed;
+
+#define HOST_CMD_S1G_CAP0_S1G_LONG BIT(0)
+#define HOST_CMD_S1G_CAP0_SGI_1MHZ BIT(1)
+#define HOST_CMD_S1G_CAP0_SGI_2MHZ BIT(2)
+#define HOST_CMD_S1G_CAP0_SGI_4MHZ BIT(3)
+#define HOST_CMD_S1G_CAP0_SGI_8MHZ BIT(4)
+#define HOST_CMD_S1G_CAP0_SGI_16MHZ BIT(5)
+
+struct host_cmd_req_set_ecsa_s1g_info {
+ struct host_cmd_header hdr;
+ __le32 operating_channel_freq_hz;
+ u8 opclass;
+ u8 primary_channel_bw_mhz;
+ u8 prim_1mhz_ch_idx;
+ u8 operating_channel_bw_mhz;
+ u8 prim_opclass;
+ u8 s1g_cap0;
+ u8 s1g_cap1;
+ u8 s1g_cap2;
+ u8 s1g_cap3;
+} __packed;
+
+struct host_cmd_resp_get_hw_version {
+ struct host_cmd_header hdr;
+ __le32 status;
+ u8 hw_version[64];
+} __packed;
+
+#define HOST_CMD_CAC_CFG_CHANGE_RULE_MAX 8
+#define HOST_CMD_CAC_CFG_ARFS_MAX 99
+#define HOST_CMD_CAC_CFG_CHANGE_MAX 99
+#define HOST_CMD_CAC_CFG_CHANGE_STEP 5
+
+enum host_cmd_cac_op {
+ HOST_CMD_CAC_OP_DISABLE = 0,
+ HOST_CMD_CAC_OP_ENABLE = 1,
+ HOST_CMD_CAC_OP_CFG_GET = 2,
+ HOST_CMD_CAC_OP_CFG_SET = 3,
+};
+
+struct host_cmd_cac_change_rule {
+ __le16 arfs;
+ __sle16 threshold_change;
+} __packed;
+
+struct host_cmd_req_cac {
+ struct host_cmd_header hdr;
+ u8 opcode;
+ u8 rule_tot;
+ struct host_cmd_cac_change_rule rule[HOST_CMD_CAC_CFG_CHANGE_RULE_MAX];
+} __packed;
+
+struct host_cmd_resp_cac {
+ struct host_cmd_header hdr;
+ __le32 status;
+ u8 rule_tot;
+ struct host_cmd_cac_change_rule rule[HOST_CMD_CAC_CFG_CHANGE_RULE_MAX];
+} __packed;
+
+struct host_cmd_ocs_driver_req {
+ __le32 op_channel_freq_hz;
+ u8 op_channel_bw_mhz;
+ u8 pri_channel_bw_mhz;
+ u8 pri_1mhz_channel_index;
+} __packed;
+
+struct host_cmd_ocs_driver_resp {
+ u8 running;
+} __packed;
+
+/* Driver-initiated OCS request; subcmd selects the union member.
+ * u8 opaque[0] is a deprecated zero-length array — use
+ * DECLARE_FLEX_ARRAY() (<linux/stddef.h>), the required form for a
+ * flexible array member inside a union.
+ */
+struct host_cmd_req_ocs_driver {
+ struct host_cmd_header hdr;
+ __le32 subcmd;
+ union {
+ DECLARE_FLEX_ARRAY(u8, opaque);
+ struct host_cmd_ocs_driver_req config;
+ };
+} __packed;
+
+/* Driver-initiated OCS response; subcmd echoes the request.
+ * Deprecated zero-length array replaced with DECLARE_FLEX_ARRAY().
+ */
+struct host_cmd_resp_ocs_driver {
+ struct host_cmd_header hdr;
+ __le32 status;
+ __le32 subcmd;
+ union {
+ DECLARE_FLEX_ARRAY(u8, opaque);
+ struct host_cmd_ocs_driver_resp ocs_status;
+ };
+} __packed;
+
+#define HOST_CMD_IFNAMSIZ 16
+
+struct host_cmd_req_mbssid {
+ struct host_cmd_header hdr;
+ u8 max_bssid_indicator;
+ s8 transmitter_iface[HOST_CMD_IFNAMSIZ];
+} __packed;
+
+#define HOST_CMD_MESH_ID_LEN_MAX 32
+#define HOST_CMD_MESH_BEACONLESS_MODE_DISABLE 0
+#define HOST_CMD_MESH_BEACONLESS_MODE_ENABLE 1
+#define HOST_CMD_MESH_PEER_LINKS_MIN 0
+#define HOST_CMD_MESH_PEER_LINKS_MAX 10
+
+struct host_cmd_req_set_mesh_config {
+ struct host_cmd_header hdr;
+ u8 mesh_id_len;
+ u8 mesh_id[HOST_CMD_MESH_ID_LEN_MAX];
+ u8 mesh_beaconless_mode;
+ u8 max_plinks;
+} __packed;
+
+/* Mesh beacon collision avoidance (MBCA) configuration.
+ * NOTE(review): the struct name spells "mcba" but the mbca_config field
+ * here and the related host_cmd_req_mesh_config both use "mbca" — this
+ * looks like a transposition typo. Not renamed here since other files in
+ * the series may already reference this identifier; confirm and fix
+ * tree-wide in one change.
+ */
+struct host_cmd_req_set_mcba_conf {
+ struct host_cmd_header hdr;
+ u8 mbca_config;
+ u8 beacon_timing_report_interval;
+ u8 min_beacon_gap_ms;
+ __le16 mbss_start_scan_duration_ms;
+ __le16 tbtt_adj_interval_ms;
+} __packed;
+
+struct host_cmd_req_dynamic_peering_config {
+ struct host_cmd_header hdr;
+ u8 enabled;
+ u8 rssi_margin;
+ __le32 blacklist_timeout;
+} __packed;
+
+#define HOST_CMD_CFG_RAW_FLAG_ENABLE BIT(0)
+#define HOST_CMD_CFG_RAW_FLAG_DELETE BIT(1)
+#define HOST_CMD_CFG_RAW_FLAG_UPDATE BIT(2)
+#define HOST_CMD_CFG_RAW_FLAG_DYNAMIC BIT(3)
+#define HOST_CMD_RAW_RESERVED_AID_DCS 2008
+#define HOST_CMD_RAW_RESERVED_AID_DOWNLINK 2009
+
+enum host_cmd_raw_tlv_tag {
+ HOST_CMD_RAW_TLV_TAG_SLOT_DEF = 0,
+ HOST_CMD_RAW_TLV_TAG_GROUP = 1,
+ HOST_CMD_RAW_TLV_TAG_START_TIME = 2,
+ HOST_CMD_RAW_TLV_TAG_PRAW = 3,
+ HOST_CMD_RAW_TLV_TAG_BCN_SPREAD = 4,
+ HOST_CMD_RAW_TLV_TAG_DYN_GLOBAL = 5,
+ HOST_CMD_RAW_TLV_TAG_DYN_CONFIG = 6,
+ HOST_CMD_RAW_TLV_TAG_LAST = 7,
+};
+
+struct host_cmd_raw_tlv_slot_def {
+ u8 tag;
+ __le32 raw_duration_us;
+ u8 num_slots;
+ u8 cross_slot_bleed;
+} __packed;
+
+struct host_cmd_raw_tlv_group {
+ u8 tag;
+ __le16 aid_start;
+ __le16 aid_end;
+} __packed;
+
+struct host_cmd_raw_tlv_start_time {
+ u8 tag;
+ __le32 start_time_us;
+} __packed;
+
+struct host_cmd_raw_tlv_praw {
+ u8 tag;
+ u8 periodicity;
+ u8 validity;
+ u8 start_offset;
+ u8 refresh_on_expiry;
+} __packed;
+
+struct host_cmd_raw_tlv_bcn_spread {
+ u8 tag;
+ __le16 max_spread;
+ __le16 nominal_sta_per_bcn;
+} __packed;
+
+struct host_cmd_raw_tlv_dyn_global {
+ u8 tag;
+ __le16 num_configs;
+ __le16 num_bcn_indexes;
+} __packed;
+
+struct host_cmd_raw_tlv_dyn_config {
+ u8 tag;
+ __le16 id;
+ __le16 index;
+ __le16 len;
+ u8 variable[];
+} __packed;
+
+union host_cmd_raw_tlvs {
+ u8 tag;
+ struct host_cmd_raw_tlv_slot_def slot_def;
+ struct host_cmd_raw_tlv_group group;
+ struct host_cmd_raw_tlv_start_time start_time;
+ struct host_cmd_raw_tlv_praw praw;
+ struct host_cmd_raw_tlv_bcn_spread bcn_spread;
+ struct host_cmd_raw_tlv_dyn_global dyn_global;
+ struct host_cmd_raw_tlv_dyn_config dyn_config;
+} __packed;
+
+struct host_cmd_req_config_raw {
+ struct host_cmd_header hdr;
+ __le32 flags;
+ __le16 id;
+ u8 variable[];
+} __packed;
+
+struct host_cmd_req_config_bss_stats {
+ struct host_cmd_header hdr;
+ u8 enable;
+ __le32 monitor_window_ms;
+} __packed;
+
+struct host_cmd_req_get_rssi {
+ struct host_cmd_header hdr;
+} __packed;
+
+struct host_cmd_resp_get_rssi {
+ struct host_cmd_header hdr;
+ __le32 status;
+ __sle32 rssi0;
+ __sle32 rssi1;
+ __sle32 rssi2;
+ __sle32 rssi3;
+ __sle32 rssi4;
+ __sle32 rssi5;
+ __sle32 rssi6;
+ __sle32 rssi7;
+} __packed;
+
+#define HOST_CMD_SET_IFS_MIN_USECS 160
+
+struct host_cmd_req_set_ifs {
+ struct host_cmd_header hdr;
+ __le32 period_usecs;
+} __packed;
+
+struct host_cmd_resp_set_ifs {
+ struct host_cmd_header hdr;
+ __le32 status;
+} __packed;
+
+struct host_cmd_req_set_fem_settings {
+ struct host_cmd_header hdr;
+ __le32 tx_antenna;
+ __le32 rx_antenna;
+ __le32 lna_enabled;
+ __le32 pa_enabled;
+} __packed;
+
+struct host_cmd_resp_set_fem_settings {
+ struct host_cmd_header hdr;
+ __le32 status;
+} __packed;
+
+struct host_cmd_req_set_txop {
+ struct host_cmd_header hdr;
+ u8 min_packet_count;
+} __packed;
+
+struct host_cmd_resp_set_txop {
+ struct host_cmd_header hdr;
+ __le32 status;
+} __packed;
+
+struct host_cmd_req_set_control_response {
+ struct host_cmd_header hdr;
+ u8 direction;
+ u8 control_response_1mhz_en;
+} __packed;
+
+struct host_cmd_resp_set_control_response {
+ struct host_cmd_header hdr;
+ __le32 status;
+} __packed;
+
+struct host_cmd_req_set_periodic_cal {
+ struct host_cmd_header hdr;
+ __le32 periodic_cal_en_mask;
+} __packed;
+
+struct host_cmd_resp_set_periodic_cal {
+ struct host_cmd_header hdr;
+ __le32 status;
+} __packed;
+
+struct host_cmd_req_set_bcn_rssi_threshold {
+ struct host_cmd_header hdr;
+ u8 threshold_db;
+} __packed;
+
+struct host_cmd_resp_set_bcn_rssi_threshold {
+ struct host_cmd_header hdr;
+ __le32 status;
+} __packed;
+
+struct host_cmd_req_set_tx_pkt_lifetime_usecs {
+ struct host_cmd_header hdr;
+ __le32 lifetime_usecs;
+} __packed;
+
+struct host_cmd_resp_set_tx_pkt_lifetime_usecs {
+ struct host_cmd_header hdr;
+ __le32 status;
+} __packed;
+
+struct host_cmd_req_set_physm_watchdog {
+ struct host_cmd_header hdr;
+ u8 physm_watchdog_en;
+} __packed;
+
+struct host_cmd_req_tx_polar {
+ struct host_cmd_header hdr;
+ u8 enable;
+} __packed;
+
+struct host_cmd_evt_sta_state {
+ struct host_cmd_header hdr;
+ u8 sta_addr[HOST_CMD_MAC_ADDR_LEN];
+ __le16 aid;
+ __le16 state;
+} __packed;
+
+struct host_cmd_evt_beacon_loss {
+ struct host_cmd_header hdr;
+ __le32 num_bcns;
+} __packed;
+
+struct host_cmd_evt_sig_field_error {
+ struct host_cmd_header hdr;
+ __le64 start_timestamp;
+ __le64 end_timestamp;
+} __packed;
+
+#define HOST_CMD_UMAC_TRAFFIC_CONTROL_SOURCE_TWT BIT(0)
+#define HOST_CMD_UMAC_TRAFFIC_CONTROL_SOURCE_DUTY_CYCLE BIT(1)
+
+struct host_cmd_evt_umac_traffic_control {
+ struct host_cmd_header hdr;
+ u8 pause_data_traffic;
+ __le32 sources;
+} __packed;
+
+struct host_cmd_evt_dhcp_lease_update {
+ struct host_cmd_header hdr;
+ __le32 my_ip;
+ __le32 netmask;
+ __le32 router;
+ __le32 dns;
+} __packed;
+
+struct host_cmd_evt_ocs_done {
+ struct host_cmd_header hdr;
+ __le64 time_listen;
+ __le64 time_rx;
+ s8 noise;
+ u8 metric;
+} __packed;
+
+struct host_cmd_evt_hw_scan_done {
+ struct host_cmd_header hdr;
+ u8 aborted;
+} __packed;
+
+/* Event: channel usage report. Field-for-field this mirrors
+ * host_cmd_resp_get_channel_usage, which declares noise as s8 — noise
+ * floor in dBm is negative, so the u8 here was a signedness bug that
+ * would misreport e.g. -90 dBm as 166. Same size and offset, so the
+ * wire layout is unchanged.
+ */
+struct host_cmd_evt_channel_usage {
+ struct host_cmd_header hdr;
+ __le64 time_listen;
+ __le64 busy_time;
+ __le32 freq_hz;
+ s8 noise; /* dBm, signed to match host_cmd_resp_get_channel_usage */
+ u8 bw_mhz;
+} __packed;
+
+enum host_cmd_connection_loss_reason {
+ HOST_CMD_CONNECTION_LOSS_REASON_TSF_RESET = 0,
+};
+
+struct host_cmd_evt_connection_loss {
+ struct host_cmd_header hdr;
+ __le32 reason;
+} __packed;
+
+struct host_cmd_evt_sched_scan_results {
+ struct host_cmd_header hdr;
+} __packed;
+
+enum host_cmd_cqm_rssi_threshold_event {
+ HOST_CMD_CQM_RSSI_THRESHOLD_EVENT_LOW = 0,
+ HOST_CMD_CQM_RSSI_THRESHOLD_EVENT_HIGH = 1,
+};
+
+struct host_cmd_evt_cqm_rssi_notify {
+ struct host_cmd_header hdr;
+ __sle16 rssi;
+ __le16 event;
+} __packed;
+
+struct host_cmd_evt_scan_done {
+ struct host_cmd_header hdr;
+ u8 aborted;
+} __packed;
+
+enum host_cmd_scan_result_frame {
+ HOST_CMD_SCAN_RESULT_FRAME_UNKNOWN = 0,
+ HOST_CMD_SCAN_RESULT_FRAME_BEACON = 1,
+ HOST_CMD_SCAN_RESULT_FRAME_PROBE_RESPONSE = 2,
+};
+
+struct host_cmd_evt_scan_result {
+ struct host_cmd_header hdr;
+ __le32 channel_freq_hz;
+ u8 bw_mhz;
+ u8 frame_type;
+ __sle16 rssi;
+ u8 bssid[HOST_CMD_MAC_ADDR_LEN];
+ __le16 beacon_interval;
+ __le16 capability_info;
+ __le64 tsf;
+ __le16 ies_len;
+ u8 ies[];
+} __packed;
+
+struct host_cmd_evt_connected {
+ struct host_cmd_header hdr;
+ u8 bssid[HOST_CMD_MAC_ADDR_LEN];
+ __sle16 rssi;
+ u8 padding_0[8];
+ __le16 assoc_resp_ies_len;
+ u8 assoc_resp_ies[];
+} __packed;
+
+struct host_cmd_evt_beacon_filter_match {
+ struct host_cmd_header hdr;
+ u8 padding_0[4];
+ __le32 ies_len;
+ u8 ies[];
+} __packed;
+
+struct host_cmd_req_set_capabilities {
+ struct host_cmd_header hdr;
+ struct host_cmd_mm_capabilities capabilities;
+ u8 set_caps;
+ u8 morse_mmss_offset;
+} __packed;
+
+struct host_cmd_resp_set_capabilities {
+ struct host_cmd_header hdr;
+ __le32 status;
+} __packed;
+
+struct host_cmd_req_set_transmission_rate {
+ struct host_cmd_header hdr;
+ __sle32 mcs_index;
+ __sle32 bandwidth_mhz;
+ __sle32 tx_80211ah_format;
+ s8 use_traveling_pilots;
+ s8 use_sgi;
+ u8 enabled;
+ s8 nss_idx;
+ s8 use_ldpc;
+ s8 use_stbc;
+} __packed;
+
+struct host_cmd_resp_set_transmission_rate {
+ struct host_cmd_header hdr;
+ __le32 status;
+} __packed;
+
+enum host_cmd_hart_id {
+ HOST_CMD_HART_ID_HOST = 0,
+ HOST_CMD_HART_ID_MAC = 1,
+ HOST_CMD_HART_ID_UPHY = 2,
+ HOST_CMD_HART_ID_LPHY = 3,
+};
+
+struct host_cmd_req_force_assert {
+ struct host_cmd_header hdr;
+ __le32 hart_id;
+} __packed;
+
+#define HOST_CMD_HOST_BLOCK_TX_FRAMES BIT(0)
+#define HOST_CMD_HOST_BLOCK_TX_CMD BIT(1)
+
+enum host_cmd_param_action {
+ HOST_CMD_PARAM_ACTION_SET = 0,
+ HOST_CMD_PARAM_ACTION_GET = 1,
+ HOST_CMD_PARAM_ACTION_LAST = 2,
+};
+
+enum host_cmd_slow_clock_mode {
+ HOST_CMD_SLOW_CLOCK_MODE_AUTO = 0,
+ HOST_CMD_SLOW_CLOCK_MODE_INTERNAL = 1,
+};
+
+enum host_cmd_param_id {
+ HOST_CMD_PARAM_ID_MAX_TRAFFIC_DELIVERY_WAIT_US = 0,
+ HOST_CMD_PARAM_ID_EXTRA_ACK_TIMEOUT_ADJUST_US = 1,
+ HOST_CMD_PARAM_ID_TX_STATUS_FLUSH_WATERMARK = 2,
+ HOST_CMD_PARAM_ID_TX_STATUS_FLUSH_MIN_AMPDU_SIZE = 3,
+ HOST_CMD_PARAM_ID_POWERSAVE_TYPE = 4,
+ HOST_CMD_PARAM_ID_SNOOZE_DURATION_ADJUST_US = 5,
+ HOST_CMD_PARAM_ID_TX_BLOCK = 6,
+ HOST_CMD_PARAM_ID_FORCED_SNOOZE_PERIOD_US = 7,
+ HOST_CMD_PARAM_ID_WAKE_ACTION_GPIO = 8,
+ HOST_CMD_PARAM_ID_WAKE_ACTION_GPIO_PULSE_MS = 9,
+ HOST_CMD_PARAM_ID_CONNECTION_MONITOR_GPIO = 10,
+ HOST_CMD_PARAM_ID_INPUT_TRIGGER_GPIO = 11,
+ HOST_CMD_PARAM_ID_INPUT_TRIGGER_MODE = 12,
+ HOST_CMD_PARAM_ID_COUNTRY = 13,
+ HOST_CMD_PARAM_ID_RTS_THRESHOLD = 14,
+ HOST_CMD_PARAM_ID_HOST_TX_BLOCK = 15,
+ HOST_CMD_PARAM_ID_MEM_RETENTION_CODE = 16,
+ HOST_CMD_PARAM_ID_NON_TIM_MODE = 17,
+ HOST_CMD_PARAM_ID_DYNAMIC_PS_TIMEOUT_MS = 18,
+ HOST_CMD_PARAM_ID_HOME_CHANNEL_DWELL_MS = 19,
+ HOST_CMD_PARAM_ID_SLOW_CLOCK_MODE = 20,
+ HOST_CMD_PARAM_ID_FRAGMENT_THRESHOLD = 21,
+ HOST_CMD_PARAM_ID_BEACON_LOSS_COUNT = 22,
+ HOST_CMD_PARAM_ID_AP_POWER_SAVE = 23,
+ HOST_CMD_PARAM_ID_BEACON_OFFLOAD = 24,
+ HOST_CMD_PARAM_ID_PROBE_RESP_OFFLOAD = 25,
+ HOST_CMD_PARAM_ID_BSS_MAX_AWAY_DURATION = 26,
+ HOST_CMD_PARAM_ID_DEFAULT_ACTIVE_SCAN_DWELL_MS = 27,
+ HOST_CMD_PARAM_ID_CTS_TO_SELF = 28,
+ HOST_CMD_PARAM_ID_CHANNELIZATION = 29,
+ HOST_CMD_PARAM_ID_LAST = 30,
+};
+
+struct host_cmd_req_get_set_generic_param {
+ struct host_cmd_header hdr;
+ __le32 param_id;
+ __le32 action;
+ __le32 flags;
+ __le32 value;
+} __packed;
+
+struct host_cmd_resp_get_set_generic_param {
+ struct host_cmd_header hdr;
+ __le32 status;
+ __le32 flags;
+ __le32 value;
+} __packed;
+
+#endif
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 04/31] wifi: mm81x: add command.h
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (2 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 03/31] wifi: mm81x: add command_defs.h Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 05/31] wifi: mm81x: add core.c Lachlan Hodges
` (27 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
.../net/wireless/morsemicro/mm81x/command.h | 85 +++++++++++++++++++
1 file changed, 85 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/command.h
diff --git a/drivers/net/wireless/morsemicro/mm81x/command.h b/drivers/net/wireless/morsemicro/mm81x/command.h
new file mode 100644
index 000000000000..0ea796f1d878
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/command.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+
+#ifndef _MM81X_COMMAND_H_
+#define _MM81X_COMMAND_H_
+
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include "core.h"
+#include "command_defs.h"
+
+/* Classify a command by the type bits in its little-endian header flags. */
+#define HOST_CMD_IS_REQ(cmd) (le16_to_cpu((cmd)->hdr.flags) & HOST_CMD_TYPE_REQ)
+#define HOST_CMD_IS_RESP(cmd) \
+	(le16_to_cpu((cmd)->hdr.flags) & HOST_CMD_TYPE_RESP)
+#define HOST_CMD_IS_EVT(cmd) (le16_to_cpu((cmd)->hdr.flags) & HOST_CMD_TYPE_EVT)
+
+struct mm81x_queue_params;
+
+/*
+ * Return codes reported by the firmware in command responses. The EPERM
+ * and ENOMEM values mirror negated errno codes; CMD_NOT_HANDLED is a
+ * firmware-specific code.
+ */
+enum mm81x_cmd_return_code {
+	MM81X_RET_SUCCESS = 0,
+	MM81X_RET_EPERM = -1,
+	MM81X_RET_ENOMEM = -12,
+	MM81X_RET_CMD_NOT_HANDLED = -32757,
+};
+
+/*
+ * The 16-bit host id packs a 12-bit sequence number (upper bits) with a
+ * 4-bit retry count (lower bits).
+ */
+#define HOST_CMD_HOST_ID_SEQ_MAX 0xFFF
+#define HOST_CMD_HOST_ID_RETRY_MASK 0x000F
+#define HOST_CMD_HOST_ID_SEQ_SHIFT 4
+#define HOST_CMD_HOST_ID_SEQ_MASK 0xFFF0
+
+/* Generic command request: header followed by command-specific payload. */
+struct host_cmd_req {
+	struct host_cmd_header hdr;
+	u8 data[];
+} __packed;
+
+/* Generic command response: header, firmware status code, then payload. */
+struct host_cmd_resp {
+	struct host_cmd_header hdr;
+	__le32 status;
+	u8 data[];
+} __packed;
+
+/* Asynchronous event from firmware: header followed by event payload. */
+struct host_cmd_event {
+	struct host_cmd_header hdr;
+	u8 data[];
+} __packed;
+
+int mm81x_cmd_resp_process(struct mm81x *mors, struct sk_buff *skb);
+int mm81x_cmd_add_if(struct mm81x *mors, u16 *vif_id, const u8 *addr,
+ enum nl80211_iftype type);
+int mm81x_cmd_get_capabilities(struct mm81x *mors, u16 vif_id,
+ struct mm81x_fw_caps *capabilities);
+int mm81x_cmd_cfg_qos(struct mm81x *mors, struct mm81x_queue_params *params);
+int mm81x_cmd_config_beacon_timer(struct mm81x *mors, void *mm81x_vif,
+ bool enabled);
+int mm81x_cmd_cfg_bss(struct mm81x *mors, u16 vif_id, u16 beacon_int,
+ u16 dtim_period, u32 cssid);
+int mm81x_cmd_set_channel(struct mm81x *mors, u32 op_chan_freq_hz,
+ u8 pri_1mhz_chan_idx, u8 op_bw_mhz, u8 pri_bw_mhz,
+ s32 *power_mbm);
+int mm81x_cmd_get_max_txpower(struct mm81x *mors, s32 *out_power_mbm);
+int mm81x_cmd_set_txpower(struct mm81x *mors, s32 *out_power_mbm,
+ int txpower_mbm);
+int mm81x_cmd_hw_scan(struct mm81x *mors, struct mm81x_hw_scan_params *params,
+ bool store);
+int mm81x_cmd_set_ps(struct mm81x *mors, bool enabled);
+int mm81x_cmd_cfg_multicast_filter(struct mm81x *mors,
+ struct mm81x_vif *mors_vif);
+int mm81x_cmd_sta_state(struct mm81x *mors, struct mm81x_vif *mors_vif, u16 aid,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state state);
+int mm81x_cmd_install_key(struct mm81x *mors, struct mm81x_vif *mors_vif,
+ u16 aid, struct ieee80211_key_conf *key,
+ enum host_cmd_key_cipher cipher,
+ enum host_cmd_aes_key_len length);
+int mm81x_cmd_disable_key(struct mm81x *mors, struct mm81x_vif *mors_vif,
+ u16 aid, struct ieee80211_key_conf *key);
+int mm81x_cmd_rm_if(struct mm81x *mors, u16 vif_id);
+int mm81x_cmd_set_frag_threshold(struct mm81x *mors, u32 frag_threshold);
+int mm81x_cmd_get_disabled_channels(
+ struct mm81x *mors, struct host_cmd_resp_get_disabled_channels *resp,
+ uint resp_len);
+
+#endif /* !_MM81X_COMMAND_H_ */
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 05/31] wifi: mm81x: add core.c
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (3 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 04/31] wifi: mm81x: add command.h Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-05-01 5:45 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 06/31] wifi: mm81x: add core.h Lachlan Hodges
` (26 subsequent siblings)
31 siblings, 1 reply; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/core.c | 146 +++++++++++++++++++
1 file changed, 146 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/core.c
diff --git a/drivers/net/wireless/morsemicro/mm81x/core.c b/drivers/net/wireless/morsemicro/mm81x/core.c
new file mode 100644
index 000000000000..b08f52921525
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/core.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+#include <linux/module.h>
+#include "core.h"
+#include "bus.h"
+#include "hif.h"
+#include "mac.h"
+
+/*
+ * Board Configuration File (BCF) name to load at firmware boot.
+ * NOTE(review): a single module parameter cannot differ per radio when
+ * multiple devices are present — flagged on-list for future replacement
+ * with a per-device mechanism.
+ */
+char board_config_file[BCF_SIZE_MAX] = "";
+module_param_string(bcf, board_config_file, BCF_SIZE_MAX, 0644);
+MODULE_PARM_DESC(bcf, "BCF filename to load");
+
+/*
+ * Read the chip ID over the bus and attach the matching register map and
+ * host-interface ops.
+ *
+ * Returns 0 on success, a negative bus error if the chip ID read fails,
+ * or -ENODEV for an unrecognised chip.
+ */
+static int mm81x_core_attach_regs(struct mm81x *mors)
+{
+	int ret;
+
+	mm81x_claim_bus(mors);
+	ret = mm81x_reg32_read(mors, MM8108_REG_CHIP_ID, &mors->chip_id);
+	mm81x_release_bus(mors);
+
+	if (ret < 0) {
+		dev_err(mors->dev, "failed to read chip id %d\n", ret);
+		return ret;
+	}
+
+	switch (mors->chip_id) {
+	case MM8108B2_ID:
+		mors->regs = &mm8108_regs;
+		mors->hif.ops = &mm81x_yaps_ops;
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/* Map the revision field of a chip ID to a printable revision string. */
+static char *mm81x_core_get_revision_string(u32 chip_id)
+{
+	if (MM81X_DEVICE_GET_CHIP_REV(chip_id) == MM8108B2_REV)
+		return MM8108B2_REV_STRING;
+
+	return "??";
+}
+
+/*
+ * Populate the device MAC address from OTP; fall back to a random
+ * address if the OTP read fails or yields an invalid address.
+ */
+static void mm81x_core_init_mac_addr(struct mm81x *mors)
+{
+	if (mm81x_hw_otp_get_mac_addr(mors) == 0 &&
+	    is_valid_ether_addr(mors->macaddr))
+		return;
+
+	eth_random_addr(mors->macaddr);
+}
+
+/*
+ * Build the firmware file path for the given chip id.
+ *
+ * Returns a kasprintf()-allocated string the caller must kfree(), or
+ * NULL on allocation failure.
+ */
+char *mm81x_core_get_fw_path(u32 chip_id)
+{
+	return kasprintf(GFP_KERNEL,
+			 MM81X_FW_DIR "/" MM8108_FW_BASE
+			 "%s" FW_ROM_LINKED_STRING MM81X_FW_EXT,
+			 mm81x_core_get_revision_string(chip_id));
+}
+EXPORT_SYMBOL_GPL(mm81x_core_get_fw_path);
+
+/*
+ * Allocate the driver context, reserving @priv_size bytes of
+ * bus-specific private data. Pair with mm81x_core_free().
+ */
+struct mm81x *mm81x_core_alloc(size_t priv_size, struct device *dev)
+{
+	return mm81x_mac_alloc(priv_size, dev);
+}
+EXPORT_SYMBOL_GPL(mm81x_core_alloc);
+
+/*
+ * One-time core initialisation: seed the state flags, set the MAC
+ * address, attach the chip register map, create the driver work queues
+ * and bring up the host interface.
+ *
+ * Returns 0 on success or a negative errno; on failure all resources
+ * acquired here are released.
+ */
+int mm81x_core_init(struct mm81x *mors)
+{
+	int ret;
+
+	set_bit(MM81X_STATE_CHIP_UNRESPONSIVE, &mors->state_flags);
+	set_bit(MM81X_STATE_RELOAD_FW_AFTER_START, &mors->state_flags);
+
+	mm81x_core_init_mac_addr(mors);
+
+	ret = mm81x_core_attach_regs(mors);
+	if (ret)
+		return ret;
+
+	mors->chip_wq = create_singlethread_workqueue("chip_wq");
+	if (!mors->chip_wq)
+		return -ENOMEM;
+
+	mors->net_wq = create_singlethread_workqueue("net_wq");
+	if (!mors->net_wq) {
+		ret = -ENOMEM;
+		goto err_chip_wq;
+	}
+
+	ret = mm81x_hif_init(mors);
+	if (ret)
+		goto err_net_wq;
+
+	return 0;
+
+err_net_wq:
+	/* destroy_workqueue() drains pending work; no explicit flush needed */
+	destroy_workqueue(mors->net_wq);
+err_chip_wq:
+	destroy_workqueue(mors->chip_wq);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mm81x_core_init);
+
+/* Register the device with mac80211. Returns 0 or a negative errno. */
+int mm81x_core_register(struct mm81x *mors)
+{
+	return mm81x_mac_register(mors);
+}
+EXPORT_SYMBOL_GPL(mm81x_core_register);
+
+/* Unregister the device from mac80211. */
+void mm81x_core_unregister(struct mm81x *mors)
+{
+	mm81x_mac_unregister(mors);
+}
+EXPORT_SYMBOL_GPL(mm81x_core_unregister);
+
+/*
+ * Tear down the host interface and the driver work queues. Reverses
+ * mm81x_core_init().
+ */
+void mm81x_core_deinit(struct mm81x *mors)
+{
+	mm81x_hif_finish(mors);
+	/* destroy_workqueue() drains pending work; no explicit flush needed */
+	destroy_workqueue(mors->net_wq);
+	destroy_workqueue(mors->chip_wq);
+}
+EXPORT_SYMBOL_GPL(mm81x_core_deinit);
+
+/* Release the driver context allocated by mm81x_core_alloc(). */
+void mm81x_core_free(struct mm81x *mors)
+{
+	mm81x_mac_free(mors);
+}
+EXPORT_SYMBOL_GPL(mm81x_core_free);
+
+MODULE_AUTHOR("Morse Micro");
+MODULE_DESCRIPTION("Driver support for Morse Micro MM81X core");
+/* Must match the SPDX GPL-2.0-only identifier at the top of this file */
+MODULE_LICENSE("GPL");
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* Re: [PATCH wireless-next v2 05/31] wifi: mm81x: add core.c
2026-04-30 4:55 ` [PATCH wireless-next v2 05/31] wifi: mm81x: add core.c Lachlan Hodges
@ 2026-05-01 5:45 ` Lachlan Hodges
0 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-05-01 5:45 UTC (permalink / raw)
To: johannes, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
> +char board_config_file[BCF_SIZE_MAX] = "";
> +module_param_string(bcf, board_config_file, BCF_SIZE_MAX, 0644);
> +MODULE_PARM_DESC(bcf, "BCF filename to load");
Just reviewing the bot results, I see it is not happy about the
mod param, especially since it doesn't really work for multiple
radios so it's not actually a valid approach anyway. After some
discussion we will remove it for now and in the future once we
are in tree add a sysfs entry or something similar.
lachlan
^ permalink raw reply [flat|nested] 36+ messages in thread
* [PATCH wireless-next v2 06/31] wifi: mm81x: add core.h
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (4 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 05/31] wifi: mm81x: add core.c Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 07/31] wifi: mm81x: add fw.c Lachlan Hodges
` (25 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/core.h | 478 +++++++++++++++++++
1 file changed, 478 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/core.h
diff --git a/drivers/net/wireless/morsemicro/mm81x/core.h b/drivers/net/wireless/morsemicro/mm81x/core.h
new file mode 100644
index 000000000000..698353006865
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/core.h
@@ -0,0 +1,478 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+
+#ifndef _MM81X_CORE_H_
+#define _MM81X_CORE_H_
+
+#include <net/mac80211.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/crc32.h>
+#include <linux/notifier.h>
+#include <linux/nospec.h>
+#include "yaps.h"
+#include "yaps_hw.h"
+#include "hw.h"
+#include "fw.h"
+#include "rc.h"
+
+#define MM81X_DRIVER_SEMVER_MAJOR 56
+#define MM81X_DRIVER_SEMVER_MINOR 3
+#define MM81X_DRIVER_SEMVER_PATCH 0
+
+#define MM81X_SEMVER_GET_MAJOR(x) (((x) >> 22) & 0x3FF)
+#define MM81X_SEMVER_GET_MINOR(x) (((x) >> 10) & 0xFFF)
+#define MM81X_SEMVER_GET_PATCH(x) ((x) & 0x3FF)
+
+#define DRV_VERSION __stringify(MM81X_VERSION)
+
+#define MM8108_FW_BASE "mm8108"
+
+#define BCF_SIZE_MAX 48
+
+/* Generate a device ID from chip ID, revision, and chip type */
+#define MM81X_DEVICE_ID(chip_id, chip_rev, chip_type) \
+ ((chip_id) | ((chip_rev) << 8) | ((chip_type) << 12))
+
+/* Get constituents of the device ID */
+#define MM81X_DEVICE_GET_CHIP_ID(device_id) ((device_id) & 0xff)
+#define MM81X_DEVICE_GET_CHIP_REV(device_id) ((((device_id) >> 8) & 0xf))
+#define MM81X_DEVICE_GET_CHIP_TYPE(device_id) ((((device_id) >> 12) & 0xf))
+
+#define KHZ_TO_HZ(x) ((x) * 1000)
+#define KHZ100_TO_MHZ(x) ((x) / 10)
+#define KHZ100_TO_KHZ(freq) ((freq) * 100)
+#define KHZ100_TO_HZ(freq) ((freq) * 100000)
+
+#define QDBM_TO_MBM(gain) (((gain) * 100) >> 2)
+#define MBM_TO_QDBM(gain) (((gain) << 2) / 100)
+#define QDBM_TO_DBM(gain) ((gain) / 4)
+
+#define BPS_TO_KBPS(x) ((x) / 1000)
+
+#define NSS_IDX_TO_NSS(x) ((x) + 1)
+#define NSS_TO_NSS_IDX(x) ((x) - 1)
+
+#define ROUND_BYTES_TO_WORD(_nbytes) \
+ (((_nbytes) + 3) & ~((typeof(_nbytes))0x03))
+
+/*
+ * Convert firmware-provided little-endian values stored in plain integer
+ * types to CPU byte order (the __force cast silences sparse, which would
+ * otherwise flag the non-__le32/__le16 argument).
+ */
+static inline u32 mm81x_fle32_to_cpu(u32 v)
+{
+	return le32_to_cpu((__force __le32)v);
+}
+
+static inline u16 mm81x_fle16_to_cpu(u16 v)
+{
+	return le16_to_cpu((__force __le16)v);
+}
+
+struct mm81x_bus_ops;
+struct mm81x_hif_ops;
+
+/* modparam variables */
+extern char board_config_file[];
+
+#define MM81X_CAPS_MAX_FW_VAL (128)
+
+/* Max number of interfaces */
+#define MM81X_MAX_IF (2)
+
+enum mm81x_caps_flags {
+ MM81X_CAPS_FW_START = 0,
+ MM81X_CAPS_2MHZ = MM81X_CAPS_FW_START,
+ MM81X_CAPS_4MHZ,
+ MM81X_CAPS_8MHZ,
+ MM81X_CAPS_16MHZ,
+ MM81X_CAPS_SGI,
+ MM81X_CAPS_S1G_LONG,
+ MM81X_CAPS_TRAVELING_PILOT_ONE_STREAM,
+ MM81X_CAPS_TRAVELING_PILOT_TWO_STREAM,
+ MM81X_CAPS_MU_BEAMFORMEE,
+ MM81X_CAPS_MU_BEAMFORMER,
+ MM81X_CAPS_RD_RESPONDER,
+ MM81X_CAPS_STA_TYPE_SENSOR,
+ MM81X_CAPS_STA_TYPE_NON_SENSOR,
+ MM81X_CAPS_GROUP_AID,
+ MM81X_CAPS_NON_TIM,
+ MM81X_CAPS_TIM_ADE,
+ MM81X_CAPS_BAT,
+ MM81X_CAPS_DYNAMIC_AID,
+ MM81X_CAPS_UPLINK_SYNC,
+ MM81X_CAPS_FLOW_CONTROL,
+ MM81X_CAPS_AMPDU,
+ MM81X_CAPS_AMSDU,
+ MM81X_CAPS_1MHZ_CONTROL_RESPONSE_PREAMBLE,
+ MM81X_CAPS_PAGE_SLICING,
+ MM81X_CAPS_RAW,
+ MM81X_CAPS_MCS8,
+ MM81X_CAPS_MCS9,
+ MM81X_CAPS_ASYMMETRIC_BA_SUPPORT,
+ MM81X_CAPS_DAC,
+ MM81X_CAPS_CAC,
+ MM81X_CAPS_TXOP_SHARING_IMPLICIT_ACK,
+ MM81X_CAPS_NDP_PSPOLL,
+ MM81X_CAPS_FRAGMENT_BA,
+ MM81X_CAPS_OBSS_MITIGATION,
+ MM81X_CAPS_TMP_PS_MODE_SWITCH,
+ MM81X_CAPS_SECTOR_TRAINING,
+ MM81X_CAPS_UNSOLICIT_DYNAMIC_AID,
+ MM81X_CAPS_NDP_BEAMFORMING_REPORT,
+ MM81X_CAPS_MCS_NEGOTIATION,
+ MM81X_CAPS_DUPLICATE_1MHZ,
+ MM81X_CAPS_TACK_AS_PSPOLL,
+ MM81X_CAPS_PV1,
+ MM81X_CAPS_TWT_RESPONDER,
+ MM81X_CAPS_TWT_REQUESTER,
+ MM81X_CAPS_BDT,
+ MM81X_CAPS_TWT_GROUPING,
+ MM81X_CAPS_LINK_ADAPTATION_WO_NDP_CMAC,
+ MM81X_CAPS_LONG_MPDU,
+ MM81X_CAPS_TXOP_SECTORIZATION,
+ MM81X_CAPS_GROUP_SECTORIZATION,
+ MM81X_CAPS_HTC_VHT,
+ MM81X_CAPS_HTC_VHT_MFB,
+ MM81X_CAPS_HTC_VHT_MRQ,
+ MM81X_CAPS_2SS,
+ MM81X_CAPS_3SS,
+ MM81X_CAPS_4SS,
+ MM81X_CAPS_SU_BEAMFORMEE,
+ MM81X_CAPS_SU_BEAMFORMER,
+ MM81X_CAPS_RX_STBC,
+ MM81X_CAPS_TX_STBC,
+ MM81X_CAPS_RX_LDPC,
+ MM81X_CAPS_HW_FRAGMENT,
+
+ MM81X_CAPS_FW_END = MM81X_CAPS_MAX_FW_VAL,
+ MM81X_CAPS_LAST = MM81X_CAPS_FW_END,
+};
+
+struct mm81x_fw_caps {
+ u32 flags[FW_CAPABILITIES_FLAGS_WIDTH];
+ u8 ampdu_mss;
+ u8 beamformee_sts_capability;
+ u8 number_sounding_dimensions;
+ u8 maximum_ampdu_length_exponent;
+ u8 mm81x_mmss_offset;
+};
+
+/* Convenience wrapper: MM81X_FW_SUPP(caps, AMPDU) etc. */
+#define MM81X_FW_SUPP(MM81X_CAPS, CAPABILITY) \
+	mm81x_caps_supported(MM81X_CAPS, MM81X_CAPS_##CAPABILITY)
+
+/*
+ * Test whether the firmware advertised capability @flag.
+ * NOTE(review): casting the u32 flags array to unsigned long * assumes
+ * test_bit()'s bit numbering lines up with the u32 layout — true on
+ * little-endian; verify for 64-bit big-endian targets.
+ */
+static inline bool mm81x_caps_supported(struct mm81x_fw_caps *caps,
+					enum mm81x_caps_flags flag)
+{
+	const unsigned long *flags_ptr = (unsigned long *)caps->flags;
+
+	return test_bit(flag, flags_ptr);
+}
+
+struct mm81x_ps {
+ u32 wakers;
+ bool enable;
+ bool suspended;
+ /* PS state lock */
+ struct mutex lock;
+ struct delayed_work delayed_eval_work;
+};
+
+enum mm81x_page_aci {
+ MM81X_ACI_BE = 0,
+ MM81X_ACI_BK = 1,
+ MM81X_ACI_VI = 2,
+ MM81X_ACI_VO = 3,
+};
+
+enum mm81x_qos_tid_up_index {
+ MM81X_QOS_TID_UP_BK = 1,
+ MM81X_QOS_TID_UP_XX = 2,
+ MM81X_QOS_TID_UP_BE = 0,
+ MM81X_QOS_TID_UP_EE = 3,
+ MM81X_QOS_TID_UP_CL = 4,
+ MM81X_QOS_TID_UP_VI = 5,
+ MM81X_QOS_TID_UP_VO = 6,
+ MM81X_QOS_TID_UP_NC = 7,
+
+ MM81X_QOS_TID_UP_LOWEST = MM81X_QOS_TID_UP_BK,
+ MM81X_QOS_TID_UP_HIGHEST = MM81X_QOS_TID_UP_NC
+};
+
+struct mm81x_sw_version {
+ u8 major;
+ u8 minor;
+ u8 patch;
+};
+
+struct mm81x_sta {
+ const struct ieee80211_vif *vif;
+ u8 addr[ETH_ALEN];
+ enum ieee80211_sta_state state;
+ bool tid_tx[IEEE80211_NUM_TIDS];
+ bool tid_start_tx[IEEE80211_NUM_TIDS];
+ u8 tid_params[IEEE80211_NUM_TIDS];
+ int max_bw_mhz;
+ struct mm81x_rc_sta rc;
+ struct mmrc_rate last_sta_tx_rate;
+ s16 avg_rssi;
+ bool tx_ps_filter_en;
+};
+
+struct mm81x_vif {
+ struct mm81x *mors;
+ u16 id;
+
+ union {
+ struct {
+ bool is_assoc;
+ } sta;
+ struct {
+ u32 num_stas;
+ struct tasklet_struct beacon_tasklet;
+ bool beaconing_enabled;
+ } ap;
+ } u;
+};
+
+struct mm81x_stale_tx_status {
+ /* Stale Tx lock */
+ spinlock_t lock;
+ struct timer_list timer;
+};
+
+struct mcast_filter {
+ u8 count;
+ /*
+ * Integer representation of the last four bytes of a multicast MAC
+ * address. The first two bytes are always 0x0100 (IPv4) or 0x3333
+ * (IPv6).
+ */
+ __le32 addr_list[];
+};
+
+enum mm81x_hw_scan_op {
+ MM81X_HW_SCAN_OP_START,
+ MM81X_HW_SCAN_OP_STOP,
+};
+
+struct mm81x_hw_scan_params {
+ struct ieee80211_hw *hw;
+
+ /* vif which initiated the scan */
+ struct ieee80211_vif *vif;
+ bool has_directed_ssid;
+ u32 dwell_time_ms;
+ u32 dwell_on_home_ms;
+ enum mm81x_hw_scan_op operation;
+ bool store;
+ struct sk_buff *probe_req;
+ u16 num_chans;
+ u16 allocated_chans;
+
+ struct {
+ struct ieee80211_channel *channel;
+ /* Index into @ref powers_qdbm for the power of this channel */
+ u8 power_idx;
+ } *channels;
+
+ s32 *powers_qdbm;
+ u8 n_powers;
+ bool use_1mhz_probes;
+};
+
+enum mm81x_hw_scan_state {
+ HW_SCAN_STATE_IDLE,
+ HW_SCAN_STATE_RUNNING,
+ HW_SCAN_STATE_ABORTING,
+};
+
+struct mm81x_hw_scan {
+ enum mm81x_hw_scan_state state;
+ struct completion scan_done;
+ struct mm81x_hw_scan_params *params;
+ struct delayed_work timeout;
+ u32 home_dwell_ms;
+};
+
+enum mm81x_hif_event_flags {
+ MM81X_HIF_EVT_RX_PEND,
+ MM81X_HIF_EVT_PAGE_RETURN_PEND,
+ MM81X_HIF_EVT_TX_COMMAND_PEND,
+ MM81X_HIF_EVT_TX_BEACON_PEND,
+ MM81X_HIF_EVT_TX_MGMT_PEND,
+ MM81X_HIF_EVT_TX_DATA_PEND,
+ MM81X_HIF_EVT_TX_PACKET_FREED_UP_PEND,
+ MM81X_HIF_EVT_DATA_TRAFFIC_PAUSE_PEND,
+ MM81X_HIF_EVT_DATA_TRAFFIC_RESUME_PEND,
+ MM81X_HIF_EVT_UPDATE_HW_CLOCK_REFERENCE,
+};
+
+enum mm81x_state_flags {
+ MM81X_STATE_CHIP_UNRESPONSIVE,
+ MM81X_STATE_DATA_QS_STOPPED,
+ MM81X_STATE_DATA_TX_STOPPED,
+ MM81X_STATE_REGDOM_SET_BY_USER,
+ MM81X_STATE_REGDOM_SET_BY_OTP,
+ MM81X_STATE_RELOAD_FW_AFTER_START,
+ MM81X_STATE_HOST_TO_CHIP_TX_BLOCKED,
+ MM81X_STATE_HOST_TO_CHIP_CMD_BLOCKED,
+};
+
+#define MM81X_COUNTRY_LEN (3)
+#define INVALID_VIF_INDEX 0xFF
+
+struct mm81x {
+ u32 chip_id;
+ u32 host_table_ptr;
+
+ /* Refer to @enum mm81x_bus_type */
+ u32 bus_type;
+ u32 bcf_address;
+
+ /*
+ * Parsed from the release tag, which should be in the format
+ * 'rel_<major>_<minor>_<patch>'. If the tag is not in this format
+ * then corresponding version field will be 0.
+ */
+ struct mm81x_sw_version sw_ver;
+ u8 macaddr[ETH_ALEN];
+ u8 country[MM81X_COUNTRY_LEN];
+
+ /* Mask of type @enum host_table_firmware_flags */
+ u32 firmware_flags;
+ struct mm81x_fw_caps fw_caps;
+ bool started;
+ bool chip_was_reset;
+ struct wiphy *wiphy;
+ struct mm81x_hw_scan hw_scan;
+ struct ieee80211_hw *hw;
+ struct device *dev;
+
+ struct ieee80211_vif __rcu *vifs[MM81X_MAX_IF];
+
+ /* @mm81x_state_flags */
+ unsigned long state_flags;
+
+ u16 cmd_seq;
+ struct completion *cmd_comp;
+ /* Serialises commands */
+ struct mutex cmd_lock;
+
+ /* Serialises command completion */
+ struct mutex cmd_wait;
+
+ const struct mm81x_regs *regs;
+
+ struct {
+ union {
+ struct mm81x_yaps yaps;
+ } u;
+ const struct mm81x_hif_ops *ops;
+ /* See @enum mm81x_hif_event_flags for values */
+ unsigned long event_flags;
+ bool validate_skb_checksum;
+ } hif;
+
+ struct workqueue_struct *chip_wq;
+ struct work_struct hif_work;
+ struct work_struct usb_irq_work;
+ struct mm81x_stale_tx_status stale_status;
+ bool config_ps;
+ struct mm81x_ps ps;
+
+ /* Tx power in mBm received from the FW before association */
+ s32 tx_power_mbm;
+ s32 tx_max_power_mbm;
+
+ const struct mm81x_bus_ops *bus_ops;
+ struct mm81x_rc mrc;
+ int rts_threshold;
+ struct workqueue_struct *net_wq;
+ struct work_struct tx_stale_work;
+
+ struct cfg80211_chan_def chandef;
+ struct mcast_filter *mcast_filter;
+ atomic_t num_bcn_vifs;
+ unsigned long beacon_irqs_enabled;
+ u8 drv_priv[] __aligned(sizeof(void *));
+};
+
+/* Map from mac80211 queue to Morse ACI value for page metadata */
+/* Map from mac80211 queue to Morse ACI value for page metadata */
+static inline u8 map_mac80211q_2_mm81x_aci(u16 mac80211queue)
+{
+	if (mac80211queue == IEEE80211_AC_VO)
+		return MM81X_ACI_VO;
+	if (mac80211queue == IEEE80211_AC_VI)
+		return MM81X_ACI_VI;
+	if (mac80211queue == IEEE80211_AC_BK)
+		return MM81X_ACI_BK;
+
+	/* Everything else (including AC_BE) is best effort */
+	return MM81X_ACI_BE;
+}
+
+/* Map an 802.11 TID / user priority to its Morse access category. */
+static inline enum mm81x_page_aci
+dot11_tid_to_ac(enum mm81x_qos_tid_up_index tid)
+{
+	if (tid == MM81X_QOS_TID_UP_BK || tid == MM81X_QOS_TID_UP_XX)
+		return MM81X_ACI_BK;
+	if (tid == MM81X_QOS_TID_UP_CL || tid == MM81X_QOS_TID_UP_VI)
+		return MM81X_ACI_VI;
+	if (tid == MM81X_QOS_TID_UP_VO || tid == MM81X_QOS_TID_UP_NC)
+		return MM81X_ACI_VO;
+
+	/* BE, EE and anything unrecognised map to best effort */
+	return MM81X_ACI_BE;
+}
+
+/*
+ * Data transmission is allowed only when the Tx path is not stopped and
+ * no data-traffic pause event is pending on the host interface.
+ */
+static inline bool mm81x_is_data_tx_allowed(struct mm81x *mors)
+{
+	return !test_bit(MM81X_STATE_DATA_TX_STOPPED, &mors->state_flags) &&
+	       !test_bit(MM81X_HIF_EVT_DATA_TRAFFIC_PAUSE_PEND,
+			 &mors->hif.event_flags);
+}
+
+/* Recover the enclosing mac80211 vif from a driver vif (drv_priv). */
+static inline struct ieee80211_vif *
+mm81x_vif_to_ieee80211_vif(struct mm81x_vif *mors_vif)
+{
+	return container_of((void *)mors_vif, struct ieee80211_vif, drv_priv);
+}
+
+/* Access the driver private area of a mac80211 vif. */
+static inline struct mm81x_vif *
+ieee80211_vif_to_mors_vif(struct ieee80211_vif *vif)
+{
+	return (struct mm81x_vif *)vif->drv_priv;
+}
+
+/* Driver context that owns this vif. */
+static inline struct mm81x *mm81x_vif_to_mors(struct mm81x_vif *mors_vif)
+{
+	return mors_vif->mors;
+}
+
+/* Compressed SSID: bitwise-inverted CRC32 of the SSID bytes. */
+static inline u32 mm81x_generate_cssid(const u8 *ssid, u8 len)
+{
+	return ~crc32(~0, ssid, len);
+}
+
+int mm81x_beacon_init(struct mm81x_vif *mors_vif);
+void mm81x_beacon_finish(struct mm81x_vif *mors_vif);
+void mm81x_beacon_irq_handle(struct mm81x *mors, u32 status);
+char *mm81x_core_get_fw_path(u32 chip_id);
+struct mm81x *mm81x_core_alloc(size_t priv_size, struct device *dev);
+int mm81x_core_init(struct mm81x *mors);
+int mm81x_core_register(struct mm81x *mors);
+void mm81x_core_unregister(struct mm81x *mors);
+void mm81x_core_deinit(struct mm81x *mors);
+void mm81x_core_free(struct mm81x *mors);
+
+#endif /* !_MM81X_CORE_H_ */
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 07/31] wifi: mm81x: add fw.c
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (5 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 06/31] wifi: mm81x: add core.h Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 08/31] wifi: mm81x: add fw.h Lachlan Hodges
` (24 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/fw.c | 744 +++++++++++++++++++++
1 file changed, 744 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/fw.c
diff --git a/drivers/net/wireless/morsemicro/mm81x/fw.c b/drivers/net/wireless/morsemicro/mm81x/fw.c
new file mode 100644
index 000000000000..9ab48a34115e
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/fw.c
@@ -0,0 +1,744 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/string_choices.h>
+#include <net/mac80211.h>
+#include <linux/elf.h>
+#include <linux/crc32.h>
+#include "fw.h"
+#include "mac.h"
+#include "bus.h"
+
+/*
+ * Maximum wait time (milliseconds) for firmware to boot (for host table
+ * pointer to be available)
+ */
+#define MAX_WAIT_FOR_HOST_TABLE_PTR_MS 1200
+
+/* Number of times to attempt flashing FW */
+#define FW_FLASH_ATTEMPT_COUNT 3
+
+/*
+ * Validate the ELF identification bytes (must be 32-bit little-endian)
+ * and copy the header fields we use into @ehdr, converting them to CPU
+ * byte order. Returns 0 on success, -EINVAL for a non-matching ELF.
+ * NOTE(review): assumes @data holds at least sizeof(mm81x_elf_ehdr)
+ * bytes — callers pass complete firmware images; confirm for any new
+ * caller.
+ */
+static int mm81x_fw_get_header(const u8 *data, mm81x_elf_ehdr *ehdr)
+{
+	mm81x_elf_ehdr *p = (mm81x_elf_ehdr *)data;
+
+	/* Magic check */
+	if (p->e_ident[EI_MAG0] != ELFMAG0 || p->e_ident[EI_MAG1] != ELFMAG1 ||
+	    p->e_ident[EI_MAG2] != ELFMAG2 || p->e_ident[EI_MAG3] != ELFMAG3)
+		return -EINVAL;
+
+	/* elf32 and little endian */
+	if (p->e_ident[EI_DATA] != ELFDATA2LSB ||
+	    p->e_ident[EI_CLASS] != ELFCLASS32)
+		return -EINVAL;
+
+	ehdr->e_phoff = mm81x_fle32_to_cpu(p->e_phoff);
+	ehdr->e_phentsize = mm81x_fle16_to_cpu(p->e_phentsize);
+	ehdr->e_phnum = mm81x_fle16_to_cpu(p->e_phnum);
+	ehdr->e_shoff = mm81x_fle32_to_cpu(p->e_shoff);
+	ehdr->e_shentsize = mm81x_fle16_to_cpu(p->e_shentsize);
+	ehdr->e_shnum = mm81x_fle16_to_cpu(p->e_shnum);
+	ehdr->e_shstrndx = mm81x_fle16_to_cpu(p->e_shstrndx);
+	ehdr->e_entry = mm81x_fle32_to_cpu(p->e_entry);
+
+	return 0;
+}
+
+/*
+ * Walk the ".fw_info" section TLVs and capture values of interest
+ * (currently only the BCF load address). The walk stops before any TLV
+ * whose header or value would run past the end of the section, so a
+ * truncated or malformed image cannot cause an out-of-bounds read.
+ */
+static void mm81x_fw_parse_info(struct mm81x *mors, const u8 *data, int length)
+{
+	const u8 *end = data + length;
+	const struct mm81x_fw_info_tlv *tlv =
+		(const struct mm81x_fw_info_tlv *)data;
+
+	while ((const u8 *)tlv + sizeof(*tlv) <= end) {
+		u16 val_len = le16_to_cpu(tlv->length);
+
+		/* Never read a value that overruns the section */
+		if ((const u8 *)tlv + sizeof(*tlv) + val_len > end)
+			break;
+
+		switch (le16_to_cpu(tlv->type)) {
+		case MM81X_FW_INFO_TLV_BCF_ADDR:
+			if (val_len >= sizeof(u32))
+				mors->bcf_address = get_unaligned_le32(
+					(__force __le32 *)tlv->val);
+			break;
+		default:
+			break;
+		}
+		tlv = (const struct mm81x_fw_info_tlv *)((const u8 *)tlv +
+							 sizeof(*tlv) +
+							 val_len);
+	}
+}
+
+/*
+ * Copy section header @i out of the ELF image into @shdr, converting
+ * fields to CPU byte order. Always returns 0 (the int return is kept
+ * for symmetry with callers that check the result).
+ */
+static int mm81x_fw_get_section_header(const u8 *data, mm81x_elf_ehdr *ehdr,
+				       mm81x_elf_shdr *shdr, int i)
+{
+	mm81x_elf_shdr *p = (mm81x_elf_shdr *)(data + ehdr->e_shoff +
+					       (i * ehdr->e_shentsize));
+
+	shdr->sh_name = mm81x_fle32_to_cpu(p->sh_name);
+	shdr->sh_type = mm81x_fle32_to_cpu(p->sh_type);
+	shdr->sh_offset = mm81x_fle32_to_cpu(p->sh_offset);
+	shdr->sh_addr = mm81x_fle32_to_cpu(p->sh_addr);
+	shdr->sh_size = mm81x_fle32_to_cpu(p->sh_size);
+	shdr->sh_flags = mm81x_fle32_to_cpu(p->sh_flags);
+
+	return 0;
+}
+
+/*
+ * Program the firmware boot address register with the ELF entry point.
+ * Returns the bus write status (0 or a negative errno).
+ */
+static int mm81x_fw_set_boot_addr(struct mm81x *mors, u32 addr)
+{
+	int status;
+
+	dev_dbg(mors->dev, "Overwriting boot address to 0x%x\n", addr);
+	mm81x_claim_bus(mors);
+	status = mm81x_reg32_write(mors, MM81X_REG_BOOT_ADDR(mors), addr);
+	mm81x_release_bus(mors);
+	return status;
+}
+
+/*
+ * Load a firmware ELF into the chip: write each PT_LOAD program segment
+ * over the bus (flash-resident segments are skipped), parse the
+ * ".fw_info" section for metadata such as the BCF address, and program
+ * the boot address when the ELF specifies an entry point.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int mm81x_fw_load_fw(struct mm81x *mors, const struct firmware *fw)
+{
+	int i;
+	int ret = 0;
+	mm81x_elf_ehdr ehdr;
+	mm81x_elf_phdr phdr;
+	mm81x_elf_shdr shdr;
+	mm81x_elf_shdr sh_strtab;
+	const char *sh_strs;
+
+	/* Bounce buffer for word-aligned bus writes */
+	u8 *fw_buf = devm_kmalloc(mors->dev, ROUND_BYTES_TO_WORD(fw->size),
+				  GFP_KERNEL);
+
+	if (!fw_buf)
+		return -ENOMEM;
+
+	if (mm81x_fw_get_header(fw->data, &ehdr)) {
+		dev_err(mors->dev, "Wrong file format\n");
+		ret = -EINVAL;
+		goto out_free;
+	}
+
+	if (mm81x_fw_get_section_header(fw->data, &ehdr, &sh_strtab,
+					ehdr.e_shstrndx)) {
+		dev_err(mors->dev, "Invalid firmware. Missing string table\n");
+		ret = -ENOENT;
+		goto out_free;
+	}
+
+	sh_strs = (const char *)fw->data + sh_strtab.sh_offset;
+
+	for (i = 0; i < ehdr.e_phnum; i++) {
+		int status;
+		u32 address;
+
+		mm81x_elf_phdr *p = (mm81x_elf_phdr *)(fw->data + ehdr.e_phoff +
+						       i * ehdr.e_phentsize);
+
+		phdr.p_type = le32_to_cpu((__force __le32)p->p_type);
+		phdr.p_offset = le32_to_cpu((__force __le32)p->p_offset);
+		phdr.p_paddr = le32_to_cpu((__force __le32)p->p_paddr);
+		phdr.p_filesz = le32_to_cpu((__force __le32)p->p_filesz);
+		phdr.p_memsz = le32_to_cpu((__force __le32)p->p_memsz);
+
+		/* Flash-resident segments are programmed separately */
+		address = phdr.p_paddr;
+		if (address == IFLASH_BASE_ADDR || address == DFLASH_BASE_ADDR)
+			continue;
+
+		if (phdr.p_type != PT_LOAD || !phdr.p_memsz)
+			continue;
+
+		if (phdr.p_filesz && phdr.p_offset &&
+		    (phdr.p_offset + phdr.p_filesz) < fw->size) {
+			u32 padded_size = ROUND_BYTES_TO_WORD(phdr.p_filesz);
+
+			/*
+			 * Copy only the bytes present in the image, then pad
+			 * to a word boundary with 0xff. Copying padded_size
+			 * directly would read past the end of the segment
+			 * (and possibly past the end of the firmware data).
+			 */
+			memcpy(fw_buf, fw->data + phdr.p_offset,
+			       phdr.p_filesz);
+			memset(fw_buf + phdr.p_filesz, 0xff,
+			       padded_size - phdr.p_filesz);
+			mm81x_claim_bus(mors);
+			status = mm81x_dm_write(mors, address, fw_buf,
+						padded_size);
+			mm81x_release_bus(mors);
+			if (status) {
+				ret = -EIO;
+				break;
+			}
+		}
+	}
+
+	/* Don't let a later step mask a segment write failure */
+	if (ret)
+		goto out_free;
+
+	for (i = 0; i < ehdr.e_shnum; i++) {
+		if (mm81x_fw_get_section_header(fw->data, &ehdr, &shdr, i))
+			continue;
+
+		/* This is the firmware info. Parse it */
+		if (!strncmp(sh_strs + shdr.sh_name, ".fw_info",
+			     sizeof(".fw_info")))
+			mm81x_fw_parse_info(mors, fw->data + shdr.sh_offset,
+					    shdr.sh_size);
+	}
+
+	if (ehdr.e_entry)
+		ret = mm81x_fw_set_boot_addr(mors, ehdr.e_entry);
+
+out_free:
+	devm_kfree(mors->dev, fw_buf);
+	return ret;
+}
+
+/*
+ * Copy @src_len bytes into the @scratch bounce buffer, pad to a word
+ * boundary with 0xff, and write the result to the chip at @addr.
+ *
+ * Returns 0 on success, -EINVAL if the padded length exceeds the
+ * scratch capacity, -EFBIG if it exceeds the BCF database region, or
+ * -EIO on a bus write failure.
+ */
+static int __mm81x_fw_load_bcf(struct mm81x *mors, unsigned int addr,
+			       const void *src, size_t src_len, u8 *scratch,
+			       size_t scratch_cap)
+{
+	size_t rounded = ROUND_BYTES_TO_WORD(src_len);
+	int st;
+
+	if (rounded > scratch_cap)
+		return -EINVAL;
+	if (rounded > BCF_DATABASE_SIZE)
+		return -EFBIG;
+
+	memcpy(scratch, src, src_len);
+	if (rounded > src_len)
+		memset(scratch + src_len, 0xff, rounded - src_len);
+
+	mm81x_claim_bus(mors);
+	st = mm81x_dm_write(mors, addr, scratch, rounded);
+	mm81x_release_bus(mors);
+
+	return st ? -EIO : 0;
+}
+
+/*
+ * Write the board configuration into chip memory at @bcf_address:
+ * first the ".board_config" section (if present), then the regdomain
+ * section (".regdom_<country>") matching mors->country placed
+ * immediately after it.
+ *
+ * Returns 0 on success, -EINVAL for a malformed BCF or when no
+ * regdomain section matches the configured country, -ENOENT when the
+ * string table is missing, -EFBIG when the combined sections exceed
+ * the BCF database region, or a bus error.
+ */
+static int mm81x_fw_load_bcf(struct mm81x *mors, const struct firmware *bcf,
+			     unsigned int bcf_address)
+{
+	int i, ret = 0;
+	size_t reg_prefix_len, cfg_len_rounded = 0, reg_len_rounded;
+	mm81x_elf_ehdr ehdr;
+	mm81x_elf_shdr shdr, sh_strtab;
+	const char *sh_strs, *reg_prefix = ".regdom_", *reg_src;
+	size_t reg_len;
+	u8 *bcf_buf;
+
+	/* Bounce buffer for word-aligned bus writes */
+	bcf_buf = devm_kmalloc(mors->dev, ROUND_BYTES_TO_WORD(bcf->size),
+			       GFP_KERNEL);
+	if (!bcf_buf)
+		return -ENOMEM;
+
+	if (mm81x_fw_get_header(bcf->data, &ehdr)) {
+		dev_err(mors->dev, "Wrong file format");
+		ret = -EINVAL;
+		goto out_free;
+	}
+
+	if (mm81x_fw_get_section_header(bcf->data, &ehdr, &sh_strtab,
+					ehdr.e_shstrndx)) {
+		dev_err(mors->dev, "Invalid BCF - missing string table");
+		ret = -ENOENT;
+		goto out_free;
+	}
+
+	sh_strs = (const char *)bcf->data + sh_strtab.sh_offset;
+	reg_prefix_len = strlen(reg_prefix);
+
+	/* Phase 1: write the board config section and advance the address */
+	for (i = 0; i < ehdr.e_shnum; i++) {
+		if (mm81x_fw_get_section_header(bcf->data, &ehdr, &shdr, i))
+			continue;
+		if (strcmp(sh_strs + shdr.sh_name, ".board_config"))
+			continue;
+
+		cfg_len_rounded = ROUND_BYTES_TO_WORD(shdr.sh_size);
+		dev_dbg(mors->dev,
+			"Write BCF board_config - addr 0x%x size %zu",
+			bcf_address, cfg_len_rounded);
+
+		ret = __mm81x_fw_load_bcf(mors, bcf_address,
+					  bcf->data + shdr.sh_offset,
+					  shdr.sh_size, bcf_buf,
+					  ROUND_BYTES_TO_WORD(bcf->size));
+		if (ret)
+			goto out_free;
+
+		bcf_address += cfg_len_rounded;
+		break;
+	}
+
+	/*
+	 * Phase 2: continue from the board config section, looking for the
+	 * regdomain section whose country code matches ours. ret stays
+	 * -EINVAL if none matches.
+	 */
+	ret = -EINVAL;
+	for (; i < ehdr.e_shnum; i++) {
+		if (mm81x_fw_get_section_header(bcf->data, &ehdr, &shdr, i))
+			continue;
+		if (strncmp(sh_strs + shdr.sh_name, reg_prefix, reg_prefix_len))
+			continue;
+		if (strncmp(sh_strs + shdr.sh_name + reg_prefix_len,
+			    mors->country, 2))
+			continue;
+
+		reg_src = bcf->data + shdr.sh_offset;
+		reg_len = shdr.sh_size;
+		dev_dbg(mors->dev, "Write BCF %s - addr 0x%x size %zu",
+			sh_strs + shdr.sh_name, bcf_address,
+			ROUND_BYTES_TO_WORD(reg_len));
+		ret = 0;
+		break;
+	}
+
+	if (ret)
+		goto out_free;
+
+	/* Both sections must fit in the chip's BCF database region */
+	reg_len_rounded = ROUND_BYTES_TO_WORD(reg_len);
+	if ((cfg_len_rounded + reg_len_rounded) > BCF_DATABASE_SIZE) {
+		ret = -EFBIG;
+		goto out_free;
+	}
+
+	ret = __mm81x_fw_load_bcf(mors, bcf_address, reg_src, reg_len, bcf_buf,
+				  ROUND_BYTES_TO_WORD(bcf->size));
+
+out_free:
+	devm_kfree(mors->dev, bcf_buf);
+	return ret;
+}
+
+/*
+ * Zero the always-on (AON) registers so stale latched flags do not
+ * affect the next boot, then toggle the AON latch. On USB the first AON
+ * word is instead written with the USB reset value to keep the USB
+ * power domain enabled across the reset.
+ * Caller must hold the bus (mm81x_claim_bus()).
+ */
+static void mm81x_fw_clear_aon(struct mm81x *mors)
+{
+	int idx;
+	u8 count = MM81X_REG_AON_COUNT(mors);
+	u32 address = MM81X_REG_AON_ADDR(mors);
+
+	for (idx = 0; idx < count; idx++, address += 4) {
+		if (mors->bus_type == MM81X_BUS_TYPE_USB && idx == 0)
+			/* Keep the USB power domain enabled in AON. */
+			mm81x_reg32_write(mors, address,
+					  MM81X_REG_AON_USB_RESET(mors));
+		else
+			/* clear AON */
+			mm81x_reg32_write(mors, address, 0x0);
+	}
+
+	mm81x_hw_toggle_aon_latch(mors);
+}
+
+/*
+ * Kick the chip into booting freshly downloaded firmware: clear stale AON
+ * state, program the early clock control register where one is defined,
+ * then raise the MSI doorbell and give the chip a moment to start.
+ */
+static void mm81x_fw_trigger(struct mm81x *mors)
+{
+ const unsigned int wait_after_msi_trigger_ms = 1;
+
+ mm81x_claim_bus(mors);
+ /*
+ * If not coming from a full reset, some AON flags may be latched.
+ * Make sure to clear any hanging AON bits (can affect booting).
+ */
+ mm81x_fw_clear_aon(mors);
+
+ /* Only chips whose reg map defines a (non-zero) early clock-control
+ * register need this write.
+ */
+ if (MM81X_REG_CLK_CTRL(mors))
+ mm81x_reg32_write(mors, MM81X_REG_CLK_CTRL(mors),
+ MM81X_REG_CLK_CTRL_VALUE(mors));
+
+ /* Ring the doorbell interrupt that starts the firmware. */
+ mm81x_reg32_write(mors, MM81X_REG_MSI(mors),
+ MM81X_REG_MSI_HOST_INT(mors));
+ mm81x_release_bus(mors);
+
+ /* Give the chip a chance to boot */
+ mdelay(wait_after_msi_trigger_ms);
+}
+
+/*
+ * Compare the magic number published by the freshly booted firmware with
+ * the expected per-chip value.
+ *
+ * Return: 0 when the magic matches, -EIO otherwise.
+ */
+static int mm81x_fw_verify_magic(struct mm81x *mors)
+{
+ int ret = 0;
+ /* u32 to match the reg32_read() prototype; seeded with the
+ * complement so a failed read can never compare equal.
+ */
+ u32 magic = ~MM81X_REG_HOST_MAGIC_VALUE(mors);
+
+ mm81x_claim_bus(mors);
+ mm81x_reg32_read(mors,
+ mors->host_table_ptr +
+ offsetof(struct host_table, magic_number),
+ &magic);
+
+ if (magic != MM81X_REG_HOST_MAGIC_VALUE(mors)) {
+ dev_err(mors->dev, "FW magic mismatch 0x%08x:0x%08x",
+ MM81X_REG_HOST_MAGIC_VALUE(mors), magic);
+ ret = -EIO;
+ }
+
+ mm81x_release_bus(mors);
+ return ret;
+}
+
+/*
+ * Cache the firmware_flags word from the chip's host table into
+ * mors->firmware_flags.
+ *
+ * Return: 0 on success or the bus error code.
+ */
+static int mm81x_fw_get_flags(struct mm81x *mors)
+{
+ int ret = 0;
+ /* u32 to match the reg32_read() prototype. */
+ u32 fw_flags = 0;
+
+ mm81x_claim_bus(mors);
+ ret = mm81x_reg32_read(mors,
+ mors->host_table_ptr +
+ offsetof(struct host_table,
+ firmware_flags),
+ &fw_flags);
+ mors->firmware_flags = fw_flags;
+ mm81x_release_bus(mors);
+
+ return ret;
+}
+
+/*
+ * Check the running firmware's semantic version against the version the
+ * driver's command interface was built for.
+ *
+ * A major-version mismatch is fatal (-EPERM); a minor mismatch only
+ * warns, as some features may be unavailable.
+ *
+ * Return: 0 when compatible, -EPERM on a major mismatch, or a bus error.
+ */
+static int mm81x_fw_check_compatibility(struct mm81x *mors)
+{
+ int ret;
+ u32 fw_version = 0;
+ u32 major;
+ u32 minor;
+ u32 patch;
+
+ mm81x_claim_bus(mors);
+ ret = mm81x_reg32_read(mors,
+ mors->host_table_ptr +
+ offsetof(struct host_table,
+ fw_version_number),
+ &fw_version);
+ mm81x_release_bus(mors);
+
+ /* Bail before decoding: fw_version is meaningless on a failed read. */
+ if (ret)
+ return ret;
+
+ major = MM81X_SEMVER_GET_MAJOR(fw_version);
+ minor = MM81X_SEMVER_GET_MINOR(fw_version);
+ patch = MM81X_SEMVER_GET_PATCH(fw_version);
+
+ /* Firmware on device must be recent enough for driver */
+ if (major != HOST_CMD_SEMVER_MAJOR) {
+ dev_err(mors->dev,
+ "Incompatible FW version: (Driver) %d.%d.%d, (Chip) %d.%d.%d\n",
+ HOST_CMD_SEMVER_MAJOR, HOST_CMD_SEMVER_MINOR,
+ HOST_CMD_SEMVER_PATCH, major, minor, patch);
+ return -EPERM;
+ }
+
+ if (minor != HOST_CMD_SEMVER_MINOR)
+ dev_warn(
+ mors->dev,
+ "FW version mismatch, some features might not be supported: (Driver) %d.%d.%d, (Chip) %d.%d.%d",
+ HOST_CMD_SEMVER_MAJOR, HOST_CMD_SEMVER_MINOR,
+ HOST_CMD_SEMVER_PATCH, major, minor, patch);
+
+ return 0;
+}
+
+/*
+ * Forget the cached host table pointer and clear the on-chip manifest
+ * pointer register, so a stale table cannot be picked up after a reboot.
+ */
+static int mm81x_fw_invalidate_host_ptr(struct mm81x *mors)
+{
+ int ret;
+
+ mors->host_table_ptr = 0;
+
+ mm81x_claim_bus(mors);
+ ret = mm81x_reg32_write(mors, MM81X_REG_HOST_MANIFEST_PTR(mors), 0);
+ mm81x_release_bus(mors);
+
+ return ret;
+}
+
+/*
+ * Poll the manifest pointer register until the booted firmware publishes
+ * its host table address, or MAX_WAIT_FOR_HOST_TABLE_PTR_MS elapses.
+ *
+ * Return: 0 once mors->host_table_ptr is non-zero, -EIO on timeout, or
+ * the last bus error observed.
+ */
+static int mm81x_fw_get_host_table_ptr(struct mm81x *mors)
+{
+ unsigned long deadline =
+ jiffies + msecs_to_jiffies(MAX_WAIT_FOR_HOST_TABLE_PTR_MS);
+ int ret;
+
+ mm81x_claim_bus(mors);
+ for (;;) {
+ ret = mm81x_reg32_read(mors, MM81X_REG_HOST_MANIFEST_PTR(mors),
+ &mors->host_table_ptr);
+ if (mors->host_table_ptr)
+ break;
+
+ if (time_after(jiffies, deadline)) {
+ ret = -EIO;
+ break;
+ }
+
+ usleep_range(5000, 10000);
+ }
+ mm81x_release_bus(mors);
+
+ return ret;
+}
+
+/*
+ * Read the firmware's extended host table into a newly allocated buffer.
+ *
+ * On success, *ext_host_table points to a kmalloc'd copy which the caller
+ * must kfree(); on failure any allocation is released here.
+ *
+ * Return: 0 on success, -ENXIO when the chip publishes no extended table,
+ * -EINVAL for an implausible length, -ENOMEM, or a bus error code.
+ */
+static int mm81x_fw_read_ext_host_table(struct mm81x *mors,
+ struct ext_host_tbl **ext_host_table)
+{
+ int ret = 0;
+ u32 host_tbl_ptr = mors->host_table_ptr;
+ u32 ext_host_tbl_ptr;
+ u32 ext_host_tbl_ptr_addr =
+ host_tbl_ptr + offsetof(struct host_table, ext_host_tbl_addr);
+ u32 ext_host_tbl_len;
+ u32 ext_host_tbl_len_ptr_addr;
+ struct ext_host_tbl *host_tbl = NULL;
+
+ mm81x_claim_bus(mors);
+ /* Fetch the device-side address of the extended table. */
+ ret = mm81x_reg32_read(mors, ext_host_tbl_ptr_addr, &ext_host_tbl_ptr);
+ if (ret)
+ goto exit;
+
+ /* A NULL device pointer means this firmware has no extended table. */
+ if (!ext_host_tbl_ptr) {
+ ret = -ENXIO;
+ goto exit;
+ }
+
+ ext_host_tbl_len_ptr_addr =
+ ext_host_tbl_ptr +
+ offsetof(struct ext_host_tbl, ext_host_tbl_length);
+
+ ret = mm81x_reg32_read(mors, ext_host_tbl_len_ptr_addr,
+ &ext_host_tbl_len);
+ if (ret)
+ goto exit;
+
+ /* Word-align the length; reject zero or absurdly large tables. */
+ ext_host_tbl_len = ROUND_BYTES_TO_WORD(ext_host_tbl_len);
+ if (WARN_ON(ext_host_tbl_len == 0 || ext_host_tbl_len > INT_MAX)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ host_tbl = kmalloc(ext_host_tbl_len, GFP_KERNEL);
+ if (!host_tbl) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* Bulk-copy the whole table out of device memory. */
+ ret = mm81x_dm_read(mors, ext_host_tbl_ptr, (u8 *)host_tbl,
+ (int)ext_host_tbl_len);
+ if (ret)
+ goto exit;
+
+ /* Success: ownership of host_tbl passes to the caller. */
+ mm81x_release_bus(mors);
+ *ext_host_table = host_tbl;
+ return ret;
+
+exit:
+ mm81x_release_bus(mors);
+ kfree(host_tbl);
+ return ret;
+}
+
+/*
+ * Copy the S1G capability TLV reported by firmware into mors->fw_caps
+ * and log each field at debug level.
+ */
+static void mm81x_fw_update_capabilities(struct mm81x *mors,
+ struct ext_host_tbl_s1g_caps *caps)
+{
+ struct mm81x *m = mors;
+ int word;
+
+ for (word = 0; word < FW_CAPABILITIES_FLAGS_WIDTH; word++) {
+ u32 flags = le32_to_cpu(caps->flags[word]);
+
+ m->fw_caps.flags[word] = flags;
+ dev_dbg(m->dev, "Firmware Manifest Flags%d: 0x%x", word,
+ flags);
+ }
+
+ m->fw_caps.ampdu_mss = caps->ampdu_mss;
+ m->fw_caps.mm81x_mmss_offset = caps->mm81x_mmss_offset;
+ m->fw_caps.beamformee_sts_capability =
+ caps->beamformee_sts_capability;
+ m->fw_caps.maximum_ampdu_length_exponent =
+ caps->maximum_ampdu_length;
+ m->fw_caps.number_sounding_dimensions =
+ caps->number_sounding_dimensions;
+
+ dev_dbg(m->dev, "\tAMPDU Minimum start spacing: %u",
+ caps->ampdu_mss);
+ dev_dbg(m->dev, "\tMorse Minimum Start Spacing offset: %u",
+ caps->mm81x_mmss_offset);
+ dev_dbg(m->dev, "\tBeamformee STS Capability: %u",
+ caps->beamformee_sts_capability);
+ dev_dbg(m->dev, "\tNumber of Sounding Dimensions: %u",
+ caps->number_sounding_dimensions);
+ dev_dbg(m->dev, "\tMaximum AMPDU Length Exponent: %u",
+ caps->maximum_ampdu_length);
+}
+
+/*
+ * Record whether the firmware inserts and validates skb checksums, as
+ * reported by its host table TLV.
+ */
+static void mm81x_fw_update_validate_skb_checksum(
+ struct mm81x *mors,
+ struct ext_host_tbl_insert_skb_checksum *validate_checksum)
+{
+ u8 enabled = validate_checksum->insert_and_validate_checksum;
+
+ mors->hif.validate_skb_checksum = enabled;
+ dev_dbg(mors->dev, "Validate checksum inserted by fw %s",
+ str_enabled_disabled(mors->hif.validate_skb_checksum));
+}
+
+/*
+ * Fetch and parse the firmware's extended host table TLVs, dispatching
+ * each recognised tag to its handler. Unknown tags are skipped.
+ *
+ * Return: 0 on success or a negative error code when the table could not
+ * be read.
+ */
+int mm81x_fw_parse_ext_host_tbl(struct mm81x *mors)
+{
+ int ret;
+ u8 *head;
+ u8 *end;
+ struct ext_host_tbl *ext_host_table = NULL;
+
+ ret = mm81x_fw_read_ext_host_table(mors, &ext_host_table);
+ if (ret || !ext_host_table) {
+ /* Never report "error 0" for a missing table. */
+ if (!ret)
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ /* Parse the TLVs */
+ head = ext_host_table->ext_host_table_data_tlvs;
+ end = ((u8 *)ext_host_table) +
+ le32_to_cpu(ext_host_table->ext_host_tbl_length);
+
+ /* Only dereference a header that fits entirely within the table. */
+ while (head + sizeof(struct ext_host_tbl_tlv_hdr) <= end) {
+ struct ext_host_tbl_tlv_hdr *hdr =
+ (struct ext_host_tbl_tlv_hdr *)head;
+ u16 tlv_len = le16_to_cpu(hdr->length);
+
+ /* A zero length would loop forever and an overrunning one
+ * would read past the table - stop parsing either way.
+ */
+ if (!tlv_len || head + tlv_len > end)
+ break;
+
+ switch (le16_to_cpu(hdr->tag)) {
+ case MM81X_FW_HOST_TABLE_TAG_S1G_CAPABILITIES:
+ mm81x_fw_update_capabilities(
+ mors, (struct ext_host_tbl_s1g_caps *)hdr);
+ break;
+
+ case MM81X_FW_HOST_TABLE_TAG_INSERT_SKB_CHECKSUM:
+ mm81x_fw_update_validate_skb_checksum(
+ mors,
+ (struct ext_host_tbl_insert_skb_checksum *)hdr);
+ break;
+
+ case MM81X_FW_HOST_TABLE_TAG_YAPS_TABLE:
+ mm81x_yaps_hw_read_table(
+ mors, &((struct ext_host_tbl_yaps_table *)hdr)
+ ->yaps_table);
+ break;
+ default:
+ break;
+ }
+
+ head += tlv_len;
+ }
+
+ kfree(ext_host_table);
+ return 0;
+exit:
+ dev_err(mors->dev, "failed to parse ext host table %d", ret);
+ return ret;
+}
+
+/*
+ * One firmware download/boot attempt: reset the chip if required, load
+ * the firmware image and board configuration, trigger boot, then wait
+ * for and validate the host table the firmware publishes.
+ *
+ * Return: 0 on success or the negative error from the failing step.
+ */
+static int __mm81x_fw_flash(struct mm81x *mors, const struct firmware *fw,
+ const struct firmware *bcf, bool reset)
+{
+ int ret;
+
+ /* Skip the reset when the chip is already freshly reset. */
+ if (reset || !mors->chip_was_reset) {
+ ret = mm81x_hw_digital_reset(mors);
+ if (ret)
+ return ret;
+ }
+
+ /* Bursting must be disabled for the download (re-enabled below). */
+ mm81x_hw_pre_firmware_ndr_hook(mors);
+
+ ret = mm81x_fw_invalidate_host_ptr(mors);
+ if (ret)
+ return ret;
+
+ ret = mm81x_fw_load_fw(mors, fw);
+ if (ret)
+ return ret;
+
+ ret = mm81x_fw_load_bcf(mors, bcf, mors->bcf_address);
+ if (ret)
+ return ret;
+
+ mm81x_fw_trigger(mors);
+ mm81x_hw_post_firmware_ndr_hook(mors);
+
+ /* Poll for the host table pointer the booted firmware publishes. */
+ ret = mm81x_fw_get_host_table_ptr(mors);
+ if (ret)
+ return ret;
+
+ ret = mm81x_fw_verify_magic(mors);
+ if (ret)
+ return ret;
+
+ return mm81x_fw_check_compatibility(mors);
+}
+
+/*
+ * Retry wrapper around __mm81x_fw_flash(): attempts the download up to
+ * FW_FLASH_ATTEMPT_COUNT times, forcing a full digital reset on every
+ * retry.
+ *
+ * Return: 0 on success, otherwise the error from the last attempt.
+ */
+static int mm81x_fw_flash(struct mm81x *mors, const struct firmware *fw,
+ const struct firmware *bcf, bool reset)
+{
+ /* Initialised defensively so a zero attempt count cannot return an
+ * indeterminate value.
+ */
+ int ret = -EIO;
+ int retries = FW_FLASH_ATTEMPT_COUNT;
+
+ while (retries--) {
+ ret = __mm81x_fw_flash(mors, fw, bcf, reset);
+ if (!ret)
+ return 0;
+
+ /* Force a full reset before the next attempt. */
+ mors->chip_was_reset = false;
+ }
+
+ return ret;
+}
+
+/* CRC32 (inverted little-endian form) over a complete firmware image. */
+static uint32_t binary_crc(const struct firmware *fw)
+{
+ u32 crc = crc32_le(~0, (unsigned char const *)fw->data, fw->size);
+
+ return ~crc & 0xffffffff;
+}
+
+/*
+ * Load firmware and board configuration (BCF) onto the chip and bring
+ * the firmware up.
+ *
+ * The BCF path comes from board_config_file when that is non-empty,
+ * otherwise from the board type programmed in OTP. @reset forces a
+ * digital reset before flashing.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+int mm81x_fw_init(struct mm81x *mors, bool reset)
+{
+ int ret;
+ int n;
+ int board_id;
+ char *fw_path;
+ char bcf_path[MAX_BCF_NAME_LEN];
+ const struct firmware *fw = NULL;
+ const struct firmware *bcf = NULL;
+
+ fw_path = mm81x_core_get_fw_path(mors->chip_id);
+ if (!fw_path)
+ return -ENOMEM;
+
+ board_id = mm81x_hw_otp_get_board_type(mors);
+
+ /* An explicitly configured BCF file takes precedence over OTP. */
+ if (strlen(board_config_file) > 0) {
+ n = snprintf(bcf_path, sizeof(bcf_path), "%s/%s", MM81X_FW_DIR,
+ board_config_file);
+ } else if (mm81x_hw_otp_valid_board_type(board_id)) {
+ dev_dbg(mors->dev, "Using board type 0x%04x from OTP",
+ board_id);
+ n = snprintf(bcf_path, sizeof(bcf_path),
+ "%s/bcf_boardtype_%04x.bin", MM81X_FW_DIR,
+ board_id);
+ } else {
+ dev_err(mors->dev, "BCF or Serial parameters are not defined");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* n < 0: snprintf error; n >= sizeof: the path was truncated. */
+ if (n < 0 || n >= sizeof(bcf_path)) {
+ dev_err(mors->dev, "Failed to create BCF path");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = request_firmware(&fw, fw_path, mors->dev);
+ if (ret) {
+ if (ret == -ENOENT)
+ dev_err(mors->dev, "Firmware %s not found\n", fw_path);
+ goto out;
+ }
+
+ dev_info(mors->dev, "Loaded firmware from %s, size %zu, crc32 0x%08x\n",
+ fw_path, fw->size, binary_crc(fw));
+
+ ret = request_firmware(&bcf, bcf_path, mors->dev);
+ if (ret) {
+ if (ret == -ENOENT)
+ dev_err(mors->dev, "BCF %s not found\n", bcf_path);
+ goto out;
+ }
+
+ dev_info(mors->dev, "Loaded BCF from %s, size %zu, crc32 0x%08x\n",
+ bcf_path, bcf->size, binary_crc(bcf));
+
+ ret = mm81x_fw_flash(mors, fw, bcf, reset);
+ if (ret) {
+ dev_err(mors->dev, "failed to flash firmware: %d", ret);
+ goto out;
+ }
+
+ ret = mm81x_fw_get_flags(mors);
+
+out:
+ /* release_firmware() tolerates NULL, so this is safe on all paths. */
+ release_firmware(fw);
+ release_firmware(bcf);
+ kfree(fw_path);
+
+ if (ret)
+ dev_err(mors->dev, "failed to init firmware: %d", ret);
+ else
+ dev_dbg(mors->dev, "firmware initialised");
+
+ return ret;
+}
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 08/31] wifi: mm81x: add fw.h
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (6 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 07/31] wifi: mm81x: add fw.c Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 09/31] wifi: mm81x: add hif.h Lachlan Hodges
` (23 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/fw.h | 107 +++++++++++++++++++++
1 file changed, 107 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/fw.h
diff --git a/drivers/net/wireless/morsemicro/mm81x/fw.h b/drivers/net/wireless/morsemicro/mm81x/fw.h
new file mode 100644
index 000000000000..60477c59245d
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/fw.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+
+#ifndef _MM81X_FW_H_
+#define _MM81X_FW_H_
+
+#include <linux/firmware.h>
+#include <linux/completion.h>
+#include "yaps_hw.h"
+
+#define BCF_DATABASE_SIZE (1024)
+#define MM81X_FW_DIR "morsemicro"
+#define MM81X_FW_EXT ".bin"
+
+#define IFLASH_BASE_ADDR 0x400000
+#define DFLASH_BASE_ADDR 0xC00000
+
+#define MAX_BCF_NAME_LEN 64
+
+/* FW_CAPABILITIES_FLAGS_WIDTH = ceil(MM81X_CAPS_MAX_HW_LEN / 32) */
+#define FW_CAPABILITIES_FLAGS_WIDTH (4)
+
+/* Checkpatch does not like Camel Case */
+#define mm81x_elf_ehdr Elf32_Ehdr
+#define mm81x_elf_shdr Elf32_Shdr
+#define mm81x_elf_phdr Elf32_Phdr
+
+enum mm81x_fw_info_tlv_type {
+ MM81X_FW_INFO_TLV_BCF_ADDR = 1,
+};
+
+struct mm81x_fw_info_tlv {
+ __le16 type;
+ __le16 length;
+ u8 val[];
+} __packed;
+
+enum mm81x_fw_ext_host_tbl_tag {
+ /* The S1G capability tag */
+ MM81X_FW_HOST_TABLE_TAG_S1G_CAPABILITIES = 0,
+ MM81X_FW_HOST_TABLE_TAG_PAGER_BYPASS_TX_STATUS = 1,
+ MM81X_FW_HOST_TABLE_TAG_INSERT_SKB_CHECKSUM = 2,
+ MM81X_FW_HOST_TABLE_TAG_YAPS_TABLE = 3,
+ MM81X_FW_HOST_TABLE_TAG_PAGER_PKT_MEMORY = 4,
+ MM81X_FW_HOST_TABLE_TAG_PAGER_BYPASS_CMD_RESP = 5,
+};
+
+struct ext_host_tbl_tlv_hdr {
+ /* The tag used to identify which capability this represents */
+ __le16 tag;
+ /* The length of the capability structure including this header */
+ __le16 length;
+} __packed;
+
+struct ext_host_tbl_s1g_caps {
+ struct ext_host_tbl_tlv_hdr header;
+ __le32 flags[FW_CAPABILITIES_FLAGS_WIDTH];
+ /*
+ * The minimum A-MPDU start spacing required by firmware.
+ * Value | Description
+ * ------|------------
+ * 0 | No restriction
+ * 1 | 1/4 us
+ * 2 | 1/2 us
+ * 3 | 1 us
+ * 4 | 2 us
+ * 5 | 4 us
+ * 6 | 8 us
+ * 7 | 16 us
+ */
+ u8 ampdu_mss;
+ u8 beamformee_sts_capability;
+ u8 number_sounding_dimensions;
+ /*
+ * The maximum A-MPDU length. This is the exponent value such that
+ * (2^(13 + exponent) - 1) is the length
+ */
+ u8 maximum_ampdu_length;
+ /*
+ * Offset to apply to the specification's MMSS table to signal further
+ * minimum MPDU start spacing.
+ */
+ u8 mm81x_mmss_offset;
+} __packed;
+
+/* __packed for consistency with the other wire-format TLV structs; the
+ * lone u8 sits at offset 4 either way, so only sizeof changes.
+ */
+struct ext_host_tbl_insert_skb_checksum {
+ struct ext_host_tbl_tlv_hdr header;
+ u8 insert_and_validate_checksum;
+} __packed;
+
+struct ext_host_tbl_yaps_table {
+ struct ext_host_tbl_tlv_hdr header;
+ struct mm81x_yaps_hw_table yaps_table;
+} __packed;
+
+struct ext_host_tbl {
+ __le32 ext_host_tbl_length;
+ u8 dev_mac_addr[6];
+ u8 ext_host_table_data_tlvs[];
+} __packed;
+
+int mm81x_fw_init(struct mm81x *mors, bool reset);
+int mm81x_fw_parse_ext_host_tbl(struct mm81x *mors);
+
+#endif /* !_MM81X_FW_H_ */
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 09/31] wifi: mm81x: add hif.h
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (7 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 08/31] wifi: mm81x: add fw.h Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 10/31] wifi: mm81x: add hw.c Lachlan Hodges
` (22 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/hif.h | 117 ++++++++++++++++++++
1 file changed, 117 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/hif.h
diff --git a/drivers/net/wireless/morsemicro/mm81x/hif.h b/drivers/net/wireless/morsemicro/mm81x/hif.h
new file mode 100644
index 000000000000..e3d23423049a
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/hif.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+
+#ifndef _MM81X_HIF_H_
+#define _MM81X_HIF_H_
+
+#include "core.h"
+
+struct mm81x_skbq;
+
+#define MM81X_HIF_BYPASS_TX_STATUS_IRQ_NUM (15)
+#define MM81X_HIF_BYPASS_CMD_RESP_IRQ_NUM (29)
+#define MM81X_HIF_IRQ_BYPASS_TX_STATUS_AVAILABLE \
+ BIT(MM81X_HIF_BYPASS_TX_STATUS_IRQ_NUM)
+#define MM81X_HIF_IRQ_BYPASS_CMD_RESP_AVAILABLE \
+ BIT(MM81X_HIF_BYPASS_CMD_RESP_IRQ_NUM)
+
+/* Hardware IF interrupt mask. We may use any interrupts in this range */
+#define MM81X_HIF_IRQ_MASK_ALL \
+ (GENMASK(13, 0) | MM81X_HIF_IRQ_BYPASS_TX_STATUS_AVAILABLE | \
+ MM81X_HIF_IRQ_BYPASS_CMD_RESP_AVAILABLE)
+
+enum mm81x_hif_flags {
+ MM81X_HIF_FLAGS_DIR_TO_HOST = BIT(0),
+ MM81X_HIF_FLAGS_DIR_TO_CHIP = BIT(1),
+ MM81X_HIF_FLAGS_COMMAND = BIT(2),
+ MM81X_HIF_FLAGS_BEACON = BIT(3),
+ MM81X_HIF_FLAGS_DATA = BIT(4)
+};
+
+/* Host-interface backend operations; one implementation per HIF flavour. */
+struct mm81x_hif_ops {
+ int (*init)(struct mm81x *mors);
+ void (*flush_tx_data)(struct mm81x *mors);
+ void (*flush_cmds)(struct mm81x *mors);
+ void (*finish)(struct mm81x *mors);
+ /* Returns the array of TX queues and its length via @qs/@num_qs. */
+ void (*skbq_get_tx_qs)(struct mm81x *mors, struct mm81x_skbq **qs,
+ int *num_qs);
+ struct mm81x_skbq *(*get_tx_cmd_queue)(struct mm81x *mors);
+ struct mm81x_skbq *(*get_tx_beacon_queue)(struct mm81x *mors);
+ struct mm81x_skbq *(*get_tx_mgmt_queue)(struct mm81x *mors);
+ /* @aci selects the access category's data queue. */
+ struct mm81x_skbq *(*get_tx_data_queue)(struct mm81x *mors, int aci);
+ int (*handle_irq)(struct mm81x *mors, u32 status);
+ int (*get_tx_buffered_count)(struct mm81x *mors);
+ int (*get_tx_status_pending_count)(struct mm81x *mors);
+};
+
+/* Drop any pending HIF event flags. */
+static inline void mm81x_hif_clear_events(struct mm81x *mors)
+{
+ mors->hif.event_flags = 0;
+}
+
+/* Thin dispatch wrappers over the installed mm81x_hif_ops backend. */
+static inline int mm81x_hif_init(struct mm81x *mors)
+{
+ return mors->hif.ops->init(mors);
+}
+
+static inline void mm81x_hif_flush_tx_data(struct mm81x *mors)
+{
+ mors->hif.ops->flush_tx_data(mors);
+}
+
+static inline void mm81x_hif_flush_cmds(struct mm81x *mors)
+{
+ mors->hif.ops->flush_cmds(mors);
+}
+
+static inline void mm81x_hif_finish(struct mm81x *mors)
+{
+ mors->hif.ops->finish(mors);
+}
+
+/* Queue accessors: each forwards to the installed HIF backend. */
+static inline void mm81x_hif_skbq_get_tx_qs(struct mm81x *mors,
+ struct mm81x_skbq **qs, int *num_qs)
+{
+ mors->hif.ops->skbq_get_tx_qs(mors, qs, num_qs);
+}
+
+static inline struct mm81x_skbq *mm81x_hif_get_tx_cmd_queue(struct mm81x *mors)
+{
+ return mors->hif.ops->get_tx_cmd_queue(mors);
+}
+
+static inline struct mm81x_skbq *
+mm81x_hif_get_tx_beacon_queue(struct mm81x *mors)
+{
+ return mors->hif.ops->get_tx_beacon_queue(mors);
+}
+
+static inline struct mm81x_skbq *mm81x_hif_get_tx_mgmt_queue(struct mm81x *mors)
+{
+ return mors->hif.ops->get_tx_mgmt_queue(mors);
+}
+
+static inline struct mm81x_skbq *mm81x_hif_get_tx_data_queue(struct mm81x *mors,
+ int aci)
+{
+ return mors->hif.ops->get_tx_data_queue(mors, aci);
+}
+
+/* Dispatch a raw interrupt status word to the backend's handler. */
+static inline int mm81x_hif_handle_irq(struct mm81x *mors, u32 status)
+{
+ return mors->hif.ops->handle_irq(mors, status);
+}
+
+static inline int mm81x_hif_get_tx_buffered_count(struct mm81x *mors)
+{
+ return mors->hif.ops->get_tx_buffered_count(mors);
+}
+
+static inline int mm81x_hif_get_tx_status_pending_count(struct mm81x *mors)
+{
+ return mors->hif.ops->get_tx_status_pending_count(mors);
+}
+
+#endif /* _MM81X_HIF_H_ */
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 10/31] wifi: mm81x: add hw.c
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (8 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 09/31] wifi: mm81x: add hif.h Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 11/31] wifi: mm81x: add hw.h Lachlan Hodges
` (21 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/hw.c | 365 +++++++++++++++++++++
1 file changed, 365 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/hw.c
diff --git a/drivers/net/wireless/morsemicro/mm81x/hw.c b/drivers/net/wireless/morsemicro/mm81x/hw.c
new file mode 100644
index 000000000000..afa4cb6d1dd0
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/hw.c
@@ -0,0 +1,365 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/gpio.h>
+#include "hif.h"
+#include "mac.h"
+#include "bus.h"
+#include "core.h"
+#include "fw.h"
+#include "yaps.h"
+
+#define MM8108_REG_HOST_MAGIC_VALUE 0xDEADBEEF
+#define MM8108_REG_RESET_VALUE 0xDEAD
+
+#define MM8108_REG_SDIO_DEVICE_ADDR 0x0000207C
+
+#define MM8108_REG_SDIO_DEVICE_BURST_OFFSET 9
+#define MM8108_REG_TRGR_BASE 0x00003c00
+#define MM8108_REG_INT_BASE 0x00003c50
+#define MM8108_REG_MSI_ADDRESS 0x00004100
+#define MM8108_REG_MSI_VALUE 0x1
+#define MM8108_REG_MANIFEST_PTR_ADDRESS 0x00002d40
+#define MM8108_REG_APPS_BOOT_ADDR 0x00002084
+#define MM8108_REG_RESET 0x000020AC
+#define MM8108_REG_AON_COUNT 2
+
+#define MM8108_REG_AON_ADDR 0x00002114
+#define MM8108_REG_AON_LATCH_ADDR 0x00405020
+#define MM8108_REG_AON_LATCH_MASK 0x1
+#define MM8108_REG_AON_RESET_USB_VALUE 0x8
+#define MM8108_APPS_MAC_DMEM_ADDR_START 0x00100000
+
+#define MM8108_REG_RC_CLK_POWER_OFF_ADDR 0x00405020
+#define MM8108_REG_RC_CLK_POWER_OFF_MASK 0x00000040
+#define MM8108_SLOW_RC_POWER_ON_DELAY_MS 2
+
+#define MM8108_RESET_DELAY_TIME_MS 400
+
+#define MM8108_REG_OTPCTRL_PLDO 0x00004014
+#define MM8108_REG_OTPCTRL_PENVDD2 0x00004010
+#define MM8108_REG_OTPCTRL_PDSTB 0x00004018
+#define MM8108_REG_OTPCTRL_PTM 0x0000401c
+#define MM8108_REG_OTPCTRL_PCE 0x00004020
+#define MM8108_REG_OTPCTRL_PA 0x00004034
+#define MM8108_REG_OTPCTRL_PECCRDB 0x00004048
+#define MM8108_REG_OTPCTRL_ACTION_AUTO_RD_START 0x0000400c
+#define MM8108_REG_OTPCTRL_PDOUT 0x00004040
+
+#define MM81X_OTP_MAC_ADDR_2_BANK_NUM 27
+#define MM81X_OTP_MAC_ADDR_1_BANK_NUM 26
+#define MM81X_OTP_MAC_ADDR_1_MASK GENMASK(31, 16)
+#define MM81X_OTP_BOARD_TYPE_BANK_NUM 26
+#define MM81X_OTP_BOARD_TYPE_MASK GENMASK(15, 0)
+
+#define MM810X_BOARD_TYPE_MAX_VALUE (MM81X_OTP_BOARD_TYPE_MASK - 1)
+
+/*
+ * OTP access helpers. The write order and delays presumably follow the
+ * OTP macro's required power/read sequencing - confirm against the
+ * datasheet before reordering anything here.
+ */
+static void mm81x_hw_otp_power_up(struct mm81x *mors)
+{
+ mm81x_reg32_write(mors, MM8108_REG_OTPCTRL_PENVDD2, 1);
+ udelay(2);
+
+ mm81x_reg32_write(mors, MM8108_REG_OTPCTRL_PLDO, 1);
+ usleep_range(10, 20);
+
+ mm81x_reg32_write(mors, MM8108_REG_OTPCTRL_PDSTB, 1);
+ udelay(3);
+}
+
+/* Power down in the reverse order of mm81x_hw_otp_power_up(). */
+static void mm81x_hw_otp_power_down(struct mm81x *mors)
+{
+ mm81x_reg32_write(mors, MM8108_REG_OTPCTRL_PDSTB, 0);
+ mm81x_reg32_write(mors, MM8108_REG_OTPCTRL_PLDO, 0);
+ mm81x_reg32_write(mors, MM8108_REG_OTPCTRL_PENVDD2, 0);
+}
+
+/* Clear test mode (PTM) and enable the OTP cell (PCE) for reading. */
+static void mm81x_hw_otp_read_enable(struct mm81x *mors)
+{
+ mm81x_reg32_write(mors, MM8108_REG_OTPCTRL_PTM, 0);
+ mm81x_reg32_write(mors, MM8108_REG_OTPCTRL_PCE, 1);
+ usleep_range(10, 20);
+}
+
+/* Disable the OTP cell after reading. */
+static void mm81x_hw_otp_read_disable(struct mm81x *mors)
+{
+ mm81x_reg32_write(mors, MM8108_REG_OTPCTRL_PCE, 0);
+ udelay(1);
+}
+
+/*
+ * Read one 32-bit OTP bank into @buf.
+ *
+ * Starts an auto-read of @bank_num (optionally bypassing ECC via
+ * @ignore_ecc) and polls the completion bit up to five times.
+ *
+ * Return: 0 on success, -EIO when the auto-read never completes.
+ */
+static int mm81x_hw_otp_read(struct mm81x *mors, u8 bank_num, u32 *buf,
+ u8 ignore_ecc)
+{
+ u32 auto_rd_start_tmp;
+ u32 auto_rd_start = 1;
+ int i;
+
+ mm81x_reg32_write(mors, MM8108_REG_OTPCTRL_PA, bank_num);
+ mm81x_reg32_write(mors, MM8108_REG_OTPCTRL_PECCRDB, ignore_ecc);
+
+ mm81x_reg32_read(mors, MM8108_REG_OTPCTRL_ACTION_AUTO_RD_START,
+ &auto_rd_start_tmp);
+ auto_rd_start_tmp &= 0xfffffffe;
+
+ /* Set bit 0 to kick off the auto-read. */
+ mm81x_reg32_write(mors, MM8108_REG_OTPCTRL_ACTION_AUTO_RD_START,
+ auto_rd_start | auto_rd_start_tmp);
+
+ /* Attempt reading up to 5 times. */
+ for (i = 0; i < 5 && auto_rd_start; i++) {
+ usleep_range(15, 20);
+ mm81x_reg32_read(mors, MM8108_REG_OTPCTRL_ACTION_AUTO_RD_START,
+ &auto_rd_start_tmp);
+ auto_rd_start = auto_rd_start_tmp & 0x1;
+ }
+
+ /* Test the completion bit, not the loop counter: a read finishing
+ * on the final poll also leaves i == 5 but has succeeded.
+ */
+ if (auto_rd_start)
+ return -EIO;
+
+ mm81x_reg32_read(mors, MM8108_REG_OTPCTRL_PDOUT, buf);
+
+ return 0;
+}
+
+/*
+ * Read the board type word from OTP.
+ *
+ * Return: the 16-bit board type on success, -EINVAL when the OTP read
+ * fails.
+ */
+int mm81x_hw_otp_get_board_type(struct mm81x *mors)
+{
+ u32 otp_word = 0;
+ int err;
+
+ mm81x_claim_bus(mors);
+ mm81x_hw_otp_power_up(mors);
+ mm81x_hw_otp_read_enable(mors);
+
+ err = mm81x_hw_otp_read(mors, MM81X_OTP_BOARD_TYPE_BANK_NUM,
+ &otp_word, 1);
+
+ mm81x_hw_otp_read_disable(mors);
+ mm81x_hw_otp_power_down(mors);
+ mm81x_release_bus(mors);
+
+ if (err)
+ return -EINVAL;
+
+ return (int)(otp_word & MM81X_OTP_BOARD_TYPE_MASK);
+}
+
+/*
+ * A board type from OTP is usable when it is non-zero and strictly below
+ * the all-ones sentinel value.
+ */
+bool mm81x_hw_otp_valid_board_type(u32 board_type)
+{
+ if (board_type == 0)
+ return false;
+
+ return board_type < MM810X_BOARD_TYPE_MAX_VALUE;
+}
+
+/*
+ * Read the device MAC address out of two OTP banks into mors->macaddr.
+ *
+ * Return: 0 on success or the OTP read error.
+ */
+int mm81x_hw_otp_get_mac_addr(struct mm81x *mors)
+{
+ u32 mac1 = 0;
+ u32 mac2 = 0;
+ int ret = 0;
+
+ mm81x_claim_bus(mors);
+ mm81x_hw_otp_power_up(mors);
+ mm81x_hw_otp_read_enable(mors);
+
+ ret = mm81x_hw_otp_read(mors, MM81X_OTP_MAC_ADDR_1_BANK_NUM, &mac1, 1);
+ if (ret)
+ goto exit;
+
+ ret = mm81x_hw_otp_read(mors, MM81X_OTP_MAC_ADDR_2_BANK_NUM, &mac2, 1);
+ if (ret)
+ goto exit;
+
+ /*
+ * NOTE(review): these casts store host-endian words into a u8 array
+ * and &macaddr[2] is not naturally aligned for a u32 - presumably
+ * fine on the supported (little-endian, unaligned-tolerant) targets,
+ * but confirm on big-endian or alignment-strict architectures.
+ */
+ *((u16 *)&mors->macaddr[0]) = (mac1 & MM81X_OTP_MAC_ADDR_1_MASK) >> 16;
+ *((u32 *)&mors->macaddr[2]) = mac2;
+
+exit:
+ mm81x_hw_otp_read_disable(mors);
+ mm81x_hw_otp_power_down(mors);
+ mm81x_release_bus(mors);
+
+ return ret;
+}
+
+/*
+ * Enable or disable one chip interrupt line; lines >= 32 live in the
+ * second (INT2) bank. The line is acknowledged before the enable mask
+ * is written back.
+ */
+void mm81x_hw_irq_enable(struct mm81x *mors, u32 irq, bool enable)
+{
+ /* Initialised in case the register read fails below. */
+ u32 irq_en = 0;
+ u32 irq_en_addr = irq < 32 ? MM81X_REG_INT1_EN(mors) :
+ MM81X_REG_INT2_EN(mors);
+ u32 irq_clr_addr = irq < 32 ? MM81X_REG_INT1_CLR(mors) :
+ MM81X_REG_INT2_CLR(mors);
+ /* 1U: left-shifting a signed 1 by 31 is undefined behaviour. */
+ u32 mask = irq < 32 ? (1U << irq) : (1U << (irq - 32));
+
+ mm81x_claim_bus(mors);
+ mm81x_reg32_read(mors, irq_en_addr, &irq_en);
+ if (enable)
+ irq_en |= mask;
+ else
+ irq_en &= ~mask;
+ mm81x_reg32_write(mors, irq_clr_addr, mask);
+ mm81x_reg32_write(mors, irq_en_addr, irq_en);
+ mm81x_release_bus(mors);
+}
+
+/*
+ * Top-level chip interrupt dispatcher: read the bank-1 status word, hand
+ * HIF and per-VIF beacon bits to their handlers, then acknowledge exactly
+ * the bits observed.
+ *
+ * NOTE(review): no claim/release here - presumably the bus IRQ path that
+ * calls this already holds the bus; confirm at the call sites.
+ *
+ * Return: 1 when any interrupt was pending, 0 otherwise.
+ */
+int mm81x_hw_irq_handle(struct mm81x *mors)
+{
+ u32 status1 = 0;
+
+ mm81x_reg32_read(mors, MM81X_REG_INT1_STS(mors), &status1);
+
+ if (status1 & MM81X_HIF_IRQ_MASK_ALL)
+ mm81x_hif_handle_irq(mors, status1);
+
+ if (status1 & MM81X_INT_BEACON_VIF_MASK_ALL)
+ mm81x_mac_beacon_irq_handle(mors, status1);
+
+ /* Ack only the bits we actually read. */
+ mm81x_reg32_write(mors, MM81X_REG_INT1_CLR(mors), status1);
+
+ return status1 ? 1 : 0;
+}
+EXPORT_SYMBOL_GPL(mm81x_hw_irq_handle);
+
+/* Acknowledge every pending line in both interrupt banks. */
+void mm81x_hw_irq_clear(struct mm81x *mors)
+{
+ mm81x_claim_bus(mors);
+ mm81x_reg32_write(mors, MM81X_REG_INT1_CLR(mors), 0xFFFFFFFF);
+ mm81x_reg32_write(mors, MM81X_REG_INT2_CLR(mors), 0xFFFFFFFF);
+ mm81x_release_bus(mors);
+}
+
+/*
+ * Pulse the AON latch bit low-high-low with settle delays so previously
+ * written AON register values are captured. The 5 ms delays presumably
+ * match the hardware's latch timing - do not shorten without checking.
+ */
+void mm81x_hw_toggle_aon_latch(struct mm81x *mors)
+{
+ u32 address = MM81X_REG_AON_LATCH_ADDR(mors);
+ u32 mask = MM81X_REG_AON_LATCH_MASK(mors);
+ u32 latch;
+
+ mm81x_reg32_read(mors, address, &latch);
+ mm81x_reg32_write(mors, address, latch & ~(mask));
+ mdelay(5);
+ mm81x_reg32_write(mors, address, latch | mask);
+ mdelay(5);
+ mm81x_reg32_write(mors, address, latch & ~(mask));
+ mdelay(5);
+}
+
+/* Gate the chip's "stop notification" interrupt line on or off. */
+void mm81x_hw_enable_stop_notifications(struct mm81x *mors, bool enable)
+{
+ mm81x_hw_irq_enable(mors, MM81X_INT_HW_STOP_NOTIFICATION_NUM, enable);
+}
+
+/*
+ * Program the SDIO word-burst mode field of the device control register
+ * (read-modify-write). Errors are best effort: a failure only leaves
+ * the previous burst setting in place.
+ */
+void mm81x_hw_enable_burst_mode(struct mm81x *mors, const u8 burst_mode)
+{
+ u32 reg32_value;
+
+ mm81x_claim_bus(mors);
+ if (mm81x_reg32_read(mors, MM8108_REG_SDIO_DEVICE_ADDR, &reg32_value))
+ goto end;
+
+ reg32_value &= ~(u32)(SDIO_WORD_BURST_MASK
+ << MM8108_REG_SDIO_DEVICE_BURST_OFFSET);
+ reg32_value |= (u32)(burst_mode << MM8108_REG_SDIO_DEVICE_BURST_OFFSET);
+
+ dev_dbg(mors->dev,
+ "Setting Burst mode to %d Writing 0x%08X to the register",
+ burst_mode, reg32_value);
+
+ /* No recovery possible on write failure; redundant goto removed. */
+ mm81x_reg32_write(mors, MM8108_REG_SDIO_DEVICE_ADDR, reg32_value);
+
+end:
+ mm81x_release_bus(mors);
+}
+EXPORT_SYMBOL_GPL(mm81x_hw_enable_burst_mode);
+
+/*
+ * Power on the chip's internal slow RC clock (clear the power-off bit,
+ * latch AON, then wait for the clock to settle).
+ *
+ * Return: 0 on success or a bus error code.
+ */
+static int mm81x_hw_enable_internal_slow_clock(struct mm81x *mors)
+{
+ u32 val;
+ int err;
+
+ dev_dbg(mors->dev, "Enabling internal slow clock");
+
+ err = mm81x_reg32_read(mors, MM8108_REG_RC_CLK_POWER_OFF_ADDR, &val);
+ if (err)
+ return err;
+
+ val &= ~MM8108_REG_RC_CLK_POWER_OFF_MASK;
+ err = mm81x_reg32_write(mors, MM8108_REG_RC_CLK_POWER_OFF_ADDR, val);
+ if (err)
+ return err;
+
+ mm81x_hw_toggle_aon_latch(mors);
+
+ /* Wait for the clock to turn on and settle */
+ mdelay(MM8108_SLOW_RC_POWER_ON_DELAY_MS);
+
+ return 0;
+}
+
+/*
+ * Fully reset the chip's digital core.
+ *
+ * The internal slow clock must be enabled first so the chip can run
+ * through the reset. USB parts reset via the bus layer; others via the
+ * reset register (skipped when the register address is 0). Sets
+ * mors->chip_was_reset on success.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+int mm81x_hw_digital_reset(struct mm81x *mors)
+{
+ int ret = 0;
+
+ mm81x_claim_bus(mors);
+
+ /* This should be the first step in digital reset, do not reorder */
+ ret = mm81x_hw_enable_internal_slow_clock(mors);
+ if (ret)
+ goto exit;
+
+ if (mors->bus_type == MM81X_BUS_TYPE_USB) {
+ ret = mm81x_bus_digital_reset(mors);
+ goto usb_done;
+ }
+
+ /* A zero reset address means this chip has no reset register. */
+ if (MM81X_REG_RESET(mors) != 0)
+ ret = mm81x_reg32_write(mors, MM81X_REG_RESET(mors),
+ MM81X_REG_RESET_VALUE(mors));
+
+usb_done:
+ /* Both reset flavours need time to complete before bus traffic. */
+ msleep(MM8108_RESET_DELAY_TIME_MS);
+exit:
+ mm81x_release_bus(mors);
+
+ if (!ret)
+ mors->chip_was_reset = true;
+
+ return ret;
+}
+
+/* Called before firmware download starts. */
+void mm81x_hw_pre_firmware_ndr_hook(struct mm81x *mors)
+{
+ /* We need to disable bursting for the firmware download/init procedure */
+ mm81x_bus_config_burst_mode(mors, false);
+}
+
+/* Called after firmware download/boot completes. */
+void mm81x_hw_post_firmware_ndr_hook(struct mm81x *mors)
+{
+ /* We are safe here to re-enable bursting, if supported */
+ mm81x_bus_config_burst_mode(mors, true);
+}
+
+/* Register addresses and boot constants specific to the MM8108 family. */
+const struct mm81x_regs mm8108_regs = {
+ .chip_id_address = MM8108_REG_CHIP_ID,
+ .irq_base_address = MM8108_REG_INT_BASE,
+ .trgr_base_address = MM8108_REG_TRGR_BASE,
+ .cpu_reset_address = MM8108_REG_RESET,
+ .cpu_reset_value = MM8108_REG_RESET_VALUE,
+ .manifest_ptr_address = MM8108_REG_MANIFEST_PTR_ADDRESS,
+ .msi_address = MM8108_REG_MSI_ADDRESS,
+ .msi_value = MM8108_REG_MSI_VALUE,
+ .magic_num_value = MM8108_REG_HOST_MAGIC_VALUE,
+ /* 0: the MM8108 has no early clock-control register. */
+ .early_clk_ctrl_value = 0,
+ .pager_base_address = MM8108_APPS_MAC_DMEM_ADDR_START,
+ .aon_latch = MM8108_REG_AON_LATCH_ADDR,
+ .aon_latch_mask = MM8108_REG_AON_LATCH_MASK,
+ .aon_reset_usb_value = MM8108_REG_AON_RESET_USB_VALUE,
+ .aon = MM8108_REG_AON_ADDR,
+ .aon_count = MM8108_REG_AON_COUNT,
+ .boot_address = MM8108_REG_APPS_BOOT_ADDR,
+};
+
+/* B2 ROM_LINKED */
+MODULE_FIRMWARE(MM81X_FW_DIR "/" MM8108_FW_BASE MM8108B2_REV_STRING
+ FW_ROM_LINKED_STRING MM81X_FW_EXT);
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 11/31] wifi: mm81x: add hw.h
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (9 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 10/31] wifi: mm81x: add hw.c Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 12/31] wifi: mm81x: add mac.c Lachlan Hodges
` (20 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/hw.h | 176 +++++++++++++++++++++
1 file changed, 176 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/hw.h
diff --git a/drivers/net/wireless/morsemicro/mm81x/hw.h b/drivers/net/wireless/morsemicro/mm81x/hw.h
new file mode 100644
index 000000000000..e22948f037bf
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/hw.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+
+#ifndef _MM81X_HW_H_
+#define _MM81X_HW_H_
+
+#include <linux/gpio/consumer.h>
+#include "core.h"
+#include "command_defs.h"
+
+/* This should be at a fixed location for a family of chipset */
+#define MM8108_REG_CHIP_ID 0x00002d20
+
+#define MM81X_SDIO_RW_ADDR_BOUNDARY_MASK ((u32)0xFFFF0000)
+
+#define MM81X_CONFIG_ACCESS_1BYTE 0
+#define MM81X_CONFIG_ACCESS_2BYTE 1
+#define MM81X_CONFIG_ACCESS_4BYTE 2
+
+#define MM81X_REG_TRGR_BASE(mors) ((mors)->regs->trgr_base_address)
+#define MM81X_REG_TRGR1_STS(mors) (MM81X_REG_TRGR_BASE(mors) + 0x00)
+#define MM81X_REG_TRGR1_SET(mors) (MM81X_REG_TRGR_BASE(mors) + 0x04)
+#define MM81X_REG_TRGR1_CLR(mors) (MM81X_REG_TRGR_BASE(mors) + 0x08)
+#define MM81X_REG_TRGR1_EN(mors) (MM81X_REG_TRGR_BASE(mors) + 0x0C)
+#define MM81X_REG_TRGR2_STS(mors) (MM81X_REG_TRGR_BASE(mors) + 0x10)
+#define MM81X_REG_TRGR2_SET(mors) (MM81X_REG_TRGR_BASE(mors) + 0x14)
+#define MM81X_REG_TRGR2_CLR(mors) (MM81X_REG_TRGR_BASE(mors) + 0x18)
+#define MM81X_REG_TRGR2_EN(mors) (MM81X_REG_TRGR_BASE(mors) + 0x1C)
+
+#define MM81X_REG_INT_BASE(mors) ((mors)->regs->irq_base_address)
+#define MM81X_REG_INT1_STS(mors) (MM81X_REG_INT_BASE(mors) + 0x00)
+#define MM81X_REG_INT1_SET(mors) (MM81X_REG_INT_BASE(mors) + 0x04)
+#define MM81X_REG_INT1_CLR(mors) (MM81X_REG_INT_BASE(mors) + 0x08)
+#define MM81X_REG_INT1_EN(mors) (MM81X_REG_INT_BASE(mors) + 0x0C)
+#define MM81X_REG_INT2_STS(mors) (MM81X_REG_INT_BASE(mors) + 0x10)
+#define MM81X_REG_INT2_SET(mors) (MM81X_REG_INT_BASE(mors) + 0x14)
+#define MM81X_REG_INT2_CLR(mors) (MM81X_REG_INT_BASE(mors) + 0x18)
+#define MM81X_REG_INT2_EN(mors) (MM81X_REG_INT_BASE(mors) + 0x1C)
+
+#define MM81X_REG_CHIP_ID(mors) ((mors)->regs->chip_id_address)
+
+#define MM81X_REG_MSI(mors) ((mors)->regs->msi_address)
+#define MM81X_REG_MSI_HOST_INT(mors) ((mors)->regs->msi_value)
+
+#define MM81X_REG_HOST_MAGIC_VALUE(mors) ((mors)->regs->magic_num_value)
+
+#define MM81X_REG_RESET(mors) ((mors)->regs->cpu_reset_address)
+#define MM81X_REG_RESET_VALUE(mors) ((mors)->regs->cpu_reset_value)
+
+#define MM81X_REG_HOST_MANIFEST_PTR(mors) ((mors)->regs->manifest_ptr_address)
+
+#define MM81X_REG_EARLY_CLK_CTRL_VALUE(mors) \
+ ((mors)->regs->early_clk_ctrl_value)
+
+#define MM81X_REG_CLK_CTRL(mors) ((mors)->regs->clk_ctrl_address)
+#define MM81X_REG_CLK_CTRL_VALUE(mors) ((mors)->regs->clk_ctrl_value)
+
+#define MM81X_REG_BOOT_ADDR(mors) ((mors)->regs->boot_address)
+#define MM81X_REG_BOOT_ADDR_VALUE(mors) ((mors)->regs->boot_value)
+
+#define MM81X_REG_AON_ADDR(mors) ((mors)->regs->aon)
+#define MM81X_REG_AON_COUNT(mors) ((mors)->regs->aon_count)
+#define MM81X_REG_AON_LATCH_ADDR(mors) ((mors)->regs->aon_latch)
+#define MM81X_REG_AON_LATCH_MASK(mors) ((mors)->regs->aon_latch_mask)
+#define MM81X_REG_AON_USB_RESET(mors) ((mors)->regs->aon_reset_usb_value)
+
+/* Bit 17 to 24 reserved for the beacon VIF 0 to 7 interrupts */
+#define MM81X_INT_BEACON_VIF_MASK_ALL (GENMASK(24, 17))
+#define MM81X_INT_BEACON_BASE_NUM (17)
+
+/* PV0 NDP probe interrupts (VIF 0 and 1). */
+#define MM81X_INT_NDP_PROBE_REQ_PV0_VIF_MASK_ALL (GENMASK(26, 25))
+#define MM81X_INT_NDP_PROBE_REQ_PV0_BASE_NUM (25)
+
+/* Bit 27 Chip to Host stop notify */
+#define MM81X_INT_HW_STOP_NOTIFICATION_NUM (27)
+#define MM81X_INT_HW_STOP_NOTIFICATION BIT(MM81X_INT_HW_STOP_NOTIFICATION_NUM)
+
+#define CHIP_TYPE_SILICON 0x0
+
+/* Chip ID */
+#define MM8108XX_ID 0x9
+
+/* Chip Rev */
+#define MM8108B2_REV 0x8
+
+/* Chip Rev String */
+#define MM8108B_STRING "b"
+#define MM8108B2_REV_STRING MM8108B_STRING "2"
+
+/* Chip ID for MM8108 */
+#define MM8108B2_ID \
+ MM81X_DEVICE_ID(MM8108XX_ID, MM8108B2_REV, CHIP_TYPE_SILICON)
+
+#define FW_RAM_ONLY_STRING ""
+#define FW_ROM_LINKED_STRING "-rl"
+#define FW_ROM_ALL_STRING "-ro"
+
+/*
+ * Minimum time we must wait between attempting to reload the HW after a
+ * stop notification
+ */
+#define HW_RELOAD_AFTER_STOP_WINDOW 5
+
/*
 * Feature flags advertised by firmware via struct host_table.firmware_flags.
 * The driver tests these against mors->firmware_flags at runtime.
 */
enum host_table_firmware_flags {
	MM81X_FW_FLAGS_SUPPORT_S1G = BIT(0),
	/* NOTE(review): name suggests the busy line is active-low — confirm */
	MM81X_FW_FLAGS_BUSY_ACTIVE_LOW = BIT(1),
	/* Firmware can report TX completion for beacons (see beacon TX path) */
	MM81X_FW_FLAGS_REPORTS_TX_BEACON_COMPLETION = BIT(2),
	MM81X_FW_FLAGS_SUPPORT_HW_SCAN = BIT(3),
	MM81X_FW_FLAGS_SUPPORT_CHIP_HALT_IRQ = BIT(4),
};
+
/*
 * Table shared between host and chip firmware; all fields are
 * little-endian on the wire (hence __le32 + __packed).
 */
struct host_table {
	__le32 magic_number;
	__le32 fw_version_number;
	__le32 host_flags;
	/* Bitmap of enum host_table_firmware_flags */
	__le32 firmware_flags;
	__le32 memcmd_cmd_addr;
	__le32 memcmd_resp_addr;
	/* Address of an extended host table, if any */
	__le32 ext_host_tbl_addr;
} __packed;
+
/*
 * Per-chip-family register addresses and magic values, accessed through
 * the MM81X_REG_*() macros above. One instance per supported family
 * (see mm8108_regs).
 */
struct mm81x_regs {
	u32 chip_id_address;
	u32 irq_base_address;
	u32 trgr_base_address;
	u32 cpu_reset_address;
	u32 cpu_reset_value;
	u32 msi_address;
	u32 msi_value;
	u32 manifest_ptr_address;
	u32 magic_num_value;
	u32 clk_ctrl_address;
	u32 clk_ctrl_value;
	u32 early_clk_ctrl_value;
	u32 boot_address;
	u32 boot_value;
	u32 pager_base_address;
	u32 aon_latch;
	u32 aon_latch_mask;
	u32 aon_reset_usb_value;
	u32 aon;
	/* Number of always-on registers starting at @aon */
	u8 aon_count;
};
+
+int mm81x_hw_otp_get_board_type(struct mm81x *mors);
+bool mm81x_hw_otp_valid_board_type(u32 board_type);
+int mm81x_hw_otp_get_mac_addr(struct mm81x *mors);
+
+void mm81x_hw_irq_enable(struct mm81x *mors, u32 irq, bool enable);
+int mm81x_hw_irq_handle(struct mm81x *mors);
+void mm81x_hw_irq_clear(struct mm81x *mors);
+void mm81x_hw_toggle_aon_latch(struct mm81x *mors);
+void mm81x_hw_enable_burst_mode(struct mm81x *mors, const u8 burst_mode);
+int mm81x_hw_digital_reset(struct mm81x *mors);
+void mm81x_hw_pre_firmware_ndr_hook(struct mm81x *mors);
+void mm81x_hw_post_firmware_ndr_hook(struct mm81x *mors);
+
/*
 * SDIO word-burst configuration values (3-bit field, see
 * SDIO_WORD_BURST_MASK); presumably consumed by
 * mm81x_hw_enable_burst_mode() — confirm against hw.c.
 */
enum sdio_burst_mode {
	SDIO_WORD_BURST_DISABLE =
		0, /* Intentionally duplicate to make it clear it's disabled */
	SDIO_WORD_BURST_SIZE_0 = 0, /* 000: no bursting (single 32bit word) */
	SDIO_WORD_BURST_SIZE_2 = 1, /* 001: bursts of 2 words */
	SDIO_WORD_BURST_SIZE_4 = 2, /* 010: bursts of 4 words */
	SDIO_WORD_BURST_SIZE_8 = 3, /* 011: bursts of 8 words */
	SDIO_WORD_BURST_SIZE_16 = 4, /* 100: bursts of 16 words */
	SDIO_WORD_BURST_MASK = 7,
};
+
+extern const struct mm81x_regs mm8108_regs;
+
+void mm81x_hw_enable_stop_notifications(struct mm81x *mors, bool enable);
+
+#endif /* !_MM81X_HW_H_ */
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 12/31] wifi: mm81x: add mac.c
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (10 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 11/31] wifi: mm81x: add hw.h Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 13/31] wifi: mm81x: add mac.h Lachlan Hodges
` (19 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge,
Nathan Chancellor, Nick Desaulniers, Bill Wendling, Justin Stitt
Cc: ayman.grais, linux-wireless, linux-kernel, llvm
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/mac.c | 2444 +++++++++++++++++++
1 file changed, 2444 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/mac.c
diff --git a/drivers/net/wireless/morsemicro/mm81x/mac.c b/drivers/net/wireless/morsemicro/mm81x/mac.c
new file mode 100644
index 000000000000..e4a6900f635d
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/mac.c
@@ -0,0 +1,2444 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+#include "core.h"
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/crc32.h>
+#include <net/mac80211.h>
+#include <asm/div64.h>
+#include <linux/kernel.h>
+#include "hif.h"
+#include "mac.h"
+#include "bus.h"
+#include "ps.h"
+#include "rc.h"
+
+/*
+ * Arbitrary size limit for the filter command address list, to ensure that
+ * the command does not exceed page/MTU size. This will be far greater than
+ * the number of filters supported by the firmware.
+ */
+#define MCAST_FILTER_COUNT_MAX (1024 / sizeof(filter->addr_list[0]))
+
+/* Calculate average RSSI for Rx status */
+#define CALC_AVG_RSSI(_avg, _sample) ((((_avg) * 9 + (_sample)) / 10))
+
+/*
+ * When automatically trying MCS0 before MCS10, this is how many
+ * MCS0 attempts to make
+ */
+#define MCS0_BEFORE_MCS10_COUNT (1)
+
+/* Maximum TX power (default) */
+#define MAX_TX_POWER_MBM (2200)
+
+/* Default queue count */
+#define MM81X_HW_QUEUE_COUNT (4)
+
+/* Max rates per skb */
+#define MM81X_HW_MAX_RATES (4)
+
+/* Max reported rates */
+#define MM81X_HW_MAX_REPORT_RATES (4)
+
+/* Max rate attempts */
+#define MM81X_HW_MAX_RATE_TRIES (1)
+
+/* Max sk pacing shift */
+#define MM81X_HW_TX_SK_PACING_SHIFT (3)
+
+/* NSS/MCS map values */
+#define MM81X_NSS_MCS_BYTE_0 0xfe /* 1SS */
+#define MM81X_NSS_MCS_BYTE_1 0x00
+#define MM81X_NSS_MCS_BYTE_2 0xfc /* 1SS */
+#define MM81X_NSS_MCS_BYTE_3 0x01
+#define MM81X_NSS_MCS_BYTE_4 0x00
+
+/* HW restart delay time before terminating hardware IF work items */
+#define MM81X_HW_RESTART_DELAY_MS 20
+
+/* clang-format off */
+
+/* mm81x chips do not support 16MHz */
+#define CHANS1G(channel, frequency, offset) \
+{ \
+ .band = NL80211_BAND_S1GHZ, \
+ .center_freq = (frequency), \
+ .freq_offset = (offset), \
+ .hw_value = (channel), \
+ .flags = IEEE80211_CHAN_NO_16MHZ, \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
/*
 * 26 S1G channels at 1 MHz spacing: centre frequencies 902.5-927.5 MHz
 * (integer MHz plus the 500 kHz offset), odd hw_values 1-51.
 */
static struct ieee80211_channel mors_s1ghz_channels[] = {
	CHANS1G(1, 902, 500),
	CHANS1G(3, 903, 500),
	CHANS1G(5, 904, 500),
	CHANS1G(7, 905, 500),
	CHANS1G(9, 906, 500),
	CHANS1G(11, 907, 500),
	CHANS1G(13, 908, 500),
	CHANS1G(15, 909, 500),
	CHANS1G(17, 910, 500),
	CHANS1G(19, 911, 500),
	CHANS1G(21, 912, 500),
	CHANS1G(23, 913, 500),
	CHANS1G(25, 914, 500),
	CHANS1G(27, 915, 500),
	CHANS1G(29, 916, 500),
	CHANS1G(31, 917, 500),
	CHANS1G(33, 918, 500),
	CHANS1G(35, 919, 500),
	CHANS1G(37, 920, 500),
	CHANS1G(39, 921, 500),
	CHANS1G(41, 922, 500),
	CHANS1G(43, 923, 500),
	CHANS1G(45, 924, 500),
	CHANS1G(47, 925, 500),
	CHANS1G(49, 926, 500),
	CHANS1G(51, 927, 500),
};
+
+/* clang-format on */
+
/*
 * Single S1G band registered with mac80211; no legacy bitrates. Most
 * capability bytes are filled in at runtime by mm81x_mac_caps_init().
 */
static struct ieee80211_supported_band mors_band_s1ghz = {
	.band = NL80211_BAND_S1GHZ,
	.s1g_cap.s1g = true,
	.channels = mors_s1ghz_channels,
	.n_channels = ARRAY_SIZE(mors_s1ghz_channels),
	.bitrates = NULL,
	.n_bitrates = 0,
	.s1g_cap.cap[4] = 0x80 /* STA type sensor only for AP & STA */
};
+
/* STA and AP interfaces share a single budget of MM81X_MAX_IF */
static struct ieee80211_iface_limit mors_if_limits[] = {
	{
		.max = MM81X_MAX_IF,
		.types = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP),
	},
};

/* All concurrent interfaces must operate on one common channel */
static struct ieee80211_iface_combination mors_if_combs[] = {
	{
		.limits = mors_if_limits,
		.n_limits = ARRAY_SIZE(mors_if_limits),
		.max_interfaces = MM81X_MAX_IF,
		.num_different_channels = 1,
	},
};
+
+/* Convert from a time in time units (1024us) to us */
+#define MM81X_TU_TO_US(x) ((x) * 1024UL)
+
+/* Convert from a time in time units (1024us) to ms */
+#define MM81X_TU_TO_MS(x) (MM81X_TU_TO_US(x) / 1000UL)
+
+/* Default time to dwell on a scan channel */
+#define MM81X_HWSCAN_DEFAULT_DWELL_TIME_MS (30)
+
+/* Default time to dwell on a scan channel for passive scan */
+#define MM81X_HWSCAN_DEFAULT_PASSIVE_DWELL_TIME_MS (110)
+
+/* Default time to dwell on home channel, in between scan channels */
+#define MM81X_HWSCAN_DEFAULT_DWELL_ON_HOME_MS (200)
+
+/* Typical time it takes to send the probe */
+#define MM81X_HWSCAN_PROBE_DELAY_MS (30)
+
+/* A margin to account for event/command processing */
+#define MM81X_HWSCAN_TIMEOUT_OVERHEAD_MS (2000)
+
+/* Scan channel frequency mask */
+#define HW_SCAN_CH_LIST_FREQ_KHZ GENMASK(19, 0)
+
+/*
+ * Scan channel bandwidth mask.
+ * Encoded as: 0 = 1MHz, 1 = 2MHz, 2 = 4MHz, 3 = 8MHz
+ */
+#define HW_SCAN_CH_LIST_OP_BW GENMASK(21, 20)
+
+/*
+ * Scan channel primary channel width.
+ * Encoded as: 0 = 1MHz, 1 = 2MHz
+ */
+#define HW_SCAN_CH_LIST_PRIM_CH_WIDTH BIT(22)
+
+/* Index into power_list for tx power of channel */
+#define HW_SCAN_CH_LIST_PWR_LIST_IDX GENMASK(31, 26)
+
/*
 * TLV payloads appended to the HW_SCAN host command. Header fields are
 * little-endian; hdr.len covers the payload only, not the header itself
 * (see mm81x_hw_scan_h_pack_tlv_hdr() call sites).
 */
struct hw_scan_tlv_hdr {
	__le16 tag;
	__le16 len;
} __packed;

struct hw_scan_tlv_channel_list {
	struct hw_scan_tlv_hdr hdr;
	/* One packed HW_SCAN_CH_LIST_* word per channel */
	__le32 channels[];
} __packed;

struct hw_scan_tlv_power_list {
	struct hw_scan_tlv_hdr hdr;
	/*
	 * NOTE(review): declared s32 rather than __le32 and filled without
	 * cpu_to_le32() — verify the firmware's expected endianness.
	 */
	s32 tx_power_qdbm[];
} __packed;

struct hw_scan_tlv_probe_req {
	struct hw_scan_tlv_hdr hdr;
	/* Probe request frame template (including SSIDs) */
	u8 buf[];
} __packed;

struct hw_scan_tlv_dwell_on_home {
	struct hw_scan_tlv_hdr hdr;
	/* Time to dwell on home between scan channels */
	__le32 home_dwell_time_ms;
} __packed;
+
+#define DOT11AH_BA_MAX_MPDU_PER_AMPDU (32)
+
+/* wiphy scan params */
+#define MM81X_MAX_SCAN_IE_LEN 512
+#define MM81X_MAX_SCAN_SSIDS 1
+#define MM81X_MAX_REMAIN_ON_CHAN_DURATION 10000
+
+static int mm81x_tx_h_get_prim_bw(struct cfg80211_chan_def *chandef)
+{
+ return chandef->s1g_primary_2mhz ? 2 : 1;
+}
+
/* Compare two-character country codes (only the first two bytes matter) */
static bool mm81x_reg_h_cc_equal(const char *cc1, const char *cc2)
{
	if (cc1[0] != cc2[0])
		return false;

	return cc1[1] == cc2[1];
}
+
+static bool mm81x_tx_h_pkt_over_rts_threshold(struct mm81x *mors,
+ struct ieee80211_tx_info *info,
+ struct sk_buff *skb)
+{
+ u8 ccmp_len;
+
+ if (!info->control.hw_key)
+ return ((skb->len + FCS_LEN) > mors->rts_threshold);
+
+ if (info->control.hw_key->keylen == 32)
+ ccmp_len =
+ IEEE80211_CCMP_256_HDR_LEN + IEEE80211_CCMP_256_MIC_LEN;
+ else if (info->control.hw_key->keylen == 16)
+ ccmp_len = IEEE80211_CCMP_HDR_LEN + IEEE80211_CCMP_MIC_LEN;
+ else
+ ccmp_len = 0;
+
+ return ((skb->len + FCS_LEN + ccmp_len) > mors->rts_threshold);
+}
+
/*
 * PS-filter drop path: if TX to this station is currently being filtered
 * for power save, consume the skb and report it back to mac80211 as
 * TX_FILTERED so it is requeued later. Returns true if the skb was
 * consumed, false if normal transmission should proceed.
 */
static bool mm81x_tx_h_ps_filtered_for_sta(struct mm81x *mors,
					   struct sk_buff *skb,
					   struct ieee80211_sta *sta)
{
	struct mm81x_sta *mors_sta;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	if (!sta)
		return false;

	mors_sta = (struct mm81x_sta *)sta->drv_priv;

	if (!mors_sta->tx_ps_filter_en)
		return false;

	dev_dbg(mors->dev, "Frame for sta[%pM] PS filtered", mors_sta->addr);

	/* Mark filtered and clear AMPDU so mac80211 retries it standalone */
	info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
	info->flags &= ~IEEE80211_TX_CTL_AMPDU;

	ieee80211_tx_status_skb(mors->hw, skb);
	return true;
}
+
+static void mm81x_mac_check_fw_disabled_chans(struct ieee80211_hw *hw)
+{
+ int ret = 0;
+ u32 i;
+ struct mm81x *mors = hw->priv;
+ struct host_cmd_resp_get_disabled_channels *resp;
+ u32 resp_len = sizeof(struct host_cmd_disabled_channel_entry) *
+ ARRAY_SIZE(mors_s1ghz_channels) +
+ sizeof(*resp);
+
+ resp = kzalloc(resp_len, GFP_KERNEL);
+ if (!resp) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = mm81x_cmd_get_disabled_channels(mors, resp, resp_len);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(mors_s1ghz_channels); i++) {
+ struct ieee80211_channel *ch = &mors_s1ghz_channels[i];
+
+ if (ch->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ ch->flags &= ~IEEE80211_CHAN_S1G_NO_PRIMARY;
+ }
+
+ for (i = 0; i < le32_to_cpu(resp->n_channels); i++) {
+ struct ieee80211_channel *ch;
+ struct host_cmd_disabled_channel_entry *entry =
+ &resp->channels[i];
+
+ if (entry->bw_mhz != 1)
+ continue;
+
+ ch = ieee80211_get_channel_khz(
+ hw->wiphy,
+ KHZ100_TO_KHZ(le16_to_cpu(entry->freq_100khz)));
+ if (!ch)
+ continue;
+
+ ch->flags |= IEEE80211_CHAN_S1G_NO_PRIMARY;
+ dev_dbg(mors->dev, "set NO_PRIMARY on %u KHz",
+ ieee80211_channel_to_khz(ch));
+ }
+
+out:
+ if (ret)
+ dev_err(mors->dev, "failed to set disabled primary channels");
+
+ kfree(resp);
+}
+
/*
 * mac80211 start() callback: mark the device as started. Firmware
 * bring-up is handled elsewhere; this only flips the software flag that
 * gates operations such as hw_scan.
 */
static int mm81x_mac_ops_start(struct ieee80211_hw *hw)
{
	struct mm81x *mors = hw->priv;

	mors->started = true;
	return 0;
}
+
+static int mm81x_tx_h_get_max_bw(struct mm81x *mors)
+{
+ return MM81X_FW_SUPP(&mors->fw_caps, 8MHZ) ? 8 :
+ MM81X_FW_SUPP(&mors->fw_caps, 4MHZ) ? 4 :
+ MM81X_FW_SUPP(&mors->fw_caps, 2MHZ) ? 2 :
+ 1;
+}
+
/*
 * Translate firmware capability flags into the advertised S1G capability
 * bytes and NSS/MCS map of mors_band_s1ghz. The __FW_CAP_N helper sets
 * bit _bit in s1g->cap[_n] iff firmware capability _cap is present; the
 * FW_CAPx wrappers bind the capability-byte index.
 */
static void mm81x_mac_caps_init(struct mm81x *mors)
{
	struct mm81x_fw_caps *fw_caps = &mors->fw_caps;
	struct ieee80211_sta_s1g_cap *s1g = &mors_band_s1ghz.s1g_cap;

#define __FW_CAP_N(_n, _cap, _bit)                  \
	do {                                        \
		if (MM81X_FW_SUPP(fw_caps, _cap))   \
			s1g->cap[_n] |= (_bit);     \
	} while (0)

#define FW_CAP0(_cap, _bit) __FW_CAP_N(0, _cap, _bit)
#define FW_CAP3(_cap, _bit) __FW_CAP_N(3, _cap, _bit)
#define FW_CAP5(_cap, _bit) __FW_CAP_N(5, _cap, _bit)
#define FW_CAP6(_cap, _bit) __FW_CAP_N(6, _cap, _bit)
#define FW_CAP7(_cap, _bit) __FW_CAP_N(7, _cap, _bit)
#define FW_CAP8(_cap, _bit) __FW_CAP_N(8, _cap, _bit)
#define FW_CAP9(_cap, _bit) __FW_CAP_N(9, _cap, _bit)

	FW_CAP0(S1G_LONG, S1G_CAP0_S1G_LONG);

	/* SGI at 1 MHz is unconditional; wider SGI follows bandwidth caps */
	s1g->cap[0] |= S1G_CAP0_SGI_1MHZ;
	if (MM81X_FW_SUPP(fw_caps, SGI)) {
		FW_CAP0(2MHZ, S1G_CAP0_SGI_2MHZ);
		FW_CAP0(4MHZ, S1G_CAP0_SGI_4MHZ);
		FW_CAP0(8MHZ, S1G_CAP0_SGI_8MHZ);
	}

	/* Advertise only the single widest supported channel width */
	if (MM81X_FW_SUPP(fw_caps, 8MHZ))
		s1g->cap[0] |= S1G_SUPP_CH_WIDTH_8;
	else if (MM81X_FW_SUPP(fw_caps, 4MHZ))
		s1g->cap[0] |= S1G_SUPP_CH_WIDTH_4;
	else if (MM81X_FW_SUPP(fw_caps, 2MHZ))
		s1g->cap[0] |= S1G_SUPP_CH_WIDTH_2;

	FW_CAP3(RD_RESPONDER, S1G_CAP3_RD_RESPONDER);
	FW_CAP3(LONG_MPDU, S1G_CAP3_MAX_MPDU_LEN);

	FW_CAP5(AMSDU, S1G_CAP5_AMSDU);
	FW_CAP5(AMPDU, S1G_CAP5_AMPDU);
	FW_CAP5(ASYMMETRIC_BA_SUPPORT, S1G_CAP5_ASYMMETRIC_BA);
	FW_CAP5(FLOW_CONTROL, S1G_CAP5_FLOW_CONTROL);

	FW_CAP6(OBSS_MITIGATION, S1G_CAP6_OBSS_MITIGATION);
	FW_CAP6(FRAGMENT_BA, S1G_CAP6_FRAGMENT_BA);
	FW_CAP6(NDP_PSPOLL, S1G_CAP6_NDP_PS_POLL);
	FW_CAP6(TXOP_SHARING_IMPLICIT_ACK, S1G_CAP6_TXOP_SHARING_IMP_ACK);
	FW_CAP6(HTC_VHT_MFB, S1G_CAP6_VHT_LINK_ADAPT);

	FW_CAP7(TACK_AS_PSPOLL, S1G_CAP7_TACK_AS_PS_POLL);
	FW_CAP7(DUPLICATE_1MHZ, S1G_CAP7_DUP_1MHZ);
	FW_CAP7(MCS_NEGOTIATION, S1G_CAP7_MCS_NEGOTIATION);
	FW_CAP7(1MHZ_CONTROL_RESPONSE_PREAMBLE,
		S1G_CAP7_1MHZ_CTL_RESPONSE_PREAMBLE);
	FW_CAP7(SECTOR_TRAINING, S1G_CAP7_SECTOR_TRAINING_OPERATION);
	FW_CAP7(TMP_PS_MODE_SWITCH, S1G_CAP7_TEMP_PS_MODE_SWITCH);

	FW_CAP8(BDT, S1G_CAP8_BDT);

	FW_CAP9(LINK_ADAPTATION_WO_NDP_CMAC,
		S1G_CAP9_LINK_ADAPT_PER_CONTROL_RESPONSE);

	/* 1SS MCS 9 for Rx / Tx map */
	s1g->nss_mcs[0] = MM81X_NSS_MCS_BYTE_0;
	s1g->nss_mcs[1] = MM81X_NSS_MCS_BYTE_1;
	s1g->nss_mcs[2] = MM81X_NSS_MCS_BYTE_2;
	s1g->nss_mcs[3] = MM81X_NSS_MCS_BYTE_3;
	s1g->nss_mcs[4] = MM81X_NSS_MCS_BYTE_4;

#undef FW_CAP0
#undef FW_CAP3
#undef FW_CAP5
#undef FW_CAP6
#undef FW_CAP7
#undef FW_CAP8
#undef FW_CAP9
#undef __FW_CAP_N
}
+
+static void mm81x_mac_beacon_irq_enable(struct mm81x_vif *mors_vif, bool enable)
+{
+ struct mm81x *mors = mm81x_vif_to_mors(mors_vif);
+ u8 beacon_irq_num = MM81X_INT_BEACON_BASE_NUM + mors_vif->id;
+
+ enable ? set_bit(beacon_irq_num, &mors->beacon_irqs_enabled) :
+ clear_bit(beacon_irq_num, &mors->beacon_irqs_enabled);
+
+ mm81x_hw_irq_enable(mors, beacon_irq_num, enable);
+}
+
/*
 * Fill per-skb TX info for a beacon: a single attempt at MCS 0 on the
 * primary bandwidth, tagged with the transmitting VIF id. A 1 MHz
 * primary uses the 1 MHz preamble, otherwise short S1G preamble.
 */
static void mm81x_beacon_h_fill_tx_info(struct mm81x *mors,
					struct mm81x_skb_tx_info *tx_info,
					struct mm81x_vif *mors_vif,
					int tx_bw_mhz)
{
	enum dot11_bandwidth bw_idx =
		mm81x_ratecode_bw_mhz_to_bw_index(tx_bw_mhz);
	enum mm81x_rate_preamble pream = MM81X_RATE_PREAMBLE_S1G_SHORT;

	tx_info->flags |=
		cpu_to_le32(MM81X_TX_CONF_FLAGS_VIF_ID_SET(mors_vif->id));

	if (bw_idx == DOT11_BANDWIDTH_1MHZ)
		pream = MM81X_RATE_PREAMBLE_S1G_1M;

	/* One rate entry only; terminate the list with count 0 */
	tx_info->rates[0].count = 1;
	tx_info->rates[1].count = 0;
	tx_info->rates[0].mm81x_ratecode =
		mm81x_ratecode_init(bw_idx, 0, 0, pream);

	/* Ask for a TX completion report when the firmware can provide one */
	if (mors->firmware_flags & MM81X_FW_FLAGS_REPORTS_TX_BEACON_COMPLETION)
		tx_info->flags |=
			cpu_to_le32(MM81X_TX_CONF_FLAGS_IMMEDIATE_REPORT);
}
+
/*
 * Tasklet run on the beacon interrupt: fetch the next beacon from
 * mac80211 and queue it to the dedicated beacon TX queue. At most one
 * beacon per beaconing VIF may be outstanding; if the chip has not
 * consumed the previous batch, this request is dropped with an error.
 */
static void mm81x_mac_beacon_tasklet(struct tasklet_struct *t)
{
	struct mm81x_vif *mors_vif =
		from_tasklet(mors_vif, t, u.ap.beacon_tasklet);
	struct mm81x *mors = mm81x_vif_to_mors(mors_vif);
	struct mm81x_skbq *mq;
	struct sk_buff *beacon;
	struct ieee80211_vif *vif = mm81x_vif_to_ieee80211_vif(mors_vif);
	struct mm81x_skb_tx_info tx_info = { 0 };
	int num_bcn_vifs = atomic_read(&mors->num_bcn_vifs);

	mq = mm81x_hif_get_tx_beacon_queue(mors);
	if (!mq) {
		dev_err(mors->dev, "no matching beacon Q found");
		return;
	}

	/* One in-flight beacon per beaconing VIF at most */
	if (mm81x_skbq_count(mq) >= num_bcn_vifs) {
		dev_err(mors->dev,
			"previous beacon not consumed, dropping req [id:%d]",
			mors_vif->id);
		return;
	}

	beacon = ieee80211_beacon_get(mors->hw, vif, false);
	if (!beacon) {
		dev_err(mors->dev, "failed to retrieve beacon");
		return;
	}

	mm81x_beacon_h_fill_tx_info(mors, &tx_info, mors_vif,
				    mm81x_tx_h_get_prim_bw(&mors->chandef));
	mm81x_skbq_skb_tx(mq, &beacon, &tx_info, MM81X_SKB_CHAN_BEACON);
}
+
/*
 * Dispatch beacon interrupt status: mask the raw status with the set of
 * enabled beacon IRQs, translate bit positions to VIF ids, and schedule
 * the matching VIF's beacon tasklet. RCU protects the VIF lookup.
 */
void mm81x_mac_beacon_irq_handle(struct mm81x *mors, u32 status)
{
	int vif_id;
	unsigned long masked_status = (status & mors->beacon_irqs_enabled) >>
				      MM81X_INT_BEACON_BASE_NUM;

	guard(rcu)();
	for_each_set_bit(vif_id, &masked_status, MM81X_MAX_IF) {
		struct mm81x_vif *mors_vif;
		struct ieee80211_vif *vif;

		vif = mm81x_rcu_dereference_vif_id(mors, vif_id, true);
		if (!vif)
			continue;

		mors_vif = ieee80211_vif_to_mors_vif(vif);
		tasklet_schedule(&mors_vif->u.ap.beacon_tasklet);
	}
}
+
/*
 * Set up beaconing for a VIF: initialise its beacon tasklet, enable its
 * beacon interrupt, and count it towards the beaconing-VIF total used by
 * the tasklet's flow control.
 */
static void mm81x_mac_beacon_init(struct mm81x_vif *mors_vif)
{
	struct mm81x *mors = mm81x_vif_to_mors(mors_vif);

	tasklet_setup(&mors_vif->u.ap.beacon_tasklet, mm81x_mac_beacon_tasklet);
	mm81x_mac_beacon_irq_enable(mors_vif, true);
	atomic_inc(&mors->num_bcn_vifs);
}
+
+static struct hw_scan_tlv_hdr mm81x_hw_scan_h_pack_tlv_hdr(u16 tag, u16 len)
+{
+ struct hw_scan_tlv_hdr hdr = { .tag = cpu_to_le16(tag),
+ .len = cpu_to_le16(len) };
+ return hdr;
+}
+
/*
 * Pack one scan channel into the 32-bit wire format: centre frequency in
 * kHz, operating bandwidth, primary-channel width and an index into the
 * TX power list. Scanning is done at 1 MHz, so both width fields use the
 * 1 MHz bandwidth index.
 *
 * NOTE(review): HW_SCAN_CH_LIST_PRIM_CH_WIDTH is a single bit (0=1MHz,
 * 1=2MHz) but is fed the bandwidth *index* for 1 MHz — works while that
 * index is 0; confirm if wider primaries are ever packed here.
 */
static __le32 mm81x_hw_scan_h_pack_channel(struct ieee80211_channel *chan,
					   u8 pwr_idx)
{
	__le32 packed = 0;
	u32 freq_khz = ieee80211_channel_to_khz(chan);

	packed |= le32_encode_bits(freq_khz, HW_SCAN_CH_LIST_FREQ_KHZ);
	packed |= le32_encode_bits(mm81x_ratecode_bw_mhz_to_bw_index(1),
				   HW_SCAN_CH_LIST_OP_BW);
	packed |= le32_encode_bits(mm81x_ratecode_bw_mhz_to_bw_index(1),
				   HW_SCAN_CH_LIST_PRIM_CH_WIDTH);
	packed |= le32_encode_bits(pwr_idx, HW_SCAN_CH_LIST_PWR_LIST_IDX);

	return packed;
}
+
/*
 * Append the channel-list TLV at @buf and return the first byte past it.
 * Caller must have sized the buffer via mm81x_hw_scan_h_get_cmd_size().
 */
static u8 *
mm81x_hw_scan_h_add_channel_list_tlv(u8 *buf,
				     struct mm81x_hw_scan_params *params)
{
	int i;
	struct hw_scan_tlv_channel_list *ch_list =
		(struct hw_scan_tlv_channel_list *)buf;

	ch_list->hdr = mm81x_hw_scan_h_pack_tlv_hdr(
		HOST_CMD_HW_SCAN_TLV_TAG_CHAN_LIST,
		params->num_chans * sizeof(ch_list->channels[0]));

	for (i = 0; i < params->num_chans; i++) {
		struct ieee80211_channel *chan = params->channels[i].channel;

		ch_list->channels[i] = mm81x_hw_scan_h_pack_channel(
			chan, params->channels[i].power_idx);
	}

	/* One past the last packed channel == end of this TLV */
	return (u8 *)&ch_list->channels[i];
}
+
/*
 * Append the TX power-list TLV at @buf and return the first byte past it.
 *
 * NOTE(review): powers are copied as host-order s32 with no cpu_to_le32()
 * while the other TLV fields are explicitly little-endian — verify the
 * firmware's expectation on big-endian hosts.
 */
static u8 *
mm81x_hw_scan_h_add_power_list_tlv(u8 *buf, struct mm81x_hw_scan_params *params)
{
	int i;
	struct hw_scan_tlv_power_list *pwr_list =
		(struct hw_scan_tlv_power_list *)buf;
	size_t size = sizeof(pwr_list->tx_power_qdbm[0]) * params->n_powers;

	pwr_list->hdr = mm81x_hw_scan_h_pack_tlv_hdr(
		HOST_CMD_HW_SCAN_TLV_TAG_POWER_LIST, size);

	for (i = 0; i < params->n_powers; i++)
		pwr_list->tx_power_qdbm[i] = params->powers_qdbm[i];

	return (u8 *)&pwr_list->tx_power_qdbm[i];
}
+
/*
 * Append the probe-request template TLV (the raw frame bytes from
 * params->probe_req) at @buf and return the first byte past it.
 */
static u8 *
mm81x_hw_scan_h_add_probe_req_tlv(u8 *buf, struct mm81x_hw_scan_params *params)
{
	struct sk_buff *skb = params->probe_req;
	struct hw_scan_tlv_probe_req *probe_req =
		(struct hw_scan_tlv_probe_req *)buf;

	probe_req->hdr = mm81x_hw_scan_h_pack_tlv_hdr(
		HOST_CMD_HW_SCAN_TLV_TAG_PROBE_REQ, skb->len);
	memcpy(probe_req->buf, skb->data, skb->len);

	return buf + sizeof(*probe_req) + skb->len;
}
+
/*
 * Append the dwell-on-home TLV (time to return to the operating channel
 * between scan channels) at @buf and return the first byte past it.
 */
static u8 *
mm81x_hw_scan_h_insert_dwell_time_tlv(u8 *buf,
				      struct mm81x_hw_scan_params *params)
{
	struct hw_scan_tlv_dwell_on_home *dwell =
		(struct hw_scan_tlv_dwell_on_home *)buf;

	dwell->hdr = mm81x_hw_scan_h_pack_tlv_hdr(
		HOST_CMD_HW_SCAN_TLV_TAG_DWELL_ON_HOME,
		sizeof(*dwell) - sizeof(dwell->hdr));
	dwell->home_dwell_time_ms = cpu_to_le32(params->dwell_on_home_ms);

	return buf + sizeof(*dwell);
}
+
/*
 * Build the probe-request template skb for an active scan: the base
 * frame from mac80211 with @ssid, followed by the common IEs and then
 * the S1G band-specific IEs. On success, ownership of the skb is held
 * in params->probe_req (freed by mm81x_hw_scan_h_clean_params()).
 */
static int __mm81x_hw_scan_h_init_probe_req(struct mm81x_hw_scan_params *params,
					    u8 *ssid, u8 ssid_len,
					    struct ieee80211_scan_ies *ies)
{
	u8 *pos;
	struct sk_buff *probe_req;
	struct ieee80211_tx_info *info;
	u16 ies_len = ies->len[NL80211_BAND_S1GHZ] + ies->common_ie_len;

	/* ies_len is reserved as tailroom here, then filled in below */
	probe_req = ieee80211_probereq_get(params->hw, params->vif->addr, ssid,
					   ssid_len, ies_len);
	if (!probe_req)
		return -ENOMEM;

	pos = skb_put(probe_req, ies_len);
	memcpy(pos, ies->common_ies, ies->common_ie_len);
	pos += ies->common_ie_len;
	memcpy(pos, ies->ies[NL80211_BAND_S1GHZ], ies->len[NL80211_BAND_S1GHZ]);

	info = IEEE80211_SKB_CB(probe_req);
	info->control.vif = params->vif;
	params->probe_req = probe_req;

	return 0;
}
+
+static void mm81x_hw_scan_h_init_ssid(struct mm81x *mors,
+ struct cfg80211_ssid *ssids, int n_ssids,
+ u8 **out_ssid, u8 *out_ssid_len)
+{
+ *out_ssid = NULL;
+ *out_ssid_len = 0;
+
+ if (n_ssids > 0) {
+ if (n_ssids > 1) {
+ dev_warn(
+ mors->dev,
+ "Multiple SSIDs found when only one supported. Using the first only.");
+ }
+ *out_ssid_len = ssids[0].ssid_len;
+ *out_ssid = ssids[0].ssid;
+ }
+}
+
/*
 * Wrapper building the probe-request template from a full
 * ieee80211_scan_request: resolves the (single) SSID, then delegates to
 * __mm81x_hw_scan_h_init_probe_req().
 */
static int
mm81x_hw_scan_h_init_probe_req(struct mm81x_hw_scan_params *params,
			       struct ieee80211_scan_request *scan_req)
{
	struct mm81x *mors = params->hw->priv;
	struct cfg80211_scan_request *req = &scan_req->req;
	struct ieee80211_scan_ies *ies = &scan_req->ies;
	u8 ssid_len = 0;
	u8 *ssid = NULL;

	mm81x_hw_scan_h_init_ssid(mors, req->ssids, req->n_ssids, &ssid,
				  &ssid_len);

	return __mm81x_hw_scan_h_init_probe_req(params, ssid, ssid_len, ies);
}
+
+static bool
+mm81x_hw_scan_h_is_chan_present(const struct mm81x_hw_scan_params *params,
+ const struct ieee80211_channel *chan)
+{
+ int channel;
+
+ for (channel = 0; channel < params->num_chans; channel++) {
+ if (params->channels[channel].channel == chan)
+ return true;
+ }
+
+ return false;
+}
+
+static int mm81x_hw_scan_h_insert_chan(struct mm81x_hw_scan_params *params,
+ struct ieee80211_channel *chan)
+{
+ if (!params->channels)
+ return -EFAULT;
+
+ if (!chan)
+ return -EFAULT;
+
+ if (params->num_chans >= params->allocated_chans)
+ return -ENOMEM;
+
+ if (mm81x_hw_scan_h_is_chan_present(params, chan))
+ return 0;
+
+ params->channels[params->num_chans].channel = chan;
+ params->num_chans++;
+ return 0;
+}
+
+static int mm81x_hw_scan_h_init_chan_list(struct mm81x_hw_scan_params *params,
+ struct ieee80211_channel **chans,
+ u32 n_channels)
+{
+ int i, j;
+ int num_pwrs_coarse = 0;
+ int last_pwr = INT_MIN;
+ int chans_to_allocate = 0;
+
+ for (i = 0; i < n_channels; i++)
+ if (chans[i])
+ chans_to_allocate++;
+
+ params->num_chans = 0;
+ params->allocated_chans = 0;
+ params->channels = kcalloc(chans_to_allocate, sizeof(*params->channels),
+ GFP_KERNEL);
+ if (!params->channels)
+ return -ENOMEM;
+
+ params->allocated_chans = chans_to_allocate;
+
+ for (i = 0; i < n_channels; i++)
+ if (chans[i])
+ mm81x_hw_scan_h_insert_chan(params, chans[i]);
+
+ /*
+ * Calculate a rough estimate of number of different channel
+ * powers required
+ */
+ for (i = 0; i < params->num_chans; i++) {
+ if (chans[i]->max_reg_power != last_pwr) {
+ last_pwr = chans[i]->max_reg_power;
+ num_pwrs_coarse++;
+ }
+ }
+
+ params->powers_qdbm = kmalloc_array(
+ num_pwrs_coarse, sizeof(*params->powers_qdbm), GFP_KERNEL);
+ if (!params->powers_qdbm)
+ return -ENOMEM;
+
+ params->n_powers = 0;
+
+ for (i = 0; i < params->num_chans; i++) {
+ s32 power_qdbm =
+ MBM_TO_QDBM(DBM_TO_MBM(chans[i]->max_reg_power));
+
+ /* Try and find the power in the list */
+ for (j = 0; j < params->n_powers; j++)
+ if (params->powers_qdbm[j] == power_qdbm)
+ break;
+
+ /* Reached the end of the list - add the new power option */
+ if (j == params->n_powers) {
+ params->powers_qdbm[j] = power_qdbm;
+ params->n_powers++;
+ if (params->n_powers > num_pwrs_coarse) {
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ }
+
+ /* Give the index of the power level to the channel */
+ params->channels[i].power_idx = j;
+ }
+ return 0;
+}
+
+static void mm81x_hw_scan_h_clean_params(struct mm81x_hw_scan_params *params)
+{
+ if (params->probe_req)
+ dev_kfree_skb_any(params->probe_req);
+ kfree(params->channels);
+ kfree(params->powers_qdbm);
+
+ params->num_chans = 0;
+ params->allocated_chans = 0;
+}
+
/*
 * Total HW_SCAN command size including all TLVs for these params. Must
 * stay in sync with the TLVs emitted by mm81x_hw_scan_h_insert_tlvs().
 */
size_t mm81x_hw_scan_h_get_cmd_size(struct mm81x_hw_scan_params *params)
{
	struct hw_scan_tlv_channel_list *ch_list;
	struct hw_scan_tlv_power_list *pwr_list;
	struct hw_scan_tlv_probe_req *probe_req;
	struct hw_scan_tlv_dwell_on_home *dwell;
	struct host_cmd_req_hw_scan *req;
	size_t cmd_size = sizeof(*req);

	/* No TLVs if simple abort command */
	if (params->operation != MM81X_HW_SCAN_OP_START)
		return cmd_size;

	cmd_size += struct_size(ch_list, channels, params->num_chans);
	cmd_size += struct_size(pwr_list, tx_power_qdbm, params->n_powers);

	if (params->probe_req)
		cmd_size += struct_size(probe_req, buf, params->probe_req->len);
	if (params->dwell_on_home_ms)
		cmd_size += sizeof(*dwell);

	return cmd_size;
}
+
/*
 * Emit all HW_SCAN TLVs into @buf (channel list, power list, and the
 * optional dwell-on-home and probe-request TLVs) and return the first
 * byte past them. The set of TLVs written must match the sizing in
 * mm81x_hw_scan_h_get_cmd_size().
 */
u8 *mm81x_hw_scan_h_insert_tlvs(struct mm81x_hw_scan_params *params, u8 *buf)
{
	buf = mm81x_hw_scan_h_add_channel_list_tlv(buf, params);
	buf = mm81x_hw_scan_h_add_power_list_tlv(buf, params);

	if (params->dwell_on_home_ms)
		buf = mm81x_hw_scan_h_insert_dwell_time_tlv(buf, params);
	if (params->probe_req)
		buf = mm81x_hw_scan_h_add_probe_req_tlv(buf, params);

	return buf;
}
+
+static u32 mm81x_hw_scan_h_get_dwell_on_home(struct mm81x *mors,
+ struct ieee80211_vif *vif)
+{
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mm81x_mac_is_sta_vif_associated(vif))
+ return mors->hw_scan.home_dwell_ms;
+ return 0;
+}
+
/*
 * Get a zeroed mm81x_hw_scan_params: allocate on first use (cached in
 * mors->hw_scan.params), otherwise release the buffers owned by the
 * previous scan and re-zero the structure. Returns NULL on allocation
 * failure.
 */
static struct mm81x_hw_scan_params *
__mm81x_hw_scan_h_init_params(struct mm81x *mors)
{
	struct mm81x_hw_scan_params *params = mors->hw_scan.params;

	if (!params) {
		params = kzalloc_obj(*params, GFP_KERNEL);
		if (params)
			mors->hw_scan.params = params;
	} else {
		mm81x_hw_scan_h_clean_params(params);
		memset(params, 0, sizeof(*params));
	}

	return params;
}
+
+static int mm81x_hw_scan_h_init_params(struct mm81x *mors,
+ struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+{
+ struct mm81x_hw_scan_params *params = mors->hw_scan.params;
+
+ params = __mm81x_hw_scan_h_init_params(mors);
+ if (!params) {
+ mors->hw_scan.state = HW_SCAN_STATE_IDLE;
+ return -ENOMEM;
+ }
+
+ params->hw = hw;
+ params->vif = vif;
+ params->has_directed_ssid = (req->ssids && req->ssids[0].ssid_len > 0);
+ params->operation = MM81X_HW_SCAN_OP_START;
+ params->dwell_on_home_ms = mm81x_hw_scan_h_get_dwell_on_home(mors, vif);
+ params->use_1mhz_probes = true;
+
+ if (req->duration)
+ params->dwell_time_ms = MM81X_TU_TO_MS(req->duration);
+ else if (req->n_ssids == 0)
+ params->dwell_time_ms =
+ MM81X_HWSCAN_DEFAULT_PASSIVE_DWELL_TIME_MS;
+ else
+ params->dwell_time_ms = MM81X_HWSCAN_DEFAULT_DWELL_TIME_MS;
+
+ return 0;
+}
+
+static u32 mm81x_hw_scan_h_calc_timeout(struct mm81x_hw_scan_params *params)
+{
+ u32 ret = 0;
+
+ ret = params->dwell_time_ms + params->dwell_on_home_ms;
+ if (params->probe_req)
+ ret += MM81X_HWSCAN_PROBE_DELAY_MS;
+
+ ret *= params->num_chans;
+ ret += MM81X_HWSCAN_TIMEOUT_OVERHEAD_MS;
+
+ return ret;
+}
+
+/*
+ * mac80211 .hw_scan handler: move the scan state machine to RUNNING,
+ * build the scan parameters, channel list and (for active scans) the
+ * probe request template, send the scan command to firmware and arm
+ * the scan watchdog timeout.
+ */
+static int mm81x_mac_ops_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ int ret = 0;
+ struct mm81x *mors = hw->priv;
+ struct cfg80211_scan_request *req = &hw_req->req;
+ struct mm81x_hw_scan_params *params;
+ struct ieee80211_channel **chans = hw_req->req.channels;
+
+ dev_dbg(mors->dev, "state %d", mors->hw_scan.state);
+
+ if (!mors->started) {
+ dev_warn(mors->dev, "device not ready");
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ /* Only one scan at a time; claim the state machine first. */
+ switch (mors->hw_scan.state) {
+ case HW_SCAN_STATE_IDLE:
+ mors->hw_scan.state = HW_SCAN_STATE_RUNNING;
+ reinit_completion(&mors->hw_scan.scan_done);
+ break;
+ case HW_SCAN_STATE_RUNNING:
+ case HW_SCAN_STATE_ABORTING:
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ ret = mm81x_hw_scan_h_init_params(mors, hw, vif, req);
+ if (ret)
+ goto exit;
+
+ params = mors->hw_scan.params;
+
+ ret = mm81x_hw_scan_h_init_chan_list(params, chans,
+ hw_req->req.n_channels);
+ if (ret)
+ goto exit;
+
+ /* Only init the probe request template if this is an active scan */
+ if (req->n_ssids > 0) {
+ ret = mm81x_hw_scan_h_init_probe_req(params, hw_req);
+ if (ret) {
+ dev_err(mors->dev, "Failed to init probe req %d", ret);
+ goto exit;
+ }
+ }
+
+ ret = mm81x_cmd_hw_scan(mors, params, false);
+ if (ret) {
+ mors->hw_scan.state = HW_SCAN_STATE_IDLE;
+ goto exit;
+ }
+
+ /* Watchdog in case the firmware never reports scan completion. */
+ ieee80211_queue_delayed_work(
+ mors->hw, &mors->hw_scan.timeout,
+ msecs_to_jiffies(mm81x_hw_scan_h_calc_timeout(params)));
+exit:
+ return ret;
+}
+
+/*
+ * Abort an in-progress hw scan: send a stop command and wait for the
+ * firmware's scan-done event. If the command fails, the chip is gone,
+ * or the event never arrives, synthesise an aborted scan completion so
+ * mac80211 can unblock itself.
+ */
+static void mm81x_hw_scan_h_cancel(struct mm81x *mors)
+{
+ int ret;
+ struct mm81x_hw_scan_params params = { 0 };
+
+ cancel_delayed_work_sync(&mors->hw_scan.timeout);
+
+ switch (mors->hw_scan.state) {
+ case HW_SCAN_STATE_IDLE:
+ case HW_SCAN_STATE_ABORTING:
+ /* scan not running */
+ return;
+ case HW_SCAN_STATE_RUNNING:
+ mors->hw_scan.state = HW_SCAN_STATE_ABORTING;
+ break;
+ }
+
+ params.operation = MM81X_HW_SCAN_OP_STOP;
+
+ ret = mm81x_cmd_hw_scan(mors, &params, false);
+
+ if (ret || !mors->started ||
+ !wait_for_completion_timeout(&mors->hw_scan.scan_done, 1 * HZ)) {
+ /*
+ * We may have lost the event on the bus, the chip could be
+ * wedged, or the cmd failed for another reason. Nevertheless,
+ * we should call the done event so mac80211 knows to unblock
+ * itself.
+ */
+ struct cfg80211_scan_info info = { .aborted = true };
+
+ ieee80211_scan_completed(mors->hw, &info);
+ mors->hw_scan.state = HW_SCAN_STATE_IDLE;
+ }
+}
+
+/* mac80211 .cancel_hw_scan handler. */
+static void mm81x_mac_ops_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mm81x *mors = hw->priv;
+
+ /*
+ * mm81x_hw_scan_h_cancel() cancels the timeout work as its first
+ * step, so there is no need to cancel it here as well.
+ */
+ mm81x_hw_scan_h_cancel(mors);
+}
+
+/*
+ * Firmware scan-done event handler: report scan completion (aborted if
+ * we were in the middle of cancelling), wake any waiter on scan_done
+ * and disarm the watchdog.
+ */
+static void mm81x_mac_hw_scan_done_event(struct ieee80211_hw *hw)
+{
+ struct mm81x *mors = hw->priv;
+ struct cfg80211_scan_info info = { 0 };
+
+ dev_dbg(mors->dev, "completing hw scan");
+
+ switch (mors->hw_scan.state) {
+ case HW_SCAN_STATE_IDLE:
+ /* Scan has already been stopped. Just continue */
+ goto exit;
+ case HW_SCAN_STATE_RUNNING:
+ case HW_SCAN_STATE_ABORTING:
+ info.aborted = (mors->hw_scan.state == HW_SCAN_STATE_ABORTING);
+ mors->hw_scan.state = HW_SCAN_STATE_IDLE;
+ }
+
+ ieee80211_scan_completed(mors->hw, &info);
+exit:
+ /* Complete even when idle so a cancel path waiting on us unblocks. */
+ complete(&mors->hw_scan.scan_done);
+ cancel_delayed_work_sync(&mors->hw_scan.timeout);
+}
+
+/* Watchdog work: the firmware never signalled scan-done, abort the scan. */
+static void mm81x_mac_hw_scan_timeout_work(struct work_struct *work)
+{
+ struct mm81x *mors =
+ container_of(work, struct mm81x, hw_scan.timeout.work);
+
+ dev_err(mors->dev, "hw scan timed out, aborting");
+ mm81x_hw_scan_h_cancel(mors);
+}
+
+/* One-time init of the hw scan state machine, completion and watchdog. */
+static void mm81x_mac_hw_scan_init(struct mm81x *mors)
+{
+ mors->hw_scan.state = HW_SCAN_STATE_IDLE;
+ mors->hw_scan.params = NULL;
+ mors->hw_scan.home_dwell_ms = MM81X_HWSCAN_DEFAULT_DWELL_ON_HOME_MS;
+
+ init_completion(&mors->hw_scan.scan_done);
+ INIT_DELAYED_WORK(&mors->hw_scan.timeout,
+ mm81x_mac_hw_scan_timeout_work);
+}
+
+/* Teardown counterpart of mm81x_mac_hw_scan_init(): stop the watchdog
+ * and free the scan parameter block (and anything it owns).
+ */
+static void mm81x_mac_hw_scan_destroy(struct mm81x *mors)
+{
+ cancel_delayed_work_sync(&mors->hw_scan.timeout);
+ if (mors->hw_scan.params)
+ mm81x_hw_scan_h_clean_params(mors->hw_scan.params);
+ kfree(mors->hw_scan.params);
+ mors->hw_scan.params = NULL;
+}
+
+/*
+ * Force-complete any scan in flight (used on restart/cleanup): report
+ * it to mac80211 as aborted without talking to the firmware.
+ */
+static void mm81x_mac_hw_scan_finish(struct mm81x *mors)
+{
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ if (mors->hw_scan.state == HW_SCAN_STATE_IDLE)
+ return;
+
+ ieee80211_scan_completed(mors->hw, &info);
+ complete(&mors->hw_scan.scan_done);
+ mors->hw_scan.state = HW_SCAN_STATE_IDLE;
+ cancel_delayed_work_sync(&mors->hw_scan.timeout);
+}
+
+/*
+ * Dispatch a firmware event skb. Only events (not command responses)
+ * with host_id 0 are accepted; unknown event ids are silently ignored.
+ * Returns 0 on success, -EINVAL for a malformed event.
+ * Note: the skb is not consumed here; the caller retains ownership.
+ */
+int mm81x_mac_event_recv(struct mm81x *mors, struct sk_buff *skb)
+{
+ struct host_cmd_event *event = (struct host_cmd_event *)(skb->data);
+ u16 event_id = le16_to_cpu(event->hdr.message_id);
+ u16 event_iid = le16_to_cpu(event->hdr.host_id);
+ u16 vif_id = le16_to_cpu(event->hdr.vif_id);
+ struct ieee80211_vif *vif;
+ int ret = 0;
+
+ if (!HOST_CMD_IS_EVT(event) || event_iid != 0)
+ return -EINVAL;
+
+ switch (event_id) {
+ case HOST_CMD_ID_EVT_HW_SCAN_DONE:
+ dev_dbg(mors->dev,
+ "Event: HOST_CMD_ID_EVT_HW_SCAN_DONE Received.");
+ mm81x_mac_hw_scan_done_event(mors->hw);
+ break;
+ case HOST_CMD_ID_EVT_BEACON_LOSS:
+ dev_dbg(mors->dev,
+ "Event: HOST_CMD_ID_EVT_BEACON_LOSS Received");
+ scoped_guard(rcu) {
+ vif = mm81x_rcu_dereference_vif_id(mors, vif_id, true);
+ if (vif)
+ ieee80211_beacon_loss(vif);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Rewrite the tx rate table so that MCS0 @ 1MHz retries fall back to
+ * MCS10 (the S1G duplicate-mode rate), either by converting surplus
+ * MCS0 entries or by appending a new MCS10 entry if the table has room.
+ */
+static void mm81x_tx_h_apply_mcs10(struct mm81x *mors,
+ struct mm81x_skb_tx_info *tx_info)
+{
+ u8 i;
+ u8 j;
+ int mcs0_first_idx = -1;
+ int mcs0_last_idx = -1;
+
+ /* Find out where our first and last MCS0 entries are. */
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ enum dot11_bandwidth bw_idx = mm81x_ratecode_bw_index_get(
+ tx_info->rates[i].mm81x_ratecode);
+
+ /* NOTE(review): this treats ANY 1MHz entry as "MCS0" here;
+ * the MCS index is only checked in the replacement loop below.
+ */
+ if (bw_idx == DOT11_BANDWIDTH_1MHZ) {
+ mcs0_last_idx = i;
+ if (mcs0_first_idx == -1)
+ mcs0_first_idx = i;
+ }
+
+ /*
+ * If the count is 0 then we are at the end of the table.
+ * Break to allow us to reuse i indicating the end of the
+ * table.
+ */
+ if (tx_info->rates[i].count == 0)
+ break;
+ }
+
+ /* If there aren't any MCS0 (at 1MHz) entries we are done. */
+ if (mcs0_first_idx < 0)
+ return;
+
+ /*
+ * If we are in MCS10_MODE_AUTO add MCS10 counts to the table if they
+ * will fit. There should be three cases:
+ *
+ * - There is one MCS0 entry and the table is full -> do nothing
+ * - There is one MCS0 entry and the table has space -> adjust MCS0
+ * down and add MCS 10
+ * - There are multiple MCS0 entries -> replace entries after the first
+ * with MCS 10
+ */
+ /* Case 3 - replace additional entries. */
+ if (mcs0_last_idx > mcs0_first_idx) {
+ for (j = mcs0_first_idx + 1; j < i; j++) {
+ enum dot11_bandwidth bw_idx =
+ mm81x_ratecode_bw_index_get(
+ tx_info->rates[j].mm81x_ratecode);
+ u8 mcs_index = mm81x_ratecode_mcs_index_get(
+ tx_info->rates[j].mm81x_ratecode);
+ if (mcs_index == 0 && bw_idx == DOT11_BANDWIDTH_1MHZ) {
+ mm81x_ratecode_mcs_index_set(
+ &tx_info->rates[j].mm81x_ratecode, 10);
+ }
+ }
+ /* Case 2 - add additional MCS10 entry. */
+ } else if (mcs0_last_idx == mcs0_first_idx &&
+ i < (IEEE80211_TX_MAX_RATES)) {
+ /* Keep at most MCS0_BEFORE_MCS10_COUNT tries at MCS0; the
+ * remainder of the original count becomes the MCS10 entry.
+ */
+ int pre_mcs10_mcs0_count =
+ min_t(u8, tx_info->rates[mcs0_last_idx].count,
+ MCS0_BEFORE_MCS10_COUNT);
+ int mcs10_count = tx_info->rates[mcs0_last_idx].count -
+ pre_mcs10_mcs0_count;
+
+ /*
+ * If there were less retries than our desired minimum MCS0 we
+ * don't add MCS10 retries.
+ */
+ if (mcs10_count > 0) {
+ /* Use the same flags for MCS10 as MCS0. */
+ tx_info->rates[i].mm81x_ratecode =
+ tx_info->rates[mcs0_last_idx].mm81x_ratecode;
+ mm81x_ratecode_mcs_index_set(
+ &tx_info->rates[i].mm81x_ratecode, 10);
+ tx_info->rates[mcs0_last_idx].count =
+ pre_mcs10_mcs0_count;
+ tx_info->rates[i].count = mcs10_count;
+ }
+ }
+}
+
+/*
+ * Opportunistically kick off a BA (aggregation) session for this
+ * frame's TID if the sta is authorized, the frame is QoS data (not VO,
+ * not EAPOL) and no session is active or being started already.
+ */
+void mm81x_tx_h_check_aggr(struct ieee80211_sta *pubsta, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct mm81x_sta *mors_sta = (struct mm81x_sta *)pubsta->drv_priv;
+ u8 tid = ieee80211_get_tid(hdr);
+
+ /* we are already aggregating */
+ if (mors_sta->tid_tx[tid] || mors_sta->tid_start_tx[tid])
+ return;
+
+ if (mors_sta->state < IEEE80211_STA_AUTHORIZED)
+ return;
+
+ /* Voice traffic is latency sensitive; don't aggregate it. */
+ if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
+ return;
+
+ if (unlikely(!ieee80211_is_data_qos(hdr->frame_control)))
+ return;
+
+ /* EAPOL frames must not be held up by BA negotiation. */
+ if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
+ return;
+
+ mors_sta->tid_start_tx[tid] = true;
+ ieee80211_start_tx_ba_session(pubsta, tid, 0);
+}
+
+/*
+ * Sum the per-rate attempt counts from a tx status, stopping at the
+ * first unused (zero-count) entry. The mors argument is unused but
+ * kept for interface symmetry with the other tx helpers.
+ */
+int mm81x_tx_h_get_attempts(struct mm81x *mors,
+ struct mm81x_skb_tx_status *tx_sts)
+{
+ int total = 0;
+ int limit = min_t(int, MM81X_SKB_MAX_RATES, IEEE80211_TX_MAX_RATES);
+ int idx;
+
+ for (idx = 0; idx < limit && tx_sts->rates[idx].count > 0; idx++)
+ total += tx_sts->rates[idx].count;
+
+ return total;
+}
+
+/*
+ * Populate the driver tx descriptor (rates, RTS/SGI flags, vif/key ids,
+ * PS-buffering flags, TID parameters) for one outgoing skb.
+ */
+static void mm81x_tx_h_fill_info(struct mm81x *mors,
+ struct mm81x_skb_tx_info *tx_info,
+ struct sk_buff *skb, struct ieee80211_vif *vif,
+ int tx_bw_mhz, struct ieee80211_sta *sta)
+{
+ int i;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mm81x_vif *mors_vif = ieee80211_vif_to_mors_vif(vif);
+ struct mm81x_sta *mors_sta = NULL;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int op_bw_mhz = cfg80211_chandef_get_width(&mors->chandef);
+ u8 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+ /* RTS only used on narrow (<8MHz) operating channels. */
+ bool rts_allowed = op_bw_mhz < 8;
+
+ if (sta)
+ mors_sta = (struct mm81x_sta *)sta->drv_priv;
+
+ rts_allowed &= mm81x_tx_h_pkt_over_rts_threshold(mors, info, skb);
+
+ mm81x_rc_sta_fill_tx_rates(mors, tx_info, skb, sta, tx_bw_mhz,
+ rts_allowed);
+
+ /* Propagate RTS/short-GI into every ratecode entry. */
+ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ if (rts_allowed)
+ mm81x_ratecode_enable_rts(
+ &tx_info->rates[i].mm81x_ratecode);
+
+ if (info->control.rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
+ mm81x_ratecode_enable_sgi(
+ &tx_info->rates[i].mm81x_ratecode);
+ }
+
+ /* Apply change of MCS0 to MCS10 if required. */
+ mm81x_tx_h_apply_mcs10(mors, tx_info);
+
+ tx_info->flags |=
+ cpu_to_le32(MM81X_TX_CONF_FLAGS_VIF_ID_SET(mors_vif->id));
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ tx_info->flags |= cpu_to_le32(MM81X_TX_CONF_FLAGS_CTL_AMPDU);
+
+ if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+ tx_info->flags |=
+ cpu_to_le32(MM81X_TX_CONF_FLAGS_SEND_AFTER_DTIM);
+
+ if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) {
+ tx_info->flags |= cpu_to_le32(MM81X_TX_CONF_NO_PS_BUFFER);
+
+ if (info->flags & IEEE80211_TX_STATUS_EOSP)
+ tx_info->flags |= cpu_to_le32(
+ MM81X_TX_CONF_FLAGS_IMMEDIATE_REPORT);
+ } else if (ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_is_bufferable_mmpdu(skb)) {
+ /* Non-bufferable mgmt must bypass firmware PS buffering. */
+ tx_info->flags |= cpu_to_le32(MM81X_TX_CONF_NO_PS_BUFFER);
+ }
+
+ if (info->control.hw_key) {
+ tx_info->flags |= cpu_to_le32(MM81X_TX_CONF_FLAGS_HW_ENCRYPT);
+ tx_info->flags |= cpu_to_le32(MM81X_TX_CONF_FLAGS_KEY_IDX_SET(
+ info->control.hw_key->hw_key_idx));
+ }
+
+ tx_info->tid = tid;
+ if (mors_sta) {
+ tx_info->tid_params = mors_sta->tid_params[tid];
+
+ if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) {
+ if (mors_sta->tx_ps_filter_en)
+ dev_dbg(mors->dev,
+ "TX ps filter cleared sta[%pM]",
+ mors_sta->addr);
+ mors_sta->tx_ps_filter_en = false;
+ }
+ }
+}
+
+/*
+ * mac80211 .tx handler: derive a tx bandwidth for the frame, fill the
+ * driver tx descriptor and queue the skb on the mgmt or per-AC data
+ * queue. If the frame is PS-filtered for the sta it is handled (and
+ * presumably consumed) by mm81x_tx_h_ps_filtered_for_sta() —
+ * NOTE(review): confirm that helper takes skb ownership on true.
+ */
+static void mm81x_mac_ops_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct mm81x *mors = hw->priv;
+ struct mm81x_skbq *mq = NULL;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mm81x_skb_tx_info tx_info = { 0 };
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ bool is_mgmt = ieee80211_is_mgmt(hdr->frame_control);
+ int tx_bw_mhz = cfg80211_chandef_get_width(&mors->chandef);
+ struct ieee80211_sta *sta = control->sta;
+ int max_tx_bw = 0, sta_max_bw_mhz = 0;
+
+ if (sta) {
+ struct mm81x_sta *mors_sta = (struct mm81x_sta *)sta->drv_priv;
+
+ sta_max_bw_mhz = mors_sta->max_bw_mhz;
+ }
+
+ /* Clamp: device limit, then mgmt/primary, then sta limit, then
+ * probe responses always at 1MHz.
+ */
+ max_tx_bw = mm81x_tx_h_get_max_bw(mors);
+ tx_bw_mhz = min(max_tx_bw, tx_bw_mhz);
+
+ if (is_mgmt)
+ tx_bw_mhz = mm81x_tx_h_get_prim_bw(&mors->chandef);
+ if (sta_max_bw_mhz)
+ tx_bw_mhz = min(tx_bw_mhz, sta_max_bw_mhz);
+ if (ieee80211_is_probe_resp(hdr->frame_control))
+ tx_bw_mhz = 1;
+
+ mm81x_tx_h_fill_info(mors, &tx_info, skb, vif, tx_bw_mhz, sta);
+
+ if (mm81x_tx_h_ps_filtered_for_sta(mors, skb, sta))
+ return;
+
+ if (is_mgmt)
+ mq = mm81x_hif_get_tx_mgmt_queue(mors);
+ else
+ mq = mm81x_hif_get_tx_data_queue(mors,
+ dot11_tid_to_ac(tx_info.tid));
+
+ mm81x_skbq_skb_tx(mq, &skb, &tx_info,
+ (is_mgmt) ? MM81X_SKB_CHAN_MGMT :
+ MM81X_SKB_CHAN_DATA);
+}
+
+/* mac80211 .stop handler: just mark the device as not started. */
+static void mm81x_mac_ops_stop(struct ieee80211_hw *hw, bool suspend)
+{
+ struct mm81x *mors = hw->priv;
+
+ mors->started = false;
+}
+
+/* Stop beaconing for an AP vif: disable the beacon IRQ, kill the
+ * beacon tasklet and drop the beaconing-vif refcount (floor of 0).
+ */
+static void mm81x_mac_beacon_finish(struct mm81x_vif *mors_vif)
+{
+ struct mm81x *mors = mm81x_vif_to_mors(mors_vif);
+
+ mm81x_mac_beacon_irq_enable(mors_vif, false);
+ tasklet_kill(&mors_vif->u.ap.beacon_tasklet);
+ /*
+ * Side effect of the restarting required when
+ * reacting to regdom changes...
+ */
+ atomic_add_unless(&mors->num_bcn_vifs, -1, 0);
+}
+
+/*
+ * mac80211 .remove_interface handler: stop beaconing if this was an AP
+ * vif, tell the firmware to drop the interface and clear the RCU slot.
+ */
+static void mm81x_mac_ops_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ int ret;
+ struct mm81x *mors = hw->priv;
+ struct mm81x_vif *mors_vif = (struct mm81x_vif *)vif->drv_priv;
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ mm81x_mac_beacon_finish(mors_vif);
+
+ ret = mm81x_cmd_rm_if(mors, mors_vif->id);
+ if (ret)
+ dev_err(mors->dev, "mm81x_cmd_rm_if failed %d", ret);
+
+ RCU_INIT_POINTER(mors->vifs[mors_vif->id], NULL);
+}
+
+/*
+ * Query the chip's maximum tx power in mBm; fall back to the compiled
+ * default if the command fails.
+ */
+static s32 mm81x_mac_get_max_txpower(struct mm81x *mors)
+{
+ int ret;
+ s32 power_mbm;
+
+ /* Retrieve maximum TX power the chip can transmit */
+ ret = mm81x_cmd_get_max_txpower(mors, &power_mbm);
+ if (ret) {
+ dev_err(mors->dev, "using default tx max power %d mBm",
+ MAX_TX_POWER_MBM);
+ return MAX_TX_POWER_MBM;
+ }
+
+ dev_dbg(mors->dev, "Max tx power detected %d mBm", power_mbm);
+ return power_mbm;
+}
+
+/*
+ * Set the tx power (mBm), clamped to the chip maximum. Returns the
+ * power actually in effect. The max is lazily fetched on first use
+ * (tx_max_power_mbm starts as INT_MAX sentinel). No-op if the request
+ * matches the current power.
+ */
+static s32 mm81x_mac_set_txpower(struct mm81x *mors, s32 power_mbm)
+{
+ int ret;
+ s32 out_power_mbm;
+
+ if (mors->tx_max_power_mbm == INT_MAX)
+ mors->tx_max_power_mbm = mm81x_mac_get_max_txpower(mors);
+
+ power_mbm = min(power_mbm, mors->tx_max_power_mbm);
+ if (power_mbm == mors->tx_power_mbm)
+ return mors->tx_power_mbm;
+
+ ret = mm81x_cmd_set_txpower(mors, &out_power_mbm, power_mbm);
+ if (ret) {
+ dev_err(mors->dev, "failed, power %d mBm ret %d", power_mbm,
+ ret);
+ return mors->tx_power_mbm;
+ }
+
+ /* Firmware may grant a different power than requested. */
+ if (out_power_mbm != mors->tx_power_mbm) {
+ dev_dbg(mors->dev, "%d -> %d mBm", mors->tx_power_mbm,
+ out_power_mbm);
+ mors->tx_power_mbm = out_power_mbm;
+ }
+
+ return mors->tx_power_mbm;
+}
+
+/*
+ * Program the firmware channel and re-apply tx power (the set_channel
+ * command returns the regulatory power limit for the new channel).
+ */
+static int mm81x_mac_set_channel(struct mm81x *mors, u32 op_chan_freq_hz,
+ u8 pri_1mhz_chan_idx, u8 op_bw_mhz,
+ u8 pri_bw_mhz)
+{
+ int ret;
+
+ ret = mm81x_cmd_set_channel(mors, op_chan_freq_hz, pri_1mhz_chan_idx,
+ op_bw_mhz, pri_bw_mhz, &mors->tx_power_mbm);
+ if (ret) {
+ dev_err(mors->dev, "mm81x_cmd_set_channel() failed, ret %d",
+ ret);
+ return ret;
+ }
+
+ mm81x_mac_set_txpower(mors, mors->tx_power_mbm);
+ return 0;
+}
+
+/*
+ * Convert the primary 1MHz channel to its 0-based index within the
+ * operating channel, counting 1MHz sub-channels from the lowest one.
+ */
+static u8 mm81x_mac_pri_chan_to_index(const struct cfg80211_chan_def *chandef)
+{
+ u32 pri_center_khz = ieee80211_channel_to_khz(chandef->chan);
+ u32 op_center_khz = ieee80211_chandef_to_khz(chandef);
+ u32 half_op_bw_khz = cfg80211_chandef_get_width(chandef) * 500;
+ u32 lowest_1mhz_center_khz = op_center_khz + 500 - half_op_bw_khz;
+
+ return (pri_center_khz - lowest_1mhz_center_khz) / 1000;
+}
+
+/*
+ * Translate a cfg80211 chandef into the firmware's channel description
+ * (center freq in Hz, primary 1MHz index, operating/primary widths),
+ * apply it, and cache the chandef on success.
+ */
+static int mm81x_mac_ops_change_channel(struct ieee80211_hw *hw,
+ struct cfg80211_chan_def *chandef)
+{
+ int ret;
+ struct mm81x *mors = hw->priv;
+ u64 freq_hz = KHZ_TO_HZ(ieee80211_chandef_to_khz(chandef));
+ u8 op_bw_mhz = cfg80211_chandef_get_width(chandef);
+ u8 pri_1mhz_idx = mm81x_mac_pri_chan_to_index(chandef);
+ int pri_chan_width_mhz = mm81x_tx_h_get_prim_bw(chandef);
+
+ dev_dbg(mors->dev, "ch: freq=%llu Hz bw=%u pri_idx=%d pri_bw=%d",
+ freq_hz, op_bw_mhz, pri_1mhz_idx, pri_chan_width_mhz);
+
+ ret = mm81x_mac_set_channel(mors, freq_hz, (u8)pri_1mhz_idx, op_bw_mhz,
+ pri_chan_width_mhz);
+ if (ret)
+ return ret;
+
+ memcpy(&mors->chandef, chandef, sizeof(mors->chandef));
+ return 0;
+}
+
+/*
+ * mac80211 .config handler: apply channel changes, and power-level
+ * changes when not already implied by a channel change and not in
+ * monitor mode. Silently succeeds when the device is not started.
+ */
+static int mm81x_mac_ops_config(struct ieee80211_hw *hw, int radio_idx,
+ u32 changed)
+{
+ int ret;
+ struct mm81x *mors = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_channel *channel = conf->chandef.chan;
+
+ if (!mors->started)
+ return 0;
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ret = mm81x_mac_ops_change_channel(hw, &conf->chandef);
+ if (ret < 0)
+ return ret;
+ }
+
+ if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
+ !(changed & IEEE80211_CONF_CHANGE_CHANNEL) &&
+ !(conf->flags & IEEE80211_CONF_MONITOR)) {
+ s32 power_mbm = DBM_TO_MBM(conf->power_level);
+
+ /* Honour the regulatory limit, then report back the power
+ * actually granted by the firmware.
+ */
+ power_mbm = min(DBM_TO_MBM(channel->max_reg_power), power_mbm);
+ power_mbm = mm81x_mac_set_txpower(mors, power_mbm);
+ conf->power_level = MBM_TO_DBM(power_mbm);
+ }
+
+ return 0;
+}
+
+/*
+ * mac80211 .get_txpower handler: report the cached power (in dBm) only
+ * when the vif's channel context matches its requested chandef,
+ * otherwise -ENODATA.
+ */
+static int mm81x_mac_ops_get_txpower(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, int *dbm)
+{
+ struct mm81x *mors = hw->priv;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct cfg80211_chan_def *chandef = &vif->bss_conf.chanreq.oper;
+
+ scoped_guard(rcu) {
+ chanctx_conf = rcu_access_pointer(vif->bss_conf.chanctx_conf);
+ if (!chanctx_conf ||
+ !cfg80211_chandef_identical(chandef, &chanctx_conf->def))
+ return -ENODATA;
+ }
+
+ *dbm = MBM_TO_DBM(mors->tx_power_mbm);
+ return 0;
+}
+
+/*
+ * Sync the device powersave state with the vif's PS setting. Ignored
+ * for AP vifs and when PS is globally disabled; no-op if already in
+ * the requested mode. Order matters: tell the firmware before
+ * enabling local PS, and disable local PS before telling the firmware.
+ */
+static void mm81x_mac_config_ps(struct mm81x *mors, struct ieee80211_vif *vif)
+{
+ bool en_ps = vif->cfg.ps;
+
+ if (vif->type == NL80211_IFTYPE_AP || !mors->ps.enable)
+ return;
+
+ if (mors->config_ps == en_ps)
+ return;
+
+ dev_dbg(mors->dev, "change powersave mode: %d (current %d)", en_ps,
+ mors->config_ps);
+
+ mors->config_ps = en_ps;
+
+ if (en_ps) {
+ mm81x_cmd_set_ps(mors, true);
+ mm81x_ps_enable(mors);
+ } else {
+ mm81x_ps_disable(mors);
+ mm81x_cmd_set_ps(mors, false);
+ }
+}
+
+/*
+ * mac80211 .bss_info_changed handler: powersave, beacon enable/disable
+ * and BSS (beacon interval / DTIM / SSID) reconfiguration.
+ */
+static void mm81x_mac_ops_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
+{
+ int ret;
+ struct mm81x *mors = hw->priv;
+ struct mm81x_vif *mors_vif = (struct mm81x_vif *)vif->drv_priv;
+
+ if (changed & BSS_CHANGED_PS)
+ mm81x_mac_config_ps(mors, vif);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ /* start command is sent, only if it was previously stopped */
+ /*
+ * NOTE(review): the condition sends the start only when
+ * beaconing_enabled is already true, and beaconing_enabled is
+ * then forced to true even on the disable path — verify this
+ * matches the comment's intent (it may rely on the flag's
+ * initial value being set elsewhere).
+ */
+ if ((mors_vif->u.ap.beaconing_enabled && info->enable_beacon) ||
+ !info->enable_beacon)
+ mm81x_cmd_config_beacon_timer(mors, mors_vif,
+ info->enable_beacon);
+
+ mors_vif->u.ap.beaconing_enabled = true;
+ }
+
+ if (changed & BSS_CHANGED_BEACON_INT || changed & BSS_CHANGED_SSID) {
+ ret = mm81x_cmd_cfg_bss(mors, mors_vif->id, info->beacon_int,
+ info->dtim_period,
+ mm81x_vif_generate_cssid(vif));
+ if (ret)
+ dev_err(mors->dev, "mm81x_cmd_cfg_bss failed %d", ret);
+ }
+}
+
+/*
+ * mac80211 .prepare_multicast handler: snapshot the multicast address
+ * list into a freshly allocated mcast_filter (GFP_ATOMIC — may run in
+ * atomic context) and smuggle the pointer through the u64 return value
+ * to .configure_filter, which takes ownership. If there are too many
+ * groups, an empty filter (count 0) disables filtering. Returns 0 on
+ * allocation failure.
+ */
+static u64 mm81x_mac_ops_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ struct mm81x *mors = hw->priv;
+ struct mcast_filter *filter;
+ struct netdev_hw_addr *addr;
+ u16 addr_count = netdev_hw_addr_list_count(mc_list);
+ u16 len = sizeof(*filter) + addr_count * sizeof(filter->addr_list[0]);
+
+ filter = kzalloc(len, GFP_ATOMIC);
+ if (!filter)
+ return 0;
+
+ if (addr_count > MCAST_FILTER_COUNT_MAX) {
+ dev_warn(
+ mors->dev,
+ "Multicast filtering disabled - too many groups (%d) > %u",
+ addr_count, (u16)MCAST_FILTER_COUNT_MAX);
+ filter->count = 0;
+ } else {
+ netdev_hw_addr_list_for_each(addr, mc_list) {
+ dev_dbg(mors->dev, "mcast whitelist (%d): %pM",
+ filter->count, addr->addr);
+ filter->addr_list[filter->count++] =
+ mac2leuint32(addr->addr);
+ }
+ }
+
+ return (u64)(unsigned long)filter;
+}
+
+/*
+ * mac80211 .configure_filter handler: take ownership of the filter
+ * built by .prepare_multicast (passed via the multicast cookie),
+ * replace the cached one and push it to the firmware for every active
+ * vif. On failure the new filter is freed and the cache cleared.
+ * All other filter flags are rejected (*total_flags cleared).
+ */
+static void mm81x_mac_ops_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct mm81x *mors = hw->priv;
+ struct mcast_filter *cmd = (void *)(unsigned long)multicast;
+ struct mm81x_vif *mors_vif = NULL;
+ struct ieee80211_vif *vif = NULL;
+ int vif_id = 0;
+ int ret = 0;
+
+ if (!cmd)
+ goto out;
+
+ /* Swap in the new filter before pushing it per-vif. */
+ kfree(mors->mcast_filter);
+ mors->mcast_filter = cmd;
+
+ for (vif_id = 0; vif_id < ARRAY_SIZE(mors->vifs); vif_id++) {
+ vif = mm81x_rcu_dereference_vif_id(mors, vif_id, false);
+ if (!vif)
+ continue;
+
+ mors_vif = ieee80211_vif_to_mors_vif(vif);
+
+ ret = mm81x_cmd_cfg_multicast_filter(mors, mors_vif);
+ if (!ret)
+ continue;
+
+ dev_err(mors->dev, "Multicast filtering failed - rc=%d", ret);
+ mors->mcast_filter = NULL;
+ kfree(cmd);
+ break;
+ }
+
+out:
+ *total_flags &= 0;
+}
+
+/*
+ * mac80211 .conf_tx handler: translate per-AC EDCA parameters into the
+ * driver's queue parameter struct and send them to the firmware.
+ * txop is converted from 32us units to us (<< 5).
+ * NOTE(review): mqp is not zero-initialized — confirm every field of
+ * struct mm81x_queue_params is assigned below, or stack garbage may be
+ * sent to the firmware.
+ */
+static int mm81x_mac_ops_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ int ret;
+ struct mm81x *mors = hw->priv;
+ struct mm81x_queue_params mqp;
+
+ mqp.aci = map_mac80211q_2_mm81x_aci(ac);
+ mqp.aifs = params->aifs;
+ mqp.cw_max = params->cw_max;
+ mqp.cw_min = params->cw_min;
+ mqp.uapsd = params->uapsd;
+ mqp.txop = params->txop << 5;
+
+ dev_dbg(mors->dev, "queue:%d txop:%d cw_min:%d cw_max:%d aifs:%d",
+ mqp.aci, mqp.txop, mqp.cw_min, mqp.cw_max, mqp.aifs);
+
+ ret = mm81x_cmd_cfg_qos(mors, &mqp);
+ if (ret)
+ dev_dbg(mors->dev, "mm81x_cmd_cfg_qos failed %d", ret);
+ return ret;
+}
+
+/*
+ * mac80211 .sta_state handler: forward sta state transitions to the
+ * firmware and keep local bookkeeping (sta counts / assoc flag) in
+ * sync. Teardown transitions always report success to mac80211 so a
+ * failed command cannot leave a sta stuck.
+ */
+static int mm81x_mac_ops_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ u16 aid;
+ int ret;
+ struct mm81x *mors = hw->priv;
+ struct mm81x_vif *mors_vif = (struct mm81x_vif *)vif->drv_priv;
+ struct mm81x_sta *mors_sta = (struct mm81x_sta *)sta->drv_priv;
+
+ /* Ignore both NOTEXIST to NONE and NONE to NOTEXIST */
+ if ((old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) ||
+ (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST))
+ return 0;
+
+ /* On a STA vif the peer is the AP; use our own association AID. */
+ if (vif->type == NL80211_IFTYPE_STATION)
+ aid = mm81x_mac_sta_aid(vif);
+ else
+ aid = sta->aid;
+
+ ret = mm81x_cmd_sta_state(mors, mors_vif, aid, sta, new_state);
+ if (ret < 0)
+ goto exit;
+
+ ether_addr_copy(mors_sta->addr, sta->addr);
+ mors_sta->state = new_state;
+
+ if (new_state > old_state && new_state == IEEE80211_STA_ASSOC) {
+ if (vif->type == NL80211_IFTYPE_AP)
+ mors_vif->u.ap.num_stas++;
+ else if (vif->type == NL80211_IFTYPE_STATION)
+ mors_vif->u.sta.is_assoc = true;
+ }
+
+ if (new_state < old_state && new_state == IEEE80211_STA_NONE) {
+ if (vif->type == NL80211_IFTYPE_AP)
+ mors_vif->u.ap.num_stas--;
+ else if (vif->type == NL80211_IFTYPE_STATION)
+ mors_vif->u.sta.is_assoc = false;
+ }
+
+exit:
+ /*
+ * Always update our mmrc sta state even on failure to ensure
+ * we don't hold a dangling sta on error
+ */
+ mm81x_rc_sta_state_check(mors, vif, sta, old_state, new_state);
+ return new_state < old_state ? 0 : ret;
+}
+
+/*
+ * mac80211 .ampdu_action handler: BA sessions are negotiated by
+ * mac80211; we only track per-TID aggregation state and the encoded
+ * TID parameters (reorder buffer size, A-MPDU/A-MSDU enable) that are
+ * attached to each tx descriptor.
+ */
+static int mm81x_mac_ops_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ u16 tid = params->tid;
+ struct mm81x *mors = hw->priv;
+ struct ieee80211_sta *sta = params->sta;
+ struct mm81x_sta *mors_sta = (struct mm81x_sta *)sta->drv_priv;
+ u16 buf_size =
+ min_t(u16, params->buf_size, DOT11AH_BA_MAX_MPDU_PER_AMPDU);
+
+ switch (params->action) {
+ case IEEE80211_AMPDU_TX_START:
+ dev_dbg(mors->dev, "%pM.%d A-MPDU TX start", mors_sta->addr,
+ tid);
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ dev_dbg(mors->dev, "%pM.%d A-MPDU TX flush", mors_sta->addr,
+ tid);
+ mors_sta->tid_start_tx[tid] = false;
+ mors_sta->tid_tx[tid] = false;
+ mors_sta->tid_params[tid] = 0;
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ dev_dbg(mors->dev, "%pM.%d A-MPDU TX oper", mors_sta->addr,
+ tid);
+ /* NOTE(review): tid_tx is set before the buf_size sanity
+ * check, so an invalid size leaves tid_tx true with
+ * tid_params untouched — confirm intended.
+ */
+ mors_sta->tid_tx[tid] = true;
+ if (!buf_size) {
+ dev_err(mors->dev, "%pM.%d A-MPDU Invalid buf size",
+ mors_sta->addr, tid);
+ break;
+ }
+ mors_sta->tid_params[tid] =
+ u8_encode_bits(buf_size - 1,
+ TX_INFO_TID_PARAMS_MAX_REORDER_BUF) |
+ u8_encode_bits(1, TX_INFO_TID_PARAMS_AMPDU_ENABLED) |
+ u8_encode_bits(params->amsdu,
+ TX_INFO_TID_PARAMS_AMSDU_SUPPORTED);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * mac80211 .set_key handler: install/remove CCMP/GCMP keys in
+ * firmware. Returns 0 on success, 1 to ask mac80211 to fall back to
+ * software crypto when firmware install fails, and -EOPNOTSUPP for
+ * ciphers/key lengths we never offload (also triggers sw fallback).
+ * Key removal failures are swallowed (must return 0 on DISABLE_KEY).
+ */
+static int mm81x_mac_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ u16 aid;
+ int ret = -EOPNOTSUPP;
+ struct mm81x *mors = hw->priv;
+ struct mm81x_vif *mors_vif = (struct mm81x_vif *)vif->drv_priv;
+ enum host_cmd_key_cipher cipher;
+ enum host_cmd_aes_key_len length;
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ aid = mm81x_mac_sta_aid(vif);
+ } else if (sta) {
+ aid = sta->aid;
+ } else {
+ /* Is a group key - AID is unused */
+ WARN_ON(key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+ aid = 0;
+ }
+
+ switch (cmd) {
+ case SET_KEY: {
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ cipher = HOST_CMD_KEY_CIPHER_AES_CCM;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ cipher = HOST_CMD_KEY_CIPHER_AES_GCM;
+ break;
+ default:
+ /* Cipher suite currently not supported */
+ ret = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ switch (key->keylen) {
+ case 16:
+ length = HOST_CMD_AES_KEY_LEN_LENGTH_128;
+ break;
+ case 32:
+ length = HOST_CMD_AES_KEY_LEN_LENGTH_256;
+ break;
+ default:
+ /* Key length not supported */
+ ret = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ ret = mm81x_cmd_install_key(mors, mors_vif, aid, key, cipher,
+ length);
+ break;
+ }
+ case DISABLE_KEY:
+ ret = mm81x_cmd_disable_key(mors, mors_vif, aid, key);
+ if (ret) {
+ /* Must return 0 */
+ dev_warn(mors->dev, "Failed to remove key");
+ ret = 0;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ /* Any residual error -> let mac80211 do this key in software. */
+ if (ret) {
+ dev_dbg(mors->dev, "Falling back to software crypto");
+ ret = 1;
+ }
+
+exit:
+ return ret;
+}
+
+/* mac80211 .set_frag_threshold handler: pass straight to firmware. */
+static int mm81x_mac_set_frag_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value)
+{
+ struct mm81x *mors = hw->priv;
+
+ return mm81x_cmd_set_frag_threshold(mors, value);
+}
+
+/*
+ * Translate the driver rx descriptor into mac80211's rx status:
+ * signal, S1G band/frequency, NSS, MCS and decrypt/SGI flags.
+ * MCS10 is reported to mac80211 as MCS0 (same modulation, S1G
+ * duplicate mode). The skb parameter is currently unused.
+ */
+static void mm81x_rx_h_fill_status(struct mm81x *mors,
+ struct mm81x_skb_rx_status *hdr_rx_status,
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb)
+{
+ u8 mcs_index;
+ u32 flags = le32_to_cpu(hdr_rx_status->flags);
+ u16 freq_100khz = le16_to_cpu(hdr_rx_status->freq_100khz);
+ __le32 ratecode = hdr_rx_status->mm81x_ratecode;
+
+ rx_status->signal = le16_to_cpu(hdr_rx_status->rssi);
+ rx_status->encoding = RX_ENC_VHT;
+ rx_status->band = NL80211_BAND_S1GHZ;
+ rx_status->freq = KHZ100_TO_MHZ(freq_100khz);
+ /* Non-integer MHz centre frequencies carry a 500kHz offset. */
+ rx_status->freq_offset = (freq_100khz % 10) ? 1 : 0;
+ rx_status->nss = NSS_IDX_TO_NSS(mm81x_ratecode_nss_index_get(ratecode));
+
+ if (flags & MM81X_RX_STATUS_FLAGS_DECRYPTED)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+
+ mcs_index = mm81x_ratecode_mcs_index_get(ratecode);
+ rx_status->rate_idx = (mcs_index == 10) ? 0 : mcs_index;
+
+ if (mm81x_ratecode_sgi_get(ratecode))
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+}
+
+/*
+ * Fold the frame's signal into the sta's running average RSSI. For
+ * S1G beacons we look the sta up by addr1 (DA), otherwise by the
+ * transmitter (addr2). Caller must hold the RCU read lock.
+ */
+static void mm81x_rx_h_update_sta(struct ieee80211_vif *vif,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_sta *sta;
+ struct mm81x_sta *msta;
+ u8 *lookup = ieee80211_is_s1g_beacon(hdr->frame_control) ? hdr->addr1 :
+ hdr->addr2;
+
+ lockdep_assert_in_rcu_read_lock();
+
+ sta = ieee80211_find_sta(vif, lookup);
+ if (!sta)
+ return;
+
+ msta = (void *)sta->drv_priv;
+ /* Seed the average with the first sample (avg_rssi == 0 means unset). */
+ if (msta->avg_rssi) {
+ msta->avg_rssi =
+ CALC_AVG_RSSI(msta->avg_rssi, rx_status->signal);
+ } else {
+ msta->avg_rssi = rx_status->signal;
+ }
+}
+
+/*
+ * Resolve the vif an rx frame belongs to from the vif id encoded in
+ * the rx descriptor flags; NULL if the firmware marked it invalid.
+ * Caller must hold the RCU read lock.
+ */
+static struct ieee80211_vif *
+mm81x_rx_h_skb_get_vif(struct mm81x *mors, struct sk_buff *skb,
+ struct mm81x_skb_rx_status *hdr_rx_status)
+{
+ u8 vif_id = u32_get_bits(le32_to_cpu(hdr_rx_status->flags),
+ MM81X_RX_STATUS_FLAGS_VIF_ID);
+
+ lockdep_assert_in_rcu_read_lock();
+
+ if (vif_id == INVALID_VIF_INDEX)
+ return NULL;
+
+ return mm81x_rcu_dereference_vif_id(mors, vif_id, true);
+}
+
+/*
+ * Hand a received frame to mac80211: build the rx status, update the
+ * per-sta RSSI stats (if the owning vif/sta can be resolved) and pass
+ * the skb up. Consumes the skb in all paths (dropped if the device is
+ * not started or the skb is empty).
+ */
+void mm81x_mac_rx_skb(struct mm81x *mors, struct sk_buff *skb,
+ struct mm81x_skb_rx_status *hdr_rx_status)
+{
+ struct ieee80211_vif *vif;
+ struct ieee80211_hw *hw = mors->hw;
+ struct ieee80211_rx_status rx_status;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ memset(&rx_status, 0, sizeof(rx_status));
+
+ if (!mors->started || !skb->data || !skb->len) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ mm81x_rx_h_fill_status(mors, hdr_rx_status, &rx_status, skb);
+
+ scoped_guard(rcu) {
+ vif = mm81x_rx_h_skb_get_vif(mors, skb, hdr_rx_status);
+ if (!vif)
+ goto rx;
+
+ mm81x_rx_h_update_sta(vif, hdr, &rx_status);
+ }
+
+rx:
+ memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+ ieee80211_rx_ni(hw, skb);
+}
+
+/*
+ * mac80211 .flush handler. Only the drop case does work: pause data
+ * traffic, drain the HIF work items, discard pending events/data/cmds
+ * and resume. The wait-for-drain (drop == false) case is a no-op.
+ */
+static void mm81x_mac_ops_flush(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u32 queues,
+ bool drop)
+{
+ struct mm81x *mors = hw->priv;
+
+ /* We don't support IEEE80211_HW_QUEUE_CONTROL so flush all queues */
+ if (drop) {
+ /*
+ * No need to call mm81x_skbq_stop_tx_queues as mac80211
+ * has already cancelled each queue prior to calling .flush()
+ */
+ mm81x_skbq_data_traffic_pause(mors);
+
+ flush_work(&mors->hif_work);
+ flush_work(&mors->tx_stale_work);
+
+ mm81x_hif_clear_events(mors);
+ mm81x_hif_flush_tx_data(mors);
+ mm81x_hif_flush_cmds(mors);
+
+ /* Reenable data, not that there will be any */
+ mm81x_skbq_data_traffic_resume(mors);
+ }
+}
+
+/* mac80211 .set_rts_threshold handler: cache the value; it is consulted
+ * per-packet on the tx path (mm81x_tx_h_pkt_over_rts_threshold).
+ */
+static int mm81x_mac_ops_set_rts_threshold(struct ieee80211_hw *hw,
+ int radio_idx, u32 value)
+{
+ struct mm81x *mors = hw->priv;
+
+ mors->rts_threshold = value;
+ return 0;
+}
+
+/*
+ * mac80211 .link_sta_rc_update handler: on a bandwidth change,
+ * re-seed the rate controller for this sta by faking a remove
+ * (ASSOC -> NOTEXIST) followed by an add (NOTEXIST -> ASSOC).
+ */
+static void mm81x_mac_ops_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ u32 changed)
+{
+ struct mm81x *mors = hw->priv;
+ struct ieee80211_sta *sta = link_sta->sta;
+ enum ieee80211_sta_state old_state;
+ enum ieee80211_sta_state new_state;
+
+ dev_dbg(mors->dev,
+ "Rate control config updated (changed %u, peer address %pM)",
+ changed, sta->addr);
+
+ if (!(changed & IEEE80211_RC_BW_CHANGED))
+ return;
+
+ /*
+ * Simulate the disconnection and connection to reinitialize the sta
+ * in mmrc with new BW
+ */
+ old_state = IEEE80211_STA_ASSOC;
+ new_state = IEEE80211_STA_NOTEXIST;
+
+ dev_dbg(mors->dev,
+ "Remove sta, old_state=%d, new_state=%d, changed=0x%x, bw_changed=%d",
+ old_state, new_state, changed,
+ (changed & IEEE80211_RC_BW_CHANGED));
+
+ mm81x_rc_sta_state_check(mors, vif, sta, old_state, new_state);
+
+ old_state = IEEE80211_STA_NOTEXIST;
+ new_state = IEEE80211_STA_ASSOC;
+
+ dev_dbg(mors->dev, "Add sta, old_state=%d, new_state=%d", old_state,
+ new_state);
+
+ mm81x_rc_sta_state_check(mors, vif, sta, old_state, new_state);
+}
+
+/*
+ * mac80211 .sta_statistics handler: report the rate controller's
+ * current best-throughput rate as the sta's tx bitrate. Clears the
+ * TX_BITRATE field when the rate table is absent or unpopulated.
+ */
+static void mm81x_mac_ops_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct mm81x_sta *msta = (struct mm81x_sta *)sta->drv_priv;
+ struct mm81x *mors = hw->priv;
+ const struct mmrc_table *tb = msta->rc.tb;
+ struct mmrc_rate rate;
+
+ if (!tb || tb->best_tp.rate == MMRC_MCS_UNUSED) {
+ sinfo->filled &= ~BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ return;
+ }
+
+ rate = tb->best_tp;
+ sinfo->txrate.mcs = rate.rate;
+ sinfo->txrate.nss = NSS_IDX_TO_NSS(rate.ss);
+ sinfo->txrate.flags = RATE_INFO_FLAGS_S1G_MCS;
+ switch (rate.bw) {
+ case MMRC_BW_1MHZ:
+ sinfo->txrate.bw = RATE_INFO_BW_1;
+ break;
+ case MMRC_BW_2MHZ:
+ sinfo->txrate.bw = RATE_INFO_BW_2;
+ break;
+ case MMRC_BW_4MHZ:
+ sinfo->txrate.bw = RATE_INFO_BW_4;
+ break;
+ case MMRC_BW_8MHZ:
+ sinfo->txrate.bw = RATE_INFO_BW_8;
+ break;
+ default:
+ break;
+ }
+
+ if (rate.guard == MMRC_GUARD_SHORT)
+ sinfo->txrate.flags |= (RATE_INFO_FLAGS_SHORT_GI);
+
+ dev_dbg(mors->dev, "mcs: %d, bw: %d, flag: 0x%x", rate.rate, rate.bw,
+ sinfo->txrate.flags);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+}
+
+/*
+ * mac80211 .get_expected_throughput handler: theoretical throughput
+ * (kbps) of the rate controller's best-throughput rate, or 0 if no
+ * rate table is available yet.
+ */
+static u32 mm81x_get_expected_throughput(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta)
+{
+ struct mm81x_sta *msta = (struct mm81x_sta *)sta->drv_priv;
+ struct mm81x *mors = hw->priv;
+ const struct mmrc_table *tb = msta->rc.tb;
+ struct mmrc_rate rate;
+ u32 tput;
+
+ if (!tb || tb->best_tp.rate == MMRC_MCS_UNUSED)
+ return 0;
+
+ rate = tb->best_tp;
+ tput = BPS_TO_KBPS(mmrc_calculate_theoretical_throughput(rate));
+ dev_dbg(mors->dev, "Throughput: MCS: %d, BW: %d, GI: %d -> %u",
+ rate.rate, 1 << rate.bw, rate.guard, tput);
+
+ return tput;
+}
+
+/*
+ * Interface iterator callback used during chip restart: tear down
+ * beaconing state on AP interfaces; all other interface types are
+ * left untouched.
+ */
+static void mm81x_mac_restart_cleanup_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mm81x_vif *mors_vif = (struct mm81x_vif *)vif->drv_priv;
+
+ if (vif->type != NL80211_IFTYPE_AP)
+ return;
+
+ mm81x_mac_beacon_finish(mors_vif);
+}
+
+/*
+ * Pre-restart cleanup: finish beaconing on every active AP interface
+ * and complete any in-flight hardware scan before the firmware is
+ * reloaded.
+ */
+static void mm81x_mac_restart_cleanup(struct mm81x *mors)
+{
+ ieee80211_iterate_active_interfaces(mors->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ mm81x_mac_restart_cleanup_iter,
+ NULL);
+ mm81x_mac_hw_scan_finish(mors);
+}
+
+/*
+ * Full chip restart: quiesce TX/RX and the host interface, verify the
+ * chip is still reachable, reload the firmware and finally hand control
+ * back to mac80211 via ieee80211_restart_hw().
+ *
+ * Power save is disabled for the duration of the restart and re-enabled
+ * on every exit path. Returns 0 on success or a negative error code.
+ */
+static int mm81x_mac_restart(struct mm81x *mors)
+{
+ int ret;
+ u32 chip_id;
+
+ /* Stop all traffic sources before touching the chip */
+ mors->started = false;
+ mm81x_ps_disable(mors);
+ mm81x_bus_set_irq(mors, false);
+ mm81x_hw_irq_clear(mors);
+ ieee80211_stop_queues(mors->hw);
+
+ set_bit(MM81X_STATE_DATA_TX_STOPPED, &mors->state_flags);
+ set_bit(MM81X_STATE_DATA_QS_STOPPED, &mors->state_flags);
+
+ /* Allow time for in-transit tx/rx packets to settle */
+ mdelay(MM81X_HW_RESTART_DELAY_MS);
+ flush_work(&mors->hif_work);
+ flush_work(&mors->tx_stale_work);
+ mm81x_hif_clear_events(mors);
+ mm81x_hif_flush_tx_data(mors);
+ mm81x_hif_flush_cmds(mors);
+
+ /* Probe the chip-id register to confirm the chip is responsive */
+ mm81x_claim_bus(mors);
+ ret = mm81x_reg32_read(mors, MM81X_REG_CHIP_ID(mors), &chip_id);
+ mm81x_release_bus(mors);
+
+ if (ret < 0) {
+ dev_err(mors->dev, "Failed to access HW: %d", ret);
+ goto exit;
+ }
+
+ mm81x_mac_restart_cleanup(mors);
+
+ ret = mm81x_fw_init(mors, true);
+ if (ret < 0) {
+ dev_err(mors->dev, "Failed to init firmware: %d", ret);
+ goto exit;
+ }
+
+ mm81x_hw_irq_enable(mors, MM81X_INT_HW_STOP_NOTIFICATION_NUM, true);
+
+ ret = mm81x_fw_parse_ext_host_tbl(mors);
+ if (ret) {
+ dev_err(mors->dev, "failed to parse extended host table: %d",
+ ret);
+ goto exit;
+ }
+
+ mm81x_mac_caps_init(mors);
+
+ /* Firmware is up again: re-arm interrupts and clear stop flags */
+ mm81x_bus_set_irq(mors, true);
+ clear_bit(MM81X_STATE_DATA_TX_STOPPED, &mors->state_flags);
+ clear_bit(MM81X_STATE_DATA_QS_STOPPED, &mors->state_flags);
+ clear_bit(MM81X_STATE_CHIP_UNRESPONSIVE, &mors->state_flags);
+ clear_bit(MM81X_STATE_RELOAD_FW_AFTER_START, &mors->state_flags);
+ mm81x_mac_check_fw_disabled_chans(mors->hw);
+ ieee80211_restart_hw(mors->hw);
+
+exit:
+ mm81x_ps_enable(mors);
+ return ret;
+}
+
+/*
+ * mac80211 add_interface callback.
+ *
+ * If a regulatory change requested a firmware reload, restart the chip
+ * instead: the resulting ieee80211_restart_hw() makes mac80211 re-enter
+ * add_interface for this vif, so we return early.
+ *
+ * Otherwise create the interface in firmware, record the vif pointer
+ * under the firmware-assigned id for later lookup, and fetch the
+ * firmware capabilities. Returns 0 on success or a negative error code.
+ */
+static int mm81x_mac_ops_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ int ret;
+ struct mm81x *mors = hw->priv;
+ struct mm81x_vif *mors_vif = (struct mm81x_vif *)vif->drv_priv;
+
+ if (test_bit(MM81X_STATE_RELOAD_FW_AFTER_START, &mors->state_flags)) {
+ dev_info(mors->dev, "Restarting chip with regdom: %s",
+ mors->country);
+
+ ret = mm81x_mac_restart(mors);
+ if (ret) {
+ dev_err(mors->dev, "Failed to restart chip");
+ return ret;
+ }
+
+ /*
+ * mac_restart will trigger ieee80211_hw_restart and
+ * add_interface will re-enter. just exit here instead.
+ */
+ return 0;
+ }
+
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+ mors_vif->u.ap.beaconing_enabled = false;
+ mors_vif->mors = mors;
+
+ ret = mm81x_cmd_add_if(mors, &mors_vif->id, vif->addr, vif->type);
+ if (ret) {
+ dev_err(mors->dev, "mm81x_cmd_add_if failed %d", ret);
+ return ret;
+ }
+
+ /*
+ * NOTE(review): on the failure paths below the interface has already
+ * been created in firmware but is not removed again here — confirm
+ * whether a firmware-side teardown command is required.
+ */
+ if (mors_vif->id >= ARRAY_SIZE(mors->vifs)) {
+ dev_err(mors->dev, "vif_id is too large %u", mors_vif->id);
+ return -EOPNOTSUPP;
+ }
+
+ /* The id must also fit into the TX confirm flags field */
+ if (mors_vif->id != (mors_vif->id & MM81X_TX_CONF_FLAGS_VIF_ID_MASK)) {
+ dev_err(mors->dev, "invalid vif_id %u", mors_vif->id);
+ return -EOPNOTSUPP;
+ }
+
+ rcu_assign_pointer(mors->vifs[mors_vif->id], vif);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ mm81x_mac_beacon_init(mors_vif);
+
+ ret = mm81x_cmd_get_capabilities(mors, mors_vif->id, &mors->fw_caps);
+ if (ret) {
+ dev_err(mors->dev,
+ "mm81x_cmd_get_capabilities failed for vif %d",
+ mors_vif->id);
+ return ret;
+ }
+
+ ieee80211_wake_queues(mors->hw);
+ return 0;
+}
+
+/*
+ * mac80211 driver callbacks. Channel-context operations are delegated
+ * to mac80211's emulation helpers since the driver has no native
+ * channel-context support.
+ */
+static const struct ieee80211_ops mm81x_ops = {
+ .start = mm81x_mac_ops_start,
+ .stop = mm81x_mac_ops_stop,
+ .config = mm81x_mac_ops_config,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
+ .tx = mm81x_mac_ops_tx,
+ .add_interface = mm81x_mac_ops_add_interface,
+ .remove_interface = mm81x_mac_ops_remove_interface,
+ .configure_filter = mm81x_mac_ops_configure_filter,
+ .sta_state = mm81x_mac_ops_sta_state,
+ .flush = mm81x_mac_ops_flush,
+ .set_frag_threshold = mm81x_mac_set_frag_threshold,
+ .set_rts_threshold = mm81x_mac_ops_set_rts_threshold,
+ .link_sta_rc_update = mm81x_mac_ops_sta_rc_update,
+ .sta_statistics = mm81x_mac_ops_sta_statistics,
+ .get_expected_throughput = mm81x_get_expected_throughput,
+ .hw_scan = mm81x_mac_ops_hw_scan,
+ .cancel_hw_scan = mm81x_mac_ops_cancel_hw_scan,
+ .get_txpower = mm81x_mac_ops_get_txpower,
+ .bss_info_changed = mm81x_mac_ops_bss_info_changed,
+ .prepare_multicast = mm81x_mac_ops_prepare_multicast,
+ .conf_tx = mm81x_mac_ops_conf_tx,
+ .ampdu_action = mm81x_mac_ops_ampdu_action,
+ .set_key = mm81x_mac_ops_set_key,
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
+};
+
+/*
+ * cfg80211 regulatory notifier: on a real country change, store the new
+ * alpha2 code and restart the chip so the firmware is reloaded with the
+ * new regulatory domain. The world domain ("00") and no-op changes are
+ * ignored.
+ */
+static void mm81x_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ int ret;
+ struct mm81x *mors = wiphy_to_ieee80211_hw(wiphy)->priv;
+
+ if (mm81x_reg_h_cc_equal(request->alpha2, "00") ||
+ mm81x_reg_h_cc_equal(request->alpha2, mors->country))
+ return;
+
+ memcpy(mors->country, request->alpha2, sizeof(mors->country));
+
+ ret = mm81x_mac_restart(mors);
+ if (ret)
+ dev_err(mors->dev, "Failed to restart chip: %d", ret);
+}
+
+/*
+ * One-time configuration of the ieee80211_hw/wiphy before registration:
+ * advertise S1G-only band support, AP/STA interface modes, queue and
+ * rate limits, hardware capability flags and scan parameters.
+ */
+static void mm81x_mac_config_hw(struct mm81x *mors)
+{
+ int i;
+ struct ieee80211_hw *hw = mors->hw;
+ struct wiphy *wiphy;
+
+ /* Only the S1G band is supported; clear all other band pointers */
+ for (i = 0; i < NUM_NL80211_BANDS; i++)
+ hw->wiphy->bands[i] = NULL;
+
+ hw->wiphy->bands[NL80211_BAND_S1GHZ] = &mors_band_s1ghz;
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_STATION);
+ hw->wiphy->reg_notifier = mm81x_reg_notifier;
+ hw->queues = MM81X_HW_QUEUE_COUNT;
+ hw->max_rates = MM81X_HW_MAX_RATES;
+ hw->max_report_rates = MM81X_HW_MAX_REPORT_RATES;
+ hw->max_rate_tries = MM81X_HW_MAX_RATE_TRIES;
+ hw->tx_sk_pacing_shift = MM81X_HW_TX_SK_PACING_SHIFT;
+ hw->vif_data_size = sizeof(struct mm81x_vif);
+ hw->sta_data_size = sizeof(struct mm81x_sta);
+ /* Room for the driver header plus bus alignment padding per frame */
+ hw->extra_tx_headroom =
+ sizeof(struct mm81x_skb_hdr) + mm81x_bus_get_alignment(mors);
+
+ mors->wiphy = hw->wiphy;
+
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
+ ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
+ ieee80211_hw_set(hw, SUPPORTS_TX_FRAG);
+ ieee80211_hw_set(hw, SUPPORTS_NDP_BLOCKACK);
+
+ SET_IEEE80211_PERM_ADDR(hw, mors->macaddr);
+
+ wiphy = mors->wiphy;
+
+ wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ /* Respect a module/driver-level power-save opt-out */
+ if (!mors->ps.enable)
+ wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+ NL80211_FEATURE_TX_POWER_INSERTION;
+
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+
+ wiphy->iface_combinations = mors_if_combs;
+ wiphy->n_iface_combinations = ARRAY_SIZE(mors_if_combs);
+ wiphy->max_scan_ie_len = MM81X_MAX_SCAN_IE_LEN;
+ wiphy->max_scan_ssids = MM81X_MAX_SCAN_SSIDS;
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+ wiphy->max_remain_on_channel_duration =
+ MM81X_MAX_REMAIN_ON_CHAN_DURATION;
+}
+
+/*
+ * Timer callback: if TX statuses are still pending, queue the stale-TX
+ * work to reap them. The stale_status lock orders this check against
+ * concurrent status processing.
+ */
+static void mm81x_stale_tx_status_timer(struct timer_list *t)
+{
+ struct mm81x *mors = timer_container_of(mors, t, stale_status.timer);
+
+ spin_lock_bh(&mors->stale_status.lock);
+ if (mm81x_hif_get_tx_status_pending_count(mors))
+ queue_work(mors->net_wq, &mors->tx_stale_work);
+ spin_unlock_bh(&mors->stale_status.lock);
+}
+
+/* Stop the stale TX status timer, waiting for a running callback. */
+static void mm81x_stale_tx_status_timer_finish(struct mm81x *mors)
+{
+ timer_delete_sync_try(&mors->stale_status.timer);
+}
+
+/* Initialise the stale TX status timer and its guarding lock. */
+static void mm81x_mac_stale_tx_status_timer_init(struct mm81x *mors)
+{
+ spin_lock_init(&mors->stale_status.lock);
+ timer_setup(&mors->stale_status.timer, mm81x_stale_tx_status_timer, 0);
+}
+
+/*
+ * Register the device with mac80211.
+ *
+ * Sets power/RTS defaults, initialises power save, scanning and the
+ * stale TX status timer, registers the hw and finally kicks the HIF
+ * work to drain any frames the chip pushed before interrupts were
+ * enabled. Returns 0 on success or a negative error code; on failure
+ * everything initialised here is undone.
+ */
+int mm81x_mac_register(struct mm81x *mors)
+{
+ int ret;
+ struct ieee80211_hw *hw = mors->hw;
+
+ mors->tx_power_mbm = INT_MAX;
+ mors->tx_max_power_mbm = INT_MAX;
+ mors->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
+
+ ret = mm81x_ps_init(mors);
+ if (ret < 0)
+ return ret;
+
+ mm81x_mac_config_hw(mors);
+ mm81x_mac_hw_scan_init(mors);
+ mm81x_mac_stale_tx_status_timer_init(mors);
+
+ ret = ieee80211_register_hw(hw);
+ if (ret) {
+ dev_err(mors->dev, "ieee80211_register_hw failed %d", ret);
+ /*
+ * Undo only what this function set up. Do not go through
+ * mm81x_mac_unregister(): it calls ieee80211_unregister_hw(),
+ * which must not run for a hw that never registered.
+ */
+ mm81x_mac_hw_scan_destroy(mors);
+ mm81x_stale_tx_status_timer_finish(mors);
+ mm81x_ps_finish(mors);
+ return ret;
+ }
+
+ mm81x_rc_init(mors);
+
+ /*
+ * At this stage, we know bus and pager system interrupts are enabled.
+ * Trigger the receive workqueue to drain any incoming chip-to-host
+ * pending packets been pushed in the period between the firmware
+ * initialization and interrupts being enabled.
+ */
+ set_bit(MM81X_HIF_EVT_RX_PEND, &mors->hif.event_flags);
+ queue_work(mors->chip_wq, &mors->hif_work);
+
+ return 0;
+}
+
+/*
+ * Unregister from mac80211 and tear down everything set up by
+ * mm81x_mac_register(): rate control, scanning, queued TX data and
+ * commands, the stale-status timer and power save.
+ */
+void mm81x_mac_unregister(struct mm81x *mors)
+{
+ mm81x_ps_disable(mors);
+ mm81x_rc_deinit(mors);
+ mm81x_mac_hw_scan_destroy(mors);
+
+ ieee80211_stop_queues(mors->hw);
+ ieee80211_unregister_hw(mors->hw);
+
+ /* Drop anything still queued towards the (now gone) stack/chip */
+ mm81x_hif_flush_tx_data(mors);
+ mm81x_hif_flush_cmds(mors);
+ mm81x_stale_tx_status_timer_finish(mors);
+ mm81x_ps_finish(mors);
+}
+
+/*
+ * Allocate the ieee80211_hw together with the driver context and
+ * @priv_size bytes of bus-private data behind it.
+ *
+ * Returns the embedded struct mm81x, or NULL on allocation failure.
+ * The result is freed with mm81x_mac_free().
+ */
+struct mm81x *mm81x_mac_alloc(size_t priv_size, struct device *dev)
+{
+ struct ieee80211_hw *hw;
+ struct mm81x *mors;
+
+ hw = ieee80211_alloc_hw(sizeof(*mors) + priv_size, &mm81x_ops);
+ if (!hw) {
+ dev_err(dev, "ieee80211_alloc_hw failed");
+ return NULL;
+ }
+
+ SET_IEEE80211_DEV(hw, dev);
+ memset(hw->priv, 0, sizeof(*mors));
+
+ mors = hw->priv;
+ mors->hw = hw;
+ mors->dev = dev;
+ mutex_init(&mors->cmd_lock);
+ /* NOTE(review): cmd_wait is initialised as a mutex here — the name
+ * suggests a wait object/completion; confirm the declared type.
+ */
+ mutex_init(&mors->cmd_wait);
+
+ return mors;
+}
+
+/* Free the hw (and embedded mm81x) allocated by mm81x_mac_alloc(). */
+void mm81x_mac_free(struct mm81x *mors)
+{
+ ieee80211_free_hw(mors->hw);
+}
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 13/31] wifi: mm81x: add mac.h
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (11 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 12/31] wifi: mm81x: add mac.c Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 14/31] wifi: mm81x: add mmrc.c Lachlan Hodges
` (18 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/mac.h | 69 +++++++++++++++++++++
1 file changed, 69 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/mac.h
diff --git a/drivers/net/wireless/morsemicro/mm81x/mac.h b/drivers/net/wireless/morsemicro/mm81x/mac.h
new file mode 100644
index 000000000000..7d0fc2e5229b
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/mac.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+
+#ifndef _MM81X_MAC_H_
+#define _MM81X_MAC_H_
+
+#include "core.h"
+#include "command.h"
+
+/* Per-AC EDCA queue parameters passed to the firmware. */
+struct mm81x_queue_params {
+ u8 uapsd; /* U-APSD enabled for this AC */
+ u8 aci; /* access category index */
+ u8 aifs; /* arbitration inter-frame space */
+ u16 cw_min; /* minimum contention window */
+ u16 cw_max; /* maximum contention window */
+ u32 txop; /* TX opportunity limit */
+};
+
+/* True when this (station) vif is currently associated to a BSS. */
+static inline bool mm81x_mac_is_sta_vif_associated(struct ieee80211_vif *vif)
+{
+ return vif->cfg.assoc;
+}
+
+/* Derive the compressed SSID value from the vif's configured SSID. */
+static inline u32 mm81x_vif_generate_cssid(struct ieee80211_vif *vif)
+{
+ return mm81x_generate_cssid(vif->cfg.ssid, vif->cfg.ssid_len);
+}
+
+/* Association ID assigned to this station vif by its AP. */
+static inline u16 mm81x_mac_sta_aid(struct ieee80211_vif *vif)
+{
+ return vif->cfg.aid;
+}
+
+/*
+ * Pack the last four octets of a MAC address (addr[2]..addr[5]) into a
+ * little-endian u32; the first two octets are deliberately dropped.
+ */
+static inline __le32 mac2leuint32(const unsigned char *addr)
+{
+ return cpu_to_le32(((u32)(addr[2]) << 24) | ((u32)(addr[3]) << 16) |
+ ((u32)(addr[4]) << 8) | ((u32)(addr[5])));
+}
+
+/*
+ * Look up the vif registered under @vif_id.
+ *
+ * @rcu: true when the caller is inside an RCU read-side critical
+ * section; false when the caller holds the wiphy mutex instead.
+ * Returns NULL (with a WARN) for an out-of-range id.
+ */
+static inline struct ieee80211_vif *
+mm81x_rcu_dereference_vif_id(struct mm81x *mors, u8 vif_id, bool rcu)
+{
+ if (WARN_ON(vif_id >= ARRAY_SIZE(mors->vifs)))
+ return NULL;
+
+ if (rcu)
+ return rcu_dereference(mors->vifs[vif_id]);
+
+ return rcu_dereference_protected(mors->vifs[vif_id],
+ lockdep_is_held(&mors->hw->wiphy->mtx));
+}
+
+int mm81x_tx_h_get_attempts(struct mm81x *mors,
+ struct mm81x_skb_tx_status *tx_sts);
+struct mm81x *mm81x_mac_alloc(size_t priv_size, struct device *dev);
+int mm81x_mac_register(struct mm81x *mors);
+void mm81x_mac_free(struct mm81x *mors);
+void mm81x_mac_unregister(struct mm81x *mors);
+int mm81x_mac_event_recv(struct mm81x *mors, struct sk_buff *skb);
+void mm81x_mac_rx_skb(struct mm81x *mors, struct sk_buff *skb,
+ struct mm81x_skb_rx_status *hdr_rx_status);
+void mm81x_mac_beacon_irq_handle(struct mm81x *mors, u32 status);
+
+u8 *mm81x_hw_scan_h_insert_tlvs(struct mm81x_hw_scan_params *params, u8 *buf);
+size_t mm81x_hw_scan_h_get_cmd_size(struct mm81x_hw_scan_params *params);
+void mm81x_tx_h_check_aggr(struct ieee80211_sta *pubsta, struct sk_buff *skb);
+#endif /* !_MM81X_MAC_H_ */
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 14/31] wifi: mm81x: add mmrc.c
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (12 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 13/31] wifi: mm81x: add mac.h Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 15/31] wifi: mm81x: add mmrc.h Lachlan Hodges
` (17 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/mmrc.c | 1354 ++++++++++++++++++
1 file changed, 1354 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/mmrc.c
diff --git a/drivers/net/wireless/morsemicro/mm81x/mmrc.c b/drivers/net/wireless/morsemicro/mm81x/mmrc.c
new file mode 100644
index 000000000000..fe7e4f501d6c
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/mmrc.c
@@ -0,0 +1,1354 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+#include "mmrc.h"
+
+/*
+ * The default packet size in bits used for calculated throughput of a given
+ * rate
+ */
+#define DEFAULT_PACKET_SIZE_BITS 9600
+
+/*
+ * The default packet size in bytes used for calculating retries for a given
+ * rate
+ */
+#define DEFAULT_PACKET_SIZE_BYTES 1200
+
+/* The sample frequencies at different stages */
+#define LOOKAROUND_RATE_INIT 5
+#define LOOKAROUND_RATE_NORMAL 50
+#define LOOKAROUND_RATE_STABLE 100
+
+/* The thresholds for stability stages */
+#define STABILITY_CNT_THRESHOLD_INIT 20
+#define STABILITY_CNT_THRESHOLD_NORMAL 50
+#define STABILITY_CNT_THRESHOLD_STABLE 100
+
+/* The backoff step size for the counter */
+#define STABILITY_BACKOFF_STEP 2
+
+/*
+ * The packet success threshold for attempting slower lookaround rates
+ */
+/*
+ * Force a look around if there haven't been any for this number of cycles
+ */
+#define LOOKAROUND_MAX_RC_CYCLES 5
+
+/*
+ * Number of attempts for each lookaround rate within at most two RC cycles
+ * if there are enough packets
+ */
+#define LOOKAROUND_RATE_ATTEMPTS 4
+
+/*
+ * Limit the number of times we try to pick a theoretically better rate to
+ * sample. Necessary so we don't stall the CPU, due to constantly picking worse
+ * rates.
+ */
+#define LOOKAROUND_FAIL_MAX 200
+
+/*
+ * Initial and reset probability per rate in the table
+ * Changing this value will have a severe implication on the current heuristic
+ * It could mean that some rates will have better probability throughput even
+ * with no edivence and so will cause unexpected changes in the rate table
+ */
+#define RATE_INIT_PROBABILITY 0
+
+/*
+ * The lowest number of MPDUs within acknowledged AMPDUs that can be used for
+ * rate stats
+ */
+#define AMPDU_STATS_MIN 2
+
+/*
+ * The lowest number of stats to be used for processing in NORMAL lookaround
+ * mode
+ */
+#define STATS_MIN_NORMAL 2
+
+/*
+ * The lowest number of stats to be used for processing in INIT lookaround
+ * mode
+ */
+#define STATS_MIN_INIT 1
+
+/* The lowest probability value considered for recognising a dip */
+#define PROBABILITY_DIP_MIN 20
+
+/* The lowest probability value for recovering from a dip */
+#define PROBABILITY_DIP_RECOVERY_MIN 40
+
+/*
+ * The time cap on rate allocation for multiple attempts. If a single attempt
+ * exceeds this window, no additional attempts will be generated
+ */
+#define MAX_WINDOW_ATTEMPT_TIME 4000
+
+/* The time window for all rates in rate table */
+#define RATE_WINDOW_MICROSECONDS 24000
+
+/*
+ * EWMA is the alpha coefficient in the exponential weighting moving average
+ * filter used for probability updates.
+ *
+ * Y[n] = X[n] * (100 - EWMA) + (Y[n-1] * EWMA)
+ * -------------------------------------
+ * 100
+ *
+ */
+#define EWMA 75
+
+/*
+ * Evidence scaling to allow for one decimal place. Needed for low
+ * throughput, otherwise the history decays in a single cycle.
+ */
+#define EVIDENCE_SCALE 5
+
+/*
+ * Evidence maximum to ensure history doesn't decay too slowly when
+ * there is a lot of historical data.
+ */
+#define EVIDENCE_MAX 100
+
+/*
+ * This fixed point conversion multiplies a value by one and shifts it
+ * accordingly to account for the fixed point shifting at the return of a
+ * function
+ */
+#define FP_8_MULT_1 256
+
+/* Fixed point conversion for 2.1 * 2^8 used for 4MHz symbol multiplication */
+#define FP_8_4MHZ 537
+
+/* Fixed point conversion for 4.5 * 2^8 used for 8MHz symbol multiplication */
+#define FP_8_8MHZ 1152
+
+/* Fixed point conversion for 9.0 * 2^8 used for 16MHz symbol multiplication */
+#define FP_8_16MHZ 2301
+
+/*
+ * Fixed point conversion for 4.0 * 2^8 (1024) used for long guard symbol
+ * tx time multiplication
+ */
+#define FP_8_LONG_GUARD_SYMBOL_TIME 1024
+
+/*
+ * Fixed point conversion for 3.6 * 2^8 (921) used for short guard symbol
+ * tx time multiplication
+ */
+#define FP_8_SHORT_GUARD_SYMBOL_TIME 921
+
+/*
+ * Shift value to shift back our FP conversions
+ */
+#define FP_8_SHIFT 8
+
+/*
+ * Limit to count of consecutive variations in one direction
+ */
+#define MAX_VARIATION_DIRECTION 5
+
+/*
+ * Threshold for considering consecutive variation direction as variation
+ * or not
+ */
+#define VARIATION_DIRECTION_THRESHOLD 3
+
+/* EWMA percentage value for averaging the best rate probability variation */
+#define VARIATION_EWMA 95
+
+/* Percentage variation regarded as minor */
+#define MINOR_VARIATION_THRESHOLD 1
+
+/* Percentage variation regarded as moderate */
+#define MODERATE_VARIATION_THRESHOLD 3
+
+/* Percentage variation regarded as significant */
+#define SIGNIFICANT_VARIATION_THRESHOLD 5
+
+/* If the best rate changes twice in this number of cycles, it is unstable */
+#define BEST_RATE_UNSTABLE_THRESHOLD 4
+
+/*
+ * Once the best rate is unchanged for this number of cycles it has
+ * converged
+ */
+#define BEST_RATE_CONVERGED_THRESHOLD 10
+
+/* RSSI threshold for short range */
+#define MMRC_SHORT_RANGE_RSSI_LIMIT -70
+
+/* RSSI threshold for mid range */
+#define MMRC_MID_RANGE_RSSI_LIMIT -85
+
+#define MMRC_MAX_BW(bw_caps) \
+ (((bw_caps) & BIT(MMRC_BW_16MHZ)) ? MMRC_BW_16MHZ : \
+ ((bw_caps) & BIT(MMRC_BW_8MHZ)) ? MMRC_BW_8MHZ : \
+ ((bw_caps) & BIT(MMRC_BW_4MHZ)) ? MMRC_BW_4MHZ : \
+ ((bw_caps) & BIT(MMRC_BW_2MHZ)) ? MMRC_BW_2MHZ : \
+ MMRC_BW_1MHZ)
+
+/*
+ * Bits per OFDM symbol for MCS0-MCS9, referenced to 20MHz / 1 spatial
+ * stream; other bandwidths and stream counts are derived by fixed-point
+ * scaling in calculate_bits_per_symbol().
+ */
+static const u32 sym_table[10] = { 24, 36, 48, 72, 96, 144, 192, 216, 256, 288 };
+
+/*
+ * Calculate which bit is the nth bit set in an integer based flag.
+ */
+static u8 nth_bit(u16 in, u16 index)
+{
+ u32 i;
+ u8 count = 0;
+
+ /*
+ * Bound the scan to the width of u16: if index + 1 exceeds the
+ * number of set bits in @in, the unbounded loop would shift past
+ * bit 15 (undefined behaviour at i >= 32) and never terminate.
+ * Clamp to the top bit position instead.
+ */
+ for (i = 0; i < 16 && count != index + 1; i++) {
+ if (((1u << i) & in) != 0)
+ count++;
+ }
+
+ return i - 1;
+}
+
+/*
+ * Calculate the input bit's index among all the set bits in an integer
+ * based flag.
+ */
+static u16 bit_index(u16 in, u32 bit_pos)
+{
+ u16 i;
+ u16 index = 0;
+
+ /* Count set bits in positions [0, bit_pos] */
+ for (i = 0; i != bit_pos + 1; i++) {
+ if (((1u << i) & in) != 0)
+ index++;
+ }
+
+ if (index == 0) {
+ /* Could not match bit pos to caps */
+ return 0;
+ }
+
+ /* Zero-based index of bit_pos among the set bits */
+ return index - 1;
+}
+
+/*
+ * Number of rate-table rows needed for a station's capabilities: the
+ * cartesian product of supported bandwidths, rates, guard intervals and
+ * spatial streams, plus two extra rows when MCS10 (1MHz-only duplicate
+ * of MCS0) is advertised.
+ */
+static u16 rows_from_sta_caps(struct mmrc_sta_capabilities *caps)
+{
+ u16 rows = 0;
+ u8 n_rates = hweight_long(caps->rates);
+
+ /* Taking MCS10 into account as it is relevant for 1 MHz entries */
+ if (caps->rates & BIT(MMRC_MCS10)) {
+ n_rates -= 1;
+ rows = 2;
+ }
+
+ rows += (hweight_long(caps->bandwidth) * n_rates *
+ hweight_long(caps->guard) *
+ hweight_long(caps->spatial_streams));
+
+ return rows;
+}
+
+/*
+ * Recompute rate->index (its row in the stats table) from the rate's
+ * guard/bw/ss/mcs fields, using the station capability bitmaps as the
+ * mixed-radix digits. An out-of-range result is clamped to row 0.
+ */
+static void rate_update_index(struct mmrc_table *tb, struct mmrc_rate *rate)
+{
+ u16 index = 0;
+ /* Information about our rates */
+ u16 bw = hweight_long(tb->caps.bandwidth);
+ u16 streams = hweight_long(tb->caps.spatial_streams);
+ u16 guard = hweight_long(tb->caps.guard);
+ u16 rows = rows_from_sta_caps(&tb->caps);
+
+ /* guard varies fastest, then bandwidth, spatial streams, MCS */
+ index = bit_index(tb->caps.guard, rate->guard) +
+ bit_index(tb->caps.bandwidth, rate->bw) * guard +
+ bit_index(tb->caps.spatial_streams, rate->ss) * guard * bw +
+ bit_index(tb->caps.rates, rate->rate) * bw * streams * guard;
+
+ if (index >= rows)
+ index = 0;
+
+ rate->index = index;
+}
+
+/*
+ * Decode table row @index back into an mmrc_rate (inverse of
+ * rate_update_index), validating the decoded fields against the
+ * station capabilities and clamping anything out of range.
+ */
+static struct mmrc_rate get_rate_row(struct mmrc_table *tb, u16 index)
+{
+ struct mmrc_rate rate;
+ u16 ss_index;
+
+ /* Information about our rates */
+ u16 mcs = hweight_long(tb->caps.rates);
+ u16 bw = hweight_long(tb->caps.bandwidth);
+ u16 streams = hweight_long(tb->caps.spatial_streams);
+ u16 guard = hweight_long(tb->caps.guard);
+ u16 total_caps = mcs * bw * streams * guard;
+
+ /* Find our MCS */
+ u16 rows = total_caps / mcs;
+ u16 mcs_index = index / rows;
+ u16 mcs_modulo = index % rows;
+
+ mcs = nth_bit(tb->caps.rates, mcs_index);
+
+ /* Find our spatial stream */
+ rows = rows / streams;
+ streams = nth_bit(tb->caps.spatial_streams, mcs_modulo / rows);
+
+ /* Find our bandwidth */
+ ss_index = index % rows;
+ rows = rows / bw;
+ bw = nth_bit(tb->caps.bandwidth, ss_index / rows);
+
+ /*
+ * Find our guard.
+ * NOTE(review): guard is derived from index % <guard option count>
+ * while the other fields use successive subdivision — confirm this
+ * matches the encoding in rate_update_index().
+ */
+ guard = nth_bit(tb->caps.guard, index % guard);
+
+ /* Add range checks to keep scan-build happy */
+ if (bw >= MMRC_BW_MAX)
+ bw = MMRC_BW_1MHZ;
+
+ if (guard >= MMRC_GUARD_MAX)
+ guard = MMRC_GUARD_LONG;
+
+ /* Validate guard against capability */
+ if (guard == MMRC_GUARD_SHORT &&
+ !(tb->caps.sgi_per_bw & SGI_PER_BW(bw)))
+ guard = MMRC_GUARD_LONG;
+
+ /* Create our rate row and send it */
+ rate.bw = MMRC_BW_TO_BITFIELD(bw);
+ rate.ss = MMRC_SS_TO_BITFIELD(streams);
+ rate.rate = MMRC_RATE_TO_BITFIELD(mcs);
+ rate.guard = MMRC_GUARD_TO_BITFIELD(guard);
+ rate.attempts = 0;
+ rate.flags = 0;
+
+ /* Update index as bw or guard may have changed */
+ rate_update_index(tb, &rate);
+
+ return rate;
+}
+
+/* Bytes needed for an mmrc_table plus one stats row per rate entry. */
+size_t mmrc_memory_required_for_caps(struct mmrc_sta_capabilities *caps)
+{
+ return sizeof(struct mmrc_table) +
+ rows_from_sta_caps(caps) * sizeof(struct mmrc_stats_table);
+}
+
+/*
+ * Bits carried per OFDM symbol for @rate, scaled from the 20MHz / 1SS
+ * sym_table reference using Q8 fixed-point bandwidth factors.
+ * Returns 1 on an invalid MCS so callers never divide by zero.
+ */
+static u32 calculate_bits_per_symbol(struct mmrc_rate *rate)
+{
+ u32 bps;
+
+ /* If MCS10 is selected we return 2*MCS0 Symbols */
+ if (rate->rate == MMRC_MCS10)
+ return 6;
+
+ /* Confirm that the rate is valid for the sym_table lookup */
+ if (rate->rate >= MMRC_MCS_UNUSED) {
+ pr_err("%s: Invalid MCS rate %d for sym_table lookup\n",
+ __func__, rate->rate);
+ return 1;
+ }
+
+ /*
+ * Conversion from 20MHz as in sym_table to:
+ * 40MHz == x 2.1
+ * 80MHz == x 4.5
+ * 160MHz == x 9.0
+ */
+ bps = sym_table[rate->rate];
+ switch (rate->bw) {
+ case (MMRC_BW_4MHZ):
+ bps *= FP_8_4MHZ;
+ break;
+ case (MMRC_BW_8MHZ):
+ bps *= FP_8_8MHZ;
+ break;
+ case (MMRC_BW_16MHZ):
+ bps *= FP_8_16MHZ;
+ break;
+ case (MMRC_BW_1MHZ):
+ /* 1MHz uses 24 of 52 data subcarriers relative to 2MHz */
+ bps = sym_table[rate->rate] * 24 / 52;
+ bps *= FP_8_MULT_1;
+ break;
+ case (MMRC_BW_2MHZ):
+ case (MMRC_BW_MAX):
+ default:
+ bps *= FP_8_MULT_1;
+ break;
+ }
+ /* SS + 1 because mmrc_spatial_stream starts at 0 */
+ return ((rate->ss + 1) * bps) >> FP_8_SHIFT;
+}
+
+/*
+ * Estimated airtime (in units of 0.1us-derived microseconds) to send a
+ * default-sized packet at @rate, based on bits-per-symbol and the
+ * guard-interval symbol duration. Returns 0 for an unknown guard mode.
+ */
+static u32 get_tx_time(struct mmrc_rate *rate)
+{
+ u32 tx = 0;
+ u32 n_sym;
+ u32 avg_bits;
+
+ /* Calculate tx time based on a default packet size */
+ avg_bits = DEFAULT_PACKET_SIZE_BITS;
+
+ /* Number of bits per symbol for this rate */
+ n_sym = calculate_bits_per_symbol(rate);
+
+ /* In case of bad calculation/parameter use lowest value */
+ n_sym = n_sym == 0 ? sym_table[0] : n_sym;
+
+ /* number of symbols in default packet size */
+ n_sym = avg_bits / n_sym;
+
+ /*
+ * tx is time to transmit average packet in us:
+ * (n_sym * Q8_symbol_time * 10) >> 8 gives ~40us/symbol for long
+ * guard and ~36us/symbol for short guard (S1G symbol durations).
+ */
+ switch (rate->guard) {
+ case (MMRC_GUARD_LONG):
+ tx = n_sym * FP_8_LONG_GUARD_SYMBOL_TIME;
+ break;
+ case (MMRC_GUARD_SHORT):
+ tx = n_sym * FP_8_SHORT_GUARD_SYMBOL_TIME;
+ break;
+ default:
+ return 0;
+ }
+
+ return (tx * 10) >> FP_8_SHIFT;
+}
+
+/*
+ * Theoretical PHY throughput of @rate in bits/sec: table values are in
+ * kbps per spatial stream, scaled by 1000 and (ss + 1).
+ *
+ * NOTE(review): the lookup tables have four bandwidth rows (1/2/4/8MHz)
+ * — confirm rate.bw can never be MMRC_BW_16MHZ here, which would index
+ * out of bounds.
+ */
+u32 mmrc_calculate_theoretical_throughput(struct mmrc_rate rate)
+{
+ static const u32 s1g_tpt_lgi[4][11] = {
+ { 300, 600, 900, 1200, 1800, 2400, 2700, 3000, 3600, 4000,
+ 150 },
+ { 650, 1300, 1950, 2600, 3900, 5200, 5850, 6500, 7800, 0, 0 },
+ { 1350, 2700, 4050, 5400, 8100, 10800, 12150, 13500, 16200,
+ 18000, 0 },
+ { 2925, 5850, 8775, 11700, 17550, 23400, 26325, 29250, 35100,
+ 39000, 0 },
+ };
+
+ static const u32 s1g_tpt_sgi[4][11] = {
+ { 333, 666, 1000, 1333, 2000, 2666, 3000, 3333, 4000, 4444,
+ 166 },
+ { 722, 1444, 2166, 2888, 4333, 5777, 6500, 7222, 8666, 0, 0 },
+ { 1500, 3000, 4500, 6000, 9000, 12000, 13500, 15000, 18000,
+ 20000, 0 },
+ { 3250, 6500, 9750, 13000, 19500, 26000, 29250, 32500, 39000,
+ 43333, 0 },
+ };
+
+ /* Non-zero guard means short GI */
+ if (rate.guard)
+ return s1g_tpt_sgi[rate.bw][rate.rate] * 1000 * (rate.ss + 1);
+
+ return s1g_tpt_lgi[rate.bw][rate.rate] * 1000 * (rate.ss + 1);
+}
+
+/*
+ * Expected throughput of table row @index: theoretical throughput
+ * weighted by the measured success probability. Rows below 10% are
+ * treated as zero; the current best rate gets a probability boost when
+ * interference is suspected, to avoid abandoning it on noise.
+ */
+static u32 calculate_throughput(struct mmrc_table *tb, u8 index)
+{
+ struct mmrc_rate rate = get_rate_row(tb, index);
+
+ /*
+ * Avoid the overflow (observed for 8MHz MCS9 rate: 43333) by dividing
+ * first before multiplying. Should not experience any loss of
+ * precision as the throughput is already multiplied by 1000 in
+ * mmrc_calculate_theoretical_throughput (returned as bits/sec)
+ */
+ if (tb->table[rate.index].prob < 10)
+ return 0;
+ else if (rate.index == tb->best_tp.index && tb->interference_likely)
+ /*
+ * Assist the best rate by increasing the probability by the
+ * averaged variation
+ */
+ return (mmrc_calculate_theoretical_throughput(rate) / 100) *
+ (tb->table[rate.index].prob + tb->probability_variation);
+ else
+ return (mmrc_calculate_theoretical_throughput(rate) / 100) *
+ tb->table[rate.index].prob;
+}
+
+/*
+ * Check whether @rate is a legal 802.11ah rate/bandwidth/stream/guard
+ * combination for this station. Returns false for combinations the
+ * standard does not define or the peer's SGI capability excludes.
+ */
+static bool validate_rate(struct mmrc_table *tb, struct mmrc_rate *rate)
+{
+ if (rate->rate == MMRC_MCS10 &&
+ (rate->bw != MMRC_BW_1MHZ || rate->ss != MMRC_SPATIAL_STREAM_1)) {
+ /*
+ * 802.11ah does not support MCS10 with BW that is not 1MHz or
+ * not 1 spatial stream.
+ */
+ return false;
+ }
+
+ if (rate->rate == MMRC_MCS9 && rate->bw == MMRC_BW_2MHZ &&
+ rate->ss != MMRC_SPATIAL_STREAM_3) {
+ /*
+ * 802.11ah does not support MCS9 at 2MHz for 1, 2 or 4 spatial
+ * streams
+ */
+ return false;
+ }
+
+ /* Short GI only where the peer advertises it for this bandwidth */
+ if (rate->guard == MMRC_GUARD_SHORT &&
+ !(tb->caps.sgi_per_bw & SGI_PER_BW(rate->bw)))
+ return false;
+
+ return true;
+}
+
+/*
+ * Row index of the slowest valid rate, used as the always-works
+ * baseline. When MCS10 is supported, row 0 is used directly.
+ */
+static u16 find_baseline_index(struct mmrc_table *tb)
+{
+ u32 i, theoretical_tp, min_theoretical_tp;
+ u16 row_count = rows_from_sta_caps(&tb->caps);
+ u16 min_theoretical_tp_index = 0;
+ struct mmrc_rate rate;
+
+ if (tb->caps.rates & BIT(MMRC_MCS10))
+ return 0;
+
+ /* Scan all rows for the lowest theoretical throughput */
+ min_theoretical_tp =
+ mmrc_calculate_theoretical_throughput(get_rate_row(tb, 0));
+ for (i = 0; i < row_count; i++) {
+ rate = get_rate_row(tb, i);
+ if (!validate_rate(tb, &rate))
+ continue;
+
+ theoretical_tp = mmrc_calculate_theoretical_throughput(rate);
+ if (min_theoretical_tp > theoretical_tp) {
+ min_theoretical_tp = theoretical_tp;
+ min_theoretical_tp_index = rate.index;
+ }
+ }
+
+ return min_theoretical_tp_index;
+}
+
+/*
+ * Fill out the remaining rates to be used once the best rate is selected.
+ * Normally the retry rates are one MCS lower than the previous, however in
+ * unconverged mode we limit the 3 respective retry rates to MCS 4, 2 and 0
+ * respectively. The last retry rate is always MCS 0
+ */
+/*
+ * Fill out the remaining rates to be used once the best rate is selected.
+ * Normally the retry rates are one MCS lower than the previous, however in
+ * unconverged mode we limit the 3 respective retry rates to MCS 4, 2 and 0
+ * respectively. The last retry rate is always MCS 0
+ */
+static void mmrc_fill_retry_rates(struct mmrc_table *tb)
+{
+ /* Second rate: one MCS below best (or one bandwidth step down) */
+ tb->second_tp = tb->best_tp;
+ if (tb->second_tp.rate != MMRC_MCS0) {
+ tb->second_tp.rate--;
+ if (tb->unconverged && tb->second_tp.rate > MMRC_MCS4)
+ tb->second_tp.rate = MMRC_MCS4;
+ rate_update_index(tb, &tb->second_tp);
+ } else if (tb->second_tp.bw > MMRC_BW_1MHZ) {
+ tb->second_tp.bw--;
+ rate_update_index(tb, &tb->second_tp);
+ }
+
+ /* Third rate: one MCS below second */
+ tb->best_prob = tb->second_tp;
+ if (tb->best_prob.rate != MMRC_MCS0) {
+ tb->best_prob.rate--;
+ if (tb->unconverged && tb->best_prob.rate > MMRC_MCS2)
+ tb->best_prob.rate = MMRC_MCS2;
+ rate_update_index(tb, &tb->best_prob);
+ } else if (tb->best_prob.bw > MMRC_BW_1MHZ) {
+ tb->best_prob.bw--;
+ rate_update_index(tb, &tb->best_prob);
+ }
+
+ /* Final fallback: always MCS0 (dropping bandwidth if already there) */
+ tb->baseline = tb->best_prob;
+ if (tb->baseline.rate != MMRC_MCS0) {
+ tb->baseline.rate = MMRC_MCS0;
+ rate_update_index(tb, &tb->baseline);
+ } else if (tb->baseline.bw > MMRC_BW_1MHZ) {
+ tb->baseline.bw--;
+ rate_update_index(tb, &tb->baseline);
+ }
+}
+
+/*
+ * Updates the mmrc_table with the appropriate rate priority based on the
+ * latest update statistics
+ */
+/*
+ * Re-rank the rate table after a statistics update: select the best and
+ * second-best throughput rows, derive the retry chain, and track the
+ * stability/convergence of the best rate across update cycles.
+ * @new_stats: number of fresh statistics; when zero only the candidate
+ * scan runs and no table state is modified.
+ */
+static void generate_table_priority(struct mmrc_table *tb, u32 new_stats)
+{
+ u16 i;
+ u16 best_row = tb->best_tp.index;
+ u16 prev_best_row = best_row;
+ u8 prev_best_rate = tb->best_tp.rate;
+ u16 second_best_row = tb->second_tp.index;
+ u32 best_tp = calculate_throughput(tb, best_row);
+ u32 second_best_tp = calculate_throughput(tb, second_best_row);
+ /*
+ * NOTE(review): despite the name this holds a row *index* (initially
+ * row 0) of the last row with acceptable probability.
+ */
+ u32 last_nonzero_prob = 0;
+ struct mmrc_rate tmp;
+ u32 tmp_tp;
+
+ /* Use fixed rate if set */
+ if (tb->fixed_rate.rate != MMRC_MCS_UNUSED) {
+ tb->best_tp = tb->fixed_rate;
+ tb->second_tp = tb->fixed_rate;
+ tb->best_prob = tb->fixed_rate;
+ return;
+ }
+
+ for (i = 0; i < rows_from_sta_caps(&tb->caps); i++) {
+ tmp = get_rate_row(tb, i);
+ if (!validate_rate(tb, &tmp))
+ continue;
+
+ /* Skip rows with no accumulated evidence */
+ if (tb->table[tmp.index].evidence == 0)
+ continue;
+
+ /*
+ * Besides better throughput, also consider this rate better if
+ * lower rates had worse probability. That indicates the rate
+ * itself is not the problem. Only do the probability check for
+ * rates up to the previous best rate.
+ */
+ tmp_tp = calculate_throughput(tb, tmp.index);
+
+ if (tmp_tp > best_tp ||
+ (tb->table[tmp.index].max_throughput <=
+ tb->table[prev_best_row].max_throughput &&
+ tb->table[tmp.index].prob >=
+ PROBABILITY_DIP_RECOVERY_MIN &&
+ tb->table[tmp.index].prob >
+ tb->table[last_nonzero_prob].prob)) {
+ /* Previous best becomes the second-best candidate */
+ second_best_row = best_row;
+ second_best_tp = best_tp;
+
+ best_tp = tmp_tp;
+ best_row = tmp.index;
+ } else if (tmp_tp > second_best_tp && best_row != tmp.index) {
+ second_best_tp = tmp_tp;
+ second_best_row = tmp.index;
+ }
+
+ if (tb->table[tmp.index].prob >= PROBABILITY_DIP_MIN &&
+ tb->table[tmp.index].max_throughput >=
+ tb->table[last_nonzero_prob].max_throughput)
+ last_nonzero_prob = tmp.index;
+ }
+
+ /* Only update rates and stability when there are new statistics */
+ if (!new_stats)
+ return;
+
+ tb->best_tp = get_rate_row(tb, best_row);
+ if (best_tp == 0 && tb->best_tp.rate > MMRC_MCS0) {
+ /* Drop one rate, as the best throughput is zero */
+ tb->best_tp.rate--;
+ rate_update_index(tb, &tb->best_tp);
+ }
+ /*
+ * NOTE(review): this assignment is immediately overwritten inside
+ * mmrc_fill_retry_rates(), which derives second_tp from best_tp —
+ * confirm whether second_best_row is meant to be used at all.
+ */
+ tb->second_tp = get_rate_row(tb, second_best_row);
+ mmrc_fill_retry_rates(tb);
+
+ if (tb->best_tp.rate > MMRC_MCS1 && prev_best_row == best_row) {
+ /* Increase the counter when the best rate is not changed */
+ tb->stability_cnt++;
+ } else if (tb->stability_cnt > STABILITY_BACKOFF_STEP) {
+ /* Back off the counter when there is a new best rate */
+ tb->stability_cnt -= STABILITY_BACKOFF_STEP;
+ } else {
+ tb->stability_cnt = 0;
+ }
+
+ if (prev_best_row != best_row) {
+ /* Track how far and how often the best rate moves */
+ s8 latest_best_rate_diff = prev_best_rate - tb->best_tp.rate;
+ u8 total_abs_best_rate_diff =
+ abs(tb->best_rate_diff[0] + tb->best_rate_diff[1] +
+ latest_best_rate_diff);
+
+ if (!tb->interference_likely) {
+ tb->probability_variation = 0;
+ if (!tb->unconverged &&
+ tb->best_rate_cycle_count <=
+ BEST_RATE_UNSTABLE_THRESHOLD &&
+ total_abs_best_rate_diff >= 2) {
+ /*
+ * Best rate has changed twice in a few cycles
+ * and moved at least 2 MCSs from where it was
+ * 3 best rate changes ago
+ */
+ tb->unconverged = true;
+ tb->newly_unconverged = true;
+ }
+ }
+ if (tb->unconverged && !tb->newly_unconverged &&
+ total_abs_best_rate_diff < 2) {
+ /*
+ * Best rate has been relatively stable (not moved more
+ * than 1 MCS after the last 3 rate changes), go back
+ * to converged
+ */
+ tb->unconverged = false;
+ }
+ tb->probability_variation_direction = 0;
+ tb->best_rate_cycle_count = 0;
+ tb->best_rate_diff[0] = tb->best_rate_diff[1];
+ tb->best_rate_diff[1] = latest_best_rate_diff;
+ } else {
+ tb->best_rate_cycle_count++;
+ if (tb->unconverged && !tb->newly_unconverged &&
+ tb->best_rate_cycle_count >=
+ BEST_RATE_CONVERGED_THRESHOLD) {
+ /*
+ * Best rate has been stable for a while, go back to
+ * converged
+ */
+ tb->unconverged = false;
+ }
+ }
+
+ /* A cycle has passed since entering unconverged mode */
+ if (tb->newly_unconverged)
+ tb->newly_unconverged = false;
+}
+
+/*
+ * calculate_attempt_time() - airtime for one attempt at @rate.
+ * @rate: rate the attempt would use.
+ * @size: packet size in bytes.
+ *
+ * Scales the tx time reported by get_tx_time() (which is for a
+ * DEFAULT_PACKET_SIZE_BYTES packet) linearly to @size. The *1000/..1000
+ * dance keeps precision in integer arithmetic for both directions of
+ * the ratio.
+ */
+static u32 calculate_attempt_time(struct mmrc_rate *rate, size_t size)
+{
+ u32 time;
+
+ time = get_tx_time(rate);
+
+ if (size > DEFAULT_PACKET_SIZE_BYTES)
+ time = (time * ((size * 1000) / DEFAULT_PACKET_SIZE_BYTES)) /
+ 1000;
+ else
+ time = (time * 1000) /
+ ((DEFAULT_PACKET_SIZE_BYTES * 1000) / size);
+
+ return time;
+}
+
+/*
+ * mmrc_calculate_rate_tx_time() - total airtime for all attempts at a rate.
+ * @rate: rate entry, including its attempts count.
+ * @size: packet size in bytes.
+ *
+ * Returns attempt time multiplied by the number of attempts (accumulated
+ * in a loop; the per-attempt time is identical on every iteration).
+ */
+u32 mmrc_calculate_rate_tx_time(struct mmrc_rate *rate, size_t size)
+{
+ u8 i;
+ u32 total_time = 0;
+
+ for (i = 0; i < rate->attempts; i++)
+ total_time += calculate_attempt_time(rate, size);
+
+ return total_time;
+}
+
+/*
+ * Calculates the appropriate amount of additional attempts to make based on
+ * packet size and theoretical throughput.
+ */
+static void calculate_remaining_attempts(struct mmrc_table *tb,
+ struct mmrc_rate_table *rate,
+ s32 *rem_time, size_t size)
+{
+ size_t i;
+
+ /* No time budget left after the initial allocation */
+ if (*rem_time <= 0)
+ return;
+
+ for (i = 0; i < MMRC_MAX_CHAIN_LENGTH; i++) {
+ u32 attempt_time;
+ u32 attempt;
+
+ if (rate->rates[i].rate == MMRC_MCS_UNUSED)
+ break;
+
+ /*
+ * The attempts for these rates were calculated in the initial
+ * attempt allocation
+ */
+ if (tb->table[rate->rates[i].index].prob < 20)
+ continue;
+
+ /* Don't top up a first rate that is worse than the safe rate */
+ if (i == 0 && (calculate_throughput(tb, rate->rates[i].index) <
+ calculate_throughput(tb, tb->best_prob.index)))
+ continue;
+
+ attempt_time = calculate_attempt_time(&rate->rates[i], size);
+ if (!attempt_time)
+ continue;
+
+ /* Share the remaining window time evenly across the chain */
+ attempt = (*rem_time / tb->caps.max_rates) / attempt_time;
+ attempt += rate->rates[i].attempts;
+
+ rate->rates[i].attempts = MMRC_ATTEMPTS_TO_BITFIELD(
+ attempt > MMRC_MAX_CHAIN_ATTEMPTS ?
+ MMRC_MAX_CHAIN_ATTEMPTS :
+ attempt);
+ }
+}
+
+/*
+ * Allocate initial attempts to all rates in a rate table, deducting the
+ * airtime each allocation costs from @rem_time so the remainder can be
+ * distributed by calculate_remaining_attempts().
+ */
+static void allocate_initial_attempts(struct mmrc_rate_table *rate,
+ s32 *rem_time, size_t size)
+{
+ u32 i;
+
+ for (i = 0; i < MMRC_MAX_CHAIN_LENGTH; i++) {
+ u32 attempt_time;
+
+ if (rate->rates[i].rate == MMRC_MCS_UNUSED)
+ break;
+
+ attempt_time = calculate_attempt_time(&rate->rates[i], size);
+
+ /*
+ * if the time for a single attempt is very long, let's just
+ * try once
+ */
+ if (attempt_time > MAX_WINDOW_ATTEMPT_TIME) {
+ *rem_time -= attempt_time;
+ rate->rates[i].attempts = MMRC_ATTEMPTS_TO_BITFIELD(1);
+ } else {
+ *rem_time -= attempt_time * 2;
+ rate->rates[i].attempts = MMRC_ATTEMPTS_TO_BITFIELD(2);
+ }
+ }
+}
+
+/*
+ * mmrc_get_rates() - build the retry chain for the next transmission.
+ * @tb: per-STA rate table.
+ * @out: output chain of up to MMRC_MAX_CHAIN_LENGTH rates with attempt
+ *       counts; unused tail entries get rate == MMRC_MCS_UNUSED.
+ * @size: packet size in bytes, used to budget attempts against the
+ *        RATE_WINDOW_MICROSECONDS airtime window.
+ *
+ * Periodically substitutes a "lookaround" (sampling) rate into the head of
+ * the chain so statistics are gathered for candidate better rates, then
+ * allocates attempt counts per rate from the airtime budget.
+ */
+void mmrc_get_rates(struct mmrc_table *tb, struct mmrc_rate_table *out,
+ size_t size)
+{
+ u8 i;
+ u16 random_index;
+ struct mmrc_rate random;
+ struct mmrc_rate lookaround0 = tb->best_tp;
+ struct mmrc_rate lookaround1 = tb->second_tp;
+ bool is_lookaround;
+ int lookaround_index = -1;
+ int best_index = 0;
+ int random_tp = 0;
+ int best_tp;
+ int lookaround_fail_count;
+ bool try_current_lookaround = false;
+
+ s32 rem_time = RATE_WINDOW_MICROSECONDS;
+
+ memset(out, 0, sizeof(*out));
+
+ tb->lookaround_cnt = (tb->lookaround_cnt + 1) % tb->lookaround_wrap;
+ /*
+ * Look around if the counter wraps or there has been no look around
+ * for a number of rate control cycles.
+ */
+ is_lookaround = (tb->fixed_rate.rate == MMRC_MCS_UNUSED) &&
+ ((tb->lookaround_cnt == 0) ||
+ ((tb->last_lookaround_cycle +
+ LOOKAROUND_MAX_RC_CYCLES) <= tb->cycle_cnt));
+
+ /* Also skip sampling if we don't yet have data for our best rate */
+ if (tb->table[tb->best_tp.index].evidence == 0)
+ is_lookaround = false;
+
+ /* Enter/leave the slow "stable" sampling mode based on stability_cnt */
+ if (tb->lookaround_wrap != LOOKAROUND_RATE_STABLE) {
+ if (tb->stability_cnt >= tb->stability_cnt_threshold) {
+ tb->lookaround_wrap = LOOKAROUND_RATE_STABLE;
+ tb->stability_cnt_threshold =
+ STABILITY_CNT_THRESHOLD_STABLE;
+ tb->stability_cnt = STABILITY_CNT_THRESHOLD_STABLE * 2;
+ is_lookaround = false;
+ }
+ } else if (tb->stability_cnt < tb->stability_cnt_threshold) {
+ tb->stability_cnt_threshold = STABILITY_CNT_THRESHOLD_NORMAL;
+ tb->lookaround_wrap = LOOKAROUND_RATE_NORMAL;
+ tb->stability_cnt = 0;
+ }
+
+ /* Look around only when the fixed rate is not set */
+ if (is_lookaround) {
+ tb->total_lookaround++;
+ /*
+ * NOTE(review): forced_lookaround is maintained here but not
+ * read anywhere in this file - confirm it is still needed.
+ */
+ tb->forced_lookaround =
+ (tb->forced_lookaround + 1) % LOOKAROUND_RATE_NORMAL;
+ tb->last_lookaround_cycle = tb->cycle_cnt;
+
+ /* Re-sample the same rate until it has had enough attempts */
+ if (tb->current_lookaround_rate_attempts <
+ LOOKAROUND_RATE_ATTEMPTS)
+ try_current_lookaround = true;
+
+ best_tp = calculate_throughput(tb, tb->best_tp.index);
+
+ for (lookaround_fail_count = 0;
+ lookaround_fail_count < LOOKAROUND_FAIL_MAX;
+ lookaround_fail_count++) {
+ if (try_current_lookaround) {
+ random_index =
+ tb->current_lookaround_rate_index;
+ try_current_lookaround = false;
+ } else {
+ random_index = get_random_u32_below(
+ rows_from_sta_caps(&tb->caps));
+ }
+ random = get_rate_row(tb, random_index);
+
+ if (!validate_rate(tb, &random))
+ continue;
+
+ if (random.rate == MMRC_MCS10)
+ continue;
+
+ /* Use measured throughput when we have evidence for it */
+ if (tb->table[random_index].evidence > 0)
+ random_tp =
+ calculate_throughput(tb, random_index);
+ else
+ random_tp =
+ mmrc_calculate_theoretical_throughput(
+ random);
+
+ /*
+ * Skip rates that can only be worse than the current
+ * best
+ */
+ if (random_tp <= best_tp)
+ continue;
+
+ /*
+ * Force looking up the rate no more than one MCS.
+ * It will avoid looking for rates with very low
+ * success rate. In case of better environment
+ * conditions MMRC will collect enough statistics to
+ * climb up the rates one by one.
+ */
+ if (random.rate > tb->best_tp.rate + 1 ||
+ random.bw > tb->best_tp.bw + 1 ||
+ (random.rate > tb->best_tp.rate &&
+ random.bw > tb->best_tp.bw))
+ continue;
+
+ if (tb->current_lookaround_rate_index == random_index) {
+ tb->current_lookaround_rate_attempts++;
+ } else {
+ tb->current_lookaround_rate_attempts = 0;
+ tb->current_lookaround_rate_index =
+ random_index;
+ }
+
+ break;
+ }
+
+ /* No acceptable sample rate found: fall back to the best rate */
+ if (lookaround_fail_count >= LOOKAROUND_FAIL_MAX) {
+ is_lookaround = false;
+ tb->current_lookaround_rate_index = tb->best_tp.index;
+ } else {
+ lookaround0 = random;
+ lookaround1 = tb->best_tp;
+ lookaround_index = 0;
+ best_index = 1;
+ }
+ }
+
+ /* Fill the chain according to how many rates the STA supports */
+ if (tb->caps.max_rates == 1) {
+ out->rates[0] = (is_lookaround) ? lookaround0 : tb->best_tp;
+ out->rates[1].rate = MMRC_MCS_UNUSED;
+ out->rates[2].rate = MMRC_MCS_UNUSED;
+ out->rates[3].rate = MMRC_MCS_UNUSED;
+ } else if (tb->caps.max_rates == 2) {
+ out->rates[0] = (is_lookaround) ? lookaround0 : tb->best_tp;
+ out->rates[1] = (is_lookaround) ? lookaround1 : tb->best_prob;
+ out->rates[2].rate = MMRC_MCS_UNUSED;
+ out->rates[3].rate = MMRC_MCS_UNUSED;
+ } else if (tb->caps.max_rates == 3) {
+ out->rates[0] = (is_lookaround) ? lookaround0 : tb->best_tp;
+ out->rates[1] = (is_lookaround) ? lookaround1 : tb->second_tp;
+ out->rates[2] = tb->best_prob;
+ out->rates[3].rate = MMRC_MCS_UNUSED;
+ } else {
+ out->rates[0] = (is_lookaround) ? lookaround0 : tb->best_tp;
+ out->rates[1] = (is_lookaround) ? lookaround1 : tb->second_tp;
+ out->rates[2] = tb->best_prob;
+ out->rates[3] = tb->baseline;
+ }
+
+ /* For fallback rates, set RTS/CTS */
+ for (i = 1; i < MMRC_MAX_CHAIN_LENGTH; i++)
+ out->rates[i].flags |= BIT(MMRC_FLAGS_CTS_RTS);
+
+ /* Allocate initial attempts for rate */
+ allocate_initial_attempts(out, &rem_time, size);
+
+ /* Calculate and allocate remaining attempts */
+ calculate_remaining_attempts(tb, out, &rem_time, size);
+
+ /* Enforce limits on each rate's attempts */
+ for (i = 0; i < MMRC_MAX_CHAIN_LENGTH; i++) {
+ if (out->rates[i].rate != MMRC_MCS_UNUSED) {
+ out->rates[i].attempts =
+ out->rates[i].attempts == 0 ?
+ MMRC_ATTEMPTS_TO_BITFIELD(
+ MMRC_MIN_CHAIN_ATTEMPTS) :
+ out->rates[i].attempts;
+ out->rates[i].attempts =
+ out->rates[i].attempts >
+ MMRC_MAX_CHAIN_ATTEMPTS ?
+ MMRC_ATTEMPTS_TO_BITFIELD(
+ MMRC_MAX_CHAIN_ATTEMPTS) :
+ out->rates[i].attempts;
+ /* A sampling rate only ever gets a single attempt */
+ if (i == lookaround_index &&
+ tb->lookaround_wrap != LOOKAROUND_RATE_INIT)
+ out->rates[i].attempts =
+ MMRC_ATTEMPTS_TO_BITFIELD(1);
+ }
+ }
+
+ /*
+ * Give the best rate at least 2 attempts to keep peak throughput
+ * unless it is too low
+ */
+ if (out->rates[best_index].attempts == 1 &&
+ out->rates[best_index].rate > MMRC_MCS1)
+ out->rates[best_index].attempts = MMRC_ATTEMPTS_TO_BITFIELD(2);
+ else if (out->rates[best_index].rate <= MMRC_MCS1)
+ out->rates[best_index].attempts = 1;
+}
+
+/*
+ * calc_ewma_average() - exponentially weighted moving average.
+ * @avg: previous average; 0 is treated as "no history" and @latest is
+ *       returned unchanged.
+ * @latest: newest sample.
+ * @weight: weight (percentage, 0-100) given to the old average.
+ */
+static u32 calc_ewma_average(u32 avg, u32 latest, u32 weight)
+{
+ WARN_ON_ONCE(!(weight <= 100));
+
+ if (avg == 0)
+ return latest;
+
+ return ((latest * (100 - weight)) + (avg * weight)) / 100;
+}
+
+/*
+ * mmrc_process_variation() - track probability variance on the best rate.
+ * @tb: per-STA rate table.
+ * @current_success: success percentage (0-100) measured this cycle.
+ * @index: stats-table row the measurement belongs to.
+ *
+ * Maintains an EWMA of how much the best rate's success probability moves
+ * per cycle and a signed "direction" counter, and uses them to enter or
+ * leave interference mode (random variation) and unconverged mode (the
+ * best rate failing completely).
+ */
+static void mmrc_process_variation(struct mmrc_table *tb, u16 current_success,
+ u32 index)
+{
+ u32 current_variation;
+
+ /*
+ * Only process probability variation for the best rate. It is likely
+ * the only rate to have enough data to see the variation and its
+ * statistics are more affected because they are usually collected over
+ * the full period.
+ */
+ if (index != tb->best_tp.index)
+ return;
+
+ if (current_success == 0) {
+ if (!tb->unconverged) {
+ /*
+ * Best rate is failing completely, go to unconverged
+ * mode
+ */
+ tb->unconverged = true;
+ tb->newly_unconverged = true;
+ }
+ return;
+ }
+
+ /* No previous probability to compare against yet */
+ if (tb->table[index].prob == 0)
+ return;
+
+ /* Don't process variation while converging after association */
+ if (tb->lookaround_wrap == LOOKAROUND_RATE_INIT)
+ return;
+
+ current_variation = abs(current_success - tb->table[index].prob);
+
+ /* Calculate the EWMA of the probability variation */
+ tb->probability_variation = calc_ewma_average(
+ tb->probability_variation, current_variation, VARIATION_EWMA);
+
+ /*
+ * Process the variation direction to distinguish converged and
+ * unconverged scenarios
+ */
+ if (tb->probability_variation >= MODERATE_VARIATION_THRESHOLD ||
+ tb->interference_likely) {
+ /* Sign flip: the variation changed direction, reset */
+ if ((current_success - tb->table[index].prob) *
+ tb->probability_variation_direction <
+ 0)
+ tb->probability_variation_direction = 0;
+ else if (current_success > tb->table[index].prob)
+ tb->probability_variation_direction =
+ min(tb->probability_variation_direction + 1,
+ MAX_VARIATION_DIRECTION);
+ else if (current_success < tb->table[index].prob)
+ tb->probability_variation_direction =
+ max(tb->probability_variation_direction - 1,
+ -MAX_VARIATION_DIRECTION);
+ }
+
+ if (tb->best_rate_cycle_count > VARIATION_DIRECTION_THRESHOLD &&
+ tb->probability_variation >= SIGNIFICANT_VARIATION_THRESHOLD) {
+ /*
+ * Only enter interference mode if the best rate is stable for
+ * enough cycles to determine the direction is random and not
+ * in one direction only
+ */
+ if (abs(tb->probability_variation_direction) <=
+ VARIATION_DIRECTION_THRESHOLD &&
+ !tb->interference_likely) {
+ tb->interference_likely = true;
+ }
+ } else if (tb->interference_likely &&
+ (tb->probability_variation <= MINOR_VARIATION_THRESHOLD ||
+ abs(tb->probability_variation_direction) ==
+ MAX_VARIATION_DIRECTION)) {
+ /*
+ * Exit interference mode if the variability drops or the
+ * direction stops being random
+ */
+ tb->interference_likely = false;
+ }
+}
+
+/*
+ * mmrc_update() - periodic rate-control statistics update.
+ * @tb: per-STA rate table.
+ *
+ * Runs once per rate-control cycle (every MMRC_UPDATE_FREQUENCY_MS per the
+ * header). Folds the per-cycle sent/success counters of every row into the
+ * EWMA probability and evidence values, refreshes throughput accumulators,
+ * then re-selects the best rates via generate_table_priority() and adjusts
+ * the lookaround mode.
+ */
+void mmrc_update(struct mmrc_table *tb)
+{
+ u32 i;
+ u16 this_success;
+ u32 scale;
+ u32 scaled_ewma;
+ u32 new_stats = 0;
+ u32 attempts_for_stats;
+ u32 success_for_stats;
+ u32 min_stats;
+ u32 throughput;
+ u32 evidence_sent;
+
+ tb->cycle_cnt++;
+
+ /* Allow less minimum stats when converging */
+ if (tb->lookaround_wrap != LOOKAROUND_RATE_INIT)
+ min_stats = STATS_MIN_NORMAL;
+ else
+ min_stats = STATS_MIN_INIT;
+
+ for (i = 0; i < rows_from_sta_caps(&tb->caps); i++) {
+ /* This algorithm is keeping track of the amount of evidence,
+ * being packets that have been recently sent at this rate.
+ * This value is smoothed with an EWMA function over time and
+ * used to update the probability of a rate succeeding
+ * dynamically. This method allows MMRC to react timely if a
+ * new rate is used that hasn't been used recently
+ */
+
+ /* Necessary to prevent a divide by 0 */
+ if (tb->table[i].evidence == 0)
+ scale = 0;
+ else
+ scale = ((tb->table[i].evidence * 2) * 100) /
+ ((tb->table[i].sent * EVIDENCE_SCALE) +
+ tb->table[i].evidence);
+
+ /* Restrict scale to appropriate values */
+ if (scale > 100)
+ scale = 100;
+
+ scaled_ewma = scale * EWMA / 100;
+
+ /*
+ * Only count new packets for evidence if we will process
+ * them
+ */
+ evidence_sent =
+ tb->table[i].sent >= min_stats ? tb->table[i].sent : 0;
+ tb->table[i].evidence = calc_ewma_average(
+ tb->table[i].evidence, evidence_sent * EVIDENCE_SCALE,
+ scaled_ewma);
+
+ if (tb->table[i].evidence > EVIDENCE_MAX)
+ tb->table[i].evidence = EVIDENCE_MAX;
+
+ /* Try to use statistics from acknowledged AMPDUs first */
+ attempts_for_stats = tb->table[i].back_mpdu_success +
+ tb->table[i].back_mpdu_failure;
+ success_for_stats = tb->table[i].back_mpdu_success;
+
+ /*
+ * Use the full statistics if rates are not converged or there
+ * were no AMPDUs for this rate or the remaining attempts are
+ * less than half of what we have from AMPDUs.
+ */
+ if (!tb->table[i].have_sent_ampdus || tb->unconverged ||
+ attempts_for_stats < AMPDU_STATS_MIN ||
+ (tb->table[i].sent - attempts_for_stats <
+ attempts_for_stats / 2)) {
+ attempts_for_stats = tb->table[i].sent;
+ success_for_stats = tb->table[i].sent_success;
+ }
+
+ if (attempts_for_stats >= min_stats ||
+ (attempts_for_stats > 0 && tb->table[i].prob > 0)) {
+ new_stats = 1;
+ this_success =
+ (100 * success_for_stats) / attempts_for_stats;
+
+ if (scaled_ewma)
+ mmrc_process_variation(tb, this_success, i);
+
+ tb->table[i].prob = calc_ewma_average(
+ tb->table[i].prob, this_success, scaled_ewma);
+
+ /* Clear our sent statistics and update totals */
+ tb->table[i].total_sent += tb->table[i].sent;
+ tb->table[i].sent = 0;
+
+ tb->table[i].total_success += tb->table[i].sent_success;
+ tb->table[i].sent_success = 0;
+
+ tb->table[i].back_mpdu_failure = 0;
+ tb->table[i].back_mpdu_success = 0;
+ tb->table[i].have_sent_ampdus = false;
+ }
+
+ throughput = calculate_throughput(tb, i);
+ if (tb->table[i].max_throughput < throughput)
+ tb->table[i].max_throughput = throughput;
+
+ /*
+ * Reset the running average windows if reached collector
+ * limits
+ */
+ if (tb->table[i].sum_throughput > (0xFFFFFFFF - throughput)) {
+ tb->table[i].sum_throughput /=
+ tb->table[i].avg_throughput_counter;
+ tb->table[i].avg_throughput_counter = 1;
+ }
+ /* Update the sum and counter so it will be possible later to
+ * calculate the running average throughput
+ */
+ tb->table[i].sum_throughput += throughput;
+ tb->table[i].avg_throughput_counter++;
+ }
+
+ generate_table_priority(tb, new_stats);
+
+ /*
+ * Switch to faster lookaround mode if rates drop low at very low
+ * bandwidth or we are in unconverged mode. Switching at low bandwidth
+ * and rate is to help recover quickly from rates where we would need
+ * to fragment standard MTU size packets.
+ */
+ if (tb->lookaround_wrap != LOOKAROUND_RATE_INIT &&
+ (tb->unconverged || (tb->best_tp.bw == MMRC_BW_1MHZ &&
+ tb->best_tp.rate <= MMRC_MCS2))) {
+ tb->lookaround_cnt = 0;
+ tb->lookaround_wrap = LOOKAROUND_RATE_INIT;
+ tb->stability_cnt_threshold = STABILITY_CNT_THRESHOLD_INIT;
+ }
+
+ /*
+ * If it is unlikely we can do the lookaround attempts in two RC cycles
+ * choose a new rate
+ */
+ if (tb->current_lookaround_rate_attempts <=
+ (LOOKAROUND_RATE_ATTEMPTS / 2))
+ tb->current_lookaround_rate_attempts = LOOKAROUND_RATE_ATTEMPTS;
+}
+
+/*
+ * mmrc_feedback() - account a TX status report against the rate table.
+ * @tb: per-STA rate table.
+ * @rates: the retry chain that was used for the transmission.
+ * @retry_count: number of attempts consumed. NOTE(review): assumed to
+ *               include the successful attempt (the rate the count lands
+ *               on is credited with one success) - confirm with callers.
+ * @was_aggregated: frame was sent as part of an A-MPDU.
+ *
+ * Walks the chain: rates whose attempts were fully consumed are counted as
+ * failed attempts; the rate on which the remaining count lands is credited
+ * with one success. NOTE(review): entries with rate == MMRC_MCS_UNUSED are
+ * not skipped here; presumably callers never report chains with unused
+ * entries before the consumed attempts - verify.
+ */
+void mmrc_feedback(struct mmrc_table *tb, struct mmrc_rate_table *rates,
+ s32 retry_count, bool was_aggregated)
+{
+ s32 ind = retry_count;
+ u32 i;
+
+ for (i = 0; i < MMRC_MAX_CHAIN_LENGTH; i++) {
+ rate_update_index(tb, &rates->rates[i]);
+ tb->table[rates->rates[i].index].have_sent_ampdus |=
+ was_aggregated;
+
+ if ((s32)rates->rates[i].attempts < ind) {
+ /* This rate's attempts were all used up: failures */
+ ind = ind - rates->rates[i].attempts;
+ tb->table[rates->rates[i].index].sent +=
+ rates->rates[i].attempts;
+ if (was_aggregated) {
+ tb->table[rates->rates[i].index]
+ .back_mpdu_failure +=
+ rates->rates[i].attempts;
+ }
+ } else {
+ /* The frame went out on this rate */
+ tb->table[rates->rates[i].index].sent += ind;
+ tb->table[rates->rates[i].index].sent_success += 1;
+ if (was_aggregated) {
+ tb->table[rates->rates[i].index]
+ .back_mpdu_success += 1;
+ tb->table[rates->rates[i].index]
+ .back_mpdu_failure +=
+ ind > 1 ? ind - 1 : 0;
+ }
+ return;
+ }
+ }
+}
+
+/*
+ * Chooses a reasonable starting rate based on range (gathered from
+ * RSSI measurements) or bandwidth. Then fills out the 3 retry rates
+ * so a full set of rates is available.
+ */
+static void mmrc_init_rates(struct mmrc_table *tb, s8 rssi)
+{
+ /* Start at the widest supported bandwidth, SGI if supported there */
+ tb->best_tp.bw = MMRC_MAX_BW(tb->caps.bandwidth);
+ if (tb->caps.sgi_per_bw & SGI_PER_BW(tb->best_tp.bw))
+ tb->best_tp.guard = MMRC_GUARD_TO_BITFIELD(MMRC_GUARD_SHORT);
+ else
+ tb->best_tp.guard = MMRC_GUARD_TO_BITFIELD(MMRC_GUARD_LONG);
+ tb->best_tp.rate = MMRC_RATE_TO_BITFIELD(MMRC_MCS0);
+
+ /* Pick the starting MCS from the measured signal strength */
+ if (rssi >= MMRC_SHORT_RANGE_RSSI_LIMIT)
+ tb->best_tp.rate = MMRC_RATE_TO_BITFIELD(MMRC_MCS7);
+ else if (rssi < MMRC_SHORT_RANGE_RSSI_LIMIT &&
+ rssi >= MMRC_MID_RANGE_RSSI_LIMIT)
+ tb->best_tp.rate = MMRC_RATE_TO_BITFIELD(MMRC_MCS3);
+ else if (tb->best_tp.bw == MMRC_BW_1MHZ ||
+ tb->best_tp.bw == MMRC_BW_2MHZ)
+ /*
+ * To compensate for slow feedback when running with 1 and 2
+ * MHz bandwidth, we start from MCS3 which will correspond to
+ * reasonable feedback and will avoid resetting the rate table
+ * evidence.
+ */
+ tb->best_tp.rate = MMRC_RATE_TO_BITFIELD(MMRC_MCS3);
+
+ tb->best_tp.ss = MMRC_SS_TO_BITFIELD(MMRC_SPATIAL_STREAM_1);
+ rate_update_index(tb, &tb->best_tp);
+ /* Init every rate in case they are needed to set the retry rates */
+ tb->second_tp = tb->best_tp;
+ tb->best_prob = tb->best_tp;
+ tb->baseline = tb->best_tp;
+ mmrc_fill_retry_rates(tb);
+}
+
+/*
+ * mmrc_sta_init() - initialise a per-STA rate table.
+ * @tb: caller-allocated table, at least mmrc_memory_required_for_caps()
+ *      bytes (it is zeroed to that size here).
+ * @caps: the STA's rate capabilities, copied into the table.
+ * @rssi: initial signal strength, used to pick a starting rate.
+ *
+ * The table starts in unconverged/init-lookaround mode so that statistics
+ * are gathered aggressively right after association.
+ */
+void mmrc_sta_init(struct mmrc_table *tb, struct mmrc_sta_capabilities *caps,
+ s8 rssi)
+{
+ u32 i;
+ u16 row_count = rows_from_sta_caps(caps);
+
+ memset(tb, 0, mmrc_memory_required_for_caps(caps));
+ memcpy(&tb->caps, caps, sizeof(tb->caps));
+
+ for (i = 0; i < row_count; i++) {
+ tb->table[i].prob = RATE_INIT_PROBABILITY;
+ tb->table[i].evidence = 0;
+ tb->table[i].sum_throughput = 0;
+ tb->table[i].avg_throughput_counter = 0;
+ tb->table[i].max_throughput = 0;
+ }
+
+ /* MMRC_MCS_UNUSED means "no fixed rate configured" */
+ tb->fixed_rate.rate = MMRC_MCS_UNUSED;
+ tb->cycle_cnt = 0;
+ tb->last_lookaround_cycle = 0;
+ tb->lookaround_cnt = 0;
+ tb->lookaround_wrap = LOOKAROUND_RATE_INIT;
+ tb->unconverged = true;
+ tb->newly_unconverged = true;
+ tb->stability_cnt_threshold = STABILITY_CNT_THRESHOLD_INIT;
+ tb->baseline = get_rate_row(tb, find_baseline_index(tb));
+ mmrc_init_rates(tb, rssi);
+}
+
+/*
+ * mmrc_set_fixed_rate() - pin rate control to a single rate.
+ * @tb: per-STA rate table.
+ * @fixed_rate: the rate to pin; each field is checked against the STA's
+ *              capability bitmasks before being accepted.
+ *
+ * Return: true if the rate was valid and applied, false otherwise.
+ */
+bool mmrc_set_fixed_rate(struct mmrc_table *tb, struct mmrc_rate fixed_rate)
+{
+ bool caps_support_rate = true;
+
+ /* Do not accept rate which does not support the STA capabilities */
+ if ((BIT(fixed_rate.rate) & tb->caps.rates) == 0 ||
+ (BIT(fixed_rate.bw) & tb->caps.bandwidth) == 0 ||
+ (BIT(fixed_rate.ss) & tb->caps.spatial_streams) == 0 ||
+ (BIT(fixed_rate.guard) & tb->caps.guard) == 0)
+ caps_support_rate = false;
+
+ if (validate_rate(tb, &fixed_rate) && caps_support_rate) {
+ tb->fixed_rate = fixed_rate;
+ rate_update_index(tb, &tb->fixed_rate);
+ return true;
+ }
+
+ return false;
+}
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 15/31] wifi: mm81x: add mmrc.h
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (13 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 14/31] wifi: mm81x: add mmrc.c Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 16/31] wifi: mm81x: add ps.c Lachlan Hodges
` (16 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/mmrc.h | 193 +++++++++++++++++++
1 file changed, 193 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/mmrc.h
diff --git a/drivers/net/wireless/morsemicro/mm81x/mmrc.h b/drivers/net/wireless/morsemicro/mm81x/mmrc.h
new file mode 100644
index 000000000000..a4c7d941ad55
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/mmrc.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+
+#ifndef _MM81X_MMRC_H_
+#define _MM81X_MMRC_H_
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/random.h>
+#include <linux/time.h>
+
+/* The max length of a retry chain for a single packet transmission */
+#define MMRC_MAX_CHAIN_LENGTH 4
+
+/* Rate minimum allowed attempts */
+#define MMRC_MIN_CHAIN_ATTEMPTS 1
+
+/* Rate upper limit for attempts */
+#define MMRC_MAX_CHAIN_ATTEMPTS 2
+
+/* The frequency of MMRC stat table updates */
+#define MMRC_UPDATE_FREQUENCY_MS 100
+
+/* Per-rate flags carried in struct mmrc_rate (used as bit positions) */
+enum mmrc_flags {
+ MMRC_FLAGS_CTS_RTS,
+};
+
+/* S1G MCS indices; MMRC_MCS_UNUSED marks an empty retry-chain entry */
+enum mmrc_mcs_rate {
+ MMRC_MCS0,
+ MMRC_MCS1,
+ MMRC_MCS2,
+ MMRC_MCS3,
+ MMRC_MCS4,
+ MMRC_MCS5,
+ MMRC_MCS6,
+ MMRC_MCS7,
+ MMRC_MCS8,
+ MMRC_MCS9,
+ MMRC_MCS10,
+ MMRC_MCS_UNUSED,
+};
+
+/* S1G channel bandwidths */
+enum mmrc_bw {
+ MMRC_BW_1MHZ = 0,
+ MMRC_BW_2MHZ = 1,
+ MMRC_BW_4MHZ = 2,
+ MMRC_BW_8MHZ = 3,
+ MMRC_BW_16MHZ = 4,
+ MMRC_BW_MAX = 5,
+};
+
+/* Number of spatial streams (value = streams - 1) */
+enum mmrc_spatial_stream {
+ MMRC_SPATIAL_STREAM_1 = 0,
+ MMRC_SPATIAL_STREAM_2 = 1,
+ MMRC_SPATIAL_STREAM_3 = 2,
+ MMRC_SPATIAL_STREAM_4 = 3,
+ MMRC_SPATIAL_STREAM_MAX,
+};
+
+/* Guard interval selection */
+enum mmrc_guard {
+ MMRC_GUARD_LONG = 0,
+ MMRC_GUARD_SHORT = 1,
+ MMRC_GUARD_MAX,
+};
+
+/* Mask a value to the width of the matching struct mmrc_rate bitfield */
+#define MMRC_RATE_TO_BITFIELD(x) ((x) & 0xF)
+#define MMRC_ATTEMPTS_TO_BITFIELD(x) ((x) & 0x7)
+#define MMRC_GUARD_TO_BITFIELD(x) ((x) & 0x1)
+#define MMRC_SS_TO_BITFIELD(x) ((x) & 0x3)
+#define MMRC_BW_TO_BITFIELD(x) ((x) & 0x7)
+#define MMRC_FLAGS_TO_BITFIELD(x) ((x) & 0x7)
+
+/*
+ * One retry-chain entry: the rate parameters (MCS, guard interval,
+ * spatial streams, bandwidth), the number of attempts at it, per-rate
+ * flags (enum mmrc_flags bits) and a cached index into the stats table.
+ */
+struct mmrc_rate {
+ u8 rate : 4; /* enum mmrc_mcs_rate */
+ u8 attempts : 3;
+ u8 guard : 1; /* enum mmrc_guard */
+ u8 ss : 2; /* enum mmrc_spatial_stream */
+ u8 bw : 3; /* enum mmrc_bw */
+ u8 flags : 3; /* BIT(enum mmrc_flags) */
+ u16 index; /* row in struct mmrc_table.table[] */
+};
+
+/* A full retry chain as handed to/from the driver */
+struct mmrc_rate_table {
+ struct mmrc_rate rates[MMRC_MAX_CHAIN_LENGTH];
+};
+
+/* Bit for bandwidth @bw in mmrc_sta_capabilities.sgi_per_bw */
+#define SGI_PER_BW(bw) (1 << (bw))
+
+/*
+ * Per-STA rate capabilities. The bandwidth/spatial_streams/rates/guard
+ * fields are bitmasks indexed by the corresponding enums (tested with
+ * BIT(value), see mmrc_set_fixed_rate()).
+ */
+struct mmrc_sta_capabilities {
+ u8 max_rates : 3; /* usable retry-chain entries, up to 4 */
+ u8 max_retries : 3;
+ u8 bandwidth : 5; /* bitmask of enum mmrc_bw */
+ u8 spatial_streams : 4; /* bitmask of enum mmrc_spatial_stream */
+ u16 rates : 11; /* bitmask of enum mmrc_mcs_rate */
+ u8 guard : 2; /* bitmask of enum mmrc_guard */
+ u8 sta_flags : 4;
+ u8 sgi_per_bw : 5; /* SGI support per bandwidth, see SGI_PER_BW() */
+};
+
+/*
+ * Per-rate statistics row. sent/sent_success and the back_mpdu counters
+ * accumulate within one rate-control cycle and are folded into prob and
+ * the totals by mmrc_update().
+ */
+struct mmrc_stats_table {
+ u32 avg_throughput_counter; /* samples in sum_throughput */
+ u32 sum_throughput;
+ u32 max_throughput;
+ u16 sent; /* attempts this cycle */
+ u16 sent_success; /* successes this cycle */
+ u16 back_mpdu_success; /* block-acked MPDUs this cycle */
+ u16 back_mpdu_failure;
+ u32 total_sent;
+ u32 total_success;
+ u16 evidence; /* EWMA of recent usage, capped at EVIDENCE_MAX */
+ u8 prob; /* EWMA success probability, 0-100 */
+ bool have_sent_ampdus;
+};
+
+/* Per-STA rate-control state; allocate mmrc_memory_required_for_caps() */
+struct mmrc_table {
+ struct mmrc_sta_capabilities caps;
+ struct mmrc_rate best_tp; /* best-throughput rate */
+ struct mmrc_rate second_tp; /* second-best throughput rate */
+ struct mmrc_rate baseline; /* safe fallback rate */
+ struct mmrc_rate best_prob; /* highest-probability rate */
+ struct mmrc_rate fixed_rate; /* rate == MMRC_MCS_UNUSED if unset */
+ u32 cycle_cnt; /* mmrc_update() invocations */
+ u32 last_lookaround_cycle;
+ u8 lookaround_cnt;
+
+ /* The ratio of using normal rate and sampling */
+ u8 lookaround_wrap;
+
+ /*
+ * A counter that is used to determine when we should force a
+ * lookaround. Should be a portion of the above lookaround with
+ * less constraints
+ */
+ u8 forced_lookaround;
+
+ u8 current_lookaround_rate_attempts;
+ u16 current_lookaround_rate_index;
+ u32 total_lookaround;
+
+ /*
+ * A counter to detect if the current best rate is optimal
+ * and may slow down sample frequency.
+ */
+ u32 stability_cnt;
+
+ u32 stability_cnt_threshold;
+ u8 probability_variation; /* EWMA of per-cycle prob movement */
+
+ /* The difference in MCS from each of the last 2 rate changes */
+ s8 best_rate_diff[2];
+
+ /* Indication of random versus consistently one-sided variation */
+ s8 probability_variation_direction;
+
+ /* Has rate control detected possible interference */
+ bool interference_likely;
+
+ /* Has rate control detected the best rate is no longer converged */
+ bool unconverged;
+
+ /* Is rate control just entering unconverged state */
+ bool newly_unconverged;
+
+ /*
+ * Number of rate control cycles the best rate has remained
+ * unchanged
+ */
+ s32 best_rate_cycle_count;
+
+ /*
+ * The probability table for the STA. This MUST always be the last
+ * element in the struct.
+ */
+ struct mmrc_stats_table table[];
+};
+
+void mmrc_sta_init(struct mmrc_table *tb, struct mmrc_sta_capabilities *caps,
+ s8 rssi);
+size_t mmrc_memory_required_for_caps(struct mmrc_sta_capabilities *caps);
+void mmrc_get_rates(struct mmrc_table *tb, struct mmrc_rate_table *out,
+ size_t size);
+void mmrc_feedback(struct mmrc_table *tb, struct mmrc_rate_table *rates,
+ s32 retry_count, bool was_aggregated);
+void mmrc_update(struct mmrc_table *tb);
+bool mmrc_set_fixed_rate(struct mmrc_table *tb, struct mmrc_rate fixed_rate);
+u32 mmrc_calculate_theoretical_throughput(struct mmrc_rate rate);
+u32 mmrc_calculate_rate_tx_time(struct mmrc_rate *rate, size_t size);
+
+#endif /* _MM81X_MMRC_H_ */
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 16/31] wifi: mm81x: add ps.c
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (14 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 15/31] wifi: mm81x: add mmrc.h Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 17/31] wifi: mm81x: add ps.h Lachlan Hodges
` (15 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/ps.c | 120 +++++++++++++++++++++
1 file changed, 120 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/ps.c
diff --git a/drivers/net/wireless/morsemicro/mm81x/ps.c b/drivers/net/wireless/morsemicro/mm81x/ps.c
new file mode 100644
index 000000000000..ab67823452ee
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/ps.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include "hif.h"
+#include "skbq.h"
+#include "mac.h"
+#include "bus.h"
+#include "ps.h"
+
+/*
+ * Re-enable the bus if powersave previously suspended it. No-op when
+ * powersave is disabled or the bus is already awake. Called with
+ * mps->lock held (all callers go through mm81x_ps_evaluate()).
+ */
+static void mm81x_ps_wakeup(struct mm81x_ps *mps)
+{
+ struct mm81x *mors = container_of(mps, struct mm81x, ps);
+
+ if (!mps->enable || !mps->suspended)
+ return;
+
+ mm81x_set_bus_enable(mors, true);
+ mps->suspended = false;
+}
+
+/*
+ * Suspend the bus for powersave. No-op when powersave is disabled or the
+ * bus is already suspended. Called with mps->lock held. Note suspended is
+ * set before the bus is disabled (the reverse order of wakeup).
+ */
+static void mm81x_ps_sleep(struct mm81x_ps *mps)
+{
+ struct mm81x *mors = container_of(mps, struct mm81x, ps);
+
+ if (!mps->enable || mps->suspended)
+ return;
+
+ mps->suspended = true;
+ mm81x_set_bus_enable(mors, false);
+}
+
+/*
+ * Decide whether the bus should be awake or asleep. The bus is kept awake
+ * while there are explicit wakers, pending HIF events (other than a
+ * pending data-traffic pause, which is masked out) or buffered TX frames;
+ * otherwise it is put to sleep. Called with mps->lock held.
+ * NOTE(review): hif.event_flags is read here without any HIF-side
+ * synchronisation visible in this file - confirm that's safe.
+ */
+static void mm81x_ps_evaluate(struct mm81x_ps *mps)
+{
+ struct mm81x *mors = container_of(mps, struct mm81x, ps);
+ bool needs_wake = false;
+ unsigned long flags_on_entry =
+ (mors->hif.event_flags &
+ ~BIT(MM81X_HIF_EVT_DATA_TRAFFIC_PAUSE_PEND));
+
+ if (!mps->enable)
+ return;
+
+ needs_wake = (mps->wakers > 0);
+ needs_wake |= (flags_on_entry > 0);
+ needs_wake |= (mm81x_hif_get_tx_buffered_count(mors) > 0);
+
+ if (needs_wake) {
+ mm81x_ps_wakeup(mps);
+ return;
+ }
+
+ mm81x_ps_sleep(mps);
+}
+
+/* Delayed-work handler: re-evaluate powersave state under the lock. */
+static void mm81x_ps_evaluate_work(struct work_struct *work)
+{
+ struct mm81x_ps *mps =
+ container_of(work, struct mm81x_ps, delayed_eval_work.work);
+
+ /*
+ * enable is checked outside the lock; mm81x_ps_finish() clears it
+ * and then cancels this work synchronously.
+ */
+ if (mps->enable) {
+ mutex_lock(&mps->lock);
+ mm81x_ps_evaluate(mps);
+ mutex_unlock(&mps->lock);
+ }
+}
+
+/*
+ * mm81x_ps_enable() - allow powersave: drop one waker reference.
+ * @mors: device context.
+ *
+ * Pairs with mm81x_ps_disable(). When the waker count reaches zero the
+ * bus may be suspended by the re-evaluation. An unbalanced call (wakers
+ * already zero) triggers a WARN rather than underflowing.
+ */
+void mm81x_ps_enable(struct mm81x *mors)
+{
+ struct mm81x_ps *mps = &mors->ps;
+
+ if (mps->enable) {
+ mutex_lock(&mps->lock);
+ if (mps->wakers == 0) {
+ WARN_ON_ONCE(1);
+ } else {
+ mps->wakers--;
+ mm81x_ps_evaluate(mps);
+ }
+ mutex_unlock(&mps->lock);
+ }
+}
+
+/*
+ * mm81x_ps_disable() - prevent powersave: take one waker reference.
+ * @mors: device context.
+ *
+ * Keeps the bus awake until the matching mm81x_ps_enable() call.
+ */
+void mm81x_ps_disable(struct mm81x *mors)
+{
+ struct mm81x_ps *mps = &mors->ps;
+
+ if (mps->enable) {
+ mutex_lock(&mps->lock);
+ mps->wakers++;
+ mm81x_ps_evaluate(mps);
+ mutex_unlock(&mps->lock);
+ }
+}
+
+/*
+ * mm81x_ps_init() - initialise powersave state for a device.
+ * @mors: device context.
+ *
+ * Powersave is only activated for the USB bus type.
+ * NOTE(review): enabling ps for USB but not SDIO looks surprising for an
+ * SDIO-capable driver - confirm this is the intended bus.
+ *
+ * Return: 0 (cannot fail).
+ */
+int mm81x_ps_init(struct mm81x *mors)
+{
+ struct mm81x_ps *mps = &mors->ps;
+
+ mps->enable = (mors->bus_type == MM81X_BUS_TYPE_USB);
+ mps->suspended = true;
+ mps->wakers = 1; /* we default to being on */
+ mutex_init(&mps->lock);
+ INIT_DELAYED_WORK(&mps->delayed_eval_work, mm81x_ps_evaluate_work);
+
+ return 0;
+}
+
+/*
+ * mm81x_ps_finish() - tear down powersave.
+ * @mors: device context.
+ *
+ * Disables powersave and synchronously cancels any pending re-evaluation
+ * work. The bus is left in whatever state it was in (it is not woken).
+ */
+void mm81x_ps_finish(struct mm81x *mors)
+{
+ struct mm81x_ps *mps = &mors->ps;
+
+ if (mps->enable) {
+ mps->enable = false;
+ cancel_delayed_work_sync(&mps->delayed_eval_work);
+ }
+}
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 17/31] wifi: mm81x: add ps.h
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (15 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 16/31] wifi: mm81x: add ps.c Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 18/31] wifi: mm81x: add rate_code.h Lachlan Hodges
` (14 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/ps.h | 22 ++++++++++++++++++++++
1 file changed, 22 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/ps.h
diff --git a/drivers/net/wireless/morsemicro/mm81x/ps.h b/drivers/net/wireless/morsemicro/mm81x/ps.h
new file mode 100644
index 000000000000..0b59bb4145ab
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/ps.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+
+#ifndef _MM81X_PS_H_
+#define _MM81X_PS_H_
+
+#include "core.h"
+
+/* This should be nominally <= the dynamic ps timeout */
+#define NETWORK_BUS_TIMEOUT_MS (90)
+
+/* The default period of time to wait to re-evaluate powersave */
+#define DEFAULT_BUS_TIMEOUT_MS (50)
+
+/* Take a waker reference (keeps chip awake); pairs with mm81x_ps_enable() */
+void mm81x_ps_disable(struct mm81x *mors);
+/* Drop a waker reference, allowing the chip to sleep again */
+void mm81x_ps_enable(struct mm81x *mors);
+/* Initialise powersave state; returns 0 */
+int mm81x_ps_init(struct mm81x *mors);
+/* Disable powersave and flush any pending evaluation work */
+void mm81x_ps_finish(struct mm81x *mors);
+
+#endif /* !_MM81X_PS_H_ */
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 18/31] wifi: mm81x: add rate_code.h
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (16 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 17/31] wifi: mm81x: add ps.h Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 19/31] wifi: mm81x: add rc.c Lachlan Hodges
` (13 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
.../net/wireless/morsemicro/mm81x/rate_code.h | 177 ++++++++++++++++++
1 file changed, 177 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/rate_code.h
diff --git a/drivers/net/wireless/morsemicro/mm81x/rate_code.h b/drivers/net/wireless/morsemicro/mm81x/rate_code.h
new file mode 100644
index 000000000000..c60fcb9447c4
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/rate_code.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+
+#ifndef _MM81X_RATE_CODE_H_
+#define _MM81X_RATE_CODE_H_
+
+#include <linux/types.h>
+
+/* 802.11ah (S1G) channel bandwidth indices */
+enum dot11_bandwidth {
+ DOT11_BANDWIDTH_1MHZ = 0,
+ DOT11_BANDWIDTH_2MHZ = 1,
+ DOT11_BANDWIDTH_4MHZ = 2,
+ DOT11_BANDWIDTH_8MHZ = 3,
+ DOT11_BANDWIDTH_16MHZ = 4,
+
+ DOT11_MAX_BANDWIDTH = DOT11_BANDWIDTH_16MHZ,
+ DOT11_INVALID_BANDWIDTH = 5
+};
+
+/* S1G PPDU preamble formats */
+enum mm81x_rate_preamble {
+ /* S1G LONG format (with SIG-A and SIG-B) */
+ MM81X_RATE_PREAMBLE_S1G_LONG = 0,
+ /* This is the most common format used */
+ MM81X_RATE_PREAMBLE_S1G_SHORT = 1,
+ /* S1G 1M format */
+ MM81X_RATE_PREAMBLE_S1G_1M = 2,
+
+ MM81X_RATE_MAX_PREAMBLE = MM81X_RATE_PREAMBLE_S1G_1M,
+ MM81X_RATE_INVALID_PREAMBLE = 7
+};
+
+/* Packed little-endian rate descriptor shared with the firmware */
+typedef __le32 mm81x_rate_code_t;
+
+/*
+ * Field masks within mm81x_rate_code_t.  NOTE(review): the unassigned
+ * bit ranges (e.g. bits 14-15, 17, 19-21) are presumably reserved by
+ * the firmware interface — confirm against the firmware spec.
+ */
+#define MM81X_RATECODE_PREAMBLE (0x0000000F)
+#define MM81X_RATECODE_MCS_INDEX (0x000000F0)
+#define MM81X_RATECODE_NSS_INDEX (0x00000700)
+#define MM81X_RATECODE_BW_INDEX (0x00003800)
+#define MM81X_RATECODE_RTS_FLAG (0x00010000)
+#define MM81X_RATECODE_SHORT_GI_FLAG (0x00040000)
+#define MM81X_RATECODE_DUP_BW_INDEX (0x01C00000)
+
+/* Extract the preamble format field from a rate code */
+static inline enum mm81x_rate_preamble
+mm81x_ratecode_preamble_get(mm81x_rate_code_t rc)
+{
+ return (enum mm81x_rate_preamble)(
+ le32_get_bits(rc, MM81X_RATECODE_PREAMBLE));
+}
+
+/* Extract the MCS index field from a rate code */
+static inline u8 mm81x_ratecode_mcs_index_get(mm81x_rate_code_t rc)
+{
+ return le32_get_bits(rc, MM81X_RATECODE_MCS_INDEX);
+}
+
+/* Extract the 0-based spatial stream index field from a rate code */
+static inline u8 mm81x_ratecode_nss_index_get(mm81x_rate_code_t rc)
+{
+ return le32_get_bits(rc, MM81X_RATECODE_NSS_INDEX);
+}
+
+/* Extract the bandwidth index field from a rate code */
+static inline enum dot11_bandwidth
+mm81x_ratecode_bw_index_get(mm81x_rate_code_t rc)
+{
+ return (enum dot11_bandwidth)(
+ le32_get_bits(rc, MM81X_RATECODE_BW_INDEX));
+}
+
+/* True if RTS/CTS protection is requested in the rate code */
+static inline bool mm81x_ratecode_rts_get(mm81x_rate_code_t rc)
+{
+ return le32_get_bits(rc, MM81X_RATECODE_RTS_FLAG);
+}
+
+/* True if short guard interval is requested in the rate code */
+static inline bool mm81x_ratecode_sgi_get(mm81x_rate_code_t rc)
+{
+ return le32_get_bits(rc, MM81X_RATECODE_SHORT_GI_FLAG);
+}
+
+/* Extract the duplicate-mode bandwidth index field from a rate code */
+static inline enum dot11_bandwidth
+mm81x_ratecode_dup_bw_index_get(mm81x_rate_code_t rc)
+{
+ return (enum dot11_bandwidth)(
+ le32_get_bits(rc, MM81X_RATECODE_DUP_BW_INDEX));
+}
+
+/* Build a rate code from its constituent fields (flags start cleared) */
+#define MM81X_RATECODE_INIT(bw_idx, nss_idx, mcs_idx, preamble) \
+ (le32_encode_bits((bw_idx), MM81X_RATECODE_BW_INDEX) | \
+ le32_encode_bits((nss_idx), MM81X_RATECODE_NSS_INDEX) | \
+ le32_encode_bits((mcs_idx), MM81X_RATECODE_MCS_INDEX) | \
+ le32_encode_bits((preamble), MM81X_RATECODE_PREAMBLE))
+
+/* Typed wrapper around MM81X_RATECODE_INIT() */
+static inline mm81x_rate_code_t
+mm81x_ratecode_init(enum dot11_bandwidth bw_index, u32 nss_index, u32 mcs_index,
+ enum mm81x_rate_preamble preamble)
+{
+ return MM81X_RATECODE_INIT(bw_index, nss_index, mcs_index, preamble);
+}
+
+/*
+ * Field setters.  Each clears the target field in the little-endian
+ * rate code and re-encodes the new value; all other bits are preserved.
+ */
+static inline void
+mm81x_ratecode_preamble_set(mm81x_rate_code_t *rc,
+ enum mm81x_rate_preamble preamble)
+{
+ *rc = (*rc & cpu_to_le32(~MM81X_RATECODE_PREAMBLE)) |
+ le32_encode_bits(preamble, MM81X_RATECODE_PREAMBLE);
+}
+
+static inline void mm81x_ratecode_mcs_index_set(mm81x_rate_code_t *rc,
+ u32 mcs_index)
+{
+ *rc = (*rc & cpu_to_le32(~MM81X_RATECODE_MCS_INDEX)) |
+ le32_encode_bits(mcs_index, MM81X_RATECODE_MCS_INDEX);
+}
+
+static inline void mm81x_ratecode_nss_index_set(mm81x_rate_code_t *rc,
+ u32 nss_index)
+{
+ *rc = (*rc & cpu_to_le32(~MM81X_RATECODE_NSS_INDEX)) |
+ le32_encode_bits(nss_index, MM81X_RATECODE_NSS_INDEX);
+}
+
+static inline void mm81x_ratecode_bw_index_set(mm81x_rate_code_t *rc,
+ enum dot11_bandwidth bw_index)
+{
+ *rc = (*rc & cpu_to_le32(~MM81X_RATECODE_BW_INDEX)) |
+ le32_encode_bits(bw_index, MM81X_RATECODE_BW_INDEX);
+}
+
+/*
+ * Set bandwidth and the matching preamble in one step: 1 MHz requires
+ * the dedicated S1G_1M preamble, everything else uses S1G_SHORT.
+ */
+static inline void
+mm81x_ratecode_update_s1g_bw_preamble(mm81x_rate_code_t *rc,
+ enum dot11_bandwidth bw_index)
+{
+ enum mm81x_rate_preamble pream = MM81X_RATE_PREAMBLE_S1G_SHORT;
+
+ if (bw_index == DOT11_BANDWIDTH_1MHZ)
+ pream = MM81X_RATE_PREAMBLE_S1G_1M;
+
+ mm81x_ratecode_preamble_set(rc, pream);
+ mm81x_ratecode_bw_index_set(rc, bw_index);
+}
+
+static inline void
+mm81x_ratecode_dup_bw_index_set(mm81x_rate_code_t *rc,
+ enum dot11_bandwidth dup_bw_index)
+{
+ *rc = (*rc & cpu_to_le32(~MM81X_RATECODE_DUP_BW_INDEX)) |
+ le32_encode_bits(dup_bw_index, MM81X_RATECODE_DUP_BW_INDEX);
+}
+
+/* Flag setters: these only set, there is no corresponding clear helper */
+static inline void mm81x_ratecode_enable_rts(mm81x_rate_code_t *rc)
+{
+ *rc |= cpu_to_le32(MM81X_RATECODE_RTS_FLAG);
+}
+
+static inline void mm81x_ratecode_enable_sgi(mm81x_rate_code_t *rc)
+{
+ *rc |= cpu_to_le32(MM81X_RATECODE_SHORT_GI_FLAG);
+}
+
+/*
+ * Map an operating bandwidth in MHz to a bandwidth index.
+ * NOTE(review): 16 MHz is not mapped and falls through to the 2 MHz
+ * default even though DOT11_BANDWIDTH_16MHZ exists — confirm 16 MHz is
+ * intentionally unsupported on this hardware.
+ */
+static inline enum dot11_bandwidth mm81x_ratecode_bw_mhz_to_bw_index(u8 bw_mhz)
+{
+ return ((bw_mhz == 1) ? DOT11_BANDWIDTH_1MHZ :
+ (bw_mhz == 2) ? DOT11_BANDWIDTH_2MHZ :
+ (bw_mhz == 4) ? DOT11_BANDWIDTH_4MHZ :
+ (bw_mhz == 8) ? DOT11_BANDWIDTH_8MHZ :
+ DOT11_BANDWIDTH_2MHZ);
+}
+
+/*
+ * Inverse of the above; unknown indices (including 16 MHz) fall back
+ * to 2 MHz — see note above.
+ */
+static inline u8
+mm81x_ratecode_bw_index_to_s1g_bw_mhz(enum dot11_bandwidth bw_idx)
+{
+ return ((bw_idx == DOT11_BANDWIDTH_1MHZ) ? 1 :
+ (bw_idx == DOT11_BANDWIDTH_2MHZ) ? 2 :
+ (bw_idx == DOT11_BANDWIDTH_4MHZ) ? 4 :
+ (bw_idx == DOT11_BANDWIDTH_8MHZ) ? 8 :
+ 2);
+}
+
+#endif /* !_MM81X_RATE_CODE_H_ */
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 19/31] wifi: mm81x: add rc.c
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (17 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 18/31] wifi: mm81x: add rate_code.h Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 20/31] wifi: mm81x: add rc.h Lachlan Hodges
` (12 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/rc.c | 556 +++++++++++++++++++++
1 file changed, 556 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/rc.c
diff --git a/drivers/net/wireless/morsemicro/mm81x/rc.c b/drivers/net/wireless/morsemicro/mm81x/rc.c
new file mode 100644
index 000000000000..c14e39aa2fa4
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/rc.c
@@ -0,0 +1,556 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include "core.h"
+#include "mac.h"
+#include "bus.h"
+#include "rc.h"
+
+/*
+ * Map an operating bandwidth in MHz to an MMRC bandwidth index.
+ * Unrecognised widths (including 16 MHz) fall back to 2 MHz —
+ * NOTE(review): confirm 16 MHz is intentionally excluded, matching
+ * mm81x_ratecode_bw_mhz_to_bw_index() in rate_code.h.
+ */
+#define MM81X_RC_BW_TO_MMRC_BW(X) \
+ (((X) == 1) ? MMRC_BW_1MHZ : \
+ ((X) == 2) ? MMRC_BW_2MHZ : \
+ ((X) == 4) ? MMRC_BW_4MHZ : \
+ ((X) == 8) ? MMRC_BW_8MHZ : \
+ MMRC_BW_2MHZ)
+
+/*
+ * Periodic rate-control maintenance.  Walks every registered STA under
+ * mrc->lock, stamps its last-update time and lets the MMRC core refresh
+ * its statistics, then re-arms the 100 ms maintenance timer.
+ */
+static void mm81x_rc_work(struct work_struct *work)
+{
+	struct mm81x_rc *mrc = container_of(work, struct mm81x_rc, work);
+	struct mm81x_rc_sta *mrc_sta;
+
+	spin_lock_bh(&mrc->lock);
+
+	/* list_for_each_entry() replaces the open-coded container_of walk */
+	list_for_each_entry(mrc_sta, &mrc->stas, list) {
+		mrc_sta->last_update = jiffies;
+		mmrc_update(mrc_sta->tb);
+	}
+
+	spin_unlock_bh(&mrc->lock);
+
+	mod_timer(&mrc->timer, jiffies + msecs_to_jiffies(100));
+}
+
+/*
+ * Timer tick: defer rate-control maintenance to the driver workqueue.
+ * Uses mrc->work directly (rather than mors->mrc.work) for consistency
+ * with the container_of-derived pointer.
+ */
+static void mm81x_rc_timer(struct timer_list *t)
+{
+	struct mm81x_rc *mrc = timer_container_of(mrc, t, timer);
+
+	queue_work(mrc->mors->net_wq, &mrc->work);
+}
+
+/* Set up the rate-control list, lock, worker and 100 ms maintenance timer. */
+void mm81x_rc_init(struct mm81x *mors)
+{
+	struct mm81x_rc *mrc = &mors->mrc;
+
+	mrc->mors = mors;
+	INIT_LIST_HEAD(&mrc->stas);
+	spin_lock_init(&mrc->lock);
+	INIT_WORK(&mrc->work, mm81x_rc_work);
+	timer_setup(&mrc->timer, mm81x_rc_timer, 0);
+
+	/* Kick off the first maintenance pass */
+	mod_timer(&mrc->timer, jiffies + msecs_to_jiffies(100));
+}
+
+/*
+ * Tear down rate control.  The timer queues the work item and the work
+ * item re-arms the timer, so neither cancel_work_sync() nor a plain
+ * timer delete alone can break the cycle: a work item running during
+ * the old timer_delete_sync_try() could re-arm the timer after it was
+ * deleted, firing after teardown.  timer_shutdown_sync() both waits
+ * for the handler and makes any later mod_timer() a no-op, so shut the
+ * timer down first, then cancel the work.
+ */
+void mm81x_rc_deinit(struct mm81x *mors)
+{
+	timer_shutdown_sync(&mors->mrc.timer);
+	cancel_work_sync(&mors->mrc.work);
+}
+
+/*
+ * Enable long guard interval unconditionally, and short guard interval
+ * for every bandwidth present in caps->bandwidth.
+ * NOTE(review): the @sta parameter is currently unused — presumably
+ * kept for future per-STA SGI capability checks; confirm or drop.
+ */
+static void mm81x_rc_sta_config_guard_per_bw(struct ieee80211_sta *sta,
+ struct mmrc_sta_capabilities *caps)
+{
+ caps->guard = BIT(MMRC_GUARD_LONG);
+
+ if (caps->bandwidth & BIT(MMRC_BW_1MHZ)) {
+ caps->sgi_per_bw |= SGI_PER_BW(MMRC_BW_1MHZ);
+ caps->guard |= BIT(MMRC_GUARD_SHORT);
+ }
+
+ if (caps->bandwidth & BIT(MMRC_BW_2MHZ)) {
+ caps->sgi_per_bw |= SGI_PER_BW(MMRC_BW_2MHZ);
+ caps->guard |= BIT(MMRC_GUARD_SHORT);
+ }
+
+ if (caps->bandwidth & BIT(MMRC_BW_4MHZ)) {
+ caps->sgi_per_bw |= SGI_PER_BW(MMRC_BW_4MHZ);
+ caps->guard |= BIT(MMRC_GUARD_SHORT);
+ }
+
+ if (caps->bandwidth & BIT(MMRC_BW_8MHZ)) {
+ caps->sgi_per_bw |= SGI_PER_BW(MMRC_BW_8MHZ);
+ caps->guard |= BIT(MMRC_GUARD_SHORT);
+ }
+}
+
+/*
+ * Translate the peer's S1G capability MCS map (VHT-style encoding) into
+ * MMRC rate and spatial-stream bitmaps, taking the minimum of the
+ * peer's RX support and our TX support for 1 spatial stream.
+ * NOTE(review): the nss_mcs[0]/nss_mcs[2] bit positions and the
+ * VHT->S1G MCS correspondences in the case labels are firmware/spec
+ * derived and cannot be verified from this file — confirm against the
+ * S1G capabilities element definition.
+ */
+static void mm81x_rc_sta_add_s1g_sta_caps(struct mm81x *mors,
+ struct mmrc_sta_capabilities *caps,
+ struct ieee80211_sta_s1g_cap *s1g_cap)
+{
+ int nss_idx = 0;
+ u8 rx_mcs = s1g_cap->nss_mcs[0] & 0x3; /* 1SS */
+ u8 tx_mcs = (s1g_cap->nss_mcs[2] >> 1) & 0x3; /* 1SS */
+ u8 mcs = min(rx_mcs, tx_mcs);
+
+ /* Higher support levels fall through to accumulate the lower rates */
+ switch (mcs) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_9: /* VHT 9 -> S1G 9 */
+ caps->rates |= BIT(MMRC_MCS9) | BIT(MMRC_MCS8);
+ fallthrough;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8: /* VHT 8 -> S1G 7 */
+ caps->rates |= BIT(MMRC_MCS7) | BIT(MMRC_MCS6) |
+ BIT(MMRC_MCS5) | BIT(MMRC_MCS4) | BIT(MMRC_MCS3);
+ fallthrough;
+ case IEEE80211_VHT_MCS_SUPPORT_0_7: /* VHT 7 -> S1G 2 */
+ caps->rates |= BIT(MMRC_MCS2) | BIT(MMRC_MCS1) |
+ BIT(MMRC_MCS0) | BIT(MMRC_MCS10);
+ caps->spatial_streams |= (BIT(nss_idx) & 0x0F);
+ break;
+
+ default:
+ dev_warn(mors->dev, "Invalid MCS encoding 0x%02x for stream %d",
+ mcs, nss_idx);
+ }
+}
+
+/*
+ * mm81x_rc_sta_add() - build an MMRC capability set for a STA and
+ * register it on the driver's rate-control list.
+ * Returns 0 on success or -ENOMEM.
+ * NOTE(review): allocates with GFP_KERNEL, but one caller
+ * (mm81x_rc_reinit_sta_iter) runs under
+ * ieee80211_iterate_stations_atomic() — verify this cannot sleep in
+ * atomic context.
+ */
+int mm81x_rc_sta_add(struct mm81x *mors, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_sta_s1g_cap *s1g_cap = &sta->deflink.s1g_cap;
+ struct mm81x_sta *msta = (struct mm81x_sta *)sta->drv_priv;
+ struct mmrc_sta_capabilities caps;
+ int oper_bw_mhz = cfg80211_chandef_get_width(&mors->chandef);
+ size_t table_mem_size;
+ struct mmrc_table *tb;
+
+ memset(&caps, 0, sizeof(caps));
+
+ mm81x_rc_sta_add_s1g_sta_caps(mors, &caps, s1g_cap);
+
+ /* Configure STA for support up to 8MHZ */
+ while (oper_bw_mhz > 0) {
+ caps.bandwidth |= BIT(MM81X_RC_BW_TO_MMRC_BW(oper_bw_mhz));
+ oper_bw_mhz >>= 1;
+ }
+
+ /* Configure STA for short and long guard */
+ mm81x_rc_sta_config_guard_per_bw(sta, &caps);
+
+ /* Set max rates */
+ if (mors->hw->max_rates > 0 &&
+ mors->hw->max_rates < IEEE80211_TX_MAX_RATES)
+ caps.max_rates = mors->hw->max_rates;
+ else
+ caps.max_rates = IEEE80211_TX_MAX_RATES;
+
+ /* Set max retries */
+ if (mors->hw->max_rate_tries >= MMRC_MIN_CHAIN_ATTEMPTS &&
+ mors->hw->max_rate_tries < MMRC_MAX_CHAIN_ATTEMPTS)
+ caps.max_retries = mors->hw->max_rate_tries;
+ else
+ caps.max_retries = MMRC_MAX_CHAIN_ATTEMPTS;
+
+ /* Adding a STA that already has a table indicates a state bug */
+ WARN_ON(msta->rc.tb);
+ table_mem_size = mmrc_memory_required_for_caps(&caps);
+ tb = kzalloc(table_mem_size, GFP_KERNEL);
+ if (!tb)
+ return -ENOMEM;
+
+ /* Initialise the STA rate control table */
+ mmrc_sta_init(tb, &caps, msta->avg_rssi);
+
+ spin_lock_bh(&mors->mrc.lock);
+ /* Defensive: free any stale table the WARN_ON above flagged */
+ kfree(msta->rc.tb);
+ msta->rc.tb = tb;
+ list_add(&msta->rc.list, &mors->mrc.stas);
+ msta->rc.last_update = jiffies;
+ spin_unlock_bh(&mors->mrc.lock);
+
+ return 0;
+}
+
+/*
+ * Station iterator: rebuild the rate-control state of every STA on
+ * @vif after an operating bandwidth change.
+ * NOTE(review): invoked via ieee80211_iterate_stations_atomic(), yet
+ * mm81x_rc_sta_add() below allocates with GFP_KERNEL — potential
+ * sleeping-in-atomic; verify or switch to a non-atomic iteration.
+ */
+static void mm81x_rc_reinit_sta_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct ieee80211_vif *vif = (struct ieee80211_vif *)data;
+ struct mm81x_sta *msta = (struct mm81x_sta *)sta->drv_priv;
+ struct mm81x_vif *mors_vif = ieee80211_vif_to_mors_vif(vif);
+ struct mm81x *mors = mm81x_vif_to_mors(mors_vif);
+ int oper_bw_mhz = cfg80211_chandef_get_width(&mors->chandef);
+
+ /* Only touch stations belonging to the interface being updated */
+ if (!msta || msta->vif != vif)
+ return;
+
+ dev_dbg(mors->dev, "Reinitialize sta %pM with new op_bw=%d, ts=%ld",
+ sta->addr, oper_bw_mhz, jiffies);
+
+ mm81x_rc_sta_remove(mors, sta);
+ mm81x_rc_sta_add(mors, vif, sta);
+}
+
+/* Re-create rate-control state for all stations on @vif */
+void mm81x_rc_reinit_stas(struct mm81x *mors, struct ieee80211_vif *vif)
+{
+ ieee80211_iterate_stations_atomic(mors->hw, mm81x_rc_reinit_sta_iter,
+ vif);
+}
+
+/*
+ * Force a fixed transmit rate for @sta.  @ss is 1-based as presented to
+ * the user; MMRC uses 0-based spatial stream indices.  Returns true on
+ * success; a STA not on the rate-control list also returns true
+ * (lookup miss is not treated as failure).  @caller is used for
+ * diagnostics only (see the mm81x_rc_set_fixed_rate() wrapper macro).
+ */
+bool _mm81x_rc_set_fixed_rate(struct mm81x *mors, struct ieee80211_sta *sta,
+			      int mcs, int bw, int ss, int guard,
+			      const char *caller)
+{
+	struct mm81x_sta *msta = (struct mm81x_sta *)sta->drv_priv;
+	struct mm81x_rc_sta *mrc_sta;
+	/*
+	 * Designated initializer zeroes any fields not listed, so MMRC
+	 * never sees uninitialized stack data.
+	 */
+	struct mmrc_rate fixed_rate = {
+		.rate = mcs,
+		.bw = bw,
+		/* MMRC spatial streams are 0-based; users count from 1 */
+		.ss = ss - 1,
+		.guard = guard,
+	};
+	bool ret_val = true;
+
+	spin_lock_bh(&mors->mrc.lock);
+	list_for_each_entry(mrc_sta, &mors->mrc.stas, list) {
+		if (&msta->rc == mrc_sta) {
+			ret_val = mmrc_set_fixed_rate(msta->rc.tb, fixed_rate);
+			break;
+		}
+	}
+	spin_unlock_bh(&mors->mrc.lock);
+
+	if (!ret_val)
+		dev_err(mors->dev,
+			"failed, caller %s ss %d bw %d mcs %d guard %d", caller,
+			ss, bw, mcs, guard);
+
+	return ret_val;
+}
+
+/*
+ * Detach @sta from rate control and free its MMRC table, if it has one.
+ * The table pointer is detached under mrc.lock but freed outside it to
+ * keep the spinlock hold time minimal.
+ */
+void mm81x_rc_sta_remove(struct mm81x *mors, struct ieee80211_sta *sta)
+{
+	struct mm81x_sta *msta = (struct mm81x_sta *)sta->drv_priv;
+	struct mmrc_table *tb;
+
+	spin_lock_bh(&mors->mrc.lock);
+	tb = msta->rc.tb;
+	if (tb) {
+		list_del_init(&msta->rc.list);
+		msta->rc.tb = NULL;
+	}
+	spin_unlock_bh(&mors->mrc.lock);
+
+	kfree(tb);
+}
+
+/*
+ * Fill the lowest-rate (MCS0, 1SS) fallback chain into both the driver
+ * TX descriptor and mac80211's control rates.  1 MHz frames get the
+ * S1G_1M preamble, wider frames get S1G_SHORT.  Only chain entry 0 is
+ * populated; the rest are zeroed/terminated.
+ */
+static void mm81x_rc_sta_fill_basic_rates(struct mm81x_skb_tx_info *tx_info,
+ struct ieee80211_tx_info *info,
+ int tx_bw)
+{
+ int i;
+ enum dot11_bandwidth bw_idx = mm81x_ratecode_bw_mhz_to_bw_index(tx_bw);
+ enum mm81x_rate_preamble pream = MM81X_RATE_PREAMBLE_S1G_SHORT;
+
+ mm81x_ratecode_mcs_index_set(&tx_info->rates[0].mm81x_ratecode, 0);
+ mm81x_ratecode_nss_index_set(&tx_info->rates[0].mm81x_ratecode,
+ NSS_TO_NSS_IDX(1));
+ mm81x_ratecode_bw_index_set(&tx_info->rates[0].mm81x_ratecode, bw_idx);
+ if (bw_idx == DOT11_BANDWIDTH_1MHZ)
+ pream = MM81X_RATE_PREAMBLE_S1G_1M;
+ mm81x_ratecode_preamble_set(&tx_info->rates[0].mm81x_ratecode, pream);
+ tx_info->rates[0].count = 4;
+
+ /* Terminate the remainder of the driver rate chain */
+ for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
+ tx_info->rates[i].count = 0;
+
+ info->control.rates[0].idx = 0;
+ info->control.rates[0].count = tx_info->rates[0].count;
+ info->control.rates[0].flags = 0;
+ /* idx = -1 terminates the mac80211 rate chain */
+ info->control.rates[1].idx = -1;
+}
+
+/*
+ * Fetch the current MMRC-selected rate chain for @msta.  @size is the
+ * frame length in bytes, forwarded to the MMRC core.  Returns 0 on
+ * success or -ENOENT if @msta is not on the rate-control list.
+ */
+static int mm81x_rc_sta_get_rates(struct mm81x *mors, struct mm81x_sta *msta,
+				  struct mmrc_rate_table *rates, size_t size)
+{
+	struct mm81x_rc_sta *mrc_sta;
+	int ret = -ENOENT;
+
+	spin_lock_bh(&mors->mrc.lock);
+	/* list_for_each_entry() replaces the open-coded list_entry walk */
+	list_for_each_entry(mrc_sta, &mors->mrc.stas, list) {
+		if (&msta->rc == mrc_sta) {
+			mmrc_get_rates(msta->rc.tb, rates, size);
+			ret = 0;
+			break;
+		}
+	}
+	spin_unlock_bh(&mors->mrc.lock);
+
+	return ret;
+}
+
+/*
+ * Decide whether a frame must go out at basic rates instead of the
+ * rate-control selected chain: no STA context, (QoS-)nullfunc
+ * keepalives, non-QoS data, EAPOL frames, or an explicit minimum-rate
+ * request from mac80211.
+ */
+static bool mm81x_rc_use_basic_rates(struct ieee80211_sta *sta,
+				     struct sk_buff *skb,
+				     struct ieee80211_hdr *hdr)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	__le16 fc = hdr->frame_control;
+
+	if (!sta)
+		return true;
+
+	if (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc))
+		return true;
+
+	if (!ieee80211_is_data_qos(fc))
+		return true;
+
+	/* Use basic rates for EAPOL exchanges or when instructed */
+	if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE) ||
+		     (info->flags & IEEE80211_TX_CTL_USE_MINRATE)))
+		return true;
+
+	return false;
+}
+
+/*
+ * mm81x_rc_sta_fill_tx_rates() - populate the per-frame rate chain in
+ * both the driver TX descriptor and mac80211's control info.  Basic
+ * rates are always written first as a fallback; for QoS data to a known
+ * STA they are then overwritten with the MMRC-selected chain.
+ * @rts_allowed gates RTS/CTS protection on individual chain entries.
+ */
+void mm81x_rc_sta_fill_tx_rates(struct mm81x *mors,
+				struct mm81x_skb_tx_info *tx_info,
+				struct sk_buff *skb, struct ieee80211_sta *sta,
+				int tx_bw, bool rts_allowed)
+{
+	int ret, i;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct mm81x_sta *msta;
+	struct mmrc_rate_table rates;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	/*
+	 * The direct enum cast below relies on both encodings matching;
+	 * check the full set (8 MHz was previously missing here).
+	 */
+	BUILD_BUG_ON((MMRC_BW_1MHZ != (enum mmrc_bw)DOT11_BANDWIDTH_1MHZ ||
+		      MMRC_BW_2MHZ != (enum mmrc_bw)DOT11_BANDWIDTH_2MHZ ||
+		      MMRC_BW_4MHZ != (enum mmrc_bw)DOT11_BANDWIDTH_4MHZ ||
+		      MMRC_BW_8MHZ != (enum mmrc_bw)DOT11_BANDWIDTH_8MHZ ||
+		      MMRC_BW_16MHZ != (enum mmrc_bw)DOT11_BANDWIDTH_16MHZ));
+
+	memset(&info->control.rates, 0, sizeof(info->control.rates));
+	memset(&info->status.rates, 0, sizeof(info->status.rates));
+	mm81x_rc_sta_fill_basic_rates(tx_info, info, tx_bw);
+
+	/* Use basic rates for non data packets */
+	if (mm81x_rc_use_basic_rates(sta, skb, hdr))
+		return;
+
+	msta = (struct mm81x_sta *)sta->drv_priv;
+	if (!msta)
+		return;
+
+	ret = mm81x_rc_sta_get_rates(mors, msta, &rates, skb->len);
+	if (ret != 0)
+		return;
+
+	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+		info->control.rates[i].flags = 0;
+		if (rates.rates[i].rate != MMRC_MCS_UNUSED) {
+			u8 mcs = rates.rates[i].rate;
+			u8 nss_index = rates.rates[i].ss;
+			enum dot11_bandwidth bw_idx =
+				(enum dot11_bandwidth)rates.rates[i].bw;
+			enum mm81x_rate_preamble pream =
+				MM81X_RATE_PREAMBLE_S1G_SHORT;
+
+			mm81x_ratecode_bw_index_set(
+				&tx_info->rates[i].mm81x_ratecode, bw_idx);
+			mm81x_ratecode_mcs_index_set(
+				&tx_info->rates[i].mm81x_ratecode, mcs);
+			mm81x_ratecode_nss_index_set(
+				&tx_info->rates[i].mm81x_ratecode, nss_index);
+			/* 1 MHz transmissions require the S1G_1M preamble */
+			if (bw_idx == DOT11_BANDWIDTH_1MHZ)
+				pream = MM81X_RATE_PREAMBLE_S1G_1M;
+			mm81x_ratecode_preamble_set(
+				&tx_info->rates[i].mm81x_ratecode, pream);
+			tx_info->rates[i].count = rates.rates[i].attempts;
+
+			if (rts_allowed &&
+			    (rates.rates[i].flags & BIT(MMRC_FLAGS_CTS_RTS))) {
+				mm81x_ratecode_enable_rts(
+					&tx_info->rates[i].mm81x_ratecode);
+				info->control.rates[i].flags |=
+					IEEE80211_TX_RC_USE_RTS_CTS;
+			}
+
+			if (rates.rates[i].guard == MMRC_GUARD_SHORT) {
+				mm81x_ratecode_enable_sgi(
+					&tx_info->rates[i].mm81x_ratecode);
+				info->control.rates[i].flags |=
+					IEEE80211_TX_RC_SHORT_GI;
+			}
+
+			/* Update skb tx_info */
+			info->control.rates[i].idx = rates.rates[i].rate;
+			info->control.rates[i].count = rates.rates[i].attempts;
+		} else {
+			/* Terminate both chains at the first unused slot */
+			info->control.rates[i].idx = -1;
+			info->control.rates[i].count = 0;
+			tx_info->rates[i].count = 0;
+		}
+	}
+}
+
+/*
+ * Feed TX-status rate/attempt information for @msta back into the MMRC
+ * core, but only if the STA is still on the rate-control list (it may
+ * have been removed between TX and status processing).
+ */
+static void mm81x_rc_sta_set_rates(struct mm81x *mors, struct mm81x_sta *msta,
+ struct mmrc_rate_table *rates, int attempts,
+ bool was_aggregated)
+{
+ struct list_head *pos;
+
+ spin_lock_bh(&mors->mrc.lock);
+ list_for_each(pos, &mors->mrc.stas) {
+ struct mm81x_rc_sta *mrc_sta =
+ list_entry(pos, struct mm81x_rc_sta, list);
+
+ if (&msta->rc == mrc_sta) {
+ mmrc_feedback(msta->rc.tb, rates, attempts,
+ was_aggregated);
+ break;
+ }
+ }
+ spin_unlock_bh(&mors->mrc.lock);
+}
+
+/*
+ * mm81x_rc_sta_feedback_rates() - consume device TX status for @skb:
+ * feed the attempted rate chain back into the MMRC core, account
+ * airtime, and translate the device status flags into mac80211
+ * tx_info flags (ACK, PS-filter, A-MPDU, EOSP).
+ */
+void mm81x_rc_sta_feedback_rates(struct mm81x *mors, struct sk_buff *skb,
+				 struct ieee80211_sta *sta,
+				 struct mm81x_skb_tx_status *tx_sts,
+				 int attempts)
+{
+	int i;
+	u32 tx_airtime = 0;
+	struct mmrc_rate_table rates;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_rate *r = &txi->status.rates[0];
+	int count = min_t(int, MM81X_SKB_MAX_RATES, IEEE80211_TX_MAX_RATES);
+	/* Was "msta = msta = ..." - redundant double self-assignment */
+	struct mm81x_sta *msta = (struct mm81x_sta *)sta->drv_priv;
+
+	/* Don't update rate info if basic rates were used */
+	if (mm81x_rc_use_basic_rates(sta, skb, hdr))
+		goto exit;
+
+	if (attempts <= 0)
+		/* Did we really send the packet? */
+		goto exit;
+
+	for (i = 0; i < count; i++) {
+		rates.rates[i].rate = mm81x_ratecode_mcs_index_get(
+			tx_sts->rates[i].mm81x_ratecode);
+		rates.rates[i].ss = mm81x_ratecode_nss_index_get(
+			tx_sts->rates[i].mm81x_ratecode);
+		rates.rates[i].guard =
+			mm81x_ratecode_sgi_get(tx_sts->rates[i].mm81x_ratecode);
+		rates.rates[i].bw = mm81x_ratecode_bw_index_get(
+			tx_sts->rates[i].mm81x_ratecode);
+		/*
+		 * NOTE(review): rts_get() returns bool; this presumably
+		 * lands in BIT(MMRC_FLAGS_CTS_RTS) only if that flag is
+		 * bit 0 — confirm against the mmrc flag definitions.
+		 */
+		rates.rates[i].flags =
+			mm81x_ratecode_rts_get(tx_sts->rates[i].mm81x_ratecode);
+		rates.rates[i].attempts = tx_sts->rates[i].count;
+
+		tx_airtime +=
+			mmrc_calculate_rate_tx_time(&rates.rates[i], skb->len);
+	}
+
+	if (msta) {
+		/*
+		 * Save the rate information. This will be used to update
+		 * station's tx rate stats
+		 */
+		msta->last_sta_tx_rate.bw = rates.rates[0].bw;
+		msta->last_sta_tx_rate.rate = rates.rates[0].rate;
+		msta->last_sta_tx_rate.ss = rates.rates[0].ss;
+		msta->last_sta_tx_rate.guard = rates.rates[0].guard;
+	}
+
+	mm81x_rc_sta_set_rates(mors, msta, &rates, attempts,
+			       !!(le32_to_cpu(tx_sts->flags) &
+				  MM81X_TX_STATUS_WAS_AGGREGATED));
+
+	ieee80211_sta_register_airtime(sta, tx_sts->tid, tx_airtime, 0);
+
+exit:
+	ieee80211_tx_info_clear_status(txi);
+
+	if (!(le32_to_cpu(tx_sts->flags) & MM81X_TX_STATUS_FLAGS_NO_ACK) &&
+	    !(txi->flags & IEEE80211_TX_CTL_NO_ACK))
+		txi->flags |= IEEE80211_TX_STAT_ACK;
+
+	if (le32_to_cpu(tx_sts->flags) & MM81X_TX_STATUS_FLAGS_PS_FILTERED) {
+		txi->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+
+		/*
+		 * Clear TX CTL AMPDU flag so that this frame gets rescheduled
+		 * in ieee80211_handle_filtered_frame(). This flag will get set
+		 * again by mac80211's tx path on rescheduling.
+		 */
+		txi->flags &= ~IEEE80211_TX_CTL_AMPDU;
+		if (msta) {
+			if (!msta->tx_ps_filter_en)
+				dev_dbg(mors->dev, "TX ps filter set sta[%pM]",
+					msta->addr);
+			msta->tx_ps_filter_en = true;
+		}
+	}
+
+	for (i = 0; i < count; i++) {
+		if (tx_sts->rates[i].count > 0) {
+			r[i].count = tx_sts->rates[i].count;
+			r[i].flags |= IEEE80211_TX_RC_MCS;
+		} else {
+			r[i].idx = -1;
+		}
+	}
+
+	/* single packet per A-MPDU (for now) */
+	if (txi->flags & IEEE80211_TX_CTL_AMPDU) {
+		txi->flags |= IEEE80211_TX_STAT_AMPDU;
+		txi->status.ampdu_len = 1;
+		txi->status.ampdu_ack_len =
+			txi->flags & IEEE80211_TX_STAT_ACK ? 1 : 0;
+	}
+
+	/*
+	 * Inform mac80211 that the SP (elicited by a PS-Poll or u-APSD) is
+	 * over
+	 */
+	if (sta && (txi->flags & IEEE80211_TX_STATUS_EOSP)) {
+		txi->flags &= ~IEEE80211_TX_STATUS_EOSP;
+		ieee80211_sta_eosp(sta);
+	}
+}
+
+/*
+ * Track mac80211 station state transitions and keep the rate-control
+ * list in sync: register on association, remove on disassociation, and
+ * clean up stale entries left by earlier failures.
+ */
+void mm81x_rc_sta_state_check(struct mm81x *mors, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct mm81x_sta *msta = (struct mm81x_sta *)sta->drv_priv;
+
+ /* Add to Morse RC STA list */
+ if (old_state < new_state && new_state == IEEE80211_STA_ASSOC) {
+ /* Newly associated, add to RC */
+ mm81x_rc_sta_add(mors, vif, sta);
+ } else if (old_state > new_state && (old_state == IEEE80211_STA_ASSOC ||
+ old_state == IEEE80211_STA_AUTH)) {
+ /* Lost or failed association; remove from list */
+ mm81x_rc_sta_remove(mors, sta);
+ } else if (old_state < new_state && old_state == IEEE80211_STA_NONE &&
+ msta->rc.list.prev) {
+ /*
+ * Special case for driver warning issue causing a sta to be
+ * left on the list
+ * NOTE(review): a non-NULL list.prev as "was on a list"
+ * relies on drv_priv being zero-initialised — confirm.
+ */
+ dev_dbg(mors->dev, "Remove stale sta from rc list");
+ mm81x_rc_sta_remove(mors, sta);
+ }
+}
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 20/31] wifi: mm81x: add rc.h
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (18 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 19/31] wifi: mm81x: add rc.c Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 21/31] mmc: sdio: add Morse Micro vendor ids Lachlan Hodges
` (11 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/rc.h | 57 ++++++++++++++++++++++
1 file changed, 57 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/rc.h
diff --git a/drivers/net/wireless/morsemicro/mm81x/rc.h b/drivers/net/wireless/morsemicro/mm81x/rc.h
new file mode 100644
index 000000000000..1a8d76d28c14
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/rc.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+
+#ifndef _MM81X_RC_H_
+#define _MM81X_RC_H_
+
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include "core.h"
+#include "mmrc.h"
+
+struct mm81x_vif;
+
+/* Initial number of rate-chain entries requested from MMRC */
+#define INIT_MAX_RATES_NUM 4
+
+/* Per-device rate control state (embedded in struct mm81x) */
+struct mm81x_rc {
+ /* Serialise rate control queue manipulation and timer functions */
+ spinlock_t lock;
+ /* All registered struct mm81x_rc_sta entries */
+ struct list_head stas;
+ /* Periodic maintenance timer; queues @work every 100 ms */
+ struct timer_list timer;
+ struct work_struct work;
+ /* Back-pointer to the owning device */
+ struct mm81x *mors;
+};
+
+/* Per-station rate control state (embedded in struct mm81x_sta) */
+struct mm81x_rc_sta {
+ /* MMRC core table; NULL when not registered */
+ struct mmrc_table *tb;
+ /* Entry on mm81x_rc.stas, protected by mm81x_rc.lock */
+ struct list_head list;
+ /* jiffies of the last maintenance pass */
+ unsigned long last_update;
+};
+
+void mm81x_rc_init(struct mm81x *mors);
+void mm81x_rc_deinit(struct mm81x *mors);
+int mm81x_rc_sta_add(struct mm81x *mors, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+/* Wrapper passing the call site name through for diagnostics */
+#define mm81x_rc_set_fixed_rate(mors, sta, mcs, bw, ss, guard) \
+ _mm81x_rc_set_fixed_rate(mors, sta, mcs, bw, ss, guard, __func__)
+bool _mm81x_rc_set_fixed_rate(struct mm81x *mors, struct ieee80211_sta *sta,
+ int mcs, int bw, int ss, int guard,
+ const char *caller);
+void mm81x_rc_sta_remove(struct mm81x *mors, struct ieee80211_sta *sta);
+void mm81x_rc_sta_fill_tx_rates(struct mm81x *mors,
+ struct mm81x_skb_tx_info *tx_info,
+ struct sk_buff *skb, struct ieee80211_sta *sta,
+ int tx_bw, bool rts_allowed);
+void mm81x_rc_sta_feedback_rates(struct mm81x *mors, struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct mm81x_skb_tx_status *tx_sts,
+ int tx_attempts);
+void mm81x_rc_sta_state_check(struct mm81x *mors, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state);
+void mm81x_rc_reinit_stas(struct mm81x *mors, struct ieee80211_vif *vif);
+
+#endif /* !_MM81X_RC_H_ */
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 21/31] mmc: sdio: add Morse Micro vendor ids
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (19 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 20/31] wifi: mm81x: add rc.h Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-05-11 14:54 ` Ulf Hansson
2026-04-30 4:55 ` [PATCH wireless-next v2 22/31] wifi: mm81x: add sdio.c Lachlan Hodges
` (10 subsequent siblings)
31 siblings, 1 reply; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Ulf Hansson
Cc: arien.judge, dan.callaghan, ayman.grais, linux-wireless,
Lachlan Hodges, linux-mmc, linux-kernel
Add the Morse Micro mm81x series vendor ids.
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
v1 -> v2:
- Use a single VENDOR_ID
- Drop B2 chip which is not needed
Ulf, a mistake was made in v1 [1] listing multiple vendor IDs instead
of a single vendor ID and the subsequent device IDs. As for why
the series is structured as a series of singular patches is due to
how wireless driver submissions are as per [2] to simplify review
due to the size. The final submission will be sent as a pull request
with all driver files as a single commit and this SDIO commit
beforehand going through the wireless tree once you have acked.
[1] https://lore.kernel.org/linux-wireless/CAPDyKFp6dhmpkMCs=ejYTpR9oNbNz0urtFD2HTvRwOp2Y7H3DA@mail.gmail.com/
[2] https://wireless.docs.kernel.org/en/latest/en/developers/documentation/submittingpatches.html#new-driver
---
include/linux/mmc/sdio_ids.h | 3 +++
1 file changed, 3 insertions(+)
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 0685dd717e85..111cb1758830 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -117,6 +117,9 @@
#define SDIO_VENDOR_ID_MICROCHIP_WILC 0x0296
#define SDIO_DEVICE_ID_MICROCHIP_WILC1000 0x5347
+#define SDIO_VENDOR_ID_MORSEMICRO 0x325b
+#define SDIO_DEVICE_ID_MORSEMICRO_MM81XB2 0x0809
+
#define SDIO_VENDOR_ID_NXP 0x0471
#define SDIO_DEVICE_ID_NXP_IW61X 0x0205
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* Re: [PATCH wireless-next v2 21/31] mmc: sdio: add Morse Micro vendor ids
2026-04-30 4:55 ` [PATCH wireless-next v2 21/31] mmc: sdio: add Morse Micro vendor ids Lachlan Hodges
@ 2026-05-11 14:54 ` Ulf Hansson
0 siblings, 0 replies; 36+ messages in thread
From: Ulf Hansson @ 2026-05-11 14:54 UTC (permalink / raw)
To: Lachlan Hodges
Cc: johannes, Ulf Hansson, arien.judge, dan.callaghan, ayman.grais,
linux-wireless, linux-mmc, linux-kernel
On Thu, 30 Apr 2026 at 06:58, Lachlan Hodges
<lachlan.hodges@morsemicro.com> wrote:
>
> Add the Morse Micro mm81x series vendor ids.
>
> Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
Acked-by: Ulf Hansson <ulf.hansson@linaro.org>
Kind regards
Uffe
> ---
>
> v1 -> v2:
>
> - Use a single VENDOR_ID
> - Drop B2 chip which is not needed
>
> Ulf, a mistake was made in v1 [1] listing multiple vendor IDs instead
> of a single vendor ID and the subsequent device IDs. As for why
> the series is structured as a series of singular patches is due to
> how wireless driver submissions are as per [2] to simplify review
> due to the size. The final submission will be sent as a pull request
> with all driver files as a single commit and this SDIO commit
> beforehand going through the wireless tree once you have acked.
>
> [1] https://lore.kernel.org/linux-wireless/CAPDyKFp6dhmpkMCs=ejYTpR9oNbNz0urtFD2HTvRwOp2Y7H3DA@mail.gmail.com/
> [2] https://wireless.docs.kernel.org/en/latest/en/developers/documentation/submittingpatches.html#new-driver
>
> ---
> include/linux/mmc/sdio_ids.h | 3 +++
> 1 file changed, 3 insertions(+)
>
> diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
> index 0685dd717e85..111cb1758830 100644
> --- a/include/linux/mmc/sdio_ids.h
> +++ b/include/linux/mmc/sdio_ids.h
> @@ -117,6 +117,9 @@
> #define SDIO_VENDOR_ID_MICROCHIP_WILC 0x0296
> #define SDIO_DEVICE_ID_MICROCHIP_WILC1000 0x5347
>
> +#define SDIO_VENDOR_ID_MORSEMICRO 0x325b
> +#define SDIO_DEVICE_ID_MORSEMICRO_MM81XB2 0x0809
> +
> #define SDIO_VENDOR_ID_NXP 0x0471
> #define SDIO_DEVICE_ID_NXP_IW61X 0x0205
>
> --
> 2.43.0
>
^ permalink raw reply [flat|nested] 36+ messages in thread
* [PATCH wireless-next v2 22/31] wifi: mm81x: add sdio.c
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (20 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 21/31] mmc: sdio: add Morse Micro vendor ids Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 23/31] wifi: mm81x: add skbq.c Lachlan Hodges
` (9 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/sdio.c | 614 +++++++++++++++++++
1 file changed, 614 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/sdio.c
diff --git a/drivers/net/wireless/morsemicro/mm81x/sdio.c b/drivers/net/wireless/morsemicro/mm81x/sdio.c
new file mode 100644
index 000000000000..0832c9195a68
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/sdio.c
@@ -0,0 +1,614 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sd.h>
+#include "hw.h"
+#include "core.h"
+#include "bus.h"
+#include "mac.h"
+#include "fw.h"
+#include "hif.h"
+
+/*
+ * Value to indicate that the base address for bulk/register
+ * read/writes has yet to be set
+ */
+#define MM81X_SDIO_BASE_ADDR_UNSET 0xFFFFFFFF
+
+#define MM81X_SDIO_ALIGNMENT (8)
+
+#define MM81X_SDIO_REG_ADDRESS_BASE 0x10000
+#define MM81X_SDIO_REG_ADDRESS_WINDOW_0 MM81X_SDIO_REG_ADDRESS_BASE
+#define MM81X_SDIO_REG_ADDRESS_WINDOW_1 (MM81X_SDIO_REG_ADDRESS_BASE + 1)
+#define MM81X_SDIO_REG_ADDRESS_CONFIG (MM81X_SDIO_REG_ADDRESS_BASE + 2)
+
+struct mm81x_sdio {
+ bool enabled;
+ u32 bulk_addr_base;
+ u32 register_addr_base;
+ struct sdio_func *func;
+ const struct sdio_device_id *id;
+};
+
+static void mm81x_sdio_remove(struct sdio_func *func);
+
/*
 * SDIO IRQ callback. IRQs are claimed on function 1, but the driver
 * state (struct mm81x) hangs off function 2, so translate first.
 */
static void irq_handler(struct sdio_func *func1)
{
	struct sdio_func *func = func1->card->sdio_func[1];
	struct mm81x *mors = sdio_get_drvdata(func);

	mm81x_hw_irq_handle(mors);
}

/*
 * Claim the SDIO IRQ on function 1 while holding the host.
 * Returns 0 or the negative errno from sdio_claim_irq().
 */
static int mm81x_sdio_enable_irq(struct mm81x_sdio *sdio)
{
	int ret;
	struct sdio_func *func = sdio->func;
	struct sdio_func *func1 = func->card->sdio_func[0];
	struct mm81x *mors = sdio_get_drvdata(func);

	sdio_claim_host(func);
	ret = sdio_claim_irq(func1, irq_handler);
	if (ret)
		dev_err(mors->dev, "Failed to enable sdio irq: %d\n", ret);

	sdio_release_host(func);
	return ret;
}

/* Release the IRQ claimed by mm81x_sdio_enable_irq() */
static void mm81x_sdio_disable_irq(struct mm81x_sdio *sdio)
{
	struct sdio_func *func = sdio->func;
	struct sdio_func *func1 = func->card->sdio_func[0];

	sdio_claim_host(func);
	sdio_release_irq(func1);
	sdio_release_host(func);
}

/*
 * bus_ops->set_irq implementation.
 * NOTE(review): a failure from mm81x_sdio_enable_irq() is not propagated
 * here because the op returns void -- confirm callers can tolerate that.
 */
static void mm81x_sdio_set_irq(struct mm81x *mors, bool enable)
{
	struct mm81x_sdio *sdio = (struct mm81x_sdio *)mors->drv_priv;

	if (enable)
		mm81x_sdio_enable_irq(sdio);
	else
		mm81x_sdio_disable_irq(sdio);
}
+
/*
 * Pack a window base address and 2-bit access-width code into the single
 * value cached in bulk_addr_base/register_addr_base for change detection.
 */
static u32 mm81x_sdio_calculate_base_address(u32 address, u8 access)
{
	return (address & MM81X_SDIO_RW_ADDR_BOUNDARY_MASK) | (access & 0x3);
}

/* Invalidate both cached window bases, forcing reprogramming on next access */
static void mm81x_sdio_reset_base_address(struct mm81x_sdio *sdio)
{
	sdio->bulk_addr_base = MM81X_SDIO_BASE_ADDR_UNSET;
	sdio->register_addr_base = MM81X_SDIO_BASE_ADDR_UNSET;
}
+
+static int mm81x_sdio_set_func_address_base(struct mm81x_sdio *sdio,
+ struct sdio_func *func, u32 address,
+ u8 access)
+{
+ int ret = 0;
+ int retries = 0;
+ static const int max_retries = 3;
+ struct sdio_func *func2 = sdio->func;
+ struct mm81x *mors = sdio_get_drvdata(sdio->func);
+ s32 calculated_addr_base =
+ mm81x_sdio_calculate_base_address(address, access);
+ u32 *current_addr_base = func == func2 ? &sdio->bulk_addr_base :
+ &sdio->register_addr_base;
+
+ if ((*current_addr_base) == calculated_addr_base &&
+ *current_addr_base != MM81X_SDIO_BASE_ADDR_UNSET)
+ return ret;
+
+retry:
+ sdio_writeb(func, (u8)u32_get_bits(address, GENMASK(23, 16)),
+ MM81X_SDIO_REG_ADDRESS_WINDOW_0, &ret);
+ if (ret)
+ goto err;
+
+ sdio_writeb(func, (u8)u32_get_bits(address, GENMASK(31, 24)),
+ MM81X_SDIO_REG_ADDRESS_WINDOW_1, &ret);
+ if (ret)
+ goto err;
+
+ sdio_writeb(func, access & 0x3, MM81X_SDIO_REG_ADDRESS_CONFIG, &ret);
+ if (ret)
+ goto err;
+
+ *current_addr_base = calculated_addr_base;
+ if (retries)
+ dev_dbg(mors->dev, "%s succeeded after %d retries\n", __func__,
+ retries);
+
+ return ret;
+err:
+ retries++;
+ if (ret == -ETIMEDOUT && retries <= max_retries) {
+ dev_dbg(mors->dev, "%s failed (%d), retrying (%d/%d)\n",
+ __func__, ret, retries, max_retries);
+ goto retry;
+ }
+
+ *current_addr_base = MM81X_SDIO_BASE_ADDR_UNSET;
+ return ret;
+}
+
+static int mm81x_sdio_mem_write_block(struct mm81x_sdio *sdio, u32 address,
+ u8 *data, ssize_t size)
+{
+ int ret;
+ struct sdio_func *func2 = sdio->func;
+ struct mm81x *mors = sdio_get_drvdata(sdio->func);
+
+ mm81x_sdio_set_func_address_base(sdio, func2, address,
+ MM81X_CONFIG_ACCESS_4BYTE);
+ if (unlikely(!IS_ALIGNED((uintptr_t)data,
+ mors->bus_ops->bulk_alignment))) {
+ ret = -EBADE;
+ goto exit;
+ }
+
+ address &= 0x0000FFFF; /* remove base and keep offset */
+ ret = sdio_memcpy_toio(func2, address, data, size);
+ if (ret)
+ goto exit;
+
+ ret = size;
+exit:
+ return ret;
+}
+
+static int mm81x_sdio_mem_write_byte(struct mm81x_sdio *sdio, u32 address,
+ u8 *data, ssize_t size)
+{
+ int i, ret;
+ struct sdio_func *func1 = sdio->func->card->sdio_func[0];
+
+ mm81x_sdio_set_func_address_base(sdio, func1, address,
+ MM81X_CONFIG_ACCESS_1BYTE);
+
+ address &= 0x0000FFFF; /* remove base and keep offset */
+ for (i = 0; i < size; i++) {
+ sdio_writeb(func1, data[i], address + i, (int *)&ret);
+ if (ret)
+ goto exit;
+ }
+
+ ret = size;
+exit:
+ return ret;
+}
+
+static void mm81x_sdio_claim_host(struct mm81x *mors)
+{
+ struct mm81x_sdio *sdio = (struct mm81x_sdio *)mors->drv_priv;
+ struct sdio_func *func = sdio->func;
+
+ sdio_claim_host(func);
+}
+
+static void mm81x_sdio_release_host(struct mm81x *mors)
+{
+ struct mm81x_sdio *sdio = (struct mm81x_sdio *)mors->drv_priv;
+ struct sdio_func *func = sdio->func;
+
+ sdio_release_host(func);
+}
+
+static int mm81x_sdio_mem_read_block(struct mm81x_sdio *sdio, u32 address,
+ u8 *data, ssize_t size)
+{
+ int ret;
+ struct sdio_func *func2 = sdio->func;
+ struct mm81x *mors = sdio_get_drvdata(sdio->func);
+
+ mm81x_sdio_set_func_address_base(sdio, func2, address,
+ MM81X_CONFIG_ACCESS_4BYTE);
+ if (unlikely(!IS_ALIGNED((uintptr_t)data,
+ mors->bus_ops->bulk_alignment))) {
+ ret = -EBADE;
+ goto exit;
+ }
+
+ address &= 0x0000FFFF; /* remove base and keep offset */
+ ret = sdio_memcpy_fromio(func2, data, address, size);
+ if (ret)
+ goto exit;
+
+ /*
+ * Observed sometimes that SDIO read repeats the first 4-bytes
+ * word twice, overwriting second word (hence, tail will be
+ * overwritten with 'sync' byte). When this happens, reading
+ * will fetch the correct word. NB: if repeated again, pass it
+ * anyway and upper layers will handle it
+ */
+
+ if (size >= 8 && memcmp(data, data + 4, 4) == 0)
+ sdio_memcpy_fromio(func2, data, address, 8);
+
+ ret = size;
+exit:
+ return ret;
+}
+
+static int mm81x_sdio_mem_read_byte(struct mm81x_sdio *sdio, u32 address,
+ u8 *data, ssize_t size)
+{
+ int i, ret;
+ struct sdio_func *func1 = sdio->func->card->sdio_func[0];
+
+ mm81x_sdio_set_func_address_base(sdio, func1, address,
+ MM81X_CONFIG_ACCESS_1BYTE);
+
+ address &= 0x0000FFFF; /* remove base and keep offset */
+ for (i = 0; i < size; i++) {
+ data[i] = sdio_readb(func1, address + i, (int *)&ret);
+ if (ret)
+ goto exit;
+ }
+
+ ret = size;
+exit:
+ return ret;
+}
+
+static int mm81x_sdio_dm_write(struct mm81x *mors, u32 address, const u8 *data,
+ int len)
+{
+ int ret = 0;
+ int block_len, byte_len;
+ struct mm81x_sdio *sdio = (struct mm81x_sdio *)mors->drv_priv;
+ int remaining = len;
+ int offset = 0;
+
+ if (remaining > 0 && address & 0x3) {
+ len = 4 - (address & 0x3);
+ ret = mm81x_sdio_mem_write_byte(sdio, address, (u8 *)data, len);
+ if (ret != len)
+ return -EIO;
+
+ offset += len;
+ remaining -= len;
+ }
+
+ while ((remaining) > 0) {
+ /*
+ * We can only write up to the end of a single window in
+ * each write operation.
+ */
+ u32 window_end = (address + offset) |
+ ~MM81X_SDIO_RW_ADDR_BOUNDARY_MASK;
+
+ len = min(remaining, (int)(window_end + 1 - address - offset));
+ block_len = len & ~0x3;
+ byte_len = len & 0x3;
+
+ if (block_len) {
+ ret = mm81x_sdio_mem_write_block(sdio, address + offset,
+ (u8 *)(data + offset),
+ block_len);
+ if (ret != block_len)
+ return -EIO;
+
+ offset += block_len;
+ }
+
+ if (byte_len) {
+ ret = mm81x_sdio_mem_write_byte(sdio, address + offset,
+ (u8 *)(data + offset),
+ byte_len);
+ if (ret != byte_len)
+ return -EIO;
+
+ offset += byte_len;
+ }
+
+ remaining -= len;
+ }
+
+ return 0;
+}
+
+static int mm81x_sdio_dm_read(struct mm81x *mors, u32 address, u8 *data,
+ int len)
+{
+ int ret = 0;
+ int block_len, byte_len;
+ struct mm81x_sdio *sdio = (struct mm81x_sdio *)mors->drv_priv;
+ int remaining = len;
+ int offset = 0;
+
+ if (remaining > 0 && address & 0x3) {
+ len = 4 - (address & 0x3);
+ ret = mm81x_sdio_mem_read_byte(sdio, address, data, len);
+ if (ret != len)
+ return -EIO;
+
+ offset += len;
+ remaining -= len;
+ }
+
+ while (remaining > 0) {
+ /*
+ * We can only read up to the end of a single window in
+ * each read operation.
+ */
+ u32 window_end = (address + offset) |
+ ~MM81X_SDIO_RW_ADDR_BOUNDARY_MASK;
+
+ len = min(remaining, (int)(window_end + 1 - address - offset));
+ block_len = len & ~0x3;
+ byte_len = len & 0x3;
+
+ if (block_len) {
+ ret = mm81x_sdio_mem_read_block(sdio, address + offset,
+ data + offset, len);
+ if (ret != block_len)
+ return -EIO;
+
+ offset += block_len;
+ }
+
+ if (byte_len) {
+ ret = mm81x_sdio_mem_read_byte(sdio, address + offset,
+ data + offset, len);
+ if (ret != byte_len)
+ return -EIO;
+
+ offset += byte_len;
+ }
+
+ remaining -= len;
+ }
+
+ return 0;
+}
+
+static int mm81x_sdio_reg32_write(struct mm81x *mors, u32 address, u32 val)
+{
+ ssize_t ret = 0;
+ u32 original_address = address;
+ struct mm81x_sdio *sdio = (struct mm81x_sdio *)mors->drv_priv;
+ struct sdio_func *func1 = sdio->func->card->sdio_func[0];
+
+ mm81x_sdio_set_func_address_base(sdio, func1, address,
+ MM81X_CONFIG_ACCESS_4BYTE);
+
+ address &= 0x0000FFFF;
+ sdio_writel(func1, (__force u32)cpu_to_le32(val),
+ (__force u32)cpu_to_le32(address), (int *)&ret);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ if (original_address == MM81X_REG_RESET(mors) &&
+ val == MM81X_REG_RESET_VALUE(mors)) {
+ dev_dbg(mors->dev,
+ "SDIO reset detected, invalidating base addr\n");
+ mm81x_sdio_reset_base_address(sdio);
+ }
+
+ return -EIO;
+}
+
+static int mm81x_sdio_reg32_read(struct mm81x *mors, u32 address, u32 *val)
+{
+ u32 value;
+ ssize_t ret = 0;
+ struct mm81x_sdio *sdio = (struct mm81x_sdio *)mors->drv_priv;
+ struct sdio_func *func1 = sdio->func->card->sdio_func[0];
+
+ mm81x_sdio_set_func_address_base(sdio, func1, address,
+ MM81X_CONFIG_ACCESS_4BYTE);
+
+ address &= 0x0000FFFF;
+ value = sdio_readl(func1, (__force u32)cpu_to_le32(address),
+ (int *)&ret);
+ if (ret)
+ ret = -EIO;
+
+ *val = le32_to_cpup((__le32 *)&value);
+ return 0;
+}
+
/*
 * bus_ops->set_bus_enable: gate SDIO traffic around chip power state.
 * NOTE(review): this pokes host->ops->enable_sdio_irq() directly rather
 * than going through the sdio core -- confirm this is safe against the
 * core's own IRQ enable/disable accounting.
 */
static void mm81x_sdio_bus_enable(struct mm81x *mors, bool enable)
{
	struct mm81x_sdio *sdio = (struct mm81x_sdio *)mors->drv_priv;
	struct sdio_func *func = sdio->func;
	struct mmc_host *host = func->card->host;

	sdio_claim_host(func);

	if (enable) {
		/*
		 * No need to do anything special to re-enable the sdio bus.
		 * This will happen automatically when a read/write is
		 * attempted and sdio->bulk_addr_base == 0.
		 */
		sdio->enabled = true;
		host->ops->enable_sdio_irq(host, 1);
		dev_dbg(mors->dev, "%s: enabling bus\n", __func__);
	} else {
		host->ops->enable_sdio_irq(host, 0);
		/* Window state is lost across a bus disable */
		mm81x_sdio_reset_base_address(sdio);
		sdio->enabled = false;
		dev_dbg(mors->dev, "%s: disabling bus\n", __func__);
	}

	sdio_release_host(func);
}
+
+static void mm81x_sdio_reset(struct sdio_func *func)
+{
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ sdio_release_host(func);
+
+ mdelay(20);
+
+ sdio_claim_host(func);
+ sdio_disable_func(func);
+ mmc_hw_reset(func->card);
+ sdio_enable_func(func);
+ sdio_release_host(func);
+}
+
/* bus_ops->config_burst_mode: toggle 16-word burst transfers on the chip */
static void mm81x_sdio_config_burst_mode(struct mm81x *mors, bool enable_burst)
{
	u8 burst_mode = (enable_burst) ? SDIO_WORD_BURST_SIZE_16 :
			SDIO_WORD_BURST_DISABLE;

	mm81x_hw_enable_burst_mode(mors, burst_mode);
}

/*
 * SDIO implementation of the bus abstraction declared in bus.h.
 * NOTE(review): bus.h also declares a digital_reset op that is not
 * populated here -- confirm every caller NULL-checks it.
 */
static const struct mm81x_bus_ops mm81x_sdio_ops = {
	.dm_read = mm81x_sdio_dm_read,
	.dm_write = mm81x_sdio_dm_write,
	.reg32_read = mm81x_sdio_reg32_read,
	.reg32_write = mm81x_sdio_reg32_write,
	.set_bus_enable = mm81x_sdio_bus_enable,
	.claim = mm81x_sdio_claim_host,
	.release = mm81x_sdio_release_host,
	.config_burst_mode = mm81x_sdio_config_burst_mode,
	.set_irq = mm81x_sdio_set_irq,
	.bulk_alignment = MM81X_SDIO_ALIGNMENT
};
+
/*
 * Enable SDIO function 2 under host ownership.
 * Returns 0 or the negative errno from sdio_enable_func().
 */
static int mm81x_sdio_enable(struct mm81x_sdio *sdio)
{
	int ret;
	struct sdio_func *func = sdio->func;
	struct mm81x *mors = sdio_get_drvdata(func);

	sdio_claim_host(func);
	ret = sdio_enable_func(func);
	if (ret)
		dev_err(mors->dev, "sdio_enable_func failed: %d\n", ret);
	sdio_release_host(func);
	return ret;
}

/* Counterpart of mm81x_sdio_enable(): disable function 2 */
static void mm81x_sdio_release(struct mm81x_sdio *sdio)
{
	struct sdio_func *func = sdio->func;

	sdio_claim_host(func);
	sdio_disable_func(func);
	sdio_release_host(func);
}
+
+static int mm81x_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ int ret = 0;
+ struct mm81x *mors = NULL;
+ struct mm81x_sdio *sdio;
+ struct device *dev = &func->dev;
+
+ if (func->num == 1)
+ return 0;
+
+ if (func->num != 2)
+ return -ENODEV;
+
+ mors = mm81x_core_alloc(sizeof(*sdio), dev);
+ if (!mors)
+ return -ENOMEM;
+
+ mors->bus_ops = &mm81x_sdio_ops;
+ mors->bus_type = MM81X_BUS_TYPE_SDIO;
+
+ sdio = (struct mm81x_sdio *)mors->drv_priv;
+ sdio->func = func;
+ sdio->id = id;
+ sdio->enabled = true;
+ mm81x_sdio_reset_base_address(sdio);
+
+ sdio_set_drvdata(func, mors);
+
+ ret = mm81x_sdio_enable(sdio);
+ if (ret)
+ goto err_core_free;
+
+ mm81x_sdio_config_burst_mode(mors, true);
+
+ ret = mm81x_core_init(mors);
+ if (ret)
+ goto err_sdio_release;
+
+ ret = mm81x_sdio_enable_irq(sdio);
+ if (ret)
+ goto err_core_deinit;
+
+ ret = mm81x_core_register(mors);
+ if (ret)
+ goto err_disable_irq;
+
+ return 0;
+
+err_disable_irq:
+ mm81x_sdio_disable_irq(sdio);
+err_core_deinit:
+ mm81x_core_deinit(mors);
+err_sdio_release:
+ mm81x_sdio_release(sdio);
+err_core_free:
+ mm81x_core_free(mors);
+ return ret;
+}
+
+static void mm81x_sdio_remove(struct sdio_func *func)
+{
+ struct mm81x *mors = sdio_get_drvdata(func);
+ struct mm81x_sdio *sdio = (struct mm81x_sdio *)mors->drv_priv;
+
+ if (!mors)
+ return;
+
+ mm81x_core_unregister(mors);
+ mm81x_sdio_disable_irq(sdio);
+ mm81x_core_deinit(mors);
+ mm81x_sdio_release(sdio);
+ mm81x_sdio_reset(func);
+ mm81x_core_free(mors);
+ sdio_set_drvdata(func, NULL);
+}
+
+static const struct sdio_device_id mm81x_sdio_devices[] = {
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MORSEMICRO,
+ SDIO_DEVICE_ID_MORSEMICRO_MM81XB2) },
+ {},
+};
+
+MODULE_DEVICE_TABLE(sdio, mm81x_sdio_devices);
+
+static struct sdio_driver mm81x_sdio_driver = {
+ .name = "mm81x_sdio",
+ .id_table = mm81x_sdio_devices,
+ .probe = mm81x_sdio_probe,
+ .remove = mm81x_sdio_remove,
+};
+
+module_sdio_driver(mm81x_sdio_driver);
+
+MODULE_AUTHOR("Morse Micro");
+MODULE_DESCRIPTION("Driver support for Morse Micro MM81X SDIO devices");
+MODULE_LICENSE("Dual BSD/GPL");
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread

* [PATCH wireless-next v2 23/31] wifi: mm81x: add skbq.c
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (21 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 22/31] wifi: mm81x: add sdio.c Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 24/31] wifi: mm81x: add skbq.h Lachlan Hodges
` (8 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/skbq.c | 1053 ++++++++++++++++++
1 file changed, 1053 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/skbq.c
diff --git a/drivers/net/wireless/morsemicro/mm81x/skbq.c b/drivers/net/wireless/morsemicro/mm81x/skbq.c
new file mode 100644
index 000000000000..62eeb9620e5d
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/skbq.c
@@ -0,0 +1,1053 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/ktime.h>
+#include <linux/skbuff.h>
+#include <linux/jiffies.h>
+#include "hif.h"
+#include "skbq.h"
+#include "mac.h"
+#include "command.h"
+#include "bus.h"
+
/* Returns number of bytes needed to word align */
#define BYTES_NEEDED_TO_WORD_ALIGN(bytes) \
	(((bytes) & 0x3) ? (4 - ((bytes) & 0x3)) : 0)

/*
 * Rounds down to the nearest word boundary (equivalent to (bytes) & ~0x3
 * for non-negative values). Every use of the argument is now
 * parenthesized; the old expansion used bare `bytes`, which misparsed
 * arguments containing operators of lower precedence (e.g. ?:).
 */
#define ROUND_DOWN_TO_WORD(bytes) \
	(BYTES_NEEDED_TO_WORD_ALIGN(bytes) ? \
	 ((bytes) - (4 - BYTES_NEEDED_TO_WORD_ALIGN(bytes))) : \
	 (bytes))

#define MM81X_SKBQ_MAX_TXQ_LEN 32
#define MM81X_SKBQ_TX_QUEUED_LIFETIME_MS 1000
#define MM81X_SKBQ_TX_STATUS_LIFETIME_MS (15 * 1000)

/*
 * Returns padding needed to align x up to a 4-byte boundary.
 * NOTE(review): duplicates BYTES_NEEDED_TO_WORD_ALIGN -- consider
 * consolidating on one of the two.
 */
#define MM81X_PAD4(x) (((x) & 0x3) ? (4 - ((x) & 0x3)) : 0)
+
struct mm81x_tx_status_priv {
	/*
	 * Time (jiffies) at which this packet has spent too long in the
	 * pending queue, waiting for status notification from the
	 * firmware, and should be considered lost.
	 */
	unsigned long tx_status_expiry;
};

/*
 * Map the driver-private area of an skb's mac80211 tx_info onto our
 * per-packet status bookkeeping; the BUILD_BUG_ON proves it fits in
 * the space mac80211 reserves for drivers.
 */
static inline struct mm81x_tx_status_priv *
__mm81x_skbq_tx_status_priv(struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	BUILD_BUG_ON(sizeof(struct mm81x_tx_status_priv) >
		     sizeof(tx_info->status.status_driver_data));
	return (struct mm81x_tx_status_priv *)&tx_info->status
		.status_driver_data[0];
}

/* True once the pending-status deadline armed at Tx time has passed */
static inline bool
__mm81x_skbq_has_pending_tx_skb_timed_out(struct sk_buff *skb)
{
	struct mm81x_tx_status_priv *info = __mm81x_skbq_tx_status_priv(skb);

	/* If our timestamp value is in the past then we have timed out. */
	return time_is_before_jiffies(info->tx_status_expiry);
}
+
/* Bytes currently accounted on the main Tx queue (not the pending list) */
static inline u32 __mm81x_skbq_size(const struct mm81x_skbq *mq)
{
	return mq->skbq_size;
}

/* Byte budget remaining before the queue hits MM81X_SKBQ_SIZE */
static inline u32 __mm81x_skbq_space(const struct mm81x_skbq *mq)
{
	return MM81X_SKBQ_SIZE - __mm81x_skbq_size(mq);
}

/* Queue long enough that data traffic should be throttled */
static inline bool __mm81x_skbq_over_threshold(struct mm81x_skbq *mq)
{
	return skb_queue_len(&mq->skbq) >= MM81X_SKBQ_MAX_TXQ_LEN;
}

/* Hysteresis low-water mark (two below the stop threshold) for waking */
static inline bool __mm81x_skbq_under_threshold(struct mm81x_skbq *mq)
{
	return skb_queue_len(&mq->skbq) < (MM81X_SKBQ_MAX_TXQ_LEN - 2);
}

/*
 * Unlink @skb from @queue; byte accounting applies only to the main
 * skbq. Uses the unlocked __skb_unlink, so the caller must hold mq->lock.
 */
static void __mm81x_skbq_unlink(struct mm81x_skbq *mq,
				struct sk_buff_head *queue, struct sk_buff *skb)
{
	if (queue == &mq->skbq) {
		WARN_ON(skb->len > mq->skbq_size);
		mq->skbq_size -= min(skb->len, mq->skbq_size);
	}

	__skb_unlink(skb, queue);
}
+
/*
 * Append @skb to @queue: at the tail by default, at the head when
 * @queue_at_head, or immediately before @queue_before when given.
 * The MM81X_SKBQ_SIZE byte cap applies only to the main Tx queue.
 * Caller must hold mq->lock. Returns 0 or -ENOMEM when the budget
 * is exhausted.
 */
static int __mm81x_skbq_put(struct mm81x_skbq *mq, struct sk_buff_head *queue,
			    struct sk_buff *skb, bool queue_at_head,
			    struct sk_buff *queue_before)
{
	/* Limit the size of the Tx queue, but not the pending queue */
	if (queue == &mq->skbq) {
		if (skb->len > __mm81x_skbq_space(mq))
			return -ENOMEM;

		mq->skbq_size += skb->len;
	}

	if (queue_before)
		__skb_queue_before(queue, queue_before, skb);
	else if (queue_at_head)
		__skb_queue_head(queue, skb);
	else
		__skb_queue_tail(queue, skb);

	return 0;
}

/* Stamp the queue's next sequential packet ID into the skb's mm81x header */
static void __mm81x_skbq_pkt_id(struct mm81x_skbq *mq, struct sk_buff *skb)
{
	struct mm81x_skb_hdr *hdr = (struct mm81x_skb_hdr *)skb->data;

	hdr->tx_info.pkt_id = cpu_to_le32(mq->pkt_seq++);
}
+
/*
 * Resolve which Tx queue a firmware tx-status report refers to, keyed
 * on its channel (and TID for data frames). Returns NULL for unknown
 * channels.
 */
static struct mm81x_skbq *
__mm81x_skbq_tx_status_to_skbq(struct mm81x *mors,
			       const struct mm81x_skb_tx_status *tx_sts)
{
	int aci;
	struct mm81x_skbq *mq = NULL;

	switch (tx_sts->channel) {
	case MM81X_SKB_CHAN_DATA:
	case MM81X_SKB_CHAN_DATA_NOACK:
		aci = dot11_tid_to_ac(tx_sts->tid);
		mq = mm81x_hif_get_tx_data_queue(mors, aci);
		break;
	case MM81X_SKB_CHAN_MGMT:
		mq = mm81x_hif_get_tx_mgmt_queue(mors);
		break;
	case MM81X_SKB_CHAN_BEACON:
		mq = mm81x_hif_get_tx_beacon_queue(mors);
		break;
	default:
		dev_err(mors->dev,
			"unexpected channel on reported tx status [%d]",
			tx_sts->channel);
	}

	return mq;
}

/* Strip the mm81x header plus its alignment padding after transmission */
void mm81x_skbq_pull_hdr_post_tx(struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct mm81x_skb_hdr) +
		 ((struct mm81x_skb_hdr *)skb->data)->offset);
}
+
/*
 * Re-queue a PS-filtered skb from the pending list back onto the Tx
 * queue, preserving packet-ID order so it is retransmitted in sequence.
 * Caller must hold mq->lock.
 */
static void mm81x_skbq_insert_pending(struct mm81x_skbq *mq,
				      struct sk_buff *skb, __le32 insertion_id)
{
	struct sk_buff *pfirst, *pnext;
	struct mm81x_skb_hdr *mhdr;
	struct sk_buff *tail = skb_peek_tail(&mq->skbq);

	__mm81x_skbq_unlink(mq, &mq->pending, skb);

	/* Empty Tx queue: trivial append */
	if (!tail) {
		__mm81x_skbq_put(mq, &mq->skbq, skb, false, NULL);
		return;
	}

	/* Check if it should just be inserted on to the end */
	mhdr = (struct mm81x_skb_hdr *)tail->data;
	WARN_ON(insertion_id == mhdr->tx_info.pkt_id);
	if (le32_to_cpu(insertion_id) >= le32_to_cpu(mhdr->tx_info.pkt_id)) {
		__mm81x_skbq_put(mq, &mq->skbq, skb, false, NULL);
		return;
	}

	/* Otherwise, re-insert to correct spot in skbq */
	skb_queue_walk_safe(&mq->skbq, pfirst, pnext) {
		mhdr = (struct mm81x_skb_hdr *)pfirst->data;

		WARN_ON(insertion_id == mhdr->tx_info.pkt_id);
		if (le32_to_cpu(insertion_id) <=
		    le32_to_cpu(mhdr->tx_info.pkt_id)) {
			__mm81x_skbq_put(mq, &mq->skbq, skb, false, pfirst);
			return;
		}
	}

	/* Unreachable if the queue is ID-ordered as assumed */
	WARN_ON_ONCE(1);
}
+
/*
 * Strip the mm81x header and, if this frame ended a PS-Poll/U-APSD
 * service period, tell mac80211 the SP is over.
 */
static void mm81x_skbq_sta_eosp(struct mm81x *mors, struct sk_buff *skb)
{
	struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = txi->control.vif;

	mm81x_skbq_pull_hdr_post_tx(skb);

	/*
	 * If this frame is the last frame in a PS-Poll or u-APSD SP,
	 * then mac80211 must be informed that the SP is now over.
	 */
	if (txi->flags & IEEE80211_TX_STATUS_EOSP) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		struct ieee80211_sta *sta;

		scoped_guard(rcu) {
			sta = ieee80211_find_sta(vif, hdr->addr1);
			if (sta)
				ieee80211_sta_eosp(sta);
		}
	}
}

/*
 * Drop a frame from the pending list: unlink, close any service period,
 * and hand the skb back to mac80211. Caller must hold mq->lock.
 */
static void __mm81x_skbq_drop_pending_skb(struct mm81x_skbq *mq,
					  struct sk_buff *skb)
{
	__mm81x_skbq_unlink(mq, &mq->pending, skb);
	mm81x_skbq_sta_eosp(mq->mors, skb);
	ieee80211_free_txskb(mq->mors->hw, skb);
}

/*
 * Handle a PS-filtered tx status. AP: drop the frame (the STA slept);
 * STA: requeue it for retransmission. Returns true when the skb was
 * consumed, false for other interface types. Caller must hold mq->lock.
 */
static bool mm81x_tx_h_is_ps_filtered(struct mm81x_skbq *mq,
				      struct sk_buff *skb,
				      struct mm81x_skb_tx_status *tx_sts)
{
	struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = txi->control.vif;

	WARN_ON_ONCE(!(le32_to_cpu(tx_sts->flags) &
		       MM81X_TX_STATUS_FLAGS_PS_FILTERED));

	if (vif->type == NL80211_IFTYPE_AP) {
		__mm81x_skbq_drop_pending_skb(mq, skb);
		return true;
	}

	if (vif->type == NL80211_IFTYPE_STATION) {
		mm81x_skbq_insert_pending(mq, skb, tx_sts->pkt_id);
		return true;
	}

	return false;
}
+
/*
 * Get a pending frame by its ID. This will also drop frames with
 * older packet ids that are in the list.
 * Returns the matching skb (still linked) or NULL. Caller must hold
 * mq->lock.
 */
static struct sk_buff *__mm81x_skbq_get_pending_by_id(struct mm81x *mors,
						      struct mm81x_skbq *mq,
						      u32 pkt_id)
{
	struct sk_buff *pfirst, *pnext;
	struct sk_buff *ret = NULL;

	/* Move sent packets to pending list waiting for feedback */
	skb_queue_walk_safe(&mq->pending, pfirst, pnext) {
		struct mm81x_skb_hdr *hdr =
			(struct mm81x_skb_hdr *)pfirst->data;

		if (le32_to_cpu(hdr->tx_info.pkt_id) == pkt_id) {
			ret = pfirst;
			break;

		} else if (le32_to_cpu(hdr->tx_info.pkt_id) < pkt_id &&
			   __mm81x_skbq_has_pending_tx_skb_timed_out(pfirst)) {
			/* Older, timed-out frame: its status is never coming */
			__mm81x_skbq_drop_pending_skb(mq, pfirst);
		}
	}

	return ret;
}
+
+static void mm81x_skbq_tx_status_process(struct mm81x *mors,
+ struct sk_buff *skb)
+{
+ int i;
+ struct mm81x_skb_tx_status *tx_sts =
+ (struct mm81x_skb_tx_status *)skb->data;
+ int count = skb->len / sizeof(*tx_sts);
+
+ for (i = 0; i < count; tx_sts++, i++) {
+ struct sk_buff *tx_skb;
+ struct mm81x_skbq *mq =
+ __mm81x_skbq_tx_status_to_skbq(mors, tx_sts);
+ bool is_ps_filtered = (le32_to_cpu(tx_sts->flags) &
+ MM81X_TX_STATUS_FLAGS_PS_FILTERED);
+
+ if (!mq)
+ continue;
+
+ spin_lock_bh(&mq->lock);
+ tx_skb = __mm81x_skbq_get_pending_by_id(
+ mors, mq, le32_to_cpu(tx_sts->pkt_id));
+ if (!tx_skb) {
+ dev_dbg(mors->dev,
+ "No pending pkt match found [pktid:%d chan:%d]",
+ tx_sts->pkt_id, tx_sts->channel);
+ spin_unlock_bh(&mq->lock);
+ continue;
+ }
+
+ if (le32_to_cpu(tx_sts->flags) & MM81X_TX_STATUS_PAGE_INVALID) {
+ __mm81x_skbq_drop_pending_skb(mq, tx_skb);
+ spin_unlock_bh(&mq->lock);
+ continue;
+ }
+
+ if (le32_to_cpu(tx_sts->flags) &
+ MM81X_TX_STATUS_DUTY_CYCLE_CANT_SEND) {
+ __mm81x_skbq_drop_pending_skb(mq, tx_skb);
+ spin_unlock_bh(&mq->lock);
+ continue;
+ }
+
+ if (is_ps_filtered &&
+ mm81x_tx_h_is_ps_filtered(mq, tx_skb, tx_sts)) {
+ /* Has been consumed by mm81x_tx_h_is_ps_filtered */
+ spin_unlock_bh(&mq->lock);
+ continue;
+ }
+
+ mm81x_skbq_pull_hdr_post_tx(tx_skb);
+ mm81x_skbq_skb_finish(mq, tx_skb, tx_sts);
+ spin_unlock_bh(&mq->lock);
+ }
+
+ if (mors->ps.enable && !mors->ps.suspended &&
+ (mm81x_hif_get_tx_buffered_count(mors) == 0)) {
+ /* Evaluate ps, check if it was gated on a pending tx status */
+ queue_delayed_work(mors->chip_wq, &mors->ps.delayed_eval_work,
+ 0);
+ }
+}
+
/*
 * Rx-side worker: drain the queue and dispatch each skb by channel --
 * command responses, tx-status batches, or data/management frames to
 * mac80211. Re-queues itself if more skbs arrived meanwhile.
 */
static void mm81x_skbq_dispatch_work(struct work_struct *dispatch_work)
{
	struct mm81x_skbq *mq =
		container_of(dispatch_work, struct mm81x_skbq, dispatch_work);
	struct mm81x *mors = mq->mors;
	struct mm81x_skb_hdr *hdr;
	struct sk_buff_head skbq;
	struct sk_buff *pfirst, *pnext;
	u8 channel;

	__skb_queue_head_init(&skbq);

	/* Snapshot everything currently queued onto a private list */
	mm81x_skbq_deq_num_skb(mq, &skbq, mm81x_skbq_count(mq));

	skb_queue_walk_safe(&skbq, pfirst, pnext) {
		__skb_unlink(pfirst, &skbq);
		/* Header endianness has already been adjusted */
		hdr = (struct mm81x_skb_hdr *)pfirst->data;
		channel = hdr->channel;
		/* Remove mm81x header and padding */
		__skb_pull(pfirst, sizeof(*hdr) + hdr->offset);

		switch (channel) {
		case MM81X_SKB_CHAN_COMMAND:
			mm81x_cmd_resp_process(mors, pfirst);
			break;
		case MM81X_SKB_CHAN_TX_STATUS:
			mm81x_skbq_tx_status_process(mors, pfirst);
			dev_kfree_skb_any(pfirst);
			break;
		default:
			mm81x_mac_rx_skb(mors, pfirst, &hdr->rx_status);
			break;
		}
	}

	if (mm81x_skbq_count(mq))
		queue_work(mors->net_wq, &mq->dispatch_work);
}

/*
 * Locked enqueue onto the main queue.
 * Returns 0 or -ENOMEM when the byte budget is exhausted.
 */
int mm81x_skbq_put(struct mm81x_skbq *mq, struct sk_buff *skb)
{
	int ret;

	spin_lock_bh(&mq->lock);
	ret = __mm81x_skbq_put(mq, &mq->skbq, skb, false, NULL);
	spin_unlock_bh(&mq->lock);
	return ret;
}
+
/*
 * Arm an expiry timestamp on frames that are worthless once stale
 * (probe req/resp, auth); all other frames never age out of the queue
 * (enqueue_time == 0 disables the check below).
 */
static void mm81x_skbq_set_queued_tx_skb_expiry(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);

	if (ieee80211_is_probe_req(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control) ||
	    ieee80211_is_auth(hdr->frame_control)) {
		txi->control.enqueue_time = (u32)jiffies;
	} else {
		txi->control.enqueue_time = 0;
	}
}

/* True when a frame stamped above has sat queued beyond its lifetime */
static bool mm81x_skbq_has_queued_tx_skb_expired(struct sk_buff *skb)
{
	struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);

	if (txi->control.enqueue_time > 0) {
		u32 expiry_time =
			txi->control.enqueue_time +
			msecs_to_jiffies(MM81X_SKBQ_TX_QUEUED_LIFETIME_MS);

		/* Signed diff: a 32-bit, wrap-safe time_after() equivalent */
		return (s32)((u32)jiffies - expiry_time) > 0;
	}

	return false;
}
+
/*
 * Drop selected frames (those with an expiry time set) that could not
 * be sent within a reasonable timeframe due to congestion. These would
 * only be rejected or ignored by the peer, so are only contributing to
 * the problem.
 */
void mm81x_skbq_purge_aged(struct mm81x *mors, struct mm81x_skbq *mq)
{
	struct sk_buff *pfirst;
	struct sk_buff *pnext;

	spin_lock_bh(&mq->lock);
	skb_queue_walk_safe(&mq->skbq, pfirst, pnext) {
		/*
		 * Stops at the first unexpired frame -- assumes the queue
		 * is in enqueue (hence expiry) order. TODO confirm.
		 */
		if (!mm81x_skbq_has_queued_tx_skb_expired(pfirst))
			break;
		__mm81x_skbq_unlink(mq, &mq->skbq, pfirst);
		ieee80211_free_txskb(mors->hw, pfirst);
	}

	spin_unlock_bh(&mq->lock);
}

/*
 * Free every skb on @skbq under mq->lock.
 * NOTE(review): @skbq is caller-supplied and may not be protected by
 * mq->lock; no byte accounting is adjusted here -- confirm callers only
 * pass lists whose accounting was already settled.
 */
void mm81x_skbq_purge(struct mm81x_skbq *mq, struct sk_buff_head *skbq)
{
	struct sk_buff *skb;

	spin_lock_bh(&mq->lock);
	while ((skb = __skb_dequeue(skbq)))
		dev_kfree_skb_any(skb);
	spin_unlock_bh(&mq->lock);
}
+
/*
 * Move as many skbs as fit (by byte budget) from @skbq onto the tail of
 * the main queue, preserving order; stops at the first skb that does
 * not fit, leaving it and its successors on @skbq.
 */
void mm81x_skbq_enq(struct mm81x_skbq *mq, struct sk_buff_head *skbq)
{
	int size;
	struct sk_buff *pfirst, *pnext;

	spin_lock_bh(&mq->lock);
	size = __mm81x_skbq_space(mq);
	skb_queue_walk_safe(skbq, pfirst, pnext) {
		if (pfirst->len > size)
			break;
		__skb_unlink(pfirst, skbq);
		__mm81x_skbq_put(mq, &mq->skbq, pfirst, false, NULL);
		size -= pfirst->len;
	}

	spin_unlock_bh(&mq->lock);
}

/*
 * Move up to @num_skb skbs from the head of the main queue onto @skbq,
 * preserving order. Returns how many were moved.
 */
int mm81x_skbq_deq_num_skb(struct mm81x_skbq *mq, struct sk_buff_head *skbq,
			   int num_skb)
{
	int count = 0;
	struct sk_buff *pfirst, *pnext;

	spin_lock_bh(&mq->lock);
	skb_queue_walk_safe(&mq->skbq, pfirst, pnext) {
		if (count >= num_skb)
			break;
		__mm81x_skbq_unlink(mq, &mq->skbq, pfirst);
		__skb_queue_tail(skbq, pfirst);
		++count;
	}

	spin_unlock_bh(&mq->lock);
	return count;
}
+
/*
 * Push as many skbs as fit from @skbq onto the HEAD of the main queue,
 * preserving their relative order (used to return un-sent frames).
 */
void mm81x_skbq_enq_prepend(struct mm81x_skbq *mq, struct sk_buff_head *skbq)
{
	int size;
	struct sk_buff *pfirst, *pnext;

	spin_lock_bh(&mq->lock);
	size = __mm81x_skbq_space(mq);

	/*
	 * We are doing a reverse walk here to ensure the order remains the
	 * same. This means the last member of the queue goes in, on top of
	 * the queue first and gets pushed down as more members get added to
	 * the top of the queue.
	 */
	skb_queue_reverse_walk_safe(skbq, pfirst, pnext) {
		if (pfirst->len > size)
			break;
		__skb_unlink(pfirst, skbq);
		__mm81x_skbq_put(mq, &mq->skbq, pfirst, true, NULL);
		size -= pfirst->len;
	}

	spin_unlock_bh(&mq->lock);
}

/* Stop all mac80211 data ACs and record the stopped state */
static void mm81x_skbq_stop_tx_queues(struct mm81x *mors)
{
	int queue;

	if (!mors->started)
		return;
	for (queue = IEEE80211_AC_VO; queue <= IEEE80211_AC_BK; queue++)
		ieee80211_stop_queue(mors->hw, queue);

	set_bit(MM81X_STATE_DATA_QS_STOPPED, &mors->state_flags);
}
+
/* Wake all Tx queues if all queues are below threshold */
void mm81x_skbq_may_wake_tx_queues(struct mm81x *mors)
{
	int queue;
	struct mm81x_skbq *qs;
	int num_qs;
	bool could_wake;

	if (!mors->started)
		return;

	could_wake = true;
	mm81x_hif_skbq_get_tx_qs(mors, &qs, &num_qs);
	/* Every data queue must be under the low-water mark to wake */
	for (queue = 0; queue < num_qs; queue++) {
		struct mm81x_skbq *mq = &qs[queue];

		if (!could_wake)
			break;

		spin_lock_bh(&mq->lock);
		could_wake &= (__mm81x_skbq_under_threshold(mq));
		spin_unlock_bh(&mq->lock);
	}

	if (!could_wake)
		return;

	for (queue = IEEE80211_AC_VO; queue <= IEEE80211_AC_BK; queue++)
		ieee80211_wake_queue(mors->hw, queue);

	clear_bit(MM81X_STATE_DATA_QS_STOPPED, &mors->state_flags);
}
+
+/*
+ * Enqueue an skb (which already carries its mm81x_skb_hdr) on @mq and
+ * kick the HIF worker for the given channel. Returns the
+ * __mm81x_skbq_put() result; on failure the caller owns the skb.
+ */
+static int mm81x_skbq_tx(struct mm81x_skbq *mq, struct sk_buff *skb, u8 channel)
+{
+ int rc;
+ bool mq_over_threshold;
+ struct mm81x *mors = mq->mors;
+
+ spin_lock_bh(&mq->lock);
+ rc = __mm81x_skbq_put(mq, &mq->skbq, skb, false, NULL);
+ if (rc) {
+ dev_err(mors->dev, "skb put chan %d failed (%d)", channel, rc);
+ if (channel == MM81X_SKB_CHAN_DATA) {
+ u16 queue = skb_get_queue_mapping(skb);
+
+ dev_err(mors->dev, "skb put queue %d status %d", queue,
+ ieee80211_queue_stopped(mors->hw, queue));
+ }
+ }
+
+ /*
+ * Fill packet ID in TX info.
+ * NOTE(review): this runs even when the put above failed — confirm
+ * __mm81x_skbq_put() leaves the skb untouched on error.
+ */
+ __mm81x_skbq_pkt_id(mq, skb);
+
+ mq_over_threshold = __mm81x_skbq_over_threshold(mq);
+ spin_unlock_bh(&mq->lock);
+
+ /* For data packets stop queues */
+ if (channel == MM81X_SKB_CHAN_DATA && mq_over_threshold)
+ mm81x_skbq_stop_tx_queues(mors);
+
+ /* Set the matching pending-work event and kick the chip workqueue */
+ switch (channel) {
+ case MM81X_SKB_CHAN_DATA:
+ case MM81X_SKB_CHAN_DATA_NOACK:
+ if (mm81x_is_data_tx_allowed(mors)) {
+ set_bit(MM81X_HIF_EVT_TX_DATA_PEND,
+ &mors->hif.event_flags);
+ queue_work(mors->chip_wq, &mors->hif_work);
+ }
+ break;
+ case MM81X_SKB_CHAN_MGMT:
+ set_bit(MM81X_HIF_EVT_TX_MGMT_PEND, &mors->hif.event_flags);
+ queue_work(mors->chip_wq, &mors->hif_work);
+ break;
+ case MM81X_SKB_CHAN_BEACON:
+ set_bit(MM81X_HIF_EVT_TX_BEACON_PEND, &mors->hif.event_flags);
+ queue_work(mors->chip_wq, &mors->hif_work);
+ break;
+ case MM81X_SKB_CHAN_COMMAND:
+ set_bit(MM81X_HIF_EVT_TX_COMMAND_PEND, &mors->hif.event_flags);
+ queue_work(mors->chip_wq, &mors->hif_work);
+ break;
+ default:
+ dev_err(mors->dev, "Invalid skb channel: %d", channel);
+ break;
+ }
+
+ return rc;
+}
+
+/*
+ * Stamp the skb's tx-status expiry time and move it onto the pending
+ * queue to await a status report from the chip. Caller holds mq->lock.
+ */
+static inline void __mm81x_skbq_tx_move_to_pending(struct mm81x_skbq *mq,
+ struct sk_buff *skb)
+{
+ struct mm81x_tx_status_priv *pend_info =
+ __mm81x_skbq_tx_status_priv(skb);
+
+ pend_info->tx_status_expiry =
+ jiffies + msecs_to_jiffies(MM81X_SKBQ_TX_STATUS_LIFETIME_MS);
+ __mm81x_skbq_put(mq, &mq->pending, skb, false, NULL);
+}
+
+/*
+ * Called after a batch of skbs in @skbq has been handed to the chip:
+ * skbs that expect a tx-status report are moved onto mq->pending with
+ * an expiry stamp, the rest are freed. If anything was queued, the
+ * stale-status timer is (re)armed.
+ */
+void mm81x_skbq_tx_complete(struct mm81x_skbq *mq, struct sk_buff_head *skbq)
+{
+ bool skb_awaits_tx_status = false;
+ struct mm81x *mors = mq->mors;
+ struct sk_buff *pfirst, *pnext;
+ struct sk_buff *peek = skb_peek(skbq);
+ struct mm81x_skb_hdr *hdr;
+ const bool fw_reports_bcn_tx_status =
+ mors->firmware_flags &
+ MM81X_FW_FLAGS_REPORTS_TX_BEACON_COMPLETION;
+
+ if (!peek)
+ return;
+
+ /* Move sent packets to pending list waiting for feedback */
+ spin_lock_bh(&mq->lock);
+ skb_queue_walk_safe(skbq, pfirst, pnext) {
+ __skb_unlink(pfirst, skbq);
+ hdr = (struct mm81x_skb_hdr *)pfirst->data;
+ /*
+ * If firmware doesn't give status on beacons just free
+ * them, otherwise queue and wait for response.
+ */
+ switch (hdr->channel) {
+ case MM81X_SKB_CHAN_BEACON:
+ if (fw_reports_bcn_tx_status) {
+ __mm81x_skbq_tx_move_to_pending(mq, pfirst);
+ skb_awaits_tx_status = true;
+ break;
+ }
+ /*
+ * If the FW doesn't give statuses on beacons,
+ * then mark them as done.
+ */
+ mm81x_skbq_pull_hdr_post_tx(pfirst);
+ dev_kfree_skb_any(pfirst);
+ break;
+ default:
+ if (le32_to_cpu(hdr->tx_info.flags) &
+ MM81X_TX_STATUS_FLAGS_NO_REPORT) {
+ dev_kfree_skb_any(pfirst);
+ } else {
+ /*
+ * skb has been given to the chip. Store the
+ * time and queue the skb onto the pending
+ * queue while we wait for the tx_status.
+ */
+ __mm81x_skbq_tx_move_to_pending(mq, pfirst);
+ skb_awaits_tx_status = true;
+ }
+ break;
+ }
+ }
+ spin_unlock_bh(&mq->lock);
+
+ if (skb_awaits_tx_status) {
+ spin_lock_bh(&mors->stale_status.lock);
+ mod_timer(&mors->stale_status.timer,
+ jiffies + msecs_to_jiffies(
+ MM81X_SKBQ_TX_STATUS_LIFETIME_MS));
+ spin_unlock_bh(&mors->stale_status.lock);
+ }
+}
+
+/* Returns the first skb in the pending list. */
+struct sk_buff *mm81x_skbq_tx_pending(struct mm81x_skbq *mq)
+{
+ struct sk_buff *pfirst;
+
+ spin_lock_bh(&mq->lock);
+ pfirst = skb_peek(&mq->pending);
+ spin_unlock_bh(&mq->lock);
+ return pfirst;
+}
+
+/*
+ * Drop skbs from @mq's pending queue whose tx-status wait has expired.
+ * Returns the number of skbs flushed.
+ */
+int mm81x_skbq_check_for_stale_tx(struct mm81x *mors, struct mm81x_skbq *mq)
+{
+ int flushed = 0;
+ struct sk_buff *pfirst;
+ struct sk_buff *pnext;
+
+ /* Unlocked fast-path check; the walk below re-reads under the lock */
+ if (!skb_queue_len(&mq->pending))
+ return 0;
+
+ /* Drop pending skbs whose tx-status report has timed out */
+ spin_lock_bh(&mq->lock);
+ skb_queue_walk_safe(&mq->pending, pfirst, pnext) {
+ struct mm81x_skb_hdr *hdr =
+ (struct mm81x_skb_hdr *)pfirst->data;
+
+ if (__mm81x_skbq_has_pending_tx_skb_timed_out(pfirst)) {
+ dev_dbg(mors->dev, "TX skb timed out [id:%d,chan:%d]",
+ hdr->tx_info.pkt_id, hdr->channel);
+
+ __mm81x_skbq_drop_pending_skb(mq, pfirst);
+ flushed++;
+ }
+ }
+
+ spin_unlock_bh(&mq->lock);
+ return flushed;
+}
+
+/* Remove commands from pending (or skbq if not sent) */
+static void __skbq_cmd_finish(struct mm81x_skbq *mq, struct sk_buff *skb)
+{
+ struct mm81x *mors = mq->mors;
+
+ if (skb_queue_len(&mq->pending)) {
+ __mm81x_skbq_unlink(mq, &mq->pending, skb);
+ dev_kfree_skb(skb);
+ } else if (skb_queue_len(&mq->skbq)) {
+ /* Command was probably timed out before being sent */
+ dev_dbg(mors->dev,
+ "Command pending queue empty. Removing from SKBQ.");
+ __mm81x_skbq_unlink(mq, &mq->skbq, skb);
+ dev_kfree_skb(skb);
+ } else {
+ dev_dbg(mors->dev, "Command Q not found");
+ }
+}
+
+/* Context passed to mm81x_tx_h_update_sta_iter() per completed skb */
+struct mm81x_update_sta_iter_data {
+ struct mm81x *mors;
+ struct sk_buff *skb; /* completed frame being reported */
+ struct mm81x_skb_tx_status *tx_sts; /* status from the chip */
+ int tx_attempts;
+ bool updated; /* set once the owning STA was found */
+};
+
+/*
+ * Interface iterator: look up the destination STA (addr1) on each vif
+ * and, on the first match, feed the tx status into rate control and
+ * the aggregation check, then stop further updates via iter->updated.
+ */
+static void mm81x_tx_h_update_sta_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mm81x_update_sta_iter_data *iter = data;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_sta *sta;
+
+ if (iter->updated || !iter->skb || !iter->skb->data)
+ return;
+
+ hdr = (struct ieee80211_hdr *)iter->skb->data;
+
+ /*
+ * Note that each iteration via
+ * ieee80211_iterate_active_interfaces_atomic is under an RCU critical
+ * section so there is no need for a local critical section within here
+ * when looking up the station.
+ */
+ sta = ieee80211_find_sta(vif, hdr->addr1);
+ if (!sta)
+ return;
+
+ mm81x_rc_sta_feedback_rates(iter->mors, iter->skb, sta, iter->tx_sts,
+ iter->tx_attempts);
+ mm81x_tx_h_check_aggr(sta, iter->skb);
+
+ /*
+ * In situations with multiple virtual interfaces, finish iteration
+ * once we have found our STA to prevent further iteration.
+ */
+ iter->updated = true;
+}
+
+/* TX status/Response received remove packet from pending TX finish */
+static void __skbq_data_tx_finish(struct mm81x_skbq *mq, struct sk_buff *skb,
+ struct mm81x_skb_tx_status *tx_sts)
+{
+ struct mm81x *mors = mq->mors;
+ struct mm81x_update_sta_iter_data iter = {};
+
+ __mm81x_skbq_unlink(mq, &mq->pending, skb);
+ iter.mors = mors;
+ iter.skb = skb;
+ iter.tx_sts = tx_sts;
+ iter.tx_attempts = mm81x_tx_h_get_attempts(mors, tx_sts);
+
+ ieee80211_iterate_active_interfaces_atomic(mors->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ mm81x_tx_h_update_sta_iter,
+ &iter);
+
+ ieee80211_tx_status_skb(mors->hw, skb);
+}
+
+/*
+ * Complete an skb whose status arrived: command queues free the skb,
+ * data queues report the tx status back to mac80211.
+ */
+void mm81x_skbq_skb_finish(struct mm81x_skbq *mq, struct sk_buff *skb,
+ struct mm81x_skb_tx_status *tx_sts)
+{
+ if (mq->flags & MM81X_HIF_FLAGS_COMMAND)
+ __skbq_cmd_finish(mq, skb);
+ else
+ __skbq_data_tx_finish(mq, skb, tx_sts);
+}
+
+/* Drop every queued and pending TX skb on @mq, freeing them via mac80211 */
+void mm81x_skbq_tx_flush(struct mm81x_skbq *mq)
+{
+ struct sk_buff *pfirst, *pnext;
+
+ spin_lock_bh(&mq->lock);
+ skb_queue_walk_safe(&mq->pending, pfirst, pnext) {
+ __mm81x_skbq_unlink(mq, &mq->pending, pfirst);
+ ieee80211_free_txskb(mq->mors->hw, pfirst);
+ }
+
+ skb_queue_walk_safe(&mq->skbq, pfirst, pnext) {
+ __mm81x_skbq_unlink(mq, &mq->skbq, pfirst);
+ ieee80211_free_txskb(mq->mors->hw, pfirst);
+ }
+ spin_unlock_bh(&mq->lock);
+}
+
+/*
+ * Initialise an skb queue. To-host queues additionally get a dispatch
+ * work item for delivering received frames.
+ */
+void mm81x_skbq_init(struct mm81x *mors, struct mm81x_skbq *mq, u16 flags)
+{
+ spin_lock_init(&mq->lock);
+ __skb_queue_head_init(&mq->skbq);
+ __skb_queue_head_init(&mq->pending);
+ mq->mors = mors;
+ mq->skbq_size = 0;
+ mq->flags = flags;
+ mq->pkt_seq = 0;
+ if (flags & MM81X_HIF_FLAGS_DIR_TO_HOST)
+ INIT_WORK(&mq->dispatch_work, mm81x_skbq_dispatch_work);
+}
+
+/*
+ * Tear down an skb queue: cancel any dispatch work and purge both the
+ * main and pending lists. Data still queued is dropped.
+ */
+void mm81x_skbq_finish(struct mm81x_skbq *mq)
+{
+ if (mq->skbq_size > 0)
+ dev_dbg(mq->mors->dev,
+ "Purging a non empty MorseQ. Dropping data!");
+
+ /* Clean up link to hif */
+ if (mq->flags & MM81X_HIF_FLAGS_DIR_TO_HOST)
+ cancel_work_sync(&mq->dispatch_work);
+ mm81x_skbq_purge(mq, &mq->skbq);
+ mm81x_skbq_purge(mq, &mq->pending);
+ mq->skbq_size = 0;
+}
+
+/* Locked wrapper returning the queue's current byte size */
+u32 mm81x_skbq_size(struct mm81x_skbq *mq)
+{
+ u32 count;
+
+ spin_lock_bh(&mq->lock);
+ count = __mm81x_skbq_size(mq);
+ spin_unlock_bh(&mq->lock);
+ return count;
+}
+
+/* Locked wrapper returning the number of skbs waiting on the main queue */
+u32 mm81x_skbq_count(struct mm81x_skbq *mq)
+{
+ u32 count = 0;
+
+ spin_lock_bh(&mq->lock);
+ count += skb_queue_len(&mq->skbq);
+ spin_unlock_bh(&mq->lock);
+ return count;
+}
+
+/* Locked wrapper returning the number of skbs awaiting a tx status */
+u32 mm81x_skbq_pending_count(struct mm81x_skbq *mq)
+{
+ u32 count;
+
+ spin_lock_bh(&mq->lock);
+ count = skb_queue_len(&mq->pending);
+ spin_unlock_bh(&mq->lock);
+ return count;
+}
+
+/* Queue length for TX purposes; 0 while data TX is not allowed */
+u32 mm81x_skbq_count_tx_ready(struct mm81x_skbq *mq)
+{
+ struct mm81x *mors = mq->mors;
+
+ if (!mm81x_is_data_tx_allowed(mors))
+ return 0;
+
+ return mm81x_skbq_count(mq);
+}
+
+/* Locked wrapper returning the remaining byte capacity of the queue */
+u32 mm81x_skbq_space(struct mm81x_skbq *mq)
+{
+ u32 space;
+
+ spin_lock_bh(&mq->lock);
+ space = __mm81x_skbq_space(mq);
+ spin_unlock_bh(&mq->lock);
+
+ return space;
+}
+
+/*
+ * Allocate an skb with headroom reserved for the mm81x_skb_hdr plus
+ * bus alignment, tailroom for 4-byte padding, and @length bytes of
+ * data already put. Returns NULL on allocation failure.
+ */
+struct sk_buff *mm81x_skbq_alloc_skb(struct mm81x_skbq *mq, unsigned int length)
+{
+ struct sk_buff *skb;
+ int tx_headroom = sizeof(struct mm81x_skb_hdr) +
+ mm81x_bus_get_alignment(mq->mors);
+ int skb_len = tx_headroom + length + MM81X_PAD4(length);
+
+ skb = dev_alloc_skb(skb_len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, tx_headroom);
+ skb_put(skb, length);
+ return skb;
+}
+
+/*
+ * Reject transmission while the matching host-to-chip direction
+ * (command or data/management) is blocked. Returns 0 or -EPERM.
+ */
+static int mm81x_skb_tx_h_validate_channel(const struct mm81x *mors, u8 channel)
+{
+ if (channel == MM81X_SKB_CHAN_COMMAND) {
+ if (test_bit(MM81X_STATE_HOST_TO_CHIP_CMD_BLOCKED,
+ &mors->state_flags))
+ return -EPERM;
+ } else {
+ if (test_bit(MM81X_STATE_HOST_TO_CHIP_TX_BLOCKED,
+ &mors->state_flags))
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+/*
+ * Prepend a bus-aligned mm81x_skb_hdr to *@skb_orig, pad the tail to a
+ * 4-byte boundary and queue the frame on @mq for transmission.
+ * The skb is consumed on every path (skb_pad() frees it itself on
+ * failure). NOTE(review): *skb_orig is not cleared on error — confirm
+ * callers never reuse it after a non-zero return.
+ */
+int mm81x_skbq_skb_tx(struct mm81x_skbq *mq, struct sk_buff **skb_orig,
+ struct mm81x_skb_tx_info *tx_info, u8 channel)
+{
+ int ret;
+ struct mm81x_skb_hdr hdr;
+ struct mm81x *mors = mq->mors;
+ size_t end_of_skb_pad;
+ struct sk_buff *skb = *skb_orig;
+ u8 *aligned_head, *data;
+
+ if (test_bit(MM81X_STATE_CHIP_UNRESPONSIVE, &mors->state_flags)) {
+ dev_kfree_skb_any(skb);
+ return -ENODEV;
+ }
+
+ ret = mm81x_skb_tx_h_validate_channel(mors, channel);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ mm81x_skbq_set_queued_tx_skb_expiry(skb);
+
+ /* Push the header back to a bus-aligned address; hdr.offset records
+ * the resulting gap between the header and the payload.
+ */
+ data = skb->data;
+ aligned_head = PTR_ALIGN_DOWN((data - sizeof(hdr)),
+ mm81x_bus_get_alignment(mors));
+ hdr.sync = MM81X_SKB_HEADER_SYNC;
+ hdr.channel = channel;
+ hdr.len = cpu_to_le16(skb->len);
+ hdr.offset = data - (aligned_head + sizeof(hdr));
+ hdr.checksum_upper = 0;
+ hdr.checksum_lower = 0;
+ if (tx_info)
+ memcpy(&hdr.tx_info, tx_info, sizeof(*tx_info));
+ else
+ memset(&hdr.tx_info, 0, sizeof(hdr.tx_info));
+
+ skb_push(skb, data - aligned_head);
+ memcpy(skb->data, &hdr, sizeof(hdr));
+
+ /* skb_pad() frees the skb if it cannot extend it */
+ end_of_skb_pad = MM81X_PAD4(skb->len);
+ if (end_of_skb_pad && skb_pad(skb, end_of_skb_pad))
+ return -EINVAL;
+
+ ret = mm81x_skbq_tx(mq, skb, channel);
+ if (ret) {
+ dev_err(mors->dev, "mm81x_skbq_tx fail: %d", ret);
+ dev_kfree_skb_any(skb);
+ }
+
+ return ret;
+}
+
+/* Block further data transmission by setting the DATA_TX_STOPPED state */
+void mm81x_skbq_data_traffic_pause(struct mm81x *mors)
+{
+ set_bit(MM81X_STATE_DATA_TX_STOPPED, &mors->state_flags);
+ /* power-save requirements will be re-evaluated by the caller */
+}
+
+/* Re-enable data transmission and flag pending TX data for the worker */
+void mm81x_skbq_data_traffic_resume(struct mm81x *mors)
+{
+ clear_bit(MM81X_STATE_DATA_TX_STOPPED, &mors->state_flags);
+
+ /* Set the TX_DATA_PEND bit. This will kick the transmission path to
+ * send any frames pending in the TX buffers, and wake the mac80211
+ * data Qs if they were previously stopped.
+ */
+ set_bit(MM81X_HIF_EVT_TX_DATA_PEND, &mors->hif.event_flags);
+}
+
+/*
+ * Verify the 24-bit XOR checksum carried in the mm81x_skb_hdr at
+ * @data. The buffer is XORed in 32-bit words, so @data is expected to
+ * be 32-bit aligned and sized to a word multiple — TODO confirm the
+ * callers guarantee this. Note: the header's checksum fields are
+ * zeroed in place as part of the computation.
+ */
+bool mm81x_skbq_validate_checksum(u8 *data)
+{
+ int i;
+ u32 xor = 0;
+ struct mm81x_skb_hdr *skb_hdr = (struct mm81x_skb_hdr *)data;
+ struct ieee80211_hdr *hdr =
+ (struct ieee80211_hdr *)(data + sizeof(*skb_hdr));
+ u16 len = le16_to_cpu(skb_hdr->len) + sizeof(*skb_hdr);
+ u32 *data_to_xor = (u32 *)data;
+ u32 header_xor = (le16_to_cpu(skb_hdr->checksum_upper) << 8) |
+ (skb_hdr->checksum_lower);
+
+ /*
+ * For data frames, calculate the xor over the skb header, mac header
+ * and ccmp header only. For all other channels the xor is calculated
+ * over the full skb.
+ */
+ if (skb_hdr->channel == MM81X_SKB_CHAN_DATA &&
+ (ieee80211_is_data(hdr->frame_control) ||
+ ieee80211_is_data_qos(hdr->frame_control))) {
+ u16 data_len = sizeof(*skb_hdr) +
+ sizeof(struct ieee80211_qos_hdr) +
+ IEEE80211_CCMP_HDR_LEN;
+
+ len = min(len, data_len);
+ len = ROUND_DOWN_TO_WORD(len);
+ }
+
+ /* The checksum fields must be zero while XORing the header itself */
+ skb_hdr->checksum_upper = 0;
+ skb_hdr->checksum_lower = 0;
+
+ for (i = 0; i < len; i += 4) {
+ xor ^= *data_to_xor;
+ data_to_xor++;
+ }
+
+ /* Only the low 24 bits are carried in the header */
+ xor &= 0x00FFFFFF;
+
+ return xor == header_xor;
+}
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 24/31] wifi: mm81x: add skbq.h
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (22 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 23/31] wifi: mm81x: add skbq.c Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 25/31] wifi: mm81x: add usb.c Lachlan Hodges
` (7 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/skbq.h | 218 +++++++++++++++++++
1 file changed, 218 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/skbq.h
diff --git a/drivers/net/wireless/morsemicro/mm81x/skbq.h b/drivers/net/wireless/morsemicro/mm81x/skbq.h
new file mode 100644
index 000000000000..9930493141cf
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/skbq.h
@@ -0,0 +1,218 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+
+#ifndef _MM81X_SKBQ_H_
+#define _MM81X_SKBQ_H_
+
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include "rate_code.h"
+
+/* Sync value of skb header to indicate a valid skb */
+#define MM81X_SKB_HEADER_SYNC (0xAA)
+/* Sync value indicating that the chip owns this skb */
+#define MM81X_SKB_HEADER_CHIP_OWNED_SYNC (0xBB)
+
+/* Bit flags for the mm81x_skb_tx_info/mm81x_skb_tx_status flags word */
+enum mm81x_tx_status_and_conf_flags {
+ MM81X_TX_STATUS_FLAGS_NO_ACK = BIT(0),
+ MM81X_TX_STATUS_FLAGS_NO_REPORT = BIT(1),
+ MM81X_TX_CONF_FLAGS_CTL_AMPDU = BIT(2),
+ MM81X_TX_CONF_FLAGS_HW_ENCRYPT = BIT(3),
+ MM81X_TX_CONF_FLAGS_VIF_ID = (BIT(4) | BIT(5) | BIT(6) | BIT(7) |
+ BIT(8) | BIT(9) | BIT(10) | BIT(11)),
+ MM81X_TX_CONF_FLAGS_KEY_IDX = (BIT(12) | BIT(13) | BIT(14)),
+ MM81X_TX_STATUS_FLAGS_PS_FILTERED = (BIT(15)),
+ MM81X_TX_CONF_IGNORE_TWT = (BIT(16)),
+ MM81X_TX_STATUS_PAGE_INVALID = (BIT(17)),
+ MM81X_TX_CONF_NO_PS_BUFFER = (BIT(18)),
+ MM81X_TX_STATUS_DUTY_CYCLE_CANT_SEND = (BIT(19)),
+ MM81X_TX_CONF_HAS_PV1_BPN_IN_BODY = (BIT(21)),
+ MM81X_TX_CONF_FLAGS_SEND_AFTER_DTIM = (BIT(22)),
+ MM81X_TX_STATUS_WAS_AGGREGATED = (BIT(23)),
+ MM81X_TX_CONF_FLAGS_FULLMAC_REPORT = BIT(24),
+ MM81X_TX_CONF_FLAGS_IMMEDIATE_REPORT = (BIT(31))
+};
+
+/* Getter and setter macros for vif id */
+#define MM81X_TX_CONF_FLAGS_VIF_ID_MASK (0xFF)
+#define MM81X_TX_CONF_FLAGS_VIF_ID_SET(x) \
+ (((x) & MM81X_TX_CONF_FLAGS_VIF_ID_MASK) << 4)
+#define MM81X_TX_CONF_FLAGS_VIF_ID_GET(x) \
+ (((x) & MM81X_TX_CONF_FLAGS_VIF_ID) >> 4)
+
+/* Getter and setter macros for key index */
+#define MM81X_TX_CONF_FLAGS_KEY_IDX_SET(x) (((x) & 0x07) << 12)
+#define MM81X_TX_CONF_FLAGS_KEY_IDX_GET(x) \
+ (((x) & MM81X_TX_CONF_FLAGS_KEY_IDX) >> 12)
+
+/* Bit flags for the mm81x_skb_rx_status flags word */
+enum mm81x_rx_status_flags {
+ MM81X_RX_STATUS_FLAGS_ERROR = BIT(0),
+ MM81X_RX_STATUS_FLAGS_DECRYPTED = BIT(1),
+ MM81X_RX_STATUS_FLAGS_FCS_INCLUDED = BIT(2),
+ MM81X_RX_STATUS_FLAGS_EOF = BIT(3),
+ MM81X_RX_STATUS_FLAGS_AMPDU = BIT(4),
+ MM81X_RX_STATUS_FLAGS_NDP = BIT(7),
+ MM81X_RX_STATUS_FLAGS_UPLINK = BIT(8),
+ MM81X_RX_STATUS_FLAGS_RI = (BIT(9) | BIT(10)),
+ MM81X_RX_STATUS_FLAGS_NDP_TYPE = (BIT(11) | BIT(12) | BIT(13)),
+ MM81X_RX_STATUS_FLAGS_CRC_ERROR = BIT(14),
+ MM81X_RX_STATUS_FLAGS_VIF_ID = GENMASK(24, 17),
+};
+
+/* Getter and Setter macros for vif id */
+#define MM81X_RX_STATUS_FLAGS_VIF_ID_MASK (0xFF)
+#define MM81X_RX_STATUS_FLAGS_VIF_ID_SET(x) \
+ (((x) & MM81X_RX_STATUS_FLAGS_VIF_ID_MASK) << 17)
+#define MM81X_RX_STATUS_FLAGS_VIF_ID_GET(x) \
+ (((x) & MM81X_RX_STATUS_FLAGS_VIF_ID) >> 17)
+#define MM81X_RX_STATUS_FLAGS_VIF_ID_CLEAR(x) \
+ ((x) & ~(MM81X_RX_STATUS_FLAGS_VIF_ID_MASK << 17))
+
+/* Getter macro for guard interval */
+#define MM81X_RX_STATUS_FLAGS_UPL_IND_GET(x) \
+ (((x) & MM81X_RX_STATUS_FLAGS_UPLINK) >> 8)
+
+/* Getter macro for response indication */
+#define MM81X_RX_STATUS_FLAGS_RI_GET(x) (((x) & MM81X_RX_STATUS_FLAGS_RI) >> 9)
+
+/* Getter macro for NDP type */
+#define MM81X_RX_STATUS_FLAGS_NDP_TYPE_GET(x) \
+ (((x) & MM81X_RX_STATUS_FLAGS_NDP_TYPE) >> 11)
+
+/* Logical channel carried in mm81x_skb_hdr::channel */
+enum mm81x_skb_channel {
+ MM81X_SKB_CHAN_DATA = 0x0,
+ MM81X_SKB_CHAN_NDP_FRAMES = 0x1,
+ MM81X_SKB_CHAN_DATA_NOACK = 0x2,
+ MM81X_SKB_CHAN_BEACON = 0x3,
+ MM81X_SKB_CHAN_MGMT = 0x4,
+ MM81X_SKB_CHAN_INTERNAL_CRIT_BEACON = 0x80,
+ MM81X_SKB_CHAN_COMMAND = 0xFE,
+ MM81X_SKB_CHAN_TX_STATUS = 0xFF
+};
+
+#define MM81X_SKB_MAX_RATES (4)
+
+/* One entry of the rate series used in TX info and TX status */
+struct mm81x_skb_rate_info {
+ mm81x_rate_code_t mm81x_ratecode;
+ u8 count; /* attempt count for this rate — TODO confirm semantics */
+} __packed;
+
+/* TX completion status reported by the chip for a previously sent skb */
+struct mm81x_skb_tx_status {
+ __le32 flags; /* enum mm81x_tx_status_and_conf_flags bits */
+ __le32 pkt_id; /* matches the pkt_id stamped at enqueue time */
+ u8 tid;
+ u8 channel;
+ __le16 ampdu_info;
+ struct mm81x_skb_rate_info rates[MM81X_SKB_MAX_RATES];
+} __packed;
+
+#define MM81X_TXSTS_AMPDU_INFO_GET_TAG(x) (((x) >> 10) & 0x3F)
+#define MM81X_TXSTS_AMPDU_INFO_GET_LEN(x) (((x) >> 5) & 0x1F)
+#define MM81X_TXSTS_AMPDU_INFO_GET_SUC(x) ((x) & 0x1F)
+
+/* Per-packet TX parameters passed to the chip with the frame */
+struct mm81x_skb_tx_info {
+ __le32 flags; /* enum mm81x_tx_status_and_conf_flags bits */
+ __le32 pkt_id;
+ u8 tid;
+ u8 tid_params; /* TX_INFO_TID_PARAMS_* bits */
+ u8 mmss_params; /* TX_INFO_MMSS_PARAMS_* bits */
+ u8 padding[1];
+ struct mm81x_skb_rate_info rates[MM81X_SKB_MAX_RATES];
+} __packed;
+
+#define TX_INFO_TID_PARAMS_MAX_REORDER_BUF 0x1f
+#define TX_INFO_TID_PARAMS_AMPDU_ENABLED 0x20
+#define TX_INFO_TID_PARAMS_AMSDU_SUPPORTED 0x40
+#define TX_INFO_TID_PARAMS_USE_LEGACY_BA 0x80
+
+/* Bitmap for MMSS (Minimum MPDU start spacing) parameters
+ * +-----------+-----------+
+ * | Morse | MMSS set |
+ * | MMSS | by S1G cap|
+ * | offset | IE |
+ * |-----------|-----------|
+ * |b7|b6|b5|b4|b3|b2|b1|b0|
+ */
+#define TX_INFO_MMSS_PARAMS_MMSS_MASK GENMASK(3, 0)
+#define TX_INFO_MMSS_PARAMS_MMSS_OFFSET_START 4
+#define TX_INFO_MMSS_PARAMS_MMSS_OFFSET_MASK GENMASK(7, 4)
+#define TX_INFO_MMSS_PARAMS_SET_MMSS(x) ((x) & TX_INFO_MMSS_PARAMS_MMSS_MASK)
+#define TX_INFO_MMSS_PARAMS_SET_MMSS_OFFSET(x) \
+ (((x) << TX_INFO_MMSS_PARAMS_MMSS_OFFSET_START) & \
+ TX_INFO_MMSS_PARAMS_MMSS_OFFSET_MASK)
+
+/* RX metadata the chip attaches to each received frame */
+struct mm81x_skb_rx_status {
+ __le32 flags; /* enum mm81x_rx_status_flags bits */
+ mm81x_rate_code_t mm81x_ratecode;
+ __le16 rssi;
+ __le16 freq_100khz;
+ u8 bss_color;
+ s8 noise_dbm;
+ /** Padding for word alignment */
+ u8 padding[2];
+ __le64 rx_timestamp_us;
+} __packed;
+
+/*
+ * Bus-level header prepended to every skb exchanged with the chip.
+ * The trailing union carries direction-specific metadata (TX request,
+ * TX status, or RX status).
+ */
+struct mm81x_skb_hdr {
+ u8 sync; /* MM81X_SKB_HEADER_SYNC / _CHIP_OWNED_SYNC */
+ u8 channel; /* enum mm81x_skb_channel */
+ __le16 len; /* payload length, excluding this header */
+ u8 offset; /* gap between header end and payload start */
+ u8 checksum_lower;
+ __le16 checksum_upper;
+ union {
+ struct mm81x_skb_tx_info tx_info;
+ struct mm81x_skb_tx_status tx_status;
+ struct mm81x_skb_rx_status rx_status;
+ };
+} __packed;
+
+#define MM81X_SKBQ_SIZE (4 * 128 * 1024)
+
+struct mm81x;
+
+struct mm81x_skbq {
+ struct mm81x *mors;
+ u32 pkt_seq; /* SKB sequence used in tx_status */
+ u16 flags; /* MM81X_HIF_FLAGS_* (direction, command queue) */
+ u32 skbq_size; /* current off loaded size */
+ /* protects skbq, pending, skbq_size and pkt_seq */
+ spinlock_t lock;
+ struct sk_buff_head skbq;
+ struct sk_buff_head pending; /* packets sent pending feedback */
+ struct work_struct dispatch_work; /* to-host delivery (DIR_TO_HOST) */
+};
+
+void mm81x_skbq_purge(struct mm81x_skbq *mq, struct sk_buff_head *skbq);
+void mm81x_skbq_purge_aged(struct mm81x *mors, struct mm81x_skbq *mq);
+u32 mm81x_skbq_space(struct mm81x_skbq *mq);
+u32 mm81x_skbq_size(struct mm81x_skbq *mq);
+int mm81x_skbq_deq_num_skb(struct mm81x_skbq *mq, struct sk_buff_head *skbq,
+ int num_skb);
+struct sk_buff *mm81x_skbq_alloc_skb(struct mm81x_skbq *mq,
+ unsigned int length);
+int mm81x_skbq_skb_tx(struct mm81x_skbq *mq, struct sk_buff **skb,
+ struct mm81x_skb_tx_info *tx_info, u8 channel);
+int mm81x_skbq_put(struct mm81x_skbq *mq, struct sk_buff *skb);
+void mm81x_skbq_enq(struct mm81x_skbq *mq, struct sk_buff_head *skbq);
+void mm81x_skbq_enq_prepend(struct mm81x_skbq *mq, struct sk_buff_head *skbq);
+void mm81x_skbq_tx_complete(struct mm81x_skbq *mq, struct sk_buff_head *skbq);
+struct sk_buff *mm81x_skbq_tx_pending(struct mm81x_skbq *mq);
+void mm81x_skbq_init(struct mm81x *mors, struct mm81x_skbq *mq, u16 flags);
+void mm81x_skbq_finish(struct mm81x_skbq *mq);
+void mm81x_skbq_pull_hdr_post_tx(struct sk_buff *skb);
+void mm81x_skbq_mon_dump(struct mm81x *mors, struct seq_file *file);
+void mm81x_skbq_skb_finish(struct mm81x_skbq *mq, struct sk_buff *skb,
+ struct mm81x_skb_tx_status *tx_sts);
+void mm81x_skbq_tx_flush(struct mm81x_skbq *mq);
+int mm81x_skbq_check_for_stale_tx(struct mm81x *mors, struct mm81x_skbq *mq);
+void mm81x_skbq_may_wake_tx_queues(struct mm81x *mors);
+u32 mm81x_skbq_count_tx_ready(struct mm81x_skbq *mq);
+u32 mm81x_skbq_count(struct mm81x_skbq *mq);
+u32 mm81x_skbq_pending_count(struct mm81x_skbq *mq);
+void mm81x_skbq_data_traffic_pause(struct mm81x *mors);
+void mm81x_skbq_data_traffic_resume(struct mm81x *mors);
+bool mm81x_skbq_validate_checksum(u8 *data);
+
+#endif /* !_MM81X_SKBQ_H_ */
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 25/31] wifi: mm81x: add usb.c
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (23 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 24/31] wifi: mm81x: add skbq.h Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 26/31] wifi: mm81x: add yaps.c Lachlan Hodges
` (6 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/usb.c | 938 ++++++++++++++++++++
1 file changed, 938 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/usb.c
diff --git a/drivers/net/wireless/morsemicro/mm81x/usb.c b/drivers/net/wireless/morsemicro/mm81x/usb.c
new file mode 100644
index 000000000000..79958462c814
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/usb.c
@@ -0,0 +1,938 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include "hif.h"
+#include "bus.h"
+#include "mac.h"
+#include "core.h"
+
+/*
+ * URB timeout in milliseconds. If an URB does not complete within this
+ * time, it will be killed. This timeout needs to account for USB suspendand
+ * resume occurring before the URB can be transferred, and it also needs to
+ * account for transferring USB_MAX_TRANSFER_SIZE bytes over a potentially
+ * slow, congested USB Full Speed link.
+ */
+#define URB_TIMEOUT_MS 250
+
+/* High speed USB 2^(4-1) * 125usec = 1msec */
+#define MM81X_USB_INTERRUPT_INTERVAL 4
+
+/* Max bytes per USB read/write */
+#define USB_MAX_TRANSFER_SIZE (16 * 1024)
+
+/* INT EP buffer size */
+#define MM81X_EP_INT_BUFFER_SIZE 8
+
+/* Morse vendor IDs */
+#define MM81X_VENDOR_ID 0x325b
+#define MM81X_MM810X_PRODUCT_ID 0x8100
+
+/* Power management runtime auto-suspend delay value in milliseconds */
+#define PM_RUNTIME_AUTOSUSPEND_DELAY_MS 100
+
+/* Index into mm81x_usb::endpoints — one slot per pipe purpose */
+enum mm81x_usb_endpoints {
+ MM81X_EP_CMD = 0,
+ MM81X_EP_INT,
+ MM81X_EP_MEM_RD,
+ MM81X_EP_MEM_WR,
+ MM81X_EP_REG_RD,
+ MM81X_EP_REG_WR,
+ MM81X_EP_EP_MAX,
+};
+
+/* Per-endpoint state: transfer buffer, URB and endpoint address */
+struct mm81x_usb_endpoint {
+ unsigned char *buffer; /* transfer buffer for this endpoint */
+ struct urb *urb;
+ __u8 addr; /* USB endpoint address */
+ int size; /* buffer size in bytes */
+};
+
+/* Bit numbers for mm81x_usb::flags */
+enum mm81x_usb_flags { MM81X_USB_FLAG_ATTACHED, MM81X_USB_FLAG_SUSPENDED };
+
+/* Per-device USB bus state (stored in mm81x::drv_priv) */
+struct mm81x_usb {
+ struct usb_device *udev;
+ struct usb_interface *interface;
+ struct mm81x_usb_endpoint endpoints[MM81X_EP_EP_MAX];
+ int errors; /* last URB completion status; 0 while clean */
+
+ /* serialise USB device struct */
+ struct mutex lock;
+
+ /* serialise USB bus access */
+ struct mutex bus_lock;
+
+ bool ongoing_cmd; /* cleared by the command URB callback */
+ bool ongoing_rw; /* cleared by the mem read/write URB callback */
+ wait_queue_head_t rw_in_wait; /* woken when a callback completes */
+ unsigned long flags; /* enum mm81x_usb_flags bits */
+};
+
+/* Values for mm81x_usb_command::dir */
+enum mm81x_usb_command_direction {
+ MM81X_USB_WRITE = 0x00,
+ MM81X_USB_READ = 0x80,
+ MM81X_USB_RESET = 0x02,
+};
+
+/* Control message sent on MM81X_EP_CMD ahead of each bulk transfer */
+struct mm81x_usb_command {
+ __le32 dir; /* Next BULK direction */
+ __le32 address; /* Next BULK address */
+ __le32 length; /* Next BULK size */
+};
+
+static const struct usb_device_id mm81x_usb_table[] = {
+ { USB_DEVICE(MM81X_VENDOR_ID, MM81X_MM810X_PRODUCT_ID) },
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, mm81x_usb_table);
+
+/* Work item: service a chip interrupt with the bus claimed */
+static void mm81x_usb_irq_work(struct work_struct *work)
+{
+ struct mm81x *mors = container_of(work, struct mm81x, usb_irq_work);
+
+ mm81x_claim_bus(mors);
+ mm81x_hw_irq_handle(mors);
+ mm81x_release_bus(mors);
+}
+
+/* URB statuses treated as a surprise device disconnect */
+static bool mm81x_usb_urb_status_is_disconnect(const struct urb *urb)
+{
+ return ((urb->status == -EPROTO) || (urb->status == -EILSEQ) ||
+ (urb->status == -ETIME) || (urb->status == -EPIPE));
+}
+
+/*
+ * Completion handler for the interrupt-in URB: resubmits the URB and
+ * queues usb_irq_work to service the chip interrupt. Fatal statuses
+ * mark the device detached/unresponsive instead.
+ */
+static void mm81x_usb_int_handler(struct urb *urb)
+{
+ int ret;
+ struct mm81x *mors = urb->context;
+ struct mm81x_usb *musb = (struct mm81x_usb *)mors->drv_priv;
+
+ if (!test_bit(MM81X_USB_FLAG_ATTACHED, &musb->flags))
+ return;
+
+ if (urb->status) {
+ if (mm81x_usb_urb_status_is_disconnect(urb)) {
+ clear_bit(MM81X_USB_FLAG_ATTACHED, &musb->flags);
+ set_bit(MM81X_STATE_CHIP_UNRESPONSIVE,
+ &mors->state_flags);
+ dev_dbg(mors->dev,
+ "USB sudden disconnect detected in %s",
+ __func__);
+ return;
+ }
+
+ /* unlink/shutdown statuses are expected during teardown */
+ if (!(urb->status == -ENOENT || urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN))
+ dev_err(mors->dev, "- nonzero read status received: %d",
+ urb->status);
+ }
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+
+ /* usb_kill_urb has been called */
+ if (ret == -EPERM)
+ return;
+ else if (ret)
+ dev_err(mors->dev, "error: resubmit urb %p err code %d", urb,
+ ret);
+
+ /* The interrupt is still serviced even if the resubmit failed */
+ queue_work(mors->chip_wq, &mors->usb_irq_work);
+}
+
+/*
+ * Allocate the interrupt-in URB plus its coherent buffer and submit
+ * it, arming chip interrupt delivery. Returns 0 or a negative errno;
+ * all resources are released on the error paths.
+ */
+static int mm81x_usb_int_enable(struct mm81x *mors)
+{
+ int ret = 0;
+ struct mm81x_usb *musb = (struct mm81x_usb *)mors->drv_priv;
+ struct urb *urb;
+
+ if (!test_bit(MM81X_USB_FLAG_ATTACHED, &musb->flags))
+ return -ENODEV;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ musb->endpoints[MM81X_EP_INT].urb = urb;
+
+ musb->endpoints[MM81X_EP_INT].buffer =
+ usb_alloc_coherent(musb->udev, MM81X_EP_INT_BUFFER_SIZE,
+ GFP_KERNEL, &urb->transfer_dma);
+ if (!musb->endpoints[MM81X_EP_INT].buffer) {
+ dev_err(mors->dev, "couldn't allocate transfer_buffer");
+ ret = -ENOMEM;
+ goto error_set_urb_null;
+ }
+
+ usb_fill_int_urb(
+ musb->endpoints[MM81X_EP_INT].urb, musb->udev,
+ usb_rcvintpipe(musb->udev, musb->endpoints[MM81X_EP_INT].addr),
+ musb->endpoints[MM81X_EP_INT].buffer, MM81X_EP_INT_BUFFER_SIZE,
+ mm81x_usb_int_handler, mors, MM81X_USB_INTERRUPT_INTERVAL);
+ /* buffer came from usb_alloc_coherent(): skip DMA mapping */
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ dev_err(mors->dev, "Couldn't submit urb. Error number %d", ret);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ usb_free_coherent(musb->udev, MM81X_EP_INT_BUFFER_SIZE,
+ musb->endpoints[MM81X_EP_INT].buffer,
+ urb->transfer_dma);
+error_set_urb_null:
+ musb->endpoints[MM81X_EP_INT].urb = NULL;
+ usb_free_urb(urb);
+out:
+ return ret;
+}
+
+/* Stop the interrupt URB and flush any queued interrupt work */
+static void mm81x_usb_int_stop(struct mm81x *mors)
+{
+ struct mm81x_usb *musb = (struct mm81x_usb *)mors->drv_priv;
+
+ usb_kill_urb(musb->endpoints[MM81X_EP_INT].urb);
+ cancel_work_sync(&mors->usb_irq_work);
+}
+
+/*
+ * Completion handler for the command URB: record any error in
+ * musb->errors, clear ongoing_cmd and wake the waiter.
+ */
+static void mm81x_usb_cmd_callback(struct urb *urb)
+{
+ struct mm81x *mors = urb->context;
+ struct mm81x_usb *musb = (struct mm81x_usb *)mors->drv_priv;
+
+ /* sync/async unlink faults aren't errors */
+ if (urb->status) {
+ if (!(urb->status == -ENOENT || urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN))
+ dev_err(mors->dev,
+ "nonzero write bulk status received: %d",
+ urb->status);
+
+ musb->errors = urb->status;
+ }
+
+ musb->ongoing_cmd = false;
+ wake_up(&musb->rw_in_wait);
+}
+
+/*
+ * Send a mm81x_usb_command on the command endpoint and wait for the
+ * URB to complete (URB_TIMEOUT_MS). Returns the number of bytes
+ * written, or a negative errno.
+ */
+static int mm81x_usb_cmd(struct mm81x_usb *musb,
+ const struct mm81x_usb_command *cmd)
+{
+ int retval = 0;
+ struct mm81x *mors = usb_get_intfdata(musb->interface);
+ struct mm81x_usb_endpoint *ep = &musb->endpoints[MM81X_EP_CMD];
+ size_t writesize = sizeof(*cmd);
+
+ if (!test_bit(MM81X_USB_FLAG_ATTACHED, &musb->flags))
+ return -ENODEV;
+
+ memcpy(ep->buffer, cmd, writesize);
+
+ usb_fill_bulk_urb(ep->urb, musb->udev,
+ usb_sndbulkpipe(musb->udev, ep->addr), ep->buffer,
+ writesize, mm81x_usb_cmd_callback, mors);
+ ep->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ musb->ongoing_cmd = true;
+
+ retval = usb_submit_urb(ep->urb, GFP_KERNEL);
+ if (retval) {
+ dev_err(mors->dev, "- failed submitting write urb, error %d",
+ retval);
+
+ goto error;
+ }
+
+ retval = wait_event_interruptible_timeout(
+ musb->rw_in_wait, (!musb->ongoing_cmd),
+ msecs_to_jiffies(URB_TIMEOUT_MS));
+ if (retval < 0) {
+ dev_err(mors->dev, "error waiting for urb %d", retval);
+ /*
+ * NOTE(review): the URB is not killed on this path — confirm
+ * a late completion cannot race with reuse of ep->buffer.
+ */
+ goto error;
+ } else if (retval == 0) {
+ dev_err(mors->dev, "timed out waiting for urb");
+ usb_kill_urb(ep->urb);
+ retval = -ETIMEDOUT;
+ goto error;
+ }
+
+ musb->ongoing_cmd = false;
+ return writesize;
+
+error:
+ musb->ongoing_cmd = false;
+ return retval;
+}
+
+/* Issue a MM81X_USB_RESET command to the chip. Returns 0 or an errno. */
+static int mm81x_usb_ndr_reset(struct mm81x *mors)
+{
+ int ret;
+ struct mm81x_usb *musb = (struct mm81x_usb *)mors->drv_priv;
+ struct mm81x_usb_command cmd;
+
+ mutex_lock(&musb->lock);
+
+ musb->ongoing_rw = true;
+ musb->errors = 0;
+
+ cmd.dir = cpu_to_le32(MM81X_USB_RESET);
+ cmd.address = cpu_to_le32(0);
+ cmd.length = cpu_to_le32(0);
+
+ ret = mm81x_usb_cmd(musb, &cmd);
+ if (ret < 0)
+ dev_err(mors->dev, "mm81x_usb_cmd (MM81X_USB_RESET) error %d\n",
+ ret);
+ else
+ ret = 0;
+
+ musb->ongoing_rw = false;
+ mutex_unlock(&musb->lock);
+ return ret;
+}
+
+/*
+ * Completion handler shared by the memory read/write bulk URBs:
+ * record any error in musb->errors, clear ongoing_rw and wake the
+ * waiter.
+ */
+static void mm81x_usb_mem_rw_callback(struct urb *urb)
+{
+ struct mm81x *mors = urb->context;
+ struct mm81x_usb *musb = (struct mm81x_usb *)mors->drv_priv;
+
+ /* sync/async unlink faults aren't errors */
+ if (urb->status) {
+ if (!(urb->status == -ENOENT || urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN))
+ dev_err(mors->dev,
+ "nonzero write bulk status received: %d",
+ urb->status);
+
+ musb->errors = urb->status;
+ }
+
+ musb->ongoing_rw = false;
+ wake_up(&musb->rw_in_wait);
+}
+
+static int mm81x_usb_mem_read(struct mm81x_usb *musb, u32 address, u8 *data,
+ ssize_t size)
+{
+ int ret;
+ struct mm81x_usb_command cmd;
+ struct mm81x *mors = usb_get_intfdata(musb->interface);
+
+ if (!test_bit(MM81X_USB_FLAG_ATTACHED, &musb->flags))
+ return -ENODEV;
+
+ mutex_lock(&musb->lock);
+
+ musb->ongoing_rw = true;
+ musb->errors = 0;
+
+ /* Send command ahead to prepare for Tokens */
+ cmd.dir = cpu_to_le32(MM81X_USB_READ);
+ cmd.address = cpu_to_le32(address);
+ cmd.length = cpu_to_le32(size);
+
+ ret = mm81x_usb_cmd(musb, &cmd);
+ if (ret < 0) {
+ dev_err(mors->dev, "mm81x_usb_cmd error %d", ret);
+ goto error;
+ }
+
+ /* Let's be fast push the next URB, don't wait until command is done */
+ usb_fill_bulk_urb(
+ musb->endpoints[MM81X_EP_MEM_RD].urb, musb->udev,
+ usb_rcvbulkpipe(musb->udev,
+ musb->endpoints[MM81X_EP_MEM_RD].addr),
+ musb->endpoints[MM81X_EP_MEM_RD].buffer, size,
+ mm81x_usb_mem_rw_callback, mors);
+
+ ret = usb_submit_urb(musb->endpoints[MM81X_EP_MEM_RD].urb, GFP_ATOMIC);
+ if (ret < 0) {
+ dev_err(mors->dev, "failed submitting read urb, error %d", ret);
+ ret = (ret == -ENOMEM) ? ret : -EIO;
+ goto error;
+ }
+
+ ret = wait_event_interruptible_timeout(
+ musb->rw_in_wait, (!musb->ongoing_rw),
+ msecs_to_jiffies(URB_TIMEOUT_MS));
+ if (ret < 0) {
+ dev_err(mors->dev, "wait_event_interruptible: error %d", ret);
+ goto error;
+ } else if (ret == 0) {
+ /* Timed out. */
+ usb_kill_urb(musb->endpoints[MM81X_EP_MEM_RD].urb);
+ }
+
+ if (musb->errors) {
+ ret = musb->errors;
+ dev_err(mors->dev, "mem read error %d", ret);
+ goto error;
+ }
+
+ memcpy(data, musb->endpoints[MM81X_EP_MEM_RD].buffer, size);
+ ret = size;
+
+error:
+ musb->ongoing_rw = false;
+ mutex_unlock(&musb->lock);
+
+ return ret;
+}
+
+/*
+ * Write @size bytes from @data to device memory at @address.
+ *
+ * Issues a write command, then pushes the payload via a bulk-OUT URB.
+ * Returns the number of bytes written on success or a negative errno.
+ * Serialised by musb->lock.
+ */
+static int mm81x_usb_mem_write(struct mm81x_usb *musb, u32 address, u8 *data,
+			       ssize_t size)
+{
+	int ret;
+	struct mm81x_usb_command cmd;
+	struct mm81x *mors = usb_get_intfdata(musb->interface);
+
+	if (!test_bit(MM81X_USB_FLAG_ATTACHED, &musb->flags))
+		return -ENODEV;
+
+	mutex_lock(&musb->lock);
+
+	musb->ongoing_rw = true;
+	musb->errors = 0;
+
+	/* Send command ahead to prepare for Tokens */
+	cmd.dir = cpu_to_le32(MM81X_USB_WRITE);
+	cmd.address = cpu_to_le32(address);
+	cmd.length = cpu_to_le32(size);
+	ret = mm81x_usb_cmd(musb, &cmd);
+	if (ret < 0) {
+		/* Was misreported as "mm81x_usb_mem_read error" */
+		dev_err(mors->dev, "mm81x_usb_cmd error %d", ret);
+		goto error;
+	}
+
+	memcpy(musb->endpoints[MM81X_EP_MEM_WR].buffer, data, size);
+
+	/* Prepare the bulk-out transfer for the payload */
+	usb_fill_bulk_urb(
+		musb->endpoints[MM81X_EP_MEM_WR].urb, musb->udev,
+		usb_sndbulkpipe(musb->udev,
+				musb->endpoints[MM81X_EP_MEM_WR].addr),
+		musb->endpoints[MM81X_EP_MEM_WR].buffer, size,
+		mm81x_usb_mem_rw_callback, mors);
+
+	/* Process context sleeping under a mutex - GFP_KERNEL is safe here */
+	ret = usb_submit_urb(musb->endpoints[MM81X_EP_MEM_WR].urb, GFP_KERNEL);
+	if (ret < 0) {
+		dev_err(mors->dev, "- failed submitting write urb, error %d",
+			ret);
+		ret = (ret == -ENOMEM) ? ret : -EIO;
+		goto error;
+	}
+
+	ret = wait_event_interruptible_timeout(
+		musb->rw_in_wait, (!musb->ongoing_rw),
+		msecs_to_jiffies(URB_TIMEOUT_MS));
+	if (ret < 0) {
+		/* Interrupted: reap the in-flight URB before unlocking */
+		usb_kill_urb(musb->endpoints[MM81X_EP_MEM_WR].urb);
+		dev_err(mors->dev, "error %d", ret);
+		goto error;
+	} else if (ret == 0) {
+		/* Timed out. */
+		usb_kill_urb(musb->endpoints[MM81X_EP_MEM_WR].urb);
+	}
+
+	if (musb->errors) {
+		ret = musb->errors;
+		dev_err(mors->dev, "error %d", ret);
+		goto error;
+	}
+
+	ret = size;
+
+error:
+	musb->ongoing_rw = false;
+	mutex_unlock(&musb->lock);
+	return ret;
+}
+
+/* Bus op: read @len bytes of device memory, chunked to the max URB size */
+static int mm81x_usb_dm_read(struct mm81x *mors, u32 address, u8 *data, int len)
+{
+	struct mm81x_usb *musb = (struct mm81x_usb *)mors->drv_priv;
+	ssize_t done = 0;
+
+	while (done < len) {
+		ssize_t chunk = min((ssize_t)(len - done),
+				    (ssize_t)USB_MAX_TRANSFER_SIZE);
+		int ret = mm81x_usb_mem_read(musb, address + done,
+					     (u8 *)(data + done), chunk);
+
+		if (ret < 0) {
+			dev_err(mors->dev, "%s failed (errno=%d)", __func__,
+				ret);
+			return ret;
+		}
+
+		done += ret;
+	}
+
+	return 0;
+}
+
+/* Bus op: write @len bytes of device memory, chunked to the max URB size */
+static int mm81x_usb_dm_write(struct mm81x *mors, u32 address, const u8 *data,
+			      int len)
+{
+	struct mm81x_usb *musb = (struct mm81x_usb *)mors->drv_priv;
+	ssize_t done = 0;
+
+	while (done < len) {
+		ssize_t chunk = min((ssize_t)(len - done),
+				    (ssize_t)USB_MAX_TRANSFER_SIZE);
+		int ret = mm81x_usb_mem_write(musb, address + done,
+					      (u8 *)(data + done), chunk);
+
+		if (ret < 0) {
+			dev_err(mors->dev, "%s failed (errno=%d)", __func__,
+				ret);
+			return ret;
+		}
+
+		done += ret;
+	}
+
+	return 0;
+}
+
+/* Bus op: read a 32-bit little-endian device register into CPU order */
+static int mm81x_usb_reg32_read(struct mm81x *mors, u32 address, u32 *val)
+{
+	struct mm81x_usb *musb = (struct mm81x_usb *)mors->drv_priv;
+	int ret = mm81x_usb_mem_read(musb, address, (u8 *)val, sizeof(*val));
+
+	if (ret != sizeof(*val)) {
+		dev_err(mors->dev, "usb reg32 read failed %d", ret);
+		return ret;
+	}
+
+	*val = le32_to_cpup((__le32 *)val);
+	return 0;
+}
+
+/* Bus op: write a 32-bit device register (converted to little-endian) */
+static int mm81x_usb_reg32_write(struct mm81x *mors, u32 address, u32 val)
+{
+	struct mm81x_usb *musb = (struct mm81x_usb *)mors->drv_priv;
+	__le32 wire_val = cpu_to_le32(val);
+	int ret = mm81x_usb_mem_write(musb, address, (u8 *)&wire_val,
+				      sizeof(wire_val));
+
+	if (ret == sizeof(wire_val))
+		return 0;
+
+	dev_err(mors->dev, "usb reg32 write failed %d", ret);
+	return ret;
+}
+
+/* Bus op: hold (or drop) an autosuspend reference on the USB interface */
+static void mm81x_usb_bus_enable(struct mm81x *mors, bool enable)
+{
+	struct mm81x_usb *usb_priv = (struct mm81x_usb *)mors->drv_priv;
+
+	if (!enable) {
+		usb_autopm_put_interface(usb_priv->interface);
+		return;
+	}
+
+	usb_autopm_get_interface(usb_priv->interface);
+}
+
+/* Bus op: serialise bus access (paired with mm81x_usb_release_bus()) */
+static void mm81x_usb_claim_bus(struct mm81x *mors)
+{
+	struct mm81x_usb *usb_priv = (struct mm81x_usb *)mors->drv_priv;
+
+	mutex_lock(&usb_priv->bus_lock);
+}
+
+/* Bus op: release the bus claimed by mm81x_usb_claim_bus() */
+static void mm81x_usb_release_bus(struct mm81x *mors)
+{
+	struct mm81x_usb *usb_priv = (struct mm81x_usb *)mors->drv_priv;
+
+	mutex_unlock(&usb_priv->bus_lock);
+}
+
+/*
+ * No-op: the USB transport has no side-band IRQ line to gate; chip events
+ * arrive via the interrupt-IN endpoint URB instead.
+ */
+static void mm81x_usb_set_irq(struct mm81x *mors, bool enable)
+{
+}
+
+/* mm81x_bus_ops implementation backed by the USB transport above */
+static const struct mm81x_bus_ops mm81x_usb_ops = {
+	.dm_read = mm81x_usb_dm_read,
+	.dm_write = mm81x_usb_dm_write,
+	.reg32_read = mm81x_usb_reg32_read,
+	.reg32_write = mm81x_usb_reg32_write,
+	.digital_reset = mm81x_usb_ndr_reset,
+	.set_bus_enable = mm81x_usb_bus_enable,
+	.claim = mm81x_usb_claim_bus,
+	.release = mm81x_usb_release_bus,
+	.set_irq = mm81x_usb_set_irq,
+	.bulk_alignment = MM81X_BUS_DEFAULT_BULK_ALIGNMENT,
+};
+
+/*
+ * Discover the device's endpoints and allocate the URBs and transfer
+ * buffers used for memory/command transactions.
+ *
+ * Assignment relies on descriptor order: the first bulk-IN/OUT pair is
+ * used for memory reads/writes, a second pair (if present) for register
+ * access, and the interrupt-IN endpoint for chip-to-host signalling.
+ * The command endpoint shares the memory-write endpoint.
+ *
+ * Returns 0 on success or a negative errno; on failure everything
+ * allocated here is freed again before returning.
+ */
+static int mm81x_usb_detect_endpoints(struct mm81x *mors,
+				      const struct usb_interface *intf)
+{
+	int ret;
+	unsigned int i;
+	struct mm81x_usb *musb = (struct mm81x_usb *)mors->drv_priv;
+	struct usb_endpoint_descriptor *ep_desc;
+	struct usb_host_interface *intf_desc = intf->cur_altsetting;
+
+	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
+		ep_desc = &intf_desc->endpoint[i].desc;
+
+		if (usb_endpoint_is_bulk_in(ep_desc)) {
+			/* First bulk-IN is memory, second is registers */
+			if (!musb->endpoints[MM81X_EP_MEM_RD].addr) {
+				musb->endpoints[MM81X_EP_MEM_RD].addr =
+					usb_endpoint_num(ep_desc);
+				musb->endpoints[MM81X_EP_MEM_RD].size =
+					usb_endpoint_maxp(ep_desc);
+			} else if (!musb->endpoints[MM81X_EP_REG_RD].addr) {
+				musb->endpoints[MM81X_EP_REG_RD].addr =
+					usb_endpoint_num(ep_desc);
+				musb->endpoints[MM81X_EP_REG_RD].size =
+					usb_endpoint_maxp(ep_desc);
+			}
+		} else if (usb_endpoint_is_bulk_out(ep_desc)) {
+			/* First bulk-OUT is memory, second is registers */
+			if (!musb->endpoints[MM81X_EP_MEM_WR].addr) {
+				musb->endpoints[MM81X_EP_MEM_WR].addr =
+					usb_endpoint_num(ep_desc);
+				musb->endpoints[MM81X_EP_MEM_WR].size =
+					usb_endpoint_maxp(ep_desc);
+			} else if (!musb->endpoints[MM81X_EP_REG_WR].addr) {
+				musb->endpoints[MM81X_EP_REG_WR].addr =
+					usb_endpoint_num(ep_desc);
+				musb->endpoints[MM81X_EP_REG_WR].size =
+					usb_endpoint_maxp(ep_desc);
+			}
+		} else if (usb_endpoint_is_int_in(ep_desc)) {
+			musb->endpoints[MM81X_EP_INT].addr =
+				usb_endpoint_num(ep_desc);
+			musb->endpoints[MM81X_EP_INT].size =
+				usb_endpoint_maxp(ep_desc);
+		}
+	}
+
+	dev_dbg(mors->dev, "\tMemory Endpoint IN %s detected: %u size %u",
+		musb->endpoints[MM81X_EP_MEM_RD].addr ? "" : "not",
+		musb->endpoints[MM81X_EP_MEM_RD].addr,
+		musb->endpoints[MM81X_EP_MEM_RD].size);
+	dev_dbg(mors->dev, "\tMemory Endpoint OUT %s detected: %u size %u",
+		musb->endpoints[MM81X_EP_MEM_WR].addr ? "" : "not",
+		musb->endpoints[MM81X_EP_MEM_WR].addr,
+		musb->endpoints[MM81X_EP_MEM_WR].size);
+	dev_dbg(mors->dev, "\tRegister Endpoint IN %s detected: %u",
+		musb->endpoints[MM81X_EP_REG_RD].addr ? "" : "not",
+		musb->endpoints[MM81X_EP_REG_RD].addr);
+	dev_dbg(mors->dev, "\tRegister Endpoint OUT %s detected: %u",
+		musb->endpoints[MM81X_EP_REG_WR].addr ? "" : "not",
+		musb->endpoints[MM81X_EP_REG_WR].addr);
+	dev_dbg(mors->dev, "\tStats IN endpoint %s detected: %u",
+		musb->endpoints[MM81X_EP_INT].addr ? "" : "not",
+		musb->endpoints[MM81X_EP_INT].addr);
+
+	/* Verify we have an IN and OUT */
+	if (!(musb->endpoints[MM81X_EP_MEM_RD].addr &&
+	      musb->endpoints[MM81X_EP_MEM_WR].addr))
+		return -ENODEV;
+
+	/* Verify the stats MM81X_EP_INT is detected */
+	if (!musb->endpoints[MM81X_EP_INT].addr)
+		return -ENODEV;
+
+	/* Verify minimum interrupt status read */
+	if (musb->endpoints[MM81X_EP_INT].size < 8)
+		return -ENODEV;
+
+	musb->endpoints[MM81X_EP_CMD].urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!musb->endpoints[MM81X_EP_CMD].urb) {
+		ret = -ENOMEM;
+		goto err_ep;
+	}
+
+	musb->endpoints[MM81X_EP_MEM_RD].urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!musb->endpoints[MM81X_EP_MEM_RD].urb) {
+		ret = -ENOMEM;
+		goto err_ep;
+	}
+
+	musb->endpoints[MM81X_EP_MEM_WR].urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!musb->endpoints[MM81X_EP_MEM_WR].urb) {
+		ret = -ENOMEM;
+		goto err_ep;
+	}
+
+	musb->endpoints[MM81X_EP_MEM_RD].buffer =
+		kmalloc(USB_MAX_TRANSFER_SIZE, GFP_KERNEL);
+	if (!musb->endpoints[MM81X_EP_MEM_RD].buffer) {
+		ret = -ENOMEM;
+		goto err_ep;
+	}
+
+	musb->endpoints[MM81X_EP_MEM_WR].buffer =
+		kmalloc(USB_MAX_TRANSFER_SIZE, GFP_KERNEL);
+	if (!musb->endpoints[MM81X_EP_MEM_WR].buffer) {
+		ret = -ENOMEM;
+		goto err_ep;
+	}
+
+	/* DMA-coherent buffer; the command URB uses transfer_dma directly */
+	musb->endpoints[MM81X_EP_CMD].buffer = usb_alloc_coherent(
+		musb->udev, sizeof(struct mm81x_usb_command), GFP_KERNEL,
+		&musb->endpoints[MM81X_EP_CMD].urb->transfer_dma);
+
+	if (!musb->endpoints[MM81X_EP_CMD].buffer) {
+		ret = -ENOMEM;
+		goto err_ep;
+	}
+
+	/* Assign command to memory out end point */
+	musb->endpoints[MM81X_EP_CMD].addr =
+		musb->endpoints[MM81X_EP_MEM_WR].addr;
+	musb->endpoints[MM81X_EP_CMD].size =
+		musb->endpoints[MM81X_EP_MEM_WR].size;
+
+	return 0;
+
+err_ep:
+	/*
+	 * NOTE(review): the freed pointers are left dangling in
+	 * musb->endpoints, so callers must not run mm81x_urb_cleanup()
+	 * after this function fails.
+	 */
+	if (musb->endpoints[MM81X_EP_CMD].urb &&
+	    musb->endpoints[MM81X_EP_CMD].buffer)
+		usb_free_coherent(
+			musb->udev, sizeof(struct mm81x_usb_command),
+			musb->endpoints[MM81X_EP_CMD].buffer,
+			musb->endpoints[MM81X_EP_CMD].urb->transfer_dma);
+	usb_free_urb(musb->endpoints[MM81X_EP_MEM_RD].urb);
+	usb_free_urb(musb->endpoints[MM81X_EP_CMD].urb);
+	usb_free_urb(musb->endpoints[MM81X_EP_MEM_WR].urb);
+	kfree(musb->endpoints[MM81X_EP_MEM_RD].buffer);
+	kfree(musb->endpoints[MM81X_EP_MEM_WR].buffer);
+
+	return ret;
+}
+
+/*
+ * Stop in-flight transfers and free every URB and transfer buffer owned
+ * by this device (memory, command and interrupt endpoints). NULL URBs
+ * and buffers are tolerated by usb_kill_urb()/usb_free_urb()/kfree().
+ */
+static void mm81x_urb_cleanup(struct mm81x *mors)
+{
+	struct mm81x_usb *musb = (struct mm81x_usb *)mors->drv_priv;
+	struct mm81x_usb_endpoint *int_ep = &musb->endpoints[MM81X_EP_INT];
+	struct mm81x_usb_endpoint *rd_ep = &musb->endpoints[MM81X_EP_MEM_RD];
+	struct mm81x_usb_endpoint *wr_ep = &musb->endpoints[MM81X_EP_MEM_WR];
+	struct mm81x_usb_endpoint *cmd_ep = &musb->endpoints[MM81X_EP_CMD];
+
+	usb_kill_urb(rd_ep->urb);
+	usb_kill_urb(wr_ep->urb);
+	usb_kill_urb(cmd_ep->urb);
+
+	/* Coherent buffers must be freed before their URBs (transfer_dma) */
+	if (int_ep->urb)
+		usb_free_coherent(musb->udev, MM81X_EP_INT_BUFFER_SIZE,
+				  int_ep->buffer, int_ep->urb->transfer_dma);
+
+	if (cmd_ep->urb)
+		usb_free_coherent(musb->udev, sizeof(struct mm81x_usb_command),
+				  cmd_ep->buffer, cmd_ep->urb->transfer_dma);
+
+	kfree(wr_ep->buffer);
+	kfree(rd_ep->buffer);
+
+	usb_free_urb(int_ep->urb);
+	usb_free_urb(wr_ep->urb);
+	usb_free_urb(rd_ep->urb);
+	usb_free_urb(cmd_ep->urb);
+}
+
+/*
+ * Probe: allocate the core device, discover endpoints, bring up the core
+ * and interrupt path, then enable runtime PM with remote wakeup.
+ *
+ * Error paths unwind in reverse order, including the URBs/buffers from
+ * mm81x_usb_detect_endpoints() and the device/interface references taken
+ * below - previously both leaked on any failure after endpoint detection.
+ */
+static int mm81x_usb_probe(struct usb_interface *interface,
+			   const struct usb_device_id *id)
+{
+	int ret;
+	struct mm81x *mors;
+	struct mm81x_usb *musb;
+
+	mors = mm81x_core_alloc(sizeof(*musb), &interface->dev);
+	if (!mors)
+		return -ENOMEM;
+
+	mors->bus_ops = &mm81x_usb_ops;
+	mors->bus_type = MM81X_BUS_TYPE_USB;
+
+	musb = (struct mm81x_usb *)mors->drv_priv;
+	musb->udev = usb_get_dev(interface_to_usbdev(interface));
+	musb->interface = usb_get_intf(interface);
+
+	mutex_init(&musb->lock);
+	mutex_init(&musb->bus_lock);
+	init_waitqueue_head(&musb->rw_in_wait);
+	usb_set_intfdata(interface, mors);
+
+	ret = mm81x_usb_detect_endpoints(mors, interface);
+	if (ret < 0)
+		goto err_put; /* detect_endpoints frees what it allocated */
+
+	set_bit(MM81X_USB_FLAG_ATTACHED, &musb->flags);
+
+	ret = mm81x_core_init(mors);
+	if (ret)
+		goto err_urb_cleanup;
+
+	INIT_WORK(&mors->usb_irq_work, mm81x_usb_irq_work);
+
+	ret = mm81x_usb_int_enable(mors);
+	if (ret)
+		goto err_core_deinit;
+
+	ret = mm81x_core_register(mors);
+	if (ret)
+		goto err_usb_int_stop;
+
+	/* USB requires remote wakeup functionality for suspend */
+	clear_bit(MM81X_USB_FLAG_SUSPENDED, &musb->flags);
+	musb->interface->needs_remote_wakeup = 1;
+	usb_enable_autosuspend(musb->udev);
+	pm_runtime_set_autosuspend_delay(&musb->udev->dev,
+					 PM_RUNTIME_AUTOSUSPEND_DELAY_MS);
+
+	usb_autopm_get_interface(interface);
+	return 0;
+
+err_usb_int_stop:
+	mm81x_usb_int_stop(mors);
+err_core_deinit:
+	mm81x_core_deinit(mors);
+err_urb_cleanup:
+	/* URBs and buffers from mm81x_usb_detect_endpoints() */
+	mm81x_urb_cleanup(mors);
+err_put:
+	/* Balance usb_get_intf()/usb_get_dev() taken above */
+	usb_set_intfdata(interface, NULL);
+	usb_put_intf(musb->interface);
+	usb_put_dev(musb->udev);
+	mm81x_core_free(mors);
+	return ret;
+}
+
+/*
+ * Disconnect: tear down the core, interrupt path, URBs and PM state.
+ * If the device vanished (surprise unplug) mark the chip unresponsive so
+ * the teardown path does not attempt further bus traffic. If we were
+ * suspended, release the locks that mm81x_usb_suspend() left held.
+ */
+static void mm81x_usb_disconnect(struct usb_interface *interface)
+{
+	struct mm81x *mors = usb_get_intfdata(interface);
+	struct mm81x_usb *musb = (struct mm81x_usb *)mors->drv_priv;
+	int minor = interface->minor;
+	struct usb_device *udev = interface_to_usbdev(interface);
+
+	if (udev->state == USB_STATE_NOTATTACHED) {
+		clear_bit(MM81X_USB_FLAG_ATTACHED, &musb->flags);
+		set_bit(MM81X_STATE_CHIP_UNRESPONSIVE, &mors->state_flags);
+		dev_dbg(mors->dev, "USB suddenly unplugged");
+	}
+
+	/*
+	 * Was usb_disable_autosuspend(usb_get_dev(udev)): the extra device
+	 * reference taken there was never released.
+	 */
+	usb_disable_autosuspend(udev);
+
+	if (test_bit(MM81X_USB_FLAG_SUSPENDED, &musb->flags)) {
+		dev_dbg(mors->dev, "USB was suspended: release locks");
+		mm81x_usb_release_bus(mors);
+		mutex_unlock(&musb->lock);
+	}
+
+	clear_bit(MM81X_USB_FLAG_SUSPENDED, &musb->flags);
+
+	mm81x_core_unregister(mors);
+	mm81x_usb_int_stop(mors);
+	mm81x_core_deinit(mors);
+	mm81x_urb_cleanup(mors);
+	mm81x_core_free(mors);
+
+	usb_autopm_put_interface(interface);
+	usb_set_intfdata(interface, NULL);
+	dev_info(&interface->dev, "USB Morse #%d now disconnected", minor);
+	/* Balance usb_get_intf()/usb_get_dev() from probe */
+	usb_put_intf(interface);
+	usb_put_dev(udev);
+}
+
+/*
+ * Suspend: stop all in-flight URBs, then take both the bus lock and the
+ * transfer lock. The locks are intentionally held across suspend to block
+ * any bus traffic; they are released in resume/reset_resume (or in
+ * disconnect if the device goes away while suspended).
+ */
+static int mm81x_usb_suspend(struct usb_interface *intf, pm_message_t message)
+{
+	struct mm81x *mors = usb_get_intfdata(intf);
+	struct mm81x_usb *musb = (struct mm81x_usb *)mors->drv_priv;
+	struct mm81x_usb_endpoint *int_ep = &musb->endpoints[MM81X_EP_INT];
+	struct mm81x_usb_endpoint *rd_ep = &musb->endpoints[MM81X_EP_MEM_RD];
+	struct mm81x_usb_endpoint *wr_ep = &musb->endpoints[MM81X_EP_MEM_WR];
+	struct mm81x_usb_endpoint *cmd_ep = &musb->endpoints[MM81X_EP_CMD];
+
+	if (!test_bit(MM81X_USB_FLAG_ATTACHED, &musb->flags))
+		return -ENODEV;
+
+	usb_kill_urb(int_ep->urb);
+	usb_kill_urb(rd_ep->urb);
+	usb_kill_urb(wr_ep->urb);
+	usb_kill_urb(cmd_ep->urb);
+
+	/* Locking the bus. No USB communication after this point */
+	mm81x_usb_claim_bus(mors);
+	mutex_lock(&musb->lock);
+
+	set_bit(MM81X_USB_FLAG_SUSPENDED, &musb->flags);
+	return 0;
+}
+
+/*
+ * Resume: re-arm the interrupt-IN URB and drop the locks taken in
+ * mm81x_usb_suspend(). A failed URB submission is logged but resume
+ * still reports success so the device is not left locked.
+ */
+static int mm81x_usb_resume(struct usb_interface *intf)
+{
+	struct mm81x *mors = usb_get_intfdata(intf);
+	struct mm81x_usb *musb = (struct mm81x_usb *)mors->drv_priv;
+	int ret;
+	struct mm81x_usb_endpoint *int_ep = &musb->endpoints[MM81X_EP_INT];
+
+	if (!test_bit(MM81X_USB_FLAG_ATTACHED, &musb->flags))
+		return -ENODEV;
+
+	ret = usb_submit_urb(int_ep->urb, GFP_KERNEL);
+	if (ret)
+		dev_err(mors->dev, "Couldn't submit urb. Error number %d", ret);
+
+	mm81x_usb_release_bus(mors);
+	mutex_unlock(&musb->lock);
+
+	clear_bit(MM81X_USB_FLAG_SUSPENDED, &musb->flags);
+	return 0;
+}
+
+/*
+ * After a bus reset the recovery steps are identical to a normal resume
+ * for this driver (re-arm the interrupt-IN URB, release the suspend
+ * locks), so delegate instead of duplicating the body.
+ */
+static int mm81x_usb_reset_resume(struct usb_interface *intf)
+{
+	return mm81x_usb_resume(intf);
+}
+
+/*
+ * Nothing to quiesce before a reset; providing the callback lets the USB
+ * core reset the device without unbinding this driver.
+ */
+static int mm81x_usb_pre_reset(struct usb_interface *intf)
+{
+	return 0;
+}
+
+/* Nothing to restore after a reset; see mm81x_usb_pre_reset() */
+static int mm81x_usb_post_reset(struct usb_interface *intf)
+{
+	return 0;
+}
+
+static struct usb_driver mm81x_usb_driver = {
+	.name = "mm81x_usb",
+	.probe = mm81x_usb_probe,
+	.disconnect = mm81x_usb_disconnect,
+	.suspend = mm81x_usb_suspend,
+	.resume = mm81x_usb_resume,
+	.reset_resume = mm81x_usb_reset_resume,
+	.pre_reset = mm81x_usb_pre_reset,
+	.post_reset = mm81x_usb_post_reset,
+	.id_table = mm81x_usb_table,
+	.supports_autosuspend = 1,
+	.soft_unbind = 1,
+};
+
+module_usb_driver(mm81x_usb_driver);
+
+MODULE_AUTHOR("Morse Micro");
+MODULE_DESCRIPTION("Driver support for Morse Micro MM81X USB devices");
+/* Must match the GPL-2.0-only SPDX tag used throughout this driver */
+MODULE_LICENSE("GPL");
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 26/31] wifi: mm81x: add yaps.c
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (24 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 25/31] wifi: mm81x: add usb.c Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 27/31] wifi: mm81x: add yaps.h Lachlan Hodges
` (5 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/yaps.c | 704 +++++++++++++++++++
1 file changed, 704 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/yaps.c
diff --git a/drivers/net/wireless/morsemicro/mm81x/yaps.c b/drivers/net/wireless/morsemicro/mm81x/yaps.c
new file mode 100644
index 000000000000..6ad90e3af641
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/yaps.c
@@ -0,0 +1,704 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+#include <linux/gpio.h>
+#include <linux/random.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include "hif.h"
+#include "ps.h"
+#include "bus.h"
+#include "command.h"
+#include "skbq.h"
+
+/* This is a fail safe timeout */
+#define CHIP_FULL_RECOVERY_TIMEOUT_MS 30
+
+/* Defined as the max number of MPDUs per AMPDU */
+#define MAX_PKTS_PER_TX_TXN 16
+#define MAX_PKTS_PER_RX_TXN 32
+
+/*
+ * Allocate the scratch packet arrays used for a single to-chip and
+ * from-chip bus transaction. Returns 0 or -ENOMEM.
+ */
+static int mm81x_yaps_alloc_pkt_buffers(struct mm81x_yaps *yaps)
+{
+	void *tx_pkts;
+	void *rx_pkts;
+
+	tx_pkts = kcalloc(MAX_PKTS_PER_TX_TXN,
+			  sizeof(*yaps->hw.to_chip_pkts), GFP_KERNEL);
+	if (!tx_pkts)
+		return -ENOMEM;
+
+	rx_pkts = kcalloc(MAX_PKTS_PER_RX_TXN,
+			  sizeof(*yaps->hw.from_chip_pkts), GFP_KERNEL);
+	if (!rx_pkts) {
+		kfree(tx_pkts);
+		return -ENOMEM;
+	}
+
+	yaps->hw.to_chip_pkts = tx_pkts;
+	yaps->hw.from_chip_pkts = rx_pkts;
+	return 0;
+}
+
+/* Release the scratch packet arrays; safe to call more than once */
+static void mm81x_yaps_free_pkt_buffers(struct mm81x_yaps *yaps)
+{
+	kfree(yaps->hw.from_chip_pkts);
+	kfree(yaps->hw.to_chip_pkts);
+	yaps->hw.from_chip_pkts = NULL;
+	yaps->hw.to_chip_pkts = NULL;
+}
+
+/* Push up to @num_pkts packets to the chip via the bus-specific backend */
+static inline int mm81x_yaps_write_pkts(struct mm81x_yaps *yaps,
+					struct mm81x_yaps_pkt *pkts,
+					int num_pkts, int *num_pkts_sent)
+{
+	return yaps->ops->write_pkts(yaps, pkts, num_pkts, num_pkts_sent);
+}
+
+/* Pull up to @num_pkts_max packets from the chip via the backend */
+static inline int mm81x_yaps_read_pkts(struct mm81x_yaps *yaps,
+				       struct mm81x_yaps_pkt *pkts,
+				       int num_pkts_max, int *num_pkts_received)
+{
+	return yaps->ops->read_pkts(yaps, pkts, num_pkts_max,
+				    num_pkts_received);
+}
+
+/* Refresh the cached YAPS queue status from the chip via the backend */
+static inline int mm81x_yaps_update_status(struct mm81x_yaps *yaps)
+{
+	return yaps->ops->update_status(yaps);
+}
+
+/* Mappings between sk_buff, skbq and yaps */
+static struct mm81x_skbq *mm81x_yaps_tc_q_from_aci(struct mm81x *mors, int aci)
+{
+ struct mm81x_yaps *yaps = &mors->hif.u.yaps;
+
+ if (aci >= ARRAY_SIZE(yaps->data_tx_qs))
+ return NULL;
+ return &yaps->data_tx_qs[aci];
+}
+
+/* Expose the per-AC data TX queue array to the generic skbq layer */
+static void mm81x_yaps_get_tx_qs(struct mm81x *mors, struct mm81x_skbq **qs,
+				 int *num_qs)
+{
+	*qs = mors->hif.u.yaps.data_tx_qs;
+	*num_qs = YAPS_TX_SKBQ_MAX;
+}
+
+/* Accessor for the beacon TX queue */
+static struct mm81x_skbq *mm81x_yaps_get_bcn_tc_q(struct mm81x *mors)
+{
+	return &mors->hif.u.yaps.beacon_q;
+}
+
+/* Accessor for the management-frame TX queue */
+static struct mm81x_skbq *mm81x_yaps_get_mgmt_tc_q(struct mm81x *mors)
+{
+	return &mors->hif.u.yaps.mgmt_q;
+}
+
+/* Accessor for the command TX queue */
+static struct mm81x_skbq *mm81x_yaps_get_tx_cmd_queue(struct mm81x *mors)
+{
+	return &mors->hif.u.yaps.cmd_q;
+}
+
+/*
+ * Translate chip interrupt status bits into HIF event flags and kick the
+ * HIF work item to service them. Always returns 0.
+ */
+static int mm81x_yaps_irq_handler(struct mm81x *mors, u32 status)
+{
+	if (status & BIT(MM81X_INT_YAPS_FC_PKT_WAITING_IRQN))
+		set_bit(MM81X_HIF_EVT_RX_PEND, &mors->hif.event_flags);
+
+	if (status & BIT(MM81X_INT_YAPS_FC_PACKET_FREED_UP_IRQN)) {
+		/* Chip freed TX space: cancel the queue-full failsafe timer */
+		timer_delete_sync_try(&mors->hif.u.yaps.chip_queue_full.timer);
+		set_bit(MM81X_HIF_EVT_TX_PACKET_FREED_UP_PEND,
+			&mors->hif.event_flags);
+	}
+
+	queue_work(mors->chip_wq, &mors->hif_work);
+	return 0;
+}
+
+/* HIF backend implementation based on the YAPS queue transport */
+const struct mm81x_hif_ops mm81x_yaps_ops = {
+	.init = mm81x_yaps_init,
+	.flush_tx_data = mm81x_yaps_flush_tx_data,
+	.flush_cmds = mm81x_yaps_flush_cmds,
+	.get_tx_status_pending_count = mm81x_yaps_get_tx_status_pending_count,
+	.get_tx_buffered_count = mm81x_yaps_get_tx_buffered_count,
+	.finish = mm81x_yaps_finish,
+	.skbq_get_tx_qs = mm81x_yaps_get_tx_qs,
+	.get_tx_beacon_queue = mm81x_yaps_get_bcn_tc_q,
+	.get_tx_mgmt_queue = mm81x_yaps_get_mgmt_tc_q,
+	.get_tx_cmd_queue = mm81x_yaps_get_tx_cmd_queue,
+	.get_tx_data_queue = mm81x_yaps_tc_q_from_aci,
+	.handle_irq = mm81x_yaps_irq_handler
+};
+
+/*
+ * Validate a packet received from the chip and route it to the matching
+ * RX queue (data or command response). Consumes @skb: on any error the
+ * buffer is freed. Returns 0 on success or a negative errno.
+ */
+static int mm81x_yaps_read_pkt(struct mm81x_yaps *yaps, struct sk_buff *skb)
+{
+	struct mm81x *mors = yaps->mors;
+	struct sk_buff_head skbq;
+	struct mm81x_skbq *mq = NULL;
+	struct mm81x_skb_hdr *hdr;
+	int skb_bytes_remaining;
+	int skb_len;
+	int ret = 0;
+
+	if (!skb) {
+		ret = -EINVAL;
+		goto exit_return_page;
+	}
+
+	__skb_queue_head_init(&skbq);
+
+	hdr = (struct mm81x_skb_hdr *)skb->data;
+	if (hdr->sync != MM81X_SKB_HEADER_SYNC) {
+		/* hdr->len is little-endian on the wire; convert for logging */
+		dev_err(mors->dev, "sync value error [0xAA:%d], hdr.len %d",
+			hdr->sync, le16_to_cpu(hdr->len));
+		ret = -EIO;
+		goto exit_return_page;
+	}
+
+	if (yaps->mors->hif.validate_skb_checksum &&
+	    !mm81x_skbq_validate_checksum(skb->data)) {
+		dev_dbg(yaps->mors->dev,
+			"SKB checksum is invalid hdr:[c:%02X s:%02X len:%d]",
+			hdr->channel, hdr->sync, le16_to_cpu(hdr->len));
+
+		/* Bad checksums are tolerated on TX-status pages only */
+		if (hdr->channel != MM81X_SKB_CHAN_TX_STATUS) {
+			ret = -EIO;
+			goto exit;
+		}
+	}
+
+	switch (hdr->channel) {
+	case MM81X_SKB_CHAN_DATA:
+	case MM81X_SKB_CHAN_NDP_FRAMES:
+	case MM81X_SKB_CHAN_TX_STATUS:
+	case MM81X_SKB_CHAN_DATA_NOACK:
+	case MM81X_SKB_CHAN_BEACON:
+	case MM81X_SKB_CHAN_MGMT:
+		mq = &yaps->data_rx_q;
+		break;
+	case MM81X_SKB_CHAN_COMMAND:
+		mq = &yaps->cmd_resp_q;
+		break;
+	default:
+		dev_err(mors->dev, "channel value error [%d]", hdr->channel);
+		ret = -EIO;
+		goto exit_return_page;
+	}
+
+	skb_len = sizeof(*hdr) + hdr->offset + le16_to_cpu(hdr->len);
+	skb_bytes_remaining = mm81x_skbq_space(mq);
+
+	if (skb_len > skb_bytes_remaining) {
+		dev_err(mors->dev,
+			"Page will not fit in SKBQ, dropping - len %d remain %d",
+			skb_len, skb_bytes_remaining);
+		ret = -ENOMEM;
+		/* Queue work to clear backlog */
+		queue_work(mors->net_wq, &mq->dispatch_work);
+		goto exit_return_page;
+	}
+
+	skb_trim(skb, skb_len);
+	__skb_queue_tail(&skbq, skb);
+
+	if (skb_queue_len(&skbq))
+		mm81x_skbq_enq(mq, &skbq);
+
+	/* push packets up in a different context */
+	queue_work(mors->net_wq, &mq->dispatch_work);
+
+	goto exit;
+
+exit_return_page:
+	if (ret && mq) {
+		dev_err(mors->dev, "failed %d", ret);
+		mm81x_skbq_purge(mq, &skbq);
+		goto exit;
+	}
+
+exit:
+	if (ret && skb)
+		dev_kfree_skb(skb);
+
+	return ret;
+}
+
+/*
+ * Dequeue up to MAX_PKTS_PER_TX_TXN skbs from @mq, map each to a YAPS
+ * to-chip queue and write them to the chip in a single transaction.
+ * Packets that could not be sent are requeued (or purged if requeueing
+ * fails); sent packets are passed to the TX-complete path.
+ */
+static int mm81x_yaps_tx(struct mm81x_yaps *yaps, struct mm81x_skbq *mq)
+{
+	int i;
+	int ret = 0;
+	int num_skbs = 0;
+	int tc_pkt_idx = 0;
+	int num_pkts_sent = 0;
+	struct sk_buff *skb;
+	struct sk_buff_head skbq_to_send;
+	struct sk_buff_head skbq_sent;
+	struct sk_buff_head skbq_failed;
+	struct sk_buff *pfirst, *pnext;
+	struct mm81x *mors = yaps->mors;
+	struct mm81x_skb_hdr *hdr;
+
+	/* Check there is something on the queue */
+	spin_lock_bh(&mq->lock);
+	skb = skb_peek(&mq->skbq);
+	spin_unlock_bh(&mq->lock);
+	if (!skb)
+		return 0;
+
+	__skb_queue_head_init(&skbq_to_send);
+	__skb_queue_head_init(&skbq_sent);
+	__skb_queue_head_init(&skbq_failed);
+
+	if (mq == &yaps->cmd_q)
+		/* Purge timed-out commands (this should not happen) */
+		mm81x_skbq_purge(mq, &mq->pending);
+	else if (mq == &yaps->mgmt_q && skb_queue_len(&mq->skbq) > 0)
+		/*
+		 * Purge old mgmt frames that have not been sent due to
+		 * congestion
+		 */
+		mm81x_skbq_purge_aged(mors, mq);
+
+	num_skbs =
+		mm81x_skbq_deq_num_skb(mq, &skbq_to_send, MAX_PKTS_PER_TX_TXN);
+
+	/* Map each skb's HIF channel to the matching YAPS to-chip queue */
+	skb_queue_walk_safe(&skbq_to_send, pfirst, pnext) {
+		enum mm81x_yaps_to_chip_q tc_queue;
+
+		hdr = (struct mm81x_skb_hdr *)pfirst->data;
+		switch (hdr->channel) {
+		case MM81X_SKB_CHAN_COMMAND:
+			tc_queue = MM81X_YAPS_CMD_Q;
+			break;
+		case MM81X_SKB_CHAN_BEACON:
+			tc_queue = MM81X_YAPS_BEACON_Q;
+			break;
+		case MM81X_SKB_CHAN_MGMT:
+			tc_queue = MM81X_YAPS_MGMT_Q;
+			break;
+		default:
+			tc_queue = MM81X_YAPS_TX_Q;
+			break;
+		}
+		yaps->hw.to_chip_pkts[tc_pkt_idx].tc_queue = tc_queue;
+		yaps->hw.to_chip_pkts[tc_pkt_idx].skb = pfirst;
+		tc_pkt_idx++;
+	}
+
+	/*
+	 * Send queued packets to chip. If the status update fails, fall
+	 * through with num_pkts_sent == 0 so the already-dequeued skbs are
+	 * requeued below instead of being leaked by an early return.
+	 */
+	ret = mm81x_yaps_update_status(yaps);
+	if (!ret)
+		ret = mm81x_yaps_write_pkts(yaps, yaps->hw.to_chip_pkts,
+					    tc_pkt_idx, &num_pkts_sent);
+
+	/* Move sent packets to done queue */
+	for (i = 0; i < num_pkts_sent; ++i) {
+		pfirst = __skb_dequeue(&skbq_to_send);
+		__skb_queue_tail(&skbq_sent, pfirst);
+	}
+
+	for (i = num_pkts_sent; i < num_skbs; ++i) {
+		pfirst = __skb_dequeue(&skbq_to_send);
+		__skb_queue_tail(&skbq_failed, pfirst);
+	}
+
+	if (skb_queue_len(&skbq_failed) > 0) {
+		mm81x_skbq_enq_prepend(mq, &skbq_failed);
+
+		/* queue full, can't requeue */
+		if (skb_queue_len(&skbq_failed) > 0) {
+			dev_warn(mors->dev,
+				 "can't requeue failed pkts, purging");
+			__skb_queue_purge(&skbq_failed);
+		}
+	}
+
+	if (skb_queue_len(&skbq_sent) > 0)
+		mm81x_skbq_tx_complete(mq, &skbq_sent);
+
+	return ret;
+}
+
+/* Returns true if there are TX data pages waiting to be sent */
+static bool mm81x_yaps_tx_data_handler(struct mm81x_yaps *yaps)
+{
+	s16 aci;
+	u32 count = 0;
+	struct mm81x *mors = yaps->mors;
+
+	/* Service ACs from highest priority (VO) downwards; stop after BE */
+	for (aci = MM81X_ACI_VO; aci >= 0; aci--) {
+		struct mm81x_skbq *data_q = mm81x_yaps_tc_q_from_aci(mors, aci);
+
+		if (!mm81x_is_data_tx_allowed(mors))
+			break;
+
+		/*
+		 * NOTE(review): a negative error from mm81x_yaps_tx() also
+		 * marks the chip queue as full here - confirm that treating
+		 * bus errors as "queue full" backoff is intended.
+		 */
+		yaps->chip_queue_full.is_full = mm81x_yaps_tx(yaps, data_q);
+		count += mm81x_skbq_count(data_q);
+
+		if (yaps->chip_queue_full.is_full)
+			break;
+
+		if (aci == MM81X_ACI_BE)
+			break;
+	}
+
+	/*
+	 * Data has potentially been transmitted from the data SKBQs.
+	 * If the mac80211 TX data Qs were previously stopped, now would
+	 * be a good time to check if they can be started again.
+	 */
+	mm81x_skbq_may_wake_tx_queues(mors);
+
+	return (count > 0) && mm81x_is_data_tx_allowed(mors);
+}
+
+/* Returns true if there are commands waiting to be sent */
+static bool mm81x_yaps_tx_cmd_handler(struct mm81x_yaps *yaps)
+{
+	/* Drain the command queue, then report whether anything remains */
+	mm81x_yaps_tx(yaps, &yaps->cmd_q);
+
+	return mm81x_skbq_count(&yaps->cmd_q) > 0;
+}
+
+/* Returns true if there are beacons still waiting to be sent */
+static bool mm81x_yaps_tx_beacon_handler(struct mm81x_yaps *yaps)
+{
+	mm81x_yaps_tx(yaps, &yaps->beacon_q);
+
+	return mm81x_skbq_count(&yaps->beacon_q) > 0;
+}
+
+/* Returns true if there are mgmt frames still waiting to be sent */
+static bool mm81x_yaps_tx_mgmt_handler(struct mm81x_yaps *yaps)
+{
+	mm81x_yaps_tx(yaps, &yaps->mgmt_q);
+
+	return mm81x_skbq_count(&yaps->mgmt_q) > 0;
+}
+
+/* Returns true if there are populated RX pages left in the device */
+static bool mm81x_yaps_rx_handler(struct mm81x_yaps *yaps)
+{
+	int ret = 0;
+	int i;
+	/*
+	 * Initialise so a failing read_pkts that does not set the count
+	 * cannot make us iterate over uninitialised packet slots.
+	 */
+	int num_pkts_received = 0;
+
+	ret = mm81x_yaps_update_status(yaps);
+	if (ret)
+		goto exit;
+
+	ret = mm81x_yaps_read_pkts(yaps, yaps->hw.from_chip_pkts,
+				   MAX_PKTS_PER_RX_TXN, &num_pkts_received);
+	if (ret && ret != -EAGAIN) {
+		dev_err(yaps->mors->dev, "YAPS read_pkts fail: %d", ret);
+		goto exit;
+	}
+
+	/* Hand each received page to the routing/validation layer */
+	for (i = 0; i < num_pkts_received; ++i) {
+		mm81x_yaps_read_pkt(yaps, yaps->hw.from_chip_pkts[i].skb);
+		yaps->hw.from_chip_pkts[i].skb = NULL;
+	}
+
+exit:
+	/* More pages remain when we ran out of memory or were told to retry */
+	if (ret == -ENOMEM || ret == -EAGAIN)
+		return true;
+	else
+		return false;
+}
+
+/*
+ * Work item: flush TX skbs whose status never arrived from the chip.
+ * If everything drained and power save is active, re-evaluate PS since
+ * it may have been gated on one of the stale TX statuses.
+ */
+void mm81x_yaps_stale_tx_work(struct work_struct *work)
+{
+	int i;
+	int flushed = 0;
+	struct mm81x *mors = container_of(work, struct mm81x, tx_stale_work);
+	struct mm81x_yaps *yaps;
+
+	yaps = &mors->hif.u.yaps;
+	flushed += mm81x_skbq_check_for_stale_tx(mors, &yaps->beacon_q);
+	flushed += mm81x_skbq_check_for_stale_tx(mors, &yaps->mgmt_q);
+
+	for (i = 0; i < ARRAY_SIZE(yaps->data_tx_qs); i++)
+		flushed += mm81x_skbq_check_for_stale_tx(mors,
+							 &yaps->data_tx_qs[i]);
+
+	if (!flushed)
+		return;
+
+	dev_dbg(mors->dev, "Flushed %d stale TX SKBs", flushed);
+
+	if (mors->ps.enable && !mors->ps.suspended &&
+	    (mm81x_yaps_get_tx_buffered_count(mors) == 0)) {
+		/* Evaluate ps to check if it was gated on a stale tx status */
+		queue_delayed_work(mors->chip_wq, &mors->ps.delayed_eval_work,
+				   0);
+	}
+}
+
+/*
+ * Main HIF work item: services all pending RX/TX events while power save
+ * is gated and the bus is claimed, then requeues itself while events
+ * remain (except TX data while the chip queue is full).
+ */
+void mm81x_yaps_work(struct work_struct *work)
+{
+	struct mm81x *mors = container_of(work, struct mm81x, hif_work);
+	unsigned long *flags = &mors->hif.event_flags;
+	struct mm81x_yaps *yaps = &mors->hif.u.yaps;
+
+	if (test_bit(MM81X_STATE_CHIP_UNRESPONSIVE, &mors->state_flags))
+		return;
+
+	if (!*flags)
+		return;
+
+	/* Disable power save in case it is running */
+	mm81x_ps_disable(mors);
+	mm81x_claim_bus(mors);
+
+	/*
+	 * Handle any populated RX pages from chip first to
+	 * avoid dropping pkts due to full on-chip buffers.
+	 * Check if all pages were removed, set event flags if not.
+	 */
+	if (test_and_clear_bit(MM81X_HIF_EVT_RX_PEND, flags)) {
+		if (mm81x_yaps_rx_handler(yaps))
+			set_bit(MM81X_HIF_EVT_RX_PEND, flags);
+	}
+
+	/* TX any commands before considering data */
+	if (test_and_clear_bit(MM81X_HIF_EVT_TX_COMMAND_PEND, flags)) {
+		if (mm81x_yaps_tx_cmd_handler(yaps))
+			set_bit(MM81X_HIF_EVT_TX_COMMAND_PEND, flags);
+	}
+
+	/* TX beacons before considering mgmt/data */
+	if (test_and_clear_bit(MM81X_HIF_EVT_TX_BEACON_PEND, flags)) {
+		if (mm81x_yaps_tx_beacon_handler(yaps))
+			set_bit(MM81X_HIF_EVT_TX_BEACON_PEND, flags);
+	}
+
+	/* TX mgmt before considering data */
+	if (test_and_clear_bit(MM81X_HIF_EVT_TX_MGMT_PEND, flags)) {
+		if (mm81x_yaps_tx_mgmt_handler(yaps))
+			set_bit(MM81X_HIF_EVT_TX_MGMT_PEND, flags);
+	}
+
+	/* Pause TX data Qs (pause wins over a simultaneous resume request) */
+	if (test_and_clear_bit(MM81X_HIF_EVT_DATA_TRAFFIC_PAUSE_PEND, flags)) {
+		test_and_clear_bit(MM81X_HIF_EVT_DATA_TRAFFIC_RESUME_PEND,
+				   flags);
+		mm81x_skbq_data_traffic_pause(mors);
+	}
+
+	/* Resume TX data Qs */
+	if (test_and_clear_bit(MM81X_HIF_EVT_DATA_TRAFFIC_RESUME_PEND, flags))
+		mm81x_skbq_data_traffic_resume(mors);
+
+	/* Handle chip queue status */
+	if (test_and_clear_bit(MM81X_HIF_EVT_TX_PACKET_FREED_UP_PEND, flags))
+		yaps->chip_queue_full.is_full = false;
+
+	/* Check to see if the queue is full or
+	 * long enough has passed since the queue was full
+	 */
+	if (yaps->chip_queue_full.is_full &&
+	    time_before(jiffies, yaps->chip_queue_full.retry_expiry))
+		goto exit;
+
+	/* Finally TX any data */
+	if (test_and_clear_bit(MM81X_HIF_EVT_TX_DATA_PEND, flags)) {
+		if (mm81x_yaps_tx_data_handler(yaps))
+			set_bit(MM81X_HIF_EVT_TX_DATA_PEND, flags);
+
+		if (yaps->chip_queue_full.is_full) {
+			/* Arm the failsafe in case the freed-up IRQ is lost */
+			yaps->chip_queue_full.retry_expiry =
+				jiffies +
+				msecs_to_jiffies(CHIP_FULL_RECOVERY_TIMEOUT_MS);
+			mod_timer(&yaps->chip_queue_full.timer,
+				  yaps->chip_queue_full.retry_expiry);
+		}
+	}
+
+exit:
+
+	/* Release the bus and allow power save to run again */
+	mm81x_release_bus(mors);
+	mm81x_ps_enable(mors);
+
+	/* Don't requeue work if we are shutting down. */
+	if (yaps->finish)
+		return;
+	/*
+	 * Evaluate all events except MM81X_HIF_EVT_TX_DATA_PEND in case data
+	 * tx queue is full
+	 */
+	if ((*flags) & ~(1 << MM81X_HIF_EVT_TX_DATA_PEND))
+		queue_work(mors->chip_wq, &mors->hif_work);
+	/*
+	 * if data tx queue is not full and the work hasn't been queued let's
+	 * queue it
+	 */
+	else if (!yaps->chip_queue_full.is_full && *flags)
+		queue_work(mors->chip_wq, &mors->hif_work);
+}
+
+/* Total skbs across all to-chip queues still awaiting a TX status */
+int mm81x_yaps_get_tx_status_pending_count(struct mm81x *mors)
+{
+	struct mm81x_yaps *yaps = &mors->hif.u.yaps;
+	int pending;
+	int i;
+
+	pending = skb_queue_len(&yaps->beacon_q.pending) +
+		  skb_queue_len(&yaps->mgmt_q.pending) +
+		  skb_queue_len(&yaps->cmd_q.pending);
+
+	for (i = 0; i < ARRAY_SIZE(yaps->data_tx_qs); i++)
+		pending += skb_queue_len(&yaps->data_tx_qs[i].pending);
+
+	return pending;
+}
+
+/* Total TX skbs buffered in the driver: queued plus status-pending */
+int mm81x_yaps_get_tx_buffered_count(struct mm81x *mors)
+{
+	struct mm81x_yaps *yaps = &mors->hif.u.yaps;
+	int buffered;
+	int i;
+
+	buffered = skb_queue_len(&yaps->beacon_q.skbq) +
+		   skb_queue_len(&yaps->beacon_q.pending);
+	buffered += skb_queue_len(&yaps->mgmt_q.skbq) +
+		    skb_queue_len(&yaps->mgmt_q.pending);
+	buffered += skb_queue_len(&yaps->cmd_q.skbq) +
+		    skb_queue_len(&yaps->cmd_q.pending);
+
+	for (i = 0; i < ARRAY_SIZE(yaps->data_tx_qs); i++)
+		buffered += mm81x_skbq_count_tx_ready(&yaps->data_tx_qs[i]) +
+			    skb_queue_len(&yaps->data_tx_qs[i].pending);
+
+	return buffered;
+}
+
+/*
+ * Chip-queue-full recovery timer: kicks the HIF work so TX is retried
+ * once CHIP_FULL_RECOVERY_TIMEOUT_MS has elapsed (armed in
+ * mm81x_yaps_work when the chip queue is full).
+ */
+static void mm81x_yaps_tx_q_full_timer(struct timer_list *t)
+{
+	struct mm81x_yaps *yaps =
+		timer_container_of(yaps, t, chip_queue_full.timer);
+
+	queue_work(yaps->mors->chip_wq, &yaps->mors->hif_work);
+}
+
+/* Set up (but do not arm) the chip-queue-full recovery timer. */
+static void mm81x_yaps_q_chip_full_timer_init(struct mm81x_yaps *yaps)
+{
+	timer_setup(&yaps->chip_queue_full.timer, mm81x_yaps_tx_q_full_timer,
+		    0);
+}
+
+/* Stop the recovery timer and wait for a running callback to complete. */
+static void mm81x_yaps_q_chip_full_timer_finish(struct mm81x_yaps *yaps)
+{
+	timer_delete_sync_try(&yaps->chip_queue_full.timer);
+}
+
+/*
+ * Initialise the YAPS host interface: bring up the HW backend, allocate
+ * packet buffers (with the bus claimed), create the host-side skb queues
+ * in both directions, and register the deferred-work handlers.
+ *
+ * Returns 0 on success or a negative errno; on failure the HW backend is
+ * torn down again before returning.
+ */
+int mm81x_yaps_init(struct mm81x *mors)
+{
+	int i, ret;
+	struct mm81x_yaps *yaps;
+
+	ret = mm81x_yaps_hw_init(mors);
+	if (ret) {
+		dev_err(mors->dev, "mm81x_yaps_hw_init failed %d", ret);
+		return ret;
+	}
+
+	yaps = &mors->hif.u.yaps;
+	yaps->mors = mors;
+
+	/* Buffer allocation talks to the bus; hold it until setup is done */
+	mm81x_claim_bus(mors);
+
+	ret = mm81x_yaps_alloc_pkt_buffers(yaps);
+	if (ret) {
+		dev_err(mors->dev, "Failed to allocate YAPS packet buffers: %d",
+			ret);
+		mm81x_yaps_hw_finish(mors);
+		mm81x_release_bus(mors);
+		return ret;
+	}
+
+	/* YAPS is bi-directional */
+	mm81x_skbq_init(mors, &yaps->data_rx_q,
+			MM81X_HIF_FLAGS_DATA | MM81X_HIF_FLAGS_DIR_TO_HOST);
+	mm81x_skbq_init(mors, &yaps->beacon_q,
+			MM81X_HIF_FLAGS_DATA | MM81X_HIF_FLAGS_DIR_TO_HOST);
+	mm81x_skbq_init(mors, &yaps->mgmt_q,
+			MM81X_HIF_FLAGS_DATA | MM81X_HIF_FLAGS_DIR_TO_HOST);
+
+	for (i = 0; i < ARRAY_SIZE(yaps->data_tx_qs); i++) {
+		mm81x_skbq_init(mors, &yaps->data_tx_qs[i],
+				MM81X_HIF_FLAGS_DATA |
+				MM81X_HIF_FLAGS_DIR_TO_CHIP);
+	}
+
+	mm81x_skbq_init(mors, &yaps->cmd_q,
+			MM81X_HIF_FLAGS_COMMAND | MM81X_HIF_FLAGS_DIR_TO_CHIP);
+	mm81x_skbq_init(mors, &yaps->cmd_resp_q,
+			MM81X_HIF_FLAGS_COMMAND | MM81X_HIF_FLAGS_DIR_TO_HOST);
+
+	mm81x_yaps_q_chip_full_timer_init(yaps);
+	INIT_WORK(&mors->hif_work, mm81x_yaps_work);
+	INIT_WORK(&mors->tx_stale_work, mm81x_yaps_stale_tx_work);
+	mm81x_release_bus(mors);
+	mm81x_hw_enable_stop_notifications(mors, true);
+	return 0;
+}
+
+/*
+ * Tear down the YAPS host interface. Order matters: IRQs are masked and
+ * yaps->finish is set first so the work handlers stop requeueing
+ * themselves, then the skb queues are drained, the recovery timer is
+ * deleted, outstanding work is flushed, and finally the buffers and the
+ * HW backend are released.
+ */
+void mm81x_yaps_finish(struct mm81x *mors)
+{
+	int i;
+	struct mm81x_yaps *yaps;
+
+	mm81x_yaps_hw_enable_irqs(mors, false);
+
+	yaps = &mors->hif.u.yaps;
+	/* Tells mm81x_yaps_work not to requeue itself */
+	yaps->finish = true;
+
+	mm81x_skbq_finish(&yaps->data_rx_q);
+	mm81x_skbq_finish(&yaps->beacon_q);
+	mm81x_skbq_finish(&yaps->mgmt_q);
+
+	for (i = 0; i < ARRAY_SIZE(yaps->data_tx_qs); i++)
+		mm81x_skbq_finish(&yaps->data_tx_qs[i]);
+
+	mm81x_skbq_finish(&yaps->cmd_q);
+	mm81x_skbq_finish(&yaps->cmd_resp_q);
+
+	mm81x_yaps_q_chip_full_timer_finish(yaps);
+
+	cancel_work_sync(&mors->hif_work);
+	cancel_work_sync(&mors->tx_stale_work);
+
+	mm81x_yaps_free_pkt_buffers(yaps);
+	mm81x_yaps_hw_finish(mors);
+}
+
+/* Drop all queued (not yet transmitted) beacon, mgmt and data TX skbs. */
+void mm81x_yaps_flush_tx_data(struct mm81x *mors)
+{
+	int i;
+	struct mm81x_yaps *yaps = &mors->hif.u.yaps;
+
+	mm81x_skbq_tx_flush(&yaps->beacon_q);
+	mm81x_skbq_tx_flush(&yaps->mgmt_q);
+
+	for (i = 0; i < ARRAY_SIZE(yaps->data_tx_qs); i++)
+		mm81x_skbq_tx_flush(&yaps->data_tx_qs[i]);
+}
+
+/*
+ * Drain the command and command-response queues.
+ *
+ * NOTE(review): gated on yaps->flags & MM81X_HIF_FLAGS_COMMAND, but
+ * nothing in this file appears to set yaps->flags — confirm the flag is
+ * written elsewhere, otherwise this function is a no-op.
+ */
+void mm81x_yaps_flush_cmds(struct mm81x *mors)
+{
+	struct mm81x_yaps *yaps = &mors->hif.u.yaps;
+
+	if (yaps->flags & MM81X_HIF_FLAGS_COMMAND) {
+		mm81x_skbq_finish(&yaps->cmd_q);
+		mm81x_skbq_finish(&yaps->cmd_resp_q);
+	}
+}
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 27/31] wifi: mm81x: add yaps.h
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (25 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 26/31] wifi: mm81x: add yaps.c Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 28/31] wifi: mm81x: add yaps_hw.c Lachlan Hodges
` (4 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/morsemicro/mm81x/yaps.h | 77 ++++++++++++++++++++
1 file changed, 77 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/yaps.h
diff --git a/drivers/net/wireless/morsemicro/mm81x/yaps.h b/drivers/net/wireless/morsemicro/mm81x/yaps.h
new file mode 100644
index 000000000000..2b2bb5f6e399
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/yaps.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+
+#ifndef _MM81X_YAPS_H_
+#define _MM81X_YAPS_H_
+
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include "skbq.h"
+
+#define YAPS_TX_SKBQ_MAX 4
+
+struct mm81x_hif_ops;
+extern const struct mm81x_hif_ops mm81x_yaps_ops;
+
+enum mm81x_yaps_to_chip_q {
+ MM81X_YAPS_TX_Q = 0,
+ MM81X_YAPS_CMD_Q,
+ MM81X_YAPS_BEACON_Q,
+ MM81X_YAPS_MGMT_Q,
+ /* Keep this last */
+ MM81X_YAPS_NUM_TC_Q
+};
+
+struct mm81x_yaps_pkt {
+ struct sk_buff *skb;
+ enum mm81x_yaps_to_chip_q tc_queue;
+};
+
+/* Host-side state for the YAPS chip interface. */
+struct mm81x_yaps {
+	struct mm81x *mors;
+	/* Backend-private data, allocated by mm81x_yaps_hw_init() */
+	struct mm81x_yaps_hw_aux_data *aux_data;
+	/* Transport ops installed by the HW backend */
+	const struct mm81x_yaps_ops *ops;
+	/* MM81X_HIF_FLAGS_* bits — assumption, verify against setters */
+	u8 flags;
+	struct {
+		/* Scratch packet arrays used while (de)serialising batches */
+		struct mm81x_yaps_pkt *to_chip_pkts;
+		struct mm81x_yaps_pkt *from_chip_pkts;
+	} hw;
+
+	/* Chip interface is stopping, new work should not be enqueued. */
+	bool finish;
+
+	struct mm81x_skbq data_tx_qs[YAPS_TX_SKBQ_MAX];
+	struct mm81x_skbq beacon_q;
+	struct mm81x_skbq mgmt_q;
+	struct mm81x_skbq data_rx_q;
+	struct mm81x_skbq cmd_q;
+	struct mm81x_skbq cmd_resp_q;
+
+	/* Backoff state while the chip-side TX queue is full */
+	struct {
+		struct timer_list timer;
+		unsigned long retry_expiry;
+		bool is_full;
+	} chip_queue_full;
+};
+
+struct mm81x_yaps_ops {
+ int (*write_pkts)(struct mm81x_yaps *yaps, struct mm81x_yaps_pkt *pkts,
+ int num_pkts, int *num_pkts_sent);
+ int (*read_pkts)(struct mm81x_yaps *yaps, struct mm81x_yaps_pkt *pkts,
+ int num_pkts_max, int *num_pkts_received);
+ int (*update_status)(struct mm81x_yaps *yaps);
+};
+
+int mm81x_yaps_init(struct mm81x *mors);
+void mm81x_yaps_show(struct mm81x_yaps *yaps, struct seq_file *file);
+void mm81x_yaps_finish(struct mm81x *mors);
+void mm81x_yaps_flush_tx_data(struct mm81x *mors);
+void mm81x_yaps_flush_cmds(struct mm81x *mors);
+void mm81x_yaps_work(struct work_struct *work);
+void mm81x_yaps_stale_tx_work(struct work_struct *work);
+int mm81x_yaps_get_tx_status_pending_count(struct mm81x *mors);
+int mm81x_yaps_get_tx_buffered_count(struct mm81x *mors);
+
+#endif /* !_MM81X_YAPS_H_ */
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 28/31] wifi: mm81x: add yaps_hw.c
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (26 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 27/31] wifi: mm81x: add yaps.h Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 29/31] wifi: mm81x: add yaps_hw.h Lachlan Hodges
` (3 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
.../net/wireless/morsemicro/mm81x/yaps_hw.c | 684 ++++++++++++++++++
1 file changed, 684 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/yaps_hw.c
diff --git a/drivers/net/wireless/morsemicro/mm81x/yaps_hw.c b/drivers/net/wireless/morsemicro/mm81x/yaps_hw.c
new file mode 100644
index 000000000000..b73a71629ba9
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/yaps_hw.c
@@ -0,0 +1,684 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+#include "yaps_hw.h"
+#include "bus.h"
+#include "hif.h"
+#include "yaps.h"
+
+#define YAPS_HW_WINDOW_SIZE_BYTES 32768
+#define YAPS_MAX_PKT_SIZE_BYTES 16128
+#define YAPS_METADATA_PAGE_COUNT 1
+
+#define YAPS_PHANDLE_CORRUPTION_WAR_EXTRA_PAGE 1
+
+#define YAPS_PAGE_SIZE 256
+
+/* Calculate padding required for yaps transaction */
+#define YAPS_CALC_PADDING(_bytes) ((_bytes) & 0x3 ? (4 - ((_bytes) & 0x3)) : 0)
+
+#define YAPS_RESERVED_PAGE_SIZE 256
+
+/*
+ * Yaps data stream delimiter is a 32 bit word with the following fields:
+ *
+ * pkt_size (14 bits) - Packet size not including delimiter or padding
+ * pool_id (3 bits) - Pool that pages should be allocated from.
+ * padding (2 bits) - Padding required to bring packet to word (4 byte)
+ * irq (1 bit ) - Raise a PKT_IRQ on the YDS this is sent to
+ * reserved (5 bits) - Reserved, must write as 0
+ * crc (7 bits) - YAPS CRC
+ */
+
+/* Packet size not including delimiter or padding */
+#define YAPS_DELIM_GET_PKT_SIZE(_delim) \
+ (((_delim) & 0x3FFF) - YAPS_RESERVED_PAGE_SIZE)
+#define YAPS_DELIM_SET_PKT_SIZE(_pkt_size) \
+ (((_pkt_size) & 0x3FFF) + YAPS_RESERVED_PAGE_SIZE)
+#define YAPS_DELIM_GET_PHANDLE_SIZE(_delim) (((_delim) & 0x3FFF))
+
+/* Pool that pages should be allocated from. */
+#define YAPS_DELIM_SET_POOL_ID(_pool_id) (((_pool_id) & 0x7) << 14)
+
+/* Padding required to bring packet to word (4 byte) boundary */
+#define YAPS_DELIM_GET_PADDING(_delim) (((_delim) >> 17) & 0x3)
+#define YAPS_DELIM_SET_PADDING(_padding) (((_padding) & 0x3) << 17)
+
+/* Raise a PKT_IRQ on the YDS this is sent to */
+#define YAPS_DELIM_SET_IRQ(_irq) (((_irq) & 0x1) << 19)
+
+/* YAPS CRC */
+#define YAPS_DELIM_GET_CRC(_delim) (((_delim) >> 25) & 0x7F)
+#define YAPS_DELIM_SET_CRC(_crc) (((_crc) & 0x7F) << 25)
+
+/*
+ * Chip status registers, bulk-read as raw little-endian words by
+ * mm81x_yaps_hw_update_status() and byte-swapped in place afterwards.
+ * A nonzero 'lock' means the chip was mid-update; the read is retried.
+ */
+struct mm81x_yaps_hw_status_registers {
+	/* Allocation pools */
+	u32 tc_tx_pool_num_pages;
+	u32 tc_cmd_pool_num_pages;
+	u32 tc_beacon_pool_num_pages;
+	u32 tc_mgmt_pool_num_pages;
+	u32 fc_rx_pool_num_pages;
+	u32 fc_resp_pool_num_pages;
+	u32 fc_tx_sts_pool_num_pages;
+	u32 fc_aux_pool_num_pages;
+
+	/* To chip/From chip queues for YDS/YSL */
+	u32 tc_tx_num_pkts;
+	u32 tc_cmd_num_pkts;
+	u32 tc_beacon_num_pkts;
+	u32 tc_mgmt_num_pkts;
+	u32 fc_num_pkts;
+	u32 fc_done_num_pkts;
+	u32 fc_rx_bytes_in_queue;
+	u32 tc_delim_crc_fail_detected;
+	u32 fc_host_ysl_status;
+	u32 lock;
+} __packed __aligned(8);
+
+/* Host-side cache of the on-chip YAPS layout plus transfer buffers. */
+struct mm81x_yaps_hw_aux_data {
+	/* Single-bit trylock serialising all chip accesses */
+	unsigned long access_lock;
+
+	u32 yds_addr;
+	u32 ysl_addr;
+	u32 status_regs_addr;
+
+	/*
+	 * Alloc pool sizes.
+	 *
+	 * fc_rx_pool_size is a __le16 in struct mm81x_yaps_hw_table and is
+	 * assigned via __le16_to_cpu() in
+	 * mm81x_yaps_hw_fill_aux_data_from_hw_tbl(); it was declared u8
+	 * here, silently truncating any value above 255. Widened to u16 to
+	 * match the wire format.
+	 */
+	u16 tc_tx_pool_size;
+	u16 tc_cmd_pool_size;
+	u16 fc_rx_pool_size;
+	u8 tc_beacon_pool_size;
+	u8 tc_mgmt_pool_size;
+	u8 fc_resp_pool_size;
+	u8 fc_tx_sts_pool_size;
+	u8 fc_aux_pool_size;
+
+	/* To chip/from chip queue sizes */
+	u8 tc_tx_q_size;
+	u8 tc_cmd_q_size;
+	u8 tc_beacon_q_size;
+	u8 tc_mgmt_q_size;
+	u8 fc_q_size;
+	u8 fc_done_q_size;
+
+	u16 reserved_yaps_page_size;
+
+	/* Buffers to/from chip to support large contiguous reads/writes */
+	char *to_chip_buffer;
+	char *from_chip_buffer;
+
+	/* Status registers for queues and alloc pools on chip */
+	struct mm81x_yaps_hw_status_registers status_regs;
+};
+
+/*
+ * Non-blocking trylock for chip access.
+ *
+ * Returns 0 on success or -EBUSY if the lock is already held. The
+ * previous bare -1 (== -EPERM) propagated out of the .write_pkts /
+ * .read_pkts ops and gave callers a misleading errno.
+ */
+static int mm81x_yaps_hw_lock(struct mm81x_yaps *yaps)
+{
+	if (test_and_set_bit_lock(0, &yaps->aux_data->access_lock))
+		return -EBUSY;
+	return 0;
+}
+
+/* Release the chip-access lock taken by mm81x_yaps_hw_lock(). */
+static void mm81x_yaps_hw_unlock(struct mm81x_yaps *yaps)
+{
+	clear_bit_unlock(0, &yaps->aux_data->access_lock);
+}
+
+/*
+ * Copy the little-endian layout table read from the chip into the
+ * host-native aux_data cache.
+ *
+ * NOTE(review): tc_tx_pool_size and fc_rx_pool_size are 16-bit on the
+ * wire — the corresponding aux_data fields must be at least u16 or the
+ * __le16_to_cpu() results are truncated.
+ */
+static void
+mm81x_yaps_hw_fill_aux_data_from_hw_tbl(struct mm81x_yaps_hw_aux_data *a,
+					struct mm81x_yaps_hw_table *t)
+{
+	a->ysl_addr = __le32_to_cpu(t->ysl_addr);
+	a->yds_addr = __le32_to_cpu(t->yds_addr);
+	a->status_regs_addr = __le32_to_cpu(t->status_regs_addr);
+	a->tc_tx_pool_size = __le16_to_cpu(t->tc_tx_pool_size);
+	a->fc_rx_pool_size = __le16_to_cpu(t->fc_rx_pool_size);
+	a->tc_cmd_pool_size = t->tc_cmd_pool_size;
+	a->tc_beacon_pool_size = t->tc_beacon_pool_size;
+	a->tc_mgmt_pool_size = t->tc_mgmt_pool_size;
+	a->fc_resp_pool_size = t->fc_resp_pool_size;
+	a->fc_tx_sts_pool_size = t->fc_tx_sts_pool_size;
+	a->fc_aux_pool_size = t->fc_aux_pool_size;
+	a->tc_tx_q_size = t->tc_tx_q_size;
+	a->tc_cmd_q_size = t->tc_cmd_q_size;
+	a->tc_beacon_q_size = t->tc_beacon_q_size;
+	a->tc_mgmt_q_size = t->tc_mgmt_q_size;
+	a->fc_q_size = t->fc_q_size;
+	a->fc_done_q_size = t->fc_done_q_size;
+	a->reserved_yaps_page_size = le16_to_cpu(t->yaps_reserved_page_size);
+}
+
+/*
+ * Compute the 7-bit YAPS delimiter CRC over the 25 non-CRC bits of the
+ * delimiter word, feeding the bytes MSB-first into crc7_be().
+ */
+static u8 mm81x_yaps_hw_crc(u32 word)
+{
+	u8 crc = 0;
+	u8 byte;
+	int i;
+
+	/* Mask to look at only non-CRC bits (bits 0..24) */
+	word &= 0x1ffffff;
+
+	for (i = 0; i < 4; i++) {
+		byte = (word >> 24) & 0xff;
+		crc = crc7_be(crc, &byte, 1);
+		word <<= 8;
+	}
+
+	/* crc7_be returns the CRC in the top 7 bits; right-align it */
+	return crc >> 1;
+}
+
+/*
+ * Build a to-chip stream delimiter word for a packet of @size bytes
+ * destined for pool @pool_id; @irq requests a PKT_IRQ on arrival.
+ * The CRC covers all previously-set fields (bits 0..24).
+ */
+static u32 mm81x_write_pkts_h_build_delim(struct mm81x_yaps *yaps,
+					  unsigned int size, u8 pool_id,
+					  bool irq)
+{
+	u32 delim = 0;
+
+	delim |= YAPS_DELIM_SET_PKT_SIZE(size);
+	delim |= YAPS_DELIM_SET_PADDING(YAPS_CALC_PADDING(size));
+	delim |= YAPS_DELIM_SET_POOL_ID(pool_id);
+	delim |= YAPS_DELIM_SET_IRQ(irq);
+	delim |= YAPS_DELIM_SET_CRC(mm81x_yaps_hw_crc(delim));
+	return delim;
+}
+
+/* Enable/disable the two from-chip YAPS interrupt sources. */
+void mm81x_yaps_hw_enable_irqs(struct mm81x *mors, bool enable)
+{
+	mm81x_hw_irq_enable(mors, MM81X_INT_YAPS_FC_PKT_WAITING_IRQN, enable);
+	mm81x_hw_irq_enable(mors, MM81X_INT_YAPS_FC_PACKET_FREED_UP_IRQN,
+			    enable);
+}
+
+/*
+ * Consume the layout table read from the chip, populate the aux_data
+ * cache, and enable YAPS interrupts now that the layout is known.
+ */
+void mm81x_yaps_hw_read_table(struct mm81x *mors,
+			      struct mm81x_yaps_hw_table *tbl_ptr)
+{
+	mm81x_yaps_hw_fill_aux_data_from_hw_tbl(mors->hif.u.yaps.aux_data,
+						tbl_ptr);
+	mm81x_yaps_hw_enable_irqs(mors, true);
+}
+
+/*
+ * Number of chip pages a packet of @size_bytes consumes: payload plus the
+ * reserved header region, rounded up to pages, plus the metadata page and
+ * one extra page for the phandle-corruption workaround.
+ */
+static unsigned int mm81x_write_pkts_h_pages_required(struct mm81x_yaps *yaps,
+						      unsigned int size_bytes)
+{
+	/* Always account for the first metadata page */
+	return DIV_ROUND_UP(size_bytes +
+			    yaps->aux_data->reserved_yaps_page_size,
+			    YAPS_PAGE_SIZE) +
+	       YAPS_METADATA_PAGE_COUNT +
+	       YAPS_PHANDLE_CORRUPTION_WAR_EXTRA_PAGE;
+}
+
+/*
+ * Checks if a single pkt will fit in the chip using the pool/alloc holding
+ * information from the last status register read.
+ *
+ * When @update is true and the packet fits, the cached counters are
+ * adjusted as if the packet had already been sent, so that subsequent
+ * calls within the same batch account for it.
+ *
+ * Fix: the default (invalid queue) case previously fell through with
+ * pool_pages_avail/pkts_in_queue still NULL and dereferenced them below;
+ * it now rejects the packet. The cached counters are u32, so point at
+ * them with u32 * rather than int *, and compute the queue headroom in
+ * signed arithmetic so a chip-reported overcount cannot wrap.
+ */
+static bool mm81x_write_pkts_h_will_fit(struct mm81x_yaps *yaps,
+					struct mm81x_yaps_pkt *pkt, bool update)
+{
+	bool will_fit = true;
+	const int pages_required =
+		mm81x_write_pkts_h_pages_required(yaps, pkt->skb->len);
+	u32 *pool_pages_avail;
+	u32 *pkts_in_queue;
+	int queue_pkts_avail;
+
+	switch (pkt->tc_queue) {
+	case MM81X_YAPS_TX_Q:
+		pool_pages_avail =
+			&yaps->aux_data->status_regs.tc_tx_pool_num_pages;
+		pkts_in_queue = &yaps->aux_data->status_regs.tc_tx_num_pkts;
+		queue_pkts_avail =
+			(int)yaps->aux_data->tc_tx_q_size -
+			(int)*pkts_in_queue;
+		break;
+	case MM81X_YAPS_CMD_Q:
+		pool_pages_avail =
+			&yaps->aux_data->status_regs.tc_cmd_pool_num_pages;
+		pkts_in_queue = &yaps->aux_data->status_regs.tc_cmd_num_pkts;
+		queue_pkts_avail =
+			(int)yaps->aux_data->tc_cmd_q_size -
+			(int)*pkts_in_queue;
+		break;
+	case MM81X_YAPS_BEACON_Q:
+		pool_pages_avail =
+			&yaps->aux_data->status_regs.tc_beacon_pool_num_pages;
+		pkts_in_queue = &yaps->aux_data->status_regs.tc_beacon_num_pkts;
+		queue_pkts_avail =
+			(int)yaps->aux_data->tc_beacon_q_size -
+			(int)*pkts_in_queue;
+		break;
+	case MM81X_YAPS_MGMT_Q:
+		pool_pages_avail =
+			&yaps->aux_data->status_regs.tc_mgmt_pool_num_pages;
+		pkts_in_queue = &yaps->aux_data->status_regs.tc_mgmt_num_pkts;
+		queue_pkts_avail =
+			(int)yaps->aux_data->tc_mgmt_q_size -
+			(int)*pkts_in_queue;
+		break;
+	default:
+		/* Was a NULL dereference of pool_pages_avail below */
+		dev_err(yaps->mors->dev, "yaps invalid tc queue");
+		return false;
+	}
+
+	WARN_ON(queue_pkts_avail < 0);
+
+	if (pages_required > (int)*pool_pages_avail)
+		will_fit = false;
+
+	if (queue_pkts_avail <= 0)
+		will_fit = false;
+
+	if (will_fit && update) {
+		*pool_pages_avail -= pages_required;
+		*pkts_in_queue += 1;
+	}
+
+	return will_fit;
+}
+
+/*
+ * Validate a packet before serialisation: oversize -> -EMSGSIZE, bad
+ * destination queue -> -EINVAL, no room on chip -> -EAGAIN (and on
+ * success the cached chip counters are charged via will_fit(update=true)).
+ */
+static int mm81x_write_pkts_h_err_check(struct mm81x_yaps *yaps,
+					struct mm81x_yaps_pkt *pkt)
+{
+	if (pkt->skb->len + yaps->aux_data->reserved_yaps_page_size >
+	    YAPS_MAX_PKT_SIZE_BYTES)
+		return -EMSGSIZE;
+	/*
+	 * MM81X_YAPS_NUM_TC_Q is the queue count, not a valid id: use >=
+	 * (was '>', which let tc_queue == NUM_TC_Q through to the queue
+	 * switch in mm81x_write_pkts_h_will_fit()).
+	 */
+	if (pkt->tc_queue >= MM81X_YAPS_NUM_TC_Q)
+		return -EINVAL;
+	if (!mm81x_write_pkts_h_will_fit(yaps, pkt, true))
+		return -EAGAIN;
+
+	return 0;
+}
+
+/*
+ * Serialise up to @num_pkts packets (delimiter + payload each) into the
+ * bus-aligned to_chip_buffer and write them to the chip's YDS in as few
+ * bus transactions as possible. *num_pkts_sent reports how many packets
+ * actually went out, which may be fewer than @num_pkts if a later packet
+ * fails its pre-check — the batch built so far is still flushed on exit.
+ */
+static int mm81x_yaps_hw_write_pkts(struct mm81x_yaps *yaps,
+				    struct mm81x_yaps_pkt *pkts, int num_pkts,
+				    int *num_pkts_sent)
+{
+	int ret = 0;
+	int i;
+	u32 delim = 0;
+	int tx_len;
+	int batch_txn_len = 0;
+	int pkts_pending = 0;
+	bool delim_irq = false;
+	char *to_chip_buffer_aligned =
+		PTR_ALIGN(yaps->aux_data->to_chip_buffer,
+			  mm81x_bus_get_alignment(yaps->mors));
+	char *write_buf = to_chip_buffer_aligned;
+
+	ret = mm81x_yaps_hw_lock(yaps);
+	if (ret) {
+		dev_dbg(yaps->mors->dev, "yaps lock failed %d", ret);
+		return ret;
+	}
+
+	*num_pkts_sent = 0;
+
+	/* Check packet conditions (also charges the cached chip counters) */
+	ret = mm81x_write_pkts_h_err_check(yaps, &pkts[0]);
+	if (ret)
+		goto exit;
+
+	/* Batch packets into larger transactions */
+	for (i = 0; i < num_pkts; ++i) {
+		u32 pkt_size =
+			pkts[i].skb->len + YAPS_CALC_PADDING(pkts[i].skb->len);
+		tx_len = pkt_size + sizeof(delim);
+
+		/*
+		 * Send when we have reached window size, don't split pkt over
+		 * boundary
+		 */
+		if ((batch_txn_len + tx_len) > YAPS_HW_WINDOW_SIZE_BYTES) {
+			ret = mm81x_dm_write(yaps->mors,
+					     yaps->aux_data->yds_addr,
+					     to_chip_buffer_aligned,
+					     batch_txn_len);
+
+			batch_txn_len = 0;
+			if (ret)
+				goto exit;
+			write_buf = to_chip_buffer_aligned;
+			*num_pkts_sent += pkts_pending;
+			pkts_pending = 0;
+		}
+
+		if ((i + 1) == num_pkts) {
+			/* The last packet in the queue has IRQ set */
+			delim_irq = true;
+		} else {
+			/*
+			 * Since this is not the last packet, we can check for
+			 * the next one. In case of errors in the next packet
+			 * set the IRQ
+			 */
+			ret = mm81x_write_pkts_h_err_check(yaps, &pkts[i + 1]);
+			if (ret)
+				delim_irq = true;
+		}
+
+		/* Build stream header */
+		delim = mm81x_write_pkts_h_build_delim(
+			yaps, pkt_size, pkts[i].tc_queue, delim_irq);
+		*((__le32 *)write_buf) = cpu_to_le32(delim);
+		memcpy(write_buf + sizeof(delim), pkts[i].skb->data,
+		       pkts[i].skb->len);
+
+		write_buf += tx_len;
+		batch_txn_len += tx_len;
+		pkts_pending++;
+
+		/* Next packet failed its pre-check: stop after this one */
+		if (ret)
+			goto exit;
+	}
+
+exit:
+	/* Flush whatever was batched before the error/end of list */
+	if (batch_txn_len > 0) {
+		ret = mm81x_dm_write(yaps->mors, yaps->aux_data->yds_addr,
+				     to_chip_buffer_aligned, batch_txn_len);
+		*num_pkts_sent += pkts_pending;
+	}
+
+	mm81x_yaps_hw_unlock(yaps);
+	return ret;
+}
+
+/*
+ * Sanity-check a from-chip stream delimiter: CRC must match, size must be
+ * nonzero and within bounds, and the declared padding must be exactly the
+ * padding that size requires.
+ */
+static bool mm81x_read_pkts_h_is_valid_delim(u32 delim)
+{
+	u8 calc_crc = mm81x_yaps_hw_crc(delim);
+	int pkt_size = YAPS_DELIM_GET_PHANDLE_SIZE(delim);
+	int padding = YAPS_DELIM_GET_PADDING(delim);
+
+	if (calc_crc != YAPS_DELIM_GET_CRC(delim))
+		return false;
+
+	if (pkt_size == 0)
+		return false;
+
+	if ((pkt_size + padding) > YAPS_MAX_PKT_SIZE_BYTES)
+		return false;
+
+	/* Pkt length + padding should not require more padding */
+	if (YAPS_CALC_PADDING(pkt_size) != padding)
+		return false;
+
+	return true;
+}
+
+/*
+ * Bytes waiting in the from-chip RX stream according to the last status
+ * register read, or -EIO if any cached counter is implausibly large.
+ *
+ * NOTE(review): delim_overhead and reserved_bytes are only overflow-
+ * checked, never added to (or subtracted from) the returned count —
+ * confirm whether fc_rx_bytes_in_queue already includes delimiter and
+ * reserved-page bytes, otherwise the stream reads in
+ * mm81x_yaps_hw_read_pkts() may be undersized.
+ */
+static int mm81x_read_pkts_h_bytes_remaining(struct mm81x_yaps *yaps)
+{
+	u32 bytes_in_queue = yaps->aux_data->status_regs.fc_rx_bytes_in_queue;
+	u32 delim_overhead =
+		yaps->aux_data->status_regs.fc_num_pkts * sizeof(u32);
+	u32 reserved_bytes = yaps->aux_data->status_regs.fc_num_pkts *
+			     yaps->aux_data->reserved_yaps_page_size;
+
+	if (WARN_ON(bytes_in_queue > INT_MAX) ||
+	    WARN_ON(delim_overhead > INT_MAX) ||
+	    WARN_ON(reserved_bytes > INT_MAX))
+		return -EIO;
+
+	return (int)bytes_in_queue;
+}
+
+/*
+ * Read the from-chip YSL stream into the bus-aligned from_chip_buffer and
+ * split it into up to @num_pkts_max skbs in @pkts. Each packet on the
+ * stream is a delimiter word followed by payload and padding.
+ *
+ * Returns 0 on success, -EAGAIN if more than one window of data remained
+ * (caller should read again), or a negative errno. *num_pkts_received is
+ * the number of skbs actually produced.
+ */
+static int mm81x_yaps_hw_read_pkts(struct mm81x_yaps *yaps,
+				   struct mm81x_yaps_pkt *pkts,
+				   int num_pkts_max, int *num_pkts_received)
+{
+	int ret;
+	int i = 0;
+	char *from_chip_buffer_aligned =
+		PTR_ALIGN(yaps->aux_data->from_chip_buffer,
+			  mm81x_bus_get_alignment(yaps->mors));
+	char *read_ptr = from_chip_buffer_aligned;
+	int bytes_remaining = mm81x_read_pkts_h_bytes_remaining(yaps);
+	bool again = false;
+
+	*num_pkts_received = 0;
+
+	if (num_pkts_max == 0 || bytes_remaining == 0)
+		return 0;
+	if (bytes_remaining < 0)
+		return bytes_remaining;
+
+	/* Cap at one window; signal the caller to come back for the rest */
+	if (bytes_remaining > YAPS_HW_WINDOW_SIZE_BYTES) {
+		bytes_remaining = YAPS_HW_WINDOW_SIZE_BYTES;
+		again = true;
+	}
+
+	/*
+	 * This is more coarse-grained than it needs to be - once the data
+	 * is read into a local buffer the lock can be released, however
+	 * access to from_chip_buffer will need to be protected with its
+	 * own lock
+	 */
+	ret = mm81x_yaps_hw_lock(yaps);
+	if (ret) {
+		dev_dbg(yaps->mors->dev, "yaps lock failed %d", ret);
+		return ret;
+	}
+
+	/* Read all available packets to the buffer */
+	ret = mm81x_dm_read(yaps->mors, yaps->aux_data->ysl_addr,
+			    from_chip_buffer_aligned, bytes_remaining);
+
+	if (ret)
+		goto exit;
+
+	/* Split serialised packets from buffer */
+	while (i < num_pkts_max && bytes_remaining > 0) {
+		u32 delim;
+		int total_len;
+		int pkt_size;
+
+		delim = le32_to_cpu(*((__le32 *)read_ptr));
+		read_ptr += sizeof(delim);
+		bytes_remaining -= sizeof(delim);
+
+		/* End of stream */
+		if (!delim)
+			break;
+
+		if (!mm81x_read_pkts_h_is_valid_delim(delim)) {
+			/*
+			 * This will start a hunt for a valid delimiter. Given
+			 * the CRC is only 7 bit it's possible to find an
+			 * invalid block with a valid delimiter, leading to
+			 * desynchronisation.
+			 */
+			dev_warn(yaps->mors->dev, "yaps invalid delim");
+			break;
+		}
+
+		/* Total length in chip */
+		pkt_size = YAPS_DELIM_GET_PKT_SIZE(delim);
+		total_len = pkt_size + YAPS_DELIM_GET_PADDING(delim);
+
+		/* Caller should hand us cleared slots */
+		if (pkts[i].skb)
+			dev_err(yaps->mors->dev, "yaps packet leak");
+
+		/* SKB doesn't want padding */
+		pkts[i].skb = dev_alloc_skb(pkt_size);
+		if (!pkts[i].skb) {
+			ret = -ENOMEM;
+			dev_err(yaps->mors->dev, "yaps no mem for skb");
+			goto exit;
+		}
+		skb_put(pkts[i].skb, pkt_size);
+
+		if (total_len <= bytes_remaining) {
+			/* Whole packet is in the buffer */
+			memcpy(pkts[i].skb->data, read_ptr, pkt_size);
+			read_ptr += total_len;
+			bytes_remaining -= total_len;
+		} else {
+			/*
+			 * Packet straddles the end of what was read: copy
+			 * the first part, then fetch the remainder from the
+			 * chip and reuse the start of the buffer for it.
+			 */
+			const int read_overhang_len =
+				total_len - bytes_remaining;
+			const int pkt_overhang_len = pkt_size - bytes_remaining;
+
+			memcpy(pkts[i].skb->data, read_ptr, bytes_remaining);
+			read_ptr = from_chip_buffer_aligned;
+
+			ret = mm81x_dm_read(
+				yaps->mors,
+				/* Offset by 4 to avoid retry logic */
+				yaps->aux_data->ysl_addr + 4, read_ptr,
+				read_overhang_len);
+
+			if (ret)
+				goto exit;
+
+			memcpy(pkts[i].skb->data + bytes_remaining, read_ptr,
+			       pkt_overhang_len);
+			read_ptr += read_overhang_len;
+			bytes_remaining = 0;
+		}
+
+		*num_pkts_received += 1;
+		i++;
+	}
+
+	/* Only reached on success: more data is waiting on chip */
+	if (again)
+		ret = -EAGAIN;
+
+exit:
+	mm81x_yaps_hw_unlock(yaps);
+	return ret;
+}
+
+/*
+ * Refresh the cached chip status registers. The block is re-read while
+ * the chip-side 'lock' word is nonzero (chip mid-update), bounded by a
+ * 100ms timeout, then every field is byte-swapped in place. A to-chip
+ * delimiter CRC failure reported by the chip is unrecoverable -> -EIO.
+ *
+ * Fix: the timeout message previously printed 'ret', which at that point
+ * still held the last *successful* read's return value (0) — it now
+ * reports -ETIMEDOUT.
+ */
+static int mm81x_yaps_hw_update_status(struct mm81x_yaps *yaps)
+{
+	int ret;
+	int tc_total_pkt_count;
+	unsigned long reg_read_timeout;
+
+	struct mm81x_yaps_hw_status_registers *r = &yaps->aux_data->status_regs;
+
+	ret = mm81x_yaps_hw_lock(yaps);
+	if (ret) {
+		dev_dbg(yaps->mors->dev, "yaps lock failed %d", ret);
+		return ret;
+	}
+
+	reg_read_timeout = jiffies + msecs_to_jiffies(100);
+	do {
+		if (time_after(jiffies, reg_read_timeout)) {
+			ret = -ETIMEDOUT;
+			dev_err(yaps->mors->dev,
+				"timed out reading status registers: %d", ret);
+			break;
+		}
+
+		ret = mm81x_dm_read(yaps->mors,
+				    yaps->aux_data->status_regs_addr, (u8 *)r,
+				    sizeof(*r));
+		/* r->lock nonzero (any endianness): chip mid-update, retry */
+	} while (!ret && r->lock);
+
+	if (ret) {
+		if (ret != -ENODEV && ret != -ETIMEDOUT) {
+			dev_err(yaps->mors->dev,
+				"error reading yaps status registers: %d", ret);
+		}
+		goto exit_unlock;
+	}
+
+	/* Convert the raw little-endian block to host byte order in place */
+	r->tc_tx_pool_num_pages = mm81x_fle32_to_cpu(r->tc_tx_pool_num_pages);
+	r->tc_cmd_pool_num_pages = mm81x_fle32_to_cpu(r->tc_cmd_pool_num_pages);
+	r->tc_beacon_pool_num_pages =
+		mm81x_fle32_to_cpu(r->tc_beacon_pool_num_pages);
+	r->tc_mgmt_pool_num_pages =
+		mm81x_fle32_to_cpu(r->tc_mgmt_pool_num_pages);
+	r->fc_rx_pool_num_pages = mm81x_fle32_to_cpu(r->fc_rx_pool_num_pages);
+	r->fc_resp_pool_num_pages =
+		mm81x_fle32_to_cpu(r->fc_resp_pool_num_pages);
+	r->fc_tx_sts_pool_num_pages =
+		mm81x_fle32_to_cpu(r->fc_tx_sts_pool_num_pages);
+	r->fc_aux_pool_num_pages = mm81x_fle32_to_cpu(r->fc_aux_pool_num_pages);
+	r->tc_tx_num_pkts = mm81x_fle32_to_cpu(r->tc_tx_num_pkts);
+	r->tc_cmd_num_pkts = mm81x_fle32_to_cpu(r->tc_cmd_num_pkts);
+	r->tc_beacon_num_pkts = mm81x_fle32_to_cpu(r->tc_beacon_num_pkts);
+	r->tc_mgmt_num_pkts = mm81x_fle32_to_cpu(r->tc_mgmt_num_pkts);
+	r->fc_num_pkts = mm81x_fle32_to_cpu(r->fc_num_pkts);
+	r->fc_done_num_pkts = mm81x_fle32_to_cpu(r->fc_done_num_pkts);
+	r->fc_rx_bytes_in_queue = mm81x_fle32_to_cpu(r->fc_rx_bytes_in_queue);
+	r->tc_delim_crc_fail_detected =
+		mm81x_fle32_to_cpu(r->tc_delim_crc_fail_detected);
+	r->lock = mm81x_fle32_to_cpu(r->lock);
+	r->fc_host_ysl_status = mm81x_fle32_to_cpu(r->fc_host_ysl_status);
+
+	tc_total_pkt_count = r->tc_tx_num_pkts + r->tc_cmd_num_pkts +
+			     r->tc_beacon_num_pkts + r->tc_mgmt_num_pkts;
+
+	if (r->tc_delim_crc_fail_detected) {
+		/*
+		 * Host and chip have become desynchronised. This can happen if
+		 * the chip crashes during a YAPS transaction. We cannot
+		 * recover from this.
+		 */
+		dev_err(yaps->mors->dev,
+			"to-chip yaps delimiter CRC fail, pkt_count=%d",
+			tc_total_pkt_count);
+		ret = -EIO;
+	}
+
+	/* Data waiting on chip: flag RX work for the HIF handler */
+	if (mm81x_read_pkts_h_bytes_remaining(yaps))
+		set_bit(MM81X_HIF_EVT_RX_PEND, &yaps->mors->hif.event_flags);
+
+exit_unlock:
+	mm81x_yaps_hw_unlock(yaps);
+	return ret;
+}
+
+/* HW transport backend installed into yaps->ops by mm81x_yaps_hw_init() */
+static const struct mm81x_yaps_ops mm81x_yaps_hw_ops = {
+	.write_pkts = mm81x_yaps_hw_write_pkts,
+	.read_pkts = mm81x_yaps_hw_read_pkts,
+	.update_status = mm81x_yaps_hw_update_status,
+};
+
+/*
+ * Allocate the HW backend state and the two one-window transfer buffers
+ * (over-allocated by alignment-1 so PTR_ALIGN can produce a bus-aligned
+ * pointer), then install the HW ops. Returns 0 or -ENOMEM; partial
+ * allocations are cleaned up via mm81x_yaps_hw_finish().
+ */
+int mm81x_yaps_hw_init(struct mm81x *mors)
+{
+	int ret = 0;
+	struct mm81x_yaps *yaps = NULL;
+	int aux_data_len = sizeof(struct mm81x_yaps_hw_aux_data);
+	int alignment = mm81x_bus_get_alignment(mors);
+
+	yaps = &mors->hif.u.yaps;
+	yaps->aux_data = kzalloc(aux_data_len, GFP_KERNEL);
+	if (!yaps->aux_data) {
+		ret = -ENOMEM;
+		goto err_exit;
+	}
+
+	yaps->aux_data->to_chip_buffer =
+		kzalloc(YAPS_HW_WINDOW_SIZE_BYTES + alignment - 1, GFP_KERNEL);
+	if (!yaps->aux_data->to_chip_buffer) {
+		ret = -ENOMEM;
+		goto err_exit;
+	}
+
+	yaps->aux_data->from_chip_buffer =
+		kzalloc(YAPS_HW_WINDOW_SIZE_BYTES + alignment - 1, GFP_KERNEL);
+	if (!yaps->aux_data->from_chip_buffer) {
+		ret = -ENOMEM;
+		goto err_exit;
+	}
+
+	/* status_regs is read into directly; misalignment is only warned */
+	if (!IS_ALIGNED((uintptr_t)&yaps->aux_data->status_regs, alignment)) {
+		dev_warn(mors->dev,
+			 "Status registers are not aligned to %d bytes",
+			 alignment);
+	}
+
+	yaps->ops = &mm81x_yaps_hw_ops;
+	return ret;
+
+err_exit:
+	mm81x_yaps_hw_finish(mors);
+	return ret;
+}
+
+/*
+ * Free the HW backend state. Safe to call on a partially-initialised
+ * backend (used as the error path of mm81x_yaps_hw_init()); pointers are
+ * NULLed so a repeat call is harmless.
+ */
+void mm81x_yaps_hw_finish(struct mm81x *mors)
+{
+	struct mm81x_yaps *yaps;
+
+	yaps = &mors->hif.u.yaps;
+	if (yaps->aux_data) {
+		kfree(yaps->aux_data->from_chip_buffer);
+		yaps->aux_data->from_chip_buffer = NULL;
+		kfree(yaps->aux_data->to_chip_buffer);
+		yaps->aux_data->to_chip_buffer = NULL;
+		kfree(yaps->aux_data);
+		yaps->aux_data = NULL;
+	}
+}
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 29/31] wifi: mm81x: add yaps_hw.h
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (27 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 28/31] wifi: mm81x: add yaps_hw.c Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 30/31] wifi: mm81x: add Kconfig and Makefile Lachlan Hodges
` (2 subsequent siblings)
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
.../net/wireless/morsemicro/mm81x/yaps_hw.h | 52 +++++++++++++++++++
1 file changed, 52 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/mm81x/yaps_hw.h
diff --git a/drivers/net/wireless/morsemicro/mm81x/yaps_hw.h b/drivers/net/wireless/morsemicro/mm81x/yaps_hw.h
new file mode 100644
index 000000000000..89e15375aabc
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/yaps_hw.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2026 Morse Micro
+ */
+
+#ifndef _MM81X_YAPS_HW_H_
+#define _MM81X_YAPS_HW_H_
+
+#include <linux/types.h>
+#include <linux/crc7.h>
+
+#define MM81X_INT_YAPS_FC_PKT_WAITING_IRQN 0
+#define MM81X_INT_YAPS_FC_PACKET_FREED_UP_IRQN 1
+
+struct mm81x_yaps_hw_table {
+ /* NOTE: We need these padding bytes for yaps to work */
+ u8 padding[4];
+ __le32 ysl_addr;
+ __le32 yds_addr;
+ __le32 status_regs_addr;
+
+ /* Alloc pool sizes */
+ __le16 tc_tx_pool_size;
+ __le16 fc_rx_pool_size;
+ u8 tc_cmd_pool_size;
+ u8 tc_beacon_pool_size;
+ u8 tc_mgmt_pool_size;
+ u8 fc_resp_pool_size;
+ u8 fc_tx_sts_pool_size;
+ u8 fc_aux_pool_size;
+
+ /* To chip/from chip queue sizes */
+ u8 tc_tx_q_size;
+ u8 tc_cmd_q_size;
+ u8 tc_beacon_q_size;
+ u8 tc_mgmt_q_size;
+ u8 fc_q_size;
+ u8 fc_done_q_size;
+
+ __le16 yaps_reserved_page_size;
+ __le16 reserved_unused;
+} __packed;
+
+struct mm81x;
+
+void mm81x_yaps_hw_enable_irqs(struct mm81x *mors, bool enable);
+int mm81x_yaps_hw_init(struct mm81x *mors);
+void mm81x_yaps_hw_finish(struct mm81x *mors);
+void mm81x_yaps_hw_read_table(struct mm81x *mors,
+ struct mm81x_yaps_hw_table *tbl_ptr);
+
+#endif /* !_MM81X_YAPS_HW_H_ */
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread* [PATCH wireless-next v2 30/31] wifi: mm81x: add Kconfig and Makefile
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (28 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 29/31] wifi: mm81x: add yaps_hw.h Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 4:55 ` [PATCH wireless-next v2 31/31] wifi: mm81x: add MAINTAINERS entry Lachlan Hodges
2026-04-30 5:43 ` [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes, Lachlan Hodges, Dan Callaghan, Arien Judge
Cc: ayman.grais, linux-wireless, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
drivers/net/wireless/Kconfig | 1 +
drivers/net/wireless/Makefile | 1 +
drivers/net/wireless/morsemicro/Kconfig | 15 ++++++++++++
drivers/net/wireless/morsemicro/Makefile | 2 ++
drivers/net/wireless/morsemicro/mm81x/Kconfig | 24 +++++++++++++++++++
.../net/wireless/morsemicro/mm81x/Makefile | 21 ++++++++++++++++
6 files changed, 64 insertions(+)
create mode 100644 drivers/net/wireless/morsemicro/Kconfig
create mode 100644 drivers/net/wireless/morsemicro/Makefile
create mode 100644 drivers/net/wireless/morsemicro/mm81x/Kconfig
create mode 100644 drivers/net/wireless/morsemicro/mm81x/Makefile
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index c6599594dc99..baddadf9ec3c 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -27,6 +27,7 @@ source "drivers/net/wireless/intersil/Kconfig"
source "drivers/net/wireless/marvell/Kconfig"
source "drivers/net/wireless/mediatek/Kconfig"
source "drivers/net/wireless/microchip/Kconfig"
+source "drivers/net/wireless/morsemicro/Kconfig"
source "drivers/net/wireless/purelifi/Kconfig"
source "drivers/net/wireless/ralink/Kconfig"
source "drivers/net/wireless/realtek/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index e1c4141c6004..d74f817b37de 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_WLAN_VENDOR_INTERSIL) += intersil/
obj-$(CONFIG_WLAN_VENDOR_MARVELL) += marvell/
obj-$(CONFIG_WLAN_VENDOR_MEDIATEK) += mediatek/
obj-$(CONFIG_WLAN_VENDOR_MICROCHIP) += microchip/
+obj-$(CONFIG_WLAN_VENDOR_MORSEMICRO) += morsemicro/
obj-$(CONFIG_WLAN_VENDOR_PURELIFI) += purelifi/
obj-$(CONFIG_WLAN_VENDOR_QUANTENNA) += quantenna/
obj-$(CONFIG_WLAN_VENDOR_RALINK) += ralink/
diff --git a/drivers/net/wireless/morsemicro/Kconfig b/drivers/net/wireless/morsemicro/Kconfig
new file mode 100644
index 000000000000..cb0653c77d87
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config WLAN_VENDOR_MORSEMICRO
+ bool "Morse Micro devices"
+ default y
+ help
+ If you have a wireless card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all the
+ questions about these cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if WLAN_VENDOR_MORSEMICRO
+source "drivers/net/wireless/morsemicro/mm81x/Kconfig"
+endif # WLAN_VENDOR_MORSEMICRO
diff --git a/drivers/net/wireless/morsemicro/Makefile b/drivers/net/wireless/morsemicro/Makefile
new file mode 100644
index 000000000000..5b2670f7d171
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_MM81X) += mm81x/
diff --git a/drivers/net/wireless/morsemicro/mm81x/Kconfig b/drivers/net/wireless/morsemicro/mm81x/Kconfig
new file mode 100644
index 000000000000..33cdcc0df4de
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/Kconfig
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config MM81X
+ tristate "Morse Micro MM81x wireless devices"
+ depends on MAC80211
+ select FW_LOADER
+ select CRC7
+ help
+ This module adds support for wireless devices based
+ on Morse Micro MM81x chipsets.
+
+config MM81X_USB
+ tristate "Morse Micro MM81x USB support"
+ depends on MM81X && USB
+ help
+ This module adds support for the USB interface of
+ devices using the Morse Micro MM81x chipset.
+
+config MM81X_SDIO
+ tristate "Morse Micro MM81x SDIO support"
+ depends on MM81X && MMC
+ help
+ This module adds support for the SDIO interface of
+ devices using the Morse Micro MM81x chipset.
diff --git a/drivers/net/wireless/morsemicro/mm81x/Makefile b/drivers/net/wireless/morsemicro/mm81x/Makefile
new file mode 100644
index 000000000000..0d494fda1412
--- /dev/null
+++ b/drivers/net/wireless/morsemicro/mm81x/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_MM81X) += mm81x_core.o
+
+mm81x_core-y += core.o
+mm81x_core-y += mac.o
+mm81x_core-y += hw.o
+mm81x_core-y += fw.o
+mm81x_core-y += command.o
+mm81x_core-y += ps.o
+mm81x_core-y += skbq.o
+mm81x_core-y += yaps_hw.o
+mm81x_core-y += yaps.o
+mm81x_core-y += rc.o
+mm81x_core-y += mmrc.o
+
+obj-$(CONFIG_MM81X_USB) += mm81x_usb.o
+mm81x_usb-y += usb.o
+
+obj-$(CONFIG_MM81X_SDIO) += mm81x_sdio.o
+mm81x_sdio-y += sdio.o
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread

* [PATCH wireless-next v2 31/31] wifi: mm81x: add MAINTAINERS entry
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (29 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 30/31] wifi: mm81x: add Kconfig and Makefile Lachlan Hodges
@ 2026-04-30 4:55 ` Lachlan Hodges
2026-04-30 5:43 ` [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
31 siblings, 0 replies; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 4:55 UTC (permalink / raw)
To: johannes
Cc: arien.judge, dan.callaghan, ayman.grais, linux-wireless,
Lachlan Hodges, linux-kernel
(Patches split per file for review, will be a single commit alongside
SDIO ids once review is complete. See cover letter for more
information)
Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
---
MAINTAINERS | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/MAINTAINERS b/MAINTAINERS
index 2fb1c75afd16..980d7658fc75 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -17988,6 +17988,14 @@ F: drivers/regulator/mpq7920.c
F: drivers/regulator/mpq7920.h
F: include/linux/mfd/mp2629.h
+MORSE MICRO MM81X WIRELESS DRIVER
+M: Lachlan Hodges <lachlan.hodges@morsemicro.com>
+M: Dan Callaghan <dan.callaghan@morsemicro.com>
+R: Arien Judge <arien.judge@morsemicro.com>
+L: linux-wireless@vger.kernel.org
+S: Supported
+F: drivers/net/wireless/morsemicro/mm81x/
+
MOST(R) TECHNOLOGY DRIVER
M: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
M: Christian Gromm <christian.gromm@microchip.com>
--
2.43.0
^ permalink raw reply related [flat|nested] 36+ messages in thread

* Re: [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver
2026-04-30 4:55 [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
` (30 preceding siblings ...)
2026-04-30 4:55 ` [PATCH wireless-next v2 31/31] wifi: mm81x: add MAINTAINERS entry Lachlan Hodges
@ 2026-04-30 5:43 ` Lachlan Hodges
2026-04-30 6:09 ` Johannes Berg
31 siblings, 1 reply; 36+ messages in thread
From: Lachlan Hodges @ 2026-04-30 5:43 UTC (permalink / raw)
To: johannes; +Cc: arien.judge, dan.callaghan, ayman.grais, linux-wireless
Johannes, this version was auto delegated to my patchwork due to the
new rules, but since this is still under review I have delegated them
back to you (I hope I've done that right :))
lachlan
^ permalink raw reply [flat|nested] 36+ messages in thread

* Re: [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver
2026-04-30 5:43 ` [PATCH wireless-next v2 00/31] wifi: mm81x: add mm81x driver Lachlan Hodges
@ 2026-04-30 6:09 ` Johannes Berg
0 siblings, 0 replies; 36+ messages in thread
From: Johannes Berg @ 2026-04-30 6:09 UTC (permalink / raw)
To: Lachlan Hodges; +Cc: arien.judge, dan.callaghan, ayman.grais, linux-wireless
Hi Lachlan,
On Thu, 2026-04-30 at 15:43 +1000, Lachlan Hodges wrote:
> Johannes, this version was auto delegated to my patchwork due to the
> new rules, but since this is still under review I have delegated them
> back to you (I hope I've done that right :))
Sounds good. And as a bonus we know the patchwork delegation works, and
you have access to modify things ;-)
johannes
^ permalink raw reply [flat|nested] 36+ messages in thread