The branch main has been updated by bz:

URL: 
https://cgit.FreeBSD.org/src/commit/?id=9954217599ce02fbf1772388e24e0b89663f4425

commit 9954217599ce02fbf1772388e24e0b89663f4425
Merge: 28125d24c92a f4669ef6cf78
Author:     Bjoern A. Zeeb <[email protected]>
AuthorDate: 2025-12-06 09:51:10 +0000
Commit:     Bjoern A. Zeeb <[email protected]>
CommitDate: 2025-12-06 09:51:10 +0000

    ath10k: update Atheros/QCA's ath10k driver
    
    This version is based on
    git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
    7d0a66e4bb9081d75c82ec4957c50034cb0ea449 ( tag: v6.18 ).
    
    Merge commit 'f4669ef6cf7860919442e67106e83f616ed36f51'.
    
    Sponsored by:   The FreeBSD Foundation

 sys/contrib/dev/athk/ath10k/Kconfig        |   7 +
 sys/contrib/dev/athk/ath10k/Makefile       |   1 +
 sys/contrib/dev/athk/ath10k/ahb.c          |  30 ++--
 sys/contrib/dev/athk/ath10k/bmi.c          |   9 +-
 sys/contrib/dev/athk/ath10k/ce.c           |  35 ++--
 sys/contrib/dev/athk/ath10k/ce.h           |   2 +-
 sys/contrib/dev/athk/ath10k/core.c         | 174 ++++++++++++++-----
 sys/contrib/dev/athk/ath10k/core.h         |  28 ++-
 sys/contrib/dev/athk/ath10k/coredump.c     |   3 +
 sys/contrib/dev/athk/ath10k/coredump.h     |   7 +-
 sys/contrib/dev/athk/ath10k/debug.c        |  62 +++----
 sys/contrib/dev/athk/ath10k/debugfs_sta.c  |  10 +-
 sys/contrib/dev/athk/ath10k/htc.c          |  10 +-
 sys/contrib/dev/athk/ath10k/htc.h          |  20 +--
 sys/contrib/dev/athk/ath10k/htt.c          |   3 +-
 sys/contrib/dev/athk/ath10k/htt.h          |  18 +-
 sys/contrib/dev/athk/ath10k/htt_rx.c       |  22 ++-
 sys/contrib/dev/athk/ath10k/htt_tx.c       |  26 ++-
 sys/contrib/dev/athk/ath10k/hw.c           |  64 +++----
 sys/contrib/dev/athk/ath10k/hw.h           |  63 ++++---
 sys/contrib/dev/athk/ath10k/leds.c         |  89 ++++++++++
 sys/contrib/dev/athk/ath10k/leds.h         |  34 ++++
 sys/contrib/dev/athk/ath10k/mac.c          | 265 ++++++++++++++++++-----------
 sys/contrib/dev/athk/ath10k/pci.c          |  86 +++++-----
 sys/contrib/dev/athk/ath10k/pci.h          |   7 +-
 sys/contrib/dev/athk/ath10k/qmi.c          |  12 ++
 sys/contrib/dev/athk/ath10k/qmi.h          |   1 +
 sys/contrib/dev/athk/ath10k/qmi_wlfw_v01.c |   1 +
 sys/contrib/dev/athk/ath10k/qmi_wlfw_v01.h |   1 +
 sys/contrib/dev/athk/ath10k/rx_desc.h      |   1 +
 sys/contrib/dev/athk/ath10k/sdio.c         |  37 ++--
 sys/contrib/dev/athk/ath10k/snoc.c         |  60 ++++---
 sys/contrib/dev/athk/ath10k/spectral.c     |  26 +--
 sys/contrib/dev/athk/ath10k/targaddrs.h    |   3 +
 sys/contrib/dev/athk/ath10k/testmode.c     |   4 +-
 sys/contrib/dev/athk/ath10k/thermal.c      |   3 +-
 sys/contrib/dev/athk/ath10k/trace.c        |   2 +
 sys/contrib/dev/athk/ath10k/trace.h        |  64 +++----
 sys/contrib/dev/athk/ath10k/txrx.c         |   2 +-
 sys/contrib/dev/athk/ath10k/usb.c          |   8 +-
 sys/contrib/dev/athk/ath10k/usb.h          |   1 +
 sys/contrib/dev/athk/ath10k/wmi-ops.h      |  32 ++++
 sys/contrib/dev/athk/ath10k/wmi-tlv.c      |  20 ++-
 sys/contrib/dev/athk/ath10k/wmi-tlv.h      |   3 +-
 sys/contrib/dev/athk/ath10k/wmi.c          | 122 ++++++++++---
 sys/contrib/dev/athk/ath10k/wmi.h          | 104 ++++++-----
 sys/contrib/dev/athk/ath10k/wow.c          |   1 +
 47 files changed, 1009 insertions(+), 574 deletions(-)

diff --cc sys/contrib/dev/athk/ath10k/Makefile
index 7881fc25993f,000000000000..28460625cdfb
mode 100644,000000..100644
--- a/sys/contrib/dev/athk/ath10k/Makefile
+++ b/sys/contrib/dev/athk/ath10k/Makefile
@@@ -1,45 -1,0 +1,46 @@@
 +# SPDX-License-Identifier: ISC
 +obj-$(CONFIG_ATH10K) += ath10k_core.o
 +ath10k_core-y += mac.o \
 +               debug.o \
 +               core.o \
 +               htc.o \
 +               htt.o \
 +               htt_rx.o \
 +               htt_tx.o \
 +               txrx.o \
 +               wmi.o \
 +               wmi-tlv.o \
 +               bmi.o \
 +               hw.o \
 +               p2p.o \
 +               swap.o
 +
 +ath10k_core-$(CONFIG_ATH10K_SPECTRAL) += spectral.o
 +ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
 +ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
 +ath10k_core-$(CONFIG_THERMAL) += thermal.o
++ath10k_core-$(CONFIG_ATH10K_LEDS) += leds.o
 +ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
 +ath10k_core-$(CONFIG_PM) += wow.o
 +ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o
 +ath10k_core-$(CONFIG_ATH10K_CE) += ce.o
 +ath10k_core-$(CONFIG_FWLOG) += fwlog.o
 +
 +obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
 +ath10k_pci-y += pci.o
 +
 +ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o
 +
 +obj-$(CONFIG_ATH10K_SDIO) += ath10k_sdio.o
 +ath10k_sdio-y += sdio.o
 +
 +obj-$(CONFIG_ATH10K_USB) += ath10k_usb.o
 +ath10k_usb-y += usb.o
 +
 +obj-$(CONFIG_ATH10K_SNOC) += ath10k_snoc.o
 +ath10k_snoc-y += qmi.o \
 +               qmi_wlfw_v01.o \
 +               snoc.o
 +
 +# for tracing framework to find trace.h
 +CFLAGS_trace.o := -I$(src)
diff --cc sys/contrib/dev/athk/ath10k/ce.c
index 8168e20bb09a,000000000000..719957ac87e4
mode 100644,000000..100644
--- a/sys/contrib/dev/athk/ath10k/ce.c
+++ b/sys/contrib/dev/athk/ath10k/ce.c
@@@ -1,1972 -1,0 +1,1975 @@@
 +// SPDX-License-Identifier: ISC
 +/*
 + * Copyright (c) 2005-2011 Atheros Communications Inc.
 + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 + * Copyright (c) 2018 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
++ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 + */
 +
++#include <linux/export.h>
 +#include "hif.h"
 +#include "ce.h"
 +#include "debug.h"
 +
 +/*
 + * Support for Copy Engine hardware, which is mainly used for
 + * communication between Host and Target over a PCIe interconnect.
 + */
 +
 +/*
 + * A single CopyEngine (CE) comprises two "rings":
 + *   a source ring
 + *   a destination ring
 + *
 + * Each ring consists of a number of descriptors which specify
 + * an address, length, and meta-data.
 + *
 + * Typically, one side of the PCIe/AHB/SNOC interconnect (Host or Target)
 + * controls one ring and the other side controls the other ring.
 + * The source side chooses when to initiate a transfer and it
 + * chooses what to send (buffer address, length). The destination
 + * side keeps a supply of "anonymous receive buffers" available and
 + * it handles incoming data as it arrives (when the destination
 + * receives an interrupt).
 + *
 + * The sender may send a simple buffer (address/length) or it may
 + * send a small list of buffers.  When a small list is sent, hardware
 + * "gathers" these and they end up in a single destination buffer
 + * with a single interrupt.
 + *
 + * There are several "contexts" managed by this layer -- more, it
 + * may seem -- than should be needed. These are provided mainly for
 + * maximum flexibility and especially to facilitate a simpler HIF
 + * implementation. There are per-CopyEngine recv, send, and watermark
 + * contexts. These are supplied by the caller when a recv, send,
 + * or watermark handler is established and they are echoed back to
 + * the caller when the respective callbacks are invoked. There is
 + * also a per-transfer context supplied by the caller when a buffer
 + * (or sendlist) is sent and when a buffer is enqueued for recv.
 + * These per-transfer contexts are echoed back to the caller when
 + * the buffer is sent/received.
 + */
 +
 +static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar,
 +                                      struct ath10k_ce_pipe *ce_state)
 +{
 +      u32 ce_id = ce_state->id;
 +      u32 addr = 0;
 +
 +      switch (ce_id) {
 +      case 0:
 +              addr = 0x00032000;
 +              break;
 +      case 3:
 +              addr = 0x0003200C;
 +              break;
 +      case 4:
 +              addr = 0x00032010;
 +              break;
 +      case 5:
 +              addr = 0x00032014;
 +              break;
 +      case 7:
 +              addr = 0x0003201C;
 +              break;
 +      default:
 +              ath10k_warn(ar, "invalid CE id: %d", ce_id);
 +              break;
 +      }
 +      return addr;
 +}
 +
 +static inline unsigned int
 +ath10k_set_ring_byte(unsigned int offset,
-                    struct ath10k_hw_ce_regs_addr_map *addr_map)
++                   const struct ath10k_hw_ce_regs_addr_map *addr_map)
 +{
 +      return ((offset << addr_map->lsb) & addr_map->mask);
 +}
 +
 +static inline u32 ath10k_ce_read32(struct ath10k *ar, u32 offset)
 +{
 +      struct ath10k_ce *ce = ath10k_ce_priv(ar);
 +
 +      return ce->bus_ops->read32(ar, offset);
 +}
 +
 +static inline void ath10k_ce_write32(struct ath10k *ar, u32 offset, u32 value)
 +{
 +      struct ath10k_ce *ce = ath10k_ce_priv(ar);
 +
 +      ce->bus_ops->write32(ar, offset, value);
 +}
 +
 +static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
 +                                                     u32 ce_ctrl_addr,
 +                                                     unsigned int n)
 +{
 +      ath10k_ce_write32(ar, ce_ctrl_addr +
 +                        ar->hw_ce_regs->dst_wr_index_addr, n);
 +}
 +
 +static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
 +                                                    u32 ce_ctrl_addr)
 +{
 +      return ath10k_ce_read32(ar, ce_ctrl_addr +
 +                              ar->hw_ce_regs->dst_wr_index_addr);
 +}
 +
 +static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
 +                                                    u32 ce_ctrl_addr,
 +                                                    unsigned int n)
 +{
 +      ath10k_ce_write32(ar, ce_ctrl_addr +
 +                        ar->hw_ce_regs->sr_wr_index_addr, n);
 +}
 +
 +static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
 +                                                   u32 ce_ctrl_addr)
 +{
 +      return ath10k_ce_read32(ar, ce_ctrl_addr +
 +                              ar->hw_ce_regs->sr_wr_index_addr);
 +}
 +
 +static inline u32 ath10k_ce_src_ring_read_index_from_ddr(struct ath10k *ar,
 +                                                       u32 ce_id)
 +{
 +      struct ath10k_ce *ce = ath10k_ce_priv(ar);
 +
 +      return ce->vaddr_rri[ce_id] & CE_DDR_RRI_MASK;
 +}
 +
 +static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
 +                                                  u32 ce_ctrl_addr)
 +{
 +      struct ath10k_ce *ce = ath10k_ce_priv(ar);
 +      u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
 +      struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
 +      u32 index;
 +
 +      if (ar->hw_params.rri_on_ddr &&
 +          (ce_state->attr_flags & CE_ATTR_DIS_INTR))
 +              index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_id);
 +      else
 +              index = ath10k_ce_read32(ar, ce_ctrl_addr +
 +                                       ar->hw_ce_regs->current_srri_addr);
 +
 +      return index;
 +}
 +
 +static inline void
 +ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar,
 +                                        struct ath10k_ce_pipe *ce_state,
 +                                        unsigned int value)
 +{
 +      ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value);
 +}
 +
 +static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
 +                                                  u32 ce_id,
 +                                                  u64 addr)
 +{
 +      struct ath10k_ce *ce = ath10k_ce_priv(ar);
 +      struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
 +      u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
 +      u32 addr_lo = lower_32_bits(addr);
 +
 +      ath10k_ce_write32(ar, ce_ctrl_addr +
 +                        ar->hw_ce_regs->sr_base_addr_lo, addr_lo);
 +
 +      if (ce_state->ops->ce_set_src_ring_base_addr_hi) {
 +              ce_state->ops->ce_set_src_ring_base_addr_hi(ar, ce_ctrl_addr,
 +                                                          addr);
 +      }
 +}
 +
 +static void ath10k_ce_set_src_ring_base_addr_hi(struct ath10k *ar,
 +                                              u32 ce_ctrl_addr,
 +                                              u64 addr)
 +{
 +      u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;
 +
 +      ath10k_ce_write32(ar, ce_ctrl_addr +
 +                        ar->hw_ce_regs->sr_base_addr_hi, addr_hi);
 +}
 +
 +static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
 +                                             u32 ce_ctrl_addr,
 +                                             unsigned int n)
 +{
 +      ath10k_ce_write32(ar, ce_ctrl_addr +
 +                        ar->hw_ce_regs->sr_size_addr, n);
 +}
 +
 +static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
 +                                             u32 ce_ctrl_addr,
 +                                             unsigned int n)
 +{
-       struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
++      const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
 +
 +      u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
 +                                        ctrl_regs->addr);
 +
 +      ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
 +                        (ctrl1_addr &  ~(ctrl_regs->dmax->mask)) |
 +                        ath10k_set_ring_byte(n, ctrl_regs->dmax));
 +}
 +
 +static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
 +                                                  u32 ce_ctrl_addr,
 +                                                  unsigned int n)
 +{
-       struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
++      const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
 +
 +      u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
 +                                        ctrl_regs->addr);
 +
 +      ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
 +                        (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) |
 +                        ath10k_set_ring_byte(n, ctrl_regs->src_ring));
 +}
 +
 +static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
 +                                                   u32 ce_ctrl_addr,
 +                                                   unsigned int n)
 +{
-       struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
++      const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
 +
 +      u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
 +                                        ctrl_regs->addr);
 +
 +      ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
 +                        (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) |
 +                        ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
 +}
 +
 +static inline
 +      u32 ath10k_ce_dest_ring_read_index_from_ddr(struct ath10k *ar, u32 ce_id)
 +{
 +      struct ath10k_ce *ce = ath10k_ce_priv(ar);
 +
 +      return (ce->vaddr_rri[ce_id] >> CE_DDR_DRRI_SHIFT) &
 +              CE_DDR_RRI_MASK;
 +}
 +
 +static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
 +                                                   u32 ce_ctrl_addr)
 +{
 +      struct ath10k_ce *ce = ath10k_ce_priv(ar);
 +      u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
 +      struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
 +      u32 index;
 +
 +      if (ar->hw_params.rri_on_ddr &&
 +          (ce_state->attr_flags & CE_ATTR_DIS_INTR))
 +              index = ath10k_ce_dest_ring_read_index_from_ddr(ar, ce_id);
 +      else
 +              index = ath10k_ce_read32(ar, ce_ctrl_addr +
 +                                       ar->hw_ce_regs->current_drri_addr);
 +
 +      return index;
 +}
 +
 +static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
 +                                                   u32 ce_id,
 +                                                   u64 addr)
 +{
 +      struct ath10k_ce *ce = ath10k_ce_priv(ar);
 +      struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
 +      u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
 +      u32 addr_lo = lower_32_bits(addr);
 +
 +      ath10k_ce_write32(ar, ce_ctrl_addr +
 +                        ar->hw_ce_regs->dr_base_addr_lo, addr_lo);
 +
 +      if (ce_state->ops->ce_set_dest_ring_base_addr_hi) {
 +              ce_state->ops->ce_set_dest_ring_base_addr_hi(ar, ce_ctrl_addr,
 +                                                           addr);
 +      }
 +}
 +
 +static void ath10k_ce_set_dest_ring_base_addr_hi(struct ath10k *ar,
 +                                               u32 ce_ctrl_addr,
 +                                               u64 addr)
 +{
 +      u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;
 +      u32 reg_value;
 +
 +      reg_value = ath10k_ce_read32(ar, ce_ctrl_addr +
 +                                   ar->hw_ce_regs->dr_base_addr_hi);
 +      reg_value &= ~CE_DESC_ADDR_HI_MASK;
 +      reg_value |= addr_hi;
 +      ath10k_ce_write32(ar, ce_ctrl_addr +
 +                        ar->hw_ce_regs->dr_base_addr_hi, reg_value);
 +}
 +
 +static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
 +                                              u32 ce_ctrl_addr,
 +                                              unsigned int n)
 +{
 +      ath10k_ce_write32(ar, ce_ctrl_addr +
 +                        ar->hw_ce_regs->dr_size_addr, n);
 +}
 +
 +static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
 +                                                 u32 ce_ctrl_addr,
 +                                                 unsigned int n)
 +{
-       struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
++      const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
 +      u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
 +
 +      ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
 +                        (addr & ~(srcr_wm->wm_high->mask)) |
 +                        (ath10k_set_ring_byte(n, srcr_wm->wm_high)));
 +}
 +
 +static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
 +                                                u32 ce_ctrl_addr,
 +                                                unsigned int n)
 +{
-       struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
++      const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
 +      u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
 +
 +      ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
 +                        (addr & ~(srcr_wm->wm_low->mask)) |
 +                        (ath10k_set_ring_byte(n, srcr_wm->wm_low)));
 +}
 +
 +static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
 +                                                  u32 ce_ctrl_addr,
 +                                                  unsigned int n)
 +{
-       struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
++      const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
 +      u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
 +
 +      ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
 +                        (addr & ~(dstr_wm->wm_high->mask)) |
 +                        (ath10k_set_ring_byte(n, dstr_wm->wm_high)));
 +}
 +
 +static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
 +                                                 u32 ce_ctrl_addr,
 +                                                 unsigned int n)
 +{
-       struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
++      const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
 +      u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
 +
 +      ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
 +                        (addr & ~(dstr_wm->wm_low->mask)) |
 +                        (ath10k_set_ring_byte(n, dstr_wm->wm_low)));
 +}
 +
 +static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
 +                                                      u32 ce_ctrl_addr)
 +{
-       struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
++      const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
 +
 +      u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
 +                                          ar->hw_ce_regs->host_ie_addr);
 +
 +      ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
 +                        host_ie_addr | host_ie->copy_complete->mask);
 +}
 +
 +static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
 +                                                      u32 ce_ctrl_addr)
 +{
-       struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
++      const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
 +
 +      u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
 +                                          ar->hw_ce_regs->host_ie_addr);
 +
 +      ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
 +                        host_ie_addr & ~(host_ie->copy_complete->mask));
 +}
 +
 +static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
 +                                                  u32 ce_ctrl_addr)
 +{
-       struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
++      const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
 +
 +      u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
 +                                          ar->hw_ce_regs->host_ie_addr);
 +
 +      ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
 +                        host_ie_addr & ~(wm_regs->wm_mask));
 +}
 +
 +static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
 +                                              u32 ce_ctrl_addr)
 +{
-       struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
++      const struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
 +
 +      u32 misc_ie_addr = ath10k_ce_read32(ar,
 +                      ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr);
 +
 +      ath10k_ce_write32(ar,
 +                        ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
 +                        misc_ie_addr & ~(misc_regs->err_mask));
 +}
 +
 +static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
 +                                                   u32 ce_ctrl_addr,
 +                                                   unsigned int mask)
 +{
-       struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
++      const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
 +
 +      ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
 +}
 +
 +/*
 + * Guts of ath10k_ce_send.
 + * The caller takes responsibility for any needed locking.
 + */
 +static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
 +                                void *per_transfer_context,
 +                                dma_addr_t buffer,
 +                                unsigned int nbytes,
 +                                unsigned int transfer_id,
 +                                unsigned int flags)
 +{
 +      struct ath10k *ar = ce_state->ar;
 +      struct ath10k_ce_ring *src_ring = ce_state->src_ring;
 +      struct ce_desc *desc, sdesc;
 +      unsigned int nentries_mask = src_ring->nentries_mask;
 +      unsigned int sw_index = src_ring->sw_index;
 +      unsigned int write_index = src_ring->write_index;
 +      u32 ctrl_addr = ce_state->ctrl_addr;
 +      u32 desc_flags = 0;
 +      int ret = 0;
 +
 +      if (nbytes > ce_state->src_sz_max)
 +              ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
 +                          __func__, nbytes, ce_state->src_sz_max);
 +
 +      if (unlikely(CE_RING_DELTA(nentries_mask,
 +                                 write_index, sw_index - 1) <= 0)) {
 +              ret = -ENOSR;
 +              goto exit;
 +      }
 +
 +      desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
 +                                 write_index);
 +
 +      desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
 +
 +      if (flags & CE_SEND_FLAG_GATHER)
 +              desc_flags |= CE_DESC_FLAGS_GATHER;
 +      if (flags & CE_SEND_FLAG_BYTE_SWAP)
 +              desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
 +
 +      sdesc.addr   = __cpu_to_le32(buffer);
 +      sdesc.nbytes = __cpu_to_le16(nbytes);
 +      sdesc.flags  = __cpu_to_le16(desc_flags);
 +
 +      *desc = sdesc;
 +
 +      src_ring->per_transfer_context[write_index] = per_transfer_context;
 +
 +      /* Update Source Ring Write Index */
 +      write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
 +
 +      /* WORKAROUND */
 +      if (!(flags & CE_SEND_FLAG_GATHER))
 +              ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
 +
 +      src_ring->write_index = write_index;
 +exit:
 +      return ret;
 +}
 +
 +static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
 +                                   void *per_transfer_context,
 +                                   dma_addr_t buffer,
 +                                   unsigned int nbytes,
 +                                   unsigned int transfer_id,
 +                                   unsigned int flags)
 +{
 +      struct ath10k *ar = ce_state->ar;
 +      struct ath10k_ce_ring *src_ring = ce_state->src_ring;
 +      struct ce_desc_64 *desc, sdesc;
 +      unsigned int nentries_mask = src_ring->nentries_mask;
 +      unsigned int sw_index;
 +      unsigned int write_index = src_ring->write_index;
 +      u32 ctrl_addr = ce_state->ctrl_addr;
 +      __le32 *addr;
 +      u32 desc_flags = 0;
 +      int ret = 0;
 +
 +      if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
 +              return -ESHUTDOWN;
 +
 +      if (nbytes > ce_state->src_sz_max)
 +              ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
 +                          __func__, nbytes, ce_state->src_sz_max);
 +
 +      if (ar->hw_params.rri_on_ddr)
 +              sw_index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_state->id);
 +      else
 +              sw_index = src_ring->sw_index;
 +
 +      if (unlikely(CE_RING_DELTA(nentries_mask,
 +                                 write_index, sw_index - 1) <= 0)) {
 +              ret = -ENOSR;
 +              goto exit;
 +      }
 +
 +      desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
 +                                    write_index);
 +
 +      desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
 +
 +      if (flags & CE_SEND_FLAG_GATHER)
 +              desc_flags |= CE_DESC_FLAGS_GATHER;
 +
 +      if (flags & CE_SEND_FLAG_BYTE_SWAP)
 +              desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
 +
 +      addr = (__le32 *)&sdesc.addr;
 +
 +      flags |= upper_32_bits(buffer) & CE_DESC_ADDR_HI_MASK;
 +      addr[0] = __cpu_to_le32(buffer);
 +      addr[1] = __cpu_to_le32(flags);
 +      if (flags & CE_SEND_FLAG_GATHER)
 +              addr[1] |= __cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER);
 +      else
 +              addr[1] &= ~(__cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER));
 +
 +      sdesc.nbytes = __cpu_to_le16(nbytes);
 +      sdesc.flags  = __cpu_to_le16(desc_flags);
 +
 +      *desc = sdesc;
 +
 +      src_ring->per_transfer_context[write_index] = per_transfer_context;
 +
 +      /* Update Source Ring Write Index */
 +      write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
 +
 +      if (!(flags & CE_SEND_FLAG_GATHER)) {
 +              if (ar->hw_params.shadow_reg_support)
 +                      ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
 +                                                                write_index);
 +              else
 +                      ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
 +                                                         write_index);
 +      }
 +
 +      src_ring->write_index = write_index;
 +exit:
 +      return ret;
 +}
 +
 +int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
 +                        void *per_transfer_context,
 +                        dma_addr_t buffer,
 +                        unsigned int nbytes,
 +                        unsigned int transfer_id,
 +                        unsigned int flags)
 +{
 +      return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
 +                                  buffer, nbytes, transfer_id, flags);
 +}
 +EXPORT_SYMBOL(ath10k_ce_send_nolock);
 +
 +void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
 +{
 +      struct ath10k *ar = pipe->ar;
 +      struct ath10k_ce *ce = ath10k_ce_priv(ar);
 +      struct ath10k_ce_ring *src_ring = pipe->src_ring;
 +      u32 ctrl_addr = pipe->ctrl_addr;
 +
 +      lockdep_assert_held(&ce->ce_lock);
 +
 +      /*
 +       * This function must be called only if there is an incomplete
 +       * scatter-gather transfer (before index register is updated)
 +       * that needs to be cleaned up.
 +       */
 +      if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
 +              return;
 +
 +      if (WARN_ON_ONCE(src_ring->write_index ==
 +                       ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
 +              return;
 +
 +      src_ring->write_index--;
 +      src_ring->write_index &= src_ring->nentries_mask;
 +
 +      src_ring->per_transfer_context[src_ring->write_index] = NULL;
 +}
 +EXPORT_SYMBOL(__ath10k_ce_send_revert);
 +
 +int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
 +                 void *per_transfer_context,
 +                 dma_addr_t buffer,
 +                 unsigned int nbytes,
 +                 unsigned int transfer_id,
 +                 unsigned int flags)
 +{
 +      struct ath10k *ar = ce_state->ar;
 +      struct ath10k_ce *ce = ath10k_ce_priv(ar);
 +      int ret;
 +
 +      spin_lock_bh(&ce->ce_lock);
 +      ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
 +                                  buffer, nbytes, transfer_id, flags);
 +      spin_unlock_bh(&ce->ce_lock);
 +
 +      return ret;
 +}
 +EXPORT_SYMBOL(ath10k_ce_send);
 +
 +int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
 +{
 +      struct ath10k *ar = pipe->ar;
 +      struct ath10k_ce *ce = ath10k_ce_priv(ar);
 +      int delta;
 +
 +      spin_lock_bh(&ce->ce_lock);
 +      delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
 +                            pipe->src_ring->write_index,
 +                            pipe->src_ring->sw_index - 1);
 +      spin_unlock_bh(&ce->ce_lock);
 +
 +      return delta;
 +}
 +EXPORT_SYMBOL(ath10k_ce_num_free_src_entries);
 +
 +int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
 +{
 +      struct ath10k *ar = pipe->ar;
 +      struct ath10k_ce *ce = ath10k_ce_priv(ar);
 +      struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
 +      unsigned int nentries_mask = dest_ring->nentries_mask;
 +      unsigned int write_index = dest_ring->write_index;
 +      unsigned int sw_index = dest_ring->sw_index;
 +
 +      lockdep_assert_held(&ce->ce_lock);
 +
 +      return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
 +}
 +EXPORT_SYMBOL(__ath10k_ce_rx_num_free_bufs);
 +
 +static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
 +                                 dma_addr_t paddr)
 +{
 +      struct ath10k *ar = pipe->ar;
 +      struct ath10k_ce *ce = ath10k_ce_priv(ar);
 +      struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
 +      unsigned int nentries_mask = dest_ring->nentries_mask;
 +      unsigned int write_index = dest_ring->write_index;
 +      unsigned int sw_index = dest_ring->sw_index;
 +      struct ce_desc *base = dest_ring->base_addr_owner_space;
 +      struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
 +      u32 ctrl_addr = pipe->ctrl_addr;
 +
 +      lockdep_assert_held(&ce->ce_lock);
 +
 +      if ((pipe->id != 5) &&
 +          CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
 +              return -ENOSPC;
 +
 +      desc->addr = __cpu_to_le32(paddr);
 +      desc->nbytes = 0;
 +
 +      dest_ring->per_transfer_context[write_index] = ctx;
 +      write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
 +      ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
 +      dest_ring->write_index = write_index;
 +
 +      return 0;
 +}
 +
 +static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe,
 +                                    void *ctx,
 +                                    dma_addr_t paddr)
 +{
 +      struct ath10k *ar = pipe->ar;
 +      struct ath10k_ce *ce = ath10k_ce_priv(ar);
 +      struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
 +      unsigned int nentries_mask = dest_ring->nentries_mask;
 +      unsigned int write_index = dest_ring->write_index;
 +      unsigned int sw_index = dest_ring->sw_index;
 +      struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
 +      struct ce_desc_64 *desc =
 +                      CE_DEST_RING_TO_DESC_64(base, write_index);
 +      u32 ctrl_addr = pipe->ctrl_addr;
 +
 +      lockdep_assert_held(&ce->ce_lock);
 +
 +      if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
 +              return -ENOSPC;
 +
 +      desc->addr = __cpu_to_le64(paddr);
 +      desc->addr &= __cpu_to_le64(CE_DESC_ADDR_MASK);
 +
 +      desc->nbytes = 0;
 +
 +      dest_ring->per_transfer_context[write_index] = ctx;
 +      write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
 +      ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
 +      dest_ring->write_index = write_index;
 +
 +      return 0;
 +}
 +
 +void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
 +{
 +      struct ath10k *ar = pipe->ar;
 +      struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
 +      unsigned int nentries_mask = dest_ring->nentries_mask;
 +      unsigned int write_index = dest_ring->write_index;
 +      u32 ctrl_addr = pipe->ctrl_addr;
 +      u32 cur_write_idx = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
 +
 +      /* Prevent CE ring stuck issue that will occur when ring is full.
 +       * Make sure that write index is 1 less than read index.
 +       */
 +      if (((cur_write_idx + nentries) & nentries_mask) == dest_ring->sw_index)
 +              nentries -= 1;
 +
 +      write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
 +      ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
 +      dest_ring->write_index = write_index;
 +}
 +EXPORT_SYMBOL(ath10k_ce_rx_update_write_idx);
 +
 +int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
 +                        dma_addr_t paddr)
 +{
 +      struct ath10k *ar = pipe->ar;
 +      struct ath10k_ce *ce = ath10k_ce_priv(ar);
 +      int ret;
 +
 +      spin_lock_bh(&ce->ce_lock);
 +      ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr);
 +      spin_unlock_bh(&ce->ce_lock);
 +
 +      return ret;
 +}
 +EXPORT_SYMBOL(ath10k_ce_rx_post_buf);
 +
 +/*
 + * Guts of ath10k_ce_completed_recv_next.
 + * The caller takes responsibility for any necessary locking.
 + */
 +static int
 +       _ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
 +                                             void **per_transfer_contextp,
 +                                             unsigned int *nbytesp)
 +{
 +      struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
 +      unsigned int nentries_mask = dest_ring->nentries_mask;
 +      unsigned int sw_index = dest_ring->sw_index;
 +
 +      struct ce_desc *base = dest_ring->base_addr_owner_space;
 +      struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
 +      struct ce_desc sdesc;
 +      u16 nbytes;
 +
 +      /* Copy in one go for performance reasons */
 +      sdesc = *desc;
 +
 +      nbytes = __le16_to_cpu(sdesc.nbytes);
 +      if (nbytes == 0) {
 +              /*
 +               * This closes a relatively unusual race where the Host
 +               * sees the updated DRRI before the update to the
 +               * corresponding descriptor has completed. We treat this
 +               * as a descriptor that is not yet done.
 +               */
 +              return -EIO;
 +      }
 +
 +      desc->nbytes = 0;
 +
 +      /* Return data from completed destination descriptor */
 +      *nbytesp = nbytes;
 +
 +      if (per_transfer_contextp)
 +              *per_transfer_contextp =
 +                      dest_ring->per_transfer_context[sw_index];
 +
 +      /* Copy engine 5 (HTT Rx) will reuse the same transfer context.
 +       * So update transfer context all CEs except CE5.
 +       */
 +      if (ce_state->id != 5)
 +              dest_ring->per_transfer_context[sw_index] = NULL;
 +
 +      /* Update sw_index */
 +      sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
 +      dest_ring->sw_index = sw_index;
 +
 +      return 0;
 +}
 +
 +static int
 +_ath10k_ce_completed_recv_next_nolock_64(struct ath10k_ce_pipe *ce_state,
 +                                       void **per_transfer_contextp,
 +                                       unsigned int *nbytesp)
 +{
 +      struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
 +      unsigned int nentries_mask = dest_ring->nentries_mask;
 +      unsigned int sw_index = dest_ring->sw_index;
 +      struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
 +      struct ce_desc_64 *desc =
 +              CE_DEST_RING_TO_DESC_64(base, sw_index);
 +      struct ce_desc_64 sdesc;
 +      u16 nbytes;
 +
 +      /* Copy in one go for performance reasons */
 +      sdesc = *desc;
 +
 +      nbytes = __le16_to_cpu(sdesc.nbytes);
 +      if (nbytes == 0) {
 +              /* This closes a relatively unusual race where the Host
 +               * sees the updated DRRI before the update to the
 +               * corresponding descriptor has completed. We treat this
 +               * as a descriptor that is not yet done.
 +               */
 +              return -EIO;
 +      }
 +
 +      desc->nbytes = 0;
 +
 +      /* Return data from completed destination descriptor */
 +      *nbytesp = nbytes;
 +
 +      if (per_transfer_contextp)
 +              *per_transfer_contextp =
 +                      dest_ring->per_transfer_context[sw_index];
 +
 +      /* Copy engine 5 (HTT Rx) will reuse the same transfer context.
 +       * So update transfer context all CEs except CE5.
 +       */
 +      if (ce_state->id != 5)
 +              dest_ring->per_transfer_context[sw_index] = NULL;
*** 36221 LINES SKIPPED ***


Reply via email to