| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-03-29 11:12:28 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-03-29 11:12:28 -0700 |
| commit | 092e335082f22880207384ad736729c67d784665 (patch) | |
| tree | 6f737fedfa5ad5d53f622cc7fc531683cba1a0f8 /drivers/infiniband/hw/bnxt_re/hw_counters.c | |
| parent | 0ccff074d6aa45835ccb7c0e4a995a32e4c90b5a (diff) | |
| parent | 37826f0a8c2f6b6add5179003b8597e32a445362 (diff) | |
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
- Usual minor updates and fixes for bnxt_re, hfi1, rxe, mana, iser,
mlx5, vmw_pvrdma, hns
- Make rxe work on tun devices
- mana gains more standard verbs as it moves toward supporting
in-kernel verbs
- DMABUF support for mana
- Fix page size calculations when memory registration exceeds 4G
- On Demand Paging support for rxe
- mlx5 support for RDMA TRANSPORT flow tables and a new ucap mechanism to
  control access to their use
- Optional RDMA_TX/RX counters per QP in mlx5
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (73 commits)
IB/mad: Check available slots before posting receive WRs
RDMA/mana_ib: Fix integer overflow during queue creation
RDMA/mlx5: Fix calculation of total invalidated pages
RDMA/mlx5: Fix mlx5_poll_one() cur_qp update flow
RDMA/mlx5: Fix page_size variable overflow
RDMA/mlx5: Drop access_flags from _mlx5_mr_cache_alloc()
RDMA/mlx5: Fix cache entry update on dereg error
RDMA/mlx5: Fix MR cache initialization error flow
RDMA/mlx5: Support optional-counters binding for QPs
RDMA/mlx5: Compile fs.c regardless of INFINIBAND_USER_ACCESS config
RDMA/core: Pass port to counter bind/unbind operations
RDMA/core: Add support to optional-counters binding configuration
RDMA/core: Create and destroy rdma_counter using rdma_zalloc_drv_obj()
RDMA/mlx5: Add optional counters for RDMA_TX/RX_packets/bytes
RDMA/core: Fix use-after-free when rename device name
RDMA/bnxt_re: Support perf management counters
RDMA/rxe: Fix incorrect return value of rxe_odp_atomic_op()
RDMA/uverbs: Propagate errors from rdma_lookup_get_uobject()
RDMA/mana_ib: Handle net event for pointing to the current netdev
net: mana: Change the function signature of mana_get_primary_netdev_rcu
...
Diffstat (limited to 'drivers/infiniband/hw/bnxt_re/hw_counters.c')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/infiniband/hw/bnxt_re/hw_counters.c | 92 |

1 file changed, 92 insertions, 0 deletions
```diff
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index f039aefcaf67..44bb082e0a60 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -39,6 +39,8 @@
 #include <linux/types.h>
 #include <linux/pci.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_pma.h>
 #include "roce_hsi.h"
 #include "qplib_res.h"
@@ -285,6 +287,96 @@ static void bnxt_re_copy_db_pacing_stats(struct bnxt_re_dev *rdev,
 		readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
 }
 
+int bnxt_re_assign_pma_port_ext_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad)
+{
+	struct ib_pma_portcounters_ext *pma_cnt_ext;
+	struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
+	struct ctx_hw_stats *hw_stats = NULL;
+	int rc;
+
+	hw_stats = rdev->qplib_ctx.stats.dma;
+
+	pma_cnt_ext = (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
+	if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags)) {
+		u32 fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
+
+		rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, estat);
+		if (rc)
+			return rc;
+	}
+
+	pma_cnt_ext = (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
+	if ((bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) && rdev->is_virtfn) ||
+	    !bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
+		pma_cnt_ext->port_xmit_data =
+			cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_bytes) / 4);
+		pma_cnt_ext->port_rcv_data =
+			cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_bytes) / 4);
+		pma_cnt_ext->port_xmit_packets =
+			cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_pkts));
+		pma_cnt_ext->port_rcv_packets =
+			cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_pkts));
+		pma_cnt_ext->port_unicast_rcv_packets =
+			cpu_to_be64(le64_to_cpu(hw_stats->rx_ucast_pkts));
+		pma_cnt_ext->port_unicast_xmit_packets =
+			cpu_to_be64(le64_to_cpu(hw_stats->tx_ucast_pkts));
+
+	} else {
+		pma_cnt_ext->port_rcv_packets = cpu_to_be64(estat->rx_roce_good_pkts);
+		pma_cnt_ext->port_rcv_data = cpu_to_be64(estat->rx_roce_good_bytes / 4);
+		pma_cnt_ext->port_xmit_packets = cpu_to_be64(estat->tx_roce_pkts);
+		pma_cnt_ext->port_xmit_data = cpu_to_be64(estat->tx_roce_bytes / 4);
+		pma_cnt_ext->port_unicast_rcv_packets = cpu_to_be64(estat->rx_roce_good_pkts);
+		pma_cnt_ext->port_unicast_xmit_packets = cpu_to_be64(estat->tx_roce_pkts);
+	}
+	return 0;
+}
+
+int bnxt_re_assign_pma_port_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad)
+{
+	struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
+	struct ib_pma_portcounters *pma_cnt;
+	struct ctx_hw_stats *hw_stats = NULL;
+	int rc;
+
+	hw_stats = rdev->qplib_ctx.stats.dma;
+
+	pma_cnt = (struct ib_pma_portcounters *)(out_mad->data + 40);
+	if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags)) {
+		u32 fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
+
+		rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, estat);
+		if (rc)
+			return rc;
+	}
+	if ((bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) && rdev->is_virtfn) ||
+	    !bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
+		pma_cnt->port_rcv_packets =
+			cpu_to_be32((u32)(le64_to_cpu(hw_stats->rx_ucast_pkts)) & 0xFFFFFFFF);
+		pma_cnt->port_rcv_data =
+			cpu_to_be32((u32)((le64_to_cpu(hw_stats->rx_ucast_bytes) &
+					   0xFFFFFFFF) / 4));
+		pma_cnt->port_xmit_packets =
+			cpu_to_be32((u32)(le64_to_cpu(hw_stats->tx_ucast_pkts)) & 0xFFFFFFFF);
+		pma_cnt->port_xmit_data =
+			cpu_to_be32((u32)((le64_to_cpu(hw_stats->tx_ucast_bytes)
+					   & 0xFFFFFFFF) / 4));
+	} else {
+		pma_cnt->port_rcv_packets = cpu_to_be32(estat->rx_roce_good_pkts);
+		pma_cnt->port_rcv_data = cpu_to_be32((estat->rx_roce_good_bytes / 4));
+		pma_cnt->port_xmit_packets = cpu_to_be32(estat->tx_roce_pkts);
+		pma_cnt->port_xmit_data = cpu_to_be32((estat->tx_roce_bytes / 4));
+	}
+	pma_cnt->port_rcv_constraint_errors = (u8)(le64_to_cpu(hw_stats->rx_discard_pkts) & 0xFF);
+	pma_cnt->port_rcv_errors = cpu_to_be16((u16)(le64_to_cpu(hw_stats->rx_error_pkts)
+						     & 0xFFFF));
+	pma_cnt->port_xmit_constraint_errors = (u8)(le64_to_cpu(hw_stats->tx_error_pkts) & 0xFF);
+	pma_cnt->port_xmit_discards = cpu_to_be16((u16)(le64_to_cpu(hw_stats->tx_discard_pkts)
+							& 0xFFFF));
+
+	return 0;
+}
+
 int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
 			    struct rdma_hw_stats *stats,
 			    u32 port, int index)
```
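The two new helpers fill the IB PMA PortCounters and PortCountersExt attribute payloads, written at a fixed offset into the MAD data area (the `out_mad->data + 40` casts). The PMA data counters (PortXmitData/PortRcvData) are defined in units of 32-bit words, which is why the byte counters are divided by 4; the non-extended PortCounters fields are only 32 bits wide, so the 64-bit hardware counters are additionally masked before conversion to big-endian. Below is a minimal user-space sketch of that same conversion, not kernel code; the helper names `pma_port_data32` and `pma_port_pkts32` are made up for illustration, and `htonl()` stands in for `cpu_to_be32()`.

```c
/*
 * Standalone illustration of the PortCounters conversions used in the
 * patch above: bytes -> 32-bit words (divide by 4), and truncation of
 * 64-bit hardware counters to the 32-bit wire fields, in big-endian.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>
#include <arpa/inet.h>          /* htonl() stands in for cpu_to_be32() */

/* bytes -> big-endian count of 4-byte words, truncated to 32 bits */
static uint32_t pma_port_data32(uint64_t bytes)
{
	return htonl((uint32_t)((bytes & 0xFFFFFFFF) / 4));
}

/* packet count -> truncated big-endian 32-bit field */
static uint32_t pma_port_pkts32(uint64_t pkts)
{
	return htonl((uint32_t)(pkts & 0xFFFFFFFF));
}

int main(void)
{
	uint64_t tx_bytes = 1ULL << 33;		/* 8 GiB: wraps a 32-bit field */
	uint64_t tx_pkts  = 5000000000ULL;	/* > UINT32_MAX: gets truncated */

	printf("port_xmit_data    (be32) = 0x%08" PRIx32 "\n",
	       pma_port_data32(tx_bytes));
	printf("port_xmit_packets (be32) = 0x%08" PRIx32 "\n",
	       pma_port_pkts32(tx_pkts));
	return 0;
}
```

For the extended attribute (PortCountersExt) the fields are 64 bits wide, so only the divide-by-4 of the byte counters applies and no masking is needed, matching the `cpu_to_be64()` path in the diff.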